import os
import numpy as np
from bs4 import BeautifulSoup
from nltk import word_tokenize
from collections import Counter
import random

from utils import save_pickle


root = 'data'
ratios = [('train', 0.85), ('valid', 0.05), ('test', 0.1)]
max_len = 64
vocab_size = 16000


data = []
path = os.path.join(root, 'main')
topics = os.listdir(path)
i = 0
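
# Walk data/main/<topic>/<article>: strip the HTML with BeautifulSoup,
# tokenize each <p> paragraph, and store it as "topic_id article_id tok ...".
# An empty paragraph is treated as the end of the article's body.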
for topic in topics:
    i += 1
    arts = os.listdir(os.path.join(path, topic))
    j = 0
    for art in arts:
        j += 1
        with open(os.path.join(path, topic, art), encoding='UTF-8') as f:
            lines = f.read()
            soup = BeautifulSoup(lines, 'html.parser')
            for text in soup.find_all('p'):
                # replace punctuation characters with spaces
                text = text.get_text()
                filters = '!"\'#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n'
                translate_dict = dict((c, " ") for c in filters)
                translate_map = str.maketrans(translate_dict)
                text = text.translate(translate_map)
                tokens = word_tokenize(text)
                lines = [str(i)] + [str(j)] + tokens
                if len(tokens) == 0:
                    break
                else:
                    data.append(' '.join(lines))

random.shuffle(data)

num_samples = len(data)
for split, ratio in ratios:
    with open(os.path.join(root, "%s.txt" % split), 'w') as f:
        length = int(num_samples * ratio)
        f.write('\n'.join(data[:length]))
    data = data[length:]
print("Building vocabulary from DUC data")
counter = Counter()
with open(os.path.join(root, 'train.txt')) as f:
for line in f:
words = line.strip().lower().split()[:max_len]
counter.update(words)
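
# ids 0-3 are reserved for the special symbols; only words seen more than
# five times are kept, truncated to the vocab_size - 2 most frequent.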
word_to_idx = {'<pad>': 0, '<unk>': 1, '<bos>': 2, '<eos>': 3}
vocab = [word for word, freq in counter.most_common() if freq > 5]
for word in vocab[:vocab_size - 2]:
    word_to_idx[word] = len(word_to_idx)

# exclude <bos> and <pad> symbols
print("Vocabulary size: %d" % (len(word_to_idx) - 2))
save_pickle(word_to_idx, os.path.join(root, 'vocab.pkl'))

splits = ['train', 'valid', 'test']
num_sents, num_words = 0, 0
func = lambda seq: np.array([
    word_to_idx.get(symbol, word_to_idx['<unk>']) for symbol in seq])
for split in splits:
    print("Creating %s DUC data" % split)
    data = []
    with open(os.path.join(root, "%s.txt" % split)) as f:
        for line in f:
            words = line.strip().lower().split()[:max_len + 2]
            topic, art, words = int(words[0]), int(words[1]), words[2:]
            length = len(words)
            paddings = ['<pad>'] * (max_len - length)
            enc_input = func(words + paddings)
            dec_input = func(['<bos>'] + words + paddings)
            target = func(words + ['<eos>'] + paddings)
            data.append((enc_input, dec_input, target, length, topic))
            num_words += length
    print("%s samples: %d" % (split.capitalize(), len(data)))
    save_pickle(data, os.path.join(root, "%s.pkl" % split))
    num_sents += len(data)

print("Average length: %.2f" % (num_words / num_sents))


def solution(skill, skill_trees):
    answer = 0

    for tree in skill_trees:
        able = True
        for i in range(len(skill) - 1, 0, -1):
            index = tree.find(skill[i])
            if index != -1:
                if tree[:index].find(skill[i - 1]) == -1:
                    able = False
                    break
        if able:
            answer += 1

    return answer
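
# For skill "CBD", only "CBADF" and "AECB" respect the required order
# (C before B before D, ignoring unrelated skills), so this prints 2.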
if __name__ == "__main__":
    skill = "CBD"
    skill_trees = ["BACDE", "CBADF", "AECB", "BDA"]
    print(solution(skill=skill, skill_trees=skill_trees))


from kivy.uix.boxlayout import BoxLayout
from kivy.graphics import Color, Ellipse, Rectangle
from kivy.clock import Clock
from kivy.properties import StringProperty, BooleanProperty
import time
from math import sin, pi

from kivy.lang import Builder
from ui.custom_widgets import I18NPopup, I18NLabel


Builder.load_file('ui/peachy_widgets.kv')


class TouchyLabel(I18NLabel):

    is_on = BooleanProperty(False)

    def on_touch_down(self, touch):
        if touch.is_triple_tap:
            self.is_on = not self.is_on


class I18NHelpPopup(I18NPopup):
    text_source = StringProperty()
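
# Dripper animates the printer's drip feedback: redraw() is rescheduled
# roughly 30 times per second, and draw() renders one ellipse per drip
# recorded in drip_history, positioned by its timestamp.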
class Dripper(BoxLayout):

    def __init__(self, **kwargs):
        super(Dripper, self).__init__(**kwargs)
        self.index = 0.0
        self.sections = 20
        self.section_height = 1
        self.lasttime = time.time()
        Clock.schedule_once(self.redraw)
        self.drip_history = []
        self.count = 0

    def update(self, data):
        self.drip_history = data['drip_history']
        self.count = data['drips']

    def update_parts(self, drips, history):
        self.drip_history = history
        self.count = drips

    def redraw(self, key):
        self.index += (time.time() - self.lasttime) * self.sections
        self.lasttime = time.time()
        if self.index > self.section_height * 2:
            self.index = 0
        self.draw()
        Clock.schedule_once(self.redraw, 1.0 / 30.0)

    def on_height(self, instance, value):
        self.section_height = self.height / self.sections

    def draw(self):
        self.canvas.clear()
        top = time.time()
        bottom = top - self.sections
        self.canvas.add(Color(0.99, 0.99, 0.6, 1.0))
        self.canvas.add(Rectangle(pos=self.pos, size=self.size))
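        # A drip's y position falls as it ages; the sine term gives each
        # drip its own fixed horizontal offset within the widget.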
        for (index, drip) in zip(range(len(self.drip_history), 0, -1), self.drip_history):
            if drip > bottom:
                self.canvas.add(Color(0.35, 0.4, 1.0, 1.0))
                y = ((drip - bottom) / self.sections) * self.height
                s = sin((self.count - index) / (2 * pi))
                self.canvas.add(Ellipse(pos=(self.x + abs(self.width / 2.0 * s), y), size=(self.width / 5.0, 5)))


class LaserWarningPopup(I18NPopup):
    text_source = StringProperty()
    accepted = StringProperty(None)

    def __init__(self, **kwargs):
        super(LaserWarningPopup, self).__init__(**kwargs)

    def is_safe(self):
        # compare string values with ==; `is` tests identity and is not
        # reliable for strings
        return self.accepted == "True"


import lasagne
from lasagne.layers import Conv2DLayer, MaxPool2DLayer, InputLayer
from lasagne.nonlinearities import elu, sigmoid
from utils.maxpool_multiply import MaxPoolMultiplyLayer

from models.cascade_base import CascadeBase
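
# FaceTrigger is a fully convolutional cascade: every conv block also feeds a
# 1x1 sigmoid "decide" branch, and (judging by its name) MaxPoolMultiplyLayer
# gates each later branch by the pooled decisions of the earlier ones.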

class FaceTrigger(CascadeBase):
    def build_network(self):
        net = lasagne.layers.batch_norm(InputLayer((None, 1) + tuple(self.img_shape),
                                                   self.input_X,
                                                   name='network input'))
        convs = []

        # Build network
        for i in range(self.num_cascades):
            net = lasagne.layers.batch_norm(Conv2DLayer(net,
                                                        nonlinearity=elu,
                                                        num_filters=self.num_filters[i],
                                                        filter_size=self.filter_sizes[i],
                                                        pad='same',
                                                        name='conv {}'.format(i + 1)))
            convs.append(net)
            net = MaxPool2DLayer(net,
                                 pool_size=self.pool_sizes[i],
                                 name='Max Pool {} {}'.format(i + 1, i + 2))

        out = Conv2DLayer(net,
                          nonlinearity=sigmoid,
                          num_filters=1,
                          filter_size=1,
                          pad='same',
                          name='prediction layer')

        branches = [None] * self.num_cascades

        # Build branches
        for i in range(self.num_cascades):
            branches[i] = Conv2DLayer(convs[i],
                                      num_filters=1,
                                      filter_size=1,
                                      nonlinearity=sigmoid,
                                      name='decide network {} output'.format(i + 1))

        downsampled_activation_layers = [branches[0]]
        for i in range(self.num_cascades - 1):
            downsampled_activation_layers.append(MaxPoolMultiplyLayer(branches[i + 1],
                                                                      downsampled_activation_layers[-1],
                                                                      self.pool_sizes[i]))
        masked_out = MaxPoolMultiplyLayer(out,
                                          downsampled_activation_layers[-1],
                                          self.pool_sizes[-1])

        return out, downsampled_activation_layers, masked_out


from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.contrib.auth.forms import SetPasswordForm

from . import views
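
# auth_views.password_change is configured with SetPasswordForm, so users set
# a new password without being asked for their old one.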

urlpatterns = [
    url(regex=r'^(?P<pk>\d+)$', view=views.UserDetailView.as_view(), name='user_detail'),
    url(regex=r'^update/(?P<pk>\d+)$', view=views.UserUpdateView.as_view(), name='user_update'),
    url(regex=r'^email/update/(?P<pk>\d+)$', view=views.EmailUpdateView.as_view(), name='email_change'),
    url(regex=r'^password/change$', view=auth_views.password_change,
        kwargs={'template_name': 'accounts/password_change_form.html',
                'current_app': 'accounts', 'password_change_form': SetPasswordForm},
        name='password_change'),
    url(regex=r'^password/change/done$', view=auth_views.password_change_done,
        kwargs={'template_name': 'accounts/password_change_done.html', 'current_app': 'accounts'},
        name='password_change_done'),
    url(regex=r'^switch$', view=views.SwitchUserView.as_view(), name='switch_user'),
    url(regex=r'^all_trainees$', view=views.AllTrainees.as_view(), name='trainee_information'),
]


from matplotlib import pyplot as plt


# Function for testing
# Maps x => x*x
def calculate(x):
    return x * x


inputs = [-0.5, -0.4, -0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3, 0.4, 0.5]
outputs = [calculate(x) for x in inputs]

plt.plot(inputs, outputs)
plt.savefig("plot.png")
"""This file parses vbulletin forums"""
import re
import logging
from BeautifulSoup import BeautifulSoup as bs
import imaget
import pdb
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
date_marker = ["<!-- status icon and date -->", "<!-- / status icon and date -->"]
message_marker = ["<!-- message -->", "<!-- / message -->"]
sig_marker = ["<!-- sig -->", "<!-- / sig -->"]
edit_marker = ["<!-- edit note -->", "<!-- / edit note -->"]
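
# vBulletin wraps each section of a post in HTML comment markers, so the
# parser slices the raw markup between these pairs instead of relying on
# the (less stable) tag structure.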


def get_subforums(main_soup):
    subforums = main_soup.findAll('td', attrs={'class': 'alt1Active'})
    sublinks = []
    for s in subforums:
        links = s.findAll('a')
        for a in links:
            if "http" not in a['href']:
                break
            link = a['href']
            text = a.getText()
            sublinks.append({'name': text, 'link': link})
    return sublinks
def get_threads(subforum_soup):
"""This function gets information on the threads from the subforum page. It also returns the total number of pages"""
threads = subforum_soup.findAll('a', attrs={'id':lambda x:x and x.startswith('thread_title')}) #pulls out the thread links
#page _ of _
page = 1
page_count = subforum_soup.find('td', attrs={'class':'vbmenu_control'})
if page_count:
page_count = page_count.getText()
page_match = re.search(r'(\d+) .+? (\d+)', page_count)
if page_match:
page_count = int(page_match.group(2))
page = int(page_match.group(1))
logger.debug("get_threads: page_count = %d, page = %d" % (page_count, page))
else:
page_count = 1
page = 1
thread_counts = subforum_soup.findAll('td', attrs={'class':'alt2', 'title':lambda x:x and re.match(r'.+?: \d+?', x)})
if len(threads) != len(thread_counts):
logger.error('get_threads: thread-count mismatch. Threads = %d; thread_counts = %d' % (len(threads), len(thread_counts)))
logger.debug('get_threads: threads = %s' % str(threads))
logger.debug('get_threads: thread_counts = %s' % str(thread_counts))
threadlinks = []
for i in range(min(len(threads), len(thread_counts))):
t = threads[i]
c = thread_counts[i]
        sanitized = c['title'].replace(',', '')
        count = int(re.search(r'.+?: (\d+?) .+?: (\d+?)', sanitized).group(1)) + 1
text = t.getText()
link = t['href']
threadlinks.append({'name':text, 'link':link, 'count':count})
return threadlinks, (page, page_count)
def get_page(thread_url, pagenum):
return thread_url + "&page=" + str(pagenum)
def get_posts(page_soup):
page_soup = bs(page_soup)
#page _ of _
page_count = page_soup.find('td', attrs={'class':'vbmenu_control'})
if page_count:
page_count = page_count.getText()
page_match = re.search(r'(\d+) .+? (\d+)', page_count)
if page_match:
page_count = int(page_match.group(2))
page = int(page_match.group(1))
else:
page_count = 1
page = 1
posts = page_soup.findAll('table', attrs={'id':lambda x: x and re.match(r'post', x)})
    logger.info('get_posts: got %d posts' % len(posts))
post_list = []
for p in posts:
post_link = p.find('a', attrs={'name': lambda x: x and re.match(r'\d+', x)})['href']
post_string = str(p)
raw_message = extract(post_string, message_marker[0], message_marker[1])
date = extract(post_string, date_marker[0], date_marker[1])
date = strip_tags(date).strip()
message = get_message(raw_message)
sig = extract(post_string, sig_marker[0], sig_marker[1])
edit = extract(post_string, edit_marker[0], edit_marker[1])
msg_image_srcs = imaget.get_image_src(raw_message)
if msg_image_srcs: msg_image_srcs = msg_image_srcs[0]
print "message source: "
print msg_image_srcs
print "\n\n\n"
user = get_user(post_string, sig)
post_list.append({'date': date, 'message': message, 'edit': edit, 'message images': msg_image_srcs, 'user': user, 'link': post_link})
return post_list, (page, page_count)
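# get_user extracts the poster's profile cell (the td with class 'alt2') from a
# post: display name, profile link, user title, join date, signature markup and
# avatar image. Posts without a 'bigusername' link are returned as guest stubs.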
def get_user(post_string, sig = ""):
user_tag = bs(post_string).find('td', attrs={'class':'alt2'})
user_link = user_tag.find('a', attrs={'class':'bigusername'})
if not user_link: return {'tag': user_tag, 'name': 'guest', 'link': None, 'join': None, 'sig': None, 'image': None, 'title': 'guest'}
user_name = user_link.getText()
user_link = user_link['href']
user_title = user_tag.findAll('div')[1].getText()
user_div = user_tag.findAll('div')
inner_ind = 2
while len(user_div[inner_ind].findAll('div'))<3:
inner_ind+=1
inner_name_soup = user_div[inner_ind].findAll('div')
join_date = inner_name_soup[0].getText()[len("Join Date: "):]
user_image_src = imaget.get_image_src(user_tag, 1)
return {'tag': user_tag, 'name':user_name, 'link': user_link, 'title': user_title, 'join': join_date, 'sig': sig, 'image': user_image_src}
def get_message(message_str):
message_soup = bs(message_str)
images = message_soup.findAll('img')
for item in images:
item.extract()
scripts = message_soup.findAll('script')
for item in scripts:
item.extract()
return str(message_soup)
def extract(string, start_marker, end_marker):
"""wrapper function for slicing into a string"""
start_loc = string.find(start_marker)
end_loc = string.find(end_marker)
if start_loc == -1 or end_loc == -1:
return ""
return string[start_loc+len(start_marker):end_loc]
def strip_tags(source):
return re.sub(r'<.+?>', '', source)
|
normal
|
{
"blob_id": "0846f73482ad86158c3f4e37713d6d965e21d796",
"index": 2671,
"step-1": "\"\"\"This file parses vbulletin forums\"\"\"\n\nimport re\nimport logging\nfrom BeautifulSoup import BeautifulSoup as bs\nimport imaget\nimport pdb\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\ndate_marker = [\"<!-- status icon and date -->\", \"<!-- / status icon and date -->\"]\nmessage_marker = [\"<!-- message -->\", \"<!-- / message -->\"]\nsig_marker = [\"<!-- sig -->\", \"<!-- / sig -->\"]\nedit_marker = [\"<!-- edit note -->\", \"<!-- / edit note -->\"]\n\n\n\ndef get_subforums(main_soup):\n\n subforums = main_soup.findAll('td', attrs={'class':'alt1Active'})\n sublinks = []\n for s in subforums:\n links = s.findAll('a')\n for a in links:\n if not \"http\" in a['href']:\n break\n link = a['href']\n text = a.getText()\n sublinks.append({'name':text, 'link':link})\n\n return sublinks\n\n\ndef get_threads(subforum_soup):\n \"\"\"This function gets information on the threads from the subforum page. It also returns the total number of pages\"\"\"\n threads = subforum_soup.findAll('a', attrs={'id':lambda x:x and x.startswith('thread_title')}) #pulls out the thread links\n\n #page _ of _\n page = 1\n page_count = subforum_soup.find('td', attrs={'class':'vbmenu_control'})\n if page_count:\n page_count = page_count.getText()\n page_match = re.search(r'(\\d+) .+? (\\d+)', page_count)\n if page_match:\n page_count = int(page_match.group(2))\n page = int(page_match.group(1))\n logger.debug(\"get_threads: page_count = %d, page = %d\" % (page_count, page))\n else:\n page_count = 1\n page = 1\n\n thread_counts = subforum_soup.findAll('td', attrs={'class':'alt2', 'title':lambda x:x and re.match(r'.+?: \\d+?', x)})\n if len(threads) != len(thread_counts):\n logger.error('get_threads: thread-count mismatch. Threads = %d; thread_counts = %d' % (len(threads), len(thread_counts)))\n logger.debug('get_threads: threads = %s' % str(threads))\n\tlogger.debug('get_threads: thread_counts = %s' % str(thread_counts))\n threadlinks = []\n for i in range(min(len(threads), len(thread_counts))):\n t = threads[i]\n c = thread_counts[i]\n sanatized = c['title'].replace(',', '')\n count = int(re.search(r'.+?: (\\d+?) .+?: (\\d+?)',sanatized).group(1)) + 1\n text = t.getText()\n link = t['href']\n threadlinks.append({'name':text, 'link':link, 'count':count})\n return threadlinks, (page, page_count)\n\ndef get_page(thread_url, pagenum):\n return thread_url + \"&page=\" + str(pagenum)\n\ndef get_posts(page_soup):\n\n page_soup = bs(page_soup)\n\n\n #page _ of _\n page_count = page_soup.find('td', attrs={'class':'vbmenu_control'})\n if page_count:\n page_count = page_count.getText()\n page_match = re.search(r'(\\d+) .+? 
(\\d+)', page_count)\n if page_match:\n page_count = int(page_match.group(2))\n page = int(page_match.group(1))\n else:\n page_count = 1\n page = 1\n posts = page_soup.findAll('table', attrs={'id':lambda x: x and re.match(r'post', x)})\n logging.info('get_post: got %d posts' % len(posts))\n post_list = []\n for p in posts:\n post_link = p.find('a', attrs={'name': lambda x: x and re.match(r'\\d+', x)})['href']\n post_string = str(p)\n raw_message = extract(post_string, message_marker[0], message_marker[1])\n\n date = extract(post_string, date_marker[0], date_marker[1])\n date = strip_tags(date).strip()\n message = get_message(raw_message)\n sig = extract(post_string, sig_marker[0], sig_marker[1])\n edit = extract(post_string, edit_marker[0], edit_marker[1])\n\n msg_image_srcs = imaget.get_image_src(raw_message)\n if msg_image_srcs: msg_image_srcs = msg_image_srcs[0]\n print \"message source: \" \n print msg_image_srcs\n print \"\\n\\n\\n\"\n\n user = get_user(post_string, sig)\n\n post_list.append({'date': date, 'message': message, 'edit': edit, 'message images': msg_image_srcs, 'user': user, 'link': post_link})\n\n return post_list, (page, page_count)\n\n\n\ndef get_user(post_string, sig = \"\"):\n\n user_tag = bs(post_string).find('td', attrs={'class':'alt2'})\n user_link = user_tag.find('a', attrs={'class':'bigusername'})\n if not user_link: return {'tag': user_tag, 'name': 'guest', 'link': None, 'join': None, 'sig': None, 'image': None, 'title': 'guest'}\n user_name = user_link.getText()\n user_link = user_link['href']\n user_title = user_tag.findAll('div')[1].getText()\n \n user_div = user_tag.findAll('div')\n inner_ind = 2\n while len(user_div[inner_ind].findAll('div'))<3:\n inner_ind+=1\n inner_name_soup = user_div[inner_ind].findAll('div')\n join_date = inner_name_soup[0].getText()[len(\"Join Date: \"):]\n\n user_image_src = imaget.get_image_src(user_tag, 1)\n\n return {'tag': user_tag, 'name':user_name, 'link': user_link, 'title': user_title, 'join': join_date, 'sig': sig, 'image': user_image_src}\n\n \n \n\ndef get_message(message_str):\n message_soup = bs(message_str)\n images = message_soup.findAll('img')\n for item in images:\n item.extract()\n scripts = message_soup.findAll('script')\n for item in scripts:\n item.extract()\n return str(message_soup)\n \n \n\ndef extract(string, start_marker, end_marker):\n \"\"\"wrapper function for slicing into a string\"\"\"\n start_loc = string.find(start_marker)\n end_loc = string.find(end_marker)\n if start_loc == -1 or end_loc == -1:\n return \"\"\n return string[start_loc+len(start_marker):end_loc]\n\ndef strip_tags(source):\n return re.sub(r'<.+?>', '', source) \n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from flask import Flask, url_for, render_template, request
import os
import blescan
import sys
import requests
import logging
from logging.handlers import RotatingFileHandler
import json
from datetime import datetime
import bluetooth._bluetooth as bluez
app = Flask(__name__)
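# Default page: scan BLE advertisements with blescan for up to 10 events; if the
# known stop beacon (UUID 2f234454-cf6d-4a0f-adf2-f4911ba9ffa6) is among them,
# fetch that stop's schedule from the stop2 API and render one "stop bus" button
# per upcoming departure, otherwise report that the user is not near a stop.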
@app.route('/sivut/')
def default_page():
dev_id = 0
try:
sock = bluez.hci_open_dev(dev_id)
app.logger.info("ble thread started")
except:
app.logger.info("error accessing bluetooth device...")
sys.exit(1)
blescan.hci_le_set_scan_parameters(sock)
blescan.hci_enable_le_scan(sock)
returnedList = blescan.parse_events(sock, 10)
app.logger.info(returnedList)
print "----------"
setti = set()
stop_name = ""
for beacon in returnedList:
if '2f234454cf6d4a0fadf2f4911ba9ffa6' in beacon:
app.logger.info("beacon loydetty")
r = requests.get("http://stop2.herokuapp.com/stop/2f234454-cf6d-4a0f-adf2-f4911ba9ffa6")
content = r.content
content = json.loads(content)
stop_name = content['stop_name']
palautus = "<h3>Press button to stop bus:</h3> "
for asd in content['schedule']:
setti.add(asd['line'])
arrival = datetime.fromtimestamp(int(asd['arrival'])).strftime('%H:%M')
palautus += " <div class='btn btn-lg stop_bus' style='margin:5px;color:white;background:#F092CD;' id='" + asd['line'] + "'>" + asd['line'] + " " + arrival \
+ "</div> "
content = palautus
break
else:
content = "<h3>You're not near stop</h3>"
app.logger.info("beacon EI loydetty")
return render_template('index_templatelocal.html', content=content, setti=setti, stop_name=stop_name)
@app.route('/stops')
def show_stops():
stops = '''
{"name": "718 to Rautatientori (HSL:1020201)", "stops": [
{"code": "3032", "name": "Valtimontie", "gtfsId": "HSL:1240123"},
{"code": "3030", "name": "Sumatrantie", "gtfsId": "HSL:1240106"},
{"code": "3028", "name": "Kumpulan kampus", "gtfsId": "HSL:1240118"},
{"code": "3024", "name": "Vallilan varikko", "gtfsId": "HSL:1220104"},
{"code": "3022", "name": "Ristikkokatu", "gtfsId": "HSL:1220102"},
{"code": "2410", "name": "S\u00f6rn\u00e4inen(M)", "gtfsId": "HSL:1113131"},
{"code": "2404", "name": "Haapaniemi", "gtfsId": "HSL:1112126"},
{"code": "2402", "name": "Hakaniemi", "gtfsId": "HSL:1111114"},
{"code": null, "name": "Rautatientori", "gtfsId": "HSL:1020201"}]}
'''
return render_template('show_stops.html', stops=json.loads(stops))
if __name__ == "__main__":
port = int(os.environ.get('PORT', 5050))
handler = RotatingFileHandler('foo.log', maxBytes=10000, backupCount=1)
handler.setLevel(logging.INFO)
app.logger.addHandler(handler)
app.run(host='0.0.0.0', port = port)
|
normal
|
{
"blob_id": "040942e2e09b5c2df5c08207b9c033471b117608",
"index": 500,
"step-1": " \nfrom flask import Flask, url_for, render_template, request\nimport os\nimport blescan\nimport sys\nimport requests\nimport logging\nfrom logging.handlers import RotatingFileHandler\nimport json\nfrom datetime import datetime\n\nimport bluetooth._bluetooth as bluez\n\n\napp = Flask(__name__)\n\n\n@app.route('/sivut/')\ndef default_page():\n dev_id = 0\n try:\n sock = bluez.hci_open_dev(dev_id)\n app.logger.info(\"ble thread started\")\n except:\n app.logger.info(\"error accessing bluetooth device...\")\n sys.exit(1)\n\n blescan.hci_le_set_scan_parameters(sock)\n blescan.hci_enable_le_scan(sock)\n\n returnedList = blescan.parse_events(sock, 10)\n app.logger.info(returnedList)\n print \"----------\"\n setti = set()\n stop_name = \"\"\n for beacon in returnedList:\n if '2f234454cf6d4a0fadf2f4911ba9ffa6' in beacon:\n app.logger.info(\"beacon loydetty\")\n r = requests.get(\"http://stop2.herokuapp.com/stop/2f234454-cf6d-4a0f-adf2-f4911ba9ffa6\")\n content = r.content\n content = json.loads(content)\n stop_name = content['stop_name']\n palautus = \"<h3>Press button to stop bus:</h3> \"\n for asd in content['schedule']:\n setti.add(asd['line'])\n arrival = datetime.fromtimestamp(int(asd['arrival'])).strftime('%H:%M')\n palautus += \" <div class='btn btn-lg stop_bus' style='margin:5px;color:white;background:#F092CD;' id='\" + asd['line'] + \"'>\" + asd['line'] + \" \" + arrival \\\n + \"</div> \"\n content = palautus\n break\n else:\n content = \"<h3>You're not near stop</h3>\"\n app.logger.info(\"beacon EI loydetty\")\n return render_template('index_templatelocal.html', content=content, setti=setti, stop_name=stop_name)\n\n\n@app.route('/stops')\ndef show_stops():\n stops = '''\n {\"name\": \"718 to Rautatientori (HSL:1020201)\", \"stops\": [\n {\"code\": \"3032\", \"name\": \"Valtimontie\", \"gtfsId\": \"HSL:1240123\"},\n {\"code\": \"3030\", \"name\": \"Sumatrantie\", \"gtfsId\": \"HSL:1240106\"},\n {\"code\": \"3028\", \"name\": \"Kumpulan kampus\", \"gtfsId\": \"HSL:1240118\"},\n {\"code\": \"3024\", \"name\": \"Vallilan varikko\", \"gtfsId\": \"HSL:1220104\"},\n {\"code\": \"3022\", \"name\": \"Ristikkokatu\", \"gtfsId\": \"HSL:1220102\"},\n {\"code\": \"2410\", \"name\": \"S\\u00f6rn\\u00e4inen(M)\", \"gtfsId\": \"HSL:1113131\"},\n {\"code\": \"2404\", \"name\": \"Haapaniemi\", \"gtfsId\": \"HSL:1112126\"},\n {\"code\": \"2402\", \"name\": \"Hakaniemi\", \"gtfsId\": \"HSL:1111114\"},\n {\"code\": null, \"name\": \"Rautatientori\", \"gtfsId\": \"HSL:1020201\"}]}\n '''\n return render_template('show_stops.html', stops=json.loads(stops))\n\n\n\nif __name__ == \"__main__\":\n port = int(os.environ.get('PORT', 5050))\n handler = RotatingFileHandler('foo.log', maxBytes=10000, backupCount=1)\n handler.setLevel(logging.INFO)\n app.logger.addHandler(handler)\n app.run(host='0.0.0.0', port = port)\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class SystemTrayIcon(QSystemTrayIcon):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def set_icon_state(self, state):
pixmap = QApplication.instance().windowIcon().pixmap(256, 256, state)
self.setIcon(QIcon(pixmap))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SystemTrayIcon(QSystemTrayIcon):
def __init__(self, parent=None):
super(SystemTrayIcon, self).__init__(parent)
self.set_icon_state(QIcon.Disabled)
menu = QMenu(parent)
self.exit_action = menu.addAction('E&xit')
self.exit_action.triggered.connect(self.close_application)
self.setContextMenu(menu)
self.setToolTip(QApplication.instance().applicationName())
<|reserved_special_token_0|>
def set_icon_state(self, state):
pixmap = QApplication.instance().windowIcon().pixmap(256, 256, state)
self.setIcon(QIcon(pixmap))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SystemTrayIcon(QSystemTrayIcon):
def __init__(self, parent=None):
super(SystemTrayIcon, self).__init__(parent)
self.set_icon_state(QIcon.Disabled)
menu = QMenu(parent)
self.exit_action = menu.addAction('E&xit')
self.exit_action.triggered.connect(self.close_application)
self.setContextMenu(menu)
self.setToolTip(QApplication.instance().applicationName())
def close_application(self):
self.parent().close()
def set_icon_state(self, state):
pixmap = QApplication.instance().windowIcon().pixmap(256, 256, state)
self.setIcon(QIcon(pixmap))
<|reserved_special_token_1|>
from PyQt4.QtGui import QSystemTrayIcon, QApplication, QMenu, QIcon
class SystemTrayIcon(QSystemTrayIcon):
def __init__(self, parent=None):
super(SystemTrayIcon, self).__init__(parent)
self.set_icon_state(QIcon.Disabled)
menu = QMenu(parent)
self.exit_action = menu.addAction('E&xit')
self.exit_action.triggered.connect(self.close_application)
self.setContextMenu(menu)
self.setToolTip(QApplication.instance().applicationName())
def close_application(self):
self.parent().close()
def set_icon_state(self, state):
pixmap = QApplication.instance().windowIcon().pixmap(256, 256, state)
self.setIcon(QIcon(pixmap))
|
flexible
|
{
"blob_id": "c6e315d7dd44b998f64eee079f2d8455ffecdc30",
"index": 9931,
"step-1": "<mask token>\n\n\nclass SystemTrayIcon(QSystemTrayIcon):\n <mask token>\n <mask token>\n\n def set_icon_state(self, state):\n pixmap = QApplication.instance().windowIcon().pixmap(256, 256, state)\n self.setIcon(QIcon(pixmap))\n",
"step-2": "<mask token>\n\n\nclass SystemTrayIcon(QSystemTrayIcon):\n\n def __init__(self, parent=None):\n super(SystemTrayIcon, self).__init__(parent)\n self.set_icon_state(QIcon.Disabled)\n menu = QMenu(parent)\n self.exit_action = menu.addAction('E&xit')\n self.exit_action.triggered.connect(self.close_application)\n self.setContextMenu(menu)\n self.setToolTip(QApplication.instance().applicationName())\n <mask token>\n\n def set_icon_state(self, state):\n pixmap = QApplication.instance().windowIcon().pixmap(256, 256, state)\n self.setIcon(QIcon(pixmap))\n",
"step-3": "<mask token>\n\n\nclass SystemTrayIcon(QSystemTrayIcon):\n\n def __init__(self, parent=None):\n super(SystemTrayIcon, self).__init__(parent)\n self.set_icon_state(QIcon.Disabled)\n menu = QMenu(parent)\n self.exit_action = menu.addAction('E&xit')\n self.exit_action.triggered.connect(self.close_application)\n self.setContextMenu(menu)\n self.setToolTip(QApplication.instance().applicationName())\n\n def close_application(self):\n self.parent().close()\n\n def set_icon_state(self, state):\n pixmap = QApplication.instance().windowIcon().pixmap(256, 256, state)\n self.setIcon(QIcon(pixmap))\n",
"step-4": "from PyQt4.QtGui import QSystemTrayIcon, QApplication, QMenu, QIcon\n\n\nclass SystemTrayIcon(QSystemTrayIcon):\n\n def __init__(self, parent=None):\n super(SystemTrayIcon, self).__init__(parent)\n self.set_icon_state(QIcon.Disabled)\n menu = QMenu(parent)\n self.exit_action = menu.addAction('E&xit')\n self.exit_action.triggered.connect(self.close_application)\n self.setContextMenu(menu)\n self.setToolTip(QApplication.instance().applicationName())\n\n def close_application(self):\n self.parent().close()\n\n def set_icon_state(self, state):\n pixmap = QApplication.instance().windowIcon().pixmap(256, 256, state)\n self.setIcon(QIcon(pixmap))\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
#"countinu" example : repeat printing "Too small" or "Input is..." according to input's lenth
while True:
s=raw_input('Enter something: ')
if s == 'quit' :
break
if len(s) <3:
print 'Too small'
continue
        # continue: skip the rest of this iteration and go to the next loop pass
    print 'Input is of sufficient length'
|
normal
|
{
"blob_id": "915d6547057f43c1cc5d96d9cb4529c56bc85559",
"index": 3412,
"step-1": "#\"countinu\" example : repeat printing \"Too small\" or \"Input is...\" according to input's lenth\r\n\r\nwhile True:\r\n s=raw_input('Enter something: ')\r\n if s == 'quit' :\r\n break\r\n if len(s) <3:\r\n print 'Too small'\r\n continue\r\n #continue : not excute lower line, go to next loop\r\n print 'Input is of sufficient lenth'\r\n\r\n\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from src.testcase.case import Case
from src.utils import *
from src.protocol.register import get_conn
from src.precondition import *
class OneCase(object):
"""
Main flow of running one case's autotest
"""
PASS = True
FAIL = False
def __init__(self, case_path, *args, **kwargs):
self._case_path = str(case_path)
self._case_dict = {}
self._step_result = []
self._step_msg = []
self._passed = False
def run(self):
self.load_case(self._case_path)
self.satisfy_precondition(self._case_dict)
self.exec_steps(self._case_dict)
self.save_result()
def load_case(self, case_path):
self._case_dict = Case(file_path=case_path).case_dict
def satisfy_precondition(self, case_dict):
pre = case_dict.get('precondition')
if pre:
# pre functions
func_list = pre.get('prefunction')
for func in func_list:
_func = eval(func.get('func_name'))
_args = {_.get('name'): trans_type(_.get('value'), _.get('type')) for _ in func.get('args')}
_func(**_args)
# dependency
check_dependency(pre.get('dependency'))
def check_dependency(self):
pass # ToDo
def exec_steps(self, case_dict):
"""
"""
for step in case_dict.get('step'):
# input
_input = step.get('input')
res = {}
for protocol, _args in _input.iteritems():
req = get_conn(protocol)(**_args)
res = req.response
# compare output
_output = step.get('output')
if _output.get('strict'):
pass # ToDo
try:
for _ in _output.get('expect'):
_var = _.get('var')
_expect_value = trans_type(_['val']['value'], _['val']['type'])
_real_value = res.get(_var)
if _.get('cmp') == '==':
assert _expect_value == _real_value, "Not equal! \n\tExpect: {}\n\tGot: {}".format(
_expect_value, _real_value)
except AssertionError as e:
self._step_result.append(self.FAIL)
self._step_msg.append(e.message)
else:
self._step_result.append(self.PASS)
self._step_msg.append('Passed!')
self._passed = all(self._step_result)
def save_result(self):
"""
save result for this test
1) print to console
2) record to mysql
3) upload to testlink
"""
self.print_to_console()
def print_to_console(self):
if self._passed:
print('All steps passed for case: {}'.format(self._case_dict.get('name')))
else:
err('Failed on case: {}'.format(self._case_dict.get('name')))
step_length = range(1, len(self._step_result) + 1)
for i, result, msg in zip(step_length, self._step_result, self._step_msg):
if result == self.FAIL:
err('Step {} failed for reason:\n\t{}'.format(i, msg))
if __name__ == '__main__':
testcase = OneCase('/Users/eacon/github/APIAutoTestFramework/case/sample.json')
testcase.run()
|
normal
|
{
"blob_id": "f658959bf7fa5e02a577119930c9b9c1ef59f432",
"index": 2845,
"step-1": "<mask token>\n\n\nclass OneCase(object):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, case_path, *args, **kwargs):\n self._case_path = str(case_path)\n self._case_dict = {}\n self._step_result = []\n self._step_msg = []\n self._passed = False\n\n def run(self):\n self.load_case(self._case_path)\n self.satisfy_precondition(self._case_dict)\n self.exec_steps(self._case_dict)\n self.save_result()\n\n def load_case(self, case_path):\n self._case_dict = Case(file_path=case_path).case_dict\n\n def satisfy_precondition(self, case_dict):\n pre = case_dict.get('precondition')\n if pre:\n func_list = pre.get('prefunction')\n for func in func_list:\n _func = eval(func.get('func_name'))\n _args = {_.get('name'): trans_type(_.get('value'), _.get(\n 'type')) for _ in func.get('args')}\n _func(**_args)\n check_dependency(pre.get('dependency'))\n <mask token>\n <mask token>\n\n def save_result(self):\n \"\"\"\n save result for this test\n 1) print to console\n 2) record to mysql\n 3) upload to testlink\n \"\"\"\n self.print_to_console()\n\n def print_to_console(self):\n if self._passed:\n print('All steps passed for case: {}'.format(self._case_dict.\n get('name')))\n else:\n err('Failed on case: {}'.format(self._case_dict.get('name')))\n step_length = range(1, len(self._step_result) + 1)\n for i, result, msg in zip(step_length, self._step_result, self.\n _step_msg):\n if result == self.FAIL:\n err('Step {} failed for reason:\\n\\t{}'.format(i, msg))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass OneCase(object):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, case_path, *args, **kwargs):\n self._case_path = str(case_path)\n self._case_dict = {}\n self._step_result = []\n self._step_msg = []\n self._passed = False\n\n def run(self):\n self.load_case(self._case_path)\n self.satisfy_precondition(self._case_dict)\n self.exec_steps(self._case_dict)\n self.save_result()\n\n def load_case(self, case_path):\n self._case_dict = Case(file_path=case_path).case_dict\n\n def satisfy_precondition(self, case_dict):\n pre = case_dict.get('precondition')\n if pre:\n func_list = pre.get('prefunction')\n for func in func_list:\n _func = eval(func.get('func_name'))\n _args = {_.get('name'): trans_type(_.get('value'), _.get(\n 'type')) for _ in func.get('args')}\n _func(**_args)\n check_dependency(pre.get('dependency'))\n\n def check_dependency(self):\n pass\n\n def exec_steps(self, case_dict):\n \"\"\"\n \"\"\"\n for step in case_dict.get('step'):\n _input = step.get('input')\n res = {}\n for protocol, _args in _input.iteritems():\n req = get_conn(protocol)(**_args)\n res = req.response\n _output = step.get('output')\n if _output.get('strict'):\n pass\n try:\n for _ in _output.get('expect'):\n _var = _.get('var')\n _expect_value = trans_type(_['val']['value'], _['val'][\n 'type'])\n _real_value = res.get(_var)\n if _.get('cmp') == '==':\n assert _expect_value == _real_value, 'Not equal! \\n\\tExpect: {}\\n\\tGot: {}'.format(\n _expect_value, _real_value)\n except AssertionError as e:\n self._step_result.append(self.FAIL)\n self._step_msg.append(e.message)\n else:\n self._step_result.append(self.PASS)\n self._step_msg.append('Passed!')\n self._passed = all(self._step_result)\n\n def save_result(self):\n \"\"\"\n save result for this test\n 1) print to console\n 2) record to mysql\n 3) upload to testlink\n \"\"\"\n self.print_to_console()\n\n def print_to_console(self):\n if self._passed:\n print('All steps passed for case: {}'.format(self._case_dict.\n get('name')))\n else:\n err('Failed on case: {}'.format(self._case_dict.get('name')))\n step_length = range(1, len(self._step_result) + 1)\n for i, result, msg in zip(step_length, self._step_result, self.\n _step_msg):\n if result == self.FAIL:\n err('Step {} failed for reason:\\n\\t{}'.format(i, msg))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass OneCase(object):\n <mask token>\n PASS = True\n FAIL = False\n\n def __init__(self, case_path, *args, **kwargs):\n self._case_path = str(case_path)\n self._case_dict = {}\n self._step_result = []\n self._step_msg = []\n self._passed = False\n\n def run(self):\n self.load_case(self._case_path)\n self.satisfy_precondition(self._case_dict)\n self.exec_steps(self._case_dict)\n self.save_result()\n\n def load_case(self, case_path):\n self._case_dict = Case(file_path=case_path).case_dict\n\n def satisfy_precondition(self, case_dict):\n pre = case_dict.get('precondition')\n if pre:\n func_list = pre.get('prefunction')\n for func in func_list:\n _func = eval(func.get('func_name'))\n _args = {_.get('name'): trans_type(_.get('value'), _.get(\n 'type')) for _ in func.get('args')}\n _func(**_args)\n check_dependency(pre.get('dependency'))\n\n def check_dependency(self):\n pass\n\n def exec_steps(self, case_dict):\n \"\"\"\n \"\"\"\n for step in case_dict.get('step'):\n _input = step.get('input')\n res = {}\n for protocol, _args in _input.iteritems():\n req = get_conn(protocol)(**_args)\n res = req.response\n _output = step.get('output')\n if _output.get('strict'):\n pass\n try:\n for _ in _output.get('expect'):\n _var = _.get('var')\n _expect_value = trans_type(_['val']['value'], _['val'][\n 'type'])\n _real_value = res.get(_var)\n if _.get('cmp') == '==':\n assert _expect_value == _real_value, 'Not equal! \\n\\tExpect: {}\\n\\tGot: {}'.format(\n _expect_value, _real_value)\n except AssertionError as e:\n self._step_result.append(self.FAIL)\n self._step_msg.append(e.message)\n else:\n self._step_result.append(self.PASS)\n self._step_msg.append('Passed!')\n self._passed = all(self._step_result)\n\n def save_result(self):\n \"\"\"\n save result for this test\n 1) print to console\n 2) record to mysql\n 3) upload to testlink\n \"\"\"\n self.print_to_console()\n\n def print_to_console(self):\n if self._passed:\n print('All steps passed for case: {}'.format(self._case_dict.\n get('name')))\n else:\n err('Failed on case: {}'.format(self._case_dict.get('name')))\n step_length = range(1, len(self._step_result) + 1)\n for i, result, msg in zip(step_length, self._step_result, self.\n _step_msg):\n if result == self.FAIL:\n err('Step {} failed for reason:\\n\\t{}'.format(i, msg))\n\n\n<mask token>\n",
"step-4": "from src.testcase.case import Case\nfrom src.utils import *\nfrom src.protocol.register import get_conn\nfrom src.precondition import *\n\n\nclass OneCase(object):\n \"\"\"\n Main flow of running one case's autotest\n \"\"\"\n PASS = True\n FAIL = False\n\n def __init__(self, case_path, *args, **kwargs):\n self._case_path = str(case_path)\n self._case_dict = {}\n self._step_result = []\n self._step_msg = []\n self._passed = False\n\n def run(self):\n self.load_case(self._case_path)\n self.satisfy_precondition(self._case_dict)\n self.exec_steps(self._case_dict)\n self.save_result()\n\n def load_case(self, case_path):\n self._case_dict = Case(file_path=case_path).case_dict\n\n def satisfy_precondition(self, case_dict):\n pre = case_dict.get('precondition')\n if pre:\n func_list = pre.get('prefunction')\n for func in func_list:\n _func = eval(func.get('func_name'))\n _args = {_.get('name'): trans_type(_.get('value'), _.get(\n 'type')) for _ in func.get('args')}\n _func(**_args)\n check_dependency(pre.get('dependency'))\n\n def check_dependency(self):\n pass\n\n def exec_steps(self, case_dict):\n \"\"\"\n \"\"\"\n for step in case_dict.get('step'):\n _input = step.get('input')\n res = {}\n for protocol, _args in _input.iteritems():\n req = get_conn(protocol)(**_args)\n res = req.response\n _output = step.get('output')\n if _output.get('strict'):\n pass\n try:\n for _ in _output.get('expect'):\n _var = _.get('var')\n _expect_value = trans_type(_['val']['value'], _['val'][\n 'type'])\n _real_value = res.get(_var)\n if _.get('cmp') == '==':\n assert _expect_value == _real_value, 'Not equal! \\n\\tExpect: {}\\n\\tGot: {}'.format(\n _expect_value, _real_value)\n except AssertionError as e:\n self._step_result.append(self.FAIL)\n self._step_msg.append(e.message)\n else:\n self._step_result.append(self.PASS)\n self._step_msg.append('Passed!')\n self._passed = all(self._step_result)\n\n def save_result(self):\n \"\"\"\n save result for this test\n 1) print to console\n 2) record to mysql\n 3) upload to testlink\n \"\"\"\n self.print_to_console()\n\n def print_to_console(self):\n if self._passed:\n print('All steps passed for case: {}'.format(self._case_dict.\n get('name')))\n else:\n err('Failed on case: {}'.format(self._case_dict.get('name')))\n step_length = range(1, len(self._step_result) + 1)\n for i, result, msg in zip(step_length, self._step_result, self.\n _step_msg):\n if result == self.FAIL:\n err('Step {} failed for reason:\\n\\t{}'.format(i, msg))\n\n\nif __name__ == '__main__':\n testcase = OneCase(\n '/Users/eacon/github/APIAutoTestFramework/case/sample.json')\n testcase.run()\n",
"step-5": "from src.testcase.case import Case\nfrom src.utils import *\nfrom src.protocol.register import get_conn\nfrom src.precondition import *\n\n\nclass OneCase(object):\n \"\"\"\n Main flow of running one case's autotest\n \"\"\"\n PASS = True\n FAIL = False\n\n def __init__(self, case_path, *args, **kwargs):\n self._case_path = str(case_path)\n self._case_dict = {}\n self._step_result = []\n self._step_msg = []\n self._passed = False\n\n def run(self):\n self.load_case(self._case_path)\n self.satisfy_precondition(self._case_dict)\n self.exec_steps(self._case_dict)\n self.save_result()\n\n def load_case(self, case_path):\n self._case_dict = Case(file_path=case_path).case_dict\n\n def satisfy_precondition(self, case_dict):\n pre = case_dict.get('precondition')\n if pre:\n # pre functions\n func_list = pre.get('prefunction')\n for func in func_list:\n _func = eval(func.get('func_name'))\n _args = {_.get('name'): trans_type(_.get('value'), _.get('type')) for _ in func.get('args')}\n _func(**_args)\n # dependency\n check_dependency(pre.get('dependency'))\n\n def check_dependency(self):\n pass # ToDo\n\n def exec_steps(self, case_dict):\n \"\"\"\n \"\"\"\n for step in case_dict.get('step'):\n # input\n _input = step.get('input')\n res = {}\n for protocol, _args in _input.iteritems():\n req = get_conn(protocol)(**_args)\n res = req.response\n # compare output\n _output = step.get('output')\n if _output.get('strict'):\n pass # ToDo\n try:\n for _ in _output.get('expect'):\n _var = _.get('var')\n _expect_value = trans_type(_['val']['value'], _['val']['type'])\n _real_value = res.get(_var)\n if _.get('cmp') == '==':\n assert _expect_value == _real_value, \"Not equal! \\n\\tExpect: {}\\n\\tGot: {}\".format(\n _expect_value, _real_value)\n except AssertionError as e:\n self._step_result.append(self.FAIL)\n self._step_msg.append(e.message)\n else:\n self._step_result.append(self.PASS)\n self._step_msg.append('Passed!')\n self._passed = all(self._step_result)\n\n def save_result(self):\n \"\"\"\n save result for this test\n 1) print to console\n 2) record to mysql\n 3) upload to testlink\n \"\"\"\n self.print_to_console()\n\n def print_to_console(self):\n if self._passed:\n print('All steps passed for case: {}'.format(self._case_dict.get('name')))\n else:\n err('Failed on case: {}'.format(self._case_dict.get('name')))\n step_length = range(1, len(self._step_result) + 1)\n for i, result, msg in zip(step_length, self._step_result, self._step_msg):\n if result == self.FAIL:\n err('Step {} failed for reason:\\n\\t{}'.format(i, msg))\n\n\nif __name__ == '__main__':\n testcase = OneCase('/Users/eacon/github/APIAutoTestFramework/case/sample.json')\n testcase.run()",
"step-ids": [
7,
9,
10,
13,
14
]
}
|
[
7,
9,
10,
13,
14
] |
#
# * Python 57, Correct Lineup
# * Easy
# * For the opening ceremony of the upcoming sports event an even number of
# * athletes were picked. They formed a correct lineup, i.e. such a lineup in
# * which no two boys or two girls stand together. The first person in the lineup
# * was a girl. As a part of the performance, adjacent pairs of athletes (i.e.
# * the first one together with the second one, the third one together with the
# * fourth one, etc.) had to swap positions with each other.
# * Given a list of athletes, return the list of athletes after the changes, i.e.
# * after each adjacent pair of athletes is swapped.
# * Example
# For athletes = [1, 2, 3, 4, 5, 6], the output should be
# correctLineup(athletes) = [2, 1, 4, 3, 6, 5].
# * Input/Output
# [execution time limit] 4 seconds (py3)
# [input] array.integer athletes
# A list of even length representing the athletes, where each athlete is given
# by the number written on their back.
# Guaranteed constraints:
# 2 ≤ athletes.length ≤ 20,
# 1 ≤ athletes[i] ≤ 100.
# [output] array.integer
# Array of athletes with each pair of adjacent elements swapped.
#%%
# * Solution 1
def correctLineup1(athletes:list)-> list:
return [athletes[i+1] if i%2==0 else athletes[i-1] for i in range(len(athletes))]
# * Solution 2
# ! bitwise operator ^.
def correctLineup2(athletes:list)-> list:
return [athletes[i^1] for i in range(len(athletes))]
a1 = [1, 2, 3, 4, 5, 6]
r1 = correctLineup1(a1)
print(r1)
# %%
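# Why the XOR trick in Solution 2 works: i ^ 1 flips the lowest bit of the index,
# so every even index i maps to i + 1 and every odd index maps back to i - 1,
# which is exactly the adjacent-pair swap the problem describes. Both solutions
# therefore return the same lineup; a quick self-check on the index mapping:
assert [i ^ 1 for i in range(6)] == [1, 0, 3, 2, 5, 4]
assert correctLineup1([1, 2, 3, 4, 5, 6]) == [2, 1, 4, 3, 6, 5]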
|
normal
|
{
"blob_id": "6c5f60e7a122e3da5e6705bfacf73a361f6c1362",
"index": 1120,
"step-1": "def correctLineup1(athletes: list) ->list:\n return [(athletes[i + 1] if i % 2 == 0 else athletes[i - 1]) for i in\n range(len(athletes))]\n\n\n<mask token>\n",
"step-2": "def correctLineup1(athletes: list) ->list:\n return [(athletes[i + 1] if i % 2 == 0 else athletes[i - 1]) for i in\n range(len(athletes))]\n\n\ndef correctLineup1(athletes: list) ->list:\n return [athletes[i ^ 1] for i in range(len(athletes))]\n\n\n<mask token>\n",
"step-3": "def correctLineup1(athletes: list) ->list:\n return [(athletes[i + 1] if i % 2 == 0 else athletes[i - 1]) for i in\n range(len(athletes))]\n\n\ndef correctLineup1(athletes: list) ->list:\n return [athletes[i ^ 1] for i in range(len(athletes))]\n\n\n<mask token>\nprint(r1)\n",
"step-4": "def correctLineup1(athletes: list) ->list:\n return [(athletes[i + 1] if i % 2 == 0 else athletes[i - 1]) for i in\n range(len(athletes))]\n\n\ndef correctLineup1(athletes: list) ->list:\n return [athletes[i ^ 1] for i in range(len(athletes))]\n\n\na1 = [1, 2, 3, 4, 5, 6]\nr1 = correctLineup1(a1)\nprint(r1)\n",
"step-5": "#\n# * Python 57, Correct Lineup\n# * Easy\n\n# * For the opening ceremony of the upcoming sports event an even number of \n# * athletes were picked. They formed a correct lineup, i.e. such a lineup in \n# * which no two boys or two girls stand together. The first person in the lineup \n# * was a girl. As a part of the performance, adjacent pairs of athletes (i.e. \n# * the first one together with the second one, the third one together with the \n# * fourth one, etc.) had to swap positions with each other.\n\n# * Given a list of athletes, return the list of athletes after the changes, i.e. \n# * after each adjacent pair of athletes is swapped.\n\n# * Example\n\n# For athletes = [1, 2, 3, 4, 5, 6], the output should be\n# correctLineup(athletes) = [2, 1, 4, 3, 6, 5].\n\n# * Input/Output\n\n# [execution time limit] 4 seconds (py3)\n\n# [input] array.integer athletes\n\n# A list of even length representing the athletes, where each athlete is given \n# by the number written on their back.\n\n# Guaranteed constraints:\n# 2 ≤ athletes.length ≤ 20,\n# 1 ≤ athletes[i] ≤ 100.\n\n# [output] array.integer\n\n# Array of athletes with each pair of adjacent elements swapped.\n\n#%%\n\n# * Solution 1\ndef correctLineup1(athletes:list)-> list:\n return [athletes[i+1] if i%2==0 else athletes[i-1] for i in range(len(athletes))]\n\n\n# * Solution 2\n# ! bitwise operator ^. \ndef correctLineup1(athletes:list)-> list:\n return [athletes[i^1] for i in range(len(athletes))]\n\n\na1 = [1, 2, 3, 4, 5, 6]\nr1 = correctLineup1(a1)\nprint(r1)\n\n\n# %%\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# coding=utf-8
# Copyright 2021-Present The THUCTC Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import torch
import torch.nn as nn
import thuctc.utils as utils
from thuctc.modules.module import Module
from thuctc.modules.layer_norm import LayerNorm
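# PositionalEmbedding adds fixed sinusoidal position signals to its input:
# sin/cos pairs whose wavelengths are spaced geometrically up to 10000, in the
# style of the Transformer's functional positional encoding. It has no learnable
# parameters, so it only needs the input's length and channel count at runtime.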
class PositionalEmbedding(torch.nn.Module):
def __init__(self):
super(PositionalEmbedding, self).__init__()
def forward(self, inputs):
if inputs.dim() != 3:
raise ValueError("The rank of input must be 3.")
length = inputs.shape[1]
channels = inputs.shape[2]
half_dim = channels // 2
positions = torch.arange(length, dtype=inputs.dtype,
device=inputs.device)
dimensions = torch.arange(half_dim, dtype=inputs.dtype,
device=inputs.device)
scale = math.log(10000.0) / float(half_dim - 1)
dimensions.mul_(-scale).exp_()
scaled_time = positions.unsqueeze(1) * dimensions.unsqueeze(0)
signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)],
dim=1)
if channels % 2 == 1:
pad = torch.zeros([signal.shape[0], 1], dtype=inputs.dtype,
device=inputs.device)
signal = torch.cat([signal, pad], axis=1)
return inputs + torch.reshape(signal, [1, -1, channels]).to(inputs)
class Embedding(Module):
def __init__(self, embed_nums, embed_dims, bias=False, name="embedding"):
super(Embedding, self).__init__(name=name)
self.embed_nums = embed_nums
self.embed_dims = embed_dims
with utils.scope(name):
self.weight = nn.Parameter(
torch.empty(self.embed_nums, self.embed_dims))
self.add_name(self.weight, "weight")
if bias:
self.bias = nn.Parameter(
torch.zeros(self.embed_dims))
self.add_name(self.bias, "bias")
else:
self.bias = None
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.weight, mean=0.0,
std=self.embed_dims ** -0.5)
def forward(self, inputs):
outputs = nn.functional.embedding(inputs, self.weight)
if self.bias is not None:
outputs = outputs + self.bias
return outputs
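# UnifiedEmbedding bundles the input-side embedding stack: word embeddings,
# optional position embeddings (a learnable table or the functional sinusoidal
# variant above), optional token-type embeddings, optional sqrt(d) scaling of
# the word embeddings, plus layer normalization and dropout on the summed result.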
class UnifiedEmbedding(Module):
def __init__(self, params, pos_embed=None, type_embed=False,
layer_norm=False, dropout=0.0, scale=False, name="embedding"):
super(UnifiedEmbedding, self).__init__(name=name)
self.pos_embed = pos_embed
self.type_embed = type_embed
self.vocab_size = len(params.vocabulary["source"])
self.embedding_size = params.embedding_size
self.layer_norm = None
self.out_dropout = None
self.scale = scale
if dropout > 0:
self.out_dropout = nn.Dropout(p=dropout)
with utils.scope(name):
self.word_embeddings = Embedding(self.vocab_size,
self.embedding_size,
name="word_embedding")
if self.pos_embed is not None:
if self.pos_embed == "learnable":
self.pos_embeddings = Embedding(params.max_pos,
self.embedding_size,
name="pos_embedding")
elif self.pos_embed == "functional":
self.pos_embeddings = PositionalEmbedding()
else:
raise ValueError("Unsupported position "
"embedding: %s" % pos_embed)
if self.type_embed:
self.type_embeddings = Embedding(params.type_vocab_size,
self.embedding_size,
name="type_embedding")
if layer_norm:
self.layer_norm = LayerNorm(self.embedding_size,
eps=params.layer_norm_eps)
def resize_word_embedding(self, new_vocab_size):
old_embeddings = self.word_embeddings
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
new_embeddings = Embedding(new_vocab_size,
old_embedding_dim,
name="word_embedding").to(old_embeddings.weight)
new_embeddings.reset_parameters()
new_embeddings.weight.data[:old_num_tokens, :] = old_embeddings.weight.data
self.word_embeddings = new_embeddings
self.vocab_size = new_vocab_size
def forward(self, input_ids, token_type_ids=None, position_ids=None):
inp_shape = input_ids.size()
inp_length = inp_shape[1]
inputs = self.word_embeddings(input_ids)
if self.scale:
inputs = inputs * (self.embedding_size ** 0.5)
if self.pos_embed is not None:
if self.pos_embed == "learnable":
if position_ids is None:
position_ids = torch.arange(inp_length).to(input_ids)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
inputs = inputs + self.pos_embeddings(position_ids)
elif self.pos_embed == "functional":
inputs = self.pos_embeddings(inputs)
if self.type_embed:
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
inputs = inputs + self.type_embeddings(token_type_ids)
if self.layer_norm is not None:
inputs = self.layer_norm(inputs)
if self.out_dropout is not None:
inputs = self.out_dropout(inputs)
return inputs
|
normal
|
{
"blob_id": "c773b273ad6953bf9c74b11c44aff16e9fd0860e",
"index": 3468,
"step-1": "<mask token>\n\n\nclass Embedding(Module):\n\n def __init__(self, embed_nums, embed_dims, bias=False, name='embedding'):\n super(Embedding, self).__init__(name=name)\n self.embed_nums = embed_nums\n self.embed_dims = embed_dims\n with utils.scope(name):\n self.weight = nn.Parameter(torch.empty(self.embed_nums, self.\n embed_dims))\n self.add_name(self.weight, 'weight')\n if bias:\n self.bias = nn.Parameter(torch.zeros(self.embed_dims))\n self.add_name(self.bias, 'bias')\n else:\n self.bias = None\n self.reset_parameters()\n <mask token>\n\n def forward(self, inputs):\n outputs = nn.functional.embedding(inputs, self.weight)\n if self.bias is not None:\n outputs = outputs + self.bias\n return outputs\n\n\nclass UnifiedEmbedding(Module):\n\n def __init__(self, params, pos_embed=None, type_embed=False, layer_norm\n =False, dropout=0.0, scale=False, name='embedding'):\n super(UnifiedEmbedding, self).__init__(name=name)\n self.pos_embed = pos_embed\n self.type_embed = type_embed\n self.vocab_size = len(params.vocabulary['source'])\n self.embedding_size = params.embedding_size\n self.layer_norm = None\n self.out_dropout = None\n self.scale = scale\n if dropout > 0:\n self.out_dropout = nn.Dropout(p=dropout)\n with utils.scope(name):\n self.word_embeddings = Embedding(self.vocab_size, self.\n embedding_size, name='word_embedding')\n if self.pos_embed is not None:\n if self.pos_embed == 'learnable':\n self.pos_embeddings = Embedding(params.max_pos, self.\n embedding_size, name='pos_embedding')\n elif self.pos_embed == 'functional':\n self.pos_embeddings = PositionalEmbedding()\n else:\n raise ValueError('Unsupported position embedding: %s' %\n pos_embed)\n if self.type_embed:\n self.type_embeddings = Embedding(params.type_vocab_size,\n self.embedding_size, name='type_embedding')\n if layer_norm:\n self.layer_norm = LayerNorm(self.embedding_size, eps=params\n .layer_norm_eps)\n\n def resize_word_embedding(self, new_vocab_size):\n old_embeddings = self.word_embeddings\n old_num_tokens, old_embedding_dim = old_embeddings.weight.size()\n new_embeddings = Embedding(new_vocab_size, old_embedding_dim, name=\n 'word_embedding').to(old_embeddings.weight)\n new_embeddings.reset_parameters()\n new_embeddings.weight.data[:old_num_tokens, :\n ] = old_embeddings.weight.data\n self.word_embeddings = new_embeddings\n self.vocab_size = new_vocab_size\n\n def forward(self, input_ids, token_type_ids=None, position_ids=None):\n inp_shape = input_ids.size()\n inp_length = inp_shape[1]\n inputs = self.word_embeddings(input_ids)\n if self.scale:\n inputs = inputs * self.embedding_size ** 0.5\n if self.pos_embed is not None:\n if self.pos_embed == 'learnable':\n if position_ids is None:\n position_ids = torch.arange(inp_length).to(input_ids)\n position_ids = position_ids.unsqueeze(0).expand_as(\n input_ids)\n inputs = inputs + self.pos_embeddings(position_ids)\n elif self.pos_embed == 'functional':\n inputs = self.pos_embeddings(inputs)\n if self.type_embed:\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n inputs = inputs + self.type_embeddings(token_type_ids)\n if self.layer_norm is not None:\n inputs = self.layer_norm(inputs)\n if self.out_dropout is not None:\n inputs = self.out_dropout(inputs)\n return inputs\n",
"step-2": "<mask token>\n\n\nclass PositionalEmbedding(torch.nn.Module):\n <mask token>\n <mask token>\n\n\nclass Embedding(Module):\n\n def __init__(self, embed_nums, embed_dims, bias=False, name='embedding'):\n super(Embedding, self).__init__(name=name)\n self.embed_nums = embed_nums\n self.embed_dims = embed_dims\n with utils.scope(name):\n self.weight = nn.Parameter(torch.empty(self.embed_nums, self.\n embed_dims))\n self.add_name(self.weight, 'weight')\n if bias:\n self.bias = nn.Parameter(torch.zeros(self.embed_dims))\n self.add_name(self.bias, 'bias')\n else:\n self.bias = None\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.normal_(self.weight, mean=0.0, std=self.embed_dims ** -0.5)\n\n def forward(self, inputs):\n outputs = nn.functional.embedding(inputs, self.weight)\n if self.bias is not None:\n outputs = outputs + self.bias\n return outputs\n\n\nclass UnifiedEmbedding(Module):\n\n def __init__(self, params, pos_embed=None, type_embed=False, layer_norm\n =False, dropout=0.0, scale=False, name='embedding'):\n super(UnifiedEmbedding, self).__init__(name=name)\n self.pos_embed = pos_embed\n self.type_embed = type_embed\n self.vocab_size = len(params.vocabulary['source'])\n self.embedding_size = params.embedding_size\n self.layer_norm = None\n self.out_dropout = None\n self.scale = scale\n if dropout > 0:\n self.out_dropout = nn.Dropout(p=dropout)\n with utils.scope(name):\n self.word_embeddings = Embedding(self.vocab_size, self.\n embedding_size, name='word_embedding')\n if self.pos_embed is not None:\n if self.pos_embed == 'learnable':\n self.pos_embeddings = Embedding(params.max_pos, self.\n embedding_size, name='pos_embedding')\n elif self.pos_embed == 'functional':\n self.pos_embeddings = PositionalEmbedding()\n else:\n raise ValueError('Unsupported position embedding: %s' %\n pos_embed)\n if self.type_embed:\n self.type_embeddings = Embedding(params.type_vocab_size,\n self.embedding_size, name='type_embedding')\n if layer_norm:\n self.layer_norm = LayerNorm(self.embedding_size, eps=params\n .layer_norm_eps)\n\n def resize_word_embedding(self, new_vocab_size):\n old_embeddings = self.word_embeddings\n old_num_tokens, old_embedding_dim = old_embeddings.weight.size()\n new_embeddings = Embedding(new_vocab_size, old_embedding_dim, name=\n 'word_embedding').to(old_embeddings.weight)\n new_embeddings.reset_parameters()\n new_embeddings.weight.data[:old_num_tokens, :\n ] = old_embeddings.weight.data\n self.word_embeddings = new_embeddings\n self.vocab_size = new_vocab_size\n\n def forward(self, input_ids, token_type_ids=None, position_ids=None):\n inp_shape = input_ids.size()\n inp_length = inp_shape[1]\n inputs = self.word_embeddings(input_ids)\n if self.scale:\n inputs = inputs * self.embedding_size ** 0.5\n if self.pos_embed is not None:\n if self.pos_embed == 'learnable':\n if position_ids is None:\n position_ids = torch.arange(inp_length).to(input_ids)\n position_ids = position_ids.unsqueeze(0).expand_as(\n input_ids)\n inputs = inputs + self.pos_embeddings(position_ids)\n elif self.pos_embed == 'functional':\n inputs = self.pos_embeddings(inputs)\n if self.type_embed:\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n inputs = inputs + self.type_embeddings(token_type_ids)\n if self.layer_norm is not None:\n inputs = self.layer_norm(inputs)\n if self.out_dropout is not None:\n inputs = self.out_dropout(inputs)\n return inputs\n",
"step-3": "<mask token>\n\n\nclass PositionalEmbedding(torch.nn.Module):\n <mask token>\n\n def forward(self, inputs):\n if inputs.dim() != 3:\n raise ValueError('The rank of input must be 3.')\n length = inputs.shape[1]\n channels = inputs.shape[2]\n half_dim = channels // 2\n positions = torch.arange(length, dtype=inputs.dtype, device=inputs.\n device)\n dimensions = torch.arange(half_dim, dtype=inputs.dtype, device=\n inputs.device)\n scale = math.log(10000.0) / float(half_dim - 1)\n dimensions.mul_(-scale).exp_()\n scaled_time = positions.unsqueeze(1) * dimensions.unsqueeze(0)\n signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)],\n dim=1)\n if channels % 2 == 1:\n pad = torch.zeros([signal.shape[0], 1], dtype=inputs.dtype,\n device=inputs.device)\n signal = torch.cat([signal, pad], axis=1)\n return inputs + torch.reshape(signal, [1, -1, channels]).to(inputs)\n\n\nclass Embedding(Module):\n\n def __init__(self, embed_nums, embed_dims, bias=False, name='embedding'):\n super(Embedding, self).__init__(name=name)\n self.embed_nums = embed_nums\n self.embed_dims = embed_dims\n with utils.scope(name):\n self.weight = nn.Parameter(torch.empty(self.embed_nums, self.\n embed_dims))\n self.add_name(self.weight, 'weight')\n if bias:\n self.bias = nn.Parameter(torch.zeros(self.embed_dims))\n self.add_name(self.bias, 'bias')\n else:\n self.bias = None\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.normal_(self.weight, mean=0.0, std=self.embed_dims ** -0.5)\n\n def forward(self, inputs):\n outputs = nn.functional.embedding(inputs, self.weight)\n if self.bias is not None:\n outputs = outputs + self.bias\n return outputs\n\n\nclass UnifiedEmbedding(Module):\n\n def __init__(self, params, pos_embed=None, type_embed=False, layer_norm\n =False, dropout=0.0, scale=False, name='embedding'):\n super(UnifiedEmbedding, self).__init__(name=name)\n self.pos_embed = pos_embed\n self.type_embed = type_embed\n self.vocab_size = len(params.vocabulary['source'])\n self.embedding_size = params.embedding_size\n self.layer_norm = None\n self.out_dropout = None\n self.scale = scale\n if dropout > 0:\n self.out_dropout = nn.Dropout(p=dropout)\n with utils.scope(name):\n self.word_embeddings = Embedding(self.vocab_size, self.\n embedding_size, name='word_embedding')\n if self.pos_embed is not None:\n if self.pos_embed == 'learnable':\n self.pos_embeddings = Embedding(params.max_pos, self.\n embedding_size, name='pos_embedding')\n elif self.pos_embed == 'functional':\n self.pos_embeddings = PositionalEmbedding()\n else:\n raise ValueError('Unsupported position embedding: %s' %\n pos_embed)\n if self.type_embed:\n self.type_embeddings = Embedding(params.type_vocab_size,\n self.embedding_size, name='type_embedding')\n if layer_norm:\n self.layer_norm = LayerNorm(self.embedding_size, eps=params\n .layer_norm_eps)\n\n def resize_word_embedding(self, new_vocab_size):\n old_embeddings = self.word_embeddings\n old_num_tokens, old_embedding_dim = old_embeddings.weight.size()\n new_embeddings = Embedding(new_vocab_size, old_embedding_dim, name=\n 'word_embedding').to(old_embeddings.weight)\n new_embeddings.reset_parameters()\n new_embeddings.weight.data[:old_num_tokens, :\n ] = old_embeddings.weight.data\n self.word_embeddings = new_embeddings\n self.vocab_size = new_vocab_size\n\n def forward(self, input_ids, token_type_ids=None, position_ids=None):\n inp_shape = input_ids.size()\n inp_length = inp_shape[1]\n inputs = self.word_embeddings(input_ids)\n if self.scale:\n inputs = inputs * 
self.embedding_size ** 0.5\n if self.pos_embed is not None:\n if self.pos_embed == 'learnable':\n if position_ids is None:\n position_ids = torch.arange(inp_length).to(input_ids)\n position_ids = position_ids.unsqueeze(0).expand_as(\n input_ids)\n inputs = inputs + self.pos_embeddings(position_ids)\n elif self.pos_embed == 'functional':\n inputs = self.pos_embeddings(inputs)\n if self.type_embed:\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n inputs = inputs + self.type_embeddings(token_type_ids)\n if self.layer_norm is not None:\n inputs = self.layer_norm(inputs)\n if self.out_dropout is not None:\n inputs = self.out_dropout(inputs)\n return inputs\n",
"step-4": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport math\nimport torch\nimport torch.nn as nn\nimport thuctc.utils as utils\nfrom thuctc.modules.module import Module\nfrom thuctc.modules.layer_norm import LayerNorm\n\n\nclass PositionalEmbedding(torch.nn.Module):\n\n def __init__(self):\n super(PositionalEmbedding, self).__init__()\n\n def forward(self, inputs):\n if inputs.dim() != 3:\n raise ValueError('The rank of input must be 3.')\n length = inputs.shape[1]\n channels = inputs.shape[2]\n half_dim = channels // 2\n positions = torch.arange(length, dtype=inputs.dtype, device=inputs.\n device)\n dimensions = torch.arange(half_dim, dtype=inputs.dtype, device=\n inputs.device)\n scale = math.log(10000.0) / float(half_dim - 1)\n dimensions.mul_(-scale).exp_()\n scaled_time = positions.unsqueeze(1) * dimensions.unsqueeze(0)\n signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)],\n dim=1)\n if channels % 2 == 1:\n pad = torch.zeros([signal.shape[0], 1], dtype=inputs.dtype,\n device=inputs.device)\n signal = torch.cat([signal, pad], axis=1)\n return inputs + torch.reshape(signal, [1, -1, channels]).to(inputs)\n\n\nclass Embedding(Module):\n\n def __init__(self, embed_nums, embed_dims, bias=False, name='embedding'):\n super(Embedding, self).__init__(name=name)\n self.embed_nums = embed_nums\n self.embed_dims = embed_dims\n with utils.scope(name):\n self.weight = nn.Parameter(torch.empty(self.embed_nums, self.\n embed_dims))\n self.add_name(self.weight, 'weight')\n if bias:\n self.bias = nn.Parameter(torch.zeros(self.embed_dims))\n self.add_name(self.bias, 'bias')\n else:\n self.bias = None\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.normal_(self.weight, mean=0.0, std=self.embed_dims ** -0.5)\n\n def forward(self, inputs):\n outputs = nn.functional.embedding(inputs, self.weight)\n if self.bias is not None:\n outputs = outputs + self.bias\n return outputs\n\n\nclass UnifiedEmbedding(Module):\n\n def __init__(self, params, pos_embed=None, type_embed=False, layer_norm\n =False, dropout=0.0, scale=False, name='embedding'):\n super(UnifiedEmbedding, self).__init__(name=name)\n self.pos_embed = pos_embed\n self.type_embed = type_embed\n self.vocab_size = len(params.vocabulary['source'])\n self.embedding_size = params.embedding_size\n self.layer_norm = None\n self.out_dropout = None\n self.scale = scale\n if dropout > 0:\n self.out_dropout = nn.Dropout(p=dropout)\n with utils.scope(name):\n self.word_embeddings = Embedding(self.vocab_size, self.\n embedding_size, name='word_embedding')\n if self.pos_embed is not None:\n if self.pos_embed == 'learnable':\n self.pos_embeddings = Embedding(params.max_pos, self.\n embedding_size, name='pos_embedding')\n elif self.pos_embed == 'functional':\n self.pos_embeddings = PositionalEmbedding()\n else:\n raise ValueError('Unsupported position embedding: %s' %\n pos_embed)\n if self.type_embed:\n self.type_embeddings = Embedding(params.type_vocab_size,\n self.embedding_size, name='type_embedding')\n if layer_norm:\n self.layer_norm = LayerNorm(self.embedding_size, eps=params\n .layer_norm_eps)\n\n def resize_word_embedding(self, new_vocab_size):\n old_embeddings = self.word_embeddings\n old_num_tokens, old_embedding_dim = old_embeddings.weight.size()\n new_embeddings = Embedding(new_vocab_size, old_embedding_dim, name=\n 'word_embedding').to(old_embeddings.weight)\n new_embeddings.reset_parameters()\n new_embeddings.weight.data[:old_num_tokens, :\n ] = 
old_embeddings.weight.data\n self.word_embeddings = new_embeddings\n self.vocab_size = new_vocab_size\n\n def forward(self, input_ids, token_type_ids=None, position_ids=None):\n inp_shape = input_ids.size()\n inp_length = inp_shape[1]\n inputs = self.word_embeddings(input_ids)\n if self.scale:\n inputs = inputs * self.embedding_size ** 0.5\n if self.pos_embed is not None:\n if self.pos_embed == 'learnable':\n if position_ids is None:\n position_ids = torch.arange(inp_length).to(input_ids)\n position_ids = position_ids.unsqueeze(0).expand_as(\n input_ids)\n inputs = inputs + self.pos_embeddings(position_ids)\n elif self.pos_embed == 'functional':\n inputs = self.pos_embeddings(inputs)\n if self.type_embed:\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n inputs = inputs + self.type_embeddings(token_type_ids)\n if self.layer_norm is not None:\n inputs = self.layer_norm(inputs)\n if self.out_dropout is not None:\n inputs = self.out_dropout(inputs)\n return inputs\n",
"step-5": "# coding=utf-8\n# Copyright 2021-Present The THUCTC Authors\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport torch\n\nimport torch.nn as nn\nimport thuctc.utils as utils\n\nfrom thuctc.modules.module import Module\nfrom thuctc.modules.layer_norm import LayerNorm\n\n\nclass PositionalEmbedding(torch.nn.Module):\n\n def __init__(self):\n super(PositionalEmbedding, self).__init__()\n\n def forward(self, inputs):\n if inputs.dim() != 3:\n raise ValueError(\"The rank of input must be 3.\")\n\n length = inputs.shape[1]\n channels = inputs.shape[2]\n half_dim = channels // 2\n\n positions = torch.arange(length, dtype=inputs.dtype,\n device=inputs.device)\n dimensions = torch.arange(half_dim, dtype=inputs.dtype,\n device=inputs.device)\n\n scale = math.log(10000.0) / float(half_dim - 1)\n dimensions.mul_(-scale).exp_()\n\n scaled_time = positions.unsqueeze(1) * dimensions.unsqueeze(0)\n signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)],\n dim=1)\n\n if channels % 2 == 1:\n pad = torch.zeros([signal.shape[0], 1], dtype=inputs.dtype,\n device=inputs.device)\n signal = torch.cat([signal, pad], axis=1)\n\n return inputs + torch.reshape(signal, [1, -1, channels]).to(inputs)\n\n\nclass Embedding(Module):\n\n def __init__(self, embed_nums, embed_dims, bias=False, name=\"embedding\"):\n super(Embedding, self).__init__(name=name)\n\n self.embed_nums = embed_nums\n self.embed_dims = embed_dims\n\n with utils.scope(name):\n self.weight = nn.Parameter(\n torch.empty(self.embed_nums, self.embed_dims))\n self.add_name(self.weight, \"weight\")\n\n if bias:\n self.bias = nn.Parameter(\n torch.zeros(self.embed_dims))\n self.add_name(self.bias, \"bias\")\n else:\n self.bias = None\n\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.normal_(self.weight, mean=0.0,\n std=self.embed_dims ** -0.5)\n\n def forward(self, inputs):\n outputs = nn.functional.embedding(inputs, self.weight)\n\n if self.bias is not None:\n outputs = outputs + self.bias\n\n return outputs\n\n\nclass UnifiedEmbedding(Module):\n\n def __init__(self, params, pos_embed=None, type_embed=False,\n layer_norm=False, dropout=0.0, scale=False, name=\"embedding\"):\n super(UnifiedEmbedding, self).__init__(name=name)\n\n self.pos_embed = pos_embed\n self.type_embed = type_embed\n self.vocab_size = len(params.vocabulary[\"source\"])\n self.embedding_size = params.embedding_size\n self.layer_norm = None\n self.out_dropout = None\n self.scale = scale\n\n if dropout > 0:\n self.out_dropout = nn.Dropout(p=dropout)\n\n with utils.scope(name):\n self.word_embeddings = Embedding(self.vocab_size,\n self.embedding_size,\n name=\"word_embedding\")\n\n if self.pos_embed is not None:\n if self.pos_embed == \"learnable\":\n self.pos_embeddings = Embedding(params.max_pos,\n self.embedding_size,\n name=\"pos_embedding\")\n elif self.pos_embed == \"functional\":\n self.pos_embeddings = PositionalEmbedding()\n else:\n raise ValueError(\"Unsupported position \"\n \"embedding: %s\" % pos_embed)\n\n if self.type_embed:\n self.type_embeddings = Embedding(params.type_vocab_size,\n self.embedding_size,\n name=\"type_embedding\")\n\n if layer_norm:\n self.layer_norm = LayerNorm(self.embedding_size,\n eps=params.layer_norm_eps)\n\n def resize_word_embedding(self, new_vocab_size): \n old_embeddings = self.word_embeddings\n old_num_tokens, old_embedding_dim = old_embeddings.weight.size()\n new_embeddings = Embedding(new_vocab_size,\n 
old_embedding_dim,\n name=\"word_embedding\").to(old_embeddings.weight)\n new_embeddings.reset_parameters()\n new_embeddings.weight.data[:old_num_tokens, :] = old_embeddings.weight.data\n self.word_embeddings = new_embeddings\n self.vocab_size = new_vocab_size\n\n def forward(self, input_ids, token_type_ids=None, position_ids=None):\n inp_shape = input_ids.size()\n inp_length = inp_shape[1]\n\n inputs = self.word_embeddings(input_ids)\n\n if self.scale:\n inputs = inputs * (self.embedding_size ** 0.5)\n\n if self.pos_embed is not None:\n if self.pos_embed == \"learnable\":\n if position_ids is None:\n position_ids = torch.arange(inp_length).to(input_ids)\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\n\n inputs = inputs + self.pos_embeddings(position_ids)\n elif self.pos_embed == \"functional\":\n inputs = self.pos_embeddings(inputs)\n\n if self.type_embed:\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n inputs = inputs + self.type_embeddings(token_type_ids)\n\n if self.layer_norm is not None:\n inputs = self.layer_norm(inputs)\n\n if self.out_dropout is not None:\n inputs = self.out_dropout(inputs)\n\n return inputs\n",
"step-ids": [
7,
9,
10,
12,
13
]
}
|
[
7,
9,
10,
12,
13
] |
from __future__ import annotations
import asyncio
import signal
from functools import wraps
from typing import TYPE_CHECKING, Awaitable, Callable
import click
from .utils import import_obj
if TYPE_CHECKING:
from donald.manager import Donald
from .types import TV
def import_manager(path: str) -> Donald:
"""Import a manager from a python path."""
manager: Donald = import_obj(path)
return manager
def process_await(fn: Callable[..., Awaitable[TV]]) -> Callable[..., TV]:
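    """Run the decorated async click command to completion on the event loop stored in ctx.obj["loop"]."""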
@wraps(fn)
@click.pass_context
def wrapper(ctx, *args, **kwargs):
loop = ctx.obj["loop"]
return loop.run_until_complete(fn(ctx, *args, **kwargs))
return wrapper
@click.group()
@click.option(
"-M",
"--manager",
"manager",
required=True,
help="Python path to the manager",
)
@click.pass_context
def cli(ctx: click.Context, manager: str):
ctx.obj["manager"] = import_manager(manager)
@cli.command(help="Launch a worker")
@click.option("-S", "--scheduler", "scheduler", is_flag=True, help="Start a scheduler")
@process_await
async def worker(ctx: click.Context, *, scheduler: bool = False, **params):
"""Launch a worker."""
loop = ctx.obj["loop"]
async def stop():
loop.remove_signal_handler(signal.SIGTERM)
loop.remove_signal_handler(signal.SIGINT)
await worker.stop()
if scheduler:
await manager.scheduler.stop()
await manager.stop()
loop.add_signal_handler(signal.SIGINT, lambda: loop.create_task(stop()))
loop.add_signal_handler(signal.SIGTERM, lambda: loop.create_task(stop()))
manager: Donald = ctx.obj["manager"]
await manager.start()
if scheduler:
manager.scheduler.start()
worker = manager.create_worker(show_banner=True, **params)
worker.start()
await worker.wait()
@cli.command(help="Launch a scheduler")
@process_await
async def scheduler(ctx: click.Context):
loop = ctx.obj["loop"]
async def stop():
loop.remove_signal_handler(signal.SIGTERM)
loop.remove_signal_handler(signal.SIGINT)
await manager.scheduler.stop()
await manager.stop()
loop.add_signal_handler(signal.SIGINT, lambda: loop.create_task(stop()))
loop.add_signal_handler(signal.SIGTERM, lambda: loop.create_task(stop()))
manager: Donald = ctx.obj["manager"]
await manager.start()
manager.scheduler.start()
await manager.scheduler.wait()
def main():
loop = asyncio.get_event_loop()
cli(obj={"loop": loop})
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "3da4896f368f067a339db5cc89201c93ba8166ce",
"index": 6220,
"step-1": "<mask token>\n\n\ndef process_await(fn: Callable[..., Awaitable[TV]]) ->Callable[..., TV]:\n\n @wraps(fn)\n @click.pass_context\n def wrapper(ctx, *args, **kwargs):\n loop = ctx.obj['loop']\n return loop.run_until_complete(fn(ctx, *args, **kwargs))\n return wrapper\n\n\n@click.group()\n@click.option('-M', '--manager', 'manager', required=True, help=\n 'Python path to the manager')\n@click.pass_context\ndef cli(ctx: click.Context, manager: str):\n ctx.obj['manager'] = import_manager(manager)\n\n\n<mask token>\n\n\ndef main():\n loop = asyncio.get_event_loop()\n cli(obj={'loop': loop})\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef import_manager(path: str) ->Donald:\n \"\"\"Import a manager from a python path.\"\"\"\n manager: Donald = import_obj(path)\n return manager\n\n\ndef process_await(fn: Callable[..., Awaitable[TV]]) ->Callable[..., TV]:\n\n @wraps(fn)\n @click.pass_context\n def wrapper(ctx, *args, **kwargs):\n loop = ctx.obj['loop']\n return loop.run_until_complete(fn(ctx, *args, **kwargs))\n return wrapper\n\n\n@click.group()\n@click.option('-M', '--manager', 'manager', required=True, help=\n 'Python path to the manager')\n@click.pass_context\ndef cli(ctx: click.Context, manager: str):\n ctx.obj['manager'] = import_manager(manager)\n\n\n<mask token>\n\n\ndef main():\n loop = asyncio.get_event_loop()\n cli(obj={'loop': loop})\n\n\n<mask token>\n",
"step-3": "<mask token>\nif TYPE_CHECKING:\n from donald.manager import Donald\n from .types import TV\n\n\ndef import_manager(path: str) ->Donald:\n \"\"\"Import a manager from a python path.\"\"\"\n manager: Donald = import_obj(path)\n return manager\n\n\ndef process_await(fn: Callable[..., Awaitable[TV]]) ->Callable[..., TV]:\n\n @wraps(fn)\n @click.pass_context\n def wrapper(ctx, *args, **kwargs):\n loop = ctx.obj['loop']\n return loop.run_until_complete(fn(ctx, *args, **kwargs))\n return wrapper\n\n\n@click.group()\n@click.option('-M', '--manager', 'manager', required=True, help=\n 'Python path to the manager')\n@click.pass_context\ndef cli(ctx: click.Context, manager: str):\n ctx.obj['manager'] = import_manager(manager)\n\n\n@cli.command(help='Launch a worker')\n@click.option('-S', '--scheduler', 'scheduler', is_flag=True, help=\n 'Start a scheduler')\n@process_await\nasync def worker(ctx: click.Context, *, scheduler: bool=False, **params):\n \"\"\"Launch a worker.\"\"\"\n loop = ctx.obj['loop']\n\n async def stop():\n loop.remove_signal_handler(signal.SIGTERM)\n loop.remove_signal_handler(signal.SIGINT)\n await worker.stop()\n if scheduler:\n await manager.scheduler.stop()\n await manager.stop()\n loop.add_signal_handler(signal.SIGINT, lambda : loop.create_task(stop()))\n loop.add_signal_handler(signal.SIGTERM, lambda : loop.create_task(stop()))\n manager: Donald = ctx.obj['manager']\n await manager.start()\n if scheduler:\n manager.scheduler.start()\n worker = manager.create_worker(show_banner=True, **params)\n worker.start()\n await worker.wait()\n\n\n@cli.command(help='Launch a scheduler')\n@process_await\nasync def scheduler(ctx: click.Context):\n loop = ctx.obj['loop']\n\n async def stop():\n loop.remove_signal_handler(signal.SIGTERM)\n loop.remove_signal_handler(signal.SIGINT)\n await manager.scheduler.stop()\n await manager.stop()\n loop.add_signal_handler(signal.SIGINT, lambda : loop.create_task(stop()))\n loop.add_signal_handler(signal.SIGTERM, lambda : loop.create_task(stop()))\n manager: Donald = ctx.obj['manager']\n await manager.start()\n manager.scheduler.start()\n await manager.scheduler.wait()\n\n\ndef main():\n loop = asyncio.get_event_loop()\n cli(obj={'loop': loop})\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from __future__ import annotations\nimport asyncio\nimport signal\nfrom functools import wraps\nfrom typing import TYPE_CHECKING, Awaitable, Callable\nimport click\nfrom .utils import import_obj\nif TYPE_CHECKING:\n from donald.manager import Donald\n from .types import TV\n\n\ndef import_manager(path: str) ->Donald:\n \"\"\"Import a manager from a python path.\"\"\"\n manager: Donald = import_obj(path)\n return manager\n\n\ndef process_await(fn: Callable[..., Awaitable[TV]]) ->Callable[..., TV]:\n\n @wraps(fn)\n @click.pass_context\n def wrapper(ctx, *args, **kwargs):\n loop = ctx.obj['loop']\n return loop.run_until_complete(fn(ctx, *args, **kwargs))\n return wrapper\n\n\n@click.group()\n@click.option('-M', '--manager', 'manager', required=True, help=\n 'Python path to the manager')\n@click.pass_context\ndef cli(ctx: click.Context, manager: str):\n ctx.obj['manager'] = import_manager(manager)\n\n\n@cli.command(help='Launch a worker')\n@click.option('-S', '--scheduler', 'scheduler', is_flag=True, help=\n 'Start a scheduler')\n@process_await\nasync def worker(ctx: click.Context, *, scheduler: bool=False, **params):\n \"\"\"Launch a worker.\"\"\"\n loop = ctx.obj['loop']\n\n async def stop():\n loop.remove_signal_handler(signal.SIGTERM)\n loop.remove_signal_handler(signal.SIGINT)\n await worker.stop()\n if scheduler:\n await manager.scheduler.stop()\n await manager.stop()\n loop.add_signal_handler(signal.SIGINT, lambda : loop.create_task(stop()))\n loop.add_signal_handler(signal.SIGTERM, lambda : loop.create_task(stop()))\n manager: Donald = ctx.obj['manager']\n await manager.start()\n if scheduler:\n manager.scheduler.start()\n worker = manager.create_worker(show_banner=True, **params)\n worker.start()\n await worker.wait()\n\n\n@cli.command(help='Launch a scheduler')\n@process_await\nasync def scheduler(ctx: click.Context):\n loop = ctx.obj['loop']\n\n async def stop():\n loop.remove_signal_handler(signal.SIGTERM)\n loop.remove_signal_handler(signal.SIGINT)\n await manager.scheduler.stop()\n await manager.stop()\n loop.add_signal_handler(signal.SIGINT, lambda : loop.create_task(stop()))\n loop.add_signal_handler(signal.SIGTERM, lambda : loop.create_task(stop()))\n manager: Donald = ctx.obj['manager']\n await manager.start()\n manager.scheduler.start()\n await manager.scheduler.wait()\n\n\ndef main():\n loop = asyncio.get_event_loop()\n cli(obj={'loop': loop})\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from __future__ import annotations\n\nimport asyncio\nimport signal\nfrom functools import wraps\nfrom typing import TYPE_CHECKING, Awaitable, Callable\n\nimport click\n\nfrom .utils import import_obj\n\nif TYPE_CHECKING:\n from donald.manager import Donald\n\n from .types import TV\n\n\ndef import_manager(path: str) -> Donald:\n \"\"\"Import a manager from a python path.\"\"\"\n manager: Donald = import_obj(path)\n return manager\n\n\ndef process_await(fn: Callable[..., Awaitable[TV]]) -> Callable[..., TV]:\n @wraps(fn)\n @click.pass_context\n def wrapper(ctx, *args, **kwargs):\n loop = ctx.obj[\"loop\"]\n return loop.run_until_complete(fn(ctx, *args, **kwargs))\n\n return wrapper\n\n\n@click.group()\n@click.option(\n \"-M\",\n \"--manager\",\n \"manager\",\n required=True,\n help=\"Python path to the manager\",\n)\n@click.pass_context\ndef cli(ctx: click.Context, manager: str):\n ctx.obj[\"manager\"] = import_manager(manager)\n\n\n@cli.command(help=\"Launch a worker\")\n@click.option(\"-S\", \"--scheduler\", \"scheduler\", is_flag=True, help=\"Start a scheduler\")\n@process_await\nasync def worker(ctx: click.Context, *, scheduler: bool = False, **params):\n \"\"\"Launch a worker.\"\"\"\n\n loop = ctx.obj[\"loop\"]\n\n async def stop():\n loop.remove_signal_handler(signal.SIGTERM)\n loop.remove_signal_handler(signal.SIGINT)\n await worker.stop()\n if scheduler:\n await manager.scheduler.stop()\n await manager.stop()\n\n loop.add_signal_handler(signal.SIGINT, lambda: loop.create_task(stop()))\n loop.add_signal_handler(signal.SIGTERM, lambda: loop.create_task(stop()))\n\n manager: Donald = ctx.obj[\"manager\"]\n await manager.start()\n if scheduler:\n manager.scheduler.start()\n\n worker = manager.create_worker(show_banner=True, **params)\n worker.start()\n\n await worker.wait()\n\n\n@cli.command(help=\"Launch a scheduler\")\n@process_await\nasync def scheduler(ctx: click.Context):\n loop = ctx.obj[\"loop\"]\n\n async def stop():\n loop.remove_signal_handler(signal.SIGTERM)\n loop.remove_signal_handler(signal.SIGINT)\n await manager.scheduler.stop()\n await manager.stop()\n\n loop.add_signal_handler(signal.SIGINT, lambda: loop.create_task(stop()))\n loop.add_signal_handler(signal.SIGTERM, lambda: loop.create_task(stop()))\n\n manager: Donald = ctx.obj[\"manager\"]\n await manager.start()\n\n manager.scheduler.start()\n await manager.scheduler.wait()\n\n\ndef main():\n loop = asyncio.get_event_loop()\n cli(obj={\"loop\": loop})\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def project(X, U, p=None):
if p == None:
p = X.shape[1]
Z = np.matmul(X, U)
Z[:, p:] = np.mean(Z[:, p:], axis=0)
X2 = np.matmul(Z, U.transpose())
return Z, X2
<|reserved_special_token_0|>
def whiteningTransform(X, W, U):
L = np.diag(W)
Z = np.transpose(np.matmul(np.matmul(scipy.linalg.
fractional_matrix_power(L, -0.5), U.transpose()), (X - np.mean(X,
axis=0)).transpose()))
return Z
<|reserved_special_token_1|>
<|reserved_special_token_0|>
np.random.seed(RANDOM_SEED)
random.seed(RANDOM_SEED)
def project(X, U, p=None):
if p == None:
p = X.shape[1]
Z = np.matmul(X, U)
Z[:, p:] = np.mean(Z[:, p:], axis=0)
X2 = np.matmul(Z, U.transpose())
return Z, X2
def PCA(X, threshold=0.9):
X2 = X - np.mean(X, axis=0)
S = np.matmul(X2.transpose(), X2)
[W, U] = np.linalg.eigh(S)
W = np.flip(W, axis=0)
U = np.flip(U, axis=1)
validity = np.cumsum(W) / np.sum(W)
p = np.argmax(validity >= threshold) + 1
if p <= 1 or threshold == 1:
p = X.shape[1]
[Z, X3] = project(X, U, p)
return [Z, p, X3, U, W]
def whiteningTransform(X, W, U):
L = np.diag(W)
Z = np.transpose(np.matmul(np.matmul(scipy.linalg.
fractional_matrix_power(L, -0.5), U.transpose()), (X - np.mean(X,
axis=0)).transpose()))
return Z
<|reserved_special_token_1|>
<|reserved_special_token_0|>
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
random.seed(RANDOM_SEED)
def project(X, U, p=None):
if p == None:
p = X.shape[1]
Z = np.matmul(X, U)
Z[:, p:] = np.mean(Z[:, p:], axis=0)
X2 = np.matmul(Z, U.transpose())
return Z, X2
def PCA(X, threshold=0.9):
X2 = X - np.mean(X, axis=0)
S = np.matmul(X2.transpose(), X2)
[W, U] = np.linalg.eigh(S)
W = np.flip(W, axis=0)
U = np.flip(U, axis=1)
validity = np.cumsum(W) / np.sum(W)
p = np.argmax(validity >= threshold) + 1
if p <= 1 or threshold == 1:
p = X.shape[1]
[Z, X3] = project(X, U, p)
return [Z, p, X3, U, W]
def whiteningTransform(X, W, U):
L = np.diag(W)
Z = np.transpose(np.matmul(np.matmul(scipy.linalg.
fractional_matrix_power(L, -0.5), U.transpose()), (X - np.mean(X,
axis=0)).transpose()))
return Z
<|reserved_special_token_1|>
import numpy as np
import pandas as pd
import matplotlib as plt
import scipy.linalg
from distance_metrics import *
import time
import random
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
random.seed(RANDOM_SEED)
def project(X, U, p=None):
if p == None:
p = X.shape[1]
Z = np.matmul(X, U)
Z[:, p:] = np.mean(Z[:, p:], axis=0)
X2 = np.matmul(Z, U.transpose())
return Z, X2
def PCA(X, threshold=0.9):
X2 = X - np.mean(X, axis=0)
S = np.matmul(X2.transpose(), X2)
[W, U] = np.linalg.eigh(S)
W = np.flip(W, axis=0)
U = np.flip(U, axis=1)
validity = np.cumsum(W) / np.sum(W)
p = np.argmax(validity >= threshold) + 1
if p <= 1 or threshold == 1:
p = X.shape[1]
[Z, X3] = project(X, U, p)
return [Z, p, X3, U, W]
def whiteningTransform(X, W, U):
L = np.diag(W)
Z = np.transpose(np.matmul(np.matmul(scipy.linalg.
fractional_matrix_power(L, -0.5), U.transpose()), (X - np.mean(X,
axis=0)).transpose()))
return Z
<|reserved_special_token_1|>
import numpy as np
import pandas as pd
import matplotlib as plt
import scipy.linalg
from distance_metrics import *
import time
import random
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
random.seed(RANDOM_SEED)
################################################################
# PCA #
################################################################
def project(X, U, p = None):
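    # Project X onto the eigenbasis U; keep the first p components, collapse the rest to their column means, then reconstruct X2.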
if p == None: p = X.shape[1]
Z = np.matmul(X, U)
Z[:, p:] = np.mean(Z[:, p:], axis = 0)
X2 = np.matmul(Z, U.transpose())
return (Z, X2)
def PCA(X, threshold = 0.9):
X2 = X - np.mean(X, axis = 0)
S = np.matmul(X2.transpose(), X2) #Covariance Matrix
[W,U] = np.linalg.eigh(S) #eigen vectors in columns
W = np.flip(W, axis = 0)
U = np.flip(U, axis = 1)
validity = np.cumsum(W)/np.sum(W) #represents validity of choosing first i+1 eigenvalues
p = np.argmax(validity>=threshold) + 1
if p<=1 or threshold == 1: p = X.shape[1]
[Z, X3] = project(X, U, p)
#Projection, P, Reconstruction, EigenVectors, EigenValues
return [Z, p, X3, U, W]
################################################################
# Whitening #
################################################################
def whiteningTransform(X, W, U):
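    # Whitening: center X, rotate into the eigenbasis (multiply by U^T), and rescale each component by 1/sqrt(eigenvalue).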
L = np.diag(W)
Z = np.transpose(np.matmul(np.matmul(scipy.linalg.fractional_matrix_power(L, -0.5), U.transpose()), (X - np.mean(X, axis = 0)).transpose()))
return Z
|
flexible
|
{
"blob_id": "c00db6d6fd903236de37ccc029ed30fd46dccdef",
"index": 7711,
"step-1": "<mask token>\n\n\ndef project(X, U, p=None):\n if p == None:\n p = X.shape[1]\n Z = np.matmul(X, U)\n Z[:, p:] = np.mean(Z[:, p:], axis=0)\n X2 = np.matmul(Z, U.transpose())\n return Z, X2\n\n\n<mask token>\n\n\ndef whiteningTransform(X, W, U):\n L = np.diag(W)\n Z = np.transpose(np.matmul(np.matmul(scipy.linalg.\n fractional_matrix_power(L, -0.5), U.transpose()), (X - np.mean(X,\n axis=0)).transpose()))\n return Z\n",
"step-2": "<mask token>\nnp.random.seed(RANDOM_SEED)\nrandom.seed(RANDOM_SEED)\n\n\ndef project(X, U, p=None):\n if p == None:\n p = X.shape[1]\n Z = np.matmul(X, U)\n Z[:, p:] = np.mean(Z[:, p:], axis=0)\n X2 = np.matmul(Z, U.transpose())\n return Z, X2\n\n\ndef PCA(X, threshold=0.9):\n X2 = X - np.mean(X, axis=0)\n S = np.matmul(X2.transpose(), X2)\n [W, U] = np.linalg.eigh(S)\n W = np.flip(W, axis=0)\n U = np.flip(U, axis=1)\n validity = np.cumsum(W) / np.sum(W)\n p = np.argmax(validity >= threshold) + 1\n if p <= 1 or threshold == 1:\n p = X.shape[1]\n [Z, X3] = project(X, U, p)\n return [Z, p, X3, U, W]\n\n\ndef whiteningTransform(X, W, U):\n L = np.diag(W)\n Z = np.transpose(np.matmul(np.matmul(scipy.linalg.\n fractional_matrix_power(L, -0.5), U.transpose()), (X - np.mean(X,\n axis=0)).transpose()))\n return Z\n",
"step-3": "<mask token>\nRANDOM_SEED = 42\nnp.random.seed(RANDOM_SEED)\nrandom.seed(RANDOM_SEED)\n\n\ndef project(X, U, p=None):\n if p == None:\n p = X.shape[1]\n Z = np.matmul(X, U)\n Z[:, p:] = np.mean(Z[:, p:], axis=0)\n X2 = np.matmul(Z, U.transpose())\n return Z, X2\n\n\ndef PCA(X, threshold=0.9):\n X2 = X - np.mean(X, axis=0)\n S = np.matmul(X2.transpose(), X2)\n [W, U] = np.linalg.eigh(S)\n W = np.flip(W, axis=0)\n U = np.flip(U, axis=1)\n validity = np.cumsum(W) / np.sum(W)\n p = np.argmax(validity >= threshold) + 1\n if p <= 1 or threshold == 1:\n p = X.shape[1]\n [Z, X3] = project(X, U, p)\n return [Z, p, X3, U, W]\n\n\ndef whiteningTransform(X, W, U):\n L = np.diag(W)\n Z = np.transpose(np.matmul(np.matmul(scipy.linalg.\n fractional_matrix_power(L, -0.5), U.transpose()), (X - np.mean(X,\n axis=0)).transpose()))\n return Z\n",
"step-4": "import numpy as np\nimport pandas as pd\nimport matplotlib as plt\nimport scipy.linalg\nfrom distance_metrics import *\nimport time\nimport random\nRANDOM_SEED = 42\nnp.random.seed(RANDOM_SEED)\nrandom.seed(RANDOM_SEED)\n\n\ndef project(X, U, p=None):\n if p == None:\n p = X.shape[1]\n Z = np.matmul(X, U)\n Z[:, p:] = np.mean(Z[:, p:], axis=0)\n X2 = np.matmul(Z, U.transpose())\n return Z, X2\n\n\ndef PCA(X, threshold=0.9):\n X2 = X - np.mean(X, axis=0)\n S = np.matmul(X2.transpose(), X2)\n [W, U] = np.linalg.eigh(S)\n W = np.flip(W, axis=0)\n U = np.flip(U, axis=1)\n validity = np.cumsum(W) / np.sum(W)\n p = np.argmax(validity >= threshold) + 1\n if p <= 1 or threshold == 1:\n p = X.shape[1]\n [Z, X3] = project(X, U, p)\n return [Z, p, X3, U, W]\n\n\ndef whiteningTransform(X, W, U):\n L = np.diag(W)\n Z = np.transpose(np.matmul(np.matmul(scipy.linalg.\n fractional_matrix_power(L, -0.5), U.transpose()), (X - np.mean(X,\n axis=0)).transpose()))\n return Z\n",
"step-5": "import numpy as np\nimport pandas as pd\nimport matplotlib as plt\nimport scipy.linalg\nfrom distance_metrics import *\n\nimport time\nimport random\nRANDOM_SEED = 42\nnp.random.seed(RANDOM_SEED)\nrandom.seed(RANDOM_SEED)\n\n\n\n################################################################\n\t\t# PCA #\n################################################################\n\ndef project(X, U, p = None):\n if p == None: p = X.shape[1]\n Z = np.matmul(X, U)\n Z[:, p:] = np.mean(Z[:, p:], axis = 0)\n X2 = np.matmul(Z, U.transpose())\n return (Z, X2)\ndef PCA(X, threshold = 0.9):\n X2 = X - np.mean(X, axis = 0)\n S = np.matmul(X2.transpose(), X2) #Covariance Matrix\n [W,U] = np.linalg.eigh(S) #eigen vectors in columns\n W = np.flip(W, axis = 0)\n U = np.flip(U, axis = 1)\n \n validity = np.cumsum(W)/np.sum(W) #represents validity of choosing first i+1 eigenvalues\n p = np.argmax(validity>=threshold) + 1\n \n if p<=1 or threshold == 1: p = X.shape[1]\n \n [Z, X3] = project(X, U, p)\n \n #Projection, P, Reconstruction, EigenVectors, EigenValues\n return [Z, p, X3, U, W]\n\n################################################################\n\t\t# Whitening #\n################################################################\n\ndef whiteningTransform(X, W, U):\n\tL = np.diag(W)\n\tZ = np.transpose(np.matmul(np.matmul(scipy.linalg.fractional_matrix_power(L, -0.5), U.transpose()), (X - np.mean(X, axis = 0)).transpose()))\n\treturn Z\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class PayForList(LoginRequiredMixin, ListView):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class PayForDetailView(LoginRequiredMixin, DetailView):
template_name = 'money_easy/payfor_detail.html'
model = PayFor
<|reserved_special_token_0|>
class PayForCreate(LoginRequiredMixin, CreateView):
template_name = 'money_easy/payfor_create.html'
model = PayFor
fields = 'name', 'description'
success_url = reverse_lazy('money_easy:pay_item_list')
<|reserved_special_token_0|>
class PayForDelete(LoginRequiredMixin, DeleteView):
template_name = 'money_easy/payfor_delete.html'
model = PayFor
success_url = reverse_lazy('money_easy:pay_for_list')
<|reserved_special_token_0|>
class PayForUpdate(LoginRequiredMixin, UpdateView):
template_name = 'money_easy/payfor_update.html'
model = PayFor
fields = 'name', 'description'
success_url = reverse_lazy('money_easy:pay_for_list')
<|reserved_special_token_0|>
class PayItemList(LoginRequiredMixin, ListView):
template_name = 'money_easy/payitem_list.html'
model = PayItem
<|reserved_special_token_0|>
class PayForDetailView(LoginRequiredMixin, DetailView):
template_name = 'money_easy/payfor_detail.html'
model = PayFor
<|reserved_special_token_0|>
class PayItemDetailView(LoginRequiredMixin, DetailView):
template_name = 'money_easy/payitem_detail.html'
model = PayItem
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
<|reserved_special_token_0|>
class PayItemCreate(LoginRequiredMixin, CreateView):
template_name = 'money_easy/payitem_create.html'
model = PayItem
fields = 'title', 'payfor', 'money', 'rate', 'priority', 'duedate'
success_url = reverse_lazy('money_easy:pay_item_list')
<|reserved_special_token_0|>
class PayItemDelete(LoginRequiredMixin, DeleteView):
template_name = 'money_easy/payitem_delete.html'
model = PayItem
success_url = reverse_lazy('money_easy:pay_item_list')
<|reserved_special_token_0|>
class PayItemUpdate(LoginRequiredMixin, UpdateView):
template_name = 'money_easy/payitem_update.html'
model = PayItem
fields = 'title', 'payfor', 'money', 'rate', 'priority', 'duedate'
success_url = reverse_lazy('money_easy:pay_item_list')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PayForList(LoginRequiredMixin, ListView):
template_name = 'money_easy/payfor_list.html'
model = PayFor
<|reserved_special_token_0|>
class PayForDetailView(LoginRequiredMixin, DetailView):
template_name = 'money_easy/payfor_detail.html'
model = PayFor
<|reserved_special_token_0|>
class PayForCreate(LoginRequiredMixin, CreateView):
template_name = 'money_easy/payfor_create.html'
model = PayFor
fields = 'name', 'description'
success_url = reverse_lazy('money_easy:pay_item_list')
<|reserved_special_token_0|>
class PayForDelete(LoginRequiredMixin, DeleteView):
template_name = 'money_easy/payfor_delete.html'
model = PayFor
success_url = reverse_lazy('money_easy:pay_for_list')
<|reserved_special_token_0|>
class PayForUpdate(LoginRequiredMixin, UpdateView):
template_name = 'money_easy/payfor_update.html'
model = PayFor
fields = 'name', 'description'
success_url = reverse_lazy('money_easy:pay_for_list')
<|reserved_special_token_0|>
class PayItemList(LoginRequiredMixin, ListView):
template_name = 'money_easy/payitem_list.html'
model = PayItem
<|reserved_special_token_0|>
class PayForDetailView(LoginRequiredMixin, DetailView):
template_name = 'money_easy/payfor_detail.html'
model = PayFor
<|reserved_special_token_0|>
class PayItemDetailView(LoginRequiredMixin, DetailView):
template_name = 'money_easy/payitem_detail.html'
model = PayItem
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
<|reserved_special_token_0|>
class PayItemCreate(LoginRequiredMixin, CreateView):
template_name = 'money_easy/payitem_create.html'
model = PayItem
fields = 'title', 'payfor', 'money', 'rate', 'priority', 'duedate'
success_url = reverse_lazy('money_easy:pay_item_list')
<|reserved_special_token_0|>
class PayItemDelete(LoginRequiredMixin, DeleteView):
template_name = 'money_easy/payitem_delete.html'
model = PayItem
success_url = reverse_lazy('money_easy:pay_item_list')
<|reserved_special_token_0|>
class PayItemUpdate(LoginRequiredMixin, UpdateView):
template_name = 'money_easy/payitem_update.html'
model = PayItem
fields = 'title', 'payfor', 'money', 'rate', 'priority', 'duedate'
success_url = reverse_lazy('money_easy:pay_item_list')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def signup_func(request):
if request.method == 'POST':
input_username = request.POST['username']
input_password = request.POST['password']
try:
User.objects.get(username=input_username)
return render(request, 'registration/signup.html', {'error':
'このユーザーは登録されています'})
except:
user = User.objects.create_user(input_username, '', input_password)
return redirect('money_easy:login')
return render(request, 'registration/signup.html', {})
def login_func(request):
if request.method == 'POST':
input_username = request.POST['username']
input_password = request.POST['password']
user = authenticate(request, username=input_username, password=
input_password)
if user is not None:
login(request, user)
return redirect('money_easy:pay_item_list')
else:
return render(request, 'registration/login.html', {'error':
'ユーザー名かパスワードが間違っています。もう一度入力してください。'})
else:
return render(request, 'registration/login.html')
@login_required()
def logout_func(request):
logout(request)
return redirect('money_easy:login')
class IndexView(LoginRequiredMixin, TemplateView):
template_name = 'money_easy/index.html'
<|reserved_special_token_0|>
class PayForList(LoginRequiredMixin, ListView):
template_name = 'money_easy/payfor_list.html'
model = PayFor
<|reserved_special_token_0|>
class PayForDetailView(LoginRequiredMixin, DetailView):
template_name = 'money_easy/payfor_detail.html'
model = PayFor
<|reserved_special_token_0|>
class PayForCreate(LoginRequiredMixin, CreateView):
template_name = 'money_easy/payfor_create.html'
model = PayFor
fields = 'name', 'description'
success_url = reverse_lazy('money_easy:pay_item_list')
<|reserved_special_token_0|>
class PayForDelete(LoginRequiredMixin, DeleteView):
template_name = 'money_easy/payfor_delete.html'
model = PayFor
success_url = reverse_lazy('money_easy:pay_for_list')
<|reserved_special_token_0|>
class PayForUpdate(LoginRequiredMixin, UpdateView):
template_name = 'money_easy/payfor_update.html'
model = PayFor
fields = 'name', 'description'
success_url = reverse_lazy('money_easy:pay_for_list')
<|reserved_special_token_0|>
class PayItemList(LoginRequiredMixin, ListView):
template_name = 'money_easy/payitem_list.html'
model = PayItem
<|reserved_special_token_0|>
class PayForDetailView(LoginRequiredMixin, DetailView):
template_name = 'money_easy/payfor_detail.html'
model = PayFor
<|reserved_special_token_0|>
class PayItemDetailView(LoginRequiredMixin, DetailView):
template_name = 'money_easy/payitem_detail.html'
model = PayItem
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
<|reserved_special_token_0|>
class PayItemCreate(LoginRequiredMixin, CreateView):
template_name = 'money_easy/payitem_create.html'
model = PayItem
fields = 'title', 'payfor', 'money', 'rate', 'priority', 'duedate'
success_url = reverse_lazy('money_easy:pay_item_list')
<|reserved_special_token_0|>
class PayItemDelete(LoginRequiredMixin, DeleteView):
template_name = 'money_easy/payitem_delete.html'
model = PayItem
success_url = reverse_lazy('money_easy:pay_item_list')
<|reserved_special_token_0|>
class PayItemUpdate(LoginRequiredMixin, UpdateView):
template_name = 'money_easy/payitem_update.html'
model = PayItem
fields = 'title', 'payfor', 'money', 'rate', 'priority', 'duedate'
success_url = reverse_lazy('money_easy:pay_item_list')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def signup_func(request):
if request.method == 'POST':
input_username = request.POST['username']
input_password = request.POST['password']
try:
User.objects.get(username=input_username)
return render(request, 'registration/signup.html', {'error':
'このユーザーは登録されています'})
except:
user = User.objects.create_user(input_username, '', input_password)
return redirect('money_easy:login')
return render(request, 'registration/signup.html', {})
def login_func(request):
if request.method == 'POST':
input_username = request.POST['username']
input_password = request.POST['password']
user = authenticate(request, username=input_username, password=
input_password)
if user is not None:
login(request, user)
return redirect('money_easy:pay_item_list')
else:
return render(request, 'registration/login.html', {'error':
'ユーザー名かパスワードが間違っています。もう一度入力してください。'})
else:
return render(request, 'registration/login.html')
@login_required()
def logout_func(request):
logout(request)
return redirect('money_easy:login')
class IndexView(LoginRequiredMixin, TemplateView):
template_name = 'money_easy/index.html'
index = IndexView.as_view()
class PayForList(LoginRequiredMixin, ListView):
template_name = 'money_easy/payfor_list.html'
model = PayFor
pay_for_list = PayForList.as_view()
class PayForDetailView(LoginRequiredMixin, DetailView):
template_name = 'money_easy/payfor_detail.html'
model = PayFor
pay_for_detail = PayForDetailView.as_view()
class PayForCreate(LoginRequiredMixin, CreateView):
template_name = 'money_easy/payfor_create.html'
model = PayFor
fields = 'name', 'description'
success_url = reverse_lazy('money_easy:pay_item_list')
pay_for_create = PayForCreate.as_view()
class PayForDelete(LoginRequiredMixin, DeleteView):
template_name = 'money_easy/payfor_delete.html'
model = PayFor
success_url = reverse_lazy('money_easy:pay_for_list')
pay_for_delete = PayForDelete.as_view()
class PayForUpdate(LoginRequiredMixin, UpdateView):
template_name = 'money_easy/payfor_update.html'
model = PayFor
fields = 'name', 'description'
success_url = reverse_lazy('money_easy:pay_for_list')
pay_for_update = PayForUpdate.as_view()
class PayItemList(LoginRequiredMixin, ListView):
template_name = 'money_easy/payitem_list.html'
model = PayItem
pay_item_list = PayItemList.as_view()
class PayForDetailView(LoginRequiredMixin, DetailView):
template_name = 'money_easy/payfor_detail.html'
model = PayFor
payfor_detail = PayForDetailView.as_view()
class PayItemDetailView(LoginRequiredMixin, DetailView):
template_name = 'money_easy/payitem_detail.html'
model = PayItem
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
pay_item_detail = PayItemDetailView.as_view()
class PayItemCreate(LoginRequiredMixin, CreateView):
template_name = 'money_easy/payitem_create.html'
model = PayItem
fields = 'title', 'payfor', 'money', 'rate', 'priority', 'duedate'
success_url = reverse_lazy('money_easy:pay_item_list')
pay_item_create = PayItemCreate.as_view()
class PayItemDelete(LoginRequiredMixin, DeleteView):
template_name = 'money_easy/payitem_delete.html'
model = PayItem
success_url = reverse_lazy('money_easy:pay_item_list')
pay_item_delete = PayItemDelete.as_view()
class PayItemUpdate(LoginRequiredMixin, UpdateView):
template_name = 'money_easy/payitem_update.html'
model = PayItem
fields = 'title', 'payfor', 'money', 'rate', 'priority', 'duedate'
success_url = reverse_lazy('money_easy:pay_item_list')
pay_item_update = PayItemUpdate.as_view()
<|reserved_special_token_1|>
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from django.views.generic import TemplateView, ListView, DetailView, CreateView, DeleteView, UpdateView
from .models import PayFor, PayItem
from .forms import SignupForm
# Create your views here.
def signup_func(request):
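    # Reject the signup when the username is already taken; otherwise create the user and redirect to the login page.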
if request.method == 'POST':
input_username = request.POST['username']
input_password = request.POST['password']
try:
User.objects.get(username=input_username)
return render(request, 'registration/signup.html', {'error': 'このユーザーは登録されています'})
except:
user = User.objects.create_user(input_username, '', input_password)
return redirect('money_easy:login')
return render(request, 'registration/signup.html', {})
def login_func(request):
if request.method == 'POST':
input_username = request.POST['username']
input_password = request.POST['password']
user = authenticate(request, username=input_username, password=input_password)
if user is not None:
login(request, user)
return redirect('money_easy:pay_item_list')
else:
return render(request, 'registration/login.html', {'error': 'ユーザー名かパスワードが間違っています。もう一度入力してください。'})
else:
return render(request, 'registration/login.html')
@login_required()
def logout_func(request):
logout(request)
return redirect('money_easy:login')
# class SignupView(CreateView):
# form_class = SignupForm
# success_url = reverse_lazy('home')
# template_name = 'registration/signup.html'
#
# def form_valid(self, form):
# # store the saved user object in self.object
# valid = super().form_valid(form)
# login(self.request, self.object)
# return valid
class IndexView(LoginRequiredMixin, TemplateView):
template_name = 'money_easy/index.html'
index = IndexView.as_view()
class PayForList(LoginRequiredMixin,ListView):
template_name = 'money_easy/payfor_list.html'
model = PayFor
pay_for_list = PayForList.as_view()
class PayForDetailView(LoginRequiredMixin, DetailView):
template_name = 'money_easy/payfor_detail.html'
model = PayFor
pay_for_detail = PayForDetailView.as_view()
class PayForCreate(LoginRequiredMixin,CreateView):
template_name = 'money_easy/payfor_create.html'
model = PayFor
fields = ('name', 'description')
success_url = reverse_lazy('money_easy:pay_item_list')
pay_for_create = PayForCreate.as_view()
class PayForDelete(LoginRequiredMixin, DeleteView):
template_name = 'money_easy/payfor_delete.html'
model = PayFor
success_url = reverse_lazy('money_easy:pay_for_list')
pay_for_delete = PayForDelete.as_view()
class PayForUpdate(LoginRequiredMixin, UpdateView):
template_name = 'money_easy/payfor_update.html'
model = PayFor
fields = ('name', 'description')
success_url = reverse_lazy('money_easy:pay_for_list')
pay_for_update = PayForUpdate.as_view()
class PayItemList(LoginRequiredMixin, ListView):
template_name = 'money_easy/payitem_list.html'
model = PayItem
pay_item_list = PayItemList.as_view()
class PayForDetailView(LoginRequiredMixin, DetailView):
template_name = 'money_easy/payfor_detail.html'
model = PayFor
payfor_detail = PayForDetailView.as_view()
class PayItemDetailView(LoginRequiredMixin, DetailView):
template_name = 'money_easy/payitem_detail.html'
model = PayItem
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# context['priority'] = PayItem.get_priority_display()
return context
pay_item_detail = PayItemDetailView.as_view()
class PayItemCreate(LoginRequiredMixin,CreateView):
template_name = 'money_easy/payitem_create.html'
model = PayItem
fields = ('title', 'payfor', 'money', 'rate', 'priority', 'duedate')
success_url = reverse_lazy('money_easy:pay_item_list')
pay_item_create = PayItemCreate.as_view()
class PayItemDelete(LoginRequiredMixin, DeleteView):
template_name = 'money_easy/payitem_delete.html'
model = PayItem
success_url = reverse_lazy('money_easy:pay_item_list')
pay_item_delete = PayItemDelete.as_view()
class PayItemUpdate(LoginRequiredMixin, UpdateView):
template_name = 'money_easy/payitem_update.html'
model = PayItem
fields = ('title', 'payfor', 'money', 'rate', 'priority', 'duedate')
success_url = reverse_lazy('money_easy:pay_item_list')
pay_item_update = PayItemUpdate.as_view()
# class LoginView(AuthLoginView):
# template_name = 'money_easy/login.html'
#
#
# login = LoginView.as_view()
# def hello(request):
# if request.method == 'GET':
# context = {
# 'message': 'Hello World',
# }
# return render(request, 'hello.html',context)
#
#
# class HelloView(View):
# def get(self, request, *args, **kwargs):
# context = {
# 'message': 'Hello World',
# }
# return render(request,'hello.html',context)
#
#
# hello = HelloView.as_view()
|
flexible
|
{
"blob_id": "dc9b5fbe082f7cf6cd0a9cb0d1b5a662cf3496f0",
"index": 4768,
"step-1": "<mask token>\n\n\nclass PayForList(LoginRequiredMixin, ListView):\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass PayForDetailView(LoginRequiredMixin, DetailView):\n template_name = 'money_easy/payfor_detail.html'\n model = PayFor\n\n\n<mask token>\n\n\nclass PayForCreate(LoginRequiredMixin, CreateView):\n template_name = 'money_easy/payfor_create.html'\n model = PayFor\n fields = 'name', 'description'\n success_url = reverse_lazy('money_easy:pay_item_list')\n\n\n<mask token>\n\n\nclass PayForDelete(LoginRequiredMixin, DeleteView):\n template_name = 'money_easy/payfor_delete.html'\n model = PayFor\n success_url = reverse_lazy('money_easy:pay_for_list')\n\n\n<mask token>\n\n\nclass PayForUpdate(LoginRequiredMixin, UpdateView):\n template_name = 'money_easy/payfor_update.html'\n model = PayFor\n fields = 'name', 'description'\n success_url = reverse_lazy('money_easy:pay_for_list')\n\n\n<mask token>\n\n\nclass PayItemList(LoginRequiredMixin, ListView):\n template_name = 'money_easy/payitem_list.html'\n model = PayItem\n\n\n<mask token>\n\n\nclass PayForDetailView(LoginRequiredMixin, DetailView):\n template_name = 'money_easy/payfor_detail.html'\n model = PayFor\n\n\n<mask token>\n\n\nclass PayItemDetailView(LoginRequiredMixin, DetailView):\n template_name = 'money_easy/payitem_detail.html'\n model = PayItem\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n return context\n\n\n<mask token>\n\n\nclass PayItemCreate(LoginRequiredMixin, CreateView):\n template_name = 'money_easy/payitem_create.html'\n model = PayItem\n fields = 'title', 'payfor', 'money', 'rate', 'priority', 'duedate'\n success_url = reverse_lazy('money_easy:pay_item_list')\n\n\n<mask token>\n\n\nclass PayItemDelete(LoginRequiredMixin, DeleteView):\n template_name = 'money_easy/payitem_delete.html'\n model = PayItem\n success_url = reverse_lazy('money_easy:pay_item_list')\n\n\n<mask token>\n\n\nclass PayItemUpdate(LoginRequiredMixin, UpdateView):\n template_name = 'money_easy/payitem_update.html'\n model = PayItem\n fields = 'title', 'payfor', 'money', 'rate', 'priority', 'duedate'\n success_url = reverse_lazy('money_easy:pay_item_list')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass PayForList(LoginRequiredMixin, ListView):\n template_name = 'money_easy/payfor_list.html'\n model = PayFor\n\n\n<mask token>\n\n\nclass PayForDetailView(LoginRequiredMixin, DetailView):\n template_name = 'money_easy/payfor_detail.html'\n model = PayFor\n\n\n<mask token>\n\n\nclass PayForCreate(LoginRequiredMixin, CreateView):\n template_name = 'money_easy/payfor_create.html'\n model = PayFor\n fields = 'name', 'description'\n success_url = reverse_lazy('money_easy:pay_item_list')\n\n\n<mask token>\n\n\nclass PayForDelete(LoginRequiredMixin, DeleteView):\n template_name = 'money_easy/payfor_delete.html'\n model = PayFor\n success_url = reverse_lazy('money_easy:pay_for_list')\n\n\n<mask token>\n\n\nclass PayForUpdate(LoginRequiredMixin, UpdateView):\n template_name = 'money_easy/payfor_update.html'\n model = PayFor\n fields = 'name', 'description'\n success_url = reverse_lazy('money_easy:pay_for_list')\n\n\n<mask token>\n\n\nclass PayItemList(LoginRequiredMixin, ListView):\n template_name = 'money_easy/payitem_list.html'\n model = PayItem\n\n\n<mask token>\n\n\nclass PayForDetailView(LoginRequiredMixin, DetailView):\n template_name = 'money_easy/payfor_detail.html'\n model = PayFor\n\n\n<mask token>\n\n\nclass PayItemDetailView(LoginRequiredMixin, DetailView):\n template_name = 'money_easy/payitem_detail.html'\n model = PayItem\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n return context\n\n\n<mask token>\n\n\nclass PayItemCreate(LoginRequiredMixin, CreateView):\n template_name = 'money_easy/payitem_create.html'\n model = PayItem\n fields = 'title', 'payfor', 'money', 'rate', 'priority', 'duedate'\n success_url = reverse_lazy('money_easy:pay_item_list')\n\n\n<mask token>\n\n\nclass PayItemDelete(LoginRequiredMixin, DeleteView):\n template_name = 'money_easy/payitem_delete.html'\n model = PayItem\n success_url = reverse_lazy('money_easy:pay_item_list')\n\n\n<mask token>\n\n\nclass PayItemUpdate(LoginRequiredMixin, UpdateView):\n template_name = 'money_easy/payitem_update.html'\n model = PayItem\n fields = 'title', 'payfor', 'money', 'rate', 'priority', 'duedate'\n success_url = reverse_lazy('money_easy:pay_item_list')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef signup_func(request):\n if request.method == 'POST':\n input_username = request.POST['username']\n input_password = request.POST['password']\n try:\n User.objects.get(username=input_username)\n return render(request, 'registration/signup.html', {'error':\n 'このユーザーは登録されています'})\n except:\n user = User.objects.create_user(input_username, '', input_password)\n return redirect('money_easy:login')\n return render(request, 'registration/signup.html', {})\n\n\ndef login_func(request):\n if request.method == 'POST':\n input_username = request.POST['username']\n input_password = request.POST['password']\n user = authenticate(request, username=input_username, password=\n input_password)\n if user is not None:\n login(request, user)\n return redirect('money_easy:pay_item_list')\n else:\n return render(request, 'registration/login.html', {'error':\n 'ユーザー名かパスワードが間違っています。もう一度入力してください。'})\n else:\n return render(request, 'registration/login.html')\n\n\n@login_required()\ndef logout_func(request):\n logout(request)\n return redirect('money_easy:login')\n\n\nclass IndexView(LoginRequiredMixin, TemplateView):\n template_name = 'money_easy/index.html'\n\n\n<mask token>\n\n\nclass PayForList(LoginRequiredMixin, ListView):\n template_name = 'money_easy/payfor_list.html'\n model = PayFor\n\n\n<mask token>\n\n\nclass PayForDetailView(LoginRequiredMixin, DetailView):\n template_name = 'money_easy/payfor_detail.html'\n model = PayFor\n\n\n<mask token>\n\n\nclass PayForCreate(LoginRequiredMixin, CreateView):\n template_name = 'money_easy/payfor_create.html'\n model = PayFor\n fields = 'name', 'description'\n success_url = reverse_lazy('money_easy:pay_item_list')\n\n\n<mask token>\n\n\nclass PayForDelete(LoginRequiredMixin, DeleteView):\n template_name = 'money_easy/payfor_delete.html'\n model = PayFor\n success_url = reverse_lazy('money_easy:pay_for_list')\n\n\n<mask token>\n\n\nclass PayForUpdate(LoginRequiredMixin, UpdateView):\n template_name = 'money_easy/payfor_update.html'\n model = PayFor\n fields = 'name', 'description'\n success_url = reverse_lazy('money_easy:pay_for_list')\n\n\n<mask token>\n\n\nclass PayItemList(LoginRequiredMixin, ListView):\n template_name = 'money_easy/payitem_list.html'\n model = PayItem\n\n\n<mask token>\n\n\nclass PayForDetailView(LoginRequiredMixin, DetailView):\n template_name = 'money_easy/payfor_detail.html'\n model = PayFor\n\n\n<mask token>\n\n\nclass PayItemDetailView(LoginRequiredMixin, DetailView):\n template_name = 'money_easy/payitem_detail.html'\n model = PayItem\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n return context\n\n\n<mask token>\n\n\nclass PayItemCreate(LoginRequiredMixin, CreateView):\n template_name = 'money_easy/payitem_create.html'\n model = PayItem\n fields = 'title', 'payfor', 'money', 'rate', 'priority', 'duedate'\n success_url = reverse_lazy('money_easy:pay_item_list')\n\n\n<mask token>\n\n\nclass PayItemDelete(LoginRequiredMixin, DeleteView):\n template_name = 'money_easy/payitem_delete.html'\n model = PayItem\n success_url = reverse_lazy('money_easy:pay_item_list')\n\n\n<mask token>\n\n\nclass PayItemUpdate(LoginRequiredMixin, UpdateView):\n template_name = 'money_easy/payitem_update.html'\n model = PayItem\n fields = 'title', 'payfor', 'money', 'rate', 'priority', 'duedate'\n success_url = reverse_lazy('money_easy:pay_item_list')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef signup_func(request):\n if request.method == 'POST':\n input_username = request.POST['username']\n input_password = request.POST['password']\n try:\n User.objects.get(username=input_username)\n return render(request, 'registration/signup.html', {'error':\n 'このユーザーは登録されています'})\n except:\n user = User.objects.create_user(input_username, '', input_password)\n return redirect('money_easy:login')\n return render(request, 'registration/signup.html', {})\n\n\ndef login_func(request):\n if request.method == 'POST':\n input_username = request.POST['username']\n input_password = request.POST['password']\n user = authenticate(request, username=input_username, password=\n input_password)\n if user is not None:\n login(request, user)\n return redirect('money_easy:pay_item_list')\n else:\n return render(request, 'registration/login.html', {'error':\n 'ユーザー名かパスワードが間違っています。もう一度入力してください。'})\n else:\n return render(request, 'registration/login.html')\n\n\n@login_required()\ndef logout_func(request):\n logout(request)\n return redirect('money_easy:login')\n\n\nclass IndexView(LoginRequiredMixin, TemplateView):\n template_name = 'money_easy/index.html'\n\n\nindex = IndexView.as_view()\n\n\nclass PayForList(LoginRequiredMixin, ListView):\n template_name = 'money_easy/payfor_list.html'\n model = PayFor\n\n\npay_for_list = PayForList.as_view()\n\n\nclass PayForDetailView(LoginRequiredMixin, DetailView):\n template_name = 'money_easy/payfor_detail.html'\n model = PayFor\n\n\npay_for_detail = PayForDetailView.as_view()\n\n\nclass PayForCreate(LoginRequiredMixin, CreateView):\n template_name = 'money_easy/payfor_create.html'\n model = PayFor\n fields = 'name', 'description'\n success_url = reverse_lazy('money_easy:pay_item_list')\n\n\npay_for_create = PayForCreate.as_view()\n\n\nclass PayForDelete(LoginRequiredMixin, DeleteView):\n template_name = 'money_easy/payfor_delete.html'\n model = PayFor\n success_url = reverse_lazy('money_easy:pay_for_list')\n\n\npay_for_delete = PayForDelete.as_view()\n\n\nclass PayForUpdate(LoginRequiredMixin, UpdateView):\n template_name = 'money_easy/payfor_update.html'\n model = PayFor\n fields = 'name', 'description'\n success_url = reverse_lazy('money_easy:pay_for_list')\n\n\npay_for_update = PayForUpdate.as_view()\n\n\nclass PayItemList(LoginRequiredMixin, ListView):\n template_name = 'money_easy/payitem_list.html'\n model = PayItem\n\n\npay_item_list = PayItemList.as_view()\n\n\nclass PayForDetailView(LoginRequiredMixin, DetailView):\n template_name = 'money_easy/payfor_detail.html'\n model = PayFor\n\n\npayfor_detail = PayForDetailView.as_view()\n\n\nclass PayItemDetailView(LoginRequiredMixin, DetailView):\n template_name = 'money_easy/payitem_detail.html'\n model = PayItem\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n return context\n\n\npay_item_detail = PayItemDetailView.as_view()\n\n\nclass PayItemCreate(LoginRequiredMixin, CreateView):\n template_name = 'money_easy/payitem_create.html'\n model = PayItem\n fields = 'title', 'payfor', 'money', 'rate', 'priority', 'duedate'\n success_url = reverse_lazy('money_easy:pay_item_list')\n\n\npay_item_create = PayItemCreate.as_view()\n\n\nclass PayItemDelete(LoginRequiredMixin, DeleteView):\n template_name = 'money_easy/payitem_delete.html'\n model = PayItem\n success_url = reverse_lazy('money_easy:pay_item_list')\n\n\npay_item_delete = PayItemDelete.as_view()\n\n\nclass PayItemUpdate(LoginRequiredMixin, UpdateView):\n template_name = 
'money_easy/payitem_update.html'\n model = PayItem\n fields = 'title', 'payfor', 'money', 'rate', 'priority', 'duedate'\n success_url = reverse_lazy('money_easy:pay_item_list')\n\n\npay_item_update = PayItemUpdate.as_view()\n",
"step-5": "from django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse_lazy\nfrom django.views.generic import TemplateView, ListView, DetailView, CreateView, DeleteView, UpdateView\n\nfrom .models import PayFor, PayItem\nfrom .forms import SignupForm\n\n\n# Create your views here.\n\n\ndef signup_func(request):\n if request.method == 'POST':\n input_username = request.POST['username']\n input_password = request.POST['password']\n try:\n User.objects.get(username=input_username)\n return render(request, 'registration/signup.html', {'error': 'このユーザーは登録されています'})\n except:\n user = User.objects.create_user(input_username, '', input_password)\n return redirect('money_easy:login')\n return render(request, 'registration/signup.html', {})\n\n\ndef login_func(request):\n if request.method == 'POST':\n input_username = request.POST['username']\n input_password = request.POST['password']\n user = authenticate(request, username=input_username, password=input_password)\n if user is not None:\n login(request, user)\n return redirect('money_easy:pay_item_list')\n else:\n return render(request, 'registration/login.html', {'error': 'ユーザー名かパスワードが間違っています。もう一度入力してください。'})\n else:\n return render(request, 'registration/login.html')\n\n\n@login_required()\ndef logout_func(request):\n logout(request)\n return redirect('money_easy:login')\n\n\n\n# class SignupView(CreateView):\n# form_class = SignupForm\n# success_url = reverse_lazy('home')\n# template_name = 'registration/signup.html'\n#\n# def form_valid(self, form):\n# # self.objectにsave()されたユーザーオブジェクトを格納\n# valid = super().form_valid(form)\n# login(self.request, self.object)\n# return valid\n\n\nclass IndexView(LoginRequiredMixin, TemplateView):\n template_name = 'money_easy/index.html'\n\n\nindex = IndexView.as_view()\n\n\nclass PayForList(LoginRequiredMixin,ListView):\n template_name = 'money_easy/payfor_list.html'\n\n model = PayFor\n\n\npay_for_list = PayForList.as_view()\n\n\n\nclass PayForDetailView(LoginRequiredMixin, DetailView):\n template_name = 'money_easy/payfor_detail.html'\n\n model = PayFor\n\n\npay_for_detail = PayForDetailView.as_view()\n\n\nclass PayForCreate(LoginRequiredMixin,CreateView):\n template_name = 'money_easy/payfor_create.html'\n\n model = PayFor\n\n fields = ('name', 'description')\n success_url = reverse_lazy('money_easy:pay_item_list')\n\n\npay_for_create = PayForCreate.as_view()\n\n\nclass PayForDelete(LoginRequiredMixin, DeleteView):\n template_name = 'money_easy/payfor_delete.html'\n\n model = PayFor\n\n success_url = reverse_lazy('money_easy:pay_for_list')\n\n\npay_for_delete = PayForDelete.as_view()\n\n\nclass PayForUpdate(LoginRequiredMixin, UpdateView):\n template_name = 'money_easy/payfor_update.html'\n\n model = PayFor\n\n fields = ('name', 'description')\n\n success_url = reverse_lazy('money_easy:pay_for_list')\n\n\npay_for_update = PayForUpdate.as_view()\n\n\n\nclass PayItemList(LoginRequiredMixin, ListView):\n template_name = 'money_easy/payitem_list.html'\n\n model = PayItem\n\n\npay_item_list = PayItemList.as_view()\n\n\nclass PayForDetailView(LoginRequiredMixin, DetailView):\n template_name = 'money_easy/payfor_detail.html'\n\n model = PayFor\n\n\npayfor_detail = PayForDetailView.as_view()\n\n\nclass PayItemDetailView(LoginRequiredMixin, DetailView):\n template_name = 
'money_easy/payitem_detail.html'\n\n model = PayItem\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n # context['priority'] = PayItem.get_priority_display()\n return context\n\n\npay_item_detail = PayItemDetailView.as_view()\n\n\nclass PayItemCreate(LoginRequiredMixin,CreateView):\n template_name = 'money_easy/payitem_create.html'\n\n model = PayItem\n\n fields = ('title', 'payfor', 'money', 'rate', 'priority', 'duedate')\n success_url = reverse_lazy('money_easy:pay_item_list')\n\n\npay_item_create = PayItemCreate.as_view()\n\n\nclass PayItemDelete(LoginRequiredMixin, DeleteView):\n template_name = 'money_easy/payitem_delete.html'\n\n model = PayItem\n\n success_url = reverse_lazy('money_easy:pay_item_list')\n\n\npay_item_delete = PayItemDelete.as_view()\n\n\nclass PayItemUpdate(LoginRequiredMixin, UpdateView):\n template_name = 'money_easy/payitem_update.html'\n\n model = PayItem\n\n fields = ('title', 'payfor', 'money', 'rate', 'priority', 'duedate')\n\n success_url = reverse_lazy('money_easy:pay_item_list')\n\n\npay_item_update = PayItemUpdate.as_view()\n\n# class LoginView(AuthLoginView):\n# template_name = 'money_easy/login.html'\n#\n#\n# login = LoginView.as_view()\n\n# def hello(request):\n# if request.method == 'GET':\n# context = {\n# 'message': 'Hello World',\n# }\n# return render(request, 'hello.html',context)\n#\n#\n# class HelloView(View):\n# def get(self, request, *args, **kwargs):\n# context = {\n# 'message': 'Hello World',\n# }\n# return render(request,'hello.html',context)\n#\n#\n# hello = HelloView.as_view()\n",
"step-ids": [
22,
23,
28,
29,
31
]
}
|
[
22,
23,
28,
29,
31
] |
"""
进程对象属性
"""
from multiprocessing import Process
import time
def tm():
for i in range(3):
print(time.ctime())
time.sleep(2)
p = Process(target=tm,name='Tarena')
# make the child a daemon process so it exits together with the parent
p.daemon = True
p.start()
print("Name:",p.name) # 进程名称
print("PID:",p.pid) # 进程PID
print("is alive:",p.is_alive()) # 是否在生命周期
|
normal
|
{
"blob_id": "9d7bc2d93b855fbd22a4707a6237ac51069beb53",
"index": 9385,
"step-1": "<mask token>\n\n\ndef tm():\n for i in range(3):\n print(time.ctime())\n time.sleep(2)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef tm():\n for i in range(3):\n print(time.ctime())\n time.sleep(2)\n\n\n<mask token>\np.start()\nprint('Name:', p.name)\nprint('PID:', p.pid)\nprint('is alive:', p.is_alive())\n",
"step-3": "<mask token>\n\n\ndef tm():\n for i in range(3):\n print(time.ctime())\n time.sleep(2)\n\n\np = Process(target=tm, name='Tarena')\np.daemon = True\np.start()\nprint('Name:', p.name)\nprint('PID:', p.pid)\nprint('is alive:', p.is_alive())\n",
"step-4": "<mask token>\nfrom multiprocessing import Process\nimport time\n\n\ndef tm():\n for i in range(3):\n print(time.ctime())\n time.sleep(2)\n\n\np = Process(target=tm, name='Tarena')\np.daemon = True\np.start()\nprint('Name:', p.name)\nprint('PID:', p.pid)\nprint('is alive:', p.is_alive())\n",
"step-5": "\"\"\"\n进程对象属性\n\"\"\"\n\nfrom multiprocessing import Process\nimport time\n\n\ndef tm():\n for i in range(3):\n print(time.ctime())\n time.sleep(2)\n\n\np = Process(target=tm,name='Tarena')\n\n# 设置子进程随父进程退出\np.daemon = True\n\np.start()\nprint(\"Name:\",p.name) # 进程名称\nprint(\"PID:\",p.pid) # 进程PID\nprint(\"is alive:\",p.is_alive()) # 是否在生命周期",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
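A note on the multiprocessing record above: with p.daemon = True the child is killed as soon as the parent's main code returns, so the three timestamps may never appear. A minimal sketch, assuming the same tm function, that waits for the child instead:

from multiprocessing import Process
import time

def tm():
    for i in range(3):
        print(time.ctime())
        time.sleep(2)

if __name__ == '__main__':
    p = Process(target=tm, name='Tarena')
    p.daemon = True              # child exits together with the parent
    p.start()
    p.join()                     # wait for the child so its output is actually printed
    print('exit code:', p.exitcode)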
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from . import models
class RegisterForm(UserCreationForm):
email = forms.EmailField(required=True)
class Meta:
model = User
fields = ("username", "email", "password1", "password2")
class ChangeEmail(forms.Form):
email = forms.CharField(required=True, max_length=120, widget=forms.TextInput(attrs={'name': 'emailInput'}))
class ChangePassword(forms.Form):
oldPassword = forms.CharField(required=True, min_length=8, max_length=80, widget=forms.PasswordInput(attrs={'name':"oldPassword"}))
password1 = forms.CharField(required=True, min_length=8, max_length=80, widget=forms.PasswordInput(attrs={'name': 'password1'}), label="Password")
password2 = forms.CharField(required=True, min_length=8, max_length=80, widget=forms.PasswordInput(attrs={'name': 'password2'}), label='Confirm password')
|
normal
|
{
"blob_id": "503726cd2d70286189f4b8e02acaa3d5f6e29e12",
"index": 8538,
"step-1": "<mask token>\n\n\nclass ChangeEmail(forms.Form):\n <mask token>\n\n\nclass ChangePassword(forms.Form):\n oldPassword = forms.CharField(required=True, min_length=8, max_length=\n 80, widget=forms.PasswordInput(attrs={'name': 'oldPassword'}))\n password1 = forms.CharField(required=True, min_length=8, max_length=80,\n widget=forms.PasswordInput(attrs={'name': 'password1'}), label=\n 'Password')\n password2 = forms.CharField(required=True, min_length=8, max_length=80,\n widget=forms.PasswordInput(attrs={'name': 'password2'}), label=\n 'Confirm password')\n",
"step-2": "<mask token>\n\n\nclass RegisterForm(UserCreationForm):\n <mask token>\n\n\n class Meta:\n model = User\n fields = 'username', 'email', 'password1', 'password2'\n\n\nclass ChangeEmail(forms.Form):\n email = forms.CharField(required=True, max_length=120, widget=forms.\n TextInput(attrs={'name': 'emailInput'}))\n\n\nclass ChangePassword(forms.Form):\n oldPassword = forms.CharField(required=True, min_length=8, max_length=\n 80, widget=forms.PasswordInput(attrs={'name': 'oldPassword'}))\n password1 = forms.CharField(required=True, min_length=8, max_length=80,\n widget=forms.PasswordInput(attrs={'name': 'password1'}), label=\n 'Password')\n password2 = forms.CharField(required=True, min_length=8, max_length=80,\n widget=forms.PasswordInput(attrs={'name': 'password2'}), label=\n 'Confirm password')\n",
"step-3": "<mask token>\n\n\nclass RegisterForm(UserCreationForm):\n email = forms.EmailField(required=True)\n\n\n class Meta:\n model = User\n fields = 'username', 'email', 'password1', 'password2'\n\n\nclass ChangeEmail(forms.Form):\n email = forms.CharField(required=True, max_length=120, widget=forms.\n TextInput(attrs={'name': 'emailInput'}))\n\n\nclass ChangePassword(forms.Form):\n oldPassword = forms.CharField(required=True, min_length=8, max_length=\n 80, widget=forms.PasswordInput(attrs={'name': 'oldPassword'}))\n password1 = forms.CharField(required=True, min_length=8, max_length=80,\n widget=forms.PasswordInput(attrs={'name': 'password1'}), label=\n 'Password')\n password2 = forms.CharField(required=True, min_length=8, max_length=80,\n widget=forms.PasswordInput(attrs={'name': 'password2'}), label=\n 'Confirm password')\n",
"step-4": "from django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom . import models\n\n\nclass RegisterForm(UserCreationForm):\n email = forms.EmailField(required=True)\n\n\n class Meta:\n model = User\n fields = 'username', 'email', 'password1', 'password2'\n\n\nclass ChangeEmail(forms.Form):\n email = forms.CharField(required=True, max_length=120, widget=forms.\n TextInput(attrs={'name': 'emailInput'}))\n\n\nclass ChangePassword(forms.Form):\n oldPassword = forms.CharField(required=True, min_length=8, max_length=\n 80, widget=forms.PasswordInput(attrs={'name': 'oldPassword'}))\n password1 = forms.CharField(required=True, min_length=8, max_length=80,\n widget=forms.PasswordInput(attrs={'name': 'password1'}), label=\n 'Password')\n password2 = forms.CharField(required=True, min_length=8, max_length=80,\n widget=forms.PasswordInput(attrs={'name': 'password2'}), label=\n 'Confirm password')\n",
"step-5": "from django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom . import models\n\n\nclass RegisterForm(UserCreationForm):\n email = forms.EmailField(required=True)\n\n class Meta:\n model = User\n fields = (\"username\", \"email\", \"password1\", \"password2\")\n\n\nclass ChangeEmail(forms.Form):\n email = forms.CharField(required=True, max_length=120, widget=forms.TextInput(attrs={'name': 'emailInput'}))\n\n\nclass ChangePassword(forms.Form):\n oldPassword = forms.CharField(required=True, min_length=8, max_length=80, widget=forms.PasswordInput(attrs={'name':\"oldPassword\"}))\n password1 = forms.CharField(required=True, min_length=8, max_length=80, widget=forms.PasswordInput(attrs={'name': 'password1'}), label=\"Password\")\n password2 = forms.CharField(required=True, min_length=8, max_length=80, widget=forms.PasswordInput(attrs={'name': 'password2'}), label='Confirm password')\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-10-28 17:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('KYusers', '0017_caprofile_regs'),
]
operations = [
migrations.AddField(
model_name='message',
name='mard_read',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='caprofile',
name='regs',
field=models.ManyToManyField(blank=True, related_name='regs', to='KYusers.KYProfile'),
),
]
|
normal
|
{
"blob_id": "12c3fe8a3ca1e660eeb90b16eca17eddd47e5de7",
"index": 7124,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('KYusers', '0017_caprofile_regs')]\n operations = [migrations.AddField(model_name='message', name=\n 'mard_read', field=models.BooleanField(default=False)), migrations.\n AlterField(model_name='caprofile', name='regs', field=models.\n ManyToManyField(blank=True, related_name='regs', to=\n 'KYusers.KYProfile'))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('KYusers', '0017_caprofile_regs')]\n operations = [migrations.AddField(model_name='message', name=\n 'mard_read', field=models.BooleanField(default=False)), migrations.\n AlterField(model_name='caprofile', name='regs', field=models.\n ManyToManyField(blank=True, related_name='regs', to=\n 'KYusers.KYProfile'))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.8 on 2016-10-28 17:08\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('KYusers', '0017_caprofile_regs'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='message',\n name='mard_read',\n field=models.BooleanField(default=False),\n ),\n migrations.AlterField(\n model_name='caprofile',\n name='regs',\n field=models.ManyToManyField(blank=True, related_name='regs', to='KYusers.KYProfile'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-02-26 13:14
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('user_details', '0003_auto_20180226_1816'),
]
operations = [
migrations.AlterField(
model_name='token',
name='expiry_date',
field=models.DateTimeField(default=datetime.datetime(2018, 2, 28, 13, 14, 15, 831612, tzinfo=utc)),
),
migrations.AlterField(
model_name='token',
name='user_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user_details.User'),
),
]
|
normal
|
{
"blob_id": "c6170678b523a105312d8ce316853859657d3c94",
"index": 2235,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('user_details', '0003_auto_20180226_1816')]\n operations = [migrations.AlterField(model_name='token', name=\n 'expiry_date', field=models.DateTimeField(default=datetime.datetime\n (2018, 2, 28, 13, 14, 15, 831612, tzinfo=utc))), migrations.\n AlterField(model_name='token', name='user_id', field=models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'user_details.User'))]\n",
"step-4": "from __future__ import unicode_literals\nimport datetime\nfrom django.db import migrations, models\nimport django.db.models.deletion\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n dependencies = [('user_details', '0003_auto_20180226_1816')]\n operations = [migrations.AlterField(model_name='token', name=\n 'expiry_date', field=models.DateTimeField(default=datetime.datetime\n (2018, 2, 28, 13, 14, 15, 831612, tzinfo=utc))), migrations.\n AlterField(model_name='token', name='user_id', field=models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'user_details.User'))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.10 on 2018-02-26 13:14\nfrom __future__ import unicode_literals\n\nimport datetime\nfrom django.db import migrations, models\nimport django.db.models.deletion\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('user_details', '0003_auto_20180226_1816'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='token',\n name='expiry_date',\n field=models.DateTimeField(default=datetime.datetime(2018, 2, 28, 13, 14, 15, 831612, tzinfo=utc)),\n ),\n migrations.AlterField(\n model_name='token',\n name='user_id',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user_details.User'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cProfile
import re
import pstats
import os
import functools
# cProfile.run('re.compile("foo|bar")')
def do_cprofile(filename):
"""
decorator for function profiling
:param filename:
:return:
"""
def wrapper(func):
@functools.wraps(func)
def profiled_func(*args, **kwargs):
            # Flag for whether to do profiling or not.
# DO_PROF = os.getenv('PROFILING')
DO_PROF = True
if DO_PROF:
profile = cProfile.Profile()
profile.enable()
result = func(*args, **kwargs)
profile.disable()
                # Sort stats by internal time.
sortby = 'tottime'
ps = pstats.Stats(profile).sort_stats(sortby)
ps.dump_stats(filename)
else:
result = func(*args, **kwargs)
return result
return profiled_func
return wrapper
# print(f(5))
# A sample of catch the return result
class Memoized(object):
def __init__(self, func):
self.func = func
self.results = {}
def __get__(self, instance, cls):
self.instance = instance
return self
def __call__(self, *args):
key = args
try:
return self.results[key]
except KeyError:
self.results[key] = self.func(self.instance, *args)
return self.results[key]
@do_cprofile('./ff.prof')
# @Memoized
def f(n):
if n < 2:
return n
return f(n - 2) + f(n - 1)
f(5)
f(5)
|
normal
|
{
"blob_id": "8c055816def1c0a19e672ab4386f9b9a345b6323",
"index": 7837,
"step-1": "<mask token>\n\n\nclass Memoized(object):\n\n def __init__(self, func):\n self.func = func\n self.results = {}\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Memoized(object):\n\n def __init__(self, func):\n self.func = func\n self.results = {}\n\n def __get__(self, instance, cls):\n self.instance = instance\n return self\n\n def __call__(self, *args):\n key = args\n try:\n return self.results[key]\n except KeyError:\n self.results[key] = self.func(self.instance, *args)\n return self.results[key]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef do_cprofile(filename):\n \"\"\"\n decorator for function profiling\n :param filename: \n :return: \n \"\"\"\n\n def wrapper(func):\n\n @functools.wraps(func)\n def profiled_func(*args, **kwargs):\n DO_PROF = True\n if DO_PROF:\n profile = cProfile.Profile()\n profile.enable()\n result = func(*args, **kwargs)\n profile.disable()\n sortby = 'tottime'\n ps = pstats.Stats(profile).sort_stats(sortby)\n ps.dump_stats(filename)\n else:\n result = func(*args, **kwargs)\n return result\n return profiled_func\n return wrapper\n\n\nclass Memoized(object):\n\n def __init__(self, func):\n self.func = func\n self.results = {}\n\n def __get__(self, instance, cls):\n self.instance = instance\n return self\n\n def __call__(self, *args):\n key = args\n try:\n return self.results[key]\n except KeyError:\n self.results[key] = self.func(self.instance, *args)\n return self.results[key]\n\n\n@do_cprofile('./ff.prof')\ndef f(n):\n if n < 2:\n return n\n return f(n - 2) + f(n - 1)\n\n\nf(5)\nf(5)\n",
"step-4": "import cProfile\nimport re\nimport pstats\nimport os\nimport functools\n\n\ndef do_cprofile(filename):\n \"\"\"\n decorator for function profiling\n :param filename: \n :return: \n \"\"\"\n\n def wrapper(func):\n\n @functools.wraps(func)\n def profiled_func(*args, **kwargs):\n DO_PROF = True\n if DO_PROF:\n profile = cProfile.Profile()\n profile.enable()\n result = func(*args, **kwargs)\n profile.disable()\n sortby = 'tottime'\n ps = pstats.Stats(profile).sort_stats(sortby)\n ps.dump_stats(filename)\n else:\n result = func(*args, **kwargs)\n return result\n return profiled_func\n return wrapper\n\n\nclass Memoized(object):\n\n def __init__(self, func):\n self.func = func\n self.results = {}\n\n def __get__(self, instance, cls):\n self.instance = instance\n return self\n\n def __call__(self, *args):\n key = args\n try:\n return self.results[key]\n except KeyError:\n self.results[key] = self.func(self.instance, *args)\n return self.results[key]\n\n\n@do_cprofile('./ff.prof')\ndef f(n):\n if n < 2:\n return n\n return f(n - 2) + f(n - 1)\n\n\nf(5)\nf(5)\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport cProfile\nimport re\nimport pstats\nimport os\nimport functools\n\n\n# cProfile.run('re.compile(\"foo|bar\")')\n\ndef do_cprofile(filename):\n \"\"\"\n decorator for function profiling\n :param filename: \n :return: \n \"\"\"\n\n def wrapper(func):\n @functools.wraps(func)\n def profiled_func(*args, **kwargs):\n # Flag for do profiling or not.\n # DO_PROF = os.getenv('PROFILING')\n DO_PROF = True\n if DO_PROF:\n profile = cProfile.Profile()\n profile.enable()\n result = func(*args, **kwargs)\n profile.disable()\n # Sort stat by internal time.\n sortby = 'tottime'\n ps = pstats.Stats(profile).sort_stats(sortby)\n ps.dump_stats(filename)\n else:\n result = func(*args, **kwargs)\n return result\n\n return profiled_func\n\n return wrapper\n\n\n# print(f(5))\n\n\n# A sample of catch the return result\nclass Memoized(object):\n def __init__(self, func):\n self.func = func\n self.results = {}\n\n def __get__(self, instance, cls):\n self.instance = instance\n return self\n\n def __call__(self, *args):\n key = args\n try:\n return self.results[key]\n except KeyError:\n self.results[key] = self.func(self.instance, *args)\n return self.results[key]\n\n\n@do_cprofile('./ff.prof')\n# @Memoized\ndef f(n):\n if n < 2:\n return n\n return f(n - 2) + f(n - 1)\n\n\nf(5)\nf(5)\n",
"step-ids": [
2,
4,
7,
8,
9
]
}
|
[
2,
4,
7,
8,
9
] |
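The do_cprofile decorator above only dumps raw stats to ./ff.prof; reading them back is a separate step. A small sketch using the standard pstats module on that same file:

import pstats

stats = pstats.Stats('./ff.prof')
stats.sort_stats('tottime').print_stats(10)  # ten slowest entries by internal time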
with open('vocabulary.txt', 'r') as f:
for line in f:
information = line.strip().split(': ')
# print(information[0], information[1])
question = information[1]
answer = information[0]
my_answer = input(f'{question}:')
if my_answer == answer:
            print('맞았습니다!')  # "Correct!"
else:
            print(f'아쉽습니다. 정답은 {answer}입니다.')  # "Sorry, the answer is {answer}."
|
normal
|
{
"blob_id": "34009d1aa145f4f5c55d0c5f5945c3793fbc6429",
"index": 7823,
"step-1": "<mask token>\n",
"step-2": "with open('vocabulary.txt', 'r') as f:\n for line in f:\n information = line.strip().split(': ')\n question = information[1]\n answer = information[0]\n my_answer = input(f'{question}:')\n if my_answer == answer:\n print('맞았습니다!')\n else:\n print(f'아쉽습니다. 정답은 {answer}입니다.')\n",
"step-3": "with open('vocabulary.txt', 'r') as f:\n for line in f:\n information = line.strip().split(': ')\n # print(information[0], information[1])\n question = information[1]\n answer = information[0]\n\n my_answer = input(f'{question}:')\n if my_answer == answer:\n print('맞았습니다!')\n else:\n print(f'아쉽습니다. 정답은 {answer}입니다.')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import unittest
from theoktany.serializers import serialize
class SerializerTest(unittest.TestCase):
class TestObject(object):
def __init__(self, **kwargs):
for name, value in kwargs.items():
self.__setattr__(name, value)
def test_serialize(self):
object_dict = {'firstName': 'Test', 'lastName': 'Test last'}
json_str1 = '"firstName": "Test"'
json_str2 = '"lastName": "Test last"'
serialized_str = serialize(object_dict)
self.assertIn(json_str1, serialized_str)
self.assertIn(json_str2, serialized_str)
def test_serialize_string(self):
"""Ensure that quotes are properly escaped"""
string = 'This is a "string" with \'quotes.\''
json_string = '"{}"'.format(string.replace('"', '\\"'))
self.assertEqual(serialize(string), json_string)
def test_serialize_none(self):
"""Ensure that None gets serialized to 'null'"""
self.assertEqual(serialize(None), 'null')
def test_serialize_object(self):
"""Ensure that the serializer throws an error for an unserializable object"""
test_obj = self.TestObject(prop1='x', prop2=1234)
with self.assertRaises(TypeError):
serialize(test_obj)
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "4e4d6a9ed07aa03c79dade05e01f226017b13de5",
"index": 9250,
"step-1": "<mask token>\n\n\nclass SerializerTest(unittest.TestCase):\n\n\n class TestObject(object):\n\n def __init__(self, **kwargs):\n for name, value in kwargs.items():\n self.__setattr__(name, value)\n <mask token>\n\n def test_serialize_string(self):\n \"\"\"Ensure that quotes are properly escaped\"\"\"\n string = 'This is a \"string\" with \\'quotes.\\''\n json_string = '\"{}\"'.format(string.replace('\"', '\\\\\"'))\n self.assertEqual(serialize(string), json_string)\n <mask token>\n\n def test_serialize_object(self):\n \"\"\"Ensure that the serializer throws an error for an unserializable object\"\"\"\n test_obj = self.TestObject(prop1='x', prop2=1234)\n with self.assertRaises(TypeError):\n serialize(test_obj)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SerializerTest(unittest.TestCase):\n\n\n class TestObject(object):\n\n def __init__(self, **kwargs):\n for name, value in kwargs.items():\n self.__setattr__(name, value)\n\n def test_serialize(self):\n object_dict = {'firstName': 'Test', 'lastName': 'Test last'}\n json_str1 = '\"firstName\": \"Test\"'\n json_str2 = '\"lastName\": \"Test last\"'\n serialized_str = serialize(object_dict)\n self.assertIn(json_str1, serialized_str)\n self.assertIn(json_str2, serialized_str)\n\n def test_serialize_string(self):\n \"\"\"Ensure that quotes are properly escaped\"\"\"\n string = 'This is a \"string\" with \\'quotes.\\''\n json_string = '\"{}\"'.format(string.replace('\"', '\\\\\"'))\n self.assertEqual(serialize(string), json_string)\n <mask token>\n\n def test_serialize_object(self):\n \"\"\"Ensure that the serializer throws an error for an unserializable object\"\"\"\n test_obj = self.TestObject(prop1='x', prop2=1234)\n with self.assertRaises(TypeError):\n serialize(test_obj)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SerializerTest(unittest.TestCase):\n\n\n class TestObject(object):\n\n def __init__(self, **kwargs):\n for name, value in kwargs.items():\n self.__setattr__(name, value)\n\n def test_serialize(self):\n object_dict = {'firstName': 'Test', 'lastName': 'Test last'}\n json_str1 = '\"firstName\": \"Test\"'\n json_str2 = '\"lastName\": \"Test last\"'\n serialized_str = serialize(object_dict)\n self.assertIn(json_str1, serialized_str)\n self.assertIn(json_str2, serialized_str)\n\n def test_serialize_string(self):\n \"\"\"Ensure that quotes are properly escaped\"\"\"\n string = 'This is a \"string\" with \\'quotes.\\''\n json_string = '\"{}\"'.format(string.replace('\"', '\\\\\"'))\n self.assertEqual(serialize(string), json_string)\n\n def test_serialize_none(self):\n \"\"\"Ensure that None gets serialized to 'null'\"\"\"\n self.assertEqual(serialize(None), 'null')\n\n def test_serialize_object(self):\n \"\"\"Ensure that the serializer throws an error for an unserializable object\"\"\"\n test_obj = self.TestObject(prop1='x', prop2=1234)\n with self.assertRaises(TypeError):\n serialize(test_obj)\n\n\n<mask token>\n",
"step-4": "import unittest\nfrom theoktany.serializers import serialize\n\n\nclass SerializerTest(unittest.TestCase):\n\n\n class TestObject(object):\n\n def __init__(self, **kwargs):\n for name, value in kwargs.items():\n self.__setattr__(name, value)\n\n def test_serialize(self):\n object_dict = {'firstName': 'Test', 'lastName': 'Test last'}\n json_str1 = '\"firstName\": \"Test\"'\n json_str2 = '\"lastName\": \"Test last\"'\n serialized_str = serialize(object_dict)\n self.assertIn(json_str1, serialized_str)\n self.assertIn(json_str2, serialized_str)\n\n def test_serialize_string(self):\n \"\"\"Ensure that quotes are properly escaped\"\"\"\n string = 'This is a \"string\" with \\'quotes.\\''\n json_string = '\"{}\"'.format(string.replace('\"', '\\\\\"'))\n self.assertEqual(serialize(string), json_string)\n\n def test_serialize_none(self):\n \"\"\"Ensure that None gets serialized to 'null'\"\"\"\n self.assertEqual(serialize(None), 'null')\n\n def test_serialize_object(self):\n \"\"\"Ensure that the serializer throws an error for an unserializable object\"\"\"\n test_obj = self.TestObject(prop1='x', prop2=1234)\n with self.assertRaises(TypeError):\n serialize(test_obj)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": null,
"step-ids": [
3,
4,
5,
7
]
}
|
[
3,
4,
5,
7
] |
import sys
sys.stdin = open('줄긋기.txt')
T = int(input())
for tc in range(1, T + 1):
N = int(input())
dot = [list(map(int, input().split())) for _ in range(N)]
ran = []
for a in range(N - 1):
for b in range(a + 1, N):
if dot[a][1] - dot[b][1] == 0:
if 'inf' not in ran:
ran.append('inf')
else:
K = (dot[a][0] - dot[b][0]) / (dot[a][1] - dot[b][1])
if K not in ran:
ran.append(K)
print('#{} {}'.format(tc, len(ran)))
|
normal
|
{
"blob_id": "03854f48751460fdc27d42ee5c766934ee356cfd",
"index": 6161,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor tc in range(1, T + 1):\n N = int(input())\n dot = [list(map(int, input().split())) for _ in range(N)]\n ran = []\n for a in range(N - 1):\n for b in range(a + 1, N):\n if dot[a][1] - dot[b][1] == 0:\n if 'inf' not in ran:\n ran.append('inf')\n else:\n K = (dot[a][0] - dot[b][0]) / (dot[a][1] - dot[b][1])\n if K not in ran:\n ran.append(K)\n print('#{} {}'.format(tc, len(ran)))\n",
"step-3": "<mask token>\nsys.stdin = open('줄긋기.txt')\nT = int(input())\nfor tc in range(1, T + 1):\n N = int(input())\n dot = [list(map(int, input().split())) for _ in range(N)]\n ran = []\n for a in range(N - 1):\n for b in range(a + 1, N):\n if dot[a][1] - dot[b][1] == 0:\n if 'inf' not in ran:\n ran.append('inf')\n else:\n K = (dot[a][0] - dot[b][0]) / (dot[a][1] - dot[b][1])\n if K not in ran:\n ran.append(K)\n print('#{} {}'.format(tc, len(ran)))\n",
"step-4": "import sys\nsys.stdin = open('줄긋기.txt')\nT = int(input())\nfor tc in range(1, T + 1):\n N = int(input())\n dot = [list(map(int, input().split())) for _ in range(N)]\n ran = []\n for a in range(N - 1):\n for b in range(a + 1, N):\n if dot[a][1] - dot[b][1] == 0:\n if 'inf' not in ran:\n ran.append('inf')\n else:\n K = (dot[a][0] - dot[b][0]) / (dot[a][1] - dot[b][1])\n if K not in ran:\n ran.append(K)\n print('#{} {}'.format(tc, len(ran)))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
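The pair loop above tests slope membership with a Python list, which is O(n) per lookup; a set gives the same count with O(1) lookups. A sketch of the counting step under that change (the helper name count_directions is made up here):

def count_directions(dot):
    # count the distinct slopes (plus the vertical case) over all point pairs
    slopes = set()
    n = len(dot)
    for a in range(n - 1):
        for b in range(a + 1, n):
            if dot[a][1] == dot[b][1]:
                slopes.add('inf')
            else:
                slopes.add((dot[a][0] - dot[b][0]) / (dot[a][1] - dot[b][1]))
    return len(slopes)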
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def write_csv(filename, train_acc, test_acc, train_loss, test_loss,
train_error, test_error, epoch):
if epoch == 0:
with open(filename, 'w') as f:
f.write(
'train_acc,test_acc,train_loss, test_loss, train_error, test_error\n'
)
f.write('{0},{1},{2},{3},{4},{5}\n'.format(train_acc[-1],
test_acc[-1], train_loss[-1], test_loss[-1], train_error[-1
], test_error[-1]))
else:
with open(filename, 'a') as f:
f.write('{0},{1},{2},{3},{4},{5}\n'.format(train_acc[-1],
test_acc[-1], train_loss[-1], test_loss[-1], train_error[-1
], test_error[-1]))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def plots(epochs, train_acc, test_acc, train_loss, test_loss, train_error,
test_error, filename):
plt.style.use('bmh')
fig = plt.figure(figsize=(8, 6))
plt.plot(epochs, train_acc, 'r', epochs, test_acc, 'g')
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train_acc', 'test_acc'], loc='upper left')
fig.savefig(filename + '_accuracy.png')
fig = plt.figure(figsize=(8, 6))
plt.plot(epochs, train_loss, 'r', epochs, test_loss, 'g')
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train_loss', 'test_loss'], loc='upper left')
fig.savefig(filename + '_loss.png')
fig = plt.figure(figsize=(8, 6))
plt.plot(epochs, train_error, 'r', epochs, test_error, 'g')
plt.title('model error rate')
plt.ylabel('error rate')
plt.xlabel('epoch')
plt.legend(['train_error', 'test_error'], loc='upper left')
fig.savefig(filename + '_error.png')
plt.close('all')
def write_csv(filename, train_acc, test_acc, train_loss, test_loss,
train_error, test_error, epoch):
if epoch == 0:
with open(filename, 'w') as f:
f.write(
'train_acc,test_acc,train_loss, test_loss, train_error, test_error\n'
)
f.write('{0},{1},{2},{3},{4},{5}\n'.format(train_acc[-1],
test_acc[-1], train_loss[-1], test_loss[-1], train_error[-1
], test_error[-1]))
else:
with open(filename, 'a') as f:
f.write('{0},{1},{2},{3},{4},{5}\n'.format(train_acc[-1],
test_acc[-1], train_loss[-1], test_loss[-1], train_error[-1
], test_error[-1]))
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from PIL import Image
from scipy.misc import imsave, imread
def plots(epochs, train_acc, test_acc, train_loss, test_loss, train_error,
test_error, filename):
plt.style.use('bmh')
fig = plt.figure(figsize=(8, 6))
plt.plot(epochs, train_acc, 'r', epochs, test_acc, 'g')
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train_acc', 'test_acc'], loc='upper left')
fig.savefig(filename + '_accuracy.png')
fig = plt.figure(figsize=(8, 6))
plt.plot(epochs, train_loss, 'r', epochs, test_loss, 'g')
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train_loss', 'test_loss'], loc='upper left')
fig.savefig(filename + '_loss.png')
fig = plt.figure(figsize=(8, 6))
plt.plot(epochs, train_error, 'r', epochs, test_error, 'g')
plt.title('model error rate')
plt.ylabel('error rate')
plt.xlabel('epoch')
plt.legend(['train_error', 'test_error'], loc='upper left')
fig.savefig(filename + '_error.png')
plt.close('all')
def write_csv(filename, train_acc, test_acc, train_loss, test_loss,
train_error, test_error, epoch):
if epoch == 0:
with open(filename, 'w') as f:
f.write(
'train_acc,test_acc,train_loss, test_loss, train_error, test_error\n'
)
f.write('{0},{1},{2},{3},{4},{5}\n'.format(train_acc[-1],
test_acc[-1], train_loss[-1], test_loss[-1], train_error[-1
], test_error[-1]))
else:
with open(filename, 'a') as f:
f.write('{0},{1},{2},{3},{4},{5}\n'.format(train_acc[-1],
test_acc[-1], train_loss[-1], test_loss[-1], train_error[-1
], test_error[-1]))
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from PIL import Image
from scipy.misc import imsave, imread
def plots(epochs, train_acc, test_acc, train_loss, test_loss, train_error, test_error,filename):
plt.style.use('bmh')
fig=plt.figure(figsize=(8,6))
plt.plot(epochs,train_acc, 'r', epochs,test_acc, 'g')
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train_acc', 'test_acc'], loc='upper left')
fig.savefig(filename + '_accuracy.png')
fig=plt.figure(figsize=(8,6))
plt.plot(epochs,train_loss, 'r', epochs,test_loss, 'g')
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train_loss', 'test_loss'], loc='upper left')
fig.savefig(filename + '_loss.png')
fig=plt.figure(figsize=(8,6))
plt.plot(epochs,train_error, 'r', epochs,test_error, 'g')
plt.title('model error rate')
plt.ylabel('error rate')
plt.xlabel('epoch')
plt.legend(['train_error', 'test_error'], loc='upper left')
fig.savefig(filename + '_error.png')
plt.close('all')
def write_csv(filename, train_acc,test_acc,train_loss,test_loss,train_error,test_error,epoch):
if epoch==0:
with open(filename, 'w') as f:
f.write('train_acc,test_acc,train_loss, test_loss, train_error, test_error\n')
f.write('{0},{1},{2},{3},{4},{5}\n'.format(train_acc[-1],\
test_acc[-1],\
train_loss[-1],\
test_loss[-1],\
train_error[-1],\
test_error[-1]))
else:
with open(filename, 'a') as f:
f.write('{0},{1},{2},{3},{4},{5}\n'.format(train_acc[-1],\
test_acc[-1],\
train_loss[-1],\
test_loss[-1],\
train_error[-1],\
test_error[-1]))
|
flexible
|
{
"blob_id": "93150eb1c6746e2b1967eb5305fa526ae36968fd",
"index": 2003,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef write_csv(filename, train_acc, test_acc, train_loss, test_loss,\n train_error, test_error, epoch):\n if epoch == 0:\n with open(filename, 'w') as f:\n f.write(\n 'train_acc,test_acc,train_loss, test_loss, train_error, test_error\\n'\n )\n f.write('{0},{1},{2},{3},{4},{5}\\n'.format(train_acc[-1],\n test_acc[-1], train_loss[-1], test_loss[-1], train_error[-1\n ], test_error[-1]))\n else:\n with open(filename, 'a') as f:\n f.write('{0},{1},{2},{3},{4},{5}\\n'.format(train_acc[-1],\n test_acc[-1], train_loss[-1], test_loss[-1], train_error[-1\n ], test_error[-1]))\n",
"step-3": "<mask token>\n\n\ndef plots(epochs, train_acc, test_acc, train_loss, test_loss, train_error,\n test_error, filename):\n plt.style.use('bmh')\n fig = plt.figure(figsize=(8, 6))\n plt.plot(epochs, train_acc, 'r', epochs, test_acc, 'g')\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train_acc', 'test_acc'], loc='upper left')\n fig.savefig(filename + '_accuracy.png')\n fig = plt.figure(figsize=(8, 6))\n plt.plot(epochs, train_loss, 'r', epochs, test_loss, 'g')\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train_loss', 'test_loss'], loc='upper left')\n fig.savefig(filename + '_loss.png')\n fig = plt.figure(figsize=(8, 6))\n plt.plot(epochs, train_error, 'r', epochs, test_error, 'g')\n plt.title('model error rate')\n plt.ylabel('error rate')\n plt.xlabel('epoch')\n plt.legend(['train_error', 'test_error'], loc='upper left')\n fig.savefig(filename + '_error.png')\n plt.close('all')\n\n\ndef write_csv(filename, train_acc, test_acc, train_loss, test_loss,\n train_error, test_error, epoch):\n if epoch == 0:\n with open(filename, 'w') as f:\n f.write(\n 'train_acc,test_acc,train_loss, test_loss, train_error, test_error\\n'\n )\n f.write('{0},{1},{2},{3},{4},{5}\\n'.format(train_acc[-1],\n test_acc[-1], train_loss[-1], test_loss[-1], train_error[-1\n ], test_error[-1]))\n else:\n with open(filename, 'a') as f:\n f.write('{0},{1},{2},{3},{4},{5}\\n'.format(train_acc[-1],\n test_acc[-1], train_loss[-1], test_loss[-1], train_error[-1\n ], test_error[-1]))\n",
"step-4": "import matplotlib.pyplot as plt\nimport matplotlib\nimport numpy as np\nfrom PIL import Image\nfrom scipy.misc import imsave, imread\n\n\ndef plots(epochs, train_acc, test_acc, train_loss, test_loss, train_error,\n test_error, filename):\n plt.style.use('bmh')\n fig = plt.figure(figsize=(8, 6))\n plt.plot(epochs, train_acc, 'r', epochs, test_acc, 'g')\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train_acc', 'test_acc'], loc='upper left')\n fig.savefig(filename + '_accuracy.png')\n fig = plt.figure(figsize=(8, 6))\n plt.plot(epochs, train_loss, 'r', epochs, test_loss, 'g')\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train_loss', 'test_loss'], loc='upper left')\n fig.savefig(filename + '_loss.png')\n fig = plt.figure(figsize=(8, 6))\n plt.plot(epochs, train_error, 'r', epochs, test_error, 'g')\n plt.title('model error rate')\n plt.ylabel('error rate')\n plt.xlabel('epoch')\n plt.legend(['train_error', 'test_error'], loc='upper left')\n fig.savefig(filename + '_error.png')\n plt.close('all')\n\n\ndef write_csv(filename, train_acc, test_acc, train_loss, test_loss,\n train_error, test_error, epoch):\n if epoch == 0:\n with open(filename, 'w') as f:\n f.write(\n 'train_acc,test_acc,train_loss, test_loss, train_error, test_error\\n'\n )\n f.write('{0},{1},{2},{3},{4},{5}\\n'.format(train_acc[-1],\n test_acc[-1], train_loss[-1], test_loss[-1], train_error[-1\n ], test_error[-1]))\n else:\n with open(filename, 'a') as f:\n f.write('{0},{1},{2},{3},{4},{5}\\n'.format(train_acc[-1],\n test_acc[-1], train_loss[-1], test_loss[-1], train_error[-1\n ], test_error[-1]))\n",
"step-5": "import matplotlib.pyplot as plt\nimport matplotlib\nimport numpy as np\nfrom PIL import Image\nfrom scipy.misc import imsave, imread\n\n\ndef plots(epochs, train_acc, test_acc, train_loss, test_loss, train_error, test_error,filename):\n plt.style.use('bmh')\n\n fig=plt.figure(figsize=(8,6))\n plt.plot(epochs,train_acc, 'r', epochs,test_acc, 'g')\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train_acc', 'test_acc'], loc='upper left')\n fig.savefig(filename + '_accuracy.png')\n\n fig=plt.figure(figsize=(8,6))\n plt.plot(epochs,train_loss, 'r', epochs,test_loss, 'g')\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train_loss', 'test_loss'], loc='upper left')\n fig.savefig(filename + '_loss.png')\n \n fig=plt.figure(figsize=(8,6))\n plt.plot(epochs,train_error, 'r', epochs,test_error, 'g')\n plt.title('model error rate')\n plt.ylabel('error rate')\n plt.xlabel('epoch')\n plt.legend(['train_error', 'test_error'], loc='upper left')\n fig.savefig(filename + '_error.png')\n\n plt.close('all')\n\n\n\ndef write_csv(filename, train_acc,test_acc,train_loss,test_loss,train_error,test_error,epoch):\n if epoch==0:\n \n with open(filename, 'w') as f:\n f.write('train_acc,test_acc,train_loss, test_loss, train_error, test_error\\n') \n f.write('{0},{1},{2},{3},{4},{5}\\n'.format(train_acc[-1],\\\n test_acc[-1],\\\n train_loss[-1],\\\n test_loss[-1],\\\n train_error[-1],\\\n test_error[-1]))\n \n else:\n with open(filename, 'a') as f:\n f.write('{0},{1},{2},{3},{4},{5}\\n'.format(train_acc[-1],\\\n test_acc[-1],\\\n train_loss[-1],\\\n test_loss[-1],\\\n train_error[-1],\\\n test_error[-1]))\n \n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
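write_csv above writes the header only when epoch == 0 and appends on later epochs, so it is meant to be called once per epoch from a training loop. A minimal usage sketch with made-up metric values:

train_acc, test_acc = [], []
train_loss, test_loss = [], []
train_error, test_error = [], []

for epoch in range(3):
    train_acc.append(0.90); test_acc.append(0.85)      # dummy numbers for illustration
    train_loss.append(0.30); test_loss.append(0.40)
    train_error.append(0.10); test_error.append(0.15)
    write_csv('metrics.csv', train_acc, test_acc, train_loss, test_loss,
              train_error, test_error, epoch)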
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(fruits)
<|reserved_special_token_1|>
fruits = ['orange', ' apple', 'pear', 'banana', 'kiwi']
print(fruits)
<|reserved_special_token_1|>
# common methods to delete data from list
fruits = ['orange', ' apple', 'pear', 'banana', 'kiwi']
#pop method
# fruits.pop(1)
# del
# del fruits[1]
# remove
# fruits.remove('banana')
# append, extend, insert
# pop, remove, del
print(fruits)
|
flexible
|
{
"blob_id": "a245cb1f232b152edf40b6399686c6811c522d99",
"index": 6458,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(fruits)\n",
"step-3": "fruits = ['orange', ' apple', 'pear', 'banana', 'kiwi']\nprint(fruits)\n",
"step-4": "# common methods to delete data from list\r\nfruits = ['orange', ' apple', 'pear', 'banana', 'kiwi']\r\n#pop method\r\n# fruits.pop(1)\r\n\r\n\r\n# del\r\n# del fruits[1]\r\n\r\n# remove\r\n\r\n# fruits.remove('banana')\r\n\r\n# append, extend, insert\r\n# pop, remove, del\r\n\r\nprint(fruits)\r\n\r\n\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
__author__ = "那位先生Beer"
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import xlrd
import numpy as np
print('输入鲈鱼的先验概率例如:70,对应70%')  # prompt: enter the prior probability of sea bass, e.g. 70 for 70%
a=input('输入鲈鱼的先验概率(鲑鱼对应的1减去剩余的):')  # prior for sea bass; the salmon prior is 1 minus this
font_set = FontProperties(fname=r"c:\windows\fonts\simsun.ttc", size=15)
# Plot the generated data as a scatter plot (x-axis: length, y-axis: brightness)
data=xlrd.open_workbook('xqtest.xls')
shxrange=range(data.nsheets)
sh=data.sheet_by_name("1")
L=[]
for i in range(0,(int(a))*50):
rowa_data=sh.row_values(i)
L.append(rowa_data)
L=np.array(L)
L=L[:,0:2]
G=[]
for j in range(5000,5000+(100-int(a))*50):
rowa_data = sh.row_values(j)
G.append(rowa_data)
G=np.array(G)
G=G[:,0:2]
plt.figure(figsize=(8,6))
plt.title("生成的鲈鱼和鲑鱼数据的散点图",fontproperties=font_set)
plt.xlabel("长度",fontproperties=font_set)
plt.ylabel("宽度",fontproperties=font_set)
plt.scatter(L[:,0],L[:,1],marker="o",label="鲈鱼")
plt.scatter(G[:,0],G[:,1],marker="s",label="鲑鱼")
# Classification model (linear decision boundary)
x = np.linspace(0,8)
y = -x+9
plt.plot(x,y, color="red")
plt.legend()
plt.show()
# The simulated sea bass are smaller, so they lie below the line, i.e. x + y <= 9:
# Compute the accuracy for each class
count=0
for i in L:
if i[0]+i[1]<=9:
count=count+1
q=(count/((int(a))*50))
print('鲈鱼准确率:%s'%(count/((int(a))*50)))  # sea bass accuracy
countG=0
for i in G:
if i[0]+i[1]>=9:
countG=countG+1
p=(countG/((100-int(a))*50))
print('鲑鱼准确率:%s'%(countG/((100-int(a))*50)))  # salmon accuracy
# p(b) = p(b|a)*p(a) + p(b|-a)*p(-a)
pb=(int(a)/100)*q + (1-(int(a)/100))*p
print(pb)
# p(ab) = p(b|a)*p(a)
pab=(int(a)/100)*q
print(pab)
print(pab/pb)  # posterior p(a|b) = p(ab)/p(b)
|
normal
|
{
"blob_id": "077b6d3d7417bbc26e9f23af6f437ff05e3d5771",
"index": 812,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('输入鲈鱼的先验概率例如:70,对应70%')\n<mask token>\nfor i in range(0, int(a) * 50):\n rowa_data = sh.row_values(i)\n L.append(rowa_data)\n<mask token>\nfor j in range(5000, 5000 + (100 - int(a)) * 50):\n rowa_data = sh.row_values(j)\n G.append(rowa_data)\n<mask token>\nplt.figure(figsize=(8, 6))\nplt.title('生成的鲈鱼和鲑鱼数据的散点图', fontproperties=font_set)\nplt.xlabel('长度', fontproperties=font_set)\nplt.ylabel('宽度', fontproperties=font_set)\nplt.scatter(L[:, 0], L[:, 1], marker='o', label='鲈鱼')\nplt.scatter(G[:, 0], G[:, 1], marker='s', label='鲑鱼')\n<mask token>\nplt.plot(x, y, color='red')\nplt.legend()\nplt.show()\n<mask token>\nfor i in L:\n if i[0] + i[1] <= 9:\n count = count + 1\n<mask token>\nprint('鲈鱼准确率:%s' % (count / (int(a) * 50)))\n<mask token>\nfor i in G:\n if i[0] + i[1] >= 9:\n countG = countG + 1\n<mask token>\nprint('鲑鱼准确率:%s' % (countG / ((100 - int(a)) * 50)))\n<mask token>\nprint(pb)\n<mask token>\nprint(pab)\nprint(pab / pb)\n",
"step-3": "__author__ = '那位先生Beer'\n<mask token>\nprint('输入鲈鱼的先验概率例如:70,对应70%')\na = input('输入鲈鱼的先验概率(鲑鱼对应的1减去剩余的):')\nfont_set = FontProperties(fname='c:\\\\windows\\\\fonts\\\\simsun.ttc', size=15)\ndata = xlrd.open_workbook('xqtest.xls')\nshxrange = range(data.nsheets)\nsh = data.sheet_by_name('1')\nL = []\nfor i in range(0, int(a) * 50):\n rowa_data = sh.row_values(i)\n L.append(rowa_data)\nL = np.array(L)\nL = L[:, 0:2]\nG = []\nfor j in range(5000, 5000 + (100 - int(a)) * 50):\n rowa_data = sh.row_values(j)\n G.append(rowa_data)\nG = np.array(G)\nG = G[:, 0:2]\nplt.figure(figsize=(8, 6))\nplt.title('生成的鲈鱼和鲑鱼数据的散点图', fontproperties=font_set)\nplt.xlabel('长度', fontproperties=font_set)\nplt.ylabel('宽度', fontproperties=font_set)\nplt.scatter(L[:, 0], L[:, 1], marker='o', label='鲈鱼')\nplt.scatter(G[:, 0], G[:, 1], marker='s', label='鲑鱼')\nx = np.linspace(0, 8)\ny = -x + 9\nplt.plot(x, y, color='red')\nplt.legend()\nplt.show()\ncount = 0\nfor i in L:\n if i[0] + i[1] <= 9:\n count = count + 1\nq = count / (int(a) * 50)\nprint('鲈鱼准确率:%s' % (count / (int(a) * 50)))\ncountG = 0\nfor i in G:\n if i[0] + i[1] >= 9:\n countG = countG + 1\np = countG / ((100 - int(a)) * 50)\nprint('鲑鱼准确率:%s' % (countG / ((100 - int(a)) * 50)))\npb = int(a) / 100 * q + (1 - int(a) / 100) * p\nprint(pb)\npab = int(a) / 100 * q\nprint(pab)\nprint(pab / pb)\n",
"step-4": "__author__ = '那位先生Beer'\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\nimport xlrd\nimport numpy as np\nprint('输入鲈鱼的先验概率例如:70,对应70%')\na = input('输入鲈鱼的先验概率(鲑鱼对应的1减去剩余的):')\nfont_set = FontProperties(fname='c:\\\\windows\\\\fonts\\\\simsun.ttc', size=15)\ndata = xlrd.open_workbook('xqtest.xls')\nshxrange = range(data.nsheets)\nsh = data.sheet_by_name('1')\nL = []\nfor i in range(0, int(a) * 50):\n rowa_data = sh.row_values(i)\n L.append(rowa_data)\nL = np.array(L)\nL = L[:, 0:2]\nG = []\nfor j in range(5000, 5000 + (100 - int(a)) * 50):\n rowa_data = sh.row_values(j)\n G.append(rowa_data)\nG = np.array(G)\nG = G[:, 0:2]\nplt.figure(figsize=(8, 6))\nplt.title('生成的鲈鱼和鲑鱼数据的散点图', fontproperties=font_set)\nplt.xlabel('长度', fontproperties=font_set)\nplt.ylabel('宽度', fontproperties=font_set)\nplt.scatter(L[:, 0], L[:, 1], marker='o', label='鲈鱼')\nplt.scatter(G[:, 0], G[:, 1], marker='s', label='鲑鱼')\nx = np.linspace(0, 8)\ny = -x + 9\nplt.plot(x, y, color='red')\nplt.legend()\nplt.show()\ncount = 0\nfor i in L:\n if i[0] + i[1] <= 9:\n count = count + 1\nq = count / (int(a) * 50)\nprint('鲈鱼准确率:%s' % (count / (int(a) * 50)))\ncountG = 0\nfor i in G:\n if i[0] + i[1] >= 9:\n countG = countG + 1\np = countG / ((100 - int(a)) * 50)\nprint('鲑鱼准确率:%s' % (countG / ((100 - int(a)) * 50)))\npb = int(a) / 100 * q + (1 - int(a) / 100) * p\nprint(pb)\npab = int(a) / 100 * q\nprint(pab)\nprint(pab / pb)\n",
"step-5": "__author__ = \"那位先生Beer\"\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\nimport xlrd\nimport numpy as np\nprint('输入鲈鱼的先验概率例如:70,对应70%')\na=input('输入鲈鱼的先验概率(鲑鱼对应的1减去剩余的):')\nfont_set = FontProperties(fname=r\"c:\\windows\\fonts\\simsun.ttc\", size=15)\n#根据生成的数据画出图像(横坐标为长度,纵坐标为亮度)\ndata=xlrd.open_workbook('xqtest.xls')\nshxrange=range(data.nsheets)\nsh=data.sheet_by_name(\"1\")\nL=[]\nfor i in range(0,(int(a))*50):\n rowa_data=sh.row_values(i)\n L.append(rowa_data)\nL=np.array(L)\nL=L[:,0:2]\n\nG=[]\nfor j in range(5000,5000+(100-int(a))*50):\n rowa_data = sh.row_values(j)\n G.append(rowa_data)\nG=np.array(G)\nG=G[:,0:2]\nplt.figure(figsize=(8,6))\nplt.title(\"生成的鲈鱼和鲑鱼数据的散点图\",fontproperties=font_set)\nplt.xlabel(\"长度\",fontproperties=font_set)\nplt.ylabel(\"宽度\",fontproperties=font_set)\nplt.scatter(L[:,0],L[:,1],marker=\"o\",label=\"鲈鱼\")\nplt.scatter(G[:,0],G[:,1],marker=\"s\",label=\"鲑鱼\")\n# 分类模型\nx = np.linspace(0,8)\ny = -x+9\nplt.plot(x,y, color=\"red\")\nplt.legend()\nplt.show()\n\n\n#模拟的数据鲈鱼比较小,可得出其在直线下面,即y+x<=9:\n#计算准确率\ncount=0\nfor i in L:\n if i[0]+i[1]<=9:\n count=count+1\nq=(count/((int(a))*50))\nprint('鲈鱼准确率:%s'%(count/((int(a))*50)))\ncountG=0\nfor i in G:\n if i[0]+i[1]>=9:\n countG=countG+1\np=(countG/((100-int(a))*50))\nprint('鲑鱼准确率:%s'%(countG/((100-int(a))*50)))\n\n#p(b)=p(b|a)*p(a) + p(b|-a)p(-a)\npb=(int(a)/100)*q + (1-(int(a)/100))*p\nprint(pb)\n#p(ab)=p(b|a)*p(a)\npab=(int(a)/100)*q\nprint(pab)\nprint(pab/pb)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
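The last block of the fish script applies Bayes' rule: with prior p(a) for sea bass and the two per-class accuracies q and p, it forms pb = q*p(a) + p*(1 - p(a)) and reports the posterior pab/pb = q*p(a)/pb. A small numeric sketch with hypothetical values, mirroring the script's own combination:

prior = 0.70     # p(a): prior for sea bass (the "70" entered above, divided by 100)
q = 0.96         # fraction of sea bass falling below the line x + y = 9
p = 0.94         # fraction of salmon falling above the line
pb = prior * q + (1 - prior) * p     # the script's p(b)
pab = prior * q                      # the script's p(ab)
print(pb, pab, pab / pb)             # pab / pb is the posterior it reports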
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestVCSBoxfill(basevcstest.VCSBaseTest):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestVCSBoxfill(basevcstest.VCSBaseTest):
def testRobinsonBoxfill(self):
clt3 = self.clt('clt', latitude=(-90.0, 90.0), squeeze=1, longitude
=(-180, 200.0), time=('1979-01', '1988-12'))
gmBoxfill = self.x.getboxfill('a_robinson_boxfill')
kwargs = {}
kwargs['cdmsfile'] = self.clt.id
kwargs['bg'] = self.bg
self.x.plot(clt3, gmBoxfill, **kwargs)
self.checkImage('test_vcs_boxfill_robinson_wrap.png')
<|reserved_special_token_1|>
import basevcstest
class TestVCSBoxfill(basevcstest.VCSBaseTest):
def testRobinsonBoxfill(self):
clt3 = self.clt('clt', latitude=(-90.0, 90.0), squeeze=1, longitude
=(-180, 200.0), time=('1979-01', '1988-12'))
gmBoxfill = self.x.getboxfill('a_robinson_boxfill')
kwargs = {}
kwargs['cdmsfile'] = self.clt.id
kwargs['bg'] = self.bg
self.x.plot(clt3, gmBoxfill, **kwargs)
self.checkImage('test_vcs_boxfill_robinson_wrap.png')
<|reserved_special_token_1|>
import basevcstest
class TestVCSBoxfill(basevcstest.VCSBaseTest):
def testRobinsonBoxfill(self):
# This tests if extending the longitude to more than 360 decrees is handled correctly by
# proj4. See https://github.com/UV-CDAT/uvcdat/issues/1728 for more
# information.
clt3 = self.clt('clt', latitude=(-90.0, 90.0), squeeze=1,
longitude=(-180, 200.0), time=('1979-01', '1988-12'),)
gmBoxfill = self.x.getboxfill('a_robinson_boxfill')
kwargs = {}
kwargs['cdmsfile'] = self.clt.id
kwargs['bg'] = self.bg
self.x.plot(clt3, gmBoxfill, **kwargs)
self.checkImage("test_vcs_boxfill_robinson_wrap.png")
|
flexible
|
{
"blob_id": "c1475209d9c9a98d72d7f703e0516aceaeb13163",
"index": 6820,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestVCSBoxfill(basevcstest.VCSBaseTest):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestVCSBoxfill(basevcstest.VCSBaseTest):\n\n def testRobinsonBoxfill(self):\n clt3 = self.clt('clt', latitude=(-90.0, 90.0), squeeze=1, longitude\n =(-180, 200.0), time=('1979-01', '1988-12'))\n gmBoxfill = self.x.getboxfill('a_robinson_boxfill')\n kwargs = {}\n kwargs['cdmsfile'] = self.clt.id\n kwargs['bg'] = self.bg\n self.x.plot(clt3, gmBoxfill, **kwargs)\n self.checkImage('test_vcs_boxfill_robinson_wrap.png')\n",
"step-4": "import basevcstest\n\n\nclass TestVCSBoxfill(basevcstest.VCSBaseTest):\n\n def testRobinsonBoxfill(self):\n clt3 = self.clt('clt', latitude=(-90.0, 90.0), squeeze=1, longitude\n =(-180, 200.0), time=('1979-01', '1988-12'))\n gmBoxfill = self.x.getboxfill('a_robinson_boxfill')\n kwargs = {}\n kwargs['cdmsfile'] = self.clt.id\n kwargs['bg'] = self.bg\n self.x.plot(clt3, gmBoxfill, **kwargs)\n self.checkImage('test_vcs_boxfill_robinson_wrap.png')\n",
"step-5": "import basevcstest\n\n\nclass TestVCSBoxfill(basevcstest.VCSBaseTest):\n def testRobinsonBoxfill(self):\n # This tests if extending the longitude to more than 360 decrees is handled correctly by\n # proj4. See https://github.com/UV-CDAT/uvcdat/issues/1728 for more\n # information.\n clt3 = self.clt('clt', latitude=(-90.0, 90.0), squeeze=1,\n longitude=(-180, 200.0), time=('1979-01', '1988-12'),)\n gmBoxfill = self.x.getboxfill('a_robinson_boxfill')\n kwargs = {}\n kwargs['cdmsfile'] = self.clt.id\n kwargs['bg'] = self.bg\n self.x.plot(clt3, gmBoxfill, **kwargs)\n self.checkImage(\"test_vcs_boxfill_robinson_wrap.png\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 18 18:21:37 2021
@author: benoitdeschrynmakers
"""
import requests
url = 'http://127.0.0.1:8888/productionplan'
if __name__ == "__main__":
filename = "example_payloads/payload1.json"
data = open(filename, 'rb').read()
headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
response = requests.post(url, data=data, headers=headers)
if response.ok:
print(response.json())
else:
print("error!")
|
normal
|
{
"blob_id": "255130082ee5f8428f1700b47dee717465fed72f",
"index": 4067,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n filename = 'example_payloads/payload1.json'\n data = open(filename, 'rb').read()\n headers = {'Accept': 'application/json', 'Content-Type': 'application/json'\n }\n response = requests.post(url, data=data, headers=headers)\n if response.ok:\n print(response.json())\n else:\n print('error!')\n",
"step-3": "<mask token>\nurl = 'http://127.0.0.1:8888/productionplan'\nif __name__ == '__main__':\n filename = 'example_payloads/payload1.json'\n data = open(filename, 'rb').read()\n headers = {'Accept': 'application/json', 'Content-Type': 'application/json'\n }\n response = requests.post(url, data=data, headers=headers)\n if response.ok:\n print(response.json())\n else:\n print('error!')\n",
"step-4": "<mask token>\nimport requests\nurl = 'http://127.0.0.1:8888/productionplan'\nif __name__ == '__main__':\n filename = 'example_payloads/payload1.json'\n data = open(filename, 'rb').read()\n headers = {'Accept': 'application/json', 'Content-Type': 'application/json'\n }\n response = requests.post(url, data=data, headers=headers)\n if response.ok:\n print(response.json())\n else:\n print('error!')\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 18 18:21:37 2021\n\n@author: benoitdeschrynmakers\n\"\"\"\n\nimport requests\n\nurl = 'http://127.0.0.1:8888/productionplan'\n\nif __name__ == \"__main__\":\n filename = \"example_payloads/payload1.json\"\n\n data = open(filename, 'rb').read()\n headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}\n response = requests.post(url, data=data, headers=headers)\n\n if response.ok:\n print(response.json())\n else:\n print(\"error!\")\n \n \n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
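A small variation of the client above, assuming the same local endpoint and payload file, that closes the file explicitly and reports the status code on failure:

import requests

url = 'http://127.0.0.1:8888/productionplan'
headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}

with open('example_payloads/payload1.json', 'rb') as fh:
    response = requests.post(url, data=fh.read(), headers=headers)

if response.ok:
    print(response.json())
else:
    print('error!', response.status_code, response.text)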
import ambulance_game as abg
import numpy as np
import sympy as sym
from sympy.abc import a, b, c, d, e, f, g, h, i, j
def get_symbolic_pi(num_of_servers, threshold, system_capacity, buffer_capacity):
Q_sym = abg.markov.get_symbolic_transition_matrix(
num_of_servers=num_of_servers,
threshold=threshold,
system_capacity=system_capacity,
buffer_capacity=buffer_capacity,
)
dimension = Q_sym.shape[0]
    if dimension > 7:
        # the symbolic LU solve below only has the 7 symbols a-g available
        return "Capacity of 6 exceeded"
M_sym = sym.Matrix([Q_sym.transpose()[:-1, :], sym.ones(1, dimension)])
b_sym = sym.Matrix([sym.zeros(dimension - 1, 1), [1]])
system = M_sym.col_insert(dimension, b_sym)
sol = sym.solve_linear_system_LU(system, [a, b, c, d, e, f, g])
return sol
def get_symbolic_state_probabilities_1222():
num_of_servers = 1
threshold = 2
system_capacity = 2
buffer_capacity = 2
sym_pi_1222 = get_symbolic_pi(
num_of_servers=num_of_servers,
threshold=threshold,
system_capacity=system_capacity,
buffer_capacity=buffer_capacity,
)
all_states_1222 = abg.markov.build_states(
threshold=threshold,
system_capacity=system_capacity,
buffer_capacity=buffer_capacity,
)
sym_state_probs_1222 = [0 for _ in range(len(all_states_1222))]
sym_state_probs_1222[0] = sym.factor(sym_pi_1222[a]) # (0,0)
sym_state_probs_1222[1] = sym.factor(sym_pi_1222[b]) # (0,1)
sym_state_probs_1222[2] = sym.factor(sym_pi_1222[c]) # (1,1)
sym_state_probs_1222[3] = sym.factor(sym_pi_1222[d]) # (0,2)
sym_state_probs_1222[4] = sym.factor(sym_pi_1222[e]) # (1,2)
sym_state_recursive_ratios_1222 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1222[0, 0] = 1
sym_state_recursive_ratios_1222[0, 1] = sym.factor(
sym_state_probs_1222[1] / sym_state_probs_1222[0]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1222[0, 2] = sym.factor(
sym_state_probs_1222[2] / sym_state_probs_1222[1]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1222[1, 2] = sym.factor(
sym_state_probs_1222[3] / sym_state_probs_1222[2]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1222[2, 2] = sym.factor(
sym_state_probs_1222[4] / sym_state_probs_1222[3]
) # (0,2) -> (1,2)
return sym_state_probs_1222, sym_state_recursive_ratios_1222
def get_symbolic_state_probabilities_1121():
num_of_servers = 1
threshold = 1
system_capacity = 2
buffer_capacity = 1
all_states_1121 = abg.markov.build_states(
threshold=threshold,
system_capacity=system_capacity,
buffer_capacity=buffer_capacity,
)
sym_pi_1121 = get_symbolic_pi(
num_of_servers=num_of_servers,
threshold=threshold,
system_capacity=system_capacity,
buffer_capacity=buffer_capacity,
)
sym_state_probs_1121 = [0 for _ in range(len(all_states_1121))]
sym_state_probs_1121[0] = sym.factor(sym_pi_1121[a]) # (0,0)
sym_state_probs_1121[1] = sym.factor(sym_pi_1121[b]) # (0,1)
sym_state_probs_1121[2] = sym.factor(sym_pi_1121[c]) # (1,1)
sym_state_probs_1121[3] = sym.factor(sym_pi_1121[d]) # (0,2)
sym_state_probs_1121[4] = sym.factor(sym_pi_1121[e]) # (1,2)
sym_state_recursive_ratios_1121 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1121[0, 0] = 1
sym_state_recursive_ratios_1121[0, 1] = sym.factor(
sym_state_probs_1121[1] / sym_state_probs_1121[0]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1121[1, 1] = sym.factor(
sym_state_probs_1121[2] / sym_state_probs_1121[1]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1121[0, 2] = sym.factor(
sym_state_probs_1121[3] / sym_state_probs_1121[1]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1121[1, 2] = sym.factor(
sym_state_probs_1121[4] / sym_state_probs_1121[3]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_right_1121 = sym_state_recursive_ratios_1121.copy()
sym_state_recursive_ratios_right_1121[1, 2] = sym.factor(
sym_state_probs_1121[4] / sym_state_probs_1121[2]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_P0_1121 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1121[0, 0] = 1
sym_state_recursive_ratios_P0_1121[0, 1] = sym.factor(
sym_state_probs_1121[1] / sym_state_probs_1121[0]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1121[1, 1] = sym.factor(
sym_state_probs_1121[2] / sym_state_probs_1121[0]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1121[0, 2] = sym.factor(
sym_state_probs_1121[3] / sym_state_probs_1121[0]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1121[1, 2] = sym.factor(
sym_state_probs_1121[4] / sym_state_probs_1121[0]
) # (0,0) -> (1,2)
return (
sym_state_probs_1121,
sym_state_recursive_ratios_1121,
sym_state_recursive_ratios_right_1121,
sym_state_recursive_ratios_P0_1121,
)
def get_symbolic_state_probabilities_1122():
# num_of_servers = 1
threshold = 1
system_capacity = 2
buffer_capacity = 2
all_states_1122 = abg.markov.build_states(
threshold=threshold,
system_capacity=system_capacity,
buffer_capacity=buffer_capacity,
)
sym_state_probs_1122 = [0 for _ in range(len(all_states_1122))]
sym_Lambda = sym.symbols("Lambda")
sym_lambda_1 = sym.symbols("lambda_1")
sym_lambda_2 = sym.symbols("lambda_2")
sym_mu = sym.symbols("mu")
sym_state_probs_1122[0] = (
(sym_mu**6)
+ 2 * (sym_lambda_2) * (sym_mu**5)
+ (sym_lambda_2**2) * (sym_mu**4)
) # (0,0)
sym_state_probs_1122[1] = (sym_Lambda * sym_mu**3) * (
sym_mu**2 + 2 * sym_mu * sym_lambda_2 + sym_lambda_2**2
) # (0,1)
sym_state_probs_1122[2] = (sym_Lambda * sym_lambda_2 * sym_mu**2) * (
sym_lambda_2**2
+ sym_lambda_2 * sym_lambda_1
+ sym_lambda_1 * sym_mu
+ sym_mu**2
+ 2 * sym_lambda_2 * sym_mu
) # (1,1)
sym_state_probs_1122[3] = (sym_Lambda * sym_lambda_2**2 * sym_mu) * (
sym_lambda_2**2
+ 2 * sym_lambda_1 * sym_lambda_2
+ 3 * sym_lambda_1 * sym_mu
+ sym_mu**2
+ 2 * sym_lambda_2 * sym_mu
+ sym_lambda_1**2
) # (2,1)
sym_state_probs_1122[4] = (sym_Lambda * sym_lambda_1 * sym_mu**3) * (
sym_lambda_2 + sym_mu
) # (0,2)
sym_state_probs_1122[5] = (
sym_Lambda * sym_lambda_1 * sym_lambda_2 * sym_mu**2
) * (
2 * sym_mu + sym_lambda_1 + sym_lambda_2
) # (1,2)
sym_state_probs_1122[6] = (sym_Lambda * sym_lambda_1 * sym_lambda_2**2) * (
sym_lambda_1**2
+ 4 * sym_lambda_1 * sym_mu
+ 2 * sym_lambda_1 * sym_lambda_2
+ 3 * sym_mu**2
+ sym_lambda_2**2
+ 3 * sym_lambda_2 * sym_mu
) # (2,2)
total_1122 = np.sum(sym_state_probs_1122)
sym_state_probs_1122 = [i / total_1122 for i in sym_state_probs_1122]
sym_state_recursive_ratios_1122 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1122[0, 0] = 1
sym_state_recursive_ratios_1122[0, 1] = sym.factor(
sym_state_probs_1122[1] / sym_state_probs_1122[0]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1122[1, 1] = sym.factor(
sym_state_probs_1122[2] / sym_state_probs_1122[1]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1122[2, 1] = sym.factor(
sym_state_probs_1122[3] / sym_state_probs_1122[2]
) # (1,1) -> (2,1)
sym_state_recursive_ratios_1122[0, 2] = sym.factor(
sym_state_probs_1122[4] / sym_state_probs_1122[1]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1122[1, 2] = sym.factor(
sym_state_probs_1122[5] / sym_state_probs_1122[4]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_1122[2, 2] = sym.factor(
sym_state_probs_1122[6] / sym_state_probs_1122[5]
) # (1,2) -> (2,2)
sym_state_recursive_ratios_right_1122 = sym_state_recursive_ratios_1122.copy()
sym_state_recursive_ratios_right_1122[1, 2] = sym.factor(
sym_state_probs_1122[5] / sym_state_probs_1122[2]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_right_1122[2, 2] = sym.factor(
sym_state_probs_1122[6] / sym_state_probs_1122[3]
) # (2,1) -> (2,2)
sym_state_recursive_ratios_P0_1122 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1122[0, 0] = 1
sym_state_recursive_ratios_P0_1122[0, 1] = sym.factor(
sym_state_probs_1122[1] / sym_state_probs_1122[0]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1122[1, 1] = sym.factor(
sym_state_probs_1122[2] / sym_state_probs_1122[0]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1122[2, 1] = sym.factor(
sym_state_probs_1122[3] / sym_state_probs_1122[0]
) # (0,0) -> (2,1)
sym_state_recursive_ratios_P0_1122[0, 2] = sym.factor(
sym_state_probs_1122[4] / sym_state_probs_1122[0]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1122[1, 2] = sym.factor(
sym_state_probs_1122[5] / sym_state_probs_1122[0]
) # (0,0) -> (1,2)
sym_state_recursive_ratios_P0_1122[2, 2] = sym.factor(
sym_state_probs_1122[6] / sym_state_probs_1122[0]
) # (0,0) -> (2,2)
return (
sym_state_probs_1122,
sym_state_recursive_ratios_1122,
sym_state_recursive_ratios_right_1122,
sym_state_recursive_ratios_P0_1122,
)
def get_symbolic_state_probabilities_1123():
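    """Return the symbolic steady-state probabilities of the Markov model
    with 1 server, threshold 1, system capacity 2 and buffer capacity 3,
    obtained by solving the balance equations of the symbolic transition
    matrix (as a dictionary keyed by the p_ij symbols), together with the
    matrices of recursive state-probability ratios (standard, "right", and
    relative to state (0,0)).
    """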
num_of_servers = 1
threshold = 1
system_capacity = 2
buffer_capacity = 3
Q_sym_1123 = abg.markov.get_symbolic_transition_matrix(
num_of_servers, threshold, system_capacity, buffer_capacity
)
p00, p01, p11, p21, p31, p02, p12, p22, p32 = sym.symbols(
"p00, p01, p11, p21, p31, p02, p12, p22, p32"
)
pi_1123 = sym.Matrix([p00, p01, p11, p21, p31, p02, p12, p22, p32])
dimension_1123 = Q_sym_1123.shape[0]
M_sym_1123 = sym.Matrix(
[Q_sym_1123.transpose()[:-1, :], sym.ones(1, dimension_1123)]
)
sym_diff_equations_1123 = M_sym_1123 @ pi_1123
b_sym_1123 = sym.Matrix([sym.zeros(dimension_1123 - 1, 1), [1]])
eq0_1123 = sym.Eq(sym_diff_equations_1123[0], b_sym_1123[0])
eq1_1123 = sym.Eq(sym_diff_equations_1123[1], b_sym_1123[1])
eq2_1123 = sym.Eq(sym_diff_equations_1123[2], b_sym_1123[2])
eq3_1123 = sym.Eq(sym_diff_equations_1123[3], b_sym_1123[3])
eq4_1123 = sym.Eq(sym_diff_equations_1123[4], b_sym_1123[4])
eq5_1123 = sym.Eq(sym_diff_equations_1123[5], b_sym_1123[5])
eq6_1123 = sym.Eq(sym_diff_equations_1123[6], b_sym_1123[6])
eq7_1123 = sym.Eq(sym_diff_equations_1123[7], b_sym_1123[7])
eq8_1123 = sym.Eq(sym_diff_equations_1123[8], b_sym_1123[8])
sym_state_probs_1123 = sym.solve(
[
eq0_1123,
eq1_1123,
eq2_1123,
eq3_1123,
eq4_1123,
eq5_1123,
eq6_1123,
eq7_1123,
eq8_1123,
],
(p00, p01, p11, p21, p31, p02, p12, p22, p32),
)
sym_state_recursive_ratios_1123 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1123[0, 0] = 1
sym_state_recursive_ratios_1123[0, 1] = sym.factor(
sym_state_probs_1123[p01] / sym_state_probs_1123[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1123[1, 1] = sym.factor(
sym_state_probs_1123[p11] / sym_state_probs_1123[p01]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1123[2, 1] = sym.factor(
sym_state_probs_1123[p21] / sym_state_probs_1123[p11]
) # (1,1) -> (2,1)
sym_state_recursive_ratios_1123[3, 1] = sym.factor(
sym_state_probs_1123[p31] / sym_state_probs_1123[p21]
) # (2,1) -> (3,1)
sym_state_recursive_ratios_1123[0, 2] = sym.factor(
sym_state_probs_1123[p02] / sym_state_probs_1123[p01]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1123[1, 2] = sym.factor(
sym_state_probs_1123[p12] / sym_state_probs_1123[p02]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_1123[2, 2] = sym.factor(
sym_state_probs_1123[p22] / sym_state_probs_1123[p12]
) # (1,2) -> (2,2)
    sym_state_recursive_ratios_1123[3, 2] = sym.factor(
        sym_state_probs_1123[p32] / sym_state_probs_1123[p22]
    ) # (2,2) -> (3,2)
sym_state_recursive_ratios_right_1123 = sym_state_recursive_ratios_1123.copy()
sym_state_recursive_ratios_right_1123[1, 2] = sym.factor(
sym_state_probs_1123[p12] / sym_state_probs_1123[p11]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_right_1123[2, 2] = sym.factor(
sym_state_probs_1123[p22] / sym_state_probs_1123[p21]
) # (2,1) -> (2,2)
    sym_state_recursive_ratios_right_1123[3, 2] = sym.factor(
        sym_state_probs_1123[p32] / sym_state_probs_1123[p31]
    ) # (3,1) -> (3,2)
sym_state_recursive_ratios_P0_1123 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1123[0, 0] = 1
sym_state_recursive_ratios_P0_1123[0, 1] = sym.factor(
sym_state_probs_1123[p01] / sym_state_probs_1123[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1123[1, 1] = sym.factor(
sym_state_probs_1123[p11] / sym_state_probs_1123[p00]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1123[2, 1] = sym.factor(
sym_state_probs_1123[p21] / sym_state_probs_1123[p00]
) # (0,0) -> (2,1)
sym_state_recursive_ratios_P0_1123[3, 1] = sym.factor(
sym_state_probs_1123[p31] / sym_state_probs_1123[p00]
) # (0,0) -> (3,1)
sym_state_recursive_ratios_P0_1123[0, 2] = sym.factor(
sym_state_probs_1123[p02] / sym_state_probs_1123[p00]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1123[1, 2] = sym.factor(
sym_state_probs_1123[p12] / sym_state_probs_1123[p00]
) # (0,0) -> (1,2)
sym_state_recursive_ratios_P0_1123[2, 2] = sym.factor(
sym_state_probs_1123[p22] / sym_state_probs_1123[p00]
) # (0,0) -> (2,2)
sym_state_recursive_ratios_P0_1123[3, 2] = sym.factor(
sym_state_probs_1123[p32] / sym_state_probs_1123[p00]
) # (0,0) -> (3,2)
return (
sym_state_probs_1123,
sym_state_recursive_ratios_1123,
sym_state_recursive_ratios_right_1123,
sym_state_recursive_ratios_P0_1123,
)
def get_symbolic_state_probabilities_1341():
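    """Return the symbolic steady-state probabilities of the Markov model
    with 1 server, threshold 3, system capacity 4 and buffer capacity 1 as
    closed-form expressions, together with the matrices of recursive
    state-probability ratios (standard, "right", and relative to state
    (0,0)).
    """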
# num_of_servers = 1
threshold = 3
system_capacity = 4
buffer_capacity = 1
all_states_1341 = abg.markov.build_states(
threshold=threshold,
system_capacity=system_capacity,
buffer_capacity=buffer_capacity,
)
sym_state_probs_1341 = [0 for _ in range(len(all_states_1341))]
sym_Lambda = sym.symbols("Lambda")
sym_lambda_1 = sym.symbols("lambda_1")
sym_lambda_2 = sym.symbols("lambda_2")
sym_mu = sym.symbols("mu")
sym_state_probs_1341[0] = (sym_lambda_2) * (sym_mu**5) + (sym_mu**6) # (0,0)
sym_state_probs_1341[1] = sym_Lambda * sym_lambda_2 * (sym_mu**4) + sym_Lambda * (
sym_mu**5
) # (0,1)
sym_state_probs_1341[2] = (sym_Lambda**2) * sym_lambda_2 * (sym_mu**3) + (
sym_Lambda**2
) * (
sym_mu**4
) # (0,2)
sym_state_probs_1341[3] = (sym_Lambda**3) * sym_lambda_2 * (sym_mu**2) + (
sym_Lambda**3
) * (
sym_mu**3
) # (0,3)
sym_state_probs_1341[4] = (
(sym_Lambda**3) * sym_lambda_1 * sym_lambda_2 * sym_mu
+ (sym_Lambda**3) * sym_lambda_2 * (sym_mu**2)
        + (sym_Lambda**3) * (sym_lambda_2**2) * sym_mu
) # (1,3)
sym_state_probs_1341[5] = (sym_Lambda**3) * sym_lambda_1 * (sym_mu**2) # (0,4)
sym_state_probs_1341[6] = (
(sym_Lambda**3) * (sym_lambda_1**2) * sym_lambda_2
+ (sym_Lambda**3) * sym_lambda_1 * (sym_lambda_2**2)
+ 2 * (sym_Lambda**3) * sym_lambda_1 * sym_lambda_2 * sym_mu
) # (1,4)
total_1341 = np.sum(sym_state_probs_1341)
sym_state_probs_1341 = [i / total_1341 for i in sym_state_probs_1341]
sym_state_recursive_ratios_1341 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1341[0, 0] = 1
sym_state_recursive_ratios_1341[0, 1] = sym.factor(
sym_state_probs_1341[1] / sym_state_probs_1341[0]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1341[0, 2] = sym.factor(
sym_state_probs_1341[2] / sym_state_probs_1341[1]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1341[0, 3] = sym.factor(
sym_state_probs_1341[3] / sym_state_probs_1341[2]
) # (0,2) -> (0,3)
sym_state_recursive_ratios_1341[0, 4] = sym.factor(
sym_state_probs_1341[5] / sym_state_probs_1341[3]
) # (0,3) -> (0,4)
sym_state_recursive_ratios_1341[1, 3] = sym.factor(
sym_state_probs_1341[4] / sym_state_probs_1341[3]
) # (0,3) -> (1,3)
sym_state_recursive_ratios_1341[1, 4] = sym.factor(
sym_state_probs_1341[6] / sym_state_probs_1341[5]
) # (0,4) -> (1,4)
sym_state_recursive_ratios_right_1341 = sym_state_recursive_ratios_1341.copy()
sym_state_recursive_ratios_right_1341[1, 4] = sym.factor(
sym_state_probs_1341[6] / sym_state_probs_1341[4]
) # (1,3) -> (1,4)
sym_state_recursive_ratios_P0_1341 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1341[0, 0] = 1
sym_state_recursive_ratios_P0_1341[0, 1] = sym.factor(
sym_state_probs_1341[1] / sym_state_probs_1341[0]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1341[0, 2] = sym.factor(
sym_state_probs_1341[2] / sym_state_probs_1341[0]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1341[0, 3] = sym.factor(
sym_state_probs_1341[3] / sym_state_probs_1341[0]
) # (0,0) -> (0,3)
sym_state_recursive_ratios_P0_1341[1, 3] = sym.factor(
sym_state_probs_1341[4] / sym_state_probs_1341[0]
) # (0,0) -> (1,3)
sym_state_recursive_ratios_P0_1341[0, 4] = sym.factor(
sym_state_probs_1341[5] / sym_state_probs_1341[0]
) # (0,0) -> (0,4)
sym_state_recursive_ratios_P0_1341[1, 4] = sym.factor(
sym_state_probs_1341[6] / sym_state_probs_1341[0]
) # (0,0) -> (1,4)
return (
sym_state_probs_1341,
sym_state_recursive_ratios_1341,
sym_state_recursive_ratios_right_1341,
sym_state_recursive_ratios_P0_1341,
)
def get_symbolic_state_probabilities_1131():
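    """Return the symbolic steady-state probabilities of the Markov model
    with 1 server, threshold 1, system capacity 3 and buffer capacity 1 as
    closed-form expressions, together with the matrices of recursive
    state-probability ratios (standard, "right", and relative to state
    (0,0)).
    """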
# num_of_servers = 1
threshold = 1
system_capacity = 3
buffer_capacity = 1
all_states_1131 = abg.markov.build_states(
threshold=threshold,
system_capacity=system_capacity,
buffer_capacity=buffer_capacity,
)
sym_state_probs_1131 = [0 for _ in range(len(all_states_1131))]
sym_Lambda = sym.symbols("Lambda")
sym_lambda_1 = sym.symbols("lambda_1")
sym_lambda_2 = sym.symbols("lambda_2")
sym_mu = sym.symbols("mu")
# (0,0)
sym_state_probs_1131[0] = (
(sym_mu**6)
+ 2 * (sym_lambda_2 * (sym_mu**5))
+ ((sym_lambda_2**2) * (sym_mu**4))
+ (sym_lambda_1 * sym_lambda_2 * (sym_mu**4))
)
# (0,1)
sym_state_probs_1131[1] = sym_state_probs_1131[0] * sym_Lambda / sym_mu
# (1,1)
sym_state_probs_1131[2] = (
(sym_Lambda * (sym_lambda_1**2) * sym_lambda_2 * (sym_mu**2))
+ (sym_Lambda * sym_lambda_2 * sym_lambda_1 * (sym_mu**3))
+ 2 * (sym_Lambda * sym_lambda_1 * (sym_lambda_2**2) * (sym_mu**2))
+ 2 * (sym_Lambda * (sym_lambda_2**2) * (sym_mu**3))
+ (sym_Lambda * (sym_lambda_2**3) * (sym_mu**2))
+ (sym_Lambda * sym_lambda_2 * (sym_mu**4))
)
# (0,2)
sym_state_probs_1131[3] = (
sym_Lambda * sym_lambda_1 * sym_mu**3 * (sym_lambda_2 + sym_mu)
)
# (1,2)
sym_state_probs_1131[4] = (sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu) * (
(sym_lambda_2**2)
+ 2 * sym_lambda_2 * sym_lambda_1
+ 3 * sym_lambda_2 * sym_mu
+ (sym_lambda_1**2)
+ 2 * sym_lambda_1 * sym_mu
+ 2 * (sym_mu**2)
)
# (0,3)
sym_state_probs_1131[5] = sym_Lambda * (sym_lambda_1**2) * (sym_mu**3)
# (1,3)
sym_state_probs_1131[6] = (sym_Lambda * sym_lambda_2 * (sym_lambda_1**2)) * (
(sym_lambda_2**2)
+ 2 * sym_lambda_2 * sym_lambda_1
+ 3 * sym_lambda_2 * sym_mu
+ (sym_lambda_1**2)
+ 2 * sym_lambda_1 * sym_mu
+ 3 * (sym_mu**2)
)
denominator = (
sym_Lambda * sym_lambda_2**3 * sym_lambda_1**2
+ sym_Lambda * sym_lambda_2**3 * sym_lambda_1 * sym_mu
+ sym_Lambda * sym_lambda_2**3 * sym_mu**2
+ 2 * sym_Lambda * sym_lambda_2**2 * sym_lambda_1**3
+ 5 * sym_Lambda * sym_lambda_2**2 * sym_lambda_1**2 * sym_mu
+ 5 * sym_Lambda * sym_lambda_2**2 * sym_lambda_1 * sym_mu**2
+ 3 * sym_Lambda * sym_lambda_2**2 * sym_mu**3
+ sym_Lambda * sym_lambda_2 * sym_lambda_1**4
+ 3 * sym_Lambda * sym_lambda_2 * sym_lambda_1**3 * sym_mu
+ 6 * sym_Lambda * sym_lambda_2 * sym_lambda_1**2 * sym_mu**2
+ 5 * sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu**3
+ 3 * sym_Lambda * sym_lambda_2 * sym_mu**4
+ sym_Lambda * sym_lambda_1**2 * sym_mu**3
+ sym_Lambda * sym_lambda_1 * sym_mu**4
+ sym_Lambda * sym_mu**5
+ sym_lambda_2**2 * sym_mu**4
+ sym_lambda_2 * sym_lambda_1 * sym_mu**4
+ 2 * sym_lambda_2 * sym_mu**5
+ sym_mu**6
)
sym_state_probs_1131 = [i / denominator for i in sym_state_probs_1131]
sym_state_recursive_ratios_1131 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1131[0, 0] = 1
sym_state_recursive_ratios_1131[0, 1] = sym.factor(
sym_state_probs_1131[1] / sym_state_probs_1131[0]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1131[1, 1] = sym.factor(
sym_state_probs_1131[2] / sym_state_probs_1131[1]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1131[0, 2] = sym.factor(
sym_state_probs_1131[3] / sym_state_probs_1131[1]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1131[1, 2] = sym.factor(
sym_state_probs_1131[4] / sym_state_probs_1131[3]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_1131[0, 3] = sym.factor(
sym_state_probs_1131[5] / sym_state_probs_1131[3]
) # (0,2) -> (0,3)
sym_state_recursive_ratios_1131[1, 3] = sym.factor(
sym_state_probs_1131[6] / sym_state_probs_1131[5]
) # (0,3) -> (1,3)
sym_state_recursive_ratios_right_1131 = sym_state_recursive_ratios_1131.copy()
sym_state_recursive_ratios_right_1131[1, 2] = sym.factor(
sym_state_probs_1131[4] / sym_state_probs_1131[2]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_right_1131[1, 3] = sym.factor(
sym_state_probs_1131[6] / sym_state_probs_1131[4]
) # (1,2) -> (1,3)
sym_state_recursive_ratios_P0_1131 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1131[0, 0] = 1
sym_state_recursive_ratios_P0_1131[0, 1] = sym.factor(
sym_state_probs_1131[1] / sym_state_probs_1131[0]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1131[1, 1] = sym.factor(
sym_state_probs_1131[2] / sym_state_probs_1131[0]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1131[0, 2] = sym.factor(
sym_state_probs_1131[3] / sym_state_probs_1131[0]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1131[1, 2] = sym.factor(
sym_state_probs_1131[4] / sym_state_probs_1131[0]
) # (0,0) -> (1,2)
sym_state_recursive_ratios_P0_1131[0, 3] = sym.factor(
sym_state_probs_1131[5] / sym_state_probs_1131[0]
) # (0,0) -> (0,3)
sym_state_recursive_ratios_P0_1131[1, 3] = sym.factor(
sym_state_probs_1131[6] / sym_state_probs_1131[0]
) # (0,0) -> (1,3)
return (
sym_state_probs_1131,
sym_state_recursive_ratios_1131,
sym_state_recursive_ratios_right_1131,
sym_state_recursive_ratios_P0_1131,
)
def get_symbolic_state_probabilities_1132():
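    """Return the symbolic steady-state probabilities of the Markov model
    with 1 server, threshold 1, system capacity 3 and buffer capacity 2,
    solved from the balance equations of the symbolic transition matrix,
    together with the matrices of recursive state-probability ratios
    (standard, "right", and relative to state (0,0)).
    """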
num_of_servers = 1
threshold = 1
system_capacity = 3
buffer_capacity = 2
Q_sym_1132 = abg.markov.get_symbolic_transition_matrix(
num_of_servers, threshold, system_capacity, buffer_capacity
)
p00, p01, p11, p21, p02, p12, p22, p03, p13, p23 = sym.symbols(
"p00, p01, p11, p21, p02, p12, p22, p03, p13, p23"
)
pi_1132 = sym.Matrix([p00, p01, p11, p21, p02, p12, p22, p03, p13, p23])
dimension_1132 = Q_sym_1132.shape[0]
M_sym_1132 = sym.Matrix(
[Q_sym_1132.transpose()[:-1, :], sym.ones(1, dimension_1132)]
)
sym_diff_equations_1132 = M_sym_1132 @ pi_1132
b_sym_1132 = sym.Matrix([sym.zeros(dimension_1132 - 1, 1), [1]])
eq0_1132 = sym.Eq(sym_diff_equations_1132[0], b_sym_1132[0])
eq1_1132 = sym.Eq(sym_diff_equations_1132[1], b_sym_1132[1])
eq2_1132 = sym.Eq(sym_diff_equations_1132[2], b_sym_1132[2])
eq3_1132 = sym.Eq(sym_diff_equations_1132[3], b_sym_1132[3])
eq4_1132 = sym.Eq(sym_diff_equations_1132[4], b_sym_1132[4])
eq5_1132 = sym.Eq(sym_diff_equations_1132[5], b_sym_1132[5])
eq6_1132 = sym.Eq(sym_diff_equations_1132[6], b_sym_1132[6])
eq7_1132 = sym.Eq(sym_diff_equations_1132[7], b_sym_1132[7])
eq8_1132 = sym.Eq(sym_diff_equations_1132[8], b_sym_1132[8])
eq9_1132 = sym.Eq(sym_diff_equations_1132[9], b_sym_1132[9])
sym_state_probs_1132 = sym.solve(
[
eq0_1132,
eq1_1132,
eq2_1132,
eq3_1132,
eq4_1132,
eq5_1132,
eq6_1132,
eq7_1132,
eq8_1132,
eq9_1132,
],
(p00, p01, p11, p21, p02, p12, p22, p03, p13, p23),
)
sym_state_recursive_ratios_1132 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1132[0, 0] = 1
sym_state_recursive_ratios_1132[0, 1] = sym.factor(
sym_state_probs_1132[p01] / sym_state_probs_1132[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1132[1, 1] = sym.factor(
sym_state_probs_1132[p11] / sym_state_probs_1132[p01]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1132[2, 1] = sym.factor(
sym_state_probs_1132[p21] / sym_state_probs_1132[p11]
) # (1,1) -> (2,1)
sym_state_recursive_ratios_1132[0, 2] = sym.factor(
sym_state_probs_1132[p02] / sym_state_probs_1132[p01]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1132[1, 2] = sym.factor(
sym_state_probs_1132[p12] / sym_state_probs_1132[p02]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_1132[2, 2] = sym.factor(
sym_state_probs_1132[p22] / sym_state_probs_1132[p12]
) # (1,2) -> (2,2)
sym_state_recursive_ratios_1132[0, 3] = sym.factor(
sym_state_probs_1132[p03] / sym_state_probs_1132[p02]
) # (0,2) -> (0,3)
sym_state_recursive_ratios_1132[1, 3] = sym.factor(
sym_state_probs_1132[p13] / sym_state_probs_1132[p03]
) # (0,3) -> (1,3)
sym_state_recursive_ratios_1132[2, 3] = sym.factor(
sym_state_probs_1132[p23] / sym_state_probs_1132[p13]
) # (1,3) -> (2,3)
sym_state_recursive_ratios_right_1132 = sym_state_recursive_ratios_1132.copy()
sym_state_recursive_ratios_right_1132[1, 2] = sym.factor(
sym_state_probs_1132[p12] / sym_state_probs_1132[p11]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_right_1132[1, 3] = sym.factor(
sym_state_probs_1132[p13] / sym_state_probs_1132[p12]
) # (1,2) -> (1,3)
sym_state_recursive_ratios_right_1132[2, 2] = sym.factor(
sym_state_probs_1132[p22] / sym_state_probs_1132[p21]
) # (2,1) -> (2,2)
sym_state_recursive_ratios_right_1132[2, 3] = sym.factor(
sym_state_probs_1132[p23] / sym_state_probs_1132[p22]
) # (2,2) -> (2,3)
sym_state_recursive_ratios_P0_1132 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1132[0, 0] = 1
sym_state_recursive_ratios_P0_1132[0, 1] = sym.factor(
sym_state_probs_1132[p01] / sym_state_probs_1132[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1132[1, 1] = sym.factor(
sym_state_probs_1132[p11] / sym_state_probs_1132[p00]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1132[2, 1] = sym.factor(
sym_state_probs_1132[p21] / sym_state_probs_1132[p00]
) # (0,0) -> (2,1)
sym_state_recursive_ratios_P0_1132[0, 2] = sym.factor(
sym_state_probs_1132[p02] / sym_state_probs_1132[p00]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1132[1, 2] = sym.factor(
sym_state_probs_1132[p12] / sym_state_probs_1132[p00]
) # (0,0) -> (1,2)
sym_state_recursive_ratios_P0_1132[2, 2] = sym.factor(
sym_state_probs_1132[p22] / sym_state_probs_1132[p00]
) # (0,0) -> (2,2)
sym_state_recursive_ratios_P0_1132[0, 3] = sym.factor(
sym_state_probs_1132[p03] / sym_state_probs_1132[p00]
) # (0,0) -> (0,3)
sym_state_recursive_ratios_P0_1132[1, 3] = sym.factor(
sym_state_probs_1132[p13] / sym_state_probs_1132[p00]
) # (0,0) -> (1,3)
sym_state_recursive_ratios_P0_1132[2, 3] = sym.factor(
sym_state_probs_1132[p23] / sym_state_probs_1132[p00]
) # (0,0) -> (2,3)
return (
sym_state_probs_1132,
sym_state_recursive_ratios_1132,
sym_state_recursive_ratios_right_1132,
sym_state_recursive_ratios_P0_1132,
)
def get_symbolic_state_probabilities_1141():
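    """Return the symbolic steady-state probabilities of the Markov model
    with 1 server, threshold 1, system capacity 4 and buffer capacity 1,
    solved from the balance equations of the symbolic transition matrix,
    together with the matrices of recursive state-probability ratios
    (standard, "right", and relative to state (0,0)).
    """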
num_of_servers = 1
threshold = 1
system_capacity = 4
buffer_capacity = 1
Q_sym_1141 = abg.markov.get_symbolic_transition_matrix(
num_of_servers, threshold, system_capacity, buffer_capacity
)
p00, p01, p11, p02, p12, p03, p13, p04, p14 = sym.symbols(
"p00, p01, p11, p02, p12, p03, p13, p04, p14"
)
pi_1141 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14])
dimension_1141 = Q_sym_1141.shape[0]
M_sym_1141 = sym.Matrix(
[Q_sym_1141.transpose()[:-1, :], sym.ones(1, dimension_1141)]
)
sym_diff_equations_1141 = M_sym_1141 @ pi_1141
b_sym_1141 = sym.Matrix([sym.zeros(dimension_1141 - 1, 1), [1]])
eq0_1141 = sym.Eq(sym_diff_equations_1141[0], b_sym_1141[0])
eq1_1141 = sym.Eq(sym_diff_equations_1141[1], b_sym_1141[1])
eq2_1141 = sym.Eq(sym_diff_equations_1141[2], b_sym_1141[2])
eq3_1141 = sym.Eq(sym_diff_equations_1141[3], b_sym_1141[3])
eq4_1141 = sym.Eq(sym_diff_equations_1141[4], b_sym_1141[4])
eq5_1141 = sym.Eq(sym_diff_equations_1141[5], b_sym_1141[5])
eq6_1141 = sym.Eq(sym_diff_equations_1141[6], b_sym_1141[6])
eq7_1141 = sym.Eq(sym_diff_equations_1141[7], b_sym_1141[7])
eq8_1141 = sym.Eq(sym_diff_equations_1141[8], b_sym_1141[8])
sym_state_probs_1141 = sym.solve(
[
eq0_1141,
eq1_1141,
eq2_1141,
eq3_1141,
eq4_1141,
eq5_1141,
eq6_1141,
eq7_1141,
eq8_1141,
],
(p00, p01, p11, p02, p12, p03, p13, p04, p14),
)
sym_state_recursive_ratios_1141 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1141[0, 0] = 1
sym_state_recursive_ratios_1141[0, 1] = sym.factor(
sym_state_probs_1141[p01] / sym_state_probs_1141[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1141[1, 1] = sym.factor(
sym_state_probs_1141[p11] / sym_state_probs_1141[p01]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1141[0, 2] = sym.factor(
sym_state_probs_1141[p02] / sym_state_probs_1141[p01]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1141[1, 2] = sym.factor(
sym_state_probs_1141[p12] / sym_state_probs_1141[p02]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_1141[0, 3] = sym.factor(
sym_state_probs_1141[p03] / sym_state_probs_1141[p02]
) # (0,2) -> (0,3)
sym_state_recursive_ratios_1141[1, 3] = sym.factor(
sym_state_probs_1141[p13] / sym_state_probs_1141[p03]
) # (0,3) -> (1,3)
sym_state_recursive_ratios_1141[0, 4] = sym.factor(
sym_state_probs_1141[p04] / sym_state_probs_1141[p03]
) # (0,3) -> (0,4)
sym_state_recursive_ratios_1141[1, 4] = sym.factor(
sym_state_probs_1141[p14] / sym_state_probs_1141[p04]
) # (0,4) -> (1,4)
sym_state_recursive_ratios_right_1141 = sym_state_recursive_ratios_1141.copy()
sym_state_recursive_ratios_right_1141[1, 2] = sym.factor(
sym_state_probs_1141[p12] / sym_state_probs_1141[p11]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_right_1141[1, 3] = sym.factor(
sym_state_probs_1141[p13] / sym_state_probs_1141[p12]
) # (1,2) -> (1,3)
sym_state_recursive_ratios_right_1141[1, 4] = sym.factor(
sym_state_probs_1141[p14] / sym_state_probs_1141[p13]
) # (1,3) -> (1,4)
sym_state_recursive_ratios_P0_1141 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1141[0, 0] = 1
sym_state_recursive_ratios_P0_1141[0, 1] = sym.factor(
sym_state_probs_1141[p01] / sym_state_probs_1141[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1141[1, 1] = sym.factor(
sym_state_probs_1141[p11] / sym_state_probs_1141[p00]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1141[0, 2] = sym.factor(
sym_state_probs_1141[p02] / sym_state_probs_1141[p00]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1141[1, 2] = sym.factor(
sym_state_probs_1141[p12] / sym_state_probs_1141[p00]
) # (0,0) -> (1,2)
sym_state_recursive_ratios_P0_1141[0, 3] = sym.factor(
sym_state_probs_1141[p03] / sym_state_probs_1141[p00]
) # (0,0) -> (0,3)
sym_state_recursive_ratios_P0_1141[1, 3] = sym.factor(
sym_state_probs_1141[p13] / sym_state_probs_1141[p00]
) # (0,0) -> (1,3)
sym_state_recursive_ratios_P0_1141[0, 4] = sym.factor(
sym_state_probs_1141[p04] / sym_state_probs_1141[p00]
) # (0,0) -> (0,4)
sym_state_recursive_ratios_P0_1141[1, 4] = sym.factor(
sym_state_probs_1141[p14] / sym_state_probs_1141[p00]
) # (0,0) -> (1,4)
return (
sym_state_probs_1141,
sym_state_recursive_ratios_1141,
sym_state_recursive_ratios_right_1141,
sym_state_recursive_ratios_P0_1141,
)
def get_symbolic_state_probabilities_1142():
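    """Return the symbolic steady-state probabilities of the Markov model
    with 1 server, threshold 1, system capacity 4 and buffer capacity 2,
    solved from the balance equations of the symbolic transition matrix,
    together with the matrices of recursive state-probability ratios
    (standard, "right", and relative to state (0,0)).
    """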
num_of_servers = 1
threshold = 1
system_capacity = 4
buffer_capacity = 2
Q_sym_1142 = abg.markov.get_symbolic_transition_matrix(
num_of_servers=num_of_servers,
threshold=threshold,
system_capacity=system_capacity,
buffer_capacity=buffer_capacity,
)
p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24 = sym.symbols(
"p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24"
)
pi_1142 = sym.Matrix(
[p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24]
)
dimension_1142 = Q_sym_1142.shape[0]
M_sym_1142 = sym.Matrix(
[Q_sym_1142.transpose()[:-1, :], sym.ones(1, dimension_1142)]
)
sym_diff_equations_1142 = M_sym_1142 @ pi_1142
b_sym_1142 = sym.Matrix([sym.zeros(dimension_1142 - 1, 1), [1]])
eq0_1142 = sym.Eq(sym_diff_equations_1142[0], b_sym_1142[0])
eq1_1142 = sym.Eq(sym_diff_equations_1142[1], b_sym_1142[1])
eq2_1142 = sym.Eq(sym_diff_equations_1142[2], b_sym_1142[2])
eq3_1142 = sym.Eq(sym_diff_equations_1142[3], b_sym_1142[3])
eq4_1142 = sym.Eq(sym_diff_equations_1142[4], b_sym_1142[4])
eq5_1142 = sym.Eq(sym_diff_equations_1142[5], b_sym_1142[5])
eq6_1142 = sym.Eq(sym_diff_equations_1142[6], b_sym_1142[6])
eq7_1142 = sym.Eq(sym_diff_equations_1142[7], b_sym_1142[7])
eq8_1142 = sym.Eq(sym_diff_equations_1142[8], b_sym_1142[8])
eq9_1142 = sym.Eq(sym_diff_equations_1142[9], b_sym_1142[9])
eq10_1142 = sym.Eq(sym_diff_equations_1142[10], b_sym_1142[10])
eq11_1142 = sym.Eq(sym_diff_equations_1142[11], b_sym_1142[11])
eq12_1142 = sym.Eq(sym_diff_equations_1142[12], b_sym_1142[12])
sym_state_probs_1142 = sym.solve(
[
eq0_1142,
eq1_1142,
eq2_1142,
eq3_1142,
eq4_1142,
eq5_1142,
eq6_1142,
eq7_1142,
eq8_1142,
eq9_1142,
eq10_1142,
eq11_1142,
eq12_1142,
],
(p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24),
)
sym_state_recursive_ratios_1142 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1142[0, 0] = 1
sym_state_recursive_ratios_1142[0, 1] = sym.factor(
sym_state_probs_1142[p01] / sym_state_probs_1142[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1142[1, 1] = sym.factor(
sym_state_probs_1142[p11] / sym_state_probs_1142[p01]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1142[2, 1] = sym.factor(
sym_state_probs_1142[p21] / sym_state_probs_1142[p11]
) # (1,1) -> (2,1)
sym_state_recursive_ratios_1142[0, 2] = sym.factor(
sym_state_probs_1142[p02] / sym_state_probs_1142[p01]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1142[1, 2] = sym.factor(
sym_state_probs_1142[p12] / sym_state_probs_1142[p02]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_1142[2, 2] = sym.factor(
sym_state_probs_1142[p22] / sym_state_probs_1142[p12]
) # (1,2) -> (2,2)
sym_state_recursive_ratios_1142[0, 3] = sym.factor(
sym_state_probs_1142[p03] / sym_state_probs_1142[p02]
) # (0,2) -> (0,3)
sym_state_recursive_ratios_1142[1, 3] = sym.factor(
sym_state_probs_1142[p13] / sym_state_probs_1142[p03]
) # (0,3) -> (1,3)
sym_state_recursive_ratios_1142[2, 3] = sym.factor(
sym_state_probs_1142[p23] / sym_state_probs_1142[p13]
) # (1,3) -> (2,3)
sym_state_recursive_ratios_1142[0, 4] = sym.factor(
sym_state_probs_1142[p04] / sym_state_probs_1142[p03]
) # (0,3) -> (0,4)
sym_state_recursive_ratios_1142[1, 4] = sym.factor(
sym_state_probs_1142[p14] / sym_state_probs_1142[p04]
) # (0,4) -> (1,4)
sym_state_recursive_ratios_1142[2, 4] = sym.factor(
sym_state_probs_1142[p24] / sym_state_probs_1142[p14]
) # (1,4) -> (2,4)
sym_state_recursive_ratios_right_1142 = sym_state_recursive_ratios_1142.copy()
sym_state_recursive_ratios_right_1142[1, 2] = sym.factor(
sym_state_probs_1142[p12] / sym_state_probs_1142[p11]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_right_1142[1, 3] = sym.factor(
sym_state_probs_1142[p13] / sym_state_probs_1142[p12]
) # (1,2) -> (1,3)
sym_state_recursive_ratios_right_1142[1, 4] = sym.factor(
sym_state_probs_1142[p14] / sym_state_probs_1142[p13]
) # (1,3) -> (1,4)
sym_state_recursive_ratios_right_1142[2, 2] = sym.factor(
sym_state_probs_1142[p22] / sym_state_probs_1142[p21]
) # (2,1) -> (2,2)
sym_state_recursive_ratios_right_1142[2, 3] = sym.factor(
sym_state_probs_1142[p23] / sym_state_probs_1142[p22]
) # (2,2) -> (2,3)
sym_state_recursive_ratios_right_1142[2, 4] = sym.factor(
sym_state_probs_1142[p24] / sym_state_probs_1142[p23]
) # (2,3) -> (2,4)
sym_state_recursive_ratios_P0_1142 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1142[0, 0] = 1
sym_state_recursive_ratios_P0_1142[0, 1] = sym.factor(
sym_state_probs_1142[p01] / sym_state_probs_1142[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1142[1, 1] = sym.factor(
sym_state_probs_1142[p11] / sym_state_probs_1142[p00]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1142[2, 1] = sym.factor(
sym_state_probs_1142[p21] / sym_state_probs_1142[p00]
) # (0,0) -> (2,1)
sym_state_recursive_ratios_P0_1142[0, 2] = sym.factor(
sym_state_probs_1142[p02] / sym_state_probs_1142[p00]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1142[1, 2] = sym.factor(
sym_state_probs_1142[p12] / sym_state_probs_1142[p00]
) # (0,0) -> (1,2)
sym_state_recursive_ratios_P0_1142[2, 2] = sym.factor(
sym_state_probs_1142[p22] / sym_state_probs_1142[p00]
) # (0,0) -> (2,2)
sym_state_recursive_ratios_P0_1142[0, 3] = sym.factor(
sym_state_probs_1142[p03] / sym_state_probs_1142[p00]
) # (0,0) -> (0,3)
sym_state_recursive_ratios_P0_1142[1, 3] = sym.factor(
sym_state_probs_1142[p13] / sym_state_probs_1142[p00]
) # (0,0) -> (1,3)
sym_state_recursive_ratios_P0_1142[2, 3] = sym.factor(
sym_state_probs_1142[p23] / sym_state_probs_1142[p00]
) # (0,0) -> (2,3)
sym_state_recursive_ratios_P0_1142[0, 4] = sym.factor(
sym_state_probs_1142[p04] / sym_state_probs_1142[p00]
) # (0,0) -> (0,4)
sym_state_recursive_ratios_P0_1142[1, 4] = sym.factor(
sym_state_probs_1142[p14] / sym_state_probs_1142[p00]
) # (0,0) -> (1,4)
sym_state_recursive_ratios_P0_1142[2, 4] = sym.factor(
sym_state_probs_1142[p24] / sym_state_probs_1142[p00]
) # (0,0) -> (2,4)
return (
sym_state_probs_1142,
sym_state_recursive_ratios_1142,
sym_state_recursive_ratios_right_1142,
sym_state_recursive_ratios_P0_1142,
)
def get_symbolic_state_probabilities_1151():
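    """Symbolic steady-state probabilities of the model with 1 server,
    threshold 1, system capacity 5 and buffer capacity 1, solved from the
    symbolic transition matrix, plus the recursive state-probability ratio
    matrices (standard, "right", and relative to state (0,0)).
    """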
num_of_servers = 1
threshold = 1
system_capacity = 5
buffer_capacity = 1
Q_sym_1151 = abg.markov.get_symbolic_transition_matrix(
num_of_servers, threshold, system_capacity, buffer_capacity
)
p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15 = sym.symbols(
"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15"
)
pi_1151 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15])
dimension_1151 = Q_sym_1151.shape[0]
M_sym_1151 = sym.Matrix(
[Q_sym_1151.transpose()[:-1, :], sym.ones(1, dimension_1151)]
)
sym_diff_equations_1151 = M_sym_1151 @ pi_1151
b_sym_1151 = sym.Matrix([sym.zeros(dimension_1151 - 1, 1), [1]])
eq0_1151 = sym.Eq(sym_diff_equations_1151[0], b_sym_1151[0])
eq1_1151 = sym.Eq(sym_diff_equations_1151[1], b_sym_1151[1])
eq2_1151 = sym.Eq(sym_diff_equations_1151[2], b_sym_1151[2])
eq3_1151 = sym.Eq(sym_diff_equations_1151[3], b_sym_1151[3])
eq4_1151 = sym.Eq(sym_diff_equations_1151[4], b_sym_1151[4])
eq5_1151 = sym.Eq(sym_diff_equations_1151[5], b_sym_1151[5])
eq6_1151 = sym.Eq(sym_diff_equations_1151[6], b_sym_1151[6])
eq7_1151 = sym.Eq(sym_diff_equations_1151[7], b_sym_1151[7])
eq8_1151 = sym.Eq(sym_diff_equations_1151[8], b_sym_1151[8])
eq9_1151 = sym.Eq(sym_diff_equations_1151[9], b_sym_1151[9])
eq10_1151 = sym.Eq(sym_diff_equations_1151[10], b_sym_1151[10])
sym_state_probs_1151 = sym.solve(
[
eq0_1151,
eq1_1151,
eq2_1151,
eq3_1151,
eq4_1151,
eq5_1151,
eq6_1151,
eq7_1151,
eq8_1151,
eq9_1151,
eq10_1151,
],
(p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15),
)
sym_state_recursive_ratios_1151 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1151[0, 0] = 1
sym_state_recursive_ratios_1151[0, 1] = sym.factor(
sym_state_probs_1151[p01] / sym_state_probs_1151[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1151[1, 1] = sym.factor(
sym_state_probs_1151[p11] / sym_state_probs_1151[p01]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1151[0, 2] = sym.factor(
sym_state_probs_1151[p02] / sym_state_probs_1151[p01]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1151[1, 2] = sym.factor(
sym_state_probs_1151[p12] / sym_state_probs_1151[p02]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_1151[0, 3] = sym.factor(
sym_state_probs_1151[p03] / sym_state_probs_1151[p02]
) # (0,2) -> (0,3)
sym_state_recursive_ratios_1151[1, 3] = sym.factor(
sym_state_probs_1151[p13] / sym_state_probs_1151[p03]
) # (0,3) -> (1,3)
sym_state_recursive_ratios_1151[0, 4] = sym.factor(
sym_state_probs_1151[p04] / sym_state_probs_1151[p03]
) # (0,3) -> (0,4)
sym_state_recursive_ratios_1151[1, 4] = sym.factor(
sym_state_probs_1151[p14] / sym_state_probs_1151[p04]
) # (0,4) -> (1,4)
sym_state_recursive_ratios_1151[0, 5] = sym.factor(
sym_state_probs_1151[p05] / sym_state_probs_1151[p04]
) # (0,4) -> (0,5)
sym_state_recursive_ratios_1151[1, 5] = sym.factor(
sym_state_probs_1151[p15] / sym_state_probs_1151[p05]
) # (0,5) -> (1,5)
sym_state_recursive_ratios_right_1151 = sym_state_recursive_ratios_1151.copy()
sym_state_recursive_ratios_right_1151[1, 2] = sym.factor(
sym_state_probs_1151[p12] / sym_state_probs_1151[p11]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_right_1151[1, 3] = sym.factor(
sym_state_probs_1151[p13] / sym_state_probs_1151[p12]
) # (1,2) -> (1,3)
sym_state_recursive_ratios_right_1151[1, 4] = sym.factor(
sym_state_probs_1151[p14] / sym_state_probs_1151[p13]
) # (1,3) -> (1,4)
sym_state_recursive_ratios_right_1151[1, 5] = sym.factor(
sym_state_probs_1151[p15] / sym_state_probs_1151[p14]
) # (1,4) -> (1,5)
sym_state_recursive_ratios_P0_1151 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1151[0, 0] = 1
sym_state_recursive_ratios_P0_1151[0, 1] = sym.factor(
sym_state_probs_1151[p01] / sym_state_probs_1151[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1151[1, 1] = sym.factor(
sym_state_probs_1151[p11] / sym_state_probs_1151[p00]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1151[0, 2] = sym.factor(
sym_state_probs_1151[p02] / sym_state_probs_1151[p00]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1151[1, 2] = sym.factor(
sym_state_probs_1151[p12] / sym_state_probs_1151[p00]
) # (0,0) -> (1,2)
sym_state_recursive_ratios_P0_1151[0, 3] = sym.factor(
sym_state_probs_1151[p03] / sym_state_probs_1151[p00]
) # (0,0) -> (0,3)
sym_state_recursive_ratios_P0_1151[1, 3] = sym.factor(
sym_state_probs_1151[p13] / sym_state_probs_1151[p00]
) # (0,0) -> (1,3)
sym_state_recursive_ratios_P0_1151[0, 4] = sym.factor(
sym_state_probs_1151[p04] / sym_state_probs_1151[p00]
) # (0,0) -> (0,4)
sym_state_recursive_ratios_P0_1151[1, 4] = sym.factor(
sym_state_probs_1151[p14] / sym_state_probs_1151[p00]
) # (0,0) -> (1,4)
sym_state_recursive_ratios_P0_1151[0, 5] = sym.factor(
sym_state_probs_1151[p05] / sym_state_probs_1151[p00]
) # (0,0) -> (0,5)
sym_state_recursive_ratios_P0_1151[1, 5] = sym.factor(
sym_state_probs_1151[p15] / sym_state_probs_1151[p00]
) # (0,0) -> (1,5)
return (
sym_state_probs_1151,
sym_state_recursive_ratios_1151,
sym_state_recursive_ratios_right_1151,
sym_state_recursive_ratios_P0_1151,
)
def get_symbolic_state_probabilities_1161():
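    """Symbolic steady-state probabilities of the model with 1 server,
    threshold 1, system capacity 6 and buffer capacity 1, solved from the
    symbolic transition matrix, plus the recursive state-probability ratio
    matrices (standard, "right", and relative to state (0,0)).
    """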
num_of_servers = 1
threshold = 1
system_capacity = 6
buffer_capacity = 1
Q_sym_1161 = abg.markov.get_symbolic_transition_matrix(
num_of_servers, threshold, system_capacity, buffer_capacity
)
p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16 = sym.symbols(
"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16"
)
pi_1161 = sym.Matrix(
[p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16]
)
dimension_1161 = Q_sym_1161.shape[0]
M_sym_1161 = sym.Matrix(
[Q_sym_1161.transpose()[:-1, :], sym.ones(1, dimension_1161)]
)
sym_diff_equations_1161 = M_sym_1161 @ pi_1161
b_sym_1161 = sym.Matrix([sym.zeros(dimension_1161 - 1, 1), [1]])
eq0_1161 = sym.Eq(sym_diff_equations_1161[0], b_sym_1161[0])
eq1_1161 = sym.Eq(sym_diff_equations_1161[1], b_sym_1161[1])
eq2_1161 = sym.Eq(sym_diff_equations_1161[2], b_sym_1161[2])
eq3_1161 = sym.Eq(sym_diff_equations_1161[3], b_sym_1161[3])
eq4_1161 = sym.Eq(sym_diff_equations_1161[4], b_sym_1161[4])
eq5_1161 = sym.Eq(sym_diff_equations_1161[5], b_sym_1161[5])
eq6_1161 = sym.Eq(sym_diff_equations_1161[6], b_sym_1161[6])
eq7_1161 = sym.Eq(sym_diff_equations_1161[7], b_sym_1161[7])
eq8_1161 = sym.Eq(sym_diff_equations_1161[8], b_sym_1161[8])
eq9_1161 = sym.Eq(sym_diff_equations_1161[9], b_sym_1161[9])
eq10_1161 = sym.Eq(sym_diff_equations_1161[10], b_sym_1161[10])
eq11_1161 = sym.Eq(sym_diff_equations_1161[11], b_sym_1161[11])
eq12_1161 = sym.Eq(sym_diff_equations_1161[12], b_sym_1161[12])
sym_state_probs_1161 = sym.solve(
[
eq0_1161,
eq1_1161,
eq2_1161,
eq3_1161,
eq4_1161,
eq5_1161,
eq6_1161,
eq7_1161,
eq8_1161,
eq9_1161,
eq10_1161,
eq11_1161,
eq12_1161,
],
(p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16),
)
sym_state_recursive_ratios_1161 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1161[0, 0] = 1
sym_state_recursive_ratios_1161[0, 1] = sym.factor(
sym_state_probs_1161[p01] / sym_state_probs_1161[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1161[1, 1] = sym.factor(
sym_state_probs_1161[p11] / sym_state_probs_1161[p01]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1161[0, 2] = sym.factor(
sym_state_probs_1161[p02] / sym_state_probs_1161[p01]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1161[1, 2] = sym.factor(
sym_state_probs_1161[p12] / sym_state_probs_1161[p02]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_1161[0, 3] = sym.factor(
sym_state_probs_1161[p03] / sym_state_probs_1161[p02]
) # (0,2) -> (0,3)
sym_state_recursive_ratios_1161[1, 3] = sym.factor(
sym_state_probs_1161[p13] / sym_state_probs_1161[p03]
) # (0,3) -> (1,3)
sym_state_recursive_ratios_1161[0, 4] = sym.factor(
sym_state_probs_1161[p04] / sym_state_probs_1161[p03]
) # (0,3) -> (0,4)
sym_state_recursive_ratios_1161[1, 4] = sym.factor(
sym_state_probs_1161[p14] / sym_state_probs_1161[p04]
) # (0,4) -> (1,4)
sym_state_recursive_ratios_1161[0, 5] = sym.factor(
sym_state_probs_1161[p05] / sym_state_probs_1161[p04]
) # (0,4) -> (0,5)
sym_state_recursive_ratios_1161[1, 5] = sym.factor(
sym_state_probs_1161[p15] / sym_state_probs_1161[p05]
) # (0,5) -> (1,5)
sym_state_recursive_ratios_1161[0, 6] = sym.factor(
sym_state_probs_1161[p06] / sym_state_probs_1161[p05]
) # (0,5) -> (0,6)
sym_state_recursive_ratios_1161[1, 6] = sym.factor(
sym_state_probs_1161[p16] / sym_state_probs_1161[p06]
) # (0,6) -> (1,6)
sym_state_recursive_ratios_right_1161 = sym_state_recursive_ratios_1161.copy()
sym_state_recursive_ratios_right_1161[1, 2] = sym.factor(
sym_state_probs_1161[p12] / sym_state_probs_1161[p11]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_right_1161[1, 3] = sym.factor(
sym_state_probs_1161[p13] / sym_state_probs_1161[p12]
) # (1,2) -> (1,3)
sym_state_recursive_ratios_right_1161[1, 4] = sym.factor(
sym_state_probs_1161[p14] / sym_state_probs_1161[p13]
) # (1,3) -> (1,4)
sym_state_recursive_ratios_right_1161[1, 5] = sym.factor(
sym_state_probs_1161[p15] / sym_state_probs_1161[p14]
) # (1,4) -> (1,5)
sym_state_recursive_ratios_right_1161[1, 6] = sym.factor(
sym_state_probs_1161[p16] / sym_state_probs_1161[p15]
) # (1,5) -> (1,6)
sym_state_recursive_ratios_P0_1161 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1161[0, 0] = 1
sym_state_recursive_ratios_P0_1161[0, 1] = sym.factor(
sym_state_probs_1161[p01] / sym_state_probs_1161[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1161[1, 1] = sym.factor(
sym_state_probs_1161[p11] / sym_state_probs_1161[p00]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1161[0, 2] = sym.factor(
sym_state_probs_1161[p02] / sym_state_probs_1161[p00]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1161[1, 2] = sym.factor(
sym_state_probs_1161[p12] / sym_state_probs_1161[p00]
) # (0,0) -> (1,2)
sym_state_recursive_ratios_P0_1161[0, 3] = sym.factor(
sym_state_probs_1161[p03] / sym_state_probs_1161[p00]
) # (0,0) -> (0,3)
sym_state_recursive_ratios_P0_1161[1, 3] = sym.factor(
sym_state_probs_1161[p13] / sym_state_probs_1161[p00]
) # (0,0) -> (1,3)
sym_state_recursive_ratios_P0_1161[0, 4] = sym.factor(
sym_state_probs_1161[p04] / sym_state_probs_1161[p00]
) # (0,0) -> (0,4)
sym_state_recursive_ratios_P0_1161[1, 4] = sym.factor(
sym_state_probs_1161[p14] / sym_state_probs_1161[p00]
) # (0,0) -> (1,4)
sym_state_recursive_ratios_P0_1161[0, 5] = sym.factor(
sym_state_probs_1161[p05] / sym_state_probs_1161[p00]
) # (0,0) -> (0,5)
sym_state_recursive_ratios_P0_1161[1, 5] = sym.factor(
sym_state_probs_1161[p15] / sym_state_probs_1161[p00]
) # (0,0) -> (1,5)
sym_state_recursive_ratios_P0_1161[0, 6] = sym.factor(
sym_state_probs_1161[p06] / sym_state_probs_1161[p00]
) # (0,0) -> (0,6)
sym_state_recursive_ratios_P0_1161[1, 6] = sym.factor(
sym_state_probs_1161[p16] / sym_state_probs_1161[p00]
) # (0,0) -> (1,6)
return (
sym_state_probs_1161,
sym_state_recursive_ratios_1161,
sym_state_recursive_ratios_right_1161,
sym_state_recursive_ratios_P0_1161,
)
def get_symbolic_state_probabilities_1171():
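    """Symbolic steady-state probabilities of the model with 1 server,
    threshold 1, system capacity 7 and buffer capacity 1, solved from the
    symbolic transition matrix, plus the recursive state-probability ratio
    matrices (standard, "right", and relative to state (0,0)).
    """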
num_of_servers = 1
threshold = 1
system_capacity = 7
buffer_capacity = 1
Q_sym_1171 = abg.markov.get_symbolic_transition_matrix(
num_of_servers, threshold, system_capacity, buffer_capacity
)
(
p00,
p01,
p11,
p02,
p12,
p03,
p13,
p04,
p14,
p05,
p15,
p06,
p16,
p07,
p17,
) = sym.symbols(
"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17"
)
pi_1171 = sym.Matrix(
[p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17]
)
dimension_1171 = Q_sym_1171.shape[0]
M_sym_1171 = sym.Matrix(
[Q_sym_1171.transpose()[:-1, :], sym.ones(1, dimension_1171)]
)
sym_diff_equations_1171 = M_sym_1171 @ pi_1171
b_sym_1171 = sym.Matrix([sym.zeros(dimension_1171 - 1, 1), [1]])
eq0_1171 = sym.Eq(sym_diff_equations_1171[0], b_sym_1171[0])
eq1_1171 = sym.Eq(sym_diff_equations_1171[1], b_sym_1171[1])
eq2_1171 = sym.Eq(sym_diff_equations_1171[2], b_sym_1171[2])
eq3_1171 = sym.Eq(sym_diff_equations_1171[3], b_sym_1171[3])
eq4_1171 = sym.Eq(sym_diff_equations_1171[4], b_sym_1171[4])
eq5_1171 = sym.Eq(sym_diff_equations_1171[5], b_sym_1171[5])
eq6_1171 = sym.Eq(sym_diff_equations_1171[6], b_sym_1171[6])
eq7_1171 = sym.Eq(sym_diff_equations_1171[7], b_sym_1171[7])
eq8_1171 = sym.Eq(sym_diff_equations_1171[8], b_sym_1171[8])
eq9_1171 = sym.Eq(sym_diff_equations_1171[9], b_sym_1171[9])
eq10_1171 = sym.Eq(sym_diff_equations_1171[10], b_sym_1171[10])
eq11_1171 = sym.Eq(sym_diff_equations_1171[11], b_sym_1171[11])
eq12_1171 = sym.Eq(sym_diff_equations_1171[12], b_sym_1171[12])
eq13_1171 = sym.Eq(sym_diff_equations_1171[13], b_sym_1171[13])
eq14_1171 = sym.Eq(sym_diff_equations_1171[14], b_sym_1171[14])
sym_state_probs_1171 = sym.solve(
[
eq0_1171,
eq1_1171,
eq2_1171,
eq3_1171,
eq4_1171,
eq5_1171,
eq6_1171,
eq7_1171,
eq8_1171,
eq9_1171,
eq10_1171,
eq11_1171,
eq12_1171,
eq13_1171,
eq14_1171,
],
(p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17),
)
sym_state_recursive_ratios_1171 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1171[0, 0] = 1
sym_state_recursive_ratios_1171[0, 1] = sym.factor(
sym_state_probs_1171[p01] / sym_state_probs_1171[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1171[1, 1] = sym.factor(
sym_state_probs_1171[p11] / sym_state_probs_1171[p01]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1171[0, 2] = sym.factor(
sym_state_probs_1171[p02] / sym_state_probs_1171[p01]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1171[1, 2] = sym.factor(
sym_state_probs_1171[p12] / sym_state_probs_1171[p02]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_1171[0, 3] = sym.factor(
sym_state_probs_1171[p03] / sym_state_probs_1171[p02]
) # (0,2) -> (0,3)
sym_state_recursive_ratios_1171[1, 3] = sym.factor(
sym_state_probs_1171[p13] / sym_state_probs_1171[p03]
) # (0,3) -> (1,3)
sym_state_recursive_ratios_1171[0, 4] = sym.factor(
sym_state_probs_1171[p04] / sym_state_probs_1171[p03]
) # (0,3) -> (0,4)
sym_state_recursive_ratios_1171[1, 4] = sym.factor(
sym_state_probs_1171[p14] / sym_state_probs_1171[p04]
) # (0,4) -> (1,4)
sym_state_recursive_ratios_1171[0, 5] = sym.factor(
sym_state_probs_1171[p05] / sym_state_probs_1171[p04]
) # (0,4) -> (0,5)
sym_state_recursive_ratios_1171[1, 5] = sym.factor(
sym_state_probs_1171[p15] / sym_state_probs_1171[p05]
) # (0,5) -> (1,5)
sym_state_recursive_ratios_1171[0, 6] = sym.factor(
sym_state_probs_1171[p06] / sym_state_probs_1171[p05]
) # (0,5) -> (0,6)
sym_state_recursive_ratios_1171[1, 6] = sym.factor(
sym_state_probs_1171[p16] / sym_state_probs_1171[p06]
) # (0,6) -> (1,6)
sym_state_recursive_ratios_1171[0, 7] = sym.factor(
sym_state_probs_1171[p07] / sym_state_probs_1171[p06]
) # (0,6) -> (0,7)
sym_state_recursive_ratios_1171[1, 7] = sym.factor(
sym_state_probs_1171[p17] / sym_state_probs_1171[p07]
) # (0,7) -> (1,7)
sym_state_recursive_ratios_right_1171 = sym_state_recursive_ratios_1171.copy()
sym_state_recursive_ratios_right_1171[1, 2] = sym.factor(
sym_state_probs_1171[p12] / sym_state_probs_1171[p11]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_right_1171[1, 3] = sym.factor(
sym_state_probs_1171[p13] / sym_state_probs_1171[p12]
) # (1,2) -> (1,3)
sym_state_recursive_ratios_right_1171[1, 4] = sym.factor(
sym_state_probs_1171[p14] / sym_state_probs_1171[p13]
) # (1,3) -> (1,4)
sym_state_recursive_ratios_right_1171[1, 5] = sym.factor(
sym_state_probs_1171[p15] / sym_state_probs_1171[p14]
) # (1,4) -> (1,5)
sym_state_recursive_ratios_right_1171[1, 6] = sym.factor(
sym_state_probs_1171[p16] / sym_state_probs_1171[p15]
) # (1,5) -> (1,6)
sym_state_recursive_ratios_right_1171[1, 7] = sym.factor(
sym_state_probs_1171[p17] / sym_state_probs_1171[p16]
) # (1,6) -> (1,7)
sym_state_recursive_ratios_P0_1171 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1171[0, 0] = 1
sym_state_recursive_ratios_P0_1171[0, 1] = sym.factor(
sym_state_probs_1171[p01] / sym_state_probs_1171[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1171[1, 1] = sym.factor(
sym_state_probs_1171[p11] / sym_state_probs_1171[p00]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1171[0, 2] = sym.factor(
sym_state_probs_1171[p02] / sym_state_probs_1171[p00]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1171[1, 2] = sym.factor(
sym_state_probs_1171[p12] / sym_state_probs_1171[p00]
) # (0,0) -> (1,2)
sym_state_recursive_ratios_P0_1171[0, 3] = sym.factor(
sym_state_probs_1171[p03] / sym_state_probs_1171[p00]
) # (0,0) -> (0,3)
sym_state_recursive_ratios_P0_1171[1, 3] = sym.factor(
sym_state_probs_1171[p13] / sym_state_probs_1171[p00]
) # (0,0) -> (1,3)
sym_state_recursive_ratios_P0_1171[0, 4] = sym.factor(
sym_state_probs_1171[p04] / sym_state_probs_1171[p00]
) # (0,0) -> (0,4)
sym_state_recursive_ratios_P0_1171[1, 4] = sym.factor(
sym_state_probs_1171[p14] / sym_state_probs_1171[p00]
) # (0,0) -> (1,4)
sym_state_recursive_ratios_P0_1171[0, 5] = sym.factor(
sym_state_probs_1171[p05] / sym_state_probs_1171[p00]
) # (0,0) -> (0,5)
sym_state_recursive_ratios_P0_1171[1, 5] = sym.factor(
sym_state_probs_1171[p15] / sym_state_probs_1171[p00]
) # (0,0) -> (1,5)
sym_state_recursive_ratios_P0_1171[0, 6] = sym.factor(
sym_state_probs_1171[p06] / sym_state_probs_1171[p00]
) # (0,0) -> (0,6)
sym_state_recursive_ratios_P0_1171[1, 6] = sym.factor(
sym_state_probs_1171[p16] / sym_state_probs_1171[p00]
) # (0,0) -> (1,6)
sym_state_recursive_ratios_P0_1171[0, 7] = sym.factor(
sym_state_probs_1171[p07] / sym_state_probs_1171[p00]
) # (0,0) -> (0,7)
sym_state_recursive_ratios_P0_1171[1, 7] = sym.factor(
sym_state_probs_1171[p17] / sym_state_probs_1171[p00]
) # (0,0) -> (1,7)
return (
sym_state_probs_1171,
sym_state_recursive_ratios_1171,
sym_state_recursive_ratios_right_1171,
sym_state_recursive_ratios_P0_1171,
)
def get_symbolic_state_probabilities_1181():
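    """Symbolic steady-state probabilities of the model with 1 server,
    threshold 1, system capacity 8 and buffer capacity 1, solved from the
    symbolic transition matrix, plus the recursive state-probability ratio
    matrices (standard, "right", and relative to state (0,0)).
    """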
num_of_servers = 1
threshold = 1
system_capacity = 8
buffer_capacity = 1
Q_sym_1181 = abg.markov.get_symbolic_transition_matrix(
num_of_servers, threshold, system_capacity, buffer_capacity
)
(
p00,
p01,
p11,
p02,
p12,
p03,
p13,
p04,
p14,
p05,
p15,
p06,
p16,
p07,
p17,
p08,
p18,
) = sym.symbols(
"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18"
)
pi_1181 = sym.Matrix(
[
p00,
p01,
p11,
p02,
p12,
p03,
p13,
p04,
p14,
p05,
p15,
p06,
p16,
p07,
p17,
p08,
p18,
]
)
dimension_1181 = Q_sym_1181.shape[0]
M_sym_1181 = sym.Matrix(
[Q_sym_1181.transpose()[:-1, :], sym.ones(1, dimension_1181)]
)
sym_diff_equations_1181 = M_sym_1181 @ pi_1181
b_sym_1181 = sym.Matrix([sym.zeros(dimension_1181 - 1, 1), [1]])
eq0_1181 = sym.Eq(sym_diff_equations_1181[0], b_sym_1181[0])
eq1_1181 = sym.Eq(sym_diff_equations_1181[1], b_sym_1181[1])
eq2_1181 = sym.Eq(sym_diff_equations_1181[2], b_sym_1181[2])
eq3_1181 = sym.Eq(sym_diff_equations_1181[3], b_sym_1181[3])
eq4_1181 = sym.Eq(sym_diff_equations_1181[4], b_sym_1181[4])
eq5_1181 = sym.Eq(sym_diff_equations_1181[5], b_sym_1181[5])
eq6_1181 = sym.Eq(sym_diff_equations_1181[6], b_sym_1181[6])
eq7_1181 = sym.Eq(sym_diff_equations_1181[7], b_sym_1181[7])
eq8_1181 = sym.Eq(sym_diff_equations_1181[8], b_sym_1181[8])
eq9_1181 = sym.Eq(sym_diff_equations_1181[9], b_sym_1181[9])
eq10_1181 = sym.Eq(sym_diff_equations_1181[10], b_sym_1181[10])
eq11_1181 = sym.Eq(sym_diff_equations_1181[11], b_sym_1181[11])
eq12_1181 = sym.Eq(sym_diff_equations_1181[12], b_sym_1181[12])
eq13_1181 = sym.Eq(sym_diff_equations_1181[13], b_sym_1181[13])
eq14_1181 = sym.Eq(sym_diff_equations_1181[14], b_sym_1181[14])
eq15_1181 = sym.Eq(sym_diff_equations_1181[15], b_sym_1181[15])
eq16_1181 = sym.Eq(sym_diff_equations_1181[16], b_sym_1181[16])
sym_state_probs_1181 = sym.solve(
[
eq0_1181,
eq1_1181,
eq2_1181,
eq3_1181,
eq4_1181,
eq5_1181,
eq6_1181,
eq7_1181,
eq8_1181,
eq9_1181,
eq10_1181,
eq11_1181,
eq12_1181,
eq13_1181,
eq14_1181,
eq15_1181,
eq16_1181,
],
(
p00,
p01,
p11,
p02,
p12,
p03,
p13,
p04,
p14,
p05,
p15,
p06,
p16,
p07,
p17,
p08,
p18,
),
)
sym_state_recursive_ratios_1181 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1181[0, 0] = 1
sym_state_recursive_ratios_1181[0, 1] = sym.factor(
sym_state_probs_1181[p01] / sym_state_probs_1181[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1181[1, 1] = sym.factor(
sym_state_probs_1181[p11] / sym_state_probs_1181[p01]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1181[0, 2] = sym.factor(
sym_state_probs_1181[p02] / sym_state_probs_1181[p01]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1181[1, 2] = sym.factor(
sym_state_probs_1181[p12] / sym_state_probs_1181[p02]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_1181[0, 3] = sym.factor(
sym_state_probs_1181[p03] / sym_state_probs_1181[p02]
) # (0,2) -> (0,3)
sym_state_recursive_ratios_1181[1, 3] = sym.factor(
sym_state_probs_1181[p13] / sym_state_probs_1181[p03]
) # (0,3) -> (1,3)
sym_state_recursive_ratios_1181[0, 4] = sym.factor(
sym_state_probs_1181[p04] / sym_state_probs_1181[p03]
) # (0,3) -> (0,4)
sym_state_recursive_ratios_1181[1, 4] = sym.factor(
sym_state_probs_1181[p14] / sym_state_probs_1181[p04]
) # (0,4) -> (1,4)
sym_state_recursive_ratios_1181[0, 5] = sym.factor(
sym_state_probs_1181[p05] / sym_state_probs_1181[p04]
) # (0,4) -> (0,5)
sym_state_recursive_ratios_1181[1, 5] = sym.factor(
sym_state_probs_1181[p15] / sym_state_probs_1181[p05]
) # (0,5) -> (1,5)
sym_state_recursive_ratios_1181[0, 6] = sym.factor(
sym_state_probs_1181[p06] / sym_state_probs_1181[p05]
) # (0,5) -> (0,6)
sym_state_recursive_ratios_1181[1, 6] = sym.factor(
sym_state_probs_1181[p16] / sym_state_probs_1181[p06]
) # (0,6) -> (1,6)
sym_state_recursive_ratios_1181[0, 7] = sym.factor(
sym_state_probs_1181[p07] / sym_state_probs_1181[p06]
) # (0,6) -> (0,7)
sym_state_recursive_ratios_1181[1, 7] = sym.factor(
sym_state_probs_1181[p17] / sym_state_probs_1181[p07]
) # (0,7) -> (1,7)
sym_state_recursive_ratios_1181[0, 8] = sym.factor(
sym_state_probs_1181[p08] / sym_state_probs_1181[p07]
) # (0,7) -> (0,8)
sym_state_recursive_ratios_1181[1, 8] = sym.factor(
sym_state_probs_1181[p18] / sym_state_probs_1181[p08]
) # (0,8) -> (1,8)
sym_state_recursive_ratios_right_1181 = sym_state_recursive_ratios_1181.copy()
sym_state_recursive_ratios_right_1181[1, 2] = sym.factor(
sym_state_probs_1181[p12] / sym_state_probs_1181[p11]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_right_1181[1, 3] = sym.factor(
sym_state_probs_1181[p13] / sym_state_probs_1181[p12]
) # (1,2) -> (1,3)
sym_state_recursive_ratios_right_1181[1, 4] = sym.factor(
sym_state_probs_1181[p14] / sym_state_probs_1181[p13]
) # (1,3) -> (1,4)
sym_state_recursive_ratios_right_1181[1, 5] = sym.factor(
sym_state_probs_1181[p15] / sym_state_probs_1181[p14]
) # (1,4) -> (1,5)
sym_state_recursive_ratios_right_1181[1, 6] = sym.factor(
sym_state_probs_1181[p16] / sym_state_probs_1181[p15]
) # (1,5) -> (1,6)
sym_state_recursive_ratios_right_1181[1, 7] = sym.factor(
sym_state_probs_1181[p17] / sym_state_probs_1181[p16]
) # (1,6) -> (1,7)
sym_state_recursive_ratios_right_1181[1, 8] = sym.factor(
sym_state_probs_1181[p18] / sym_state_probs_1181[p17]
) # (1,7) -> (1,8)
sym_state_recursive_ratios_P0_1181 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1181[0, 0] = 1
sym_state_recursive_ratios_P0_1181[0, 1] = sym.factor(
sym_state_probs_1181[p01] / sym_state_probs_1181[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1181[1, 1] = sym.factor(
sym_state_probs_1181[p11] / sym_state_probs_1181[p00]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1181[0, 2] = sym.factor(
sym_state_probs_1181[p02] / sym_state_probs_1181[p00]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1181[1, 2] = sym.factor(
sym_state_probs_1181[p12] / sym_state_probs_1181[p00]
) # (0,0) -> (1,2)
sym_state_recursive_ratios_P0_1181[0, 3] = sym.factor(
sym_state_probs_1181[p03] / sym_state_probs_1181[p00]
) # (0,0) -> (0,3)
sym_state_recursive_ratios_P0_1181[1, 3] = sym.factor(
sym_state_probs_1181[p13] / sym_state_probs_1181[p00]
) # (0,0) -> (1,3)
sym_state_recursive_ratios_P0_1181[0, 4] = sym.factor(
sym_state_probs_1181[p04] / sym_state_probs_1181[p00]
) # (0,0) -> (0,4)
sym_state_recursive_ratios_P0_1181[1, 4] = sym.factor(
sym_state_probs_1181[p14] / sym_state_probs_1181[p00]
) # (0,0) -> (1,4)
sym_state_recursive_ratios_P0_1181[0, 5] = sym.factor(
sym_state_probs_1181[p05] / sym_state_probs_1181[p00]
) # (0,0) -> (0,5)
sym_state_recursive_ratios_P0_1181[1, 5] = sym.factor(
sym_state_probs_1181[p15] / sym_state_probs_1181[p00]
) # (0,0) -> (1,5)
sym_state_recursive_ratios_P0_1181[0, 6] = sym.factor(
sym_state_probs_1181[p06] / sym_state_probs_1181[p00]
) # (0,0) -> (0,6)
sym_state_recursive_ratios_P0_1181[1, 6] = sym.factor(
sym_state_probs_1181[p16] / sym_state_probs_1181[p00]
) # (0,0) -> (1,6)
sym_state_recursive_ratios_P0_1181[0, 7] = sym.factor(
sym_state_probs_1181[p07] / sym_state_probs_1181[p00]
) # (0,0) -> (0,7)
sym_state_recursive_ratios_P0_1181[1, 7] = sym.factor(
sym_state_probs_1181[p17] / sym_state_probs_1181[p00]
) # (0,0) -> (1,7)
sym_state_recursive_ratios_P0_1181[0, 8] = sym.factor(
sym_state_probs_1181[p08] / sym_state_probs_1181[p00]
) # (0,0) -> (0,8)
sym_state_recursive_ratios_P0_1181[1, 8] = sym.factor(
sym_state_probs_1181[p18] / sym_state_probs_1181[p00]
) # (0,0) -> (1,8)
return (
sym_state_probs_1181,
sym_state_recursive_ratios_1181,
sym_state_recursive_ratios_right_1181,
sym_state_recursive_ratios_P0_1181,
)
def get_symbolic_state_probabilities_1191():
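    """Symbolic steady-state probabilities of the model with 1 server,
    threshold 1, system capacity 9 and buffer capacity 1, solved from the
    symbolic transition matrix, plus the recursive state-probability ratio
    matrices (standard, "right", and relative to state (0,0)).
    """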
num_of_servers = 1
threshold = 1
system_capacity = 9
buffer_capacity = 1
Q_sym_1191 = abg.markov.get_symbolic_transition_matrix(
num_of_servers, threshold, system_capacity, buffer_capacity
)
(
p00,
p01,
p11,
p02,
p12,
p03,
p13,
p04,
p14,
p05,
p15,
p06,
p16,
p07,
p17,
p08,
p18,
p09,
p19,
) = sym.symbols(
"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18, p09, p19"
)
pi_1191 = sym.Matrix(
[
p00,
p01,
p11,
p02,
p12,
p03,
p13,
p04,
p14,
p05,
p15,
p06,
p16,
p07,
p17,
p08,
p18,
p09,
p19,
]
)
dimension_1191 = Q_sym_1191.shape[0]
M_sym_1191 = sym.Matrix(
[Q_sym_1191.transpose()[:-1, :], sym.ones(1, dimension_1191)]
)
sym_diff_equations_1191 = M_sym_1191 @ pi_1191
b_sym_1191 = sym.Matrix([sym.zeros(dimension_1191 - 1, 1), [1]])
eq0_1191 = sym.Eq(sym_diff_equations_1191[0], b_sym_1191[0])
eq1_1191 = sym.Eq(sym_diff_equations_1191[1], b_sym_1191[1])
eq2_1191 = sym.Eq(sym_diff_equations_1191[2], b_sym_1191[2])
eq3_1191 = sym.Eq(sym_diff_equations_1191[3], b_sym_1191[3])
eq4_1191 = sym.Eq(sym_diff_equations_1191[4], b_sym_1191[4])
eq5_1191 = sym.Eq(sym_diff_equations_1191[5], b_sym_1191[5])
eq6_1191 = sym.Eq(sym_diff_equations_1191[6], b_sym_1191[6])
eq7_1191 = sym.Eq(sym_diff_equations_1191[7], b_sym_1191[7])
eq8_1191 = sym.Eq(sym_diff_equations_1191[8], b_sym_1191[8])
eq9_1191 = sym.Eq(sym_diff_equations_1191[9], b_sym_1191[9])
eq10_1191 = sym.Eq(sym_diff_equations_1191[10], b_sym_1191[10])
eq11_1191 = sym.Eq(sym_diff_equations_1191[11], b_sym_1191[11])
eq12_1191 = sym.Eq(sym_diff_equations_1191[12], b_sym_1191[12])
eq13_1191 = sym.Eq(sym_diff_equations_1191[13], b_sym_1191[13])
eq14_1191 = sym.Eq(sym_diff_equations_1191[14], b_sym_1191[14])
eq15_1191 = sym.Eq(sym_diff_equations_1191[15], b_sym_1191[15])
eq16_1191 = sym.Eq(sym_diff_equations_1191[16], b_sym_1191[16])
eq17_1191 = sym.Eq(sym_diff_equations_1191[17], b_sym_1191[17])
eq18_1191 = sym.Eq(sym_diff_equations_1191[18], b_sym_1191[18])
sym_state_probs_1191 = sym.solve(
[
eq0_1191,
eq1_1191,
eq2_1191,
eq3_1191,
eq4_1191,
eq5_1191,
eq6_1191,
eq7_1191,
eq8_1191,
eq9_1191,
eq10_1191,
eq11_1191,
eq12_1191,
eq13_1191,
eq14_1191,
eq15_1191,
eq16_1191,
eq17_1191,
eq18_1191,
],
(
p00,
p01,
p11,
p02,
p12,
p03,
p13,
p04,
p14,
p05,
p15,
p06,
p16,
p07,
p17,
p08,
p18,
p09,
p19,
),
)
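    # sym.solve returns a dict mapping each state symbol (p00, p01, ..., p19)
    # to its steady-state probability, expressed as a rational function of the
    # model's symbolic rates.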
sym_state_recursive_ratios_1191 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1191[0, 0] = 1
sym_state_recursive_ratios_1191[0, 1] = sym.factor(
sym_state_probs_1191[p01] / sym_state_probs_1191[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1191[1, 1] = sym.factor(
sym_state_probs_1191[p11] / sym_state_probs_1191[p01]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1191[0, 2] = sym.factor(
sym_state_probs_1191[p02] / sym_state_probs_1191[p01]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1191[1, 2] = sym.factor(
sym_state_probs_1191[p12] / sym_state_probs_1191[p02]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_1191[0, 3] = sym.factor(
sym_state_probs_1191[p03] / sym_state_probs_1191[p02]
) # (0,2) -> (0,3)
sym_state_recursive_ratios_1191[1, 3] = sym.factor(
sym_state_probs_1191[p13] / sym_state_probs_1191[p03]
) # (0,3) -> (1,3)
sym_state_recursive_ratios_1191[0, 4] = sym.factor(
sym_state_probs_1191[p04] / sym_state_probs_1191[p03]
) # (0,3) -> (0,4)
sym_state_recursive_ratios_1191[1, 4] = sym.factor(
sym_state_probs_1191[p14] / sym_state_probs_1191[p04]
) # (0,4) -> (1,4)
sym_state_recursive_ratios_1191[0, 5] = sym.factor(
sym_state_probs_1191[p05] / sym_state_probs_1191[p04]
) # (0,4) -> (0,5)
sym_state_recursive_ratios_1191[1, 5] = sym.factor(
sym_state_probs_1191[p15] / sym_state_probs_1191[p05]
) # (0,5) -> (1,5)
sym_state_recursive_ratios_1191[0, 6] = sym.factor(
sym_state_probs_1191[p06] / sym_state_probs_1191[p05]
) # (0,5) -> (0,6)
sym_state_recursive_ratios_1191[1, 6] = sym.factor(
sym_state_probs_1191[p16] / sym_state_probs_1191[p06]
) # (0,6) -> (1,6)
sym_state_recursive_ratios_1191[0, 7] = sym.factor(
sym_state_probs_1191[p07] / sym_state_probs_1191[p06]
) # (0,6) -> (0,7)
sym_state_recursive_ratios_1191[1, 7] = sym.factor(
sym_state_probs_1191[p17] / sym_state_probs_1191[p07]
) # (0,7) -> (1,7)
sym_state_recursive_ratios_1191[0, 8] = sym.factor(
sym_state_probs_1191[p08] / sym_state_probs_1191[p07]
) # (0,7) -> (0,8)
sym_state_recursive_ratios_1191[1, 8] = sym.factor(
sym_state_probs_1191[p18] / sym_state_probs_1191[p08]
) # (0,8) -> (1,8)
sym_state_recursive_ratios_1191[0, 9] = sym.factor(
sym_state_probs_1191[p09] / sym_state_probs_1191[p08]
) # (0,8) -> (0,9)
sym_state_recursive_ratios_1191[1, 9] = sym.factor(
sym_state_probs_1191[p19] / sym_state_probs_1191[p09]
) # (0,9) -> (1,9)
sym_state_recursive_ratios_right_1191 = sym_state_recursive_ratios_1191.copy()
sym_state_recursive_ratios_right_1191[1, 2] = sym.factor(
sym_state_probs_1191[p12] / sym_state_probs_1191[p11]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_right_1191[1, 3] = sym.factor(
sym_state_probs_1191[p13] / sym_state_probs_1191[p12]
) # (1,2) -> (1,3)
sym_state_recursive_ratios_right_1191[1, 4] = sym.factor(
sym_state_probs_1191[p14] / sym_state_probs_1191[p13]
) # (1,3) -> (1,4)
sym_state_recursive_ratios_right_1191[1, 5] = sym.factor(
sym_state_probs_1191[p15] / sym_state_probs_1191[p14]
) # (1,4) -> (1,5)
sym_state_recursive_ratios_right_1191[1, 6] = sym.factor(
sym_state_probs_1191[p16] / sym_state_probs_1191[p15]
) # (1,5) -> (1,6)
sym_state_recursive_ratios_right_1191[1, 7] = sym.factor(
sym_state_probs_1191[p17] / sym_state_probs_1191[p16]
) # (1,6) -> (1,7)
sym_state_recursive_ratios_right_1191[1, 8] = sym.factor(
sym_state_probs_1191[p18] / sym_state_probs_1191[p17]
) # (1,7) -> (1,8)
    sym_state_recursive_ratios_right_1191[1, 9] = sym.factor(
        sym_state_probs_1191[p19] / sym_state_probs_1191[p18]
    )  # (1,8) -> (1,9)
sym_state_recursive_ratios_P0_1191 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1191[0, 0] = 1
sym_state_recursive_ratios_P0_1191[0, 1] = sym.factor(
sym_state_probs_1191[p01] / sym_state_probs_1191[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1191[1, 1] = sym.factor(
sym_state_probs_1191[p11] / sym_state_probs_1191[p00]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1191[0, 2] = sym.factor(
sym_state_probs_1191[p02] / sym_state_probs_1191[p00]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1191[1, 2] = sym.factor(
sym_state_probs_1191[p12] / sym_state_probs_1191[p00]
) # (0,0) -> (1,2)
sym_state_recursive_ratios_P0_1191[0, 3] = sym.factor(
sym_state_probs_1191[p03] / sym_state_probs_1191[p00]
) # (0,0) -> (0,3)
sym_state_recursive_ratios_P0_1191[1, 3] = sym.factor(
sym_state_probs_1191[p13] / sym_state_probs_1191[p00]
) # (0,0) -> (1,3)
sym_state_recursive_ratios_P0_1191[0, 4] = sym.factor(
sym_state_probs_1191[p04] / sym_state_probs_1191[p00]
) # (0,0) -> (0,4)
sym_state_recursive_ratios_P0_1191[1, 4] = sym.factor(
sym_state_probs_1191[p14] / sym_state_probs_1191[p00]
) # (0,0) -> (1,4)
sym_state_recursive_ratios_P0_1191[0, 5] = sym.factor(
sym_state_probs_1191[p05] / sym_state_probs_1191[p00]
) # (0,0) -> (0,5)
sym_state_recursive_ratios_P0_1191[1, 5] = sym.factor(
sym_state_probs_1191[p15] / sym_state_probs_1191[p00]
) # (0,0) -> (1,5)
sym_state_recursive_ratios_P0_1191[0, 6] = sym.factor(
sym_state_probs_1191[p06] / sym_state_probs_1191[p00]
) # (0,0) -> (0,6)
sym_state_recursive_ratios_P0_1191[1, 6] = sym.factor(
sym_state_probs_1191[p16] / sym_state_probs_1191[p00]
) # (0,0) -> (1,6)
sym_state_recursive_ratios_P0_1191[0, 7] = sym.factor(
sym_state_probs_1191[p07] / sym_state_probs_1191[p00]
) # (0,0) -> (0,7)
sym_state_recursive_ratios_P0_1191[1, 7] = sym.factor(
sym_state_probs_1191[p17] / sym_state_probs_1191[p00]
) # (0,0) -> (1,7)
sym_state_recursive_ratios_P0_1191[0, 8] = sym.factor(
sym_state_probs_1191[p08] / sym_state_probs_1191[p00]
) # (0,0) -> (0,8)
sym_state_recursive_ratios_P0_1191[1, 8] = sym.factor(
sym_state_probs_1191[p18] / sym_state_probs_1191[p00]
) # (0,0) -> (1,8)
sym_state_recursive_ratios_P0_1191[0, 9] = sym.factor(
sym_state_probs_1191[p09] / sym_state_probs_1191[p00]
) # (0,0) -> (0,9)
sym_state_recursive_ratios_P0_1191[1, 9] = sym.factor(
sym_state_probs_1191[p19] / sym_state_probs_1191[p00]
) # (0,0) -> (1,9)
return (
sym_state_probs_1191,
sym_state_recursive_ratios_1191,
sym_state_recursive_ratios_right_1191,
sym_state_recursive_ratios_P0_1191,
)
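# Hedged usage sketch (added for illustration; not part of the original
# module).  Solving the 19-state system symbolically can take a while.  The
# model is assumed to have four symbolic rates (Lambda, lambda_1, lambda_2,
# mu); the numeric values below are arbitrary placeholders.
if __name__ == "__main__":
    (
        probs_1191,
        ratios_1191,
        ratios_right_1191,
        ratios_P0_1191,
    ) = get_symbolic_state_probabilities_1191()
    # Collect the symbolic rates from the solved expressions and substitute
    # example values (sorted order assumed: Lambda, lambda_1, lambda_2, mu).
    rate_symbols = sorted(
        set().union(*(expr.free_symbols for expr in probs_1191.values())), key=str
    )
    example_rates = dict(zip(rate_symbols, (3, 2, 1, 5)))
    # The steady-state probabilities should sum to one for any valid rates.
    total = sum(expr.subs(example_rates) for expr in probs_1191.values())
    print(sym.simplify(total))  # expected: 1
    # P(0,1) / P(0,0) evaluated at the example rates.
    print(ratios_1191[0, 1].subs(example_rates))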
sym.factor(sym_state_probs_1171\n [p12] / sym_state_probs_1171[p02])\n sym_state_recursive_ratios_1171[0, 3] = sym.factor(sym_state_probs_1171\n [p03] / sym_state_probs_1171[p02])\n sym_state_recursive_ratios_1171[1, 3] = sym.factor(sym_state_probs_1171\n [p13] / sym_state_probs_1171[p03])\n sym_state_recursive_ratios_1171[0, 4] = sym.factor(sym_state_probs_1171\n [p04] / sym_state_probs_1171[p03])\n sym_state_recursive_ratios_1171[1, 4] = sym.factor(sym_state_probs_1171\n [p14] / sym_state_probs_1171[p04])\n sym_state_recursive_ratios_1171[0, 5] = sym.factor(sym_state_probs_1171\n [p05] / sym_state_probs_1171[p04])\n sym_state_recursive_ratios_1171[1, 5] = sym.factor(sym_state_probs_1171\n [p15] / sym_state_probs_1171[p05])\n sym_state_recursive_ratios_1171[0, 6] = sym.factor(sym_state_probs_1171\n [p06] / sym_state_probs_1171[p05])\n sym_state_recursive_ratios_1171[1, 6] = sym.factor(sym_state_probs_1171\n [p16] / sym_state_probs_1171[p06])\n sym_state_recursive_ratios_1171[0, 7] = sym.factor(sym_state_probs_1171\n [p07] / sym_state_probs_1171[p06])\n sym_state_recursive_ratios_1171[1, 7] = sym.factor(sym_state_probs_1171\n [p17] / sym_state_probs_1171[p07])\n sym_state_recursive_ratios_right_1171 = (sym_state_recursive_ratios_1171\n .copy())\n sym_state_recursive_ratios_right_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p11])\n sym_state_recursive_ratios_right_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p12])\n sym_state_recursive_ratios_right_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p13])\n sym_state_recursive_ratios_right_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p14])\n sym_state_recursive_ratios_right_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p15])\n sym_state_recursive_ratios_right_1171[1, 7] = sym.factor(\n sym_state_probs_1171[p17] / sym_state_probs_1171[p16])\n sym_state_recursive_ratios_P0_1171 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1171[0, 0] = 1\n sym_state_recursive_ratios_P0_1171[0, 1] = sym.factor(\n sym_state_probs_1171[p01] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 1] = sym.factor(\n sym_state_probs_1171[p11] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 2] = sym.factor(\n sym_state_probs_1171[p02] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 3] = sym.factor(\n sym_state_probs_1171[p03] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 4] = sym.factor(\n sym_state_probs_1171[p04] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 5] = sym.factor(\n sym_state_probs_1171[p05] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 6] = sym.factor(\n sym_state_probs_1171[p06] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 7] = sym.factor(\n 
sym_state_probs_1171[p07] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 7] = sym.factor(\n sym_state_probs_1171[p17] / sym_state_probs_1171[p00])\n return (sym_state_probs_1171, sym_state_recursive_ratios_1171,\n sym_state_recursive_ratios_right_1171,\n sym_state_recursive_ratios_P0_1171)\n\n\ndef get_symbolic_state_probabilities_1181():\n num_of_servers = 1\n threshold = 1\n system_capacity = 8\n buffer_capacity = 1\n Q_sym_1181 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07,\n p17, p08, p18) = (sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18'\n ))\n pi_1181 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16, p07, p17, p08, p18])\n dimension_1181 = Q_sym_1181.shape[0]\n M_sym_1181 = sym.Matrix([Q_sym_1181.transpose()[:-1, :], sym.ones(1,\n dimension_1181)])\n sym_diff_equations_1181 = M_sym_1181 @ pi_1181\n b_sym_1181 = sym.Matrix([sym.zeros(dimension_1181 - 1, 1), [1]])\n eq0_1181 = sym.Eq(sym_diff_equations_1181[0], b_sym_1181[0])\n eq1_1181 = sym.Eq(sym_diff_equations_1181[1], b_sym_1181[1])\n eq2_1181 = sym.Eq(sym_diff_equations_1181[2], b_sym_1181[2])\n eq3_1181 = sym.Eq(sym_diff_equations_1181[3], b_sym_1181[3])\n eq4_1181 = sym.Eq(sym_diff_equations_1181[4], b_sym_1181[4])\n eq5_1181 = sym.Eq(sym_diff_equations_1181[5], b_sym_1181[5])\n eq6_1181 = sym.Eq(sym_diff_equations_1181[6], b_sym_1181[6])\n eq7_1181 = sym.Eq(sym_diff_equations_1181[7], b_sym_1181[7])\n eq8_1181 = sym.Eq(sym_diff_equations_1181[8], b_sym_1181[8])\n eq9_1181 = sym.Eq(sym_diff_equations_1181[9], b_sym_1181[9])\n eq10_1181 = sym.Eq(sym_diff_equations_1181[10], b_sym_1181[10])\n eq11_1181 = sym.Eq(sym_diff_equations_1181[11], b_sym_1181[11])\n eq12_1181 = sym.Eq(sym_diff_equations_1181[12], b_sym_1181[12])\n eq13_1181 = sym.Eq(sym_diff_equations_1181[13], b_sym_1181[13])\n eq14_1181 = sym.Eq(sym_diff_equations_1181[14], b_sym_1181[14])\n eq15_1181 = sym.Eq(sym_diff_equations_1181[15], b_sym_1181[15])\n eq16_1181 = sym.Eq(sym_diff_equations_1181[16], b_sym_1181[16])\n sym_state_probs_1181 = sym.solve([eq0_1181, eq1_1181, eq2_1181,\n eq3_1181, eq4_1181, eq5_1181, eq6_1181, eq7_1181, eq8_1181,\n eq9_1181, eq10_1181, eq11_1181, eq12_1181, eq13_1181, eq14_1181,\n eq15_1181, eq16_1181], (p00, p01, p11, p02, p12, p03, p13, p04, p14,\n p05, p15, p06, p16, p07, p17, p08, p18))\n sym_state_recursive_ratios_1181 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1181[0, 0] = 1\n sym_state_recursive_ratios_1181[0, 1] = sym.factor(sym_state_probs_1181\n [p01] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_1181[1, 1] = sym.factor(sym_state_probs_1181\n [p11] / sym_state_probs_1181[p01])\n sym_state_recursive_ratios_1181[0, 2] = sym.factor(sym_state_probs_1181\n [p02] / sym_state_probs_1181[p01])\n sym_state_recursive_ratios_1181[1, 2] = sym.factor(sym_state_probs_1181\n [p12] / sym_state_probs_1181[p02])\n sym_state_recursive_ratios_1181[0, 3] = sym.factor(sym_state_probs_1181\n [p03] / sym_state_probs_1181[p02])\n sym_state_recursive_ratios_1181[1, 3] = sym.factor(sym_state_probs_1181\n [p13] / sym_state_probs_1181[p03])\n sym_state_recursive_ratios_1181[0, 4] = sym.factor(sym_state_probs_1181\n [p04] / sym_state_probs_1181[p03])\n sym_state_recursive_ratios_1181[1, 4] = sym.factor(sym_state_probs_1181\n [p14] / sym_state_probs_1181[p04])\n 
sym_state_recursive_ratios_1181[0, 5] = sym.factor(sym_state_probs_1181\n [p05] / sym_state_probs_1181[p04])\n sym_state_recursive_ratios_1181[1, 5] = sym.factor(sym_state_probs_1181\n [p15] / sym_state_probs_1181[p05])\n sym_state_recursive_ratios_1181[0, 6] = sym.factor(sym_state_probs_1181\n [p06] / sym_state_probs_1181[p05])\n sym_state_recursive_ratios_1181[1, 6] = sym.factor(sym_state_probs_1181\n [p16] / sym_state_probs_1181[p06])\n sym_state_recursive_ratios_1181[0, 7] = sym.factor(sym_state_probs_1181\n [p07] / sym_state_probs_1181[p06])\n sym_state_recursive_ratios_1181[1, 7] = sym.factor(sym_state_probs_1181\n [p17] / sym_state_probs_1181[p07])\n sym_state_recursive_ratios_1181[0, 8] = sym.factor(sym_state_probs_1181\n [p08] / sym_state_probs_1181[p07])\n sym_state_recursive_ratios_1181[1, 8] = sym.factor(sym_state_probs_1181\n [p18] / sym_state_probs_1181[p08])\n sym_state_recursive_ratios_right_1181 = (sym_state_recursive_ratios_1181\n .copy())\n sym_state_recursive_ratios_right_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p11])\n sym_state_recursive_ratios_right_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p12])\n sym_state_recursive_ratios_right_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p13])\n sym_state_recursive_ratios_right_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p14])\n sym_state_recursive_ratios_right_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p15])\n sym_state_recursive_ratios_right_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p16])\n sym_state_recursive_ratios_right_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p17])\n sym_state_recursive_ratios_P0_1181 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1181[0, 0] = 1\n sym_state_recursive_ratios_P0_1181[0, 1] = sym.factor(\n sym_state_probs_1181[p01] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 1] = sym.factor(\n sym_state_probs_1181[p11] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 2] = sym.factor(\n sym_state_probs_1181[p02] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 3] = sym.factor(\n sym_state_probs_1181[p03] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 4] = sym.factor(\n sym_state_probs_1181[p04] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 5] = sym.factor(\n sym_state_probs_1181[p05] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 6] = sym.factor(\n sym_state_probs_1181[p06] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 7] = sym.factor(\n sym_state_probs_1181[p07] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p00])\n 
sym_state_recursive_ratios_P0_1181[0, 8] = sym.factor(\n sym_state_probs_1181[p08] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p00])\n return (sym_state_probs_1181, sym_state_recursive_ratios_1181,\n sym_state_recursive_ratios_right_1181,\n sym_state_recursive_ratios_P0_1181)\n\n\ndef get_symbolic_state_probabilities_1191():\n num_of_servers = 1\n threshold = 1\n system_capacity = 9\n buffer_capacity = 1\n Q_sym_1191 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07,\n p17, p08, p18, p09, p19) = (sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18, p09, p19'\n ))\n pi_1191 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16, p07, p17, p08, p18, p09, p19])\n dimension_1191 = Q_sym_1191.shape[0]\n M_sym_1191 = sym.Matrix([Q_sym_1191.transpose()[:-1, :], sym.ones(1,\n dimension_1191)])\n sym_diff_equations_1191 = M_sym_1191 @ pi_1191\n b_sym_1191 = sym.Matrix([sym.zeros(dimension_1191 - 1, 1), [1]])\n eq0_1191 = sym.Eq(sym_diff_equations_1191[0], b_sym_1191[0])\n eq1_1191 = sym.Eq(sym_diff_equations_1191[1], b_sym_1191[1])\n eq2_1191 = sym.Eq(sym_diff_equations_1191[2], b_sym_1191[2])\n eq3_1191 = sym.Eq(sym_diff_equations_1191[3], b_sym_1191[3])\n eq4_1191 = sym.Eq(sym_diff_equations_1191[4], b_sym_1191[4])\n eq5_1191 = sym.Eq(sym_diff_equations_1191[5], b_sym_1191[5])\n eq6_1191 = sym.Eq(sym_diff_equations_1191[6], b_sym_1191[6])\n eq7_1191 = sym.Eq(sym_diff_equations_1191[7], b_sym_1191[7])\n eq8_1191 = sym.Eq(sym_diff_equations_1191[8], b_sym_1191[8])\n eq9_1191 = sym.Eq(sym_diff_equations_1191[9], b_sym_1191[9])\n eq10_1191 = sym.Eq(sym_diff_equations_1191[10], b_sym_1191[10])\n eq11_1191 = sym.Eq(sym_diff_equations_1191[11], b_sym_1191[11])\n eq12_1191 = sym.Eq(sym_diff_equations_1191[12], b_sym_1191[12])\n eq13_1191 = sym.Eq(sym_diff_equations_1191[13], b_sym_1191[13])\n eq14_1191 = sym.Eq(sym_diff_equations_1191[14], b_sym_1191[14])\n eq15_1191 = sym.Eq(sym_diff_equations_1191[15], b_sym_1191[15])\n eq16_1191 = sym.Eq(sym_diff_equations_1191[16], b_sym_1191[16])\n eq17_1191 = sym.Eq(sym_diff_equations_1191[17], b_sym_1191[17])\n eq18_1191 = sym.Eq(sym_diff_equations_1191[18], b_sym_1191[18])\n sym_state_probs_1191 = sym.solve([eq0_1191, eq1_1191, eq2_1191,\n eq3_1191, eq4_1191, eq5_1191, eq6_1191, eq7_1191, eq8_1191,\n eq9_1191, eq10_1191, eq11_1191, eq12_1191, eq13_1191, eq14_1191,\n eq15_1191, eq16_1191, eq17_1191, eq18_1191], (p00, p01, p11, p02,\n p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18,\n p09, p19))\n sym_state_recursive_ratios_1191 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1191[0, 0] = 1\n sym_state_recursive_ratios_1191[0, 1] = sym.factor(sym_state_probs_1191\n [p01] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_1191[1, 1] = sym.factor(sym_state_probs_1191\n [p11] / sym_state_probs_1191[p01])\n sym_state_recursive_ratios_1191[0, 2] = sym.factor(sym_state_probs_1191\n [p02] / sym_state_probs_1191[p01])\n sym_state_recursive_ratios_1191[1, 2] = sym.factor(sym_state_probs_1191\n [p12] / sym_state_probs_1191[p02])\n sym_state_recursive_ratios_1191[0, 3] = sym.factor(sym_state_probs_1191\n [p03] / sym_state_probs_1191[p02])\n sym_state_recursive_ratios_1191[1, 3] = sym.factor(sym_state_probs_1191\n [p13] / 
sym_state_probs_1191[p03])\n sym_state_recursive_ratios_1191[0, 4] = sym.factor(sym_state_probs_1191\n [p04] / sym_state_probs_1191[p03])\n sym_state_recursive_ratios_1191[1, 4] = sym.factor(sym_state_probs_1191\n [p14] / sym_state_probs_1191[p04])\n sym_state_recursive_ratios_1191[0, 5] = sym.factor(sym_state_probs_1191\n [p05] / sym_state_probs_1191[p04])\n sym_state_recursive_ratios_1191[1, 5] = sym.factor(sym_state_probs_1191\n [p15] / sym_state_probs_1191[p05])\n sym_state_recursive_ratios_1191[0, 6] = sym.factor(sym_state_probs_1191\n [p06] / sym_state_probs_1191[p05])\n sym_state_recursive_ratios_1191[1, 6] = sym.factor(sym_state_probs_1191\n [p16] / sym_state_probs_1191[p06])\n sym_state_recursive_ratios_1191[0, 7] = sym.factor(sym_state_probs_1191\n [p07] / sym_state_probs_1191[p06])\n sym_state_recursive_ratios_1191[1, 7] = sym.factor(sym_state_probs_1191\n [p17] / sym_state_probs_1191[p07])\n sym_state_recursive_ratios_1191[0, 8] = sym.factor(sym_state_probs_1191\n [p08] / sym_state_probs_1191[p07])\n sym_state_recursive_ratios_1191[1, 8] = sym.factor(sym_state_probs_1191\n [p18] / sym_state_probs_1191[p08])\n sym_state_recursive_ratios_1191[0, 9] = sym.factor(sym_state_probs_1191\n [p09] / sym_state_probs_1191[p08])\n sym_state_recursive_ratios_1191[1, 9] = sym.factor(sym_state_probs_1191\n [p19] / sym_state_probs_1191[p09])\n sym_state_recursive_ratios_right_1191 = (sym_state_recursive_ratios_1191\n .copy())\n sym_state_recursive_ratios_right_1191[1, 2] = sym.factor(\n sym_state_probs_1191[p12] / sym_state_probs_1191[p11])\n sym_state_recursive_ratios_right_1191[1, 3] = sym.factor(\n sym_state_probs_1191[p13] / sym_state_probs_1191[p12])\n sym_state_recursive_ratios_right_1191[1, 4] = sym.factor(\n sym_state_probs_1191[p14] / sym_state_probs_1191[p13])\n sym_state_recursive_ratios_right_1191[1, 5] = sym.factor(\n sym_state_probs_1191[p15] / sym_state_probs_1191[p14])\n sym_state_recursive_ratios_right_1191[1, 6] = sym.factor(\n sym_state_probs_1191[p16] / sym_state_probs_1191[p15])\n sym_state_recursive_ratios_right_1191[1, 7] = sym.factor(\n sym_state_probs_1191[p17] / sym_state_probs_1191[p16])\n sym_state_recursive_ratios_right_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p17])\n sym_state_recursive_ratios_right_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p17])\n sym_state_recursive_ratios_P0_1191 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1191[0, 0] = 1\n sym_state_recursive_ratios_P0_1191[0, 1] = sym.factor(\n sym_state_probs_1191[p01] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 1] = sym.factor(\n sym_state_probs_1191[p11] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 2] = sym.factor(\n sym_state_probs_1191[p02] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 2] = sym.factor(\n sym_state_probs_1191[p12] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 3] = sym.factor(\n sym_state_probs_1191[p03] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 3] = sym.factor(\n sym_state_probs_1191[p13] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 4] = sym.factor(\n sym_state_probs_1191[p04] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 4] = sym.factor(\n sym_state_probs_1191[p14] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 5] = sym.factor(\n sym_state_probs_1191[p05] / sym_state_probs_1191[p00])\n 
sym_state_recursive_ratios_P0_1191[1, 5] = sym.factor(\n sym_state_probs_1191[p15] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 6] = sym.factor(\n sym_state_probs_1191[p06] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 6] = sym.factor(\n sym_state_probs_1191[p16] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 7] = sym.factor(\n sym_state_probs_1191[p07] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 7] = sym.factor(\n sym_state_probs_1191[p17] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 8] = sym.factor(\n sym_state_probs_1191[p08] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 9] = sym.factor(\n sym_state_probs_1191[p09] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 9] = sym.factor(\n sym_state_probs_1191[p19] / sym_state_probs_1191[p00])\n return (sym_state_probs_1191, sym_state_recursive_ratios_1191,\n sym_state_recursive_ratios_right_1191,\n sym_state_recursive_ratios_P0_1191)\n",
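# --- Illustrative sketch (not from the original source): the hard-coded
# get_symbolic_state_probabilities_1151, _1161, _1171, ... functions in the
# code strings above all follow one recipe -- build the symbolic transition
# matrix, swap the last balance equation for the normalisation condition,
# solve with sympy, then take ratios of the resulting probabilities.  A
# parametrised equivalent could look roughly as follows.  The helper name,
# the assumed (u, v) ordering of the tuples returned by
# abg.markov.build_states, and the "p{u}{v}" symbol naming are assumptions
# made for this sketch only.
import sympy as sym

import ambulance_game as abg


def get_symbolic_state_probabilities(
    num_of_servers, threshold, system_capacity, buffer_capacity
):
    """Symbolically solve the balance equations with the normalisation condition."""
    all_states = abg.markov.build_states(
        threshold=threshold,
        system_capacity=system_capacity,
        buffer_capacity=buffer_capacity,
    )
    Q_sym = abg.markov.get_symbolic_transition_matrix(
        num_of_servers, threshold, system_capacity, buffer_capacity
    )
    dimension = Q_sym.shape[0]
    # One symbol per state, e.g. p00, p01, p11, ..., as in the explicit functions.
    pi = sym.Matrix([sym.Symbol(f"p{u}{v}") for u, v in all_states])
    # Drop the last balance equation and append the normalisation condition.
    M_sym = sym.Matrix([Q_sym.transpose()[:-1, :], sym.ones(1, dimension)])
    b_sym = sym.Matrix([sym.zeros(dimension - 1, 1), [1]])
    equations = [sym.Eq(lhs, rhs) for lhs, rhs in zip(M_sym @ pi, b_sym)]
    state_probs = sym.solve(equations, list(pi))
    # "P0" ratios: every state probability divided by the empty-state one;
    # the (0, 0) entry reduces to 1, matching the explicit functions.
    ratios_P0 = sym.zeros(buffer_capacity + 1, system_capacity + 1)
    p00 = sym.Symbol("p00")
    for (u, v), state_symbol in zip(all_states, pi):
        ratios_P0[u, v] = sym.factor(state_probs[state_symbol] / state_probs[p00])
    return state_probs, ratios_P0


# For example, get_symbolic_state_probabilities(1, 1, 5, 1) should agree with
# the probabilities and P0 ratios computed explicitly for the
# (num_of_servers=1, threshold=1, system_capacity=5, buffer_capacity=1) model,
# assuming the state ordering from build_states matches the symbol naming.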
"step-3": "<mask token>\n\n\ndef get_symbolic_pi(num_of_servers, threshold, system_capacity, buffer_capacity\n ):\n Q_sym = abg.markov.get_symbolic_transition_matrix(num_of_servers=\n num_of_servers, threshold=threshold, system_capacity=\n system_capacity, buffer_capacity=buffer_capacity)\n dimension = Q_sym.shape[0]\n if dimension > 7:\n return 'Capacity of 6 exceeded'\n M_sym = sym.Matrix([Q_sym.transpose()[:-1, :], sym.ones(1, dimension)])\n b_sym = sym.Matrix([sym.zeros(dimension - 1, 1), [1]])\n system = M_sym.col_insert(dimension, b_sym)\n sol = sym.solve_linear_system_LU(system, [a, b, c, d, e, f, g])\n return sol\n\n\ndef get_symbolic_state_probabilities_1222():\n num_of_servers = 1\n threshold = 2\n system_capacity = 2\n buffer_capacity = 2\n sym_pi_1222 = get_symbolic_pi(num_of_servers=num_of_servers, threshold=\n threshold, system_capacity=system_capacity, buffer_capacity=\n buffer_capacity)\n all_states_1222 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1222 = [(0) for _ in range(len(all_states_1222))]\n sym_state_probs_1222[0] = sym.factor(sym_pi_1222[a])\n sym_state_probs_1222[1] = sym.factor(sym_pi_1222[b])\n sym_state_probs_1222[2] = sym.factor(sym_pi_1222[c])\n sym_state_probs_1222[3] = sym.factor(sym_pi_1222[d])\n sym_state_probs_1222[4] = sym.factor(sym_pi_1222[e])\n sym_state_recursive_ratios_1222 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1222[0, 0] = 1\n sym_state_recursive_ratios_1222[0, 1] = sym.factor(sym_state_probs_1222\n [1] / sym_state_probs_1222[0])\n sym_state_recursive_ratios_1222[0, 2] = sym.factor(sym_state_probs_1222\n [2] / sym_state_probs_1222[1])\n sym_state_recursive_ratios_1222[1, 2] = sym.factor(sym_state_probs_1222\n [3] / sym_state_probs_1222[2])\n sym_state_recursive_ratios_1222[2, 2] = sym.factor(sym_state_probs_1222\n [4] / sym_state_probs_1222[3])\n return sym_state_probs_1222, sym_state_recursive_ratios_1222\n\n\ndef get_symbolic_state_probabilities_1121():\n num_of_servers = 1\n threshold = 1\n system_capacity = 2\n buffer_capacity = 1\n all_states_1121 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_pi_1121 = get_symbolic_pi(num_of_servers=num_of_servers, threshold=\n threshold, system_capacity=system_capacity, buffer_capacity=\n buffer_capacity)\n sym_state_probs_1121 = [(0) for _ in range(len(all_states_1121))]\n sym_state_probs_1121[0] = sym.factor(sym_pi_1121[a])\n sym_state_probs_1121[1] = sym.factor(sym_pi_1121[b])\n sym_state_probs_1121[2] = sym.factor(sym_pi_1121[c])\n sym_state_probs_1121[3] = sym.factor(sym_pi_1121[d])\n sym_state_probs_1121[4] = sym.factor(sym_pi_1121[e])\n sym_state_recursive_ratios_1121 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1121[0, 0] = 1\n sym_state_recursive_ratios_1121[0, 1] = sym.factor(sym_state_probs_1121\n [1] / sym_state_probs_1121[0])\n sym_state_recursive_ratios_1121[1, 1] = sym.factor(sym_state_probs_1121\n [2] / sym_state_probs_1121[1])\n sym_state_recursive_ratios_1121[0, 2] = sym.factor(sym_state_probs_1121\n [3] / sym_state_probs_1121[1])\n sym_state_recursive_ratios_1121[1, 2] = sym.factor(sym_state_probs_1121\n [4] / sym_state_probs_1121[3])\n sym_state_recursive_ratios_right_1121 = (sym_state_recursive_ratios_1121\n .copy())\n sym_state_recursive_ratios_right_1121[1, 2] = sym.factor(\n sym_state_probs_1121[4] / sym_state_probs_1121[2])\n 
sym_state_recursive_ratios_P0_1121 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1121[0, 0] = 1\n sym_state_recursive_ratios_P0_1121[0, 1] = sym.factor(\n sym_state_probs_1121[1] / sym_state_probs_1121[0])\n sym_state_recursive_ratios_P0_1121[1, 1] = sym.factor(\n sym_state_probs_1121[2] / sym_state_probs_1121[0])\n sym_state_recursive_ratios_P0_1121[0, 2] = sym.factor(\n sym_state_probs_1121[3] / sym_state_probs_1121[0])\n sym_state_recursive_ratios_P0_1121[1, 2] = sym.factor(\n sym_state_probs_1121[4] / sym_state_probs_1121[0])\n return (sym_state_probs_1121, sym_state_recursive_ratios_1121,\n sym_state_recursive_ratios_right_1121,\n sym_state_recursive_ratios_P0_1121)\n\n\ndef get_symbolic_state_probabilities_1122():\n threshold = 1\n system_capacity = 2\n buffer_capacity = 2\n all_states_1122 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1122 = [(0) for _ in range(len(all_states_1122))]\n sym_Lambda = sym.symbols('Lambda')\n sym_lambda_1 = sym.symbols('lambda_1')\n sym_lambda_2 = sym.symbols('lambda_2')\n sym_mu = sym.symbols('mu')\n sym_state_probs_1122[0] = (sym_mu ** 6 + 2 * sym_lambda_2 * sym_mu ** 5 +\n sym_lambda_2 ** 2 * sym_mu ** 4)\n sym_state_probs_1122[1] = sym_Lambda * sym_mu ** 3 * (sym_mu ** 2 + 2 *\n sym_mu * sym_lambda_2 + sym_lambda_2 ** 2)\n sym_state_probs_1122[2] = sym_Lambda * sym_lambda_2 * sym_mu ** 2 * (\n sym_lambda_2 ** 2 + sym_lambda_2 * sym_lambda_1 + sym_lambda_1 *\n sym_mu + sym_mu ** 2 + 2 * sym_lambda_2 * sym_mu)\n sym_state_probs_1122[3] = sym_Lambda * sym_lambda_2 ** 2 * sym_mu * (\n sym_lambda_2 ** 2 + 2 * sym_lambda_1 * sym_lambda_2 + 3 *\n sym_lambda_1 * sym_mu + sym_mu ** 2 + 2 * sym_lambda_2 * sym_mu + \n sym_lambda_1 ** 2)\n sym_state_probs_1122[4] = sym_Lambda * sym_lambda_1 * sym_mu ** 3 * (\n sym_lambda_2 + sym_mu)\n sym_state_probs_1122[5\n ] = sym_Lambda * sym_lambda_1 * sym_lambda_2 * sym_mu ** 2 * (2 *\n sym_mu + sym_lambda_1 + sym_lambda_2)\n sym_state_probs_1122[6] = sym_Lambda * sym_lambda_1 * sym_lambda_2 ** 2 * (\n sym_lambda_1 ** 2 + 4 * sym_lambda_1 * sym_mu + 2 * sym_lambda_1 *\n sym_lambda_2 + 3 * sym_mu ** 2 + sym_lambda_2 ** 2 + 3 *\n sym_lambda_2 * sym_mu)\n total_1122 = np.sum(sym_state_probs_1122)\n sym_state_probs_1122 = [(i / total_1122) for i in sym_state_probs_1122]\n sym_state_recursive_ratios_1122 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1122[0, 0] = 1\n sym_state_recursive_ratios_1122[0, 1] = sym.factor(sym_state_probs_1122\n [1] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_1122[1, 1] = sym.factor(sym_state_probs_1122\n [2] / sym_state_probs_1122[1])\n sym_state_recursive_ratios_1122[2, 1] = sym.factor(sym_state_probs_1122\n [3] / sym_state_probs_1122[2])\n sym_state_recursive_ratios_1122[0, 2] = sym.factor(sym_state_probs_1122\n [4] / sym_state_probs_1122[1])\n sym_state_recursive_ratios_1122[1, 2] = sym.factor(sym_state_probs_1122\n [5] / sym_state_probs_1122[4])\n sym_state_recursive_ratios_1122[2, 2] = sym.factor(sym_state_probs_1122\n [6] / sym_state_probs_1122[5])\n sym_state_recursive_ratios_right_1122 = (sym_state_recursive_ratios_1122\n .copy())\n sym_state_recursive_ratios_right_1122[1, 2] = sym.factor(\n sym_state_probs_1122[5] / sym_state_probs_1122[2])\n sym_state_recursive_ratios_right_1122[2, 2] = sym.factor(\n sym_state_probs_1122[6] / sym_state_probs_1122[3])\n sym_state_recursive_ratios_P0_1122 = 
sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1122[0, 0] = 1\n sym_state_recursive_ratios_P0_1122[0, 1] = sym.factor(\n sym_state_probs_1122[1] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[1, 1] = sym.factor(\n sym_state_probs_1122[2] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[2, 1] = sym.factor(\n sym_state_probs_1122[3] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[0, 2] = sym.factor(\n sym_state_probs_1122[4] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[1, 2] = sym.factor(\n sym_state_probs_1122[5] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[2, 2] = sym.factor(\n sym_state_probs_1122[6] / sym_state_probs_1122[0])\n return (sym_state_probs_1122, sym_state_recursive_ratios_1122,\n sym_state_recursive_ratios_right_1122,\n sym_state_recursive_ratios_P0_1122)\n\n\ndef get_symbolic_state_probabilities_1123():\n num_of_servers = 1\n threshold = 1\n system_capacity = 2\n buffer_capacity = 3\n Q_sym_1123 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p21, p31, p02, p12, p22, p32 = sym.symbols(\n 'p00, p01, p11, p21, p31, p02, p12, p22, p32')\n pi_1123 = sym.Matrix([p00, p01, p11, p21, p31, p02, p12, p22, p32])\n dimension_1123 = Q_sym_1123.shape[0]\n M_sym_1123 = sym.Matrix([Q_sym_1123.transpose()[:-1, :], sym.ones(1,\n dimension_1123)])\n sym_diff_equations_1123 = M_sym_1123 @ pi_1123\n b_sym_1123 = sym.Matrix([sym.zeros(dimension_1123 - 1, 1), [1]])\n eq0_1123 = sym.Eq(sym_diff_equations_1123[0], b_sym_1123[0])\n eq1_1123 = sym.Eq(sym_diff_equations_1123[1], b_sym_1123[1])\n eq2_1123 = sym.Eq(sym_diff_equations_1123[2], b_sym_1123[2])\n eq3_1123 = sym.Eq(sym_diff_equations_1123[3], b_sym_1123[3])\n eq4_1123 = sym.Eq(sym_diff_equations_1123[4], b_sym_1123[4])\n eq5_1123 = sym.Eq(sym_diff_equations_1123[5], b_sym_1123[5])\n eq6_1123 = sym.Eq(sym_diff_equations_1123[6], b_sym_1123[6])\n eq7_1123 = sym.Eq(sym_diff_equations_1123[7], b_sym_1123[7])\n eq8_1123 = sym.Eq(sym_diff_equations_1123[8], b_sym_1123[8])\n sym_state_probs_1123 = sym.solve([eq0_1123, eq1_1123, eq2_1123,\n eq3_1123, eq4_1123, eq5_1123, eq6_1123, eq7_1123, eq8_1123], (p00,\n p01, p11, p21, p31, p02, p12, p22, p32))\n sym_state_recursive_ratios_1123 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1123[0, 0] = 1\n sym_state_recursive_ratios_1123[0, 1] = sym.factor(sym_state_probs_1123\n [p01] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_1123[1, 1] = sym.factor(sym_state_probs_1123\n [p11] / sym_state_probs_1123[p01])\n sym_state_recursive_ratios_1123[2, 1] = sym.factor(sym_state_probs_1123\n [p21] / sym_state_probs_1123[p11])\n sym_state_recursive_ratios_1123[3, 1] = sym.factor(sym_state_probs_1123\n [p31] / sym_state_probs_1123[p21])\n sym_state_recursive_ratios_1123[0, 2] = sym.factor(sym_state_probs_1123\n [p02] / sym_state_probs_1123[p01])\n sym_state_recursive_ratios_1123[1, 2] = sym.factor(sym_state_probs_1123\n [p12] / sym_state_probs_1123[p02])\n sym_state_recursive_ratios_1123[2, 2] = sym.factor(sym_state_probs_1123\n [p22] / sym_state_probs_1123[p12])\n sym_state_recursive_ratios_1123[2, 2] = sym.factor(sym_state_probs_1123\n [p32] / sym_state_probs_1123[p22])\n sym_state_recursive_ratios_right_1123 = (sym_state_recursive_ratios_1123\n .copy())\n sym_state_recursive_ratios_right_1123[1, 2] = sym.factor(\n sym_state_probs_1123[p12] / 
sym_state_probs_1123[p11])\n sym_state_recursive_ratios_right_1123[2, 2] = sym.factor(\n sym_state_probs_1123[p22] / sym_state_probs_1123[p21])\n sym_state_recursive_ratios_right_1123[3, 2] = sym.factor(\n sym_state_probs_1123[p32] / sym_state_probs_1123[p22])\n sym_state_recursive_ratios_P0_1123 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1123[0, 0] = 1\n sym_state_recursive_ratios_P0_1123[0, 1] = sym.factor(\n sym_state_probs_1123[p01] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[1, 1] = sym.factor(\n sym_state_probs_1123[p11] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[2, 1] = sym.factor(\n sym_state_probs_1123[p21] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[3, 1] = sym.factor(\n sym_state_probs_1123[p31] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[0, 2] = sym.factor(\n sym_state_probs_1123[p02] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[1, 2] = sym.factor(\n sym_state_probs_1123[p12] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[2, 2] = sym.factor(\n sym_state_probs_1123[p22] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[3, 2] = sym.factor(\n sym_state_probs_1123[p32] / sym_state_probs_1123[p00])\n return (sym_state_probs_1123, sym_state_recursive_ratios_1123,\n sym_state_recursive_ratios_right_1123,\n sym_state_recursive_ratios_P0_1123)\n\n\ndef get_symbolic_state_probabilities_1341():\n threshold = 3\n system_capacity = 4\n buffer_capacity = 1\n all_states_1341 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1341 = [(0) for _ in range(len(all_states_1341))]\n sym_Lambda = sym.symbols('Lambda')\n sym_lambda_1 = sym.symbols('lambda_1')\n sym_lambda_2 = sym.symbols('lambda_2')\n sym_mu = sym.symbols('mu')\n sym_state_probs_1341[0] = sym_lambda_2 * sym_mu ** 5 + sym_mu ** 6\n sym_state_probs_1341[1\n ] = sym_Lambda * sym_lambda_2 * sym_mu ** 4 + sym_Lambda * sym_mu ** 5\n sym_state_probs_1341[2] = (sym_Lambda ** 2 * sym_lambda_2 * sym_mu ** 3 +\n sym_Lambda ** 2 * sym_mu ** 4)\n sym_state_probs_1341[3] = (sym_Lambda ** 3 * sym_lambda_2 * sym_mu ** 2 +\n sym_Lambda ** 3 * sym_mu ** 3)\n sym_state_probs_1341[4] = (sym_Lambda ** 3 * sym_lambda_1 *\n sym_lambda_2 * sym_mu + sym_Lambda ** 3 * sym_lambda_2 * sym_mu ** \n 2 + sym_Lambda ** 3 * sym_lambda_2 * sym_lambda_2 * sym_mu)\n sym_state_probs_1341[5] = sym_Lambda ** 3 * sym_lambda_1 * sym_mu ** 2\n sym_state_probs_1341[6] = (sym_Lambda ** 3 * sym_lambda_1 ** 2 *\n sym_lambda_2 + sym_Lambda ** 3 * sym_lambda_1 * sym_lambda_2 ** 2 +\n 2 * sym_Lambda ** 3 * sym_lambda_1 * sym_lambda_2 * sym_mu)\n total_1341 = np.sum(sym_state_probs_1341)\n sym_state_probs_1341 = [(i / total_1341) for i in sym_state_probs_1341]\n sym_state_recursive_ratios_1341 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1341[0, 0] = 1\n sym_state_recursive_ratios_1341[0, 1] = sym.factor(sym_state_probs_1341\n [1] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_1341[0, 2] = sym.factor(sym_state_probs_1341\n [2] / sym_state_probs_1341[1])\n sym_state_recursive_ratios_1341[0, 3] = sym.factor(sym_state_probs_1341\n [3] / sym_state_probs_1341[2])\n sym_state_recursive_ratios_1341[0, 4] = sym.factor(sym_state_probs_1341\n [5] / sym_state_probs_1341[3])\n sym_state_recursive_ratios_1341[1, 3] = sym.factor(sym_state_probs_1341\n [4] / 
sym_state_probs_1341[3])\n sym_state_recursive_ratios_1341[1, 4] = sym.factor(sym_state_probs_1341\n [6] / sym_state_probs_1341[5])\n sym_state_recursive_ratios_right_1341 = (sym_state_recursive_ratios_1341\n .copy())\n sym_state_recursive_ratios_right_1341[1, 4] = sym.factor(\n sym_state_probs_1341[6] / sym_state_probs_1341[4])\n sym_state_recursive_ratios_P0_1341 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1341[0, 0] = 1\n sym_state_recursive_ratios_P0_1341[0, 1] = sym.factor(\n sym_state_probs_1341[1] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[0, 2] = sym.factor(\n sym_state_probs_1341[2] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[0, 3] = sym.factor(\n sym_state_probs_1341[3] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[1, 3] = sym.factor(\n sym_state_probs_1341[4] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[0, 4] = sym.factor(\n sym_state_probs_1341[5] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[1, 4] = sym.factor(\n sym_state_probs_1341[6] / sym_state_probs_1341[0])\n return (sym_state_probs_1341, sym_state_recursive_ratios_1341,\n sym_state_recursive_ratios_right_1341,\n sym_state_recursive_ratios_P0_1341)\n\n\ndef get_symbolic_state_probabilities_1131():\n threshold = 1\n system_capacity = 3\n buffer_capacity = 1\n all_states_1131 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1131 = [(0) for _ in range(len(all_states_1131))]\n sym_Lambda = sym.symbols('Lambda')\n sym_lambda_1 = sym.symbols('lambda_1')\n sym_lambda_2 = sym.symbols('lambda_2')\n sym_mu = sym.symbols('mu')\n sym_state_probs_1131[0] = (sym_mu ** 6 + 2 * (sym_lambda_2 * sym_mu ** \n 5) + sym_lambda_2 ** 2 * sym_mu ** 4 + sym_lambda_1 * sym_lambda_2 *\n sym_mu ** 4)\n sym_state_probs_1131[1] = sym_state_probs_1131[0] * sym_Lambda / sym_mu\n sym_state_probs_1131[2] = (sym_Lambda * sym_lambda_1 ** 2 *\n sym_lambda_2 * sym_mu ** 2 + sym_Lambda * sym_lambda_2 *\n sym_lambda_1 * sym_mu ** 3 + 2 * (sym_Lambda * sym_lambda_1 * \n sym_lambda_2 ** 2 * sym_mu ** 2) + 2 * (sym_Lambda * sym_lambda_2 **\n 2 * sym_mu ** 3) + sym_Lambda * sym_lambda_2 ** 3 * sym_mu ** 2 + \n sym_Lambda * sym_lambda_2 * sym_mu ** 4)\n sym_state_probs_1131[3] = sym_Lambda * sym_lambda_1 * sym_mu ** 3 * (\n sym_lambda_2 + sym_mu)\n sym_state_probs_1131[4\n ] = sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu * (\n sym_lambda_2 ** 2 + 2 * sym_lambda_2 * sym_lambda_1 + 3 *\n sym_lambda_2 * sym_mu + sym_lambda_1 ** 2 + 2 * sym_lambda_1 *\n sym_mu + 2 * sym_mu ** 2)\n sym_state_probs_1131[5] = sym_Lambda * sym_lambda_1 ** 2 * sym_mu ** 3\n sym_state_probs_1131[6] = sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 2 * (\n sym_lambda_2 ** 2 + 2 * sym_lambda_2 * sym_lambda_1 + 3 *\n sym_lambda_2 * sym_mu + sym_lambda_1 ** 2 + 2 * sym_lambda_1 *\n sym_mu + 3 * sym_mu ** 2)\n denominator = (sym_Lambda * sym_lambda_2 ** 3 * sym_lambda_1 ** 2 + \n sym_Lambda * sym_lambda_2 ** 3 * sym_lambda_1 * sym_mu + sym_Lambda *\n sym_lambda_2 ** 3 * sym_mu ** 2 + 2 * sym_Lambda * sym_lambda_2 ** \n 2 * sym_lambda_1 ** 3 + 5 * sym_Lambda * sym_lambda_2 ** 2 * \n sym_lambda_1 ** 2 * sym_mu + 5 * sym_Lambda * sym_lambda_2 ** 2 *\n sym_lambda_1 * sym_mu ** 2 + 3 * sym_Lambda * sym_lambda_2 ** 2 * \n sym_mu ** 3 + sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 4 + 3 *\n sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 3 * sym_mu + 6 *\n sym_Lambda * sym_lambda_2 * 
sym_lambda_1 ** 2 * sym_mu ** 2 + 5 *\n sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu ** 3 + 3 *\n sym_Lambda * sym_lambda_2 * sym_mu ** 4 + sym_Lambda * sym_lambda_1 **\n 2 * sym_mu ** 3 + sym_Lambda * sym_lambda_1 * sym_mu ** 4 + \n sym_Lambda * sym_mu ** 5 + sym_lambda_2 ** 2 * sym_mu ** 4 + \n sym_lambda_2 * sym_lambda_1 * sym_mu ** 4 + 2 * sym_lambda_2 * \n sym_mu ** 5 + sym_mu ** 6)\n sym_state_probs_1131 = [(i / denominator) for i in sym_state_probs_1131]\n sym_state_recursive_ratios_1131 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1131[0, 0] = 1\n sym_state_recursive_ratios_1131[0, 1] = sym.factor(sym_state_probs_1131\n [1] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_1131[1, 1] = sym.factor(sym_state_probs_1131\n [2] / sym_state_probs_1131[1])\n sym_state_recursive_ratios_1131[0, 2] = sym.factor(sym_state_probs_1131\n [3] / sym_state_probs_1131[1])\n sym_state_recursive_ratios_1131[1, 2] = sym.factor(sym_state_probs_1131\n [4] / sym_state_probs_1131[3])\n sym_state_recursive_ratios_1131[0, 3] = sym.factor(sym_state_probs_1131\n [5] / sym_state_probs_1131[3])\n sym_state_recursive_ratios_1131[1, 3] = sym.factor(sym_state_probs_1131\n [6] / sym_state_probs_1131[5])\n sym_state_recursive_ratios_right_1131 = (sym_state_recursive_ratios_1131\n .copy())\n sym_state_recursive_ratios_right_1131[1, 2] = sym.factor(\n sym_state_probs_1131[4] / sym_state_probs_1131[2])\n sym_state_recursive_ratios_right_1131[1, 3] = sym.factor(\n sym_state_probs_1131[6] / sym_state_probs_1131[4])\n sym_state_recursive_ratios_P0_1131 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1131[0, 0] = 1\n sym_state_recursive_ratios_P0_1131[0, 1] = sym.factor(\n sym_state_probs_1131[1] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[1, 1] = sym.factor(\n sym_state_probs_1131[2] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[0, 2] = sym.factor(\n sym_state_probs_1131[3] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[1, 2] = sym.factor(\n sym_state_probs_1131[4] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[0, 3] = sym.factor(\n sym_state_probs_1131[5] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[1, 3] = sym.factor(\n sym_state_probs_1131[6] / sym_state_probs_1131[0])\n return (sym_state_probs_1131, sym_state_recursive_ratios_1131,\n sym_state_recursive_ratios_right_1131,\n sym_state_recursive_ratios_P0_1131)\n\n\ndef get_symbolic_state_probabilities_1132():\n num_of_servers = 1\n threshold = 1\n system_capacity = 3\n buffer_capacity = 2\n Q_sym_1132 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p21, p02, p12, p22, p03, p13, p23 = sym.symbols(\n 'p00, p01, p11, p21, p02, p12, p22, p03, p13, p23')\n pi_1132 = sym.Matrix([p00, p01, p11, p21, p02, p12, p22, p03, p13, p23])\n dimension_1132 = Q_sym_1132.shape[0]\n M_sym_1132 = sym.Matrix([Q_sym_1132.transpose()[:-1, :], sym.ones(1,\n dimension_1132)])\n sym_diff_equations_1132 = M_sym_1132 @ pi_1132\n b_sym_1132 = sym.Matrix([sym.zeros(dimension_1132 - 1, 1), [1]])\n eq0_1132 = sym.Eq(sym_diff_equations_1132[0], b_sym_1132[0])\n eq1_1132 = sym.Eq(sym_diff_equations_1132[1], b_sym_1132[1])\n eq2_1132 = sym.Eq(sym_diff_equations_1132[2], b_sym_1132[2])\n eq3_1132 = sym.Eq(sym_diff_equations_1132[3], b_sym_1132[3])\n eq4_1132 = sym.Eq(sym_diff_equations_1132[4], b_sym_1132[4])\n eq5_1132 = 
sym.Eq(sym_diff_equations_1132[5], b_sym_1132[5])\n eq6_1132 = sym.Eq(sym_diff_equations_1132[6], b_sym_1132[6])\n eq7_1132 = sym.Eq(sym_diff_equations_1132[7], b_sym_1132[7])\n eq8_1132 = sym.Eq(sym_diff_equations_1132[8], b_sym_1132[8])\n eq9_1132 = sym.Eq(sym_diff_equations_1132[9], b_sym_1132[9])\n sym_state_probs_1132 = sym.solve([eq0_1132, eq1_1132, eq2_1132,\n eq3_1132, eq4_1132, eq5_1132, eq6_1132, eq7_1132, eq8_1132,\n eq9_1132], (p00, p01, p11, p21, p02, p12, p22, p03, p13, p23))\n sym_state_recursive_ratios_1132 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1132[0, 0] = 1\n sym_state_recursive_ratios_1132[0, 1] = sym.factor(sym_state_probs_1132\n [p01] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_1132[1, 1] = sym.factor(sym_state_probs_1132\n [p11] / sym_state_probs_1132[p01])\n sym_state_recursive_ratios_1132[2, 1] = sym.factor(sym_state_probs_1132\n [p21] / sym_state_probs_1132[p11])\n sym_state_recursive_ratios_1132[0, 2] = sym.factor(sym_state_probs_1132\n [p02] / sym_state_probs_1132[p01])\n sym_state_recursive_ratios_1132[1, 2] = sym.factor(sym_state_probs_1132\n [p12] / sym_state_probs_1132[p02])\n sym_state_recursive_ratios_1132[2, 2] = sym.factor(sym_state_probs_1132\n [p22] / sym_state_probs_1132[p12])\n sym_state_recursive_ratios_1132[0, 3] = sym.factor(sym_state_probs_1132\n [p03] / sym_state_probs_1132[p02])\n sym_state_recursive_ratios_1132[1, 3] = sym.factor(sym_state_probs_1132\n [p13] / sym_state_probs_1132[p03])\n sym_state_recursive_ratios_1132[2, 3] = sym.factor(sym_state_probs_1132\n [p23] / sym_state_probs_1132[p13])\n sym_state_recursive_ratios_right_1132 = (sym_state_recursive_ratios_1132\n .copy())\n sym_state_recursive_ratios_right_1132[1, 2] = sym.factor(\n sym_state_probs_1132[p12] / sym_state_probs_1132[p11])\n sym_state_recursive_ratios_right_1132[1, 3] = sym.factor(\n sym_state_probs_1132[p13] / sym_state_probs_1132[p12])\n sym_state_recursive_ratios_right_1132[2, 2] = sym.factor(\n sym_state_probs_1132[p22] / sym_state_probs_1132[p21])\n sym_state_recursive_ratios_right_1132[2, 3] = sym.factor(\n sym_state_probs_1132[p23] / sym_state_probs_1132[p22])\n sym_state_recursive_ratios_P0_1132 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1132[0, 0] = 1\n sym_state_recursive_ratios_P0_1132[0, 1] = sym.factor(\n sym_state_probs_1132[p01] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[1, 1] = sym.factor(\n sym_state_probs_1132[p11] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[2, 1] = sym.factor(\n sym_state_probs_1132[p21] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[0, 2] = sym.factor(\n sym_state_probs_1132[p02] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[1, 2] = sym.factor(\n sym_state_probs_1132[p12] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[2, 2] = sym.factor(\n sym_state_probs_1132[p22] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[0, 3] = sym.factor(\n sym_state_probs_1132[p03] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[1, 3] = sym.factor(\n sym_state_probs_1132[p13] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[2, 3] = sym.factor(\n sym_state_probs_1132[p23] / sym_state_probs_1132[p00])\n return (sym_state_probs_1132, sym_state_recursive_ratios_1132,\n sym_state_recursive_ratios_right_1132,\n sym_state_recursive_ratios_P0_1132)\n\n\n<mask token>\n\n\ndef 
get_symbolic_state_probabilities_1151():\n num_of_servers = 1\n threshold = 1\n system_capacity = 5\n buffer_capacity = 1\n Q_sym_1151 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15 = sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15')\n pi_1151 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15])\n dimension_1151 = Q_sym_1151.shape[0]\n M_sym_1151 = sym.Matrix([Q_sym_1151.transpose()[:-1, :], sym.ones(1,\n dimension_1151)])\n sym_diff_equations_1151 = M_sym_1151 @ pi_1151\n b_sym_1151 = sym.Matrix([sym.zeros(dimension_1151 - 1, 1), [1]])\n eq0_1151 = sym.Eq(sym_diff_equations_1151[0], b_sym_1151[0])\n eq1_1151 = sym.Eq(sym_diff_equations_1151[1], b_sym_1151[1])\n eq2_1151 = sym.Eq(sym_diff_equations_1151[2], b_sym_1151[2])\n eq3_1151 = sym.Eq(sym_diff_equations_1151[3], b_sym_1151[3])\n eq4_1151 = sym.Eq(sym_diff_equations_1151[4], b_sym_1151[4])\n eq5_1151 = sym.Eq(sym_diff_equations_1151[5], b_sym_1151[5])\n eq6_1151 = sym.Eq(sym_diff_equations_1151[6], b_sym_1151[6])\n eq7_1151 = sym.Eq(sym_diff_equations_1151[7], b_sym_1151[7])\n eq8_1151 = sym.Eq(sym_diff_equations_1151[8], b_sym_1151[8])\n eq9_1151 = sym.Eq(sym_diff_equations_1151[9], b_sym_1151[9])\n eq10_1151 = sym.Eq(sym_diff_equations_1151[10], b_sym_1151[10])\n sym_state_probs_1151 = sym.solve([eq0_1151, eq1_1151, eq2_1151,\n eq3_1151, eq4_1151, eq5_1151, eq6_1151, eq7_1151, eq8_1151,\n eq9_1151, eq10_1151], (p00, p01, p11, p02, p12, p03, p13, p04, p14,\n p05, p15))\n sym_state_recursive_ratios_1151 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1151[0, 0] = 1\n sym_state_recursive_ratios_1151[0, 1] = sym.factor(sym_state_probs_1151\n [p01] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_1151[1, 1] = sym.factor(sym_state_probs_1151\n [p11] / sym_state_probs_1151[p01])\n sym_state_recursive_ratios_1151[0, 2] = sym.factor(sym_state_probs_1151\n [p02] / sym_state_probs_1151[p01])\n sym_state_recursive_ratios_1151[1, 2] = sym.factor(sym_state_probs_1151\n [p12] / sym_state_probs_1151[p02])\n sym_state_recursive_ratios_1151[0, 3] = sym.factor(sym_state_probs_1151\n [p03] / sym_state_probs_1151[p02])\n sym_state_recursive_ratios_1151[1, 3] = sym.factor(sym_state_probs_1151\n [p13] / sym_state_probs_1151[p03])\n sym_state_recursive_ratios_1151[0, 4] = sym.factor(sym_state_probs_1151\n [p04] / sym_state_probs_1151[p03])\n sym_state_recursive_ratios_1151[1, 4] = sym.factor(sym_state_probs_1151\n [p14] / sym_state_probs_1151[p04])\n sym_state_recursive_ratios_1151[0, 5] = sym.factor(sym_state_probs_1151\n [p05] / sym_state_probs_1151[p04])\n sym_state_recursive_ratios_1151[1, 5] = sym.factor(sym_state_probs_1151\n [p15] / sym_state_probs_1151[p05])\n sym_state_recursive_ratios_right_1151 = (sym_state_recursive_ratios_1151\n .copy())\n sym_state_recursive_ratios_right_1151[1, 2] = sym.factor(\n sym_state_probs_1151[p12] / sym_state_probs_1151[p11])\n sym_state_recursive_ratios_right_1151[1, 3] = sym.factor(\n sym_state_probs_1151[p13] / sym_state_probs_1151[p12])\n sym_state_recursive_ratios_right_1151[1, 4] = sym.factor(\n sym_state_probs_1151[p14] / sym_state_probs_1151[p13])\n sym_state_recursive_ratios_right_1151[1, 5] = sym.factor(\n sym_state_probs_1151[p15] / sym_state_probs_1151[p14])\n sym_state_recursive_ratios_P0_1151 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1151[0, 0] = 1\n 
sym_state_recursive_ratios_P0_1151[0, 1] = sym.factor(\n sym_state_probs_1151[p01] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 1] = sym.factor(\n sym_state_probs_1151[p11] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[0, 2] = sym.factor(\n sym_state_probs_1151[p02] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 2] = sym.factor(\n sym_state_probs_1151[p12] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[0, 3] = sym.factor(\n sym_state_probs_1151[p03] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 3] = sym.factor(\n sym_state_probs_1151[p13] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[0, 4] = sym.factor(\n sym_state_probs_1151[p04] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 4] = sym.factor(\n sym_state_probs_1151[p14] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[0, 5] = sym.factor(\n sym_state_probs_1151[p05] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 5] = sym.factor(\n sym_state_probs_1151[p15] / sym_state_probs_1151[p00])\n return (sym_state_probs_1151, sym_state_recursive_ratios_1151,\n sym_state_recursive_ratios_right_1151,\n sym_state_recursive_ratios_P0_1151)\n\n\ndef get_symbolic_state_probabilities_1161():\n num_of_servers = 1\n threshold = 1\n system_capacity = 6\n buffer_capacity = 1\n Q_sym_1161 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16 = (sym.\n symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16'))\n pi_1161 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16])\n dimension_1161 = Q_sym_1161.shape[0]\n M_sym_1161 = sym.Matrix([Q_sym_1161.transpose()[:-1, :], sym.ones(1,\n dimension_1161)])\n sym_diff_equations_1161 = M_sym_1161 @ pi_1161\n b_sym_1161 = sym.Matrix([sym.zeros(dimension_1161 - 1, 1), [1]])\n eq0_1161 = sym.Eq(sym_diff_equations_1161[0], b_sym_1161[0])\n eq1_1161 = sym.Eq(sym_diff_equations_1161[1], b_sym_1161[1])\n eq2_1161 = sym.Eq(sym_diff_equations_1161[2], b_sym_1161[2])\n eq3_1161 = sym.Eq(sym_diff_equations_1161[3], b_sym_1161[3])\n eq4_1161 = sym.Eq(sym_diff_equations_1161[4], b_sym_1161[4])\n eq5_1161 = sym.Eq(sym_diff_equations_1161[5], b_sym_1161[5])\n eq6_1161 = sym.Eq(sym_diff_equations_1161[6], b_sym_1161[6])\n eq7_1161 = sym.Eq(sym_diff_equations_1161[7], b_sym_1161[7])\n eq8_1161 = sym.Eq(sym_diff_equations_1161[8], b_sym_1161[8])\n eq9_1161 = sym.Eq(sym_diff_equations_1161[9], b_sym_1161[9])\n eq10_1161 = sym.Eq(sym_diff_equations_1161[10], b_sym_1161[10])\n eq11_1161 = sym.Eq(sym_diff_equations_1161[11], b_sym_1161[11])\n eq12_1161 = sym.Eq(sym_diff_equations_1161[12], b_sym_1161[12])\n sym_state_probs_1161 = sym.solve([eq0_1161, eq1_1161, eq2_1161,\n eq3_1161, eq4_1161, eq5_1161, eq6_1161, eq7_1161, eq8_1161,\n eq9_1161, eq10_1161, eq11_1161, eq12_1161], (p00, p01, p11, p02,\n p12, p03, p13, p04, p14, p05, p15, p06, p16))\n sym_state_recursive_ratios_1161 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1161[0, 0] = 1\n sym_state_recursive_ratios_1161[0, 1] = sym.factor(sym_state_probs_1161\n [p01] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_1161[1, 1] = sym.factor(sym_state_probs_1161\n [p11] / sym_state_probs_1161[p01])\n sym_state_recursive_ratios_1161[0, 2] = sym.factor(sym_state_probs_1161\n [p02] / 
sym_state_probs_1161[p01])\n sym_state_recursive_ratios_1161[1, 2] = sym.factor(sym_state_probs_1161\n [p12] / sym_state_probs_1161[p02])\n sym_state_recursive_ratios_1161[0, 3] = sym.factor(sym_state_probs_1161\n [p03] / sym_state_probs_1161[p02])\n sym_state_recursive_ratios_1161[1, 3] = sym.factor(sym_state_probs_1161\n [p13] / sym_state_probs_1161[p03])\n sym_state_recursive_ratios_1161[0, 4] = sym.factor(sym_state_probs_1161\n [p04] / sym_state_probs_1161[p03])\n sym_state_recursive_ratios_1161[1, 4] = sym.factor(sym_state_probs_1161\n [p14] / sym_state_probs_1161[p04])\n sym_state_recursive_ratios_1161[0, 5] = sym.factor(sym_state_probs_1161\n [p05] / sym_state_probs_1161[p04])\n sym_state_recursive_ratios_1161[1, 5] = sym.factor(sym_state_probs_1161\n [p15] / sym_state_probs_1161[p05])\n sym_state_recursive_ratios_1161[0, 6] = sym.factor(sym_state_probs_1161\n [p06] / sym_state_probs_1161[p05])\n sym_state_recursive_ratios_1161[1, 6] = sym.factor(sym_state_probs_1161\n [p16] / sym_state_probs_1161[p06])\n sym_state_recursive_ratios_right_1161 = (sym_state_recursive_ratios_1161\n .copy())\n sym_state_recursive_ratios_right_1161[1, 2] = sym.factor(\n sym_state_probs_1161[p12] / sym_state_probs_1161[p11])\n sym_state_recursive_ratios_right_1161[1, 3] = sym.factor(\n sym_state_probs_1161[p13] / sym_state_probs_1161[p12])\n sym_state_recursive_ratios_right_1161[1, 4] = sym.factor(\n sym_state_probs_1161[p14] / sym_state_probs_1161[p13])\n sym_state_recursive_ratios_right_1161[1, 5] = sym.factor(\n sym_state_probs_1161[p15] / sym_state_probs_1161[p14])\n sym_state_recursive_ratios_right_1161[1, 6] = sym.factor(\n sym_state_probs_1161[p16] / sym_state_probs_1161[p15])\n sym_state_recursive_ratios_P0_1161 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1161[0, 0] = 1\n sym_state_recursive_ratios_P0_1161[0, 1] = sym.factor(\n sym_state_probs_1161[p01] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 1] = sym.factor(\n sym_state_probs_1161[p11] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 2] = sym.factor(\n sym_state_probs_1161[p02] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 2] = sym.factor(\n sym_state_probs_1161[p12] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 3] = sym.factor(\n sym_state_probs_1161[p03] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 3] = sym.factor(\n sym_state_probs_1161[p13] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 4] = sym.factor(\n sym_state_probs_1161[p04] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 4] = sym.factor(\n sym_state_probs_1161[p14] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 5] = sym.factor(\n sym_state_probs_1161[p05] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 5] = sym.factor(\n sym_state_probs_1161[p15] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 6] = sym.factor(\n sym_state_probs_1161[p06] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 6] = sym.factor(\n sym_state_probs_1161[p16] / sym_state_probs_1161[p00])\n return (sym_state_probs_1161, sym_state_recursive_ratios_1161,\n sym_state_recursive_ratios_right_1161,\n sym_state_recursive_ratios_P0_1161)\n\n\ndef get_symbolic_state_probabilities_1171():\n num_of_servers = 1\n threshold = 1\n system_capacity = 7\n buffer_capacity = 1\n Q_sym_1171 = 
abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17\n ) = (sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17'\n ))\n pi_1171 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16, p07, p17])\n dimension_1171 = Q_sym_1171.shape[0]\n M_sym_1171 = sym.Matrix([Q_sym_1171.transpose()[:-1, :], sym.ones(1,\n dimension_1171)])\n sym_diff_equations_1171 = M_sym_1171 @ pi_1171\n b_sym_1171 = sym.Matrix([sym.zeros(dimension_1171 - 1, 1), [1]])\n eq0_1171 = sym.Eq(sym_diff_equations_1171[0], b_sym_1171[0])\n eq1_1171 = sym.Eq(sym_diff_equations_1171[1], b_sym_1171[1])\n eq2_1171 = sym.Eq(sym_diff_equations_1171[2], b_sym_1171[2])\n eq3_1171 = sym.Eq(sym_diff_equations_1171[3], b_sym_1171[3])\n eq4_1171 = sym.Eq(sym_diff_equations_1171[4], b_sym_1171[4])\n eq5_1171 = sym.Eq(sym_diff_equations_1171[5], b_sym_1171[5])\n eq6_1171 = sym.Eq(sym_diff_equations_1171[6], b_sym_1171[6])\n eq7_1171 = sym.Eq(sym_diff_equations_1171[7], b_sym_1171[7])\n eq8_1171 = sym.Eq(sym_diff_equations_1171[8], b_sym_1171[8])\n eq9_1171 = sym.Eq(sym_diff_equations_1171[9], b_sym_1171[9])\n eq10_1171 = sym.Eq(sym_diff_equations_1171[10], b_sym_1171[10])\n eq11_1171 = sym.Eq(sym_diff_equations_1171[11], b_sym_1171[11])\n eq12_1171 = sym.Eq(sym_diff_equations_1171[12], b_sym_1171[12])\n eq13_1171 = sym.Eq(sym_diff_equations_1171[13], b_sym_1171[13])\n eq14_1171 = sym.Eq(sym_diff_equations_1171[14], b_sym_1171[14])\n sym_state_probs_1171 = sym.solve([eq0_1171, eq1_1171, eq2_1171,\n eq3_1171, eq4_1171, eq5_1171, eq6_1171, eq7_1171, eq8_1171,\n eq9_1171, eq10_1171, eq11_1171, eq12_1171, eq13_1171, eq14_1171], (\n p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16,\n p07, p17))\n sym_state_recursive_ratios_1171 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1171[0, 0] = 1\n sym_state_recursive_ratios_1171[0, 1] = sym.factor(sym_state_probs_1171\n [p01] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_1171[1, 1] = sym.factor(sym_state_probs_1171\n [p11] / sym_state_probs_1171[p01])\n sym_state_recursive_ratios_1171[0, 2] = sym.factor(sym_state_probs_1171\n [p02] / sym_state_probs_1171[p01])\n sym_state_recursive_ratios_1171[1, 2] = sym.factor(sym_state_probs_1171\n [p12] / sym_state_probs_1171[p02])\n sym_state_recursive_ratios_1171[0, 3] = sym.factor(sym_state_probs_1171\n [p03] / sym_state_probs_1171[p02])\n sym_state_recursive_ratios_1171[1, 3] = sym.factor(sym_state_probs_1171\n [p13] / sym_state_probs_1171[p03])\n sym_state_recursive_ratios_1171[0, 4] = sym.factor(sym_state_probs_1171\n [p04] / sym_state_probs_1171[p03])\n sym_state_recursive_ratios_1171[1, 4] = sym.factor(sym_state_probs_1171\n [p14] / sym_state_probs_1171[p04])\n sym_state_recursive_ratios_1171[0, 5] = sym.factor(sym_state_probs_1171\n [p05] / sym_state_probs_1171[p04])\n sym_state_recursive_ratios_1171[1, 5] = sym.factor(sym_state_probs_1171\n [p15] / sym_state_probs_1171[p05])\n sym_state_recursive_ratios_1171[0, 6] = sym.factor(sym_state_probs_1171\n [p06] / sym_state_probs_1171[p05])\n sym_state_recursive_ratios_1171[1, 6] = sym.factor(sym_state_probs_1171\n [p16] / sym_state_probs_1171[p06])\n sym_state_recursive_ratios_1171[0, 7] = sym.factor(sym_state_probs_1171\n [p07] / sym_state_probs_1171[p06])\n sym_state_recursive_ratios_1171[1, 7] = sym.factor(sym_state_probs_1171\n [p17] / 
sym_state_probs_1171[p07])\n sym_state_recursive_ratios_right_1171 = (sym_state_recursive_ratios_1171\n .copy())\n sym_state_recursive_ratios_right_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p11])\n sym_state_recursive_ratios_right_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p12])\n sym_state_recursive_ratios_right_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p13])\n sym_state_recursive_ratios_right_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p14])\n sym_state_recursive_ratios_right_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p15])\n sym_state_recursive_ratios_right_1171[1, 7] = sym.factor(\n sym_state_probs_1171[p17] / sym_state_probs_1171[p16])\n sym_state_recursive_ratios_P0_1171 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1171[0, 0] = 1\n sym_state_recursive_ratios_P0_1171[0, 1] = sym.factor(\n sym_state_probs_1171[p01] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 1] = sym.factor(\n sym_state_probs_1171[p11] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 2] = sym.factor(\n sym_state_probs_1171[p02] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 3] = sym.factor(\n sym_state_probs_1171[p03] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 4] = sym.factor(\n sym_state_probs_1171[p04] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 5] = sym.factor(\n sym_state_probs_1171[p05] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 6] = sym.factor(\n sym_state_probs_1171[p06] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 7] = sym.factor(\n sym_state_probs_1171[p07] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 7] = sym.factor(\n sym_state_probs_1171[p17] / sym_state_probs_1171[p00])\n return (sym_state_probs_1171, sym_state_recursive_ratios_1171,\n sym_state_recursive_ratios_right_1171,\n sym_state_recursive_ratios_P0_1171)\n\n\ndef get_symbolic_state_probabilities_1181():\n num_of_servers = 1\n threshold = 1\n system_capacity = 8\n buffer_capacity = 1\n Q_sym_1181 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07,\n p17, p08, p18) = (sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18'\n ))\n pi_1181 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16, p07, p17, p08, p18])\n dimension_1181 = Q_sym_1181.shape[0]\n M_sym_1181 = sym.Matrix([Q_sym_1181.transpose()[:-1, :], sym.ones(1,\n dimension_1181)])\n sym_diff_equations_1181 = M_sym_1181 @ pi_1181\n b_sym_1181 = sym.Matrix([sym.zeros(dimension_1181 - 1, 1), [1]])\n eq0_1181 = sym.Eq(sym_diff_equations_1181[0], 
b_sym_1181[0])\n eq1_1181 = sym.Eq(sym_diff_equations_1181[1], b_sym_1181[1])\n eq2_1181 = sym.Eq(sym_diff_equations_1181[2], b_sym_1181[2])\n eq3_1181 = sym.Eq(sym_diff_equations_1181[3], b_sym_1181[3])\n eq4_1181 = sym.Eq(sym_diff_equations_1181[4], b_sym_1181[4])\n eq5_1181 = sym.Eq(sym_diff_equations_1181[5], b_sym_1181[5])\n eq6_1181 = sym.Eq(sym_diff_equations_1181[6], b_sym_1181[6])\n eq7_1181 = sym.Eq(sym_diff_equations_1181[7], b_sym_1181[7])\n eq8_1181 = sym.Eq(sym_diff_equations_1181[8], b_sym_1181[8])\n eq9_1181 = sym.Eq(sym_diff_equations_1181[9], b_sym_1181[9])\n eq10_1181 = sym.Eq(sym_diff_equations_1181[10], b_sym_1181[10])\n eq11_1181 = sym.Eq(sym_diff_equations_1181[11], b_sym_1181[11])\n eq12_1181 = sym.Eq(sym_diff_equations_1181[12], b_sym_1181[12])\n eq13_1181 = sym.Eq(sym_diff_equations_1181[13], b_sym_1181[13])\n eq14_1181 = sym.Eq(sym_diff_equations_1181[14], b_sym_1181[14])\n eq15_1181 = sym.Eq(sym_diff_equations_1181[15], b_sym_1181[15])\n eq16_1181 = sym.Eq(sym_diff_equations_1181[16], b_sym_1181[16])\n sym_state_probs_1181 = sym.solve([eq0_1181, eq1_1181, eq2_1181,\n eq3_1181, eq4_1181, eq5_1181, eq6_1181, eq7_1181, eq8_1181,\n eq9_1181, eq10_1181, eq11_1181, eq12_1181, eq13_1181, eq14_1181,\n eq15_1181, eq16_1181], (p00, p01, p11, p02, p12, p03, p13, p04, p14,\n p05, p15, p06, p16, p07, p17, p08, p18))\n sym_state_recursive_ratios_1181 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1181[0, 0] = 1\n sym_state_recursive_ratios_1181[0, 1] = sym.factor(sym_state_probs_1181\n [p01] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_1181[1, 1] = sym.factor(sym_state_probs_1181\n [p11] / sym_state_probs_1181[p01])\n sym_state_recursive_ratios_1181[0, 2] = sym.factor(sym_state_probs_1181\n [p02] / sym_state_probs_1181[p01])\n sym_state_recursive_ratios_1181[1, 2] = sym.factor(sym_state_probs_1181\n [p12] / sym_state_probs_1181[p02])\n sym_state_recursive_ratios_1181[0, 3] = sym.factor(sym_state_probs_1181\n [p03] / sym_state_probs_1181[p02])\n sym_state_recursive_ratios_1181[1, 3] = sym.factor(sym_state_probs_1181\n [p13] / sym_state_probs_1181[p03])\n sym_state_recursive_ratios_1181[0, 4] = sym.factor(sym_state_probs_1181\n [p04] / sym_state_probs_1181[p03])\n sym_state_recursive_ratios_1181[1, 4] = sym.factor(sym_state_probs_1181\n [p14] / sym_state_probs_1181[p04])\n sym_state_recursive_ratios_1181[0, 5] = sym.factor(sym_state_probs_1181\n [p05] / sym_state_probs_1181[p04])\n sym_state_recursive_ratios_1181[1, 5] = sym.factor(sym_state_probs_1181\n [p15] / sym_state_probs_1181[p05])\n sym_state_recursive_ratios_1181[0, 6] = sym.factor(sym_state_probs_1181\n [p06] / sym_state_probs_1181[p05])\n sym_state_recursive_ratios_1181[1, 6] = sym.factor(sym_state_probs_1181\n [p16] / sym_state_probs_1181[p06])\n sym_state_recursive_ratios_1181[0, 7] = sym.factor(sym_state_probs_1181\n [p07] / sym_state_probs_1181[p06])\n sym_state_recursive_ratios_1181[1, 7] = sym.factor(sym_state_probs_1181\n [p17] / sym_state_probs_1181[p07])\n sym_state_recursive_ratios_1181[0, 8] = sym.factor(sym_state_probs_1181\n [p08] / sym_state_probs_1181[p07])\n sym_state_recursive_ratios_1181[1, 8] = sym.factor(sym_state_probs_1181\n [p18] / sym_state_probs_1181[p08])\n sym_state_recursive_ratios_right_1181 = (sym_state_recursive_ratios_1181\n .copy())\n sym_state_recursive_ratios_right_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p11])\n sym_state_recursive_ratios_right_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] 
/ sym_state_probs_1181[p12])\n sym_state_recursive_ratios_right_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p13])\n sym_state_recursive_ratios_right_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p14])\n sym_state_recursive_ratios_right_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p15])\n sym_state_recursive_ratios_right_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p16])\n sym_state_recursive_ratios_right_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p17])\n sym_state_recursive_ratios_P0_1181 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1181[0, 0] = 1\n sym_state_recursive_ratios_P0_1181[0, 1] = sym.factor(\n sym_state_probs_1181[p01] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 1] = sym.factor(\n sym_state_probs_1181[p11] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 2] = sym.factor(\n sym_state_probs_1181[p02] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 3] = sym.factor(\n sym_state_probs_1181[p03] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 4] = sym.factor(\n sym_state_probs_1181[p04] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 5] = sym.factor(\n sym_state_probs_1181[p05] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 6] = sym.factor(\n sym_state_probs_1181[p06] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 7] = sym.factor(\n sym_state_probs_1181[p07] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 8] = sym.factor(\n sym_state_probs_1181[p08] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p00])\n return (sym_state_probs_1181, sym_state_recursive_ratios_1181,\n sym_state_recursive_ratios_right_1181,\n sym_state_recursive_ratios_P0_1181)\n\n\ndef get_symbolic_state_probabilities_1191():\n num_of_servers = 1\n threshold = 1\n system_capacity = 9\n buffer_capacity = 1\n Q_sym_1191 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07,\n p17, p08, p18, p09, p19) = (sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18, p09, p19'\n ))\n pi_1191 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16, p07, p17, p08, p18, p09, p19])\n dimension_1191 = Q_sym_1191.shape[0]\n M_sym_1191 = sym.Matrix([Q_sym_1191.transpose()[:-1, :], sym.ones(1,\n dimension_1191)])\n sym_diff_equations_1191 = M_sym_1191 @ pi_1191\n b_sym_1191 = sym.Matrix([sym.zeros(dimension_1191 - 1, 1), 
[1]])\n eq0_1191 = sym.Eq(sym_diff_equations_1191[0], b_sym_1191[0])\n eq1_1191 = sym.Eq(sym_diff_equations_1191[1], b_sym_1191[1])\n eq2_1191 = sym.Eq(sym_diff_equations_1191[2], b_sym_1191[2])\n eq3_1191 = sym.Eq(sym_diff_equations_1191[3], b_sym_1191[3])\n eq4_1191 = sym.Eq(sym_diff_equations_1191[4], b_sym_1191[4])\n eq5_1191 = sym.Eq(sym_diff_equations_1191[5], b_sym_1191[5])\n eq6_1191 = sym.Eq(sym_diff_equations_1191[6], b_sym_1191[6])\n eq7_1191 = sym.Eq(sym_diff_equations_1191[7], b_sym_1191[7])\n eq8_1191 = sym.Eq(sym_diff_equations_1191[8], b_sym_1191[8])\n eq9_1191 = sym.Eq(sym_diff_equations_1191[9], b_sym_1191[9])\n eq10_1191 = sym.Eq(sym_diff_equations_1191[10], b_sym_1191[10])\n eq11_1191 = sym.Eq(sym_diff_equations_1191[11], b_sym_1191[11])\n eq12_1191 = sym.Eq(sym_diff_equations_1191[12], b_sym_1191[12])\n eq13_1191 = sym.Eq(sym_diff_equations_1191[13], b_sym_1191[13])\n eq14_1191 = sym.Eq(sym_diff_equations_1191[14], b_sym_1191[14])\n eq15_1191 = sym.Eq(sym_diff_equations_1191[15], b_sym_1191[15])\n eq16_1191 = sym.Eq(sym_diff_equations_1191[16], b_sym_1191[16])\n eq17_1191 = sym.Eq(sym_diff_equations_1191[17], b_sym_1191[17])\n eq18_1191 = sym.Eq(sym_diff_equations_1191[18], b_sym_1191[18])\n sym_state_probs_1191 = sym.solve([eq0_1191, eq1_1191, eq2_1191,\n eq3_1191, eq4_1191, eq5_1191, eq6_1191, eq7_1191, eq8_1191,\n eq9_1191, eq10_1191, eq11_1191, eq12_1191, eq13_1191, eq14_1191,\n eq15_1191, eq16_1191, eq17_1191, eq18_1191], (p00, p01, p11, p02,\n p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18,\n p09, p19))\n sym_state_recursive_ratios_1191 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1191[0, 0] = 1\n sym_state_recursive_ratios_1191[0, 1] = sym.factor(sym_state_probs_1191\n [p01] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_1191[1, 1] = sym.factor(sym_state_probs_1191\n [p11] / sym_state_probs_1191[p01])\n sym_state_recursive_ratios_1191[0, 2] = sym.factor(sym_state_probs_1191\n [p02] / sym_state_probs_1191[p01])\n sym_state_recursive_ratios_1191[1, 2] = sym.factor(sym_state_probs_1191\n [p12] / sym_state_probs_1191[p02])\n sym_state_recursive_ratios_1191[0, 3] = sym.factor(sym_state_probs_1191\n [p03] / sym_state_probs_1191[p02])\n sym_state_recursive_ratios_1191[1, 3] = sym.factor(sym_state_probs_1191\n [p13] / sym_state_probs_1191[p03])\n sym_state_recursive_ratios_1191[0, 4] = sym.factor(sym_state_probs_1191\n [p04] / sym_state_probs_1191[p03])\n sym_state_recursive_ratios_1191[1, 4] = sym.factor(sym_state_probs_1191\n [p14] / sym_state_probs_1191[p04])\n sym_state_recursive_ratios_1191[0, 5] = sym.factor(sym_state_probs_1191\n [p05] / sym_state_probs_1191[p04])\n sym_state_recursive_ratios_1191[1, 5] = sym.factor(sym_state_probs_1191\n [p15] / sym_state_probs_1191[p05])\n sym_state_recursive_ratios_1191[0, 6] = sym.factor(sym_state_probs_1191\n [p06] / sym_state_probs_1191[p05])\n sym_state_recursive_ratios_1191[1, 6] = sym.factor(sym_state_probs_1191\n [p16] / sym_state_probs_1191[p06])\n sym_state_recursive_ratios_1191[0, 7] = sym.factor(sym_state_probs_1191\n [p07] / sym_state_probs_1191[p06])\n sym_state_recursive_ratios_1191[1, 7] = sym.factor(sym_state_probs_1191\n [p17] / sym_state_probs_1191[p07])\n sym_state_recursive_ratios_1191[0, 8] = sym.factor(sym_state_probs_1191\n [p08] / sym_state_probs_1191[p07])\n sym_state_recursive_ratios_1191[1, 8] = sym.factor(sym_state_probs_1191\n [p18] / sym_state_probs_1191[p08])\n sym_state_recursive_ratios_1191[0, 9] = 
sym.factor(sym_state_probs_1191\n [p09] / sym_state_probs_1191[p08])\n sym_state_recursive_ratios_1191[1, 9] = sym.factor(sym_state_probs_1191\n [p19] / sym_state_probs_1191[p09])\n sym_state_recursive_ratios_right_1191 = (sym_state_recursive_ratios_1191\n .copy())\n sym_state_recursive_ratios_right_1191[1, 2] = sym.factor(\n sym_state_probs_1191[p12] / sym_state_probs_1191[p11])\n sym_state_recursive_ratios_right_1191[1, 3] = sym.factor(\n sym_state_probs_1191[p13] / sym_state_probs_1191[p12])\n sym_state_recursive_ratios_right_1191[1, 4] = sym.factor(\n sym_state_probs_1191[p14] / sym_state_probs_1191[p13])\n sym_state_recursive_ratios_right_1191[1, 5] = sym.factor(\n sym_state_probs_1191[p15] / sym_state_probs_1191[p14])\n sym_state_recursive_ratios_right_1191[1, 6] = sym.factor(\n sym_state_probs_1191[p16] / sym_state_probs_1191[p15])\n sym_state_recursive_ratios_right_1191[1, 7] = sym.factor(\n sym_state_probs_1191[p17] / sym_state_probs_1191[p16])\n sym_state_recursive_ratios_right_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p17])\n sym_state_recursive_ratios_right_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p17])\n sym_state_recursive_ratios_P0_1191 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1191[0, 0] = 1\n sym_state_recursive_ratios_P0_1191[0, 1] = sym.factor(\n sym_state_probs_1191[p01] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 1] = sym.factor(\n sym_state_probs_1191[p11] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 2] = sym.factor(\n sym_state_probs_1191[p02] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 2] = sym.factor(\n sym_state_probs_1191[p12] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 3] = sym.factor(\n sym_state_probs_1191[p03] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 3] = sym.factor(\n sym_state_probs_1191[p13] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 4] = sym.factor(\n sym_state_probs_1191[p04] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 4] = sym.factor(\n sym_state_probs_1191[p14] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 5] = sym.factor(\n sym_state_probs_1191[p05] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 5] = sym.factor(\n sym_state_probs_1191[p15] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 6] = sym.factor(\n sym_state_probs_1191[p06] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 6] = sym.factor(\n sym_state_probs_1191[p16] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 7] = sym.factor(\n sym_state_probs_1191[p07] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 7] = sym.factor(\n sym_state_probs_1191[p17] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 8] = sym.factor(\n sym_state_probs_1191[p08] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 9] = sym.factor(\n sym_state_probs_1191[p09] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 9] = sym.factor(\n sym_state_probs_1191[p19] / sym_state_probs_1191[p00])\n return (sym_state_probs_1191, sym_state_recursive_ratios_1191,\n sym_state_recursive_ratios_right_1191,\n sym_state_recursive_ratios_P0_1191)\n",
"step-4": "import ambulance_game as abg\nimport numpy as np\nimport sympy as sym\nfrom sympy.abc import a, b, c, d, e, f, g, h, i, j\n\n\ndef get_symbolic_pi(num_of_servers, threshold, system_capacity, buffer_capacity\n ):\n Q_sym = abg.markov.get_symbolic_transition_matrix(num_of_servers=\n num_of_servers, threshold=threshold, system_capacity=\n system_capacity, buffer_capacity=buffer_capacity)\n dimension = Q_sym.shape[0]\n if dimension > 7:\n return 'Capacity of 6 exceeded'\n M_sym = sym.Matrix([Q_sym.transpose()[:-1, :], sym.ones(1, dimension)])\n b_sym = sym.Matrix([sym.zeros(dimension - 1, 1), [1]])\n system = M_sym.col_insert(dimension, b_sym)\n sol = sym.solve_linear_system_LU(system, [a, b, c, d, e, f, g])\n return sol\n\n\ndef get_symbolic_state_probabilities_1222():\n num_of_servers = 1\n threshold = 2\n system_capacity = 2\n buffer_capacity = 2\n sym_pi_1222 = get_symbolic_pi(num_of_servers=num_of_servers, threshold=\n threshold, system_capacity=system_capacity, buffer_capacity=\n buffer_capacity)\n all_states_1222 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1222 = [(0) for _ in range(len(all_states_1222))]\n sym_state_probs_1222[0] = sym.factor(sym_pi_1222[a])\n sym_state_probs_1222[1] = sym.factor(sym_pi_1222[b])\n sym_state_probs_1222[2] = sym.factor(sym_pi_1222[c])\n sym_state_probs_1222[3] = sym.factor(sym_pi_1222[d])\n sym_state_probs_1222[4] = sym.factor(sym_pi_1222[e])\n sym_state_recursive_ratios_1222 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1222[0, 0] = 1\n sym_state_recursive_ratios_1222[0, 1] = sym.factor(sym_state_probs_1222\n [1] / sym_state_probs_1222[0])\n sym_state_recursive_ratios_1222[0, 2] = sym.factor(sym_state_probs_1222\n [2] / sym_state_probs_1222[1])\n sym_state_recursive_ratios_1222[1, 2] = sym.factor(sym_state_probs_1222\n [3] / sym_state_probs_1222[2])\n sym_state_recursive_ratios_1222[2, 2] = sym.factor(sym_state_probs_1222\n [4] / sym_state_probs_1222[3])\n return sym_state_probs_1222, sym_state_recursive_ratios_1222\n\n\ndef get_symbolic_state_probabilities_1121():\n num_of_servers = 1\n threshold = 1\n system_capacity = 2\n buffer_capacity = 1\n all_states_1121 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_pi_1121 = get_symbolic_pi(num_of_servers=num_of_servers, threshold=\n threshold, system_capacity=system_capacity, buffer_capacity=\n buffer_capacity)\n sym_state_probs_1121 = [(0) for _ in range(len(all_states_1121))]\n sym_state_probs_1121[0] = sym.factor(sym_pi_1121[a])\n sym_state_probs_1121[1] = sym.factor(sym_pi_1121[b])\n sym_state_probs_1121[2] = sym.factor(sym_pi_1121[c])\n sym_state_probs_1121[3] = sym.factor(sym_pi_1121[d])\n sym_state_probs_1121[4] = sym.factor(sym_pi_1121[e])\n sym_state_recursive_ratios_1121 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1121[0, 0] = 1\n sym_state_recursive_ratios_1121[0, 1] = sym.factor(sym_state_probs_1121\n [1] / sym_state_probs_1121[0])\n sym_state_recursive_ratios_1121[1, 1] = sym.factor(sym_state_probs_1121\n [2] / sym_state_probs_1121[1])\n sym_state_recursive_ratios_1121[0, 2] = sym.factor(sym_state_probs_1121\n [3] / sym_state_probs_1121[1])\n sym_state_recursive_ratios_1121[1, 2] = sym.factor(sym_state_probs_1121\n [4] / sym_state_probs_1121[3])\n sym_state_recursive_ratios_right_1121 = (sym_state_recursive_ratios_1121\n .copy())\n 
sym_state_recursive_ratios_right_1121[1, 2] = sym.factor(\n sym_state_probs_1121[4] / sym_state_probs_1121[2])\n sym_state_recursive_ratios_P0_1121 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1121[0, 0] = 1\n sym_state_recursive_ratios_P0_1121[0, 1] = sym.factor(\n sym_state_probs_1121[1] / sym_state_probs_1121[0])\n sym_state_recursive_ratios_P0_1121[1, 1] = sym.factor(\n sym_state_probs_1121[2] / sym_state_probs_1121[0])\n sym_state_recursive_ratios_P0_1121[0, 2] = sym.factor(\n sym_state_probs_1121[3] / sym_state_probs_1121[0])\n sym_state_recursive_ratios_P0_1121[1, 2] = sym.factor(\n sym_state_probs_1121[4] / sym_state_probs_1121[0])\n return (sym_state_probs_1121, sym_state_recursive_ratios_1121,\n sym_state_recursive_ratios_right_1121,\n sym_state_recursive_ratios_P0_1121)\n\n\ndef get_symbolic_state_probabilities_1122():\n threshold = 1\n system_capacity = 2\n buffer_capacity = 2\n all_states_1122 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1122 = [(0) for _ in range(len(all_states_1122))]\n sym_Lambda = sym.symbols('Lambda')\n sym_lambda_1 = sym.symbols('lambda_1')\n sym_lambda_2 = sym.symbols('lambda_2')\n sym_mu = sym.symbols('mu')\n sym_state_probs_1122[0] = (sym_mu ** 6 + 2 * sym_lambda_2 * sym_mu ** 5 +\n sym_lambda_2 ** 2 * sym_mu ** 4)\n sym_state_probs_1122[1] = sym_Lambda * sym_mu ** 3 * (sym_mu ** 2 + 2 *\n sym_mu * sym_lambda_2 + sym_lambda_2 ** 2)\n sym_state_probs_1122[2] = sym_Lambda * sym_lambda_2 * sym_mu ** 2 * (\n sym_lambda_2 ** 2 + sym_lambda_2 * sym_lambda_1 + sym_lambda_1 *\n sym_mu + sym_mu ** 2 + 2 * sym_lambda_2 * sym_mu)\n sym_state_probs_1122[3] = sym_Lambda * sym_lambda_2 ** 2 * sym_mu * (\n sym_lambda_2 ** 2 + 2 * sym_lambda_1 * sym_lambda_2 + 3 *\n sym_lambda_1 * sym_mu + sym_mu ** 2 + 2 * sym_lambda_2 * sym_mu + \n sym_lambda_1 ** 2)\n sym_state_probs_1122[4] = sym_Lambda * sym_lambda_1 * sym_mu ** 3 * (\n sym_lambda_2 + sym_mu)\n sym_state_probs_1122[5\n ] = sym_Lambda * sym_lambda_1 * sym_lambda_2 * sym_mu ** 2 * (2 *\n sym_mu + sym_lambda_1 + sym_lambda_2)\n sym_state_probs_1122[6] = sym_Lambda * sym_lambda_1 * sym_lambda_2 ** 2 * (\n sym_lambda_1 ** 2 + 4 * sym_lambda_1 * sym_mu + 2 * sym_lambda_1 *\n sym_lambda_2 + 3 * sym_mu ** 2 + sym_lambda_2 ** 2 + 3 *\n sym_lambda_2 * sym_mu)\n total_1122 = np.sum(sym_state_probs_1122)\n sym_state_probs_1122 = [(i / total_1122) for i in sym_state_probs_1122]\n sym_state_recursive_ratios_1122 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1122[0, 0] = 1\n sym_state_recursive_ratios_1122[0, 1] = sym.factor(sym_state_probs_1122\n [1] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_1122[1, 1] = sym.factor(sym_state_probs_1122\n [2] / sym_state_probs_1122[1])\n sym_state_recursive_ratios_1122[2, 1] = sym.factor(sym_state_probs_1122\n [3] / sym_state_probs_1122[2])\n sym_state_recursive_ratios_1122[0, 2] = sym.factor(sym_state_probs_1122\n [4] / sym_state_probs_1122[1])\n sym_state_recursive_ratios_1122[1, 2] = sym.factor(sym_state_probs_1122\n [5] / sym_state_probs_1122[4])\n sym_state_recursive_ratios_1122[2, 2] = sym.factor(sym_state_probs_1122\n [6] / sym_state_probs_1122[5])\n sym_state_recursive_ratios_right_1122 = (sym_state_recursive_ratios_1122\n .copy())\n sym_state_recursive_ratios_right_1122[1, 2] = sym.factor(\n sym_state_probs_1122[5] / sym_state_probs_1122[2])\n sym_state_recursive_ratios_right_1122[2, 2] = sym.factor(\n 
sym_state_probs_1122[6] / sym_state_probs_1122[3])\n sym_state_recursive_ratios_P0_1122 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1122[0, 0] = 1\n sym_state_recursive_ratios_P0_1122[0, 1] = sym.factor(\n sym_state_probs_1122[1] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[1, 1] = sym.factor(\n sym_state_probs_1122[2] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[2, 1] = sym.factor(\n sym_state_probs_1122[3] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[0, 2] = sym.factor(\n sym_state_probs_1122[4] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[1, 2] = sym.factor(\n sym_state_probs_1122[5] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[2, 2] = sym.factor(\n sym_state_probs_1122[6] / sym_state_probs_1122[0])\n return (sym_state_probs_1122, sym_state_recursive_ratios_1122,\n sym_state_recursive_ratios_right_1122,\n sym_state_recursive_ratios_P0_1122)\n\n\ndef get_symbolic_state_probabilities_1123():\n num_of_servers = 1\n threshold = 1\n system_capacity = 2\n buffer_capacity = 3\n Q_sym_1123 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p21, p31, p02, p12, p22, p32 = sym.symbols(\n 'p00, p01, p11, p21, p31, p02, p12, p22, p32')\n pi_1123 = sym.Matrix([p00, p01, p11, p21, p31, p02, p12, p22, p32])\n dimension_1123 = Q_sym_1123.shape[0]\n M_sym_1123 = sym.Matrix([Q_sym_1123.transpose()[:-1, :], sym.ones(1,\n dimension_1123)])\n sym_diff_equations_1123 = M_sym_1123 @ pi_1123\n b_sym_1123 = sym.Matrix([sym.zeros(dimension_1123 - 1, 1), [1]])\n eq0_1123 = sym.Eq(sym_diff_equations_1123[0], b_sym_1123[0])\n eq1_1123 = sym.Eq(sym_diff_equations_1123[1], b_sym_1123[1])\n eq2_1123 = sym.Eq(sym_diff_equations_1123[2], b_sym_1123[2])\n eq3_1123 = sym.Eq(sym_diff_equations_1123[3], b_sym_1123[3])\n eq4_1123 = sym.Eq(sym_diff_equations_1123[4], b_sym_1123[4])\n eq5_1123 = sym.Eq(sym_diff_equations_1123[5], b_sym_1123[5])\n eq6_1123 = sym.Eq(sym_diff_equations_1123[6], b_sym_1123[6])\n eq7_1123 = sym.Eq(sym_diff_equations_1123[7], b_sym_1123[7])\n eq8_1123 = sym.Eq(sym_diff_equations_1123[8], b_sym_1123[8])\n sym_state_probs_1123 = sym.solve([eq0_1123, eq1_1123, eq2_1123,\n eq3_1123, eq4_1123, eq5_1123, eq6_1123, eq7_1123, eq8_1123], (p00,\n p01, p11, p21, p31, p02, p12, p22, p32))\n sym_state_recursive_ratios_1123 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1123[0, 0] = 1\n sym_state_recursive_ratios_1123[0, 1] = sym.factor(sym_state_probs_1123\n [p01] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_1123[1, 1] = sym.factor(sym_state_probs_1123\n [p11] / sym_state_probs_1123[p01])\n sym_state_recursive_ratios_1123[2, 1] = sym.factor(sym_state_probs_1123\n [p21] / sym_state_probs_1123[p11])\n sym_state_recursive_ratios_1123[3, 1] = sym.factor(sym_state_probs_1123\n [p31] / sym_state_probs_1123[p21])\n sym_state_recursive_ratios_1123[0, 2] = sym.factor(sym_state_probs_1123\n [p02] / sym_state_probs_1123[p01])\n sym_state_recursive_ratios_1123[1, 2] = sym.factor(sym_state_probs_1123\n [p12] / sym_state_probs_1123[p02])\n sym_state_recursive_ratios_1123[2, 2] = sym.factor(sym_state_probs_1123\n [p22] / sym_state_probs_1123[p12])\n sym_state_recursive_ratios_1123[2, 2] = sym.factor(sym_state_probs_1123\n [p32] / sym_state_probs_1123[p22])\n sym_state_recursive_ratios_right_1123 = (sym_state_recursive_ratios_1123\n .copy())\n 
sym_state_recursive_ratios_right_1123[1, 2] = sym.factor(\n sym_state_probs_1123[p12] / sym_state_probs_1123[p11])\n sym_state_recursive_ratios_right_1123[2, 2] = sym.factor(\n sym_state_probs_1123[p22] / sym_state_probs_1123[p21])\n sym_state_recursive_ratios_right_1123[3, 2] = sym.factor(\n sym_state_probs_1123[p32] / sym_state_probs_1123[p22])\n sym_state_recursive_ratios_P0_1123 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1123[0, 0] = 1\n sym_state_recursive_ratios_P0_1123[0, 1] = sym.factor(\n sym_state_probs_1123[p01] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[1, 1] = sym.factor(\n sym_state_probs_1123[p11] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[2, 1] = sym.factor(\n sym_state_probs_1123[p21] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[3, 1] = sym.factor(\n sym_state_probs_1123[p31] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[0, 2] = sym.factor(\n sym_state_probs_1123[p02] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[1, 2] = sym.factor(\n sym_state_probs_1123[p12] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[2, 2] = sym.factor(\n sym_state_probs_1123[p22] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[3, 2] = sym.factor(\n sym_state_probs_1123[p32] / sym_state_probs_1123[p00])\n return (sym_state_probs_1123, sym_state_recursive_ratios_1123,\n sym_state_recursive_ratios_right_1123,\n sym_state_recursive_ratios_P0_1123)\n\n\ndef get_symbolic_state_probabilities_1341():\n threshold = 3\n system_capacity = 4\n buffer_capacity = 1\n all_states_1341 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1341 = [(0) for _ in range(len(all_states_1341))]\n sym_Lambda = sym.symbols('Lambda')\n sym_lambda_1 = sym.symbols('lambda_1')\n sym_lambda_2 = sym.symbols('lambda_2')\n sym_mu = sym.symbols('mu')\n sym_state_probs_1341[0] = sym_lambda_2 * sym_mu ** 5 + sym_mu ** 6\n sym_state_probs_1341[1\n ] = sym_Lambda * sym_lambda_2 * sym_mu ** 4 + sym_Lambda * sym_mu ** 5\n sym_state_probs_1341[2] = (sym_Lambda ** 2 * sym_lambda_2 * sym_mu ** 3 +\n sym_Lambda ** 2 * sym_mu ** 4)\n sym_state_probs_1341[3] = (sym_Lambda ** 3 * sym_lambda_2 * sym_mu ** 2 +\n sym_Lambda ** 3 * sym_mu ** 3)\n sym_state_probs_1341[4] = (sym_Lambda ** 3 * sym_lambda_1 *\n sym_lambda_2 * sym_mu + sym_Lambda ** 3 * sym_lambda_2 * sym_mu ** \n 2 + sym_Lambda ** 3 * sym_lambda_2 * sym_lambda_2 * sym_mu)\n sym_state_probs_1341[5] = sym_Lambda ** 3 * sym_lambda_1 * sym_mu ** 2\n sym_state_probs_1341[6] = (sym_Lambda ** 3 * sym_lambda_1 ** 2 *\n sym_lambda_2 + sym_Lambda ** 3 * sym_lambda_1 * sym_lambda_2 ** 2 +\n 2 * sym_Lambda ** 3 * sym_lambda_1 * sym_lambda_2 * sym_mu)\n total_1341 = np.sum(sym_state_probs_1341)\n sym_state_probs_1341 = [(i / total_1341) for i in sym_state_probs_1341]\n sym_state_recursive_ratios_1341 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1341[0, 0] = 1\n sym_state_recursive_ratios_1341[0, 1] = sym.factor(sym_state_probs_1341\n [1] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_1341[0, 2] = sym.factor(sym_state_probs_1341\n [2] / sym_state_probs_1341[1])\n sym_state_recursive_ratios_1341[0, 3] = sym.factor(sym_state_probs_1341\n [3] / sym_state_probs_1341[2])\n sym_state_recursive_ratios_1341[0, 4] = sym.factor(sym_state_probs_1341\n [5] / sym_state_probs_1341[3])\n 
sym_state_recursive_ratios_1341[1, 3] = sym.factor(sym_state_probs_1341\n [4] / sym_state_probs_1341[3])\n sym_state_recursive_ratios_1341[1, 4] = sym.factor(sym_state_probs_1341\n [6] / sym_state_probs_1341[5])\n sym_state_recursive_ratios_right_1341 = (sym_state_recursive_ratios_1341\n .copy())\n sym_state_recursive_ratios_right_1341[1, 4] = sym.factor(\n sym_state_probs_1341[6] / sym_state_probs_1341[4])\n sym_state_recursive_ratios_P0_1341 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1341[0, 0] = 1\n sym_state_recursive_ratios_P0_1341[0, 1] = sym.factor(\n sym_state_probs_1341[1] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[0, 2] = sym.factor(\n sym_state_probs_1341[2] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[0, 3] = sym.factor(\n sym_state_probs_1341[3] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[1, 3] = sym.factor(\n sym_state_probs_1341[4] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[0, 4] = sym.factor(\n sym_state_probs_1341[5] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[1, 4] = sym.factor(\n sym_state_probs_1341[6] / sym_state_probs_1341[0])\n return (sym_state_probs_1341, sym_state_recursive_ratios_1341,\n sym_state_recursive_ratios_right_1341,\n sym_state_recursive_ratios_P0_1341)\n\n\ndef get_symbolic_state_probabilities_1131():\n threshold = 1\n system_capacity = 3\n buffer_capacity = 1\n all_states_1131 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1131 = [(0) for _ in range(len(all_states_1131))]\n sym_Lambda = sym.symbols('Lambda')\n sym_lambda_1 = sym.symbols('lambda_1')\n sym_lambda_2 = sym.symbols('lambda_2')\n sym_mu = sym.symbols('mu')\n sym_state_probs_1131[0] = (sym_mu ** 6 + 2 * (sym_lambda_2 * sym_mu ** \n 5) + sym_lambda_2 ** 2 * sym_mu ** 4 + sym_lambda_1 * sym_lambda_2 *\n sym_mu ** 4)\n sym_state_probs_1131[1] = sym_state_probs_1131[0] * sym_Lambda / sym_mu\n sym_state_probs_1131[2] = (sym_Lambda * sym_lambda_1 ** 2 *\n sym_lambda_2 * sym_mu ** 2 + sym_Lambda * sym_lambda_2 *\n sym_lambda_1 * sym_mu ** 3 + 2 * (sym_Lambda * sym_lambda_1 * \n sym_lambda_2 ** 2 * sym_mu ** 2) + 2 * (sym_Lambda * sym_lambda_2 **\n 2 * sym_mu ** 3) + sym_Lambda * sym_lambda_2 ** 3 * sym_mu ** 2 + \n sym_Lambda * sym_lambda_2 * sym_mu ** 4)\n sym_state_probs_1131[3] = sym_Lambda * sym_lambda_1 * sym_mu ** 3 * (\n sym_lambda_2 + sym_mu)\n sym_state_probs_1131[4\n ] = sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu * (\n sym_lambda_2 ** 2 + 2 * sym_lambda_2 * sym_lambda_1 + 3 *\n sym_lambda_2 * sym_mu + sym_lambda_1 ** 2 + 2 * sym_lambda_1 *\n sym_mu + 2 * sym_mu ** 2)\n sym_state_probs_1131[5] = sym_Lambda * sym_lambda_1 ** 2 * sym_mu ** 3\n sym_state_probs_1131[6] = sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 2 * (\n sym_lambda_2 ** 2 + 2 * sym_lambda_2 * sym_lambda_1 + 3 *\n sym_lambda_2 * sym_mu + sym_lambda_1 ** 2 + 2 * sym_lambda_1 *\n sym_mu + 3 * sym_mu ** 2)\n denominator = (sym_Lambda * sym_lambda_2 ** 3 * sym_lambda_1 ** 2 + \n sym_Lambda * sym_lambda_2 ** 3 * sym_lambda_1 * sym_mu + sym_Lambda *\n sym_lambda_2 ** 3 * sym_mu ** 2 + 2 * sym_Lambda * sym_lambda_2 ** \n 2 * sym_lambda_1 ** 3 + 5 * sym_Lambda * sym_lambda_2 ** 2 * \n sym_lambda_1 ** 2 * sym_mu + 5 * sym_Lambda * sym_lambda_2 ** 2 *\n sym_lambda_1 * sym_mu ** 2 + 3 * sym_Lambda * sym_lambda_2 ** 2 * \n sym_mu ** 3 + sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 4 + 3 *\n sym_Lambda * 
sym_lambda_2 * sym_lambda_1 ** 3 * sym_mu + 6 *\n sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 2 * sym_mu ** 2 + 5 *\n sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu ** 3 + 3 *\n sym_Lambda * sym_lambda_2 * sym_mu ** 4 + sym_Lambda * sym_lambda_1 **\n 2 * sym_mu ** 3 + sym_Lambda * sym_lambda_1 * sym_mu ** 4 + \n sym_Lambda * sym_mu ** 5 + sym_lambda_2 ** 2 * sym_mu ** 4 + \n sym_lambda_2 * sym_lambda_1 * sym_mu ** 4 + 2 * sym_lambda_2 * \n sym_mu ** 5 + sym_mu ** 6)\n sym_state_probs_1131 = [(i / denominator) for i in sym_state_probs_1131]\n sym_state_recursive_ratios_1131 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1131[0, 0] = 1\n sym_state_recursive_ratios_1131[0, 1] = sym.factor(sym_state_probs_1131\n [1] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_1131[1, 1] = sym.factor(sym_state_probs_1131\n [2] / sym_state_probs_1131[1])\n sym_state_recursive_ratios_1131[0, 2] = sym.factor(sym_state_probs_1131\n [3] / sym_state_probs_1131[1])\n sym_state_recursive_ratios_1131[1, 2] = sym.factor(sym_state_probs_1131\n [4] / sym_state_probs_1131[3])\n sym_state_recursive_ratios_1131[0, 3] = sym.factor(sym_state_probs_1131\n [5] / sym_state_probs_1131[3])\n sym_state_recursive_ratios_1131[1, 3] = sym.factor(sym_state_probs_1131\n [6] / sym_state_probs_1131[5])\n sym_state_recursive_ratios_right_1131 = (sym_state_recursive_ratios_1131\n .copy())\n sym_state_recursive_ratios_right_1131[1, 2] = sym.factor(\n sym_state_probs_1131[4] / sym_state_probs_1131[2])\n sym_state_recursive_ratios_right_1131[1, 3] = sym.factor(\n sym_state_probs_1131[6] / sym_state_probs_1131[4])\n sym_state_recursive_ratios_P0_1131 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1131[0, 0] = 1\n sym_state_recursive_ratios_P0_1131[0, 1] = sym.factor(\n sym_state_probs_1131[1] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[1, 1] = sym.factor(\n sym_state_probs_1131[2] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[0, 2] = sym.factor(\n sym_state_probs_1131[3] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[1, 2] = sym.factor(\n sym_state_probs_1131[4] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[0, 3] = sym.factor(\n sym_state_probs_1131[5] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[1, 3] = sym.factor(\n sym_state_probs_1131[6] / sym_state_probs_1131[0])\n return (sym_state_probs_1131, sym_state_recursive_ratios_1131,\n sym_state_recursive_ratios_right_1131,\n sym_state_recursive_ratios_P0_1131)\n\n\ndef get_symbolic_state_probabilities_1132():\n num_of_servers = 1\n threshold = 1\n system_capacity = 3\n buffer_capacity = 2\n Q_sym_1132 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p21, p02, p12, p22, p03, p13, p23 = sym.symbols(\n 'p00, p01, p11, p21, p02, p12, p22, p03, p13, p23')\n pi_1132 = sym.Matrix([p00, p01, p11, p21, p02, p12, p22, p03, p13, p23])\n dimension_1132 = Q_sym_1132.shape[0]\n M_sym_1132 = sym.Matrix([Q_sym_1132.transpose()[:-1, :], sym.ones(1,\n dimension_1132)])\n sym_diff_equations_1132 = M_sym_1132 @ pi_1132\n b_sym_1132 = sym.Matrix([sym.zeros(dimension_1132 - 1, 1), [1]])\n eq0_1132 = sym.Eq(sym_diff_equations_1132[0], b_sym_1132[0])\n eq1_1132 = sym.Eq(sym_diff_equations_1132[1], b_sym_1132[1])\n eq2_1132 = sym.Eq(sym_diff_equations_1132[2], b_sym_1132[2])\n eq3_1132 = sym.Eq(sym_diff_equations_1132[3], b_sym_1132[3])\n eq4_1132 = 
sym.Eq(sym_diff_equations_1132[4], b_sym_1132[4])\n eq5_1132 = sym.Eq(sym_diff_equations_1132[5], b_sym_1132[5])\n eq6_1132 = sym.Eq(sym_diff_equations_1132[6], b_sym_1132[6])\n eq7_1132 = sym.Eq(sym_diff_equations_1132[7], b_sym_1132[7])\n eq8_1132 = sym.Eq(sym_diff_equations_1132[8], b_sym_1132[8])\n eq9_1132 = sym.Eq(sym_diff_equations_1132[9], b_sym_1132[9])\n sym_state_probs_1132 = sym.solve([eq0_1132, eq1_1132, eq2_1132,\n eq3_1132, eq4_1132, eq5_1132, eq6_1132, eq7_1132, eq8_1132,\n eq9_1132], (p00, p01, p11, p21, p02, p12, p22, p03, p13, p23))\n sym_state_recursive_ratios_1132 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1132[0, 0] = 1\n sym_state_recursive_ratios_1132[0, 1] = sym.factor(sym_state_probs_1132\n [p01] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_1132[1, 1] = sym.factor(sym_state_probs_1132\n [p11] / sym_state_probs_1132[p01])\n sym_state_recursive_ratios_1132[2, 1] = sym.factor(sym_state_probs_1132\n [p21] / sym_state_probs_1132[p11])\n sym_state_recursive_ratios_1132[0, 2] = sym.factor(sym_state_probs_1132\n [p02] / sym_state_probs_1132[p01])\n sym_state_recursive_ratios_1132[1, 2] = sym.factor(sym_state_probs_1132\n [p12] / sym_state_probs_1132[p02])\n sym_state_recursive_ratios_1132[2, 2] = sym.factor(sym_state_probs_1132\n [p22] / sym_state_probs_1132[p12])\n sym_state_recursive_ratios_1132[0, 3] = sym.factor(sym_state_probs_1132\n [p03] / sym_state_probs_1132[p02])\n sym_state_recursive_ratios_1132[1, 3] = sym.factor(sym_state_probs_1132\n [p13] / sym_state_probs_1132[p03])\n sym_state_recursive_ratios_1132[2, 3] = sym.factor(sym_state_probs_1132\n [p23] / sym_state_probs_1132[p13])\n sym_state_recursive_ratios_right_1132 = (sym_state_recursive_ratios_1132\n .copy())\n sym_state_recursive_ratios_right_1132[1, 2] = sym.factor(\n sym_state_probs_1132[p12] / sym_state_probs_1132[p11])\n sym_state_recursive_ratios_right_1132[1, 3] = sym.factor(\n sym_state_probs_1132[p13] / sym_state_probs_1132[p12])\n sym_state_recursive_ratios_right_1132[2, 2] = sym.factor(\n sym_state_probs_1132[p22] / sym_state_probs_1132[p21])\n sym_state_recursive_ratios_right_1132[2, 3] = sym.factor(\n sym_state_probs_1132[p23] / sym_state_probs_1132[p22])\n sym_state_recursive_ratios_P0_1132 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1132[0, 0] = 1\n sym_state_recursive_ratios_P0_1132[0, 1] = sym.factor(\n sym_state_probs_1132[p01] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[1, 1] = sym.factor(\n sym_state_probs_1132[p11] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[2, 1] = sym.factor(\n sym_state_probs_1132[p21] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[0, 2] = sym.factor(\n sym_state_probs_1132[p02] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[1, 2] = sym.factor(\n sym_state_probs_1132[p12] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[2, 2] = sym.factor(\n sym_state_probs_1132[p22] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[0, 3] = sym.factor(\n sym_state_probs_1132[p03] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[1, 3] = sym.factor(\n sym_state_probs_1132[p13] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[2, 3] = sym.factor(\n sym_state_probs_1132[p23] / sym_state_probs_1132[p00])\n return (sym_state_probs_1132, sym_state_recursive_ratios_1132,\n sym_state_recursive_ratios_right_1132,\n 
sym_state_recursive_ratios_P0_1132)\n\n\ndef get_symbolic_state_probabilities_1141():\n num_of_servers = 1\n threshold = 1\n system_capacity = 4\n buffer_capacity = 1\n Q_sym_1141 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p02, p12, p03, p13, p04, p14 = sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14')\n pi_1141 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14])\n dimension_1141 = Q_sym_1141.shape[0]\n M_sym_1141 = sym.Matrix([Q_sym_1141.transpose()[:-1, :], sym.ones(1,\n dimension_1141)])\n sym_diff_equations_1141 = M_sym_1141 @ pi_1141\n b_sym_1141 = sym.Matrix([sym.zeros(dimension_1141 - 1, 1), [1]])\n eq0_1141 = sym.Eq(sym_diff_equations_1141[0], b_sym_1141[0])\n eq1_1141 = sym.Eq(sym_diff_equations_1141[1], b_sym_1141[1])\n eq2_1141 = sym.Eq(sym_diff_equations_1141[2], b_sym_1141[2])\n eq3_1141 = sym.Eq(sym_diff_equations_1141[3], b_sym_1141[3])\n eq4_1141 = sym.Eq(sym_diff_equations_1141[4], b_sym_1141[4])\n eq5_1141 = sym.Eq(sym_diff_equations_1141[5], b_sym_1141[5])\n eq6_1141 = sym.Eq(sym_diff_equations_1141[6], b_sym_1141[6])\n eq7_1141 = sym.Eq(sym_diff_equations_1141[7], b_sym_1141[7])\n eq8_1141 = sym.Eq(sym_diff_equations_1141[8], b_sym_1141[8])\n sym_state_probs_1141 = sym.solve([eq0_1141, eq1_1141, eq2_1141,\n eq3_1141, eq4_1141, eq5_1141, eq6_1141, eq7_1141, eq8_1141], (p00,\n p01, p11, p02, p12, p03, p13, p04, p14))\n sym_state_recursive_ratios_1141 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1141[0, 0] = 1\n sym_state_recursive_ratios_1141[0, 1] = sym.factor(sym_state_probs_1141\n [p01] / sym_state_probs_1141[p00])\n sym_state_recursive_ratios_1141[1, 1] = sym.factor(sym_state_probs_1141\n [p11] / sym_state_probs_1141[p01])\n sym_state_recursive_ratios_1141[0, 2] = sym.factor(sym_state_probs_1141\n [p02] / sym_state_probs_1141[p01])\n sym_state_recursive_ratios_1141[1, 2] = sym.factor(sym_state_probs_1141\n [p12] / sym_state_probs_1141[p02])\n sym_state_recursive_ratios_1141[0, 3] = sym.factor(sym_state_probs_1141\n [p03] / sym_state_probs_1141[p02])\n sym_state_recursive_ratios_1141[1, 3] = sym.factor(sym_state_probs_1141\n [p13] / sym_state_probs_1141[p03])\n sym_state_recursive_ratios_1141[0, 4] = sym.factor(sym_state_probs_1141\n [p04] / sym_state_probs_1141[p03])\n sym_state_recursive_ratios_1141[1, 4] = sym.factor(sym_state_probs_1141\n [p14] / sym_state_probs_1141[p04])\n sym_state_recursive_ratios_right_1141 = (sym_state_recursive_ratios_1141\n .copy())\n sym_state_recursive_ratios_right_1141[1, 2] = sym.factor(\n sym_state_probs_1141[p12] / sym_state_probs_1141[p11])\n sym_state_recursive_ratios_right_1141[1, 3] = sym.factor(\n sym_state_probs_1141[p13] / sym_state_probs_1141[p12])\n sym_state_recursive_ratios_right_1141[1, 4] = sym.factor(\n sym_state_probs_1141[p14] / sym_state_probs_1141[p13])\n sym_state_recursive_ratios_P0_1141 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1141[0, 0] = 1\n sym_state_recursive_ratios_P0_1141[0, 1] = sym.factor(\n sym_state_probs_1141[p01] / sym_state_probs_1141[p00])\n sym_state_recursive_ratios_P0_1141[1, 1] = sym.factor(\n sym_state_probs_1141[p11] / sym_state_probs_1141[p00])\n sym_state_recursive_ratios_P0_1141[0, 2] = sym.factor(\n sym_state_probs_1141[p02] / sym_state_probs_1141[p00])\n sym_state_recursive_ratios_P0_1141[1, 2] = sym.factor(\n sym_state_probs_1141[p12] / sym_state_probs_1141[p00])\n sym_state_recursive_ratios_P0_1141[0, 
3] = sym.factor(\n sym_state_probs_1141[p03] / sym_state_probs_1141[p00])\n sym_state_recursive_ratios_P0_1141[1, 3] = sym.factor(\n sym_state_probs_1141[p13] / sym_state_probs_1141[p00])\n sym_state_recursive_ratios_P0_1141[0, 4] = sym.factor(\n sym_state_probs_1141[p04] / sym_state_probs_1141[p00])\n sym_state_recursive_ratios_P0_1141[1, 4] = sym.factor(\n sym_state_probs_1141[p14] / sym_state_probs_1141[p00])\n return (sym_state_probs_1141, sym_state_recursive_ratios_1141,\n sym_state_recursive_ratios_right_1141,\n sym_state_recursive_ratios_P0_1141)\n\n\ndef get_symbolic_state_probabilities_1142():\n num_of_servers = 1\n threshold = 1\n system_capacity = 4\n buffer_capacity = 2\n Q_sym_1142 = abg.markov.get_symbolic_transition_matrix(num_of_servers=\n num_of_servers, threshold=threshold, system_capacity=\n system_capacity, buffer_capacity=buffer_capacity)\n p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24 = (sym.\n symbols(\n 'p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24'))\n pi_1142 = sym.Matrix([p00, p01, p11, p21, p02, p12, p22, p03, p13, p23,\n p04, p14, p24])\n dimension_1142 = Q_sym_1142.shape[0]\n M_sym_1142 = sym.Matrix([Q_sym_1142.transpose()[:-1, :], sym.ones(1,\n dimension_1142)])\n sym_diff_equations_1142 = M_sym_1142 @ pi_1142\n b_sym_1142 = sym.Matrix([sym.zeros(dimension_1142 - 1, 1), [1]])\n eq0_1142 = sym.Eq(sym_diff_equations_1142[0], b_sym_1142[0])\n eq1_1142 = sym.Eq(sym_diff_equations_1142[1], b_sym_1142[1])\n eq2_1142 = sym.Eq(sym_diff_equations_1142[2], b_sym_1142[2])\n eq3_1142 = sym.Eq(sym_diff_equations_1142[3], b_sym_1142[3])\n eq4_1142 = sym.Eq(sym_diff_equations_1142[4], b_sym_1142[4])\n eq5_1142 = sym.Eq(sym_diff_equations_1142[5], b_sym_1142[5])\n eq6_1142 = sym.Eq(sym_diff_equations_1142[6], b_sym_1142[6])\n eq7_1142 = sym.Eq(sym_diff_equations_1142[7], b_sym_1142[7])\n eq8_1142 = sym.Eq(sym_diff_equations_1142[8], b_sym_1142[8])\n eq9_1142 = sym.Eq(sym_diff_equations_1142[9], b_sym_1142[9])\n eq10_1142 = sym.Eq(sym_diff_equations_1142[10], b_sym_1142[10])\n eq11_1142 = sym.Eq(sym_diff_equations_1142[11], b_sym_1142[11])\n eq12_1142 = sym.Eq(sym_diff_equations_1142[12], b_sym_1142[12])\n sym_state_probs_1142 = sym.solve([eq0_1142, eq1_1142, eq2_1142,\n eq3_1142, eq4_1142, eq5_1142, eq6_1142, eq7_1142, eq8_1142,\n eq9_1142, eq10_1142, eq11_1142, eq12_1142], (p00, p01, p11, p21,\n p02, p12, p22, p03, p13, p23, p04, p14, p24))\n sym_state_recursive_ratios_1142 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1142[0, 0] = 1\n sym_state_recursive_ratios_1142[0, 1] = sym.factor(sym_state_probs_1142\n [p01] / sym_state_probs_1142[p00])\n sym_state_recursive_ratios_1142[1, 1] = sym.factor(sym_state_probs_1142\n [p11] / sym_state_probs_1142[p01])\n sym_state_recursive_ratios_1142[2, 1] = sym.factor(sym_state_probs_1142\n [p21] / sym_state_probs_1142[p11])\n sym_state_recursive_ratios_1142[0, 2] = sym.factor(sym_state_probs_1142\n [p02] / sym_state_probs_1142[p01])\n sym_state_recursive_ratios_1142[1, 2] = sym.factor(sym_state_probs_1142\n [p12] / sym_state_probs_1142[p02])\n sym_state_recursive_ratios_1142[2, 2] = sym.factor(sym_state_probs_1142\n [p22] / sym_state_probs_1142[p12])\n sym_state_recursive_ratios_1142[0, 3] = sym.factor(sym_state_probs_1142\n [p03] / sym_state_probs_1142[p02])\n sym_state_recursive_ratios_1142[1, 3] = sym.factor(sym_state_probs_1142\n [p13] / sym_state_probs_1142[p03])\n sym_state_recursive_ratios_1142[2, 3] = sym.factor(sym_state_probs_1142\n [p23] / 
sym_state_probs_1142[p13])\n sym_state_recursive_ratios_1142[0, 4] = sym.factor(sym_state_probs_1142\n [p04] / sym_state_probs_1142[p03])\n sym_state_recursive_ratios_1142[1, 4] = sym.factor(sym_state_probs_1142\n [p14] / sym_state_probs_1142[p04])\n sym_state_recursive_ratios_1142[2, 4] = sym.factor(sym_state_probs_1142\n [p24] / sym_state_probs_1142[p14])\n sym_state_recursive_ratios_right_1142 = (sym_state_recursive_ratios_1142\n .copy())\n sym_state_recursive_ratios_right_1142[1, 2] = sym.factor(\n sym_state_probs_1142[p12] / sym_state_probs_1142[p11])\n sym_state_recursive_ratios_right_1142[1, 3] = sym.factor(\n sym_state_probs_1142[p13] / sym_state_probs_1142[p12])\n sym_state_recursive_ratios_right_1142[1, 4] = sym.factor(\n sym_state_probs_1142[p14] / sym_state_probs_1142[p13])\n sym_state_recursive_ratios_right_1142[2, 2] = sym.factor(\n sym_state_probs_1142[p22] / sym_state_probs_1142[p21])\n sym_state_recursive_ratios_right_1142[2, 3] = sym.factor(\n sym_state_probs_1142[p23] / sym_state_probs_1142[p22])\n sym_state_recursive_ratios_right_1142[2, 4] = sym.factor(\n sym_state_probs_1142[p24] / sym_state_probs_1142[p23])\n sym_state_recursive_ratios_P0_1142 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1142[0, 0] = 1\n sym_state_recursive_ratios_P0_1142[0, 1] = sym.factor(\n sym_state_probs_1142[p01] / sym_state_probs_1142[p00])\n sym_state_recursive_ratios_P0_1142[1, 1] = sym.factor(\n sym_state_probs_1142[p11] / sym_state_probs_1142[p00])\n sym_state_recursive_ratios_P0_1142[2, 1] = sym.factor(\n sym_state_probs_1142[p21] / sym_state_probs_1142[p00])\n sym_state_recursive_ratios_P0_1142[0, 2] = sym.factor(\n sym_state_probs_1142[p02] / sym_state_probs_1142[p00])\n sym_state_recursive_ratios_P0_1142[1, 2] = sym.factor(\n sym_state_probs_1142[p12] / sym_state_probs_1142[p00])\n sym_state_recursive_ratios_P0_1142[2, 2] = sym.factor(\n sym_state_probs_1142[p22] / sym_state_probs_1142[p00])\n sym_state_recursive_ratios_P0_1142[0, 3] = sym.factor(\n sym_state_probs_1142[p03] / sym_state_probs_1142[p00])\n sym_state_recursive_ratios_P0_1142[1, 3] = sym.factor(\n sym_state_probs_1142[p13] / sym_state_probs_1142[p00])\n sym_state_recursive_ratios_P0_1142[2, 3] = sym.factor(\n sym_state_probs_1142[p23] / sym_state_probs_1142[p00])\n sym_state_recursive_ratios_P0_1142[0, 4] = sym.factor(\n sym_state_probs_1142[p04] / sym_state_probs_1142[p00])\n sym_state_recursive_ratios_P0_1142[1, 4] = sym.factor(\n sym_state_probs_1142[p14] / sym_state_probs_1142[p00])\n sym_state_recursive_ratios_P0_1142[2, 4] = sym.factor(\n sym_state_probs_1142[p24] / sym_state_probs_1142[p00])\n return (sym_state_probs_1142, sym_state_recursive_ratios_1142,\n sym_state_recursive_ratios_right_1142,\n sym_state_recursive_ratios_P0_1142)\n\n\ndef get_symbolic_state_probabilities_1151():\n num_of_servers = 1\n threshold = 1\n system_capacity = 5\n buffer_capacity = 1\n Q_sym_1151 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15 = sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15')\n pi_1151 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15])\n dimension_1151 = Q_sym_1151.shape[0]\n M_sym_1151 = sym.Matrix([Q_sym_1151.transpose()[:-1, :], sym.ones(1,\n dimension_1151)])\n sym_diff_equations_1151 = M_sym_1151 @ pi_1151\n b_sym_1151 = sym.Matrix([sym.zeros(dimension_1151 - 1, 1), [1]])\n eq0_1151 = 
sym.Eq(sym_diff_equations_1151[0], b_sym_1151[0])\n eq1_1151 = sym.Eq(sym_diff_equations_1151[1], b_sym_1151[1])\n eq2_1151 = sym.Eq(sym_diff_equations_1151[2], b_sym_1151[2])\n eq3_1151 = sym.Eq(sym_diff_equations_1151[3], b_sym_1151[3])\n eq4_1151 = sym.Eq(sym_diff_equations_1151[4], b_sym_1151[4])\n eq5_1151 = sym.Eq(sym_diff_equations_1151[5], b_sym_1151[5])\n eq6_1151 = sym.Eq(sym_diff_equations_1151[6], b_sym_1151[6])\n eq7_1151 = sym.Eq(sym_diff_equations_1151[7], b_sym_1151[7])\n eq8_1151 = sym.Eq(sym_diff_equations_1151[8], b_sym_1151[8])\n eq9_1151 = sym.Eq(sym_diff_equations_1151[9], b_sym_1151[9])\n eq10_1151 = sym.Eq(sym_diff_equations_1151[10], b_sym_1151[10])\n sym_state_probs_1151 = sym.solve([eq0_1151, eq1_1151, eq2_1151,\n eq3_1151, eq4_1151, eq5_1151, eq6_1151, eq7_1151, eq8_1151,\n eq9_1151, eq10_1151], (p00, p01, p11, p02, p12, p03, p13, p04, p14,\n p05, p15))\n sym_state_recursive_ratios_1151 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1151[0, 0] = 1\n sym_state_recursive_ratios_1151[0, 1] = sym.factor(sym_state_probs_1151\n [p01] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_1151[1, 1] = sym.factor(sym_state_probs_1151\n [p11] / sym_state_probs_1151[p01])\n sym_state_recursive_ratios_1151[0, 2] = sym.factor(sym_state_probs_1151\n [p02] / sym_state_probs_1151[p01])\n sym_state_recursive_ratios_1151[1, 2] = sym.factor(sym_state_probs_1151\n [p12] / sym_state_probs_1151[p02])\n sym_state_recursive_ratios_1151[0, 3] = sym.factor(sym_state_probs_1151\n [p03] / sym_state_probs_1151[p02])\n sym_state_recursive_ratios_1151[1, 3] = sym.factor(sym_state_probs_1151\n [p13] / sym_state_probs_1151[p03])\n sym_state_recursive_ratios_1151[0, 4] = sym.factor(sym_state_probs_1151\n [p04] / sym_state_probs_1151[p03])\n sym_state_recursive_ratios_1151[1, 4] = sym.factor(sym_state_probs_1151\n [p14] / sym_state_probs_1151[p04])\n sym_state_recursive_ratios_1151[0, 5] = sym.factor(sym_state_probs_1151\n [p05] / sym_state_probs_1151[p04])\n sym_state_recursive_ratios_1151[1, 5] = sym.factor(sym_state_probs_1151\n [p15] / sym_state_probs_1151[p05])\n sym_state_recursive_ratios_right_1151 = (sym_state_recursive_ratios_1151\n .copy())\n sym_state_recursive_ratios_right_1151[1, 2] = sym.factor(\n sym_state_probs_1151[p12] / sym_state_probs_1151[p11])\n sym_state_recursive_ratios_right_1151[1, 3] = sym.factor(\n sym_state_probs_1151[p13] / sym_state_probs_1151[p12])\n sym_state_recursive_ratios_right_1151[1, 4] = sym.factor(\n sym_state_probs_1151[p14] / sym_state_probs_1151[p13])\n sym_state_recursive_ratios_right_1151[1, 5] = sym.factor(\n sym_state_probs_1151[p15] / sym_state_probs_1151[p14])\n sym_state_recursive_ratios_P0_1151 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1151[0, 0] = 1\n sym_state_recursive_ratios_P0_1151[0, 1] = sym.factor(\n sym_state_probs_1151[p01] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 1] = sym.factor(\n sym_state_probs_1151[p11] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[0, 2] = sym.factor(\n sym_state_probs_1151[p02] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 2] = sym.factor(\n sym_state_probs_1151[p12] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[0, 3] = sym.factor(\n sym_state_probs_1151[p03] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 3] = sym.factor(\n sym_state_probs_1151[p13] / sym_state_probs_1151[p00])\n 
sym_state_recursive_ratios_P0_1151[0, 4] = sym.factor(\n sym_state_probs_1151[p04] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 4] = sym.factor(\n sym_state_probs_1151[p14] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[0, 5] = sym.factor(\n sym_state_probs_1151[p05] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 5] = sym.factor(\n sym_state_probs_1151[p15] / sym_state_probs_1151[p00])\n return (sym_state_probs_1151, sym_state_recursive_ratios_1151,\n sym_state_recursive_ratios_right_1151,\n sym_state_recursive_ratios_P0_1151)\n\n\ndef get_symbolic_state_probabilities_1161():\n num_of_servers = 1\n threshold = 1\n system_capacity = 6\n buffer_capacity = 1\n Q_sym_1161 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16 = (sym.\n symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16'))\n pi_1161 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16])\n dimension_1161 = Q_sym_1161.shape[0]\n M_sym_1161 = sym.Matrix([Q_sym_1161.transpose()[:-1, :], sym.ones(1,\n dimension_1161)])\n sym_diff_equations_1161 = M_sym_1161 @ pi_1161\n b_sym_1161 = sym.Matrix([sym.zeros(dimension_1161 - 1, 1), [1]])\n eq0_1161 = sym.Eq(sym_diff_equations_1161[0], b_sym_1161[0])\n eq1_1161 = sym.Eq(sym_diff_equations_1161[1], b_sym_1161[1])\n eq2_1161 = sym.Eq(sym_diff_equations_1161[2], b_sym_1161[2])\n eq3_1161 = sym.Eq(sym_diff_equations_1161[3], b_sym_1161[3])\n eq4_1161 = sym.Eq(sym_diff_equations_1161[4], b_sym_1161[4])\n eq5_1161 = sym.Eq(sym_diff_equations_1161[5], b_sym_1161[5])\n eq6_1161 = sym.Eq(sym_diff_equations_1161[6], b_sym_1161[6])\n eq7_1161 = sym.Eq(sym_diff_equations_1161[7], b_sym_1161[7])\n eq8_1161 = sym.Eq(sym_diff_equations_1161[8], b_sym_1161[8])\n eq9_1161 = sym.Eq(sym_diff_equations_1161[9], b_sym_1161[9])\n eq10_1161 = sym.Eq(sym_diff_equations_1161[10], b_sym_1161[10])\n eq11_1161 = sym.Eq(sym_diff_equations_1161[11], b_sym_1161[11])\n eq12_1161 = sym.Eq(sym_diff_equations_1161[12], b_sym_1161[12])\n sym_state_probs_1161 = sym.solve([eq0_1161, eq1_1161, eq2_1161,\n eq3_1161, eq4_1161, eq5_1161, eq6_1161, eq7_1161, eq8_1161,\n eq9_1161, eq10_1161, eq11_1161, eq12_1161], (p00, p01, p11, p02,\n p12, p03, p13, p04, p14, p05, p15, p06, p16))\n sym_state_recursive_ratios_1161 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1161[0, 0] = 1\n sym_state_recursive_ratios_1161[0, 1] = sym.factor(sym_state_probs_1161\n [p01] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_1161[1, 1] = sym.factor(sym_state_probs_1161\n [p11] / sym_state_probs_1161[p01])\n sym_state_recursive_ratios_1161[0, 2] = sym.factor(sym_state_probs_1161\n [p02] / sym_state_probs_1161[p01])\n sym_state_recursive_ratios_1161[1, 2] = sym.factor(sym_state_probs_1161\n [p12] / sym_state_probs_1161[p02])\n sym_state_recursive_ratios_1161[0, 3] = sym.factor(sym_state_probs_1161\n [p03] / sym_state_probs_1161[p02])\n sym_state_recursive_ratios_1161[1, 3] = sym.factor(sym_state_probs_1161\n [p13] / sym_state_probs_1161[p03])\n sym_state_recursive_ratios_1161[0, 4] = sym.factor(sym_state_probs_1161\n [p04] / sym_state_probs_1161[p03])\n sym_state_recursive_ratios_1161[1, 4] = sym.factor(sym_state_probs_1161\n [p14] / sym_state_probs_1161[p04])\n sym_state_recursive_ratios_1161[0, 5] = sym.factor(sym_state_probs_1161\n [p05] / sym_state_probs_1161[p04])\n 
sym_state_recursive_ratios_1161[1, 5] = sym.factor(sym_state_probs_1161\n [p15] / sym_state_probs_1161[p05])\n sym_state_recursive_ratios_1161[0, 6] = sym.factor(sym_state_probs_1161\n [p06] / sym_state_probs_1161[p05])\n sym_state_recursive_ratios_1161[1, 6] = sym.factor(sym_state_probs_1161\n [p16] / sym_state_probs_1161[p06])\n sym_state_recursive_ratios_right_1161 = (sym_state_recursive_ratios_1161\n .copy())\n sym_state_recursive_ratios_right_1161[1, 2] = sym.factor(\n sym_state_probs_1161[p12] / sym_state_probs_1161[p11])\n sym_state_recursive_ratios_right_1161[1, 3] = sym.factor(\n sym_state_probs_1161[p13] / sym_state_probs_1161[p12])\n sym_state_recursive_ratios_right_1161[1, 4] = sym.factor(\n sym_state_probs_1161[p14] / sym_state_probs_1161[p13])\n sym_state_recursive_ratios_right_1161[1, 5] = sym.factor(\n sym_state_probs_1161[p15] / sym_state_probs_1161[p14])\n sym_state_recursive_ratios_right_1161[1, 6] = sym.factor(\n sym_state_probs_1161[p16] / sym_state_probs_1161[p15])\n sym_state_recursive_ratios_P0_1161 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1161[0, 0] = 1\n sym_state_recursive_ratios_P0_1161[0, 1] = sym.factor(\n sym_state_probs_1161[p01] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 1] = sym.factor(\n sym_state_probs_1161[p11] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 2] = sym.factor(\n sym_state_probs_1161[p02] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 2] = sym.factor(\n sym_state_probs_1161[p12] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 3] = sym.factor(\n sym_state_probs_1161[p03] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 3] = sym.factor(\n sym_state_probs_1161[p13] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 4] = sym.factor(\n sym_state_probs_1161[p04] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 4] = sym.factor(\n sym_state_probs_1161[p14] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 5] = sym.factor(\n sym_state_probs_1161[p05] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 5] = sym.factor(\n sym_state_probs_1161[p15] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 6] = sym.factor(\n sym_state_probs_1161[p06] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 6] = sym.factor(\n sym_state_probs_1161[p16] / sym_state_probs_1161[p00])\n return (sym_state_probs_1161, sym_state_recursive_ratios_1161,\n sym_state_recursive_ratios_right_1161,\n sym_state_recursive_ratios_P0_1161)\n\n\ndef get_symbolic_state_probabilities_1171():\n num_of_servers = 1\n threshold = 1\n system_capacity = 7\n buffer_capacity = 1\n Q_sym_1171 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17\n ) = (sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17'\n ))\n pi_1171 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16, p07, p17])\n dimension_1171 = Q_sym_1171.shape[0]\n M_sym_1171 = sym.Matrix([Q_sym_1171.transpose()[:-1, :], sym.ones(1,\n dimension_1171)])\n sym_diff_equations_1171 = M_sym_1171 @ pi_1171\n b_sym_1171 = sym.Matrix([sym.zeros(dimension_1171 - 1, 1), [1]])\n eq0_1171 = sym.Eq(sym_diff_equations_1171[0], b_sym_1171[0])\n eq1_1171 = sym.Eq(sym_diff_equations_1171[1], 
b_sym_1171[1])\n eq2_1171 = sym.Eq(sym_diff_equations_1171[2], b_sym_1171[2])\n eq3_1171 = sym.Eq(sym_diff_equations_1171[3], b_sym_1171[3])\n eq4_1171 = sym.Eq(sym_diff_equations_1171[4], b_sym_1171[4])\n eq5_1171 = sym.Eq(sym_diff_equations_1171[5], b_sym_1171[5])\n eq6_1171 = sym.Eq(sym_diff_equations_1171[6], b_sym_1171[6])\n eq7_1171 = sym.Eq(sym_diff_equations_1171[7], b_sym_1171[7])\n eq8_1171 = sym.Eq(sym_diff_equations_1171[8], b_sym_1171[8])\n eq9_1171 = sym.Eq(sym_diff_equations_1171[9], b_sym_1171[9])\n eq10_1171 = sym.Eq(sym_diff_equations_1171[10], b_sym_1171[10])\n eq11_1171 = sym.Eq(sym_diff_equations_1171[11], b_sym_1171[11])\n eq12_1171 = sym.Eq(sym_diff_equations_1171[12], b_sym_1171[12])\n eq13_1171 = sym.Eq(sym_diff_equations_1171[13], b_sym_1171[13])\n eq14_1171 = sym.Eq(sym_diff_equations_1171[14], b_sym_1171[14])\n sym_state_probs_1171 = sym.solve([eq0_1171, eq1_1171, eq2_1171,\n eq3_1171, eq4_1171, eq5_1171, eq6_1171, eq7_1171, eq8_1171,\n eq9_1171, eq10_1171, eq11_1171, eq12_1171, eq13_1171, eq14_1171], (\n p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16,\n p07, p17))\n sym_state_recursive_ratios_1171 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1171[0, 0] = 1\n sym_state_recursive_ratios_1171[0, 1] = sym.factor(sym_state_probs_1171\n [p01] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_1171[1, 1] = sym.factor(sym_state_probs_1171\n [p11] / sym_state_probs_1171[p01])\n sym_state_recursive_ratios_1171[0, 2] = sym.factor(sym_state_probs_1171\n [p02] / sym_state_probs_1171[p01])\n sym_state_recursive_ratios_1171[1, 2] = sym.factor(sym_state_probs_1171\n [p12] / sym_state_probs_1171[p02])\n sym_state_recursive_ratios_1171[0, 3] = sym.factor(sym_state_probs_1171\n [p03] / sym_state_probs_1171[p02])\n sym_state_recursive_ratios_1171[1, 3] = sym.factor(sym_state_probs_1171\n [p13] / sym_state_probs_1171[p03])\n sym_state_recursive_ratios_1171[0, 4] = sym.factor(sym_state_probs_1171\n [p04] / sym_state_probs_1171[p03])\n sym_state_recursive_ratios_1171[1, 4] = sym.factor(sym_state_probs_1171\n [p14] / sym_state_probs_1171[p04])\n sym_state_recursive_ratios_1171[0, 5] = sym.factor(sym_state_probs_1171\n [p05] / sym_state_probs_1171[p04])\n sym_state_recursive_ratios_1171[1, 5] = sym.factor(sym_state_probs_1171\n [p15] / sym_state_probs_1171[p05])\n sym_state_recursive_ratios_1171[0, 6] = sym.factor(sym_state_probs_1171\n [p06] / sym_state_probs_1171[p05])\n sym_state_recursive_ratios_1171[1, 6] = sym.factor(sym_state_probs_1171\n [p16] / sym_state_probs_1171[p06])\n sym_state_recursive_ratios_1171[0, 7] = sym.factor(sym_state_probs_1171\n [p07] / sym_state_probs_1171[p06])\n sym_state_recursive_ratios_1171[1, 7] = sym.factor(sym_state_probs_1171\n [p17] / sym_state_probs_1171[p07])\n sym_state_recursive_ratios_right_1171 = (sym_state_recursive_ratios_1171\n .copy())\n sym_state_recursive_ratios_right_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p11])\n sym_state_recursive_ratios_right_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p12])\n sym_state_recursive_ratios_right_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p13])\n sym_state_recursive_ratios_right_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p14])\n sym_state_recursive_ratios_right_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p15])\n sym_state_recursive_ratios_right_1171[1, 7] = sym.factor(\n 
sym_state_probs_1171[p17] / sym_state_probs_1171[p16])\n sym_state_recursive_ratios_P0_1171 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1171[0, 0] = 1\n sym_state_recursive_ratios_P0_1171[0, 1] = sym.factor(\n sym_state_probs_1171[p01] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 1] = sym.factor(\n sym_state_probs_1171[p11] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 2] = sym.factor(\n sym_state_probs_1171[p02] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 3] = sym.factor(\n sym_state_probs_1171[p03] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 4] = sym.factor(\n sym_state_probs_1171[p04] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 5] = sym.factor(\n sym_state_probs_1171[p05] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 6] = sym.factor(\n sym_state_probs_1171[p06] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 7] = sym.factor(\n sym_state_probs_1171[p07] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 7] = sym.factor(\n sym_state_probs_1171[p17] / sym_state_probs_1171[p00])\n return (sym_state_probs_1171, sym_state_recursive_ratios_1171,\n sym_state_recursive_ratios_right_1171,\n sym_state_recursive_ratios_P0_1171)\n\n\ndef get_symbolic_state_probabilities_1181():\n num_of_servers = 1\n threshold = 1\n system_capacity = 8\n buffer_capacity = 1\n Q_sym_1181 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07,\n p17, p08, p18) = (sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18'\n ))\n pi_1181 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16, p07, p17, p08, p18])\n dimension_1181 = Q_sym_1181.shape[0]\n M_sym_1181 = sym.Matrix([Q_sym_1181.transpose()[:-1, :], sym.ones(1,\n dimension_1181)])\n sym_diff_equations_1181 = M_sym_1181 @ pi_1181\n b_sym_1181 = sym.Matrix([sym.zeros(dimension_1181 - 1, 1), [1]])\n eq0_1181 = sym.Eq(sym_diff_equations_1181[0], b_sym_1181[0])\n eq1_1181 = sym.Eq(sym_diff_equations_1181[1], b_sym_1181[1])\n eq2_1181 = sym.Eq(sym_diff_equations_1181[2], b_sym_1181[2])\n eq3_1181 = sym.Eq(sym_diff_equations_1181[3], b_sym_1181[3])\n eq4_1181 = sym.Eq(sym_diff_equations_1181[4], b_sym_1181[4])\n eq5_1181 = sym.Eq(sym_diff_equations_1181[5], b_sym_1181[5])\n eq6_1181 = sym.Eq(sym_diff_equations_1181[6], b_sym_1181[6])\n eq7_1181 = sym.Eq(sym_diff_equations_1181[7], b_sym_1181[7])\n eq8_1181 = sym.Eq(sym_diff_equations_1181[8], b_sym_1181[8])\n eq9_1181 = sym.Eq(sym_diff_equations_1181[9], b_sym_1181[9])\n eq10_1181 = sym.Eq(sym_diff_equations_1181[10], b_sym_1181[10])\n eq11_1181 = sym.Eq(sym_diff_equations_1181[11], b_sym_1181[11])\n eq12_1181 = 
sym.Eq(sym_diff_equations_1181[12], b_sym_1181[12])\n eq13_1181 = sym.Eq(sym_diff_equations_1181[13], b_sym_1181[13])\n eq14_1181 = sym.Eq(sym_diff_equations_1181[14], b_sym_1181[14])\n eq15_1181 = sym.Eq(sym_diff_equations_1181[15], b_sym_1181[15])\n eq16_1181 = sym.Eq(sym_diff_equations_1181[16], b_sym_1181[16])\n sym_state_probs_1181 = sym.solve([eq0_1181, eq1_1181, eq2_1181,\n eq3_1181, eq4_1181, eq5_1181, eq6_1181, eq7_1181, eq8_1181,\n eq9_1181, eq10_1181, eq11_1181, eq12_1181, eq13_1181, eq14_1181,\n eq15_1181, eq16_1181], (p00, p01, p11, p02, p12, p03, p13, p04, p14,\n p05, p15, p06, p16, p07, p17, p08, p18))\n sym_state_recursive_ratios_1181 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1181[0, 0] = 1\n sym_state_recursive_ratios_1181[0, 1] = sym.factor(sym_state_probs_1181\n [p01] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_1181[1, 1] = sym.factor(sym_state_probs_1181\n [p11] / sym_state_probs_1181[p01])\n sym_state_recursive_ratios_1181[0, 2] = sym.factor(sym_state_probs_1181\n [p02] / sym_state_probs_1181[p01])\n sym_state_recursive_ratios_1181[1, 2] = sym.factor(sym_state_probs_1181\n [p12] / sym_state_probs_1181[p02])\n sym_state_recursive_ratios_1181[0, 3] = sym.factor(sym_state_probs_1181\n [p03] / sym_state_probs_1181[p02])\n sym_state_recursive_ratios_1181[1, 3] = sym.factor(sym_state_probs_1181\n [p13] / sym_state_probs_1181[p03])\n sym_state_recursive_ratios_1181[0, 4] = sym.factor(sym_state_probs_1181\n [p04] / sym_state_probs_1181[p03])\n sym_state_recursive_ratios_1181[1, 4] = sym.factor(sym_state_probs_1181\n [p14] / sym_state_probs_1181[p04])\n sym_state_recursive_ratios_1181[0, 5] = sym.factor(sym_state_probs_1181\n [p05] / sym_state_probs_1181[p04])\n sym_state_recursive_ratios_1181[1, 5] = sym.factor(sym_state_probs_1181\n [p15] / sym_state_probs_1181[p05])\n sym_state_recursive_ratios_1181[0, 6] = sym.factor(sym_state_probs_1181\n [p06] / sym_state_probs_1181[p05])\n sym_state_recursive_ratios_1181[1, 6] = sym.factor(sym_state_probs_1181\n [p16] / sym_state_probs_1181[p06])\n sym_state_recursive_ratios_1181[0, 7] = sym.factor(sym_state_probs_1181\n [p07] / sym_state_probs_1181[p06])\n sym_state_recursive_ratios_1181[1, 7] = sym.factor(sym_state_probs_1181\n [p17] / sym_state_probs_1181[p07])\n sym_state_recursive_ratios_1181[0, 8] = sym.factor(sym_state_probs_1181\n [p08] / sym_state_probs_1181[p07])\n sym_state_recursive_ratios_1181[1, 8] = sym.factor(sym_state_probs_1181\n [p18] / sym_state_probs_1181[p08])\n sym_state_recursive_ratios_right_1181 = (sym_state_recursive_ratios_1181\n .copy())\n sym_state_recursive_ratios_right_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p11])\n sym_state_recursive_ratios_right_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p12])\n sym_state_recursive_ratios_right_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p13])\n sym_state_recursive_ratios_right_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p14])\n sym_state_recursive_ratios_right_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p15])\n sym_state_recursive_ratios_right_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p16])\n sym_state_recursive_ratios_right_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p17])\n sym_state_recursive_ratios_P0_1181 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n 
sym_state_recursive_ratios_P0_1181[0, 0] = 1\n sym_state_recursive_ratios_P0_1181[0, 1] = sym.factor(\n sym_state_probs_1181[p01] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 1] = sym.factor(\n sym_state_probs_1181[p11] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 2] = sym.factor(\n sym_state_probs_1181[p02] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 3] = sym.factor(\n sym_state_probs_1181[p03] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 4] = sym.factor(\n sym_state_probs_1181[p04] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 5] = sym.factor(\n sym_state_probs_1181[p05] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 6] = sym.factor(\n sym_state_probs_1181[p06] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 7] = sym.factor(\n sym_state_probs_1181[p07] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 8] = sym.factor(\n sym_state_probs_1181[p08] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p00])\n return (sym_state_probs_1181, sym_state_recursive_ratios_1181,\n sym_state_recursive_ratios_right_1181,\n sym_state_recursive_ratios_P0_1181)\n\n\ndef get_symbolic_state_probabilities_1191():\n num_of_servers = 1\n threshold = 1\n system_capacity = 9\n buffer_capacity = 1\n Q_sym_1191 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07,\n p17, p08, p18, p09, p19) = (sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18, p09, p19'\n ))\n pi_1191 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16, p07, p17, p08, p18, p09, p19])\n dimension_1191 = Q_sym_1191.shape[0]\n M_sym_1191 = sym.Matrix([Q_sym_1191.transpose()[:-1, :], sym.ones(1,\n dimension_1191)])\n sym_diff_equations_1191 = M_sym_1191 @ pi_1191\n b_sym_1191 = sym.Matrix([sym.zeros(dimension_1191 - 1, 1), [1]])\n eq0_1191 = sym.Eq(sym_diff_equations_1191[0], b_sym_1191[0])\n eq1_1191 = sym.Eq(sym_diff_equations_1191[1], b_sym_1191[1])\n eq2_1191 = sym.Eq(sym_diff_equations_1191[2], b_sym_1191[2])\n eq3_1191 = sym.Eq(sym_diff_equations_1191[3], b_sym_1191[3])\n eq4_1191 = sym.Eq(sym_diff_equations_1191[4], b_sym_1191[4])\n eq5_1191 = sym.Eq(sym_diff_equations_1191[5], b_sym_1191[5])\n eq6_1191 = sym.Eq(sym_diff_equations_1191[6], b_sym_1191[6])\n eq7_1191 = sym.Eq(sym_diff_equations_1191[7], b_sym_1191[7])\n eq8_1191 = sym.Eq(sym_diff_equations_1191[8], b_sym_1191[8])\n eq9_1191 = sym.Eq(sym_diff_equations_1191[9], b_sym_1191[9])\n eq10_1191 = sym.Eq(sym_diff_equations_1191[10], b_sym_1191[10])\n 
eq11_1191 = sym.Eq(sym_diff_equations_1191[11], b_sym_1191[11])\n eq12_1191 = sym.Eq(sym_diff_equations_1191[12], b_sym_1191[12])\n eq13_1191 = sym.Eq(sym_diff_equations_1191[13], b_sym_1191[13])\n eq14_1191 = sym.Eq(sym_diff_equations_1191[14], b_sym_1191[14])\n eq15_1191 = sym.Eq(sym_diff_equations_1191[15], b_sym_1191[15])\n eq16_1191 = sym.Eq(sym_diff_equations_1191[16], b_sym_1191[16])\n eq17_1191 = sym.Eq(sym_diff_equations_1191[17], b_sym_1191[17])\n eq18_1191 = sym.Eq(sym_diff_equations_1191[18], b_sym_1191[18])\n sym_state_probs_1191 = sym.solve([eq0_1191, eq1_1191, eq2_1191,\n eq3_1191, eq4_1191, eq5_1191, eq6_1191, eq7_1191, eq8_1191,\n eq9_1191, eq10_1191, eq11_1191, eq12_1191, eq13_1191, eq14_1191,\n eq15_1191, eq16_1191, eq17_1191, eq18_1191], (p00, p01, p11, p02,\n p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18,\n p09, p19))\n sym_state_recursive_ratios_1191 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1191[0, 0] = 1\n sym_state_recursive_ratios_1191[0, 1] = sym.factor(sym_state_probs_1191\n [p01] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_1191[1, 1] = sym.factor(sym_state_probs_1191\n [p11] / sym_state_probs_1191[p01])\n sym_state_recursive_ratios_1191[0, 2] = sym.factor(sym_state_probs_1191\n [p02] / sym_state_probs_1191[p01])\n sym_state_recursive_ratios_1191[1, 2] = sym.factor(sym_state_probs_1191\n [p12] / sym_state_probs_1191[p02])\n sym_state_recursive_ratios_1191[0, 3] = sym.factor(sym_state_probs_1191\n [p03] / sym_state_probs_1191[p02])\n sym_state_recursive_ratios_1191[1, 3] = sym.factor(sym_state_probs_1191\n [p13] / sym_state_probs_1191[p03])\n sym_state_recursive_ratios_1191[0, 4] = sym.factor(sym_state_probs_1191\n [p04] / sym_state_probs_1191[p03])\n sym_state_recursive_ratios_1191[1, 4] = sym.factor(sym_state_probs_1191\n [p14] / sym_state_probs_1191[p04])\n sym_state_recursive_ratios_1191[0, 5] = sym.factor(sym_state_probs_1191\n [p05] / sym_state_probs_1191[p04])\n sym_state_recursive_ratios_1191[1, 5] = sym.factor(sym_state_probs_1191\n [p15] / sym_state_probs_1191[p05])\n sym_state_recursive_ratios_1191[0, 6] = sym.factor(sym_state_probs_1191\n [p06] / sym_state_probs_1191[p05])\n sym_state_recursive_ratios_1191[1, 6] = sym.factor(sym_state_probs_1191\n [p16] / sym_state_probs_1191[p06])\n sym_state_recursive_ratios_1191[0, 7] = sym.factor(sym_state_probs_1191\n [p07] / sym_state_probs_1191[p06])\n sym_state_recursive_ratios_1191[1, 7] = sym.factor(sym_state_probs_1191\n [p17] / sym_state_probs_1191[p07])\n sym_state_recursive_ratios_1191[0, 8] = sym.factor(sym_state_probs_1191\n [p08] / sym_state_probs_1191[p07])\n sym_state_recursive_ratios_1191[1, 8] = sym.factor(sym_state_probs_1191\n [p18] / sym_state_probs_1191[p08])\n sym_state_recursive_ratios_1191[0, 9] = sym.factor(sym_state_probs_1191\n [p09] / sym_state_probs_1191[p08])\n sym_state_recursive_ratios_1191[1, 9] = sym.factor(sym_state_probs_1191\n [p19] / sym_state_probs_1191[p09])\n sym_state_recursive_ratios_right_1191 = (sym_state_recursive_ratios_1191\n .copy())\n sym_state_recursive_ratios_right_1191[1, 2] = sym.factor(\n sym_state_probs_1191[p12] / sym_state_probs_1191[p11])\n sym_state_recursive_ratios_right_1191[1, 3] = sym.factor(\n sym_state_probs_1191[p13] / sym_state_probs_1191[p12])\n sym_state_recursive_ratios_right_1191[1, 4] = sym.factor(\n sym_state_probs_1191[p14] / sym_state_probs_1191[p13])\n sym_state_recursive_ratios_right_1191[1, 5] = sym.factor(\n sym_state_probs_1191[p15] / 
sym_state_probs_1191[p14])\n    sym_state_recursive_ratios_right_1191[1, 6] = sym.factor(\n        sym_state_probs_1191[p16] / sym_state_probs_1191[p15])\n    sym_state_recursive_ratios_right_1191[1, 7] = sym.factor(\n        sym_state_probs_1191[p17] / sym_state_probs_1191[p16])\n    sym_state_recursive_ratios_right_1191[1, 8] = sym.factor(\n        sym_state_probs_1191[p18] / sym_state_probs_1191[p17])\n    sym_state_recursive_ratios_right_1191[1, 9] = sym.factor(\n        sym_state_probs_1191[p19] / sym_state_probs_1191[p18])\n    sym_state_recursive_ratios_P0_1191 = sym.zeros(buffer_capacity + 1, \n        system_capacity + 1)\n    sym_state_recursive_ratios_P0_1191[0, 0] = 1\n    sym_state_recursive_ratios_P0_1191[0, 1] = sym.factor(\n        sym_state_probs_1191[p01] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[1, 1] = sym.factor(\n        sym_state_probs_1191[p11] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[0, 2] = sym.factor(\n        sym_state_probs_1191[p02] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[1, 2] = sym.factor(\n        sym_state_probs_1191[p12] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[0, 3] = sym.factor(\n        sym_state_probs_1191[p03] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[1, 3] = sym.factor(\n        sym_state_probs_1191[p13] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[0, 4] = sym.factor(\n        sym_state_probs_1191[p04] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[1, 4] = sym.factor(\n        sym_state_probs_1191[p14] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[0, 5] = sym.factor(\n        sym_state_probs_1191[p05] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[1, 5] = sym.factor(\n        sym_state_probs_1191[p15] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[0, 6] = sym.factor(\n        sym_state_probs_1191[p06] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[1, 6] = sym.factor(\n        sym_state_probs_1191[p16] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[0, 7] = sym.factor(\n        sym_state_probs_1191[p07] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[1, 7] = sym.factor(\n        sym_state_probs_1191[p17] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[0, 8] = sym.factor(\n        sym_state_probs_1191[p08] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[1, 8] = sym.factor(\n        sym_state_probs_1191[p18] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[0, 9] = sym.factor(\n        sym_state_probs_1191[p09] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[1, 9] = sym.factor(\n        sym_state_probs_1191[p19] / sym_state_probs_1191[p00])\n    return (sym_state_probs_1191, sym_state_recursive_ratios_1191,\n        sym_state_recursive_ratios_right_1191,\n        sym_state_recursive_ratios_P0_1191)\n",
"step-5": "import ambulance_game as abg\nimport numpy as np\nimport sympy as sym\nfrom sympy.abc import a, b, c, d, e, f, g, h, i, j\n\n\ndef get_symbolic_pi(num_of_servers, threshold, system_capacity, buffer_capacity):\n Q_sym = abg.markov.get_symbolic_transition_matrix(\n num_of_servers=num_of_servers,\n threshold=threshold,\n system_capacity=system_capacity,\n buffer_capacity=buffer_capacity,\n )\n dimension = Q_sym.shape[0]\n if dimension > 7:\n return \"Capacity of 6 exceeded\"\n M_sym = sym.Matrix([Q_sym.transpose()[:-1, :], sym.ones(1, dimension)])\n b_sym = sym.Matrix([sym.zeros(dimension - 1, 1), [1]])\n system = M_sym.col_insert(dimension, b_sym)\n sol = sym.solve_linear_system_LU(system, [a, b, c, d, e, f, g])\n return sol\n\n\ndef get_symbolic_state_probabilities_1222():\n num_of_servers = 1\n threshold = 2\n system_capacity = 2\n buffer_capacity = 2\n\n sym_pi_1222 = get_symbolic_pi(\n num_of_servers=num_of_servers,\n threshold=threshold,\n system_capacity=system_capacity,\n buffer_capacity=buffer_capacity,\n )\n all_states_1222 = abg.markov.build_states(\n threshold=threshold,\n system_capacity=system_capacity,\n buffer_capacity=buffer_capacity,\n )\n\n sym_state_probs_1222 = [0 for _ in range(len(all_states_1222))]\n sym_state_probs_1222[0] = sym.factor(sym_pi_1222[a]) # (0,0)\n sym_state_probs_1222[1] = sym.factor(sym_pi_1222[b]) # (0,1)\n sym_state_probs_1222[2] = sym.factor(sym_pi_1222[c]) # (1,1)\n sym_state_probs_1222[3] = sym.factor(sym_pi_1222[d]) # (0,2)\n sym_state_probs_1222[4] = sym.factor(sym_pi_1222[e]) # (1,2)\n\n sym_state_recursive_ratios_1222 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1222[0, 0] = 1\n sym_state_recursive_ratios_1222[0, 1] = sym.factor(\n sym_state_probs_1222[1] / sym_state_probs_1222[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1222[0, 2] = sym.factor(\n sym_state_probs_1222[2] / sym_state_probs_1222[1]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1222[1, 2] = sym.factor(\n sym_state_probs_1222[3] / sym_state_probs_1222[2]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1222[2, 2] = sym.factor(\n sym_state_probs_1222[4] / sym_state_probs_1222[3]\n ) # (0,2) -> (1,2)\n\n return sym_state_probs_1222, sym_state_recursive_ratios_1222\n\n\ndef get_symbolic_state_probabilities_1121():\n num_of_servers = 1\n threshold = 1\n system_capacity = 2\n buffer_capacity = 1\n\n all_states_1121 = abg.markov.build_states(\n threshold=threshold,\n system_capacity=system_capacity,\n buffer_capacity=buffer_capacity,\n )\n sym_pi_1121 = get_symbolic_pi(\n num_of_servers=num_of_servers,\n threshold=threshold,\n system_capacity=system_capacity,\n buffer_capacity=buffer_capacity,\n )\n sym_state_probs_1121 = [0 for _ in range(len(all_states_1121))]\n\n sym_state_probs_1121[0] = sym.factor(sym_pi_1121[a]) # (0,0)\n sym_state_probs_1121[1] = sym.factor(sym_pi_1121[b]) # (0,1)\n sym_state_probs_1121[2] = sym.factor(sym_pi_1121[c]) # (1,1)\n sym_state_probs_1121[3] = sym.factor(sym_pi_1121[d]) # (0,2)\n sym_state_probs_1121[4] = sym.factor(sym_pi_1121[e]) # (1,2)\n\n sym_state_recursive_ratios_1121 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1121[0, 0] = 1\n sym_state_recursive_ratios_1121[0, 1] = sym.factor(\n sym_state_probs_1121[1] / sym_state_probs_1121[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1121[1, 1] = sym.factor(\n sym_state_probs_1121[2] / sym_state_probs_1121[1]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1121[0, 2] = sym.factor(\n 
sym_state_probs_1121[3] / sym_state_probs_1121[1]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1121[1, 2] = sym.factor(\n sym_state_probs_1121[4] / sym_state_probs_1121[3]\n ) # (0,2) -> (1,2)\n\n sym_state_recursive_ratios_right_1121 = sym_state_recursive_ratios_1121.copy()\n sym_state_recursive_ratios_right_1121[1, 2] = sym.factor(\n sym_state_probs_1121[4] / sym_state_probs_1121[2]\n ) # (1,1) -> (1,2)\n\n sym_state_recursive_ratios_P0_1121 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1121[0, 0] = 1\n sym_state_recursive_ratios_P0_1121[0, 1] = sym.factor(\n sym_state_probs_1121[1] / sym_state_probs_1121[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1121[1, 1] = sym.factor(\n sym_state_probs_1121[2] / sym_state_probs_1121[0]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1121[0, 2] = sym.factor(\n sym_state_probs_1121[3] / sym_state_probs_1121[0]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1121[1, 2] = sym.factor(\n sym_state_probs_1121[4] / sym_state_probs_1121[0]\n ) # (0,0) -> (1,2)\n\n return (\n sym_state_probs_1121,\n sym_state_recursive_ratios_1121,\n sym_state_recursive_ratios_right_1121,\n sym_state_recursive_ratios_P0_1121,\n )\n\n\ndef get_symbolic_state_probabilities_1122():\n # num_of_servers = 1\n threshold = 1\n system_capacity = 2\n buffer_capacity = 2\n\n all_states_1122 = abg.markov.build_states(\n threshold=threshold,\n system_capacity=system_capacity,\n buffer_capacity=buffer_capacity,\n )\n sym_state_probs_1122 = [0 for _ in range(len(all_states_1122))]\n\n sym_Lambda = sym.symbols(\"Lambda\")\n sym_lambda_1 = sym.symbols(\"lambda_1\")\n sym_lambda_2 = sym.symbols(\"lambda_2\")\n sym_mu = sym.symbols(\"mu\")\n\n sym_state_probs_1122[0] = (\n (sym_mu**6)\n + 2 * (sym_lambda_2) * (sym_mu**5)\n + (sym_lambda_2**2) * (sym_mu**4)\n ) # (0,0)\n sym_state_probs_1122[1] = (sym_Lambda * sym_mu**3) * (\n sym_mu**2 + 2 * sym_mu * sym_lambda_2 + sym_lambda_2**2\n ) # (0,1)\n sym_state_probs_1122[2] = (sym_Lambda * sym_lambda_2 * sym_mu**2) * (\n sym_lambda_2**2\n + sym_lambda_2 * sym_lambda_1\n + sym_lambda_1 * sym_mu\n + sym_mu**2\n + 2 * sym_lambda_2 * sym_mu\n ) # (1,1)\n sym_state_probs_1122[3] = (sym_Lambda * sym_lambda_2**2 * sym_mu) * (\n sym_lambda_2**2\n + 2 * sym_lambda_1 * sym_lambda_2\n + 3 * sym_lambda_1 * sym_mu\n + sym_mu**2\n + 2 * sym_lambda_2 * sym_mu\n + sym_lambda_1**2\n ) # (2,1)\n sym_state_probs_1122[4] = (sym_Lambda * sym_lambda_1 * sym_mu**3) * (\n sym_lambda_2 + sym_mu\n ) # (0,2)\n sym_state_probs_1122[5] = (\n sym_Lambda * sym_lambda_1 * sym_lambda_2 * sym_mu**2\n ) * (\n 2 * sym_mu + sym_lambda_1 + sym_lambda_2\n ) # (1,2)\n sym_state_probs_1122[6] = (sym_Lambda * sym_lambda_1 * sym_lambda_2**2) * (\n sym_lambda_1**2\n + 4 * sym_lambda_1 * sym_mu\n + 2 * sym_lambda_1 * sym_lambda_2\n + 3 * sym_mu**2\n + sym_lambda_2**2\n + 3 * sym_lambda_2 * sym_mu\n ) # (2,2)\n\n total_1122 = np.sum(sym_state_probs_1122)\n sym_state_probs_1122 = [i / total_1122 for i in sym_state_probs_1122]\n\n sym_state_recursive_ratios_1122 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1122[0, 0] = 1\n sym_state_recursive_ratios_1122[0, 1] = sym.factor(\n sym_state_probs_1122[1] / sym_state_probs_1122[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1122[1, 1] = sym.factor(\n sym_state_probs_1122[2] / sym_state_probs_1122[1]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1122[2, 1] = sym.factor(\n sym_state_probs_1122[3] / sym_state_probs_1122[2]\n ) # 
(1,1) -> (2,1)\n\n sym_state_recursive_ratios_1122[0, 2] = sym.factor(\n sym_state_probs_1122[4] / sym_state_probs_1122[1]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1122[1, 2] = sym.factor(\n sym_state_probs_1122[5] / sym_state_probs_1122[4]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1122[2, 2] = sym.factor(\n sym_state_probs_1122[6] / sym_state_probs_1122[5]\n ) # (1,2) -> (2,2)\n\n sym_state_recursive_ratios_right_1122 = sym_state_recursive_ratios_1122.copy()\n sym_state_recursive_ratios_right_1122[1, 2] = sym.factor(\n sym_state_probs_1122[5] / sym_state_probs_1122[2]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1122[2, 2] = sym.factor(\n sym_state_probs_1122[6] / sym_state_probs_1122[3]\n ) # (2,1) -> (2,2)\n\n sym_state_recursive_ratios_P0_1122 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1122[0, 0] = 1\n sym_state_recursive_ratios_P0_1122[0, 1] = sym.factor(\n sym_state_probs_1122[1] / sym_state_probs_1122[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1122[1, 1] = sym.factor(\n sym_state_probs_1122[2] / sym_state_probs_1122[0]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1122[2, 1] = sym.factor(\n sym_state_probs_1122[3] / sym_state_probs_1122[0]\n ) # (0,0) -> (2,1)\n\n sym_state_recursive_ratios_P0_1122[0, 2] = sym.factor(\n sym_state_probs_1122[4] / sym_state_probs_1122[0]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1122[1, 2] = sym.factor(\n sym_state_probs_1122[5] / sym_state_probs_1122[0]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1122[2, 2] = sym.factor(\n sym_state_probs_1122[6] / sym_state_probs_1122[0]\n ) # (0,0) -> (2,2)\n\n return (\n sym_state_probs_1122,\n sym_state_recursive_ratios_1122,\n sym_state_recursive_ratios_right_1122,\n sym_state_recursive_ratios_P0_1122,\n )\n\n\ndef get_symbolic_state_probabilities_1123():\n num_of_servers = 1\n threshold = 1\n system_capacity = 2\n buffer_capacity = 3\n\n Q_sym_1123 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers, threshold, system_capacity, buffer_capacity\n )\n\n p00, p01, p11, p21, p31, p02, p12, p22, p32 = sym.symbols(\n \"p00, p01, p11, p21, p31, p02, p12, p22, p32\"\n )\n pi_1123 = sym.Matrix([p00, p01, p11, p21, p31, p02, p12, p22, p32])\n dimension_1123 = Q_sym_1123.shape[0]\n\n M_sym_1123 = sym.Matrix(\n [Q_sym_1123.transpose()[:-1, :], sym.ones(1, dimension_1123)]\n )\n sym_diff_equations_1123 = M_sym_1123 @ pi_1123\n\n b_sym_1123 = sym.Matrix([sym.zeros(dimension_1123 - 1, 1), [1]])\n\n eq0_1123 = sym.Eq(sym_diff_equations_1123[0], b_sym_1123[0])\n eq1_1123 = sym.Eq(sym_diff_equations_1123[1], b_sym_1123[1])\n eq2_1123 = sym.Eq(sym_diff_equations_1123[2], b_sym_1123[2])\n eq3_1123 = sym.Eq(sym_diff_equations_1123[3], b_sym_1123[3])\n eq4_1123 = sym.Eq(sym_diff_equations_1123[4], b_sym_1123[4])\n eq5_1123 = sym.Eq(sym_diff_equations_1123[5], b_sym_1123[5])\n eq6_1123 = sym.Eq(sym_diff_equations_1123[6], b_sym_1123[6])\n eq7_1123 = sym.Eq(sym_diff_equations_1123[7], b_sym_1123[7])\n eq8_1123 = sym.Eq(sym_diff_equations_1123[8], b_sym_1123[8])\n\n sym_state_probs_1123 = sym.solve(\n [\n eq0_1123,\n eq1_1123,\n eq2_1123,\n eq3_1123,\n eq4_1123,\n eq5_1123,\n eq6_1123,\n eq7_1123,\n eq8_1123,\n ],\n (p00, p01, p11, p21, p31, p02, p12, p22, p32),\n )\n\n sym_state_recursive_ratios_1123 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1123[0, 0] = 1\n sym_state_recursive_ratios_1123[0, 1] = sym.factor(\n sym_state_probs_1123[p01] / 
sym_state_probs_1123[p00]\n    )  # (0,0) -> (0,1)\n    sym_state_recursive_ratios_1123[1, 1] = sym.factor(\n        sym_state_probs_1123[p11] / sym_state_probs_1123[p01]\n    )  # (0,1) -> (1,1)\n    sym_state_recursive_ratios_1123[2, 1] = sym.factor(\n        sym_state_probs_1123[p21] / sym_state_probs_1123[p11]\n    )  # (1,1) -> (2,1)\n    sym_state_recursive_ratios_1123[3, 1] = sym.factor(\n        sym_state_probs_1123[p31] / sym_state_probs_1123[p21]\n    )  # (2,1) -> (3,1)\n    sym_state_recursive_ratios_1123[0, 2] = sym.factor(\n        sym_state_probs_1123[p02] / sym_state_probs_1123[p01]\n    )  # (0,1) -> (0,2)\n    sym_state_recursive_ratios_1123[1, 2] = sym.factor(\n        sym_state_probs_1123[p12] / sym_state_probs_1123[p02]\n    )  # (0,2) -> (1,2)\n    sym_state_recursive_ratios_1123[2, 2] = sym.factor(\n        sym_state_probs_1123[p22] / sym_state_probs_1123[p12]\n    )  # (1,2) -> (2,2)\n    sym_state_recursive_ratios_1123[3, 2] = sym.factor(\n        sym_state_probs_1123[p32] / sym_state_probs_1123[p22]\n    )  # (2,2) -> (3,2)\n\n    sym_state_recursive_ratios_right_1123 = sym_state_recursive_ratios_1123.copy()\n    sym_state_recursive_ratios_right_1123[1, 2] = sym.factor(\n        sym_state_probs_1123[p12] / sym_state_probs_1123[p11]\n    )  # (1,1) -> (1,2)\n    sym_state_recursive_ratios_right_1123[2, 2] = sym.factor(\n        sym_state_probs_1123[p22] / sym_state_probs_1123[p21]\n    )  # (2,1) -> (2,2)\n    sym_state_recursive_ratios_right_1123[3, 2] = sym.factor(\n        sym_state_probs_1123[p32] / sym_state_probs_1123[p22]\n    )  # (2,2) -> (3,2)\n\n    sym_state_recursive_ratios_P0_1123 = sym.zeros(\n        buffer_capacity + 1, system_capacity + 1\n    )\n    sym_state_recursive_ratios_P0_1123[0, 0] = 1\n    sym_state_recursive_ratios_P0_1123[0, 1] = sym.factor(\n        sym_state_probs_1123[p01] / sym_state_probs_1123[p00]\n    )  # (0,0) -> (0,1)\n    sym_state_recursive_ratios_P0_1123[1, 1] = sym.factor(\n        sym_state_probs_1123[p11] / sym_state_probs_1123[p00]\n    )  # (0,0) -> (1,1)\n    sym_state_recursive_ratios_P0_1123[2, 1] = sym.factor(\n        sym_state_probs_1123[p21] / sym_state_probs_1123[p00]\n    )  # (0,0) -> (2,1)\n    sym_state_recursive_ratios_P0_1123[3, 1] = sym.factor(\n        sym_state_probs_1123[p31] / sym_state_probs_1123[p00]\n    )  # (0,0) -> (3,1)\n    sym_state_recursive_ratios_P0_1123[0, 2] = sym.factor(\n        sym_state_probs_1123[p02] / sym_state_probs_1123[p00]\n    )  # (0,0) -> (0,2)\n    sym_state_recursive_ratios_P0_1123[1, 2] = sym.factor(\n        sym_state_probs_1123[p12] / sym_state_probs_1123[p00]\n    )  # (0,0) -> (1,2)\n    sym_state_recursive_ratios_P0_1123[2, 2] = sym.factor(\n        sym_state_probs_1123[p22] / sym_state_probs_1123[p00]\n    )  # (0,0) -> (2,2)\n    sym_state_recursive_ratios_P0_1123[3, 2] = sym.factor(\n        sym_state_probs_1123[p32] / sym_state_probs_1123[p00]\n    )  # (0,0) -> (3,2)\n\n    return (\n        sym_state_probs_1123,\n        sym_state_recursive_ratios_1123,\n        sym_state_recursive_ratios_right_1123,\n        sym_state_recursive_ratios_P0_1123,\n    )\n\n\ndef get_symbolic_state_probabilities_1341():\n    # num_of_servers = 1\n    threshold = 3\n    system_capacity = 4\n    buffer_capacity = 1\n\n    all_states_1341 = abg.markov.build_states(\n        threshold=threshold,\n        system_capacity=system_capacity,\n        buffer_capacity=buffer_capacity,\n    )\n    sym_state_probs_1341 = [0 for _ in range(len(all_states_1341))]\n\n    sym_Lambda = sym.symbols(\"Lambda\")\n    sym_lambda_1 = sym.symbols(\"lambda_1\")\n    sym_lambda_2 = sym.symbols(\"lambda_2\")\n    sym_mu = sym.symbols(\"mu\")\n\n    sym_state_probs_1341[0] = (sym_lambda_2) * (sym_mu**5) + (sym_mu**6)  # (0,0)\n    sym_state_probs_1341[1] = sym_Lambda * sym_lambda_2 * (sym_mu**4) + sym_Lambda * (\n        sym_mu**5\n    )  # (0,1)\n    sym_state_probs_1341[2] = 
(sym_Lambda**2) * sym_lambda_2 * (sym_mu**3) + (\n sym_Lambda**2\n ) * (\n sym_mu**4\n ) # (0,2)\n sym_state_probs_1341[3] = (sym_Lambda**3) * sym_lambda_2 * (sym_mu**2) + (\n sym_Lambda**3\n ) * (\n sym_mu**3\n ) # (0,3)\n sym_state_probs_1341[4] = (\n (sym_Lambda**3) * sym_lambda_1 * sym_lambda_2 * sym_mu\n + (sym_Lambda**3) * sym_lambda_2 * (sym_mu**2)\n + (sym_Lambda**3) * sym_lambda_2 * sym_lambda_2 * sym_mu\n ) # (1,3)\n sym_state_probs_1341[5] = (sym_Lambda**3) * sym_lambda_1 * (sym_mu**2) # (0,4)\n sym_state_probs_1341[6] = (\n (sym_Lambda**3) * (sym_lambda_1**2) * sym_lambda_2\n + (sym_Lambda**3) * sym_lambda_1 * (sym_lambda_2**2)\n + 2 * (sym_Lambda**3) * sym_lambda_1 * sym_lambda_2 * sym_mu\n ) # (1,4)\n\n total_1341 = np.sum(sym_state_probs_1341)\n sym_state_probs_1341 = [i / total_1341 for i in sym_state_probs_1341]\n\n sym_state_recursive_ratios_1341 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1341[0, 0] = 1\n sym_state_recursive_ratios_1341[0, 1] = sym.factor(\n sym_state_probs_1341[1] / sym_state_probs_1341[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1341[0, 2] = sym.factor(\n sym_state_probs_1341[2] / sym_state_probs_1341[1]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1341[0, 3] = sym.factor(\n sym_state_probs_1341[3] / sym_state_probs_1341[2]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1341[0, 4] = sym.factor(\n sym_state_probs_1341[5] / sym_state_probs_1341[3]\n ) # (0,3) -> (0,4)\n\n sym_state_recursive_ratios_1341[1, 3] = sym.factor(\n sym_state_probs_1341[4] / sym_state_probs_1341[3]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1341[1, 4] = sym.factor(\n sym_state_probs_1341[6] / sym_state_probs_1341[5]\n ) # (0,4) -> (1,4)\n\n sym_state_recursive_ratios_right_1341 = sym_state_recursive_ratios_1341.copy()\n sym_state_recursive_ratios_right_1341[1, 4] = sym.factor(\n sym_state_probs_1341[6] / sym_state_probs_1341[4]\n ) # (1,3) -> (1,4)\n\n sym_state_recursive_ratios_P0_1341 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1341[0, 0] = 1\n sym_state_recursive_ratios_P0_1341[0, 1] = sym.factor(\n sym_state_probs_1341[1] / sym_state_probs_1341[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1341[0, 2] = sym.factor(\n sym_state_probs_1341[2] / sym_state_probs_1341[0]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1341[0, 3] = sym.factor(\n sym_state_probs_1341[3] / sym_state_probs_1341[0]\n ) # (0,0) -> (0,3)\n\n sym_state_recursive_ratios_P0_1341[1, 3] = sym.factor(\n sym_state_probs_1341[4] / sym_state_probs_1341[0]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1341[0, 4] = sym.factor(\n sym_state_probs_1341[5] / sym_state_probs_1341[0]\n ) # (0,0) -> (0,4)\n sym_state_recursive_ratios_P0_1341[1, 4] = sym.factor(\n sym_state_probs_1341[6] / sym_state_probs_1341[0]\n ) # (0,0) -> (1,4)\n\n return (\n sym_state_probs_1341,\n sym_state_recursive_ratios_1341,\n sym_state_recursive_ratios_right_1341,\n sym_state_recursive_ratios_P0_1341,\n )\n\n\ndef get_symbolic_state_probabilities_1131():\n # num_of_servers = 1\n threshold = 1\n system_capacity = 3\n buffer_capacity = 1\n\n all_states_1131 = abg.markov.build_states(\n threshold=threshold,\n system_capacity=system_capacity,\n buffer_capacity=buffer_capacity,\n )\n sym_state_probs_1131 = [0 for _ in range(len(all_states_1131))]\n\n sym_Lambda = sym.symbols(\"Lambda\")\n sym_lambda_1 = sym.symbols(\"lambda_1\")\n sym_lambda_2 = sym.symbols(\"lambda_2\")\n sym_mu = sym.symbols(\"mu\")\n\n # 
(0,0)\n sym_state_probs_1131[0] = (\n (sym_mu**6)\n + 2 * (sym_lambda_2 * (sym_mu**5))\n + ((sym_lambda_2**2) * (sym_mu**4))\n + (sym_lambda_1 * sym_lambda_2 * (sym_mu**4))\n )\n # (0,1)\n sym_state_probs_1131[1] = sym_state_probs_1131[0] * sym_Lambda / sym_mu\n # (1,1)\n sym_state_probs_1131[2] = (\n (sym_Lambda * (sym_lambda_1**2) * sym_lambda_2 * (sym_mu**2))\n + (sym_Lambda * sym_lambda_2 * sym_lambda_1 * (sym_mu**3))\n + 2 * (sym_Lambda * sym_lambda_1 * (sym_lambda_2**2) * (sym_mu**2))\n + 2 * (sym_Lambda * (sym_lambda_2**2) * (sym_mu**3))\n + (sym_Lambda * (sym_lambda_2**3) * (sym_mu**2))\n + (sym_Lambda * sym_lambda_2 * (sym_mu**4))\n )\n # (0,2)\n sym_state_probs_1131[3] = (\n sym_Lambda * sym_lambda_1 * sym_mu**3 * (sym_lambda_2 + sym_mu)\n )\n # (1,2)\n sym_state_probs_1131[4] = (sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu) * (\n (sym_lambda_2**2)\n + 2 * sym_lambda_2 * sym_lambda_1\n + 3 * sym_lambda_2 * sym_mu\n + (sym_lambda_1**2)\n + 2 * sym_lambda_1 * sym_mu\n + 2 * (sym_mu**2)\n )\n # (0,3)\n sym_state_probs_1131[5] = sym_Lambda * (sym_lambda_1**2) * (sym_mu**3)\n # (1,3)\n sym_state_probs_1131[6] = (sym_Lambda * sym_lambda_2 * (sym_lambda_1**2)) * (\n (sym_lambda_2**2)\n + 2 * sym_lambda_2 * sym_lambda_1\n + 3 * sym_lambda_2 * sym_mu\n + (sym_lambda_1**2)\n + 2 * sym_lambda_1 * sym_mu\n + 3 * (sym_mu**2)\n )\n\n denominator = (\n sym_Lambda * sym_lambda_2**3 * sym_lambda_1**2\n + sym_Lambda * sym_lambda_2**3 * sym_lambda_1 * sym_mu\n + sym_Lambda * sym_lambda_2**3 * sym_mu**2\n + 2 * sym_Lambda * sym_lambda_2**2 * sym_lambda_1**3\n + 5 * sym_Lambda * sym_lambda_2**2 * sym_lambda_1**2 * sym_mu\n + 5 * sym_Lambda * sym_lambda_2**2 * sym_lambda_1 * sym_mu**2\n + 3 * sym_Lambda * sym_lambda_2**2 * sym_mu**3\n + sym_Lambda * sym_lambda_2 * sym_lambda_1**4\n + 3 * sym_Lambda * sym_lambda_2 * sym_lambda_1**3 * sym_mu\n + 6 * sym_Lambda * sym_lambda_2 * sym_lambda_1**2 * sym_mu**2\n + 5 * sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu**3\n + 3 * sym_Lambda * sym_lambda_2 * sym_mu**4\n + sym_Lambda * sym_lambda_1**2 * sym_mu**3\n + sym_Lambda * sym_lambda_1 * sym_mu**4\n + sym_Lambda * sym_mu**5\n + sym_lambda_2**2 * sym_mu**4\n + sym_lambda_2 * sym_lambda_1 * sym_mu**4\n + 2 * sym_lambda_2 * sym_mu**5\n + sym_mu**6\n )\n\n sym_state_probs_1131 = [i / denominator for i in sym_state_probs_1131]\n\n sym_state_recursive_ratios_1131 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1131[0, 0] = 1\n sym_state_recursive_ratios_1131[0, 1] = sym.factor(\n sym_state_probs_1131[1] / sym_state_probs_1131[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1131[1, 1] = sym.factor(\n sym_state_probs_1131[2] / sym_state_probs_1131[1]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1131[0, 2] = sym.factor(\n sym_state_probs_1131[3] / sym_state_probs_1131[1]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1131[1, 2] = sym.factor(\n sym_state_probs_1131[4] / sym_state_probs_1131[3]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1131[0, 3] = sym.factor(\n sym_state_probs_1131[5] / sym_state_probs_1131[3]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1131[1, 3] = sym.factor(\n sym_state_probs_1131[6] / sym_state_probs_1131[5]\n ) # (0,3) -> (1,3)\n\n sym_state_recursive_ratios_right_1131 = sym_state_recursive_ratios_1131.copy()\n sym_state_recursive_ratios_right_1131[1, 2] = sym.factor(\n sym_state_probs_1131[4] / sym_state_probs_1131[2]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1131[1, 3] = sym.factor(\n 
sym_state_probs_1131[6] / sym_state_probs_1131[4]\n ) # (1,2) -> (1,3)\n\n sym_state_recursive_ratios_P0_1131 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1131[0, 0] = 1\n sym_state_recursive_ratios_P0_1131[0, 1] = sym.factor(\n sym_state_probs_1131[1] / sym_state_probs_1131[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1131[1, 1] = sym.factor(\n sym_state_probs_1131[2] / sym_state_probs_1131[0]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1131[0, 2] = sym.factor(\n sym_state_probs_1131[3] / sym_state_probs_1131[0]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1131[1, 2] = sym.factor(\n sym_state_probs_1131[4] / sym_state_probs_1131[0]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1131[0, 3] = sym.factor(\n sym_state_probs_1131[5] / sym_state_probs_1131[0]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1131[1, 3] = sym.factor(\n sym_state_probs_1131[6] / sym_state_probs_1131[0]\n ) # (0,0) -> (1,3)\n\n return (\n sym_state_probs_1131,\n sym_state_recursive_ratios_1131,\n sym_state_recursive_ratios_right_1131,\n sym_state_recursive_ratios_P0_1131,\n )\n\n\ndef get_symbolic_state_probabilities_1132():\n num_of_servers = 1\n threshold = 1\n system_capacity = 3\n buffer_capacity = 2\n\n Q_sym_1132 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers, threshold, system_capacity, buffer_capacity\n )\n\n p00, p01, p11, p21, p02, p12, p22, p03, p13, p23 = sym.symbols(\n \"p00, p01, p11, p21, p02, p12, p22, p03, p13, p23\"\n )\n pi_1132 = sym.Matrix([p00, p01, p11, p21, p02, p12, p22, p03, p13, p23])\n dimension_1132 = Q_sym_1132.shape[0]\n\n M_sym_1132 = sym.Matrix(\n [Q_sym_1132.transpose()[:-1, :], sym.ones(1, dimension_1132)]\n )\n sym_diff_equations_1132 = M_sym_1132 @ pi_1132\n\n b_sym_1132 = sym.Matrix([sym.zeros(dimension_1132 - 1, 1), [1]])\n\n eq0_1132 = sym.Eq(sym_diff_equations_1132[0], b_sym_1132[0])\n eq1_1132 = sym.Eq(sym_diff_equations_1132[1], b_sym_1132[1])\n eq2_1132 = sym.Eq(sym_diff_equations_1132[2], b_sym_1132[2])\n eq3_1132 = sym.Eq(sym_diff_equations_1132[3], b_sym_1132[3])\n eq4_1132 = sym.Eq(sym_diff_equations_1132[4], b_sym_1132[4])\n eq5_1132 = sym.Eq(sym_diff_equations_1132[5], b_sym_1132[5])\n eq6_1132 = sym.Eq(sym_diff_equations_1132[6], b_sym_1132[6])\n eq7_1132 = sym.Eq(sym_diff_equations_1132[7], b_sym_1132[7])\n eq8_1132 = sym.Eq(sym_diff_equations_1132[8], b_sym_1132[8])\n eq9_1132 = sym.Eq(sym_diff_equations_1132[9], b_sym_1132[9])\n\n sym_state_probs_1132 = sym.solve(\n [\n eq0_1132,\n eq1_1132,\n eq2_1132,\n eq3_1132,\n eq4_1132,\n eq5_1132,\n eq6_1132,\n eq7_1132,\n eq8_1132,\n eq9_1132,\n ],\n (p00, p01, p11, p21, p02, p12, p22, p03, p13, p23),\n )\n\n sym_state_recursive_ratios_1132 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1132[0, 0] = 1\n sym_state_recursive_ratios_1132[0, 1] = sym.factor(\n sym_state_probs_1132[p01] / sym_state_probs_1132[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1132[1, 1] = sym.factor(\n sym_state_probs_1132[p11] / sym_state_probs_1132[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1132[2, 1] = sym.factor(\n sym_state_probs_1132[p21] / sym_state_probs_1132[p11]\n ) # (1,1) -> (2,1)\n sym_state_recursive_ratios_1132[0, 2] = sym.factor(\n sym_state_probs_1132[p02] / sym_state_probs_1132[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1132[1, 2] = sym.factor(\n sym_state_probs_1132[p12] / sym_state_probs_1132[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1132[2, 
2] = sym.factor(\n sym_state_probs_1132[p22] / sym_state_probs_1132[p12]\n ) # (1,2) -> (2,2)\n sym_state_recursive_ratios_1132[0, 3] = sym.factor(\n sym_state_probs_1132[p03] / sym_state_probs_1132[p02]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1132[1, 3] = sym.factor(\n sym_state_probs_1132[p13] / sym_state_probs_1132[p03]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1132[2, 3] = sym.factor(\n sym_state_probs_1132[p23] / sym_state_probs_1132[p13]\n ) # (1,3) -> (2,3)\n\n sym_state_recursive_ratios_right_1132 = sym_state_recursive_ratios_1132.copy()\n sym_state_recursive_ratios_right_1132[1, 2] = sym.factor(\n sym_state_probs_1132[p12] / sym_state_probs_1132[p11]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1132[1, 3] = sym.factor(\n sym_state_probs_1132[p13] / sym_state_probs_1132[p12]\n ) # (1,2) -> (1,3)\n sym_state_recursive_ratios_right_1132[2, 2] = sym.factor(\n sym_state_probs_1132[p22] / sym_state_probs_1132[p21]\n ) # (2,1) -> (2,2)\n sym_state_recursive_ratios_right_1132[2, 3] = sym.factor(\n sym_state_probs_1132[p23] / sym_state_probs_1132[p22]\n ) # (2,2) -> (2,3)\n\n sym_state_recursive_ratios_P0_1132 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1132[0, 0] = 1\n sym_state_recursive_ratios_P0_1132[0, 1] = sym.factor(\n sym_state_probs_1132[p01] / sym_state_probs_1132[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1132[1, 1] = sym.factor(\n sym_state_probs_1132[p11] / sym_state_probs_1132[p00]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1132[2, 1] = sym.factor(\n sym_state_probs_1132[p21] / sym_state_probs_1132[p00]\n ) # (0,0) -> (2,1)\n sym_state_recursive_ratios_P0_1132[0, 2] = sym.factor(\n sym_state_probs_1132[p02] / sym_state_probs_1132[p00]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1132[1, 2] = sym.factor(\n sym_state_probs_1132[p12] / sym_state_probs_1132[p00]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1132[2, 2] = sym.factor(\n sym_state_probs_1132[p22] / sym_state_probs_1132[p00]\n ) # (0,0) -> (2,2)\n sym_state_recursive_ratios_P0_1132[0, 3] = sym.factor(\n sym_state_probs_1132[p03] / sym_state_probs_1132[p00]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1132[1, 3] = sym.factor(\n sym_state_probs_1132[p13] / sym_state_probs_1132[p00]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1132[2, 3] = sym.factor(\n sym_state_probs_1132[p23] / sym_state_probs_1132[p00]\n ) # (0,0) -> (2,3)\n\n return (\n sym_state_probs_1132,\n sym_state_recursive_ratios_1132,\n sym_state_recursive_ratios_right_1132,\n sym_state_recursive_ratios_P0_1132,\n )\n\n\ndef get_symbolic_state_probabilities_1141():\n num_of_servers = 1\n threshold = 1\n system_capacity = 4\n buffer_capacity = 1\n\n Q_sym_1141 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers, threshold, system_capacity, buffer_capacity\n )\n\n p00, p01, p11, p02, p12, p03, p13, p04, p14 = sym.symbols(\n \"p00, p01, p11, p02, p12, p03, p13, p04, p14\"\n )\n pi_1141 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14])\n dimension_1141 = Q_sym_1141.shape[0]\n\n M_sym_1141 = sym.Matrix(\n [Q_sym_1141.transpose()[:-1, :], sym.ones(1, dimension_1141)]\n )\n sym_diff_equations_1141 = M_sym_1141 @ pi_1141\n\n b_sym_1141 = sym.Matrix([sym.zeros(dimension_1141 - 1, 1), [1]])\n\n eq0_1141 = sym.Eq(sym_diff_equations_1141[0], b_sym_1141[0])\n eq1_1141 = sym.Eq(sym_diff_equations_1141[1], b_sym_1141[1])\n eq2_1141 = sym.Eq(sym_diff_equations_1141[2], b_sym_1141[2])\n eq3_1141 = 
sym.Eq(sym_diff_equations_1141[3], b_sym_1141[3])\n eq4_1141 = sym.Eq(sym_diff_equations_1141[4], b_sym_1141[4])\n eq5_1141 = sym.Eq(sym_diff_equations_1141[5], b_sym_1141[5])\n eq6_1141 = sym.Eq(sym_diff_equations_1141[6], b_sym_1141[6])\n eq7_1141 = sym.Eq(sym_diff_equations_1141[7], b_sym_1141[7])\n eq8_1141 = sym.Eq(sym_diff_equations_1141[8], b_sym_1141[8])\n\n sym_state_probs_1141 = sym.solve(\n [\n eq0_1141,\n eq1_1141,\n eq2_1141,\n eq3_1141,\n eq4_1141,\n eq5_1141,\n eq6_1141,\n eq7_1141,\n eq8_1141,\n ],\n (p00, p01, p11, p02, p12, p03, p13, p04, p14),\n )\n\n sym_state_recursive_ratios_1141 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1141[0, 0] = 1\n sym_state_recursive_ratios_1141[0, 1] = sym.factor(\n sym_state_probs_1141[p01] / sym_state_probs_1141[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1141[1, 1] = sym.factor(\n sym_state_probs_1141[p11] / sym_state_probs_1141[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1141[0, 2] = sym.factor(\n sym_state_probs_1141[p02] / sym_state_probs_1141[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1141[1, 2] = sym.factor(\n sym_state_probs_1141[p12] / sym_state_probs_1141[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1141[0, 3] = sym.factor(\n sym_state_probs_1141[p03] / sym_state_probs_1141[p02]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1141[1, 3] = sym.factor(\n sym_state_probs_1141[p13] / sym_state_probs_1141[p03]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1141[0, 4] = sym.factor(\n sym_state_probs_1141[p04] / sym_state_probs_1141[p03]\n ) # (0,3) -> (0,4)\n sym_state_recursive_ratios_1141[1, 4] = sym.factor(\n sym_state_probs_1141[p14] / sym_state_probs_1141[p04]\n ) # (0,4) -> (1,4)\n\n sym_state_recursive_ratios_right_1141 = sym_state_recursive_ratios_1141.copy()\n sym_state_recursive_ratios_right_1141[1, 2] = sym.factor(\n sym_state_probs_1141[p12] / sym_state_probs_1141[p11]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1141[1, 3] = sym.factor(\n sym_state_probs_1141[p13] / sym_state_probs_1141[p12]\n ) # (1,2) -> (1,3)\n sym_state_recursive_ratios_right_1141[1, 4] = sym.factor(\n sym_state_probs_1141[p14] / sym_state_probs_1141[p13]\n ) # (1,3) -> (1,4)\n\n sym_state_recursive_ratios_P0_1141 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1141[0, 0] = 1\n sym_state_recursive_ratios_P0_1141[0, 1] = sym.factor(\n sym_state_probs_1141[p01] / sym_state_probs_1141[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1141[1, 1] = sym.factor(\n sym_state_probs_1141[p11] / sym_state_probs_1141[p00]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1141[0, 2] = sym.factor(\n sym_state_probs_1141[p02] / sym_state_probs_1141[p00]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1141[1, 2] = sym.factor(\n sym_state_probs_1141[p12] / sym_state_probs_1141[p00]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1141[0, 3] = sym.factor(\n sym_state_probs_1141[p03] / sym_state_probs_1141[p00]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1141[1, 3] = sym.factor(\n sym_state_probs_1141[p13] / sym_state_probs_1141[p00]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1141[0, 4] = sym.factor(\n sym_state_probs_1141[p04] / sym_state_probs_1141[p00]\n ) # (0,0) -> (0,4)\n sym_state_recursive_ratios_P0_1141[1, 4] = sym.factor(\n sym_state_probs_1141[p14] / sym_state_probs_1141[p00]\n ) # (0,0) -> (1,4)\n\n return (\n sym_state_probs_1141,\n sym_state_recursive_ratios_1141,\n 
sym_state_recursive_ratios_right_1141,\n sym_state_recursive_ratios_P0_1141,\n )\n\n\ndef get_symbolic_state_probabilities_1142():\n num_of_servers = 1\n threshold = 1\n system_capacity = 4\n buffer_capacity = 2\n\n Q_sym_1142 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers=num_of_servers,\n threshold=threshold,\n system_capacity=system_capacity,\n buffer_capacity=buffer_capacity,\n )\n\n p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24 = sym.symbols(\n \"p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24\"\n )\n pi_1142 = sym.Matrix(\n [p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24]\n )\n dimension_1142 = Q_sym_1142.shape[0]\n\n M_sym_1142 = sym.Matrix(\n [Q_sym_1142.transpose()[:-1, :], sym.ones(1, dimension_1142)]\n )\n sym_diff_equations_1142 = M_sym_1142 @ pi_1142\n\n b_sym_1142 = sym.Matrix([sym.zeros(dimension_1142 - 1, 1), [1]])\n\n eq0_1142 = sym.Eq(sym_diff_equations_1142[0], b_sym_1142[0])\n eq1_1142 = sym.Eq(sym_diff_equations_1142[1], b_sym_1142[1])\n eq2_1142 = sym.Eq(sym_diff_equations_1142[2], b_sym_1142[2])\n eq3_1142 = sym.Eq(sym_diff_equations_1142[3], b_sym_1142[3])\n eq4_1142 = sym.Eq(sym_diff_equations_1142[4], b_sym_1142[4])\n eq5_1142 = sym.Eq(sym_diff_equations_1142[5], b_sym_1142[5])\n eq6_1142 = sym.Eq(sym_diff_equations_1142[6], b_sym_1142[6])\n eq7_1142 = sym.Eq(sym_diff_equations_1142[7], b_sym_1142[7])\n eq8_1142 = sym.Eq(sym_diff_equations_1142[8], b_sym_1142[8])\n eq9_1142 = sym.Eq(sym_diff_equations_1142[9], b_sym_1142[9])\n eq10_1142 = sym.Eq(sym_diff_equations_1142[10], b_sym_1142[10])\n eq11_1142 = sym.Eq(sym_diff_equations_1142[11], b_sym_1142[11])\n eq12_1142 = sym.Eq(sym_diff_equations_1142[12], b_sym_1142[12])\n\n sym_state_probs_1142 = sym.solve(\n [\n eq0_1142,\n eq1_1142,\n eq2_1142,\n eq3_1142,\n eq4_1142,\n eq5_1142,\n eq6_1142,\n eq7_1142,\n eq8_1142,\n eq9_1142,\n eq10_1142,\n eq11_1142,\n eq12_1142,\n ],\n (p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24),\n )\n\n sym_state_recursive_ratios_1142 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1142[0, 0] = 1\n sym_state_recursive_ratios_1142[0, 1] = sym.factor(\n sym_state_probs_1142[p01] / sym_state_probs_1142[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1142[1, 1] = sym.factor(\n sym_state_probs_1142[p11] / sym_state_probs_1142[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1142[2, 1] = sym.factor(\n sym_state_probs_1142[p21] / sym_state_probs_1142[p11]\n ) # (1,1) -> (2,1)\n sym_state_recursive_ratios_1142[0, 2] = sym.factor(\n sym_state_probs_1142[p02] / sym_state_probs_1142[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1142[1, 2] = sym.factor(\n sym_state_probs_1142[p12] / sym_state_probs_1142[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1142[2, 2] = sym.factor(\n sym_state_probs_1142[p22] / sym_state_probs_1142[p12]\n ) # (1,2) -> (2,2)\n sym_state_recursive_ratios_1142[0, 3] = sym.factor(\n sym_state_probs_1142[p03] / sym_state_probs_1142[p02]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1142[1, 3] = sym.factor(\n sym_state_probs_1142[p13] / sym_state_probs_1142[p03]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1142[2, 3] = sym.factor(\n sym_state_probs_1142[p23] / sym_state_probs_1142[p13]\n ) # (1,3) -> (2,3)\n sym_state_recursive_ratios_1142[0, 4] = sym.factor(\n sym_state_probs_1142[p04] / sym_state_probs_1142[p03]\n ) # (0,3) -> (0,4)\n sym_state_recursive_ratios_1142[1, 4] = sym.factor(\n sym_state_probs_1142[p14] / 
sym_state_probs_1142[p04]\n ) # (0,4) -> (1,4)\n sym_state_recursive_ratios_1142[2, 4] = sym.factor(\n sym_state_probs_1142[p24] / sym_state_probs_1142[p14]\n ) # (1,4) -> (2,4)\n\n sym_state_recursive_ratios_right_1142 = sym_state_recursive_ratios_1142.copy()\n sym_state_recursive_ratios_right_1142[1, 2] = sym.factor(\n sym_state_probs_1142[p12] / sym_state_probs_1142[p11]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1142[1, 3] = sym.factor(\n sym_state_probs_1142[p13] / sym_state_probs_1142[p12]\n ) # (1,2) -> (1,3)\n sym_state_recursive_ratios_right_1142[1, 4] = sym.factor(\n sym_state_probs_1142[p14] / sym_state_probs_1142[p13]\n ) # (1,3) -> (1,4)\n sym_state_recursive_ratios_right_1142[2, 2] = sym.factor(\n sym_state_probs_1142[p22] / sym_state_probs_1142[p21]\n ) # (2,1) -> (2,2)\n sym_state_recursive_ratios_right_1142[2, 3] = sym.factor(\n sym_state_probs_1142[p23] / sym_state_probs_1142[p22]\n ) # (2,2) -> (2,3)\n sym_state_recursive_ratios_right_1142[2, 4] = sym.factor(\n sym_state_probs_1142[p24] / sym_state_probs_1142[p23]\n ) # (2,3) -> (2,4)\n\n sym_state_recursive_ratios_P0_1142 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1142[0, 0] = 1\n sym_state_recursive_ratios_P0_1142[0, 1] = sym.factor(\n sym_state_probs_1142[p01] / sym_state_probs_1142[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1142[1, 1] = sym.factor(\n sym_state_probs_1142[p11] / sym_state_probs_1142[p00]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1142[2, 1] = sym.factor(\n sym_state_probs_1142[p21] / sym_state_probs_1142[p00]\n ) # (0,0) -> (2,1)\n\n sym_state_recursive_ratios_P0_1142[0, 2] = sym.factor(\n sym_state_probs_1142[p02] / sym_state_probs_1142[p00]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1142[1, 2] = sym.factor(\n sym_state_probs_1142[p12] / sym_state_probs_1142[p00]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1142[2, 2] = sym.factor(\n sym_state_probs_1142[p22] / sym_state_probs_1142[p00]\n ) # (0,0) -> (2,2)\n\n sym_state_recursive_ratios_P0_1142[0, 3] = sym.factor(\n sym_state_probs_1142[p03] / sym_state_probs_1142[p00]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1142[1, 3] = sym.factor(\n sym_state_probs_1142[p13] / sym_state_probs_1142[p00]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1142[2, 3] = sym.factor(\n sym_state_probs_1142[p23] / sym_state_probs_1142[p00]\n ) # (0,0) -> (2,3)\n\n sym_state_recursive_ratios_P0_1142[0, 4] = sym.factor(\n sym_state_probs_1142[p04] / sym_state_probs_1142[p00]\n ) # (0,0) -> (0,4)\n sym_state_recursive_ratios_P0_1142[1, 4] = sym.factor(\n sym_state_probs_1142[p14] / sym_state_probs_1142[p00]\n ) # (0,0) -> (1,4)\n sym_state_recursive_ratios_P0_1142[2, 4] = sym.factor(\n sym_state_probs_1142[p24] / sym_state_probs_1142[p00]\n ) # (0,0) -> (2,4)\n\n return (\n sym_state_probs_1142,\n sym_state_recursive_ratios_1142,\n sym_state_recursive_ratios_right_1142,\n sym_state_recursive_ratios_P0_1142,\n )\n\n\ndef get_symbolic_state_probabilities_1151():\n num_of_servers = 1\n threshold = 1\n system_capacity = 5\n buffer_capacity = 1\n\n Q_sym_1151 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers, threshold, system_capacity, buffer_capacity\n )\n\n p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15 = sym.symbols(\n \"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15\"\n )\n pi_1151 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15])\n dimension_1151 = Q_sym_1151.shape[0]\n\n M_sym_1151 = sym.Matrix(\n 
[Q_sym_1151.transpose()[:-1, :], sym.ones(1, dimension_1151)]\n )\n sym_diff_equations_1151 = M_sym_1151 @ pi_1151\n\n b_sym_1151 = sym.Matrix([sym.zeros(dimension_1151 - 1, 1), [1]])\n\n eq0_1151 = sym.Eq(sym_diff_equations_1151[0], b_sym_1151[0])\n eq1_1151 = sym.Eq(sym_diff_equations_1151[1], b_sym_1151[1])\n eq2_1151 = sym.Eq(sym_diff_equations_1151[2], b_sym_1151[2])\n eq3_1151 = sym.Eq(sym_diff_equations_1151[3], b_sym_1151[3])\n eq4_1151 = sym.Eq(sym_diff_equations_1151[4], b_sym_1151[4])\n eq5_1151 = sym.Eq(sym_diff_equations_1151[5], b_sym_1151[5])\n eq6_1151 = sym.Eq(sym_diff_equations_1151[6], b_sym_1151[6])\n eq7_1151 = sym.Eq(sym_diff_equations_1151[7], b_sym_1151[7])\n eq8_1151 = sym.Eq(sym_diff_equations_1151[8], b_sym_1151[8])\n eq9_1151 = sym.Eq(sym_diff_equations_1151[9], b_sym_1151[9])\n eq10_1151 = sym.Eq(sym_diff_equations_1151[10], b_sym_1151[10])\n\n sym_state_probs_1151 = sym.solve(\n [\n eq0_1151,\n eq1_1151,\n eq2_1151,\n eq3_1151,\n eq4_1151,\n eq5_1151,\n eq6_1151,\n eq7_1151,\n eq8_1151,\n eq9_1151,\n eq10_1151,\n ],\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15),\n )\n\n sym_state_recursive_ratios_1151 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1151[0, 0] = 1\n sym_state_recursive_ratios_1151[0, 1] = sym.factor(\n sym_state_probs_1151[p01] / sym_state_probs_1151[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1151[1, 1] = sym.factor(\n sym_state_probs_1151[p11] / sym_state_probs_1151[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1151[0, 2] = sym.factor(\n sym_state_probs_1151[p02] / sym_state_probs_1151[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1151[1, 2] = sym.factor(\n sym_state_probs_1151[p12] / sym_state_probs_1151[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1151[0, 3] = sym.factor(\n sym_state_probs_1151[p03] / sym_state_probs_1151[p02]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1151[1, 3] = sym.factor(\n sym_state_probs_1151[p13] / sym_state_probs_1151[p03]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1151[0, 4] = sym.factor(\n sym_state_probs_1151[p04] / sym_state_probs_1151[p03]\n ) # (0,3) -> (0,4)\n sym_state_recursive_ratios_1151[1, 4] = sym.factor(\n sym_state_probs_1151[p14] / sym_state_probs_1151[p04]\n ) # (0,4) -> (1,4)\n sym_state_recursive_ratios_1151[0, 5] = sym.factor(\n sym_state_probs_1151[p05] / sym_state_probs_1151[p04]\n ) # (0,4) -> (0,5)\n sym_state_recursive_ratios_1151[1, 5] = sym.factor(\n sym_state_probs_1151[p15] / sym_state_probs_1151[p05]\n ) # (0,5) -> (1,5)\n\n sym_state_recursive_ratios_right_1151 = sym_state_recursive_ratios_1151.copy()\n sym_state_recursive_ratios_right_1151[1, 2] = sym.factor(\n sym_state_probs_1151[p12] / sym_state_probs_1151[p11]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1151[1, 3] = sym.factor(\n sym_state_probs_1151[p13] / sym_state_probs_1151[p12]\n ) # (1,2) -> (1,3)\n sym_state_recursive_ratios_right_1151[1, 4] = sym.factor(\n sym_state_probs_1151[p14] / sym_state_probs_1151[p13]\n ) # (1,3) -> (1,4)\n sym_state_recursive_ratios_right_1151[1, 5] = sym.factor(\n sym_state_probs_1151[p15] / sym_state_probs_1151[p14]\n ) # (1,4) -> (1,5)\n\n sym_state_recursive_ratios_P0_1151 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1151[0, 0] = 1\n sym_state_recursive_ratios_P0_1151[0, 1] = sym.factor(\n sym_state_probs_1151[p01] / sym_state_probs_1151[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1151[1, 1] = sym.factor(\n 
sym_state_probs_1151[p11] / sym_state_probs_1151[p00]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1151[0, 2] = sym.factor(\n sym_state_probs_1151[p02] / sym_state_probs_1151[p00]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1151[1, 2] = sym.factor(\n sym_state_probs_1151[p12] / sym_state_probs_1151[p00]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1151[0, 3] = sym.factor(\n sym_state_probs_1151[p03] / sym_state_probs_1151[p00]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1151[1, 3] = sym.factor(\n sym_state_probs_1151[p13] / sym_state_probs_1151[p00]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1151[0, 4] = sym.factor(\n sym_state_probs_1151[p04] / sym_state_probs_1151[p00]\n ) # (0,0) -> (0,4)\n sym_state_recursive_ratios_P0_1151[1, 4] = sym.factor(\n sym_state_probs_1151[p14] / sym_state_probs_1151[p00]\n ) # (0,0) -> (1,4)\n sym_state_recursive_ratios_P0_1151[0, 5] = sym.factor(\n sym_state_probs_1151[p05] / sym_state_probs_1151[p00]\n ) # (0,0) -> (0,5)\n sym_state_recursive_ratios_P0_1151[1, 5] = sym.factor(\n sym_state_probs_1151[p15] / sym_state_probs_1151[p00]\n ) # (0,0) -> (1,5)\n\n return (\n sym_state_probs_1151,\n sym_state_recursive_ratios_1151,\n sym_state_recursive_ratios_right_1151,\n sym_state_recursive_ratios_P0_1151,\n )\n\n\ndef get_symbolic_state_probabilities_1161():\n num_of_servers = 1\n threshold = 1\n system_capacity = 6\n buffer_capacity = 1\n\n Q_sym_1161 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers, threshold, system_capacity, buffer_capacity\n )\n\n p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16 = sym.symbols(\n \"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16\"\n )\n pi_1161 = sym.Matrix(\n [p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16]\n )\n dimension_1161 = Q_sym_1161.shape[0]\n\n M_sym_1161 = sym.Matrix(\n [Q_sym_1161.transpose()[:-1, :], sym.ones(1, dimension_1161)]\n )\n sym_diff_equations_1161 = M_sym_1161 @ pi_1161\n\n b_sym_1161 = sym.Matrix([sym.zeros(dimension_1161 - 1, 1), [1]])\n\n eq0_1161 = sym.Eq(sym_diff_equations_1161[0], b_sym_1161[0])\n eq1_1161 = sym.Eq(sym_diff_equations_1161[1], b_sym_1161[1])\n eq2_1161 = sym.Eq(sym_diff_equations_1161[2], b_sym_1161[2])\n eq3_1161 = sym.Eq(sym_diff_equations_1161[3], b_sym_1161[3])\n eq4_1161 = sym.Eq(sym_diff_equations_1161[4], b_sym_1161[4])\n eq5_1161 = sym.Eq(sym_diff_equations_1161[5], b_sym_1161[5])\n eq6_1161 = sym.Eq(sym_diff_equations_1161[6], b_sym_1161[6])\n eq7_1161 = sym.Eq(sym_diff_equations_1161[7], b_sym_1161[7])\n eq8_1161 = sym.Eq(sym_diff_equations_1161[8], b_sym_1161[8])\n eq9_1161 = sym.Eq(sym_diff_equations_1161[9], b_sym_1161[9])\n eq10_1161 = sym.Eq(sym_diff_equations_1161[10], b_sym_1161[10])\n eq11_1161 = sym.Eq(sym_diff_equations_1161[11], b_sym_1161[11])\n eq12_1161 = sym.Eq(sym_diff_equations_1161[12], b_sym_1161[12])\n\n sym_state_probs_1161 = sym.solve(\n [\n eq0_1161,\n eq1_1161,\n eq2_1161,\n eq3_1161,\n eq4_1161,\n eq5_1161,\n eq6_1161,\n eq7_1161,\n eq8_1161,\n eq9_1161,\n eq10_1161,\n eq11_1161,\n eq12_1161,\n ],\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16),\n )\n\n sym_state_recursive_ratios_1161 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1161[0, 0] = 1\n sym_state_recursive_ratios_1161[0, 1] = sym.factor(\n sym_state_probs_1161[p01] / sym_state_probs_1161[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1161[1, 1] = sym.factor(\n sym_state_probs_1161[p11] / 
sym_state_probs_1161[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1161[0, 2] = sym.factor(\n sym_state_probs_1161[p02] / sym_state_probs_1161[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1161[1, 2] = sym.factor(\n sym_state_probs_1161[p12] / sym_state_probs_1161[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1161[0, 3] = sym.factor(\n sym_state_probs_1161[p03] / sym_state_probs_1161[p02]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1161[1, 3] = sym.factor(\n sym_state_probs_1161[p13] / sym_state_probs_1161[p03]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1161[0, 4] = sym.factor(\n sym_state_probs_1161[p04] / sym_state_probs_1161[p03]\n ) # (0,3) -> (0,4)\n sym_state_recursive_ratios_1161[1, 4] = sym.factor(\n sym_state_probs_1161[p14] / sym_state_probs_1161[p04]\n ) # (0,4) -> (1,4)\n sym_state_recursive_ratios_1161[0, 5] = sym.factor(\n sym_state_probs_1161[p05] / sym_state_probs_1161[p04]\n ) # (0,4) -> (0,5)\n sym_state_recursive_ratios_1161[1, 5] = sym.factor(\n sym_state_probs_1161[p15] / sym_state_probs_1161[p05]\n ) # (0,5) -> (1,5)\n sym_state_recursive_ratios_1161[0, 6] = sym.factor(\n sym_state_probs_1161[p06] / sym_state_probs_1161[p05]\n ) # (0,5) -> (0,6)\n sym_state_recursive_ratios_1161[1, 6] = sym.factor(\n sym_state_probs_1161[p16] / sym_state_probs_1161[p06]\n ) # (0,6) -> (1,6)\n\n sym_state_recursive_ratios_right_1161 = sym_state_recursive_ratios_1161.copy()\n sym_state_recursive_ratios_right_1161[1, 2] = sym.factor(\n sym_state_probs_1161[p12] / sym_state_probs_1161[p11]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1161[1, 3] = sym.factor(\n sym_state_probs_1161[p13] / sym_state_probs_1161[p12]\n ) # (1,2) -> (1,3)\n sym_state_recursive_ratios_right_1161[1, 4] = sym.factor(\n sym_state_probs_1161[p14] / sym_state_probs_1161[p13]\n ) # (1,3) -> (1,4)\n sym_state_recursive_ratios_right_1161[1, 5] = sym.factor(\n sym_state_probs_1161[p15] / sym_state_probs_1161[p14]\n ) # (1,4) -> (1,5)\n sym_state_recursive_ratios_right_1161[1, 6] = sym.factor(\n sym_state_probs_1161[p16] / sym_state_probs_1161[p15]\n ) # (1,5) -> (1,6)\n\n sym_state_recursive_ratios_P0_1161 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1161[0, 0] = 1\n sym_state_recursive_ratios_P0_1161[0, 1] = sym.factor(\n sym_state_probs_1161[p01] / sym_state_probs_1161[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1161[1, 1] = sym.factor(\n sym_state_probs_1161[p11] / sym_state_probs_1161[p00]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1161[0, 2] = sym.factor(\n sym_state_probs_1161[p02] / sym_state_probs_1161[p00]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1161[1, 2] = sym.factor(\n sym_state_probs_1161[p12] / sym_state_probs_1161[p00]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1161[0, 3] = sym.factor(\n sym_state_probs_1161[p03] / sym_state_probs_1161[p00]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1161[1, 3] = sym.factor(\n sym_state_probs_1161[p13] / sym_state_probs_1161[p00]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1161[0, 4] = sym.factor(\n sym_state_probs_1161[p04] / sym_state_probs_1161[p00]\n ) # (0,0) -> (0,4)\n sym_state_recursive_ratios_P0_1161[1, 4] = sym.factor(\n sym_state_probs_1161[p14] / sym_state_probs_1161[p00]\n ) # (0,0) -> (1,4)\n sym_state_recursive_ratios_P0_1161[0, 5] = sym.factor(\n sym_state_probs_1161[p05] / sym_state_probs_1161[p00]\n ) # (0,0) -> (0,5)\n sym_state_recursive_ratios_P0_1161[1, 5] = sym.factor(\n 
sym_state_probs_1161[p15] / sym_state_probs_1161[p00]\n ) # (0,0) -> (1,5)\n sym_state_recursive_ratios_P0_1161[0, 6] = sym.factor(\n sym_state_probs_1161[p06] / sym_state_probs_1161[p00]\n ) # (0,0) -> (0,6)\n sym_state_recursive_ratios_P0_1161[1, 6] = sym.factor(\n sym_state_probs_1161[p16] / sym_state_probs_1161[p00]\n ) # (0,0) -> (1,6)\n\n return (\n sym_state_probs_1161,\n sym_state_recursive_ratios_1161,\n sym_state_recursive_ratios_right_1161,\n sym_state_recursive_ratios_P0_1161,\n )\n\n\ndef get_symbolic_state_probabilities_1171():\n num_of_servers = 1\n threshold = 1\n system_capacity = 7\n buffer_capacity = 1\n\n Q_sym_1171 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers, threshold, system_capacity, buffer_capacity\n )\n\n (\n p00,\n p01,\n p11,\n p02,\n p12,\n p03,\n p13,\n p04,\n p14,\n p05,\n p15,\n p06,\n p16,\n p07,\n p17,\n ) = sym.symbols(\n \"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17\"\n )\n pi_1171 = sym.Matrix(\n [p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17]\n )\n dimension_1171 = Q_sym_1171.shape[0]\n\n M_sym_1171 = sym.Matrix(\n [Q_sym_1171.transpose()[:-1, :], sym.ones(1, dimension_1171)]\n )\n sym_diff_equations_1171 = M_sym_1171 @ pi_1171\n\n b_sym_1171 = sym.Matrix([sym.zeros(dimension_1171 - 1, 1), [1]])\n\n eq0_1171 = sym.Eq(sym_diff_equations_1171[0], b_sym_1171[0])\n eq1_1171 = sym.Eq(sym_diff_equations_1171[1], b_sym_1171[1])\n eq2_1171 = sym.Eq(sym_diff_equations_1171[2], b_sym_1171[2])\n eq3_1171 = sym.Eq(sym_diff_equations_1171[3], b_sym_1171[3])\n eq4_1171 = sym.Eq(sym_diff_equations_1171[4], b_sym_1171[4])\n eq5_1171 = sym.Eq(sym_diff_equations_1171[5], b_sym_1171[5])\n eq6_1171 = sym.Eq(sym_diff_equations_1171[6], b_sym_1171[6])\n eq7_1171 = sym.Eq(sym_diff_equations_1171[7], b_sym_1171[7])\n eq8_1171 = sym.Eq(sym_diff_equations_1171[8], b_sym_1171[8])\n eq9_1171 = sym.Eq(sym_diff_equations_1171[9], b_sym_1171[9])\n eq10_1171 = sym.Eq(sym_diff_equations_1171[10], b_sym_1171[10])\n eq11_1171 = sym.Eq(sym_diff_equations_1171[11], b_sym_1171[11])\n eq12_1171 = sym.Eq(sym_diff_equations_1171[12], b_sym_1171[12])\n eq13_1171 = sym.Eq(sym_diff_equations_1171[13], b_sym_1171[13])\n eq14_1171 = sym.Eq(sym_diff_equations_1171[14], b_sym_1171[14])\n\n sym_state_probs_1171 = sym.solve(\n [\n eq0_1171,\n eq1_1171,\n eq2_1171,\n eq3_1171,\n eq4_1171,\n eq5_1171,\n eq6_1171,\n eq7_1171,\n eq8_1171,\n eq9_1171,\n eq10_1171,\n eq11_1171,\n eq12_1171,\n eq13_1171,\n eq14_1171,\n ],\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17),\n )\n\n sym_state_recursive_ratios_1171 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1171[0, 0] = 1\n sym_state_recursive_ratios_1171[0, 1] = sym.factor(\n sym_state_probs_1171[p01] / sym_state_probs_1171[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1171[1, 1] = sym.factor(\n sym_state_probs_1171[p11] / sym_state_probs_1171[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1171[0, 2] = sym.factor(\n sym_state_probs_1171[p02] / sym_state_probs_1171[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1171[0, 3] = sym.factor(\n sym_state_probs_1171[p03] / sym_state_probs_1171[p02]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p03]\n ) # (0,3) -> (1,3)\n 
sym_state_recursive_ratios_1171[0, 4] = sym.factor(\n sym_state_probs_1171[p04] / sym_state_probs_1171[p03]\n ) # (0,3) -> (0,4)\n sym_state_recursive_ratios_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p04]\n ) # (0,4) -> (1,4)\n sym_state_recursive_ratios_1171[0, 5] = sym.factor(\n sym_state_probs_1171[p05] / sym_state_probs_1171[p04]\n ) # (0,4) -> (0,5)\n sym_state_recursive_ratios_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p05]\n ) # (0,5) -> (1,5)\n sym_state_recursive_ratios_1171[0, 6] = sym.factor(\n sym_state_probs_1171[p06] / sym_state_probs_1171[p05]\n ) # (0,5) -> (0,6)\n sym_state_recursive_ratios_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p06]\n ) # (0,6) -> (1,6)\n sym_state_recursive_ratios_1171[0, 7] = sym.factor(\n sym_state_probs_1171[p07] / sym_state_probs_1171[p06]\n ) # (0,6) -> (0,7)\n sym_state_recursive_ratios_1171[1, 7] = sym.factor(\n sym_state_probs_1171[p17] / sym_state_probs_1171[p07]\n ) # (0,7) -> (1,7)\n\n sym_state_recursive_ratios_right_1171 = sym_state_recursive_ratios_1171.copy()\n sym_state_recursive_ratios_right_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p11]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p12]\n ) # (1,2) -> (1,3)\n sym_state_recursive_ratios_right_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p13]\n ) # (1,3) -> (1,4)\n sym_state_recursive_ratios_right_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p14]\n ) # (1,4) -> (1,5)\n sym_state_recursive_ratios_right_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p15]\n ) # (1,5) -> (1,6)\n sym_state_recursive_ratios_right_1171[1, 7] = sym.factor(\n sym_state_probs_1171[p17] / sym_state_probs_1171[p16]\n ) # (1,6) -> (1,7)\n\n sym_state_recursive_ratios_P0_1171 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1171[0, 0] = 1\n sym_state_recursive_ratios_P0_1171[0, 1] = sym.factor(\n sym_state_probs_1171[p01] / sym_state_probs_1171[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1171[1, 1] = sym.factor(\n sym_state_probs_1171[p11] / sym_state_probs_1171[p00]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1171[0, 2] = sym.factor(\n sym_state_probs_1171[p02] / sym_state_probs_1171[p00]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p00]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1171[0, 3] = sym.factor(\n sym_state_probs_1171[p03] / sym_state_probs_1171[p00]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p00]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1171[0, 4] = sym.factor(\n sym_state_probs_1171[p04] / sym_state_probs_1171[p00]\n ) # (0,0) -> (0,4)\n sym_state_recursive_ratios_P0_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p00]\n ) # (0,0) -> (1,4)\n sym_state_recursive_ratios_P0_1171[0, 5] = sym.factor(\n sym_state_probs_1171[p05] / sym_state_probs_1171[p00]\n ) # (0,0) -> (0,5)\n sym_state_recursive_ratios_P0_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p00]\n ) # (0,0) -> (1,5)\n sym_state_recursive_ratios_P0_1171[0, 6] = sym.factor(\n sym_state_probs_1171[p06] / sym_state_probs_1171[p00]\n ) 
# (0,0) -> (0,6)\n sym_state_recursive_ratios_P0_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p00]\n ) # (0,0) -> (1,6)\n sym_state_recursive_ratios_P0_1171[0, 7] = sym.factor(\n sym_state_probs_1171[p07] / sym_state_probs_1171[p00]\n ) # (0,0) -> (0,7)\n sym_state_recursive_ratios_P0_1171[1, 7] = sym.factor(\n sym_state_probs_1171[p17] / sym_state_probs_1171[p00]\n ) # (0,0) -> (1,7)\n\n return (\n sym_state_probs_1171,\n sym_state_recursive_ratios_1171,\n sym_state_recursive_ratios_right_1171,\n sym_state_recursive_ratios_P0_1171,\n )\n\n\ndef get_symbolic_state_probabilities_1181():\n num_of_servers = 1\n threshold = 1\n system_capacity = 8\n buffer_capacity = 1\n\n Q_sym_1181 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers, threshold, system_capacity, buffer_capacity\n )\n\n (\n p00,\n p01,\n p11,\n p02,\n p12,\n p03,\n p13,\n p04,\n p14,\n p05,\n p15,\n p06,\n p16,\n p07,\n p17,\n p08,\n p18,\n ) = sym.symbols(\n \"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18\"\n )\n pi_1181 = sym.Matrix(\n [\n p00,\n p01,\n p11,\n p02,\n p12,\n p03,\n p13,\n p04,\n p14,\n p05,\n p15,\n p06,\n p16,\n p07,\n p17,\n p08,\n p18,\n ]\n )\n dimension_1181 = Q_sym_1181.shape[0]\n\n M_sym_1181 = sym.Matrix(\n [Q_sym_1181.transpose()[:-1, :], sym.ones(1, dimension_1181)]\n )\n sym_diff_equations_1181 = M_sym_1181 @ pi_1181\n\n b_sym_1181 = sym.Matrix([sym.zeros(dimension_1181 - 1, 1), [1]])\n\n eq0_1181 = sym.Eq(sym_diff_equations_1181[0], b_sym_1181[0])\n eq1_1181 = sym.Eq(sym_diff_equations_1181[1], b_sym_1181[1])\n eq2_1181 = sym.Eq(sym_diff_equations_1181[2], b_sym_1181[2])\n eq3_1181 = sym.Eq(sym_diff_equations_1181[3], b_sym_1181[3])\n eq4_1181 = sym.Eq(sym_diff_equations_1181[4], b_sym_1181[4])\n eq5_1181 = sym.Eq(sym_diff_equations_1181[5], b_sym_1181[5])\n eq6_1181 = sym.Eq(sym_diff_equations_1181[6], b_sym_1181[6])\n eq7_1181 = sym.Eq(sym_diff_equations_1181[7], b_sym_1181[7])\n eq8_1181 = sym.Eq(sym_diff_equations_1181[8], b_sym_1181[8])\n eq9_1181 = sym.Eq(sym_diff_equations_1181[9], b_sym_1181[9])\n eq10_1181 = sym.Eq(sym_diff_equations_1181[10], b_sym_1181[10])\n eq11_1181 = sym.Eq(sym_diff_equations_1181[11], b_sym_1181[11])\n eq12_1181 = sym.Eq(sym_diff_equations_1181[12], b_sym_1181[12])\n eq13_1181 = sym.Eq(sym_diff_equations_1181[13], b_sym_1181[13])\n eq14_1181 = sym.Eq(sym_diff_equations_1181[14], b_sym_1181[14])\n eq15_1181 = sym.Eq(sym_diff_equations_1181[15], b_sym_1181[15])\n eq16_1181 = sym.Eq(sym_diff_equations_1181[16], b_sym_1181[16])\n\n sym_state_probs_1181 = sym.solve(\n [\n eq0_1181,\n eq1_1181,\n eq2_1181,\n eq3_1181,\n eq4_1181,\n eq5_1181,\n eq6_1181,\n eq7_1181,\n eq8_1181,\n eq9_1181,\n eq10_1181,\n eq11_1181,\n eq12_1181,\n eq13_1181,\n eq14_1181,\n eq15_1181,\n eq16_1181,\n ],\n (\n p00,\n p01,\n p11,\n p02,\n p12,\n p03,\n p13,\n p04,\n p14,\n p05,\n p15,\n p06,\n p16,\n p07,\n p17,\n p08,\n p18,\n ),\n )\n\n sym_state_recursive_ratios_1181 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1181[0, 0] = 1\n sym_state_recursive_ratios_1181[0, 1] = sym.factor(\n sym_state_probs_1181[p01] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1181[1, 1] = sym.factor(\n sym_state_probs_1181[p11] / sym_state_probs_1181[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1181[0, 2] = sym.factor(\n sym_state_probs_1181[p02] / sym_state_probs_1181[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1181[1, 2] = sym.factor(\n 
sym_state_probs_1181[p12] / sym_state_probs_1181[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1181[0, 3] = sym.factor(\n sym_state_probs_1181[p03] / sym_state_probs_1181[p02]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p03]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1181[0, 4] = sym.factor(\n sym_state_probs_1181[p04] / sym_state_probs_1181[p03]\n ) # (0,3) -> (0,4)\n sym_state_recursive_ratios_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p04]\n ) # (0,4) -> (1,4)\n sym_state_recursive_ratios_1181[0, 5] = sym.factor(\n sym_state_probs_1181[p05] / sym_state_probs_1181[p04]\n ) # (0,4) -> (0,5)\n sym_state_recursive_ratios_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p05]\n ) # (0,5) -> (1,5)\n sym_state_recursive_ratios_1181[0, 6] = sym.factor(\n sym_state_probs_1181[p06] / sym_state_probs_1181[p05]\n ) # (0,5) -> (0,6)\n sym_state_recursive_ratios_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p06]\n ) # (0,6) -> (1,6)\n sym_state_recursive_ratios_1181[0, 7] = sym.factor(\n sym_state_probs_1181[p07] / sym_state_probs_1181[p06]\n ) # (0,6) -> (0,7)\n sym_state_recursive_ratios_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p07]\n ) # (0,7) -> (1,7)\n sym_state_recursive_ratios_1181[0, 8] = sym.factor(\n sym_state_probs_1181[p08] / sym_state_probs_1181[p07]\n ) # (0,7) -> (0,8)\n sym_state_recursive_ratios_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p08]\n ) # (0,8) -> (1,8)\n\n sym_state_recursive_ratios_right_1181 = sym_state_recursive_ratios_1181.copy()\n sym_state_recursive_ratios_right_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p11]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p12]\n ) # (1,2) -> (1,3)\n sym_state_recursive_ratios_right_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p13]\n ) # (1,3) -> (1,4)\n sym_state_recursive_ratios_right_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p14]\n ) # (1,4) -> (1,5)\n sym_state_recursive_ratios_right_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p15]\n ) # (1,5) -> (1,6)\n sym_state_recursive_ratios_right_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p16]\n ) # (1,6) -> (1,7)\n sym_state_recursive_ratios_right_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p17]\n ) # (1,7) -> (1,8)\n\n sym_state_recursive_ratios_P0_1181 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1181[0, 0] = 1\n sym_state_recursive_ratios_P0_1181[0, 1] = sym.factor(\n sym_state_probs_1181[p01] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1181[1, 1] = sym.factor(\n sym_state_probs_1181[p11] / sym_state_probs_1181[p00]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1181[0, 2] = sym.factor(\n sym_state_probs_1181[p02] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p00]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1181[0, 3] = sym.factor(\n sym_state_probs_1181[p03] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1181[1, 3] = 
sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p00]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1181[0, 4] = sym.factor(\n sym_state_probs_1181[p04] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,4)\n sym_state_recursive_ratios_P0_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p00]\n ) # (0,0) -> (1,4)\n sym_state_recursive_ratios_P0_1181[0, 5] = sym.factor(\n sym_state_probs_1181[p05] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,5)\n sym_state_recursive_ratios_P0_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p00]\n ) # (0,0) -> (1,5)\n sym_state_recursive_ratios_P0_1181[0, 6] = sym.factor(\n sym_state_probs_1181[p06] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,6)\n sym_state_recursive_ratios_P0_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p00]\n ) # (0,0) -> (1,6)\n sym_state_recursive_ratios_P0_1181[0, 7] = sym.factor(\n sym_state_probs_1181[p07] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,7)\n sym_state_recursive_ratios_P0_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p00]\n ) # (0,0) -> (1,7)\n sym_state_recursive_ratios_P0_1181[0, 8] = sym.factor(\n sym_state_probs_1181[p08] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,8)\n sym_state_recursive_ratios_P0_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p00]\n ) # (0,0) -> (1,8)\n\n return (\n sym_state_probs_1181,\n sym_state_recursive_ratios_1181,\n sym_state_recursive_ratios_right_1181,\n sym_state_recursive_ratios_P0_1181,\n )\n\n\ndef get_symbolic_state_probabilities_1191():\n num_of_servers = 1\n threshold = 1\n system_capacity = 9\n buffer_capacity = 1\n\n Q_sym_1191 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers, threshold, system_capacity, buffer_capacity\n )\n\n (\n p00,\n p01,\n p11,\n p02,\n p12,\n p03,\n p13,\n p04,\n p14,\n p05,\n p15,\n p06,\n p16,\n p07,\n p17,\n p08,\n p18,\n p09,\n p19,\n ) = sym.symbols(\n \"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18, p09, p19\"\n )\n pi_1191 = sym.Matrix(\n [\n p00,\n p01,\n p11,\n p02,\n p12,\n p03,\n p13,\n p04,\n p14,\n p05,\n p15,\n p06,\n p16,\n p07,\n p17,\n p08,\n p18,\n p09,\n p19,\n ]\n )\n dimension_1191 = Q_sym_1191.shape[0]\n\n M_sym_1191 = sym.Matrix(\n [Q_sym_1191.transpose()[:-1, :], sym.ones(1, dimension_1191)]\n )\n sym_diff_equations_1191 = M_sym_1191 @ pi_1191\n\n b_sym_1191 = sym.Matrix([sym.zeros(dimension_1191 - 1, 1), [1]])\n\n eq0_1191 = sym.Eq(sym_diff_equations_1191[0], b_sym_1191[0])\n eq1_1191 = sym.Eq(sym_diff_equations_1191[1], b_sym_1191[1])\n eq2_1191 = sym.Eq(sym_diff_equations_1191[2], b_sym_1191[2])\n eq3_1191 = sym.Eq(sym_diff_equations_1191[3], b_sym_1191[3])\n eq4_1191 = sym.Eq(sym_diff_equations_1191[4], b_sym_1191[4])\n eq5_1191 = sym.Eq(sym_diff_equations_1191[5], b_sym_1191[5])\n eq6_1191 = sym.Eq(sym_diff_equations_1191[6], b_sym_1191[6])\n eq7_1191 = sym.Eq(sym_diff_equations_1191[7], b_sym_1191[7])\n eq8_1191 = sym.Eq(sym_diff_equations_1191[8], b_sym_1191[8])\n eq9_1191 = sym.Eq(sym_diff_equations_1191[9], b_sym_1191[9])\n eq10_1191 = sym.Eq(sym_diff_equations_1191[10], b_sym_1191[10])\n eq11_1191 = sym.Eq(sym_diff_equations_1191[11], b_sym_1191[11])\n eq12_1191 = sym.Eq(sym_diff_equations_1191[12], b_sym_1191[12])\n eq13_1191 = sym.Eq(sym_diff_equations_1191[13], b_sym_1191[13])\n eq14_1191 = sym.Eq(sym_diff_equations_1191[14], b_sym_1191[14])\n eq15_1191 = sym.Eq(sym_diff_equations_1191[15], 
b_sym_1191[15])\n eq16_1191 = sym.Eq(sym_diff_equations_1191[16], b_sym_1191[16])\n eq17_1191 = sym.Eq(sym_diff_equations_1191[17], b_sym_1191[17])\n eq18_1191 = sym.Eq(sym_diff_equations_1191[18], b_sym_1191[18])\n\n sym_state_probs_1191 = sym.solve(\n [\n eq0_1191,\n eq1_1191,\n eq2_1191,\n eq3_1191,\n eq4_1191,\n eq5_1191,\n eq6_1191,\n eq7_1191,\n eq8_1191,\n eq9_1191,\n eq10_1191,\n eq11_1191,\n eq12_1191,\n eq13_1191,\n eq14_1191,\n eq15_1191,\n eq16_1191,\n eq17_1191,\n eq18_1191,\n ],\n (\n p00,\n p01,\n p11,\n p02,\n p12,\n p03,\n p13,\n p04,\n p14,\n p05,\n p15,\n p06,\n p16,\n p07,\n p17,\n p08,\n p18,\n p09,\n p19,\n ),\n )\n\n sym_state_recursive_ratios_1191 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1191[0, 0] = 1\n sym_state_recursive_ratios_1191[0, 1] = sym.factor(\n sym_state_probs_1191[p01] / sym_state_probs_1191[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1191[1, 1] = sym.factor(\n sym_state_probs_1191[p11] / sym_state_probs_1191[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1191[0, 2] = sym.factor(\n sym_state_probs_1191[p02] / sym_state_probs_1191[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1191[1, 2] = sym.factor(\n sym_state_probs_1191[p12] / sym_state_probs_1191[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1191[0, 3] = sym.factor(\n sym_state_probs_1191[p03] / sym_state_probs_1191[p02]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1191[1, 3] = sym.factor(\n sym_state_probs_1191[p13] / sym_state_probs_1191[p03]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1191[0, 4] = sym.factor(\n sym_state_probs_1191[p04] / sym_state_probs_1191[p03]\n ) # (0,3) -> (0,4)\n sym_state_recursive_ratios_1191[1, 4] = sym.factor(\n sym_state_probs_1191[p14] / sym_state_probs_1191[p04]\n ) # (0,4) -> (1,4)\n sym_state_recursive_ratios_1191[0, 5] = sym.factor(\n sym_state_probs_1191[p05] / sym_state_probs_1191[p04]\n ) # (0,4) -> (0,5)\n sym_state_recursive_ratios_1191[1, 5] = sym.factor(\n sym_state_probs_1191[p15] / sym_state_probs_1191[p05]\n ) # (0,5) -> (1,5)\n sym_state_recursive_ratios_1191[0, 6] = sym.factor(\n sym_state_probs_1191[p06] / sym_state_probs_1191[p05]\n ) # (0,5) -> (0,6)\n sym_state_recursive_ratios_1191[1, 6] = sym.factor(\n sym_state_probs_1191[p16] / sym_state_probs_1191[p06]\n ) # (0,6) -> (1,6)\n sym_state_recursive_ratios_1191[0, 7] = sym.factor(\n sym_state_probs_1191[p07] / sym_state_probs_1191[p06]\n ) # (0,6) -> (0,7)\n sym_state_recursive_ratios_1191[1, 7] = sym.factor(\n sym_state_probs_1191[p17] / sym_state_probs_1191[p07]\n ) # (0,7) -> (1,7)\n sym_state_recursive_ratios_1191[0, 8] = sym.factor(\n sym_state_probs_1191[p08] / sym_state_probs_1191[p07]\n ) # (0,7) -> (0,8)\n sym_state_recursive_ratios_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p08]\n ) # (0,8) -> (1,8)\n sym_state_recursive_ratios_1191[0, 9] = sym.factor(\n sym_state_probs_1191[p09] / sym_state_probs_1191[p08]\n ) # (0,8) -> (0,9)\n sym_state_recursive_ratios_1191[1, 9] = sym.factor(\n sym_state_probs_1191[p19] / sym_state_probs_1191[p09]\n ) # (0,9) -> (1,9)\n\n sym_state_recursive_ratios_right_1191 = sym_state_recursive_ratios_1191.copy()\n sym_state_recursive_ratios_right_1191[1, 2] = sym.factor(\n sym_state_probs_1191[p12] / sym_state_probs_1191[p11]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1191[1, 3] = sym.factor(\n sym_state_probs_1191[p13] / sym_state_probs_1191[p12]\n ) # (1,2) -> (1,3)\n sym_state_recursive_ratios_right_1191[1, 4] = 
sym.factor(\n sym_state_probs_1191[p14] / sym_state_probs_1191[p13]\n ) # (1,3) -> (1,4)\n sym_state_recursive_ratios_right_1191[1, 5] = sym.factor(\n sym_state_probs_1191[p15] / sym_state_probs_1191[p14]\n ) # (1,4) -> (1,5)\n sym_state_recursive_ratios_right_1191[1, 6] = sym.factor(\n sym_state_probs_1191[p16] / sym_state_probs_1191[p15]\n ) # (1,5) -> (1,6)\n sym_state_recursive_ratios_right_1191[1, 7] = sym.factor(\n sym_state_probs_1191[p17] / sym_state_probs_1191[p16]\n ) # (1,6) -> (1,7)\n sym_state_recursive_ratios_right_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p17]\n ) # (1,7) -> (1,8)\n sym_state_recursive_ratios_right_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p17]\n ) # (1,8) -> (1,9)\n\n sym_state_recursive_ratios_P0_1191 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1191[0, 0] = 1\n sym_state_recursive_ratios_P0_1191[0, 1] = sym.factor(\n sym_state_probs_1191[p01] / sym_state_probs_1191[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1191[1, 1] = sym.factor(\n sym_state_probs_1191[p11] / sym_state_probs_1191[p00]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1191[0, 2] = sym.factor(\n sym_state_probs_1191[p02] / sym_state_probs_1191[p00]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1191[1, 2] = sym.factor(\n sym_state_probs_1191[p12] / sym_state_probs_1191[p00]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1191[0, 3] = sym.factor(\n sym_state_probs_1191[p03] / sym_state_probs_1191[p00]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1191[1, 3] = sym.factor(\n sym_state_probs_1191[p13] / sym_state_probs_1191[p00]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1191[0, 4] = sym.factor(\n sym_state_probs_1191[p04] / sym_state_probs_1191[p00]\n ) # (0,0) -> (0,4)\n sym_state_recursive_ratios_P0_1191[1, 4] = sym.factor(\n sym_state_probs_1191[p14] / sym_state_probs_1191[p00]\n ) # (0,0) -> (1,4)\n sym_state_recursive_ratios_P0_1191[0, 5] = sym.factor(\n sym_state_probs_1191[p05] / sym_state_probs_1191[p00]\n ) # (0,0) -> (0,5)\n sym_state_recursive_ratios_P0_1191[1, 5] = sym.factor(\n sym_state_probs_1191[p15] / sym_state_probs_1191[p00]\n ) # (0,0) -> (1,5)\n sym_state_recursive_ratios_P0_1191[0, 6] = sym.factor(\n sym_state_probs_1191[p06] / sym_state_probs_1191[p00]\n ) # (0,0) -> (0,6)\n sym_state_recursive_ratios_P0_1191[1, 6] = sym.factor(\n sym_state_probs_1191[p16] / sym_state_probs_1191[p00]\n ) # (0,0) -> (1,6)\n sym_state_recursive_ratios_P0_1191[0, 7] = sym.factor(\n sym_state_probs_1191[p07] / sym_state_probs_1191[p00]\n ) # (0,0) -> (0,7)\n sym_state_recursive_ratios_P0_1191[1, 7] = sym.factor(\n sym_state_probs_1191[p17] / sym_state_probs_1191[p00]\n ) # (0,0) -> (1,7)\n sym_state_recursive_ratios_P0_1191[0, 8] = sym.factor(\n sym_state_probs_1191[p08] / sym_state_probs_1191[p00]\n ) # (0,0) -> (0,8)\n sym_state_recursive_ratios_P0_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p00]\n ) # (0,0) -> (1,8)\n sym_state_recursive_ratios_P0_1191[0, 9] = sym.factor(\n sym_state_probs_1191[p09] / sym_state_probs_1191[p00]\n ) # (0,0) -> (0,9)\n sym_state_recursive_ratios_P0_1191[1, 9] = sym.factor(\n sym_state_probs_1191[p19] / sym_state_probs_1191[p00]\n ) # (0,0) -> (1,9)\n\n return (\n sym_state_probs_1191,\n sym_state_recursive_ratios_1191,\n sym_state_recursive_ratios_right_1191,\n sym_state_recursive_ratios_P0_1191,\n )\n",
"step-ids": [
5,
12,
13,
16,
17
]
}
|
[
5,
12,
13,
16,
17
] |
#!/usr/bin/env python3
import os
import subprocess
import logging
class color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
# Recover all IPs for one component from the config. Returns a list of IP strings.
def getHostsByKey(config, key):
    hosts = config.get(key, "hosts").split(',')
    return [host.strip(' \n') for host in hosts]
# Return the IP address of the current machine.
def getIp():
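    # Note: assumes the primary network interface is named 'ens3' and parses classic ifconfig output.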
ip = os.popen('ifconfig ens3 | grep "inet ad" | cut -f2 -d: | awk \'{print $1}\'', "r").read()
ip = ip.replace('\n', '')
return ip
# Check whether the given string is already present in the file.
def isAlreadyAdd(pathFile, string):
    with open(pathFile) as file:
        for line in file:
            if string in line:
                return True
    return False
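# Rewrite pathFile without the lines containing stringResearch; the filtered content is written to tmp.txt and then moved to /etc/hosts.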
def deleteLineWithString(pathFile, stringResearch):
contenu = ""
fichier = open(pathFile, "r")
for ligne in fichier:
        if stringResearch not in ligne:
contenu += ligne
fichier.close()
fichier = open('tmp.txt', 'w')
fichier.write(contenu)
fichier.close()
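    # Replace /etc/hosts with the filtered content (requires sudo)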
os.system('sudo mv tmp.txt /etc/hosts >> /dev/null 2>&1')
return
# Check whether a host is reachable (responds to a single ping).
def hostIsUp(host):
if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):
return False
return True
# Recover the IP of a server from its name, expected in the form '<component>-<number>'.
def getIpServerName(config, serverName):
ip = ""
value = serverName.split('-')
if len(value) == 2:
try:
hosts = config.get(value[0], "hosts").split(',')
ip = hosts[int(value[1]) - 1].strip(' \n')
        except Exception:
return ip
return ip
# Update the script files on a specific server: compress locally, copy over SSH, and extract remotely.
def updateFileServer(config, serverName):
ip = getIpServerName(config, serverName)
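    # Compress the current working directory into a tarball under /tmp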
out = subprocess.run(['tar', 'czf', '/tmp/SDTD-Mazerunner-Script.tar.gz', '.'],
cwd=os.getcwd(),
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)
if out.returncode == 0:
logging.info("Compressing directory done [success]")
else:
logging.error("Compressing directory failed [error]")
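    # Remove any previous copy of the script directory on the remote host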
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,
'sudo rm -rf SDTD-Mazerunner/script/'])
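    # Copy the tarball to the remote host's home directory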
out = subprocess.run(
['scp', '-pq', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz',
'xnet@' + ip + ':~/'], check=True)
if out.returncode == 0:
logging.info("Transfer done [success]")
else:
logging.error("Transferring files failed [error]")
logging.info("Detar file ...")
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,
'mkdir -p SDTD-Mazerunner/script'])
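    # Extract the tarball into SDTD-Mazerunner/script on the remote host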
out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,
'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])
if out.returncode == 0:
logging.info("Decompressing directory done [success]")
else:
logging.error("Decompressing directory failed [error]")
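    # Remove the tarball from the remote host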
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,
'rm SDTD-Mazerunner-Script.tar.gz'])
return
# Install the basic environment on the given server by running install_config_machine.py remotely.
def installEnvironmentServer(config, serverName):
ip = getIpServerName(config, serverName)
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,
'source ~/.profile; ./script/install_config_machine.py'])
return
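# Example usage (hypothetical; assumes a configparser.ConfigParser loaded from an
# inventory file whose sections each define a comma-separated "hosts" entry;
# the section name 'spark' and file name 'servers.ini' are illustrative only):
#   config = configparser.ConfigParser()
#   config.read('servers.ini')
#   if hostIsUp(getIpServerName(config, 'spark-1')):
#       updateFileServer(config, 'spark-1')
#       installEnvironmentServer(config, 'spark-1')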
|
normal
|
{
"blob_id": "2c834c734de8f8740176bb5dbb6b123c49924718",
"index": 1697,
"step-1": "<mask token>\n\n\nclass color:\n PURPLE = '\\x1b[95m'\n CYAN = '\\x1b[96m'\n DARKCYAN = '\\x1b[36m'\n BLUE = '\\x1b[94m'\n GREEN = '\\x1b[92m'\n YELLOW = '\\x1b[93m'\n RED = '\\x1b[91m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n END = '\\x1b[0m'\n\n\n<mask token>\n\n\ndef hostIsUp(host):\n if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):\n return False\n return True\n\n\n<mask token>\n\n\ndef updateFileServer(config, serverName):\n ip = getIpServerName(config, serverName)\n out = subprocess.run(['tar', 'czf',\n '/tmp/SDTD-Mazerunner-Script.tar.gz', '.'], cwd=os.getcwd(), stdout\n =subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)\n if out.returncode == 0:\n logging.info('Compressing directory done [success]')\n else:\n logging.error('Compressing directory failed [error]')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'sudo rm -rf SDTD-Mazerunner/script/'])\n out = subprocess.run(['scp', '-pq', '-o', 'StrictHostKeyChecking=no',\n '-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz', 'xnet@' +\n ip + ':~/'], check=True)\n if out.returncode == 0:\n logging.info('Transfer done [success]')\n else:\n logging.error('Transferring files failed [error]')\n logging.info('Detar file ...')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'mkdir -p SDTD-Mazerunner/script'])\n out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip,\n 'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])\n if out.returncode == 0:\n logging.info('Decompressing directory done [success]')\n else:\n logging.error('Decompressing directory failed [error]')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'rm SDTD-Mazerunner-Script.tar.gz'])\n return\n\n\ndef installEnvironmentServer(config, serverName):\n ip = getIpServerName(config, serverName)\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip,\n 'source ~/.profile; ./script/install_config_machine.py'])\n return\n",
"step-2": "<mask token>\n\n\nclass color:\n PURPLE = '\\x1b[95m'\n CYAN = '\\x1b[96m'\n DARKCYAN = '\\x1b[36m'\n BLUE = '\\x1b[94m'\n GREEN = '\\x1b[92m'\n YELLOW = '\\x1b[93m'\n RED = '\\x1b[91m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n END = '\\x1b[0m'\n\n\n<mask token>\n\n\ndef getIp():\n ip = os.popen(\n 'ifconfig ens3 | grep \"inet ad\" | cut -f2 -d: | awk \\'{print $1}\\'',\n 'r').read()\n ip = ip.replace('\\n', '')\n return ip\n\n\n<mask token>\n\n\ndef hostIsUp(host):\n if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):\n return False\n return True\n\n\ndef getIpServerName(config, serverName):\n ip = ''\n value = serverName.split('-')\n if len(value) == 2:\n try:\n hosts = config.get(value[0], 'hosts').split(',')\n ip = hosts[int(value[1]) - 1].strip(' \\n')\n except:\n return ip\n return ip\n\n\ndef updateFileServer(config, serverName):\n ip = getIpServerName(config, serverName)\n out = subprocess.run(['tar', 'czf',\n '/tmp/SDTD-Mazerunner-Script.tar.gz', '.'], cwd=os.getcwd(), stdout\n =subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)\n if out.returncode == 0:\n logging.info('Compressing directory done [success]')\n else:\n logging.error('Compressing directory failed [error]')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'sudo rm -rf SDTD-Mazerunner/script/'])\n out = subprocess.run(['scp', '-pq', '-o', 'StrictHostKeyChecking=no',\n '-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz', 'xnet@' +\n ip + ':~/'], check=True)\n if out.returncode == 0:\n logging.info('Transfer done [success]')\n else:\n logging.error('Transferring files failed [error]')\n logging.info('Detar file ...')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'mkdir -p SDTD-Mazerunner/script'])\n out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip,\n 'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])\n if out.returncode == 0:\n logging.info('Decompressing directory done [success]')\n else:\n logging.error('Decompressing directory failed [error]')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'rm SDTD-Mazerunner-Script.tar.gz'])\n return\n\n\ndef installEnvironmentServer(config, serverName):\n ip = getIpServerName(config, serverName)\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip,\n 'source ~/.profile; ./script/install_config_machine.py'])\n return\n",
"step-3": "<mask token>\n\n\nclass color:\n PURPLE = '\\x1b[95m'\n CYAN = '\\x1b[96m'\n DARKCYAN = '\\x1b[36m'\n BLUE = '\\x1b[94m'\n GREEN = '\\x1b[92m'\n YELLOW = '\\x1b[93m'\n RED = '\\x1b[91m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n END = '\\x1b[0m'\n\n\ndef getHostsByKey(config, key):\n hosts = config.get(key, 'hosts').split(',')\n index = 0\n for host in hosts:\n hosts[index] = host.strip(' \\n')\n index += 1\n return hosts\n\n\ndef getIp():\n ip = os.popen(\n 'ifconfig ens3 | grep \"inet ad\" | cut -f2 -d: | awk \\'{print $1}\\'',\n 'r').read()\n ip = ip.replace('\\n', '')\n return ip\n\n\n<mask token>\n\n\ndef deleteLineWithString(pathFile, stringResearch):\n contenu = ''\n fichier = open(pathFile, 'r')\n for ligne in fichier:\n if not stringResearch in ligne:\n contenu += ligne\n fichier.close()\n fichier = open('tmp.txt', 'w')\n fichier.write(contenu)\n fichier.close()\n os.system('sudo mv tmp.txt /etc/hosts >> /dev/null 2>&1')\n return\n\n\ndef hostIsUp(host):\n if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):\n return False\n return True\n\n\ndef getIpServerName(config, serverName):\n ip = ''\n value = serverName.split('-')\n if len(value) == 2:\n try:\n hosts = config.get(value[0], 'hosts').split(',')\n ip = hosts[int(value[1]) - 1].strip(' \\n')\n except:\n return ip\n return ip\n\n\ndef updateFileServer(config, serverName):\n ip = getIpServerName(config, serverName)\n out = subprocess.run(['tar', 'czf',\n '/tmp/SDTD-Mazerunner-Script.tar.gz', '.'], cwd=os.getcwd(), stdout\n =subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)\n if out.returncode == 0:\n logging.info('Compressing directory done [success]')\n else:\n logging.error('Compressing directory failed [error]')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'sudo rm -rf SDTD-Mazerunner/script/'])\n out = subprocess.run(['scp', '-pq', '-o', 'StrictHostKeyChecking=no',\n '-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz', 'xnet@' +\n ip + ':~/'], check=True)\n if out.returncode == 0:\n logging.info('Transfer done [success]')\n else:\n logging.error('Transferring files failed [error]')\n logging.info('Detar file ...')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'mkdir -p SDTD-Mazerunner/script'])\n out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip,\n 'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])\n if out.returncode == 0:\n logging.info('Decompressing directory done [success]')\n else:\n logging.error('Decompressing directory failed [error]')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'rm SDTD-Mazerunner-Script.tar.gz'])\n return\n\n\ndef installEnvironmentServer(config, serverName):\n ip = getIpServerName(config, serverName)\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip,\n 'source ~/.profile; ./script/install_config_machine.py'])\n return\n",
"step-4": "<mask token>\n\n\nclass color:\n PURPLE = '\\x1b[95m'\n CYAN = '\\x1b[96m'\n DARKCYAN = '\\x1b[36m'\n BLUE = '\\x1b[94m'\n GREEN = '\\x1b[92m'\n YELLOW = '\\x1b[93m'\n RED = '\\x1b[91m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n END = '\\x1b[0m'\n\n\ndef getHostsByKey(config, key):\n hosts = config.get(key, 'hosts').split(',')\n index = 0\n for host in hosts:\n hosts[index] = host.strip(' \\n')\n index += 1\n return hosts\n\n\ndef getIp():\n ip = os.popen(\n 'ifconfig ens3 | grep \"inet ad\" | cut -f2 -d: | awk \\'{print $1}\\'',\n 'r').read()\n ip = ip.replace('\\n', '')\n return ip\n\n\ndef isAlreadyAdd(pathFile, string):\n file = open(pathFile)\n for line in file:\n if string in line:\n return True\n return False\n\n\ndef deleteLineWithString(pathFile, stringResearch):\n contenu = ''\n fichier = open(pathFile, 'r')\n for ligne in fichier:\n if not stringResearch in ligne:\n contenu += ligne\n fichier.close()\n fichier = open('tmp.txt', 'w')\n fichier.write(contenu)\n fichier.close()\n os.system('sudo mv tmp.txt /etc/hosts >> /dev/null 2>&1')\n return\n\n\ndef hostIsUp(host):\n if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):\n return False\n return True\n\n\ndef getIpServerName(config, serverName):\n ip = ''\n value = serverName.split('-')\n if len(value) == 2:\n try:\n hosts = config.get(value[0], 'hosts').split(',')\n ip = hosts[int(value[1]) - 1].strip(' \\n')\n except:\n return ip\n return ip\n\n\ndef updateFileServer(config, serverName):\n ip = getIpServerName(config, serverName)\n out = subprocess.run(['tar', 'czf',\n '/tmp/SDTD-Mazerunner-Script.tar.gz', '.'], cwd=os.getcwd(), stdout\n =subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)\n if out.returncode == 0:\n logging.info('Compressing directory done [success]')\n else:\n logging.error('Compressing directory failed [error]')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'sudo rm -rf SDTD-Mazerunner/script/'])\n out = subprocess.run(['scp', '-pq', '-o', 'StrictHostKeyChecking=no',\n '-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz', 'xnet@' +\n ip + ':~/'], check=True)\n if out.returncode == 0:\n logging.info('Transfer done [success]')\n else:\n logging.error('Transferring files failed [error]')\n logging.info('Detar file ...')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'mkdir -p SDTD-Mazerunner/script'])\n out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip,\n 'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])\n if out.returncode == 0:\n logging.info('Decompressing directory done [success]')\n else:\n logging.error('Decompressing directory failed [error]')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'rm SDTD-Mazerunner-Script.tar.gz'])\n return\n\n\ndef installEnvironmentServer(config, serverName):\n ip = getIpServerName(config, serverName)\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip,\n 'source ~/.profile; ./script/install_config_machine.py'])\n return\n",
"step-5": "#!/usr/bin/env python3\n\nimport os\nimport subprocess\nimport logging\n\n\nclass color:\n PURPLE = '\\033[95m'\n CYAN = '\\033[96m'\n DARKCYAN = '\\033[36m'\n BLUE = '\\033[94m'\n GREEN = '\\033[92m'\n YELLOW = '\\033[93m'\n RED = '\\033[91m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n END = '\\033[0m'\n\n\n# Recover all ip for one component. Return format ip\ndef getHostsByKey(config, key):\n hosts = config.get(key, \"hosts\").split(',')\n index = 0\n for host in hosts:\n hosts[index] = host.strip(' \\n')\n index += 1\n return hosts\n\n\n# Function who return the ip of the current machine\ndef getIp():\n ip = os.popen('ifconfig ens3 | grep \"inet ad\" | cut -f2 -d: | awk \\'{print $1}\\'', \"r\").read()\n ip = ip.replace('\\n', '')\n return ip\n\n\n# Check if String il already present in the file\ndef isAlreadyAdd(pathFile, string):\n file = open(pathFile)\n for line in file:\n if string in line:\n return True\n return False\n\n\ndef deleteLineWithString(pathFile, stringResearch):\n contenu = \"\"\n fichier = open(pathFile, \"r\")\n for ligne in fichier:\n if not (stringResearch in ligne):\n contenu += ligne\n fichier.close()\n\n fichier = open('tmp.txt', 'w')\n fichier.write(contenu)\n fichier.close()\n os.system('sudo mv tmp.txt /etc/hosts >> /dev/null 2>&1')\n return\n\n\n# Function for check host\ndef hostIsUp(host):\n if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):\n return False\n return True\n\n\n# Function for recover ip by using server name\ndef getIpServerName(config, serverName):\n ip = \"\"\n value = serverName.split('-')\n if len(value) == 2:\n try:\n hosts = config.get(value[0], \"hosts\").split(',')\n ip = hosts[int(value[1]) - 1].strip(' \\n')\n except:\n return ip\n return ip\n\n\n# Function for update file on specific server\ndef updateFileServer(config, serverName):\n ip = getIpServerName(config, serverName)\n out = subprocess.run(['tar', 'czf', '/tmp/SDTD-Mazerunner-Script.tar.gz', '.'],\n cwd=os.getcwd(),\n stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)\n if out.returncode == 0:\n logging.info(\"Compressing directory done [success]\")\n else:\n logging.error(\"Compressing directory failed [error]\")\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,\n 'sudo rm -rf SDTD-Mazerunner/script/'])\n out = subprocess.run(\n ['scp', '-pq', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz',\n 'xnet@' + ip + ':~/'], check=True)\n if out.returncode == 0:\n logging.info(\"Transfer done [success]\")\n else:\n logging.error(\"Transferring files failed [error]\")\n logging.info(\"Detar file ...\")\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,\n 'mkdir -p SDTD-Mazerunner/script'])\n out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,\n 'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])\n if out.returncode == 0:\n logging.info(\"Decompressing directory done [success]\")\n else:\n logging.error(\"Decompressing directory failed [error]\")\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,\n 'rm SDTD-Mazerunner-Script.tar.gz'])\n return\n\n\n# Function for install basic environment\ndef installEnvironmentServer(config, serverName):\n ip = getIpServerName(config, serverName)\n\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,\n 'source ~/.profile; 
./script/install_config_machine.py'])\n return\n",
"step-ids": [
5,
7,
9,
10,
12
]
}
|
[
5,
7,
9,
10,
12
] |
from estmd import ESTMD
input_directory = "test.avi"
e = ESTMD()
e.open_movie(input_directory)
e.run(by_frame=True)
r = e.create_list_of_arrays()
print "Done testing!"
|
normal
|
{
"blob_id": "1fd4d1a44270ef29512e601af737accb916dc441",
"index": 974,
"step-1": "from estmd import ESTMD\n\ninput_directory = \"test.avi\"\ne = ESTMD()\ne.open_movie(input_directory)\ne.run(by_frame=True)\nr = e.create_list_of_arrays()\n\nprint \"Done testing!\"\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
__author__ = 'simon.hughes'
from sklearn.feature_extraction import DictVectorizer
from WindowFeatures import compute_middle_index
from collections import Counter
class WindowFeatureExtractor(object):
"""
A simple wrapper class that takes a number of window based feature extractor
functions and applies them to a dataset of windows, and then vectorizes with
the sklearn DictVectorizer class
"""
def __init__(self, feature_extractors, min_feat_frequency, sparse=True, feature_val=1):
"""
feature_extractors : list of fns
feature extraction fns
min_feat_frequency : int
minimum frequency of features to retain
sparse : boolean
return a sparse numpy matrix or not
"""
self.feature_extractors = feature_extractors
self.min_feat_frequency = min_feat_frequency
self.vectorizer = DictVectorizer(sparse=sparse)
self.feature_val = feature_val
def fit(self, X, y=None):
"""
X : list of list of str
list of word windows
y : ignored
returns : numpy array (sparse is sparse = True)
"""
feats = self.__extract_features_(X)
return self.vectorizer.fit(feats)
    def transform(self, X, y=None):
        # extract features first so the DictVectorizer receives dicts, not raw windows
        feats = self.__extract_features_(X)
        return self.vectorizer.transform(feats)
def fit_transform(self, X,y=None):
feats = self.__extract_features_(X)
return self.vectorizer.fit_transform(feats)
def __extract_features_(self, X):
if len(X) == 0:
raise Exception("Empty list passed to WindowFeatureExtractor.fit")
mid_ix = compute_middle_index(X[0])
all_feats = []
keys = []
for window in X:
d = {}
for fn in self.feature_extractors:
fts = fn(window, mid_ix, self.feature_val)
d.update(fts)
keys.extend(d.keys())
all_feats.append(d)
if self.min_feat_frequency <= 1:
return all_feats
""" Filter to at or above minimum feature frequency """
keyCnt = Counter(keys)
frequent = set([k for k,v in keyCnt.items() if v >= self.min_feat_frequency])
freq_feats = []
for d in all_feats:
freq_d = dict([(k,v) for k,v in d.items() if k in frequent])
freq_feats.append(freq_d)
return freq_feats
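# A minimal usage sketch (illustrative only): `unigram_features` below is an
# assumed feature function following the (window, mid_ix, feature_val)
# signature the class expects; it is not part of the original module.
if __name__ == '__main__':
    def unigram_features(window, mid_ix, feature_val):
        # one feature per token, keyed by its offset from the middle word
        return dict(('WD:%d:%s' % (i - mid_ix, w), feature_val)
                    for i, w in enumerate(window))

    windows = [['the', 'cat', 'sat'], ['the', 'dog', 'ran']]
    extractor = WindowFeatureExtractor([unigram_features], min_feat_frequency=1)
    X = extractor.fit_transform(windows)  # sparse matrix, one row per window
    print(X.shape)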
|
normal
|
{
"blob_id": "48677d73f6489ce789884a9dff5d50c23f47d8b3",
"index": 260,
"step-1": "<mask token>\n\n\nclass WindowFeatureExtractor(object):\n <mask token>\n <mask token>\n <mask token>\n\n def transform(self, X, y=None):\n return self.vectorizer.transform(X, y)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass WindowFeatureExtractor(object):\n \"\"\"\n A simple wrapper class that takes a number of window based feature extractor\n functions and applies them to a dataset of windows, and then vectorizes with\n the sklearn DictVectorizer class\n \"\"\"\n\n def __init__(self, feature_extractors, min_feat_frequency, sparse=True,\n feature_val=1):\n \"\"\"\n feature_extractors : list of fns\n feature extraction fns\n min_feat_frequency : int\n minimum frequency of features to retain\n sparse : boolean\n return a sparse numpy matrix or not\n \"\"\"\n self.feature_extractors = feature_extractors\n self.min_feat_frequency = min_feat_frequency\n self.vectorizer = DictVectorizer(sparse=sparse)\n self.feature_val = feature_val\n\n def fit(self, X, y=None):\n \"\"\"\n X : list of list of str\n list of word windows\n y : ignored\n\n returns : numpy array (sparse is sparse = True)\n \"\"\"\n feats = self.__extract_features_(X)\n return self.vectorizer.fit(feats)\n\n def transform(self, X, y=None):\n return self.vectorizer.transform(X, y)\n\n def fit_transform(self, X, y=None):\n feats = self.__extract_features_(X)\n return self.vectorizer.fit_transform(feats)\n\n def __extract_features_(self, X):\n if len(X) == 0:\n raise Exception('Empty list passed to WindowFeatureExtractor.fit')\n mid_ix = compute_middle_index(X[0])\n all_feats = []\n keys = []\n for window in X:\n d = {}\n for fn in self.feature_extractors:\n fts = fn(window, mid_ix, self.feature_val)\n d.update(fts)\n keys.extend(d.keys())\n all_feats.append(d)\n if self.min_feat_frequency <= 1:\n return all_feats\n \"\"\" Filter to at or above minimum feature frequency \"\"\"\n keyCnt = Counter(keys)\n frequent = set([k for k, v in keyCnt.items() if v >= self.\n min_feat_frequency])\n freq_feats = []\n for d in all_feats:\n freq_d = dict([(k, v) for k, v in d.items() if k in frequent])\n freq_feats.append(freq_d)\n return freq_feats\n",
"step-3": "__author__ = 'simon.hughes'\n<mask token>\n\n\nclass WindowFeatureExtractor(object):\n \"\"\"\n A simple wrapper class that takes a number of window based feature extractor\n functions and applies them to a dataset of windows, and then vectorizes with\n the sklearn DictVectorizer class\n \"\"\"\n\n def __init__(self, feature_extractors, min_feat_frequency, sparse=True,\n feature_val=1):\n \"\"\"\n feature_extractors : list of fns\n feature extraction fns\n min_feat_frequency : int\n minimum frequency of features to retain\n sparse : boolean\n return a sparse numpy matrix or not\n \"\"\"\n self.feature_extractors = feature_extractors\n self.min_feat_frequency = min_feat_frequency\n self.vectorizer = DictVectorizer(sparse=sparse)\n self.feature_val = feature_val\n\n def fit(self, X, y=None):\n \"\"\"\n X : list of list of str\n list of word windows\n y : ignored\n\n returns : numpy array (sparse is sparse = True)\n \"\"\"\n feats = self.__extract_features_(X)\n return self.vectorizer.fit(feats)\n\n def transform(self, X, y=None):\n return self.vectorizer.transform(X, y)\n\n def fit_transform(self, X, y=None):\n feats = self.__extract_features_(X)\n return self.vectorizer.fit_transform(feats)\n\n def __extract_features_(self, X):\n if len(X) == 0:\n raise Exception('Empty list passed to WindowFeatureExtractor.fit')\n mid_ix = compute_middle_index(X[0])\n all_feats = []\n keys = []\n for window in X:\n d = {}\n for fn in self.feature_extractors:\n fts = fn(window, mid_ix, self.feature_val)\n d.update(fts)\n keys.extend(d.keys())\n all_feats.append(d)\n if self.min_feat_frequency <= 1:\n return all_feats\n \"\"\" Filter to at or above minimum feature frequency \"\"\"\n keyCnt = Counter(keys)\n frequent = set([k for k, v in keyCnt.items() if v >= self.\n min_feat_frequency])\n freq_feats = []\n for d in all_feats:\n freq_d = dict([(k, v) for k, v in d.items() if k in frequent])\n freq_feats.append(freq_d)\n return freq_feats\n",
"step-4": "__author__ = 'simon.hughes'\nfrom sklearn.feature_extraction import DictVectorizer\nfrom WindowFeatures import compute_middle_index\nfrom collections import Counter\n\n\nclass WindowFeatureExtractor(object):\n \"\"\"\n A simple wrapper class that takes a number of window based feature extractor\n functions and applies them to a dataset of windows, and then vectorizes with\n the sklearn DictVectorizer class\n \"\"\"\n\n def __init__(self, feature_extractors, min_feat_frequency, sparse=True,\n feature_val=1):\n \"\"\"\n feature_extractors : list of fns\n feature extraction fns\n min_feat_frequency : int\n minimum frequency of features to retain\n sparse : boolean\n return a sparse numpy matrix or not\n \"\"\"\n self.feature_extractors = feature_extractors\n self.min_feat_frequency = min_feat_frequency\n self.vectorizer = DictVectorizer(sparse=sparse)\n self.feature_val = feature_val\n\n def fit(self, X, y=None):\n \"\"\"\n X : list of list of str\n list of word windows\n y : ignored\n\n returns : numpy array (sparse is sparse = True)\n \"\"\"\n feats = self.__extract_features_(X)\n return self.vectorizer.fit(feats)\n\n def transform(self, X, y=None):\n return self.vectorizer.transform(X, y)\n\n def fit_transform(self, X, y=None):\n feats = self.__extract_features_(X)\n return self.vectorizer.fit_transform(feats)\n\n def __extract_features_(self, X):\n if len(X) == 0:\n raise Exception('Empty list passed to WindowFeatureExtractor.fit')\n mid_ix = compute_middle_index(X[0])\n all_feats = []\n keys = []\n for window in X:\n d = {}\n for fn in self.feature_extractors:\n fts = fn(window, mid_ix, self.feature_val)\n d.update(fts)\n keys.extend(d.keys())\n all_feats.append(d)\n if self.min_feat_frequency <= 1:\n return all_feats\n \"\"\" Filter to at or above minimum feature frequency \"\"\"\n keyCnt = Counter(keys)\n frequent = set([k for k, v in keyCnt.items() if v >= self.\n min_feat_frequency])\n freq_feats = []\n for d in all_feats:\n freq_d = dict([(k, v) for k, v in d.items() if k in frequent])\n freq_feats.append(freq_d)\n return freq_feats\n",
"step-5": "__author__ = 'simon.hughes'\n\nfrom sklearn.feature_extraction import DictVectorizer\nfrom WindowFeatures import compute_middle_index\nfrom collections import Counter\n\nclass WindowFeatureExtractor(object):\n \"\"\"\n A simple wrapper class that takes a number of window based feature extractor\n functions and applies them to a dataset of windows, and then vectorizes with\n the sklearn DictVectorizer class\n \"\"\"\n\n def __init__(self, feature_extractors, min_feat_frequency, sparse=True, feature_val=1):\n \"\"\"\n feature_extractors : list of fns\n feature extraction fns\n min_feat_frequency : int\n minimum frequency of features to retain\n sparse : boolean\n return a sparse numpy matrix or not\n \"\"\"\n self.feature_extractors = feature_extractors\n self.min_feat_frequency = min_feat_frequency\n self.vectorizer = DictVectorizer(sparse=sparse)\n self.feature_val = feature_val\n\n def fit(self, X, y=None):\n \"\"\"\n X : list of list of str\n list of word windows\n y : ignored\n\n returns : numpy array (sparse is sparse = True)\n \"\"\"\n feats = self.__extract_features_(X)\n return self.vectorizer.fit(feats)\n\n def transform(self, X, y=None):\n return self.vectorizer.transform(X, y)\n\n def fit_transform(self, X,y=None):\n feats = self.__extract_features_(X)\n return self.vectorizer.fit_transform(feats)\n\n def __extract_features_(self, X):\n if len(X) == 0:\n raise Exception(\"Empty list passed to WindowFeatureExtractor.fit\")\n mid_ix = compute_middle_index(X[0])\n all_feats = []\n\n keys = []\n for window in X:\n d = {}\n for fn in self.feature_extractors:\n fts = fn(window, mid_ix, self.feature_val)\n d.update(fts)\n keys.extend(d.keys())\n all_feats.append(d)\n\n if self.min_feat_frequency <= 1:\n return all_feats\n\n \"\"\" Filter to at or above minimum feature frequency \"\"\"\n keyCnt = Counter(keys)\n frequent = set([k for k,v in keyCnt.items() if v >= self.min_feat_frequency])\n\n freq_feats = []\n for d in all_feats:\n freq_d = dict([(k,v) for k,v in d.items() if k in frequent])\n freq_feats.append(freq_d)\n return freq_feats",
"step-ids": [
2,
7,
8,
9,
10
]
}
|
[
2,
7,
8,
9,
10
] |
# System import
import os
# Docutils import
from docutils import nodes
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
from docutils.statemachine import ViewList
# Add node
class link_to_block(nodes.Admonition, nodes.Element):
""" Node for inserting a link to button."""
pass
# Add directive
class LinkToBlock(BaseAdmonition):
""" Hidden technical block"""
node_class = link_to_block
has_content = False
required_arguments = 1
optional_arguments = 2
final_argument_whitespace = True
option_spec = {
"right-side": bool,
"label": str
}
def run(self):
# Construct an empty node
new_content = ViewList()
ref = u":ref:`{0} <{1}>`".format(
self.options.get("label", "Link To"),
"".join(self.arguments))
new_content.append(ref, source=self.content)
self.content = new_content
return super(LinkToBlock, self).run()
# Add html writer
def visit_ltb_html(self, node):
""" Visit link to block"""
# Generate the html div
position = node.get("right-side", True)
self.body.append("<div class='{0}'>".format(
"buttonNext" if position else "buttonPrevious"))
def depart_ltb_html(self, node):
""" Depart link to block"""
# Add close div
self.depart_admonition(node)
# Register new directive
def setup(app):
app.add_directive("link-to-block", LinkToBlock)
app.add_node(link_to_block, html=(visit_ltb_html, depart_ltb_html))
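# A hedged usage sketch: 'my_ext.link_to_block' is an assumed module path for
# this file, and 'next-chapter' an assumed :ref: target label.
#
# In conf.py:
#     extensions = ['my_ext.link_to_block']
#
# In any .rst page, the directive then renders a styled link button:
#     .. link-to-block:: next-chapter
#        :label: Next chapter
#        :right-side: true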
|
normal
|
{
"blob_id": "63cce356b792949b90b215e0a5826f7b33d2d375",
"index": 8064,
"step-1": "<mask token>\n\n\nclass link_to_block(nodes.Admonition, nodes.Element):\n <mask token>\n pass\n\n\nclass LinkToBlock(BaseAdmonition):\n \"\"\" Hidden technical block\"\"\"\n node_class = link_to_block\n has_content = False\n required_arguments = 1\n optional_arguments = 2\n final_argument_whitespace = True\n option_spec = {'right-side': bool, 'label': str}\n\n def run(self):\n new_content = ViewList()\n ref = u':ref:`{0} <{1}>`'.format(self.options.get('label',\n 'Link To'), ''.join(self.arguments))\n new_content.append(ref, source=self.content)\n self.content = new_content\n return super(LinkToBlock, self).run()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass link_to_block(nodes.Admonition, nodes.Element):\n \"\"\" Node for inserting a link to button.\"\"\"\n pass\n\n\nclass LinkToBlock(BaseAdmonition):\n \"\"\" Hidden technical block\"\"\"\n node_class = link_to_block\n has_content = False\n required_arguments = 1\n optional_arguments = 2\n final_argument_whitespace = True\n option_spec = {'right-side': bool, 'label': str}\n\n def run(self):\n new_content = ViewList()\n ref = u':ref:`{0} <{1}>`'.format(self.options.get('label',\n 'Link To'), ''.join(self.arguments))\n new_content.append(ref, source=self.content)\n self.content = new_content\n return super(LinkToBlock, self).run()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass link_to_block(nodes.Admonition, nodes.Element):\n \"\"\" Node for inserting a link to button.\"\"\"\n pass\n\n\nclass LinkToBlock(BaseAdmonition):\n \"\"\" Hidden technical block\"\"\"\n node_class = link_to_block\n has_content = False\n required_arguments = 1\n optional_arguments = 2\n final_argument_whitespace = True\n option_spec = {'right-side': bool, 'label': str}\n\n def run(self):\n new_content = ViewList()\n ref = u':ref:`{0} <{1}>`'.format(self.options.get('label',\n 'Link To'), ''.join(self.arguments))\n new_content.append(ref, source=self.content)\n self.content = new_content\n return super(LinkToBlock, self).run()\n\n\n<mask token>\n\n\ndef setup(app):\n app.add_directive('link-to-block', LinkToBlock)\n app.add_node(link_to_block, html=(visit_ltb_html, depart_ltb_html))\n",
"step-4": "<mask token>\n\n\nclass link_to_block(nodes.Admonition, nodes.Element):\n \"\"\" Node for inserting a link to button.\"\"\"\n pass\n\n\nclass LinkToBlock(BaseAdmonition):\n \"\"\" Hidden technical block\"\"\"\n node_class = link_to_block\n has_content = False\n required_arguments = 1\n optional_arguments = 2\n final_argument_whitespace = True\n option_spec = {'right-side': bool, 'label': str}\n\n def run(self):\n new_content = ViewList()\n ref = u':ref:`{0} <{1}>`'.format(self.options.get('label',\n 'Link To'), ''.join(self.arguments))\n new_content.append(ref, source=self.content)\n self.content = new_content\n return super(LinkToBlock, self).run()\n\n\ndef visit_ltb_html(self, node):\n \"\"\" Visit link to block\"\"\"\n position = node.get('right-side', True)\n self.body.append(\"<div class='{0}'>\".format('buttonNext' if position else\n 'buttonPrevious'))\n\n\ndef depart_ltb_html(self, node):\n \"\"\" Depart link to block\"\"\"\n self.depart_admonition(node)\n\n\ndef setup(app):\n app.add_directive('link-to-block', LinkToBlock)\n app.add_node(link_to_block, html=(visit_ltb_html, depart_ltb_html))\n",
"step-5": "# System import\nimport os\n\n# Docutils import\nfrom docutils import nodes\nfrom docutils.parsers.rst.directives.admonitions import BaseAdmonition\nfrom docutils.statemachine import ViewList\n\n\n# Add node\nclass link_to_block(nodes.Admonition, nodes.Element):\n \"\"\" Node for inserting a link to button.\"\"\"\n pass\n\n\n# Add directive\nclass LinkToBlock(BaseAdmonition):\n \"\"\" Hidden technical block\"\"\"\n node_class = link_to_block\n has_content = False\n required_arguments = 1\n optional_arguments = 2\n final_argument_whitespace = True\n option_spec = {\n \"right-side\": bool,\n \"label\": str\n }\n\n def run(self):\n # Construct an empty node\n new_content = ViewList()\n ref = u\":ref:`{0} <{1}>`\".format(\n self.options.get(\"label\", \"Link To\"),\n \"\".join(self.arguments))\n new_content.append(ref, source=self.content)\n self.content = new_content\n return super(LinkToBlock, self).run()\n\n\n# Add html writer\ndef visit_ltb_html(self, node):\n \"\"\" Visit link to block\"\"\" \n # Generate the html div\n position = node.get(\"right-side\", True)\n self.body.append(\"<div class='{0}'>\".format(\n \"buttonNext\" if position else \"buttonPrevious\"))\n\n\ndef depart_ltb_html(self, node):\n \"\"\" Depart link to block\"\"\"\n # Add close div\n self.depart_admonition(node)\n\n\n# Register new directive\ndef setup(app):\n app.add_directive(\"link-to-block\", LinkToBlock)\n app.add_node(link_to_block, html=(visit_ltb_html, depart_ltb_html))\n",
"step-ids": [
5,
6,
7,
9,
11
]
}
|
[
5,
6,
7,
9,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def clean_room(update):
char, db_sess = get_data_character(update, return_sess=True)
if char and char.room:
if char.room.mobs:
for mob in char.room.mobs:
db_sess.delete(mob)
if char.room.items:
for item in char.room.items:
db_sess.delete(item)
db_sess.delete(char.room)
db_sess.commit()
<|reserved_special_token_1|>
from functions.service_funcs.get_data import get_data_character
def clean_room(update):
char, db_sess = get_data_character(update, return_sess=True)
if char and char.room:
if char.room.mobs:
for mob in char.room.mobs:
db_sess.delete(mob)
if char.room.items:
for item in char.room.items:
db_sess.delete(item)
db_sess.delete(char.room)
db_sess.commit()
<|reserved_special_token_1|>
from functions.service_funcs.get_data import get_data_character
def clean_room(update):
char, db_sess = get_data_character(update, return_sess=True)
    # delete the old room and all information about it
if char and char.room:
if char.room.mobs:
for mob in char.room.mobs:
db_sess.delete(mob)
if char.room.items:
for item in char.room.items:
db_sess.delete(item)
db_sess.delete(char.room)
db_sess.commit()
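# A hypothetical call site, assuming a python-telegram-bot style handler in
# which `update` identifies the character; the handler name and reply text
# are made up for illustration.
def on_leave_room(update, context):
    clean_room(update)  # drop the stale room together with its mobs and items
    update.message.reply_text('The old room crumbles behind you.')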
|
flexible
|
{
"blob_id": "4d57fa22282d7b3f8adabedd7a04e32767181890",
"index": 5693,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef clean_room(update):\n char, db_sess = get_data_character(update, return_sess=True)\n if char and char.room:\n if char.room.mobs:\n for mob in char.room.mobs:\n db_sess.delete(mob)\n if char.room.items:\n for item in char.room.items:\n db_sess.delete(item)\n db_sess.delete(char.room)\n db_sess.commit()\n",
"step-3": "from functions.service_funcs.get_data import get_data_character\n\n\ndef clean_room(update):\n char, db_sess = get_data_character(update, return_sess=True)\n if char and char.room:\n if char.room.mobs:\n for mob in char.room.mobs:\n db_sess.delete(mob)\n if char.room.items:\n for item in char.room.items:\n db_sess.delete(item)\n db_sess.delete(char.room)\n db_sess.commit()\n",
"step-4": "from functions.service_funcs.get_data import get_data_character\n\n\ndef clean_room(update):\n char, db_sess = get_data_character(update, return_sess=True)\n # удаляем старую комнату и всю инфу о ней\n if char and char.room:\n if char.room.mobs:\n for mob in char.room.mobs:\n db_sess.delete(mob)\n if char.room.items:\n for item in char.room.items:\n db_sess.delete(item)\n db_sess.delete(char.room)\n db_sess.commit()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [path('', views.index, name='listings'), path(
'<int:listing_id>', views.listing, name='listing'), path('search',
views.search, name='search')]
<|reserved_special_token_1|>
from django.urls import path
from . import views
urlpatterns = [path('', views.index, name='listings'), path(
'<int:listing_id>', views.listing, name='listing'), path('search',
views.search, name='search')]
<|reserved_special_token_1|>
# This handles the URL routing
from django.urls import path
from . import views
# Defines views to pass dynamic data to listings page
urlpatterns = [
path('', views.index, name='listings'),
path('<int:listing_id>', views.listing, name='listing'),
path('search', views.search, name='search')
]
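# A minimal sketch of views that would satisfy these urlpatterns; the Listing
# model, template names, and 'description' search field are assumptions.
#
# from django.shortcuts import get_object_or_404, render
# from .models import Listing
#
# def index(request):
#     return render(request, 'listings/listings.html',
#                   {'listings': Listing.objects.all()})
#
# def listing(request, listing_id):
#     return render(request, 'listings/listing.html',
#                   {'listing': get_object_or_404(Listing, pk=listing_id)})
#
# def search(request):
#     keywords = request.GET.get('keywords', '')
#     results = Listing.objects.filter(description__icontains=keywords)
#     return render(request, 'listings/search.html', {'listings': results})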
|
flexible
|
{
"blob_id": "be894830bb0dde6bacaea6be823391e0445603c3",
"index": 1192,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', views.index, name='listings'), path(\n '<int:listing_id>', views.listing, name='listing'), path('search',\n views.search, name='search')]\n",
"step-3": "from django.urls import path\nfrom . import views\nurlpatterns = [path('', views.index, name='listings'), path(\n '<int:listing_id>', views.listing, name='listing'), path('search',\n views.search, name='search')]\n",
"step-4": "# This handle the url for routing\n\nfrom django.urls import path\nfrom . import views\n\n# Defines views to pass dynamic data to listings page\nurlpatterns = [\n path('', views.index, name='listings'),\n path('<int:listing_id>', views.listing, name='listing'),\n path('search', views.search, name='search')\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import functools
import shutil
import tempfile
import unittest
import unittest.mock
from pathlib import Path
import numpy as np
import pandas as pd
import one.alf.io as alfio
from ibllib.io.extractors import training_trials, biased_trials, camera
from ibllib.io import raw_data_loaders as raw
from ibllib.io.extractors.base import BaseExtractor
def wheelMoves_fixture(func):
"""Decorator to save some dummy wheelMoves ALF files for extraction tests"""
@functools.wraps(func)
def wrapper(obj=None):
# Save some wheelMoves ALF files
attr_list = ['training_lt5',
'training_ge5',
'biased_lt5',
'biased_ge5']
alf_paths = [getattr(obj, p)['path'] / 'alf' for p in attr_list]
n_trials = [getattr(obj, p)['ntrials'] for p in attr_list]
for p, n in zip(alf_paths, n_trials):
p.mkdir()
np.save(str(p / '_ibl_wheelMoves.intervals.npy'), np.zeros((n, 2)))
np.save(str(p / '_ibl_wheelMoves.peakAmplitude.npy'), np.zeros(n))
# Run method
func(obj)
# Teardown; delete the files
for p in alf_paths:
shutil.rmtree(p)
return wrapper
class TestExtractTrialData(unittest.TestCase):
def setUp(self):
self.main_path = Path(__file__).parent
self.training_lt5 = {'path': self.main_path / 'data' / 'session_training_lt5'}
self.biased_lt5 = {'path': self.main_path / 'data' / 'session_biased_lt5'}
self.training_ge5 = {'path': self.main_path / 'data' / 'session_training_ge5'}
self.biased_ge5 = {'path': self.main_path / 'data' / 'session_biased_ge5'}
self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5['path']))
self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path']))
self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5['path']))
self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path']))
        # turn off logging for unit testing as we will purposely go into warning/error cases
self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'
self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'
# Save some dummy wheel moves data for trial firstMovement_times extraction
def test_get_feedbackType(self):
# TRAINING SESSIONS
ft = training_trials.FeedbackType(
self.training_lt5['path']).extract()[0]
self.assertEqual(ft.size, self.training_lt5['ntrials'])
# check if no 0's in feedbackTypes
self.assertFalse(ft[ft == 0].size > 0)
# -- version >= 5.0.0
ft = training_trials.FeedbackType(
self.training_ge5['path']).extract()[0]
self.assertEqual(ft.size, self.training_ge5['ntrials'])
# check if no 0's in feedbackTypes
self.assertFalse(ft[ft == 0].size > 0)
# BIASED SESSIONS
ft = biased_trials.FeedbackType(
self.biased_lt5['path']).extract()[0]
self.assertEqual(ft.size, self.biased_lt5['ntrials'])
# check if no 0's in feedbackTypes
self.assertFalse(ft[ft == 0].size > 0)
# -- version >= 5.0.0
ft = biased_trials.FeedbackType(
self.biased_ge5['path']).extract()[0]
self.assertEqual(ft.size, self.biased_ge5['ntrials'])
# check if no 0's in feedbackTypes
self.assertFalse(ft[ft == 0].size > 0)
def test_get_contrastLR(self):
# TRAINING SESSIONS
cl, cr = training_trials.ContrastLR(
self.training_lt5['path']).extract()[0]
self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))
self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
# -- version >= 5.0.0
cl, cr = training_trials.ContrastLR(
self.training_ge5['path']).extract()[0]
self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))
self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
# BIASED SESSIONS
cl, cr = biased_trials.ContrastLR(
self.biased_lt5['path']).extract()[0]
self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))
self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
# -- version >= 5.0.0
cl, cr = biased_trials.ContrastLR(
self.biased_ge5['path']).extract()[0]
self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))
self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
def test_get_probabilityLeft(self):
# TRAINING SESSIONS
pl = training_trials.ProbabilityLeft(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(pl, np.ndarray))
# -- version >= 5.0.0
pl = training_trials.ProbabilityLeft(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(pl, np.ndarray))
# BIASED SESSIONS
pl = biased_trials.ProbabilityLeft(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(pl, np.ndarray))
# Test if only probs that are in prob set
md = raw.load_settings(self.biased_lt5['path'])
if md:
probs = md['BLOCK_PROBABILITY_SET']
probs.append(0.5)
self.assertTrue(sum([x in probs for x in pl]) == len(pl))
# -- version >= 5.0.0
pl = biased_trials.ProbabilityLeft(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(pl, np.ndarray))
# Test if only probs that are in prob set
md = raw.load_settings(self.biased_ge5['path'])
probs = md['BLOCK_PROBABILITY_SET']
probs.append(0.5)
self.assertTrue(sum([x in probs for x in pl]) == len(pl))
def test_get_choice(self):
# TRAINING SESSIONS
choice = training_trials.Choice(
session_path=self.training_lt5['path']).extract(save=False)[0]
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.training_lt5['path'])
trial_nogo = np.array(
[~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])
for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
# -- version >= 5.0.0
choice = training_trials.Choice(
session_path=self.training_ge5['path']).extract(save=False)[0]
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.training_ge5['path'])
trial_nogo = np.array(
[~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])
for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
# BIASED SESSIONS
choice = biased_trials.Choice(
session_path=self.biased_lt5['path']).extract(save=False)[0]
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.biased_lt5['path'])
trial_nogo = np.array(
[~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])
for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
# -- version >= 5.0.0
choice = biased_trials.Choice(
session_path=self.biased_ge5['path']).extract(save=False)[0]
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.biased_ge5['path'])
trial_nogo = np.array(
[~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])
for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
def test_get_repNum(self):
# TODO: Test its sawtooth
# TRAINING SESSIONS
rn = training_trials.RepNum(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(rn, np.ndarray))
for i in range(3):
self.assertTrue(i in rn)
# -- version >= 5.0.0
rn = training_trials.RepNum(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(rn, np.ndarray))
for i in range(4):
self.assertTrue(i in rn)
        # BIASED SESSIONS have no repeated trials
def test_get_rewardVolume(self):
# TRAINING SESSIONS
rv = training_trials.RewardVolume(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(rv, np.ndarray))
# -- version >= 5.0.0
rv = training_trials.RewardVolume(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(rv, np.ndarray))
# BIASED SESSIONS
rv = biased_trials.RewardVolume(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(rv, np.ndarray))
# Test if all non zero rewards are of the same value
self.assertTrue(all([x == max(rv) for x in rv if x != 0]))
# -- version >= 5.0.0
rv = biased_trials.RewardVolume(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(rv, np.ndarray))
# Test if all non zero rewards are of the same value
self.assertTrue(all([x == max(rv) for x in rv if x != 0]))
def test_get_feedback_times_ge5(self):
# TRAINING SESSIONS
ft = training_trials.FeedbackTimes(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(ft, np.ndarray))
# BIASED SESSIONS
ft = biased_trials.FeedbackTimes(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(ft, np.ndarray))
def test_get_feedback_times_lt5(self):
# TRAINING SESSIONS
ft = training_trials.FeedbackTimes(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(ft, np.ndarray))
# BIASED SESSIONS
ft = biased_trials.FeedbackTimes(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(ft, np.ndarray))
def test_get_stimOnTrigger_times(self):
# TRAINING SESSIONS
sott = training_trials.StimOnTriggerTimes(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
# -- version >= 5.0.0
sott = training_trials.StimOnTriggerTimes(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
# BIASED SESSIONS
sott = biased_trials.StimOnTriggerTimes(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
# -- version >= 5.0.0
sott = biased_trials.StimOnTriggerTimes(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
def test_get_stimOn_times_lt5(self):
# TRAINING SESSIONS
st = training_trials.StimOnTimes_deprecated(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(st, np.ndarray))
# BIASED SESSIONS
st = biased_trials.StimOnTimes_deprecated(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(st, np.ndarray))
def test_get_stimOn_times_ge5(self):
# TRAINING SESSIONS
st = training_trials.StimOnTimes_deprecated(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(st, np.ndarray))
# BIASED SESSIONS
st = biased_trials.StimOnTimes_deprecated(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(st, np.ndarray))
def test_stimOnOffFreeze_times(self):
# TRAINING SESSIONS
st = training_trials.StimOnOffFreezeTimes(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(st[0], np.ndarray))
# BIASED SESSIONS
st = biased_trials.StimOnOffFreezeTimes(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(st[0], np.ndarray))
# TRAINING SESSIONS
st = training_trials.StimOnOffFreezeTimes(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(st[0], np.ndarray))
# BIASED SESSIONS
st = biased_trials.StimOnOffFreezeTimes(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(st[0], np.ndarray))
def test_get_intervals(self):
# TRAINING SESSIONS
di = training_trials.Intervals(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
# -- version >= 5.0.0
di = training_trials.Intervals(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
# BIASED SESSIONS
di = biased_trials.Intervals(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
# -- version >= 5.0.0
di = biased_trials.Intervals(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
def test_get_response_times(self):
# TRAINING SESSIONS
rt = training_trials.ResponseTimes(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(rt, np.ndarray))
# -- version >= 5.0.0
rt = training_trials.ResponseTimes(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(rt, np.ndarray))
# BIASED SESSIONS
rt = biased_trials.ResponseTimes(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(rt, np.ndarray))
# -- version >= 5.0.0
rt = biased_trials.ResponseTimes(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(rt, np.ndarray))
def test_get_goCueTrigger_times(self):
# TRAINING SESSIONS
data = raw.load_data(self.training_lt5['path'])
gct = np.array([tr['behavior_data']['States timestamps']
['closed_loop'][0][0] for tr in data])
self.assertTrue(isinstance(gct, np.ndarray))
# -- version >= 5.0.0
gct = training_trials.GoCueTriggerTimes(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(gct, np.ndarray))
# BIASED SESSIONS
data = raw.load_data(self.biased_lt5['path'])
gct = np.array([tr['behavior_data']['States timestamps']
['closed_loop'][0][0] for tr in data])
self.assertTrue(isinstance(gct, np.ndarray))
# -- version >= 5.0.0
gct = biased_trials.GoCueTriggerTimes(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(gct, np.ndarray))
def test_get_goCueOnset_times(self):
# TRAINING SESSIONS
gcot = training_trials.GoCueTimes(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertTrue(np.all(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 4)
# -- version >= 5.0.0
gcot = training_trials.GoCueTimes(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertFalse(np.any(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 12)
# BIASED SESSIONS
gcot = biased_trials.GoCueTimes(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertFalse(np.any(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 4)
# -- version >= 5.0.0
gcot = biased_trials.GoCueTimes(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertFalse(np.any(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 8)
def test_get_included_trials_lt5(self):
# TRAINING SESSIONS
it = training_trials.IncludedTrials(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(it, np.ndarray))
# BIASED SESSIONS
it = biased_trials.IncludedTrials(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(it, np.ndarray))
def test_get_included_trials_ge5(self):
# TRAINING SESSIONS
it = training_trials.IncludedTrials(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(it, np.ndarray))
# BIASED SESSIONS
it = biased_trials.IncludedTrials(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(it, np.ndarray))
def test_get_included_trials(self):
# TRAINING SESSIONS
it = training_trials.IncludedTrials(
self.training_lt5['path']).extract(settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]
self.assertTrue(isinstance(it, np.ndarray))
# -- version >= 5.0.0
it = training_trials.IncludedTrials(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(it, np.ndarray))
# BIASED SESSIONS
it = biased_trials.IncludedTrials(
self.biased_lt5['path']).extract(settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]
self.assertTrue(isinstance(it, np.ndarray))
# -- version >= 5.0.0
it = biased_trials.IncludedTrials(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(it, np.ndarray))
@wheelMoves_fixture
def test_extract_all(self):
# TRAINING SESSIONS
# Expect an error raised because no wheel moves were present in test data
with self.assertRaises(ValueError) as ex:
training_trials.extract_all(
self.training_lt5['path'], settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)
self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty', str(ex.exception))
# -- version >= 5.0.0
out, files = training_trials.extract_all(self.training_ge5['path'], save=True)
self.assertEqual(19, len(out))
self.assertTrue(all(map(Path.exists, files)))
# BIASED SESSIONS
# The new trials extractor additionally extracts the wheel data and this fails for the < 5.0
# test data so we will stub the wheel extractor
with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel') as Wheel:
Wheel.var_names = tuple()
Wheel().extract.return_value = ({}, [])
out, files = biased_trials.extract_all(
self.biased_lt5['path'], settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)
self.assertEqual(15, len(out))
self.assertTrue(all(map(Path.exists, files)))
# -- version >= 5.0.0
out, files = biased_trials.extract_all(self.biased_ge5['path'], save=True)
self.assertEqual(19, len(out))
self.assertTrue(all(map(Path.exists, files)))
def test_encoder_positions_clock_reset(self):
# TRAINING SESSIONS
# only for training?
path = self.training_lt5['path'] / "raw_behavior_data"
path = next(path.glob("_iblrig_encoderPositions.raw*.ssv"), None)
dy = raw._load_encoder_positions_file_lt5(path)
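        # raw timestamps logged after the clock reset; the loader unwraps them by adding 2 ** 32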
dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206, 1853979, 1859144])
self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))
self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))
def test_encoder_positions_clock_errors(self):
# here we test for 2 kinds of file corruption that happen
# 1/2 the first sample time is corrupt and absurdly high and should be discarded
        # 2/2 two samples are swapped and need to be swapped back
path = self.biased_lt5['path'] / "raw_behavior_data"
path = next(path.glob("_iblrig_encoderPositions.raw*.ssv"), None)
dy = raw._load_encoder_positions_file_lt5(path)
self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))
# -- version >= 5.0.0
path = self.biased_ge5['path'] / "raw_behavior_data"
path = next(path.glob("_iblrig_encoderPositions.raw*.ssv"), None)
dy = raw._load_encoder_positions_file_ge5(path)
self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))
def test_wheel_folders(self):
# the wheel folder contains other errors in bpod output that had to be addressed
for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'):
df = raw._load_encoder_positions_file_lt5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):
df = raw._load_encoder_events_file_lt5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'):
df = raw._load_encoder_positions_file_ge5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):
df = raw._load_encoder_events_file_ge5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
def test_load_encoder_positions(self):
raw.load_encoder_positions(self.training_lt5['path'],
settings={'IBLRIG_VERSION_TAG': '4.9.9'})
raw.load_encoder_positions(self.training_ge5['path'])
raw.load_encoder_positions(self.biased_lt5['path'],
settings={'IBLRIG_VERSION_TAG': '4.9.9'})
raw.load_encoder_positions(self.biased_ge5['path'])
def test_load_encoder_events(self):
raw.load_encoder_events(self.training_lt5['path'],
settings={'IBLRIG_VERSION_TAG': '4.9.9'})
raw.load_encoder_events(self.training_ge5['path'])
raw.load_encoder_events(self.biased_lt5['path'],
settings={'IBLRIG_VERSION_TAG': '4.9.9'})
raw.load_encoder_events(self.biased_ge5['path'])
def test_size_outputs(self):
# check the output dimensions
# VERSION >= 5.0.0
from ibllib.io.extractors.bpod_trials import extract_all
extract_all(self.training_ge5['path'])
trials = alfio.load_object(self.training_ge5['path'] / 'alf', object='trials')
self.assertTrue(alfio.check_dimensions(trials) == 0)
extract_all(self.biased_ge5['path'])
trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object='trials')
self.assertTrue(alfio.check_dimensions(trials) == 0)
# VERSION < 5.0.0
# for these test data there are no wheel moves so let's mock the output
mock_data = {
'intervals': np.array([[0, 1], ]),
'peakAmplitude': np.array([1, 1]),
'peakVelocity_times': np.array([1, 1])}
function_name = 'ibllib.io.extractors.training_wheel.extract_wheel_moves'
# Training
with unittest.mock.patch(function_name, return_value=mock_data):
extract_all(self.training_lt5['path'])
trials = alfio.load_object(self.training_lt5['path'] / 'alf', object='trials')
self.assertTrue(alfio.check_dimensions(trials) == 0)
# Biased
with unittest.mock.patch(function_name, return_value=mock_data):
extract_all(self.biased_lt5['path'])
trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object='trials')
self.assertTrue(alfio.check_dimensions(trials) == 0)
def tearDown(self):
for f in self.main_path.rglob('_ibl_log.*.log'):
f.unlink()
[x.unlink() for x in self.training_lt5['path'].rglob('alf/*') if x.is_file()]
[x.unlink() for x in self.biased_lt5['path'].rglob('alf/*') if x.is_file()]
[x.unlink() for x in self.training_ge5['path'].rglob('alf/*') if x.is_file()]
[x.unlink() for x in self.biased_ge5['path'].rglob('alf/*') if x.is_file()]
[x.rmdir() for x in self.training_lt5['path'].rglob('alf/') if x.is_dir()]
[x.rmdir() for x in self.biased_lt5['path'].rglob('alf/') if x.is_dir()]
[x.rmdir() for x in self.training_ge5['path'].rglob('alf/') if x.is_dir()]
[x.rmdir() for x in self.biased_ge5['path'].rglob('alf/') if x.is_dir()]
class TestSyncWheelBpod(unittest.TestCase):
def test_sync_bpod_bonsai_poor_quality_timestamps(self):
sync_trials_robust = raw.sync_trials_robust
        drift_pol = np.array([11 * 1e-6, -20])  # bpod starts 20 secs before with 11 ppm drift
np.random.seed(seed=784)
t0_full = np.cumsum(np.random.rand(50)) + .001
t1_full = np.polyval(drift_pol, t0_full) + t0_full
t0 = t0_full.copy()
t1 = t1_full.copy()
t0_, t1_ = sync_trials_robust(t0, t1)
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
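        # the sync must also survive a missing sample at either end of either array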
t0_, t1_ = sync_trials_robust(t0, t1[:-1])
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0, t1[1:])
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0[1:], t1)
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0[:-1], t1)
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
class TestWheelLoaders(unittest.TestCase):
def setUp(self) -> None:
self.main_path = Path(__file__).parent
def test_encoder_events_corrupt(self):
path = self.main_path.joinpath('data', 'wheel', 'lt5')
for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):
dy = raw._load_encoder_events_file_lt5(file_events)
self.assertTrue(dy.size > 6)
path = self.main_path.joinpath('data', 'wheel', 'ge5')
for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):
dy = raw._load_encoder_events_file_ge5(file_events)
self.assertTrue(dy.size > 6)
def test_encoder_positions_corrupts(self):
path = self.main_path.joinpath('data', 'wheel', 'ge5')
for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):
dy = raw._load_encoder_positions_file_ge5(file_position)
self.assertTrue(dy.size > 18)
path = self.main_path.joinpath('data', 'wheel', 'lt5')
for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):
dy = raw._load_encoder_positions_file_lt5(file_position)
self.assertTrue(dy.size > 18)
class MockExtracor(BaseExtractor):
save_names = (
"some_file.csv",
"some_file.tsv",
"some_file.ssv",
"some_file.npy",
)
var_names = (
"csv",
"ssv",
"tsv",
"npy",
)
def _extract(self, **kwargs) -> tuple:
csv = pd.DataFrame([1, 2, 3])
ssv = pd.DataFrame([1, 2, 3])
tsv = pd.DataFrame([1, 2, 3])
npy = np.array([1, 2, 3])
return (csv, ssv, tsv, npy)
class TestBaseExtractorSavingMethods(unittest.TestCase):
def setUp(self) -> None:
self.tempdir = tempfile.TemporaryDirectory()
self.session_path = self.tempdir.name
# self.addClassCleanup(tempdir.cleanup) # py3.8
self.mock_extractor = MockExtracor(self.session_path)
def test_saving_method(self):
data, paths = self.mock_extractor.extract(save=True)
self.assertTrue(all([x.exists() for x in paths]))
def tearDown(self):
self.tempdir.cleanup()
class TestCameraExtractors(unittest.TestCase):
def test_groom_pin_state(self):
# UNIT DATA
fps = 60
t_offset = 39.4
ts = np.arange(0, 10, 1 / fps) + t_offset
# Add drift
ts += np.full_like(ts, 1e-4).cumsum()
n_pulses = 2
pulse_width = 0.3
duty = 0.5
gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),
'polarities': np.ones(n_pulses * 2, dtype=np.int32)}
gpio['polarities'][1::2] = -1
aud_offset = 40.
audio = {'times': np.empty(n_pulses * 2),
'polarities': gpio['polarities']}
for p in range(n_pulses):
i = p * 2
rise = (pulse_width * p) + duty * p + 1
audio['times'][i] = aud_offset + rise
audio['times'][i + 1] = audio['times'][i] + pulse_width
rise += t_offset
gpio['indices'][i] = np.where(ts > rise)[0][0]
gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]
gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)
        self.assertEqual(audio, audio_, 'Audio dict shouldn\'t be affected')
np.testing.assert_array_almost_equal(ts_[:4], [40., 40.016667, 40.033333, 40.05])
# Broken TTLs + extra TTL
delay = 0.08
pulse_width = 1e-5
t = audio['times'][0] + delay
audio['times'] = np.sort(np.append(audio['times'], [t, t + pulse_width, 80]))
audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)
audio['polarities'][1::2] = -1
gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff=5e-3)
self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)
# One front shifted by a large amount
audio['times'][4] -= 0.3
gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, tolerance=.1, min_diff=5e-3)
self.assertTrue(np.all(gpio_['times'] == audio_['times']))
self.assertTrue(np.all(gpio_['times'] == np.array([41., 41.3])))
def test_attribute_times(self, display=False):
# Create two timestamp arrays at two different frequencies
tsa = np.linspace(0, 60, 60 * 4)[:60] # 240bpm
tsb = np.linspace(0, 60, 60 * 3)[:45] # 180bpm
tsa = np.sort(np.append(tsa, .4)) # Add ambiguous front
tsb = np.sort(np.append(tsb, .41))
if display:
from ibllib.plots import vertical_lines
import matplotlib.pyplot as plt
vertical_lines(tsb, linestyle=':', color='r', label='tsb')
vertical_lines(tsa, linestyle=':', color='b', label='tsa')
plt.legend()
# Check with default args
matches = camera.attribute_times(tsa, tsb)
expected = np.array(
[0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21,
22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, 44,
45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60]
)
np.testing.assert_array_equal(matches, expected)
self.assertEqual(matches.size, tsb.size)
# Taking closest instead of first should change index of ambiguous front
matches = camera.attribute_times(tsa, tsb, take='nearest')
expected[np.r_[1:3]] = expected[1:3] + 1
np.testing.assert_array_equal(matches, expected)
# Taking first after should exclude many pulses
matches = camera.attribute_times(tsa, tsb, take='after')
missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20,
22, 23, 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]
expected[missing] = -1
np.testing.assert_array_equal(matches, expected)
# Lower tolerance
matches = camera.attribute_times(tsa, tsb, tol=0.05)
expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57])
np.testing.assert_array_equal(matches[matches > -1], expected)
# Remove injective assert
matches = camera.attribute_times(tsa, tsb, injective=False, take='nearest')
expected = np.array(
[0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21, 22,
24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, 44, 45,
46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60]
)
np.testing.assert_array_equal(matches, expected)
# Check input validation
with self.assertRaises(ValueError):
camera.attribute_times(tsa, tsb, injective=False, take='closest')
if __name__ == "__main__":
unittest.main(exit=False, verbosity=2)
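# A usage note (assumed file name test_extractors.py, run from the directory
# holding it and its 'data' folders):
#
#   python -m unittest test_extractors.TestExtractTrialData -v   # one class
#   python -m unittest test_extractors -v                        # whole module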
|
normal
|
{
"blob_id": "f17d33f1d035da42dc9a2b4c0c60beefc6a48dea",
"index": 64,
"step-1": "<mask token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n <mask token>\n <mask token>\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n <mask token>\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = 
biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n <mask token>\n <mask token>\n\n def test_get_included_trials(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n <mask token>\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n raw.load_encoder_positions(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = 
t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be 
effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <mask token>\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n <mask token>\n\n def test_get_choice(self):\n choice = training_trials.Choice(session_path=self.training_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = training_trials.Choice(session_path=self.training_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n 
self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n\n def test_get_repNum(self):\n rn = training_trials.RepNum(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(3):\n self.assertTrue(i in rn)\n rn = training_trials.RepNum(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(4):\n self.assertTrue(i in rn)\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <mask token>\n <mask token>\n <mask token>\n\n def test_get_intervals(self):\n di = training_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = training_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n 
self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n <mask token>\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n <mask token>\n <mask token>\n\n def test_get_included_trials(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n 
path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n\n def test_wheel_folders(self):\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n raw.load_encoder_positions(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n 
self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 
22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<mask token>\n",
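test_attribute_times (complete in the step above) pins down how camera.attribute_times assigns each timestamp in tsb to an index in tsa, returning -1 where no acceptable match exists. The sketch below is a simplified stand-in assuming plain nearest-neighbour matching with a tolerance; the real function additionally supports injective matching and the 'nearest'/'after' take modes the test exercises.

import numpy as np

def attribute_nearest(tsa, tsb, tol=np.inf):
    # For each event in tsb, pick the nearest event in sorted tsa
    # (via searchsorted), then reject matches farther than tol with -1.
    idx = np.searchsorted(tsa, tsb).clip(1, len(tsa) - 1)
    nearest = np.where(tsb - tsa[idx - 1] <= tsa[idx] - tsb, idx - 1, idx)
    nearest[np.abs(tsa[nearest] - tsb) > tol] = -1
    return nearest

tsa = np.linspace(0, 60, 60 * 4)[:60]  # same grids as the test, minus the
tsb = np.linspace(0, 60, 60 * 3)[:45]  # injected 0.4/0.41 outliers
matches = attribute_nearest(tsa, tsb, tol=0.05)
assert matches.size == tsb.size
assert matches[0] == 0 and matches[1] == -1  # exact hit, then a too-far event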
"step-3": "<mask token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n\n def test_get_feedbackType(self):\n ft = training_trials.FeedbackType(self.training_lt5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = training_trials.FeedbackType(self.training_ge5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_ge5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n <mask token>\n\n def test_get_choice(self):\n choice = training_trials.Choice(session_path=self.training_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = 
training_trials.Choice(session_path=self.training_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n\n def test_get_repNum(self):\n rn = training_trials.RepNum(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(3):\n self.assertTrue(i in rn)\n rn = training_trials.RepNum(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(4):\n self.assertTrue(i in rn)\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <mask token>\n <mask token>\n <mask token>\n\n def test_get_intervals(self):\n di = training_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = 
training_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n <mask token>\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n 
self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n\n def test_wheel_folders(self):\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n raw.load_encoder_positions(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n 
t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, 
audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<mask token>\n",
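test_encoder_positions_clock_reset, also complete in the step above, checks that the lt5 position loader repaired a rotary-encoder clock reset: the raw re_ts column jumps backwards mid-session, and the loader adds 2**32 to every sample after the jump so that np.diff(re_ts) stays positive. A minimal sketch of that unwrap follows, on made-up timestamp values (the real ones come from the _iblrig_encoderPositions.raw .ssv file).

import numpy as np

# Made-up microsecond timestamps with one backwards jump (counter reset).
re_ts = np.array([4294000000, 4294900000, 849736, 1532230, 1822449],
                 dtype=np.int64)

wrap = np.where(np.diff(re_ts) < 0)[0]  # last index before the reset
if wrap.size:
    re_ts[wrap[0] + 1:] += 2 ** 32      # shift everything after the reset up
assert np.all(np.diff(re_ts) > 0)       # monotonic again, as the test asserts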
"step-4": "<mask token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n\n def test_get_feedbackType(self):\n ft = training_trials.FeedbackType(self.training_lt5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = training_trials.FeedbackType(self.training_ge5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_ge5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n\n def test_get_probabilityLeft(self):\n pl = training_trials.ProbabilityLeft(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = training_trials.ProbabilityLeft(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = biased_trials.ProbabilityLeft(self.biased_lt5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = 
raw.load_settings(self.biased_lt5['path'])\n if md:\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n pl = biased_trials.ProbabilityLeft(self.biased_ge5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_ge5['path'])\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n\n def test_get_choice(self):\n choice = training_trials.Choice(session_path=self.training_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = training_trials.Choice(session_path=self.training_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n\n def test_get_repNum(self):\n rn = training_trials.RepNum(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(3):\n self.assertTrue(i in rn)\n rn = training_trials.RepNum(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(4):\n self.assertTrue(i in rn)\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n 
self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n\n def test_get_stimOn_times_lt5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n st = training_trials.StimOnOffFreezeTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = training_trials.StimOnOffFreezeTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n\n def test_get_intervals(self):\n di = training_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = training_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = training_trials.GoCueTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n 
self.assertTrue(isinstance(gct, np.ndarray))\n gct = biased_trials.GoCueTriggerTimes(self.biased_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 
1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n\n def test_wheel_folders(self):\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n raw.load_encoder_positions(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n\n def test_load_encoder_events(self):\n raw.load_encoder_events(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.training_ge5['path'])\n raw.load_encoder_events(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.biased_ge5['path'])\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n\n def tearDown(self):\n for f in self.main_path.rglob('_ibl_log.*.log'):\n f.unlink()\n [x.unlink() for x in self.training_lt5['path'].rglob('alf/*') if x.\n is_file()]\n [x.unlink() for x in self.biased_lt5['path'].rglob('alf/*') if x.\n is_file()]\n [x.unlink() for x in 
self.training_ge5['path'].rglob('alf/*') if x.\n is_file()]\n [x.unlink() for x in self.biased_ge5['path'].rglob('alf/*') if x.\n is_file()]\n [x.rmdir() for x in self.training_lt5['path'].rglob('alf/') if x.\n is_dir()]\n [x.rmdir() for x in self.biased_lt5['path'].rglob('alf/') if x.is_dir()\n ]\n [x.rmdir() for x in self.training_ge5['path'].rglob('alf/') if x.\n is_dir()]\n [x.rmdir() for x in self.biased_ge5['path'].rglob('alf/') if x.is_dir()\n ]\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = 
np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<mask token>\n",
"step-5": "import functools\nimport shutil\nimport tempfile\nimport unittest\nimport unittest.mock\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\n\nimport one.alf.io as alfio\nfrom ibllib.io.extractors import training_trials, biased_trials, camera\nfrom ibllib.io import raw_data_loaders as raw\nfrom ibllib.io.extractors.base import BaseExtractor\n\n\ndef wheelMoves_fixture(func):\n \"\"\"Decorator to save some dummy wheelMoves ALF files for extraction tests\"\"\"\n @functools.wraps(func)\n def wrapper(obj=None):\n # Save some wheelMoves ALF files\n attr_list = ['training_lt5',\n 'training_ge5',\n 'biased_lt5',\n 'biased_ge5']\n alf_paths = [getattr(obj, p)['path'] / 'alf' for p in attr_list]\n n_trials = [getattr(obj, p)['ntrials'] for p in attr_list]\n for p, n in zip(alf_paths, n_trials):\n p.mkdir()\n np.save(str(p / '_ibl_wheelMoves.intervals.npy'), np.zeros((n, 2)))\n np.save(str(p / '_ibl_wheelMoves.peakAmplitude.npy'), np.zeros(n))\n\n # Run method\n func(obj)\n\n # Teardown; delete the files\n for p in alf_paths:\n shutil.rmtree(p)\n return wrapper\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' / 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' / 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' / 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' / 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5['path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path']))\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5['path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path']))\n # turn off logging for unit testing as we will purposedly go into warning/error cases\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n # Save some dummy wheel moves data for trial firstMovement_times extraction\n\n def test_get_feedbackType(self):\n # TRAINING SESSIONS\n ft = training_trials.FeedbackType(\n self.training_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.training_lt5['ntrials'])\n # check if no 0's in feedbackTypes\n self.assertFalse(ft[ft == 0].size > 0)\n # -- version >= 5.0.0\n ft = training_trials.FeedbackType(\n self.training_ge5['path']).extract()[0]\n self.assertEqual(ft.size, self.training_ge5['ntrials'])\n # check if no 0's in feedbackTypes\n self.assertFalse(ft[ft == 0].size > 0)\n\n # BIASED SESSIONS\n ft = biased_trials.FeedbackType(\n self.biased_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_lt5['ntrials'])\n # check if no 0's in feedbackTypes\n self.assertFalse(ft[ft == 0].size > 0)\n # -- version >= 5.0.0\n ft = biased_trials.FeedbackType(\n self.biased_ge5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_ge5['ntrials'])\n # check if no 0's in feedbackTypes\n self.assertFalse(ft[ft == 0].size > 0)\n\n def test_get_contrastLR(self):\n # TRAINING SESSIONS\n cl, cr = training_trials.ContrastLR(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n # -- version >= 5.0.0\n cl, cr = 
training_trials.ContrastLR(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n\n # BIASED SESSIONS\n cl, cr = biased_trials.ContrastLR(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n # -- version >= 5.0.0\n cl, cr = biased_trials.ContrastLR(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n\n def test_get_probabilityLeft(self):\n # TRAINING SESSIONS\n pl = training_trials.ProbabilityLeft(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n # -- version >= 5.0.0\n pl = training_trials.ProbabilityLeft(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n\n # BIASED SESSIONS\n pl = biased_trials.ProbabilityLeft(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n # Test if only probs that are in prob set\n md = raw.load_settings(self.biased_lt5['path'])\n if md:\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([x in probs for x in pl]) == len(pl))\n # -- version >= 5.0.0\n pl = biased_trials.ProbabilityLeft(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n # Test if only probs that are in prob set\n md = raw.load_settings(self.biased_ge5['path'])\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([x in probs for x in pl]) == len(pl))\n\n def test_get_choice(self):\n # TRAINING SESSIONS\n choice = training_trials.Choice(\n session_path=self.training_lt5['path']).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array(\n [~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])\n for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n # -- version >= 5.0.0\n choice = training_trials.Choice(\n session_path=self.training_ge5['path']).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array(\n [~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])\n for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n\n # BIASED SESSIONS\n choice = biased_trials.Choice(\n session_path=self.biased_lt5['path']).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array(\n [~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])\n for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n # -- version >= 5.0.0\n choice = biased_trials.Choice(\n session_path=self.biased_ge5['path']).extract(save=False)[0]\n 
self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array(\n [~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])\n for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n\n def test_get_repNum(self):\n # TODO: Test its sawtooth\n # TRAINING SESSIONS\n rn = training_trials.RepNum(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(3):\n self.assertTrue(i in rn)\n # -- version >= 5.0.0\n rn = training_trials.RepNum(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(4):\n self.assertTrue(i in rn)\n\n # BIASED SESSIONS have no repeted trials\n\n def test_get_rewardVolume(self):\n # TRAINING SESSIONS\n rv = training_trials.RewardVolume(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n # -- version >= 5.0.0\n rv = training_trials.RewardVolume(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n\n # BIASED SESSIONS\n rv = biased_trials.RewardVolume(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n # Test if all non zero rewards are of the same value\n self.assertTrue(all([x == max(rv) for x in rv if x != 0]))\n # -- version >= 5.0.0\n rv = biased_trials.RewardVolume(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n # Test if all non zero rewards are of the same value\n self.assertTrue(all([x == max(rv) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n # TRAINING SESSIONS\n ft = training_trials.FeedbackTimes(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n # BIASED SESSIONS\n ft = biased_trials.FeedbackTimes(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n # TRAINING SESSIONS\n ft = training_trials.FeedbackTimes(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n # BIASED SESSIONS\n ft = biased_trials.FeedbackTimes(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n # TRAINING SESSIONS\n sott = training_trials.StimOnTriggerTimes(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n # -- version >= 5.0.0\n sott = training_trials.StimOnTriggerTimes(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n # BIASED SESSIONS\n sott = biased_trials.StimOnTriggerTimes(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n # -- version >= 5.0.0\n sott = biased_trials.StimOnTriggerTimes(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n\n def test_get_stimOn_times_lt5(self):\n # TRAINING SESSIONS\n st = training_trials.StimOnTimes_deprecated(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n # BIASED SESSIONS\n st = biased_trials.StimOnTimes_deprecated(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_get_stimOn_times_ge5(self):\n # TRAINING SESSIONS\n st = training_trials.StimOnTimes_deprecated(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n # BIASED SESSIONS\n st = 
biased_trials.StimOnTimes_deprecated(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n # TRAINING SESSIONS\n st = training_trials.StimOnOffFreezeTimes(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n\n # BIASED SESSIONS\n st = biased_trials.StimOnOffFreezeTimes(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n\n # TRAINING SESSIONS\n st = training_trials.StimOnOffFreezeTimes(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n\n # BIASED SESSIONS\n st = biased_trials.StimOnOffFreezeTimes(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n\n def test_get_intervals(self):\n # TRAINING SESSIONS\n di = training_trials.Intervals(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n # -- version >= 5.0.0\n di = training_trials.Intervals(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n\n # BIASED SESSIONS\n di = biased_trials.Intervals(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n # -- version >= 5.0.0\n di = biased_trials.Intervals(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n\n def test_get_response_times(self):\n # TRAINING SESSIONS\n rt = training_trials.ResponseTimes(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n # -- version >= 5.0.0\n rt = training_trials.ResponseTimes(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n # BIASED SESSIONS\n rt = biased_trials.ResponseTimes(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n # -- version >= 5.0.0\n rt = biased_trials.ResponseTimes(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n # TRAINING SESSIONS\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps']\n ['closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n # -- version >= 5.0.0\n gct = training_trials.GoCueTriggerTimes(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n # BIASED SESSIONS\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps']\n ['closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n # -- version >= 5.0.0\n gct = biased_trials.GoCueTriggerTimes(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n def test_get_goCueOnset_times(self):\n # TRAINING SESSIONS\n gcot = training_trials.GoCueTimes(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n # -- version >= 5.0.0\n gcot = training_trials.GoCueTimes(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n\n # BIASED SESSIONS\n gcot = 
biased_trials.GoCueTimes(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n # -- version >= 5.0.0\n gcot = biased_trials.GoCueTimes(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n # TRAINING SESSIONS\n it = training_trials.IncludedTrials(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n # BIASED SESSIONS\n it = biased_trials.IncludedTrials(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n # TRAINING SESSIONS\n it = training_trials.IncludedTrials(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n # BIASED SESSIONS\n it = biased_trials.IncludedTrials(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials(self):\n # TRAINING SESSIONS\n it = training_trials.IncludedTrials(\n self.training_lt5['path']).extract(settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n # -- version >= 5.0.0\n it = training_trials.IncludedTrials(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n # BIASED SESSIONS\n it = biased_trials.IncludedTrials(\n self.biased_lt5['path']).extract(settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n # -- version >= 5.0.0\n it = biased_trials.IncludedTrials(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n # TRAINING SESSIONS\n # Expect an error raised because no wheel moves were present in test data\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(\n self.training_lt5['path'], settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty', str(ex.exception))\n # -- version >= 5.0.0\n out, files = training_trials.extract_all(self.training_ge5['path'], save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n # BIASED SESSIONS\n # The new trials extractor additionally extracts the wheel data and this fails for the < 5.0\n # test data so we will stub the wheel extractor\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel') as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = ({}, [])\n out, files = biased_trials.extract_all(\n self.biased_lt5['path'], settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n # -- version >= 5.0.0\n out, files = biased_trials.extract_all(self.biased_ge5['path'], save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n # TRAINING SESSIONS\n # only for training?\n path = self.training_lt5['path'] / \"raw_behavior_data\"\n path = next(path.glob(\"_iblrig_encoderPositions.raw*.ssv\"), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206, 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n 
self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n # here we test for 2 kinds of file corruption that happen\n # 1/2 the first sample time is corrupt and absurdly high and should be discarded\n # 2/2 2 samples are swapped and need to be swapped backk\n path = self.biased_lt5['path'] / \"raw_behavior_data\"\n path = next(path.glob(\"_iblrig_encoderPositions.raw*.ssv\"), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n # -- version >= 5.0.0\n path = self.biased_ge5['path'] / \"raw_behavior_data\"\n path = next(path.glob(\"_iblrig_encoderPositions.raw*.ssv\"), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n\n def test_wheel_folders(self):\n # the wheel folder contains other errors in bpod output that had to be addressed\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n raw.load_encoder_positions(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n\n def test_load_encoder_events(self):\n raw.load_encoder_events(self.training_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.training_ge5['path'])\n raw.load_encoder_events(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.biased_ge5['path'])\n\n def test_size_outputs(self):\n # check the output dimensions\n # VERSION >= 5.0.0\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf', object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n # VERSION < 5.0.0\n # for these test data there are no wheel moves so let's mock the output\n mock_data = {\n 'intervals': np.array([[0, 1], ]),\n 'peakAmplitude': np.array([1, 1]),\n 'peakVelocity_times': np.array([1, 1])}\n function_name = 'ibllib.io.extractors.training_wheel.extract_wheel_moves'\n # Training\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf', object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n # Biased\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = 
alfio.load_object(self.biased_lt5['path'] / 'alf', object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n\n def tearDown(self):\n for f in self.main_path.rglob('_ibl_log.*.log'):\n f.unlink()\n [x.unlink() for x in self.training_lt5['path'].rglob('alf/*') if x.is_file()]\n [x.unlink() for x in self.biased_lt5['path'].rglob('alf/*') if x.is_file()]\n [x.unlink() for x in self.training_ge5['path'].rglob('alf/*') if x.is_file()]\n [x.unlink() for x in self.biased_ge5['path'].rglob('alf/*') if x.is_file()]\n [x.rmdir() for x in self.training_lt5['path'].rglob('alf/') if x.is_dir()]\n [x.rmdir() for x in self.biased_lt5['path'].rglob('alf/') if x.is_dir()]\n [x.rmdir() for x in self.training_ge5['path'].rglob('alf/') if x.is_dir()]\n [x.rmdir() for x in self.biased_ge5['path'].rglob('alf/') if x.is_dir()]\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-6, -20]) # bpod starts 20 secs before with 10 ppm drift\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + .001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) -> None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = (\n \"some_file.csv\",\n \"some_file.tsv\",\n \"some_file.ssv\",\n \"some_file.npy\",\n )\n var_names = (\n \"csv\",\n \"ssv\",\n \"tsv\",\n \"npy\",\n )\n\n def _extract(self, **kwargs) -> tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n\n return (csv, ssv, tsv, npy)\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n def 
setUp(self) -> None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n # self.addClassCleanup(tempdir.cleanup) # py3.8\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n def test_groom_pin_state(self):\n # UNIT DATA\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n # Add drift\n ts += np.full_like(ts, 1e-4).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.\n audio = {'times': np.empty(n_pulses * 2),\n 'polarities': gpio['polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = (pulse_width * p) + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, 'Audio dict shouldn\\'t be effected')\n np.testing.assert_array_almost_equal(ts_[:4], [40., 40.016667, 40.033333, 40.05])\n\n # Broken TTLs + extra TTL\n delay = 0.08\n pulse_width = 1e-5\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t + pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff=5e-3)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n\n # One front shifted by a large amount\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, tolerance=.1, min_diff=5e-3)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41., 41.3])))\n\n def test_attribute_times(self, display=False):\n # Create two timestamp arrays at two different frequencies\n tsa = np.linspace(0, 60, 60 * 4)[:60] # 240bpm\n tsb = np.linspace(0, 60, 60 * 3)[:45] # 180bpm\n tsa = np.sort(np.append(tsa, .4)) # Add ambiguous front\n tsb = np.sort(np.append(tsb, .41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n\n # Check with default args\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array(\n [0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21,\n 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, 44,\n 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60]\n )\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n\n # Taking closest instead of first should change index of ambiguous front\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n\n # Taking first after should exclude many pulses\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20,\n 22, 23, 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n 
expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n\n # Lower tolerance\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n\n # Remove injective assert\n matches = camera.attribute_times(tsa, tsb, injective=False, take='nearest')\n expected = np.array(\n [0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21, 22,\n 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, 44, 45,\n 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60]\n )\n np.testing.assert_array_equal(matches, expected)\n\n # Check input validation\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\nif __name__ == \"__main__\":\n unittest.main(exit=False, verbosity=2)\n",
"step-ids": [
27,
34,
37,
45,
49
]
}
|
[
27,
34,
37,
45,
49
] |
<|reserved_special_token_0|>
class SnakeGame:
def __init__(self, board_width=10, board_height=10, gui=False,
enemy_epsilon=0.1):
self.score = 0
self.board = {'width': board_width, 'height': board_height}
self.gui = gui
self.lives = LIVES
self.player = []
self.enemy = []
self.enemy_epsilon = enemy_epsilon
self.food = []
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def generate_food(self):
food = []
while not food:
            # randint is inclusive; cap at width-1/height-1 so food never
            # spawns on the lethal border cells (x/y == 0 or width/height)
            food = [randint(1, self.board['width'] - 1),
                    randint(1, self.board['height'] - 1)]
if food in self.enemy:
food = []
elif food in self.player:
food = []
self.food = food
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def create_new_point(self, snake, key):
new_point = [snake[0][0], snake[0][1]]
if key == 0:
new_point[0] -= 1
elif key == 1:
new_point[1] += 1
elif key == 2:
new_point[0] += 1
elif key == 3:
new_point[1] -= 1
snake.insert(0, new_point)
def food_eaten(self, snake):
return self.food in snake
def remove_last_point(self, snake):
snake.pop()
<|reserved_special_token_0|>
def generate_observations(self):
"""
:return: [lives, score, player, enemy, food]
"""
return self.lives, self.score, self.player, self.enemy, self.food
<|reserved_special_token_0|>
def render_init(self):
pygame.init()
self.clock = pygame.time.Clock()
self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),
0, 32)
self.surface = pygame.Surface(self.screen.get_size())
self.surface = self.surface.convert()
drawGrid(self.surface)
self.myfont = pygame.font.SysFont('bahnschrift', 20)
def step_render(self, key):
"""
:return: [lives, score, player, enemy, food]
"""
self.clock.tick(3)
drawGrid(self.surface)
if not self.food:
self.generate_food()
_lives, _score, _player, _enemy, _food = self.step(key)
self.draw_snake(self.player, self.surface, SNAKE_COLOR[0],
SNAKE_HEAD_COLOR[0])
self.draw_snake(self.enemy, self.surface, SNAKE_COLOR[1],
SNAKE_HEAD_COLOR[1])
if not self.food:
self.generate_food()
self.draw_food(self.surface, FOOD_COLOR)
self.screen.blit(self.surface, (0, 0))
text1 = self.myfont.render('Score: {0} Lives: {1}'.format(round(
self.score, 2), self.lives), True, (250, 250, 250))
self.screen.blit(text1, (5, 10))
pygame.display.update()
return _lives, _score, _player, _enemy, _food
def draw_snake(self, snake, surface, color, head_color):
drew_head = False
for p in snake:
curr_color = color
if not drew_head:
curr_color = head_color
drew_head = True
r = pygame.Rect((p[0] * GRIDSIZE, p[1] * GRIDSIZE), (GRIDSIZE,
GRIDSIZE))
pygame.draw.rect(surface, curr_color, r)
pygame.draw.rect(surface, SQUARE_COLOR, r, 1)
def draw_food(self, surface, color):
r = pygame.Rect((self.food[0] * GRIDSIZE, self.food[1] * GRIDSIZE),
(GRIDSIZE, GRIDSIZE))
pygame.draw.rect(surface, color, r)
pygame.draw.rect(surface, SQUARE_COLOR, r, 1)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def end_game(self):
if self.gui:
self.render_destroy()
raise Exception('Game over')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SnakeGame:
def __init__(self, board_width=10, board_height=10, gui=False,
enemy_epsilon=0.1):
self.score = 0
self.board = {'width': board_width, 'height': board_height}
self.gui = gui
self.lives = LIVES
self.player = []
self.enemy = []
self.enemy_epsilon = enemy_epsilon
self.food = []
def start(self):
"""
:return: [lives, score, player, enemy, food]
"""
self.player_init(LIVES)
self.enemy_init()
self.generate_food()
if self.gui:
self.render_init()
return self.generate_observations()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def generate_food(self):
food = []
while not food:
            # randint is inclusive; cap at width-1/height-1 so food never
            # spawns on the lethal border cells (x/y == 0 or width/height)
            food = [randint(1, self.board['width'] - 1),
                    randint(1, self.board['height'] - 1)]
if food in self.enemy:
food = []
elif food in self.player:
food = []
self.food = food
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def create_new_point(self, snake, key):
new_point = [snake[0][0], snake[0][1]]
if key == 0:
new_point[0] -= 1
elif key == 1:
new_point[1] += 1
elif key == 2:
new_point[0] += 1
elif key == 3:
new_point[1] -= 1
snake.insert(0, new_point)
def food_eaten(self, snake):
return self.food in snake
def remove_last_point(self, snake):
snake.pop()
def check_collisions(self):
        player_collided = False
        enemy_collided = False
        head = self.player[0]
        if (head[0] == 0 or head[0] == self.board['width'] or
                head[1] == 0 or head[1] == self.board['height'] or
                head in self.player[1:-1] or head in self.enemy):
            player_collided = True
        e_head = self.enemy[0]
        if (e_head[0] == 0 or e_head[0] == self.board['width'] or
                e_head[1] == 0 or e_head[1] == self.board['height'] or
                e_head in self.player or e_head in self.enemy[1:-1]):
            enemy_collided = True
if player_collided:
self.lives -= 1
if not self.is_done():
self.player_init(self.lives)
if enemy_collided:
self.enemy_init()
def generate_observations(self):
"""
:return: [lives, score, player, enemy, food]
"""
return self.lives, self.score, self.player, self.enemy, self.food
<|reserved_special_token_0|>
def render_init(self):
pygame.init()
self.clock = pygame.time.Clock()
self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),
0, 32)
self.surface = pygame.Surface(self.screen.get_size())
self.surface = self.surface.convert()
drawGrid(self.surface)
self.myfont = pygame.font.SysFont('bahnschrift', 20)
def step_render(self, key):
"""
:return: [lives, score, player, enemy, food]
"""
self.clock.tick(3)
drawGrid(self.surface)
if not self.food:
self.generate_food()
_lives, _score, _player, _enemy, _food = self.step(key)
self.draw_snake(self.player, self.surface, SNAKE_COLOR[0],
SNAKE_HEAD_COLOR[0])
self.draw_snake(self.enemy, self.surface, SNAKE_COLOR[1],
SNAKE_HEAD_COLOR[1])
if not self.food:
self.generate_food()
self.draw_food(self.surface, FOOD_COLOR)
self.screen.blit(self.surface, (0, 0))
text1 = self.myfont.render('Score: {0} Lives: {1}'.format(round(
self.score, 2), self.lives), True, (250, 250, 250))
self.screen.blit(text1, (5, 10))
pygame.display.update()
return _lives, _score, _player, _enemy, _food
def draw_snake(self, snake, surface, color, head_color):
drew_head = False
for p in snake:
curr_color = color
if not drew_head:
curr_color = head_color
drew_head = True
r = pygame.Rect((p[0] * GRIDSIZE, p[1] * GRIDSIZE), (GRIDSIZE,
GRIDSIZE))
pygame.draw.rect(surface, curr_color, r)
pygame.draw.rect(surface, SQUARE_COLOR, r, 1)
def draw_food(self, surface, color):
r = pygame.Rect((self.food[0] * GRIDSIZE, self.food[1] * GRIDSIZE),
(GRIDSIZE, GRIDSIZE))
pygame.draw.rect(surface, color, r)
pygame.draw.rect(surface, SQUARE_COLOR, r, 1)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def end_game(self):
if self.gui:
self.render_destroy()
raise Exception('Game over')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SnakeGame:
def __init__(self, board_width=10, board_height=10, gui=False,
enemy_epsilon=0.1):
self.score = 0
self.board = {'width': board_width, 'height': board_height}
self.gui = gui
self.lives = LIVES
self.player = []
self.enemy = []
self.enemy_epsilon = enemy_epsilon
self.food = []
def start(self):
"""
:return: [lives, score, player, enemy, food]
"""
self.player_init(LIVES)
self.enemy_init()
self.generate_food()
if self.gui:
self.render_init()
return self.generate_observations()
def player_init(self, lives=LIVES):
x = randint(3, math.ceil(self.board['width'] / 2) - 1)
y = randint(3, self.board['height'] - 3)
self.player = []
vertical = randint(0, 1) == 0
for i in range(3):
point = [x + i, y] if vertical else [x, y + i]
self.player.insert(0, point)
self.lives = lives
def enemy_init(self):
        x = randint(math.ceil(self.board['width'] / 2), self.board['width'] - 3)
y = randint(3, self.board['height'] - 3)
self.enemy = []
vertical = randint(0, 1) == 0
for i in range(3):
point = [x + i, y] if vertical else [x, y + i]
self.enemy.insert(0, point)
if self.enemy[0] in self.player[1:-1]:
self.enemy_init()
def generate_food(self):
food = []
while not food:
            # randint is inclusive; cap at width-1/height-1 so food never
            # spawns on the lethal border cells (x/y == 0 or width/height)
            food = [randint(1, self.board['width'] - 1),
                    randint(1, self.board['height'] - 1)]
if food in self.enemy:
food = []
elif food in self.player:
food = []
self.food = food
def get_enemy_movement(self):
"""
        Epsilon-greedy chase: with probability enemy_epsilon pick a random
        key, otherwise step toward the food. Key deltas on [axis0, axis1]
        (matching create_new_point):
        0 - UP,    (-1, 0)
        1 - RIGHT, (0, +1)
        2 - DOWN,  (+1, 0)
        3 - LEFT,  (0, -1)
"""
if np.random.random() <= self.enemy_epsilon:
return randint(0, 3)
if self.food[0] > self.enemy[0][0]:
return 2
elif self.food[0] < self.enemy[0][0]:
return 0
elif self.food[1] > self.enemy[0][1]:
return 1
elif self.food[1] < self.enemy[0][1]:
return 3
return randint(0, 3)
def step(self, key):
"""
0 - UP,
1 - RIGHT,
2 - DOWN,
3 - LEFT
        :param key: player movement key, 0-3 (see create_new_point)
:return: [lives, score, player, enemy, food]
"""
if self.is_done():
self.end_game()
if not self.food:
self.generate_food()
self.create_new_point(self.player, key)
self.create_new_point(self.enemy, self.get_enemy_movement())
player_ate = False
if self.food_eaten(self.player):
self.score += FOOD_REWARD
self.generate_food()
player_ate = True
else:
self.remove_last_point(self.player)
self.score -= MOVE_PENALTY
if not player_ate and self.food_eaten(self.enemy):
self.generate_food()
else:
self.remove_last_point(self.enemy)
self.check_collisions()
if not self.food:
self.generate_food()
return self.generate_observations()
def create_new_point(self, snake, key):
new_point = [snake[0][0], snake[0][1]]
if key == 0:
new_point[0] -= 1
elif key == 1:
new_point[1] += 1
elif key == 2:
new_point[0] += 1
elif key == 3:
new_point[1] -= 1
snake.insert(0, new_point)
def food_eaten(self, snake):
return self.food in snake
def remove_last_point(self, snake):
snake.pop()
def check_collisions(self):
        player_collided = False
        enemy_collided = False
        head = self.player[0]
        if (head[0] == 0 or head[0] == self.board['width'] or
                head[1] == 0 or head[1] == self.board['height'] or
                head in self.player[1:-1] or head in self.enemy):
            player_collided = True
        e_head = self.enemy[0]
        if (e_head[0] == 0 or e_head[0] == self.board['width'] or
                e_head[1] == 0 or e_head[1] == self.board['height'] or
                e_head in self.player or e_head in self.enemy[1:-1]):
            enemy_collided = True
if player_collided:
self.lives -= 1
if not self.is_done():
self.player_init(self.lives)
if enemy_collided:
self.enemy_init()
def generate_observations(self):
"""
:return: [lives, score, player, enemy, food]
"""
return self.lives, self.score, self.player, self.enemy, self.food
<|reserved_special_token_0|>
def render_init(self):
pygame.init()
self.clock = pygame.time.Clock()
self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),
0, 32)
self.surface = pygame.Surface(self.screen.get_size())
self.surface = self.surface.convert()
drawGrid(self.surface)
self.myfont = pygame.font.SysFont('bahnschrift', 20)
def step_render(self, key):
"""
:return: [lives, score, player, enemy, food]
"""
self.clock.tick(3)
drawGrid(self.surface)
if not self.food:
self.generate_food()
_lives, _score, _player, _enemy, _food = self.step(key)
self.draw_snake(self.player, self.surface, SNAKE_COLOR[0],
SNAKE_HEAD_COLOR[0])
self.draw_snake(self.enemy, self.surface, SNAKE_COLOR[1],
SNAKE_HEAD_COLOR[1])
if not self.food:
self.generate_food()
self.draw_food(self.surface, FOOD_COLOR)
self.screen.blit(self.surface, (0, 0))
text1 = self.myfont.render('Score: {0} Lives: {1}'.format(round(
self.score, 2), self.lives), True, (250, 250, 250))
self.screen.blit(text1, (5, 10))
pygame.display.update()
return _lives, _score, _player, _enemy, _food
def draw_snake(self, snake, surface, color, head_color):
drew_head = False
for p in snake:
curr_color = color
if not drew_head:
curr_color = head_color
drew_head = True
r = pygame.Rect((p[0] * GRIDSIZE, p[1] * GRIDSIZE), (GRIDSIZE,
GRIDSIZE))
pygame.draw.rect(surface, curr_color, r)
pygame.draw.rect(surface, SQUARE_COLOR, r, 1)
def draw_food(self, surface, color):
r = pygame.Rect((self.food[0] * GRIDSIZE, self.food[1] * GRIDSIZE),
(GRIDSIZE, GRIDSIZE))
pygame.draw.rect(surface, color, r)
pygame.draw.rect(surface, SQUARE_COLOR, r, 1)
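    # Board-to-pixel mapping used by draw_snake/draw_food: a cell [cx, cy]
    # becomes the GRIDSIZE x GRIDSIZE square at (cx * GRIDSIZE, cy * GRIDSIZE),
    # with GRIDSIZE assumed to be defined among the masked constants.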
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def end_game(self):
if self.gui:
self.render_destroy()
raise Exception('Game over')
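# Minimal headless usage sketch, assuming the masked imports above provide
# LIVES and random.randint. step() calls end_game() once is_done() reports
# the game is over, which raises Exception('Game over'), so the driver loop
# simply catches that to stop.
if __name__ == '__main__':
    from random import randint

    game = SnakeGame(board_width=10, board_height=10, gui=False)
    lives, score, player, enemy, food = game.start()
    try:
        while True:
            key = randint(0, 3)  # 0 - UP, 1 - RIGHT, 2 - DOWN, 3 - LEFT
            lives, score, player, enemy, food = game.step(key)
    except Exception:
        print('Game over, final score:', round(score, 2))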
<|reserved_special_token_1|>
<|reserved_special_token_0|>
FOOD_REWARD = 5
DEATH_PENALTY = 10
MOVE_PENALTY = 0.1
LIVES = 5
SQUARE_COLOR = (80, 80, 80)
SNAKE_HEAD_COLOR = ((0, 51, 0), (0, 0, 153), (102, 0, 102))
SNAKE_COLOR = ((154, 205, 50), (50, 50, 250), (50, 0, 250))
FOOD_COLOR = (255, 69, 0)
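# Index 0 of SNAKE_COLOR/SNAKE_HEAD_COLOR styles the player snake and index 1
# the enemy (see step_render); the third entry is unused in the code shown.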
class SnakeGame:
def __init__(self, board_width=10, board_height=10, gui=False,
enemy_epsilon=0.1):
self.score = 0
self.board = {'width': board_width, 'height': board_height}
self.gui = gui
self.lives = LIVES
self.player = []
self.enemy = []
self.enemy_epsilon = enemy_epsilon
self.food = []
def start(self):
"""
:return: [lives, score, player, enemy, food]
"""
self.player_init(LIVES)
self.enemy_init()
self.generate_food()
if self.gui:
self.render_init()
return self.generate_observations()
def player_init(self, lives=LIVES):
x = randint(3, math.ceil(self.board['width'] / 2) - 1)
y = randint(3, self.board['height'] - 3)
self.player = []
vertical = randint(0, 1) == 0
for i in range(3):
point = [x + i, y] if vertical else [x, y + i]
self.player.insert(0, point)
self.lives = lives
def enemy_init(self):
        x = randint(math.ceil(self.board['width'] / 2), self.board['width'] - 3)
y = randint(3, self.board['height'] - 3)
self.enemy = []
vertical = randint(0, 1) == 0
for i in range(3):
point = [x + i, y] if vertical else [x, y + i]
self.enemy.insert(0, point)
if self.enemy[0] in self.player[1:-1]:
self.enemy_init()
def generate_food(self):
food = []
while not food:
            # randint is inclusive; cap at width-1/height-1 so food never
            # spawns on the lethal border cells (x/y == 0 or width/height)
            food = [randint(1, self.board['width'] - 1),
                    randint(1, self.board['height'] - 1)]
if food in self.enemy:
food = []
elif food in self.player:
food = []
self.food = food
def get_enemy_movement(self):
"""
        0 - UP, (-1, 0)
        1 - RIGHT, (0, +1)
        2 - DOWN, (+1, 0)
        3 - LEFT, (0, -1)
"""
if np.random.random() <= self.enemy_epsilon:
return randint(0, 3)
if self.food[0] > self.enemy[0][0]:
return 2
elif self.food[0] < self.enemy[0][0]:
return 0
elif self.food[1] > self.enemy[0][1]:
return 1
elif self.food[1] < self.enemy[0][1]:
return 3
return randint(0, 3)
def step(self, key):
"""
0 - UP,
1 - RIGHT,
2 - DOWN,
3 - LEFT
:param key:
:return: [lives, score, player, enemy, food]
"""
if self.is_done():
self.end_game()
if not self.food:
self.generate_food()
self.create_new_point(self.player, key)
self.create_new_point(self.enemy, self.get_enemy_movement())
player_ate = False
if self.food_eaten(self.player):
self.score += FOOD_REWARD
self.generate_food()
player_ate = True
else:
self.remove_last_point(self.player)
self.score -= MOVE_PENALTY
if not player_ate and self.food_eaten(self.enemy):
self.generate_food()
else:
self.remove_last_point(self.enemy)
self.check_collisions()
if not self.food:
self.generate_food()
return self.generate_observations()
def create_new_point(self, snake, key):
new_point = [snake[0][0], snake[0][1]]
if key == 0:
new_point[0] -= 1
elif key == 1:
new_point[1] += 1
elif key == 2:
new_point[0] += 1
elif key == 3:
new_point[1] -= 1
snake.insert(0, new_point)
def food_eaten(self, snake):
return self.food in snake
def remove_last_point(self, snake):
snake.pop()
def check_collisions(self):
state = 0
player_collided = False
enemy_collided = False
if self.player[0][0] == 0 or self.player[0][0] == self.board['width'
] or self.player[0][1] == 0 or self.player[0][1] == self.board[
'height'] or self.player[0] in self.player[1:-1] or self.player[0
] in self.enemy:
player_collided = True
if self.enemy[0][0] == 0 or self.enemy[0][0] == self.board['width'
] or self.enemy[0][1] == 0 or self.enemy[0][1] == self.board[
'height'] or self.enemy[0] in self.player or self.enemy[0
] in self.enemy[1:-1]:
enemy_collided = True
if player_collided:
self.lives -= 1
if not self.is_done():
self.player_init(self.lives)
if enemy_collided:
self.enemy_init()
def generate_observations(self):
"""
:return: [lives, score, player, enemy, food]
"""
return self.lives, self.score, self.player, self.enemy, self.food
"""Methods for Rendering the game"""
def render_init(self):
pygame.init()
self.clock = pygame.time.Clock()
self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),
0, 32)
self.surface = pygame.Surface(self.screen.get_size())
self.surface = self.surface.convert()
drawGrid(self.surface)
self.myfont = pygame.font.SysFont('bahnschrift', 20)
def step_render(self, key):
"""
:return: [lives, score, player, enemy, food]
"""
self.clock.tick(3)
drawGrid(self.surface)
if not self.food:
self.generate_food()
_lives, _score, _player, _enemy, _food = self.step(key)
self.draw_snake(self.player, self.surface, SNAKE_COLOR[0],
SNAKE_HEAD_COLOR[0])
self.draw_snake(self.enemy, self.surface, SNAKE_COLOR[1],
SNAKE_HEAD_COLOR[1])
if not self.food:
self.generate_food()
self.draw_food(self.surface, FOOD_COLOR)
self.screen.blit(self.surface, (0, 0))
text1 = self.myfont.render('Score: {0} Lives: {1}'.format(round(
self.score, 2), self.lives), True, (250, 250, 250))
self.screen.blit(text1, (5, 10))
pygame.display.update()
return _lives, _score, _player, _enemy, _food
def draw_snake(self, snake, surface, color, head_color):
drew_head = False
for p in snake:
curr_color = color
if not drew_head:
curr_color = head_color
drew_head = True
r = pygame.Rect((p[0] * GRIDSIZE, p[1] * GRIDSIZE), (GRIDSIZE,
GRIDSIZE))
pygame.draw.rect(surface, curr_color, r)
pygame.draw.rect(surface, SQUARE_COLOR, r, 1)
def draw_food(self, surface, color):
r = pygame.Rect((self.food[0] * GRIDSIZE, self.food[1] * GRIDSIZE),
(GRIDSIZE, GRIDSIZE))
pygame.draw.rect(surface, color, r)
pygame.draw.rect(surface, SQUARE_COLOR, r, 1)
def is_done(self):
return self.lives <= 0
def render_destroy(self):
print('Snake Player Final Score:', self.score)
def end_game(self):
if self.gui:
self.render_destroy()
raise Exception('Game over')
<|reserved_special_token_1|>
import math
import pygame
import numpy as np
from main import Snake, SCREEN_WIDTH, SCREEN_HEIGHT, drawGrid, GRIDSIZE
from random import randint
FOOD_REWARD = 5
DEATH_PENALTY = 10
MOVE_PENALTY = 0.1
LIVES = 5
SQUARE_COLOR = (80,80,80)
SNAKE_HEAD_COLOR = ((0,51,0), (0,0,153), (102,0,102))
SNAKE_COLOR = ((154,205,50), (50,50,250), (50,0,250))
FOOD_COLOR = (255,69,0)
class SnakeGame:
def __init__(self, board_width = 10, board_height = 10, gui = False, enemy_epsilon=0.1):
self.score = 0
self.board = {'width': board_width, 'height': board_height}
self.gui = gui
self.lives = LIVES
self.player = []
self.enemy = []
self.enemy_epsilon = enemy_epsilon
self.food = []
def start(self):
'''
:return: [lives, score, player, enemy, food]
'''
self.player_init(LIVES)
self.enemy_init()
self.generate_food()
if self.gui: self.render_init()
return self.generate_observations()
def player_init(self, lives=LIVES):
x = randint(3, math.ceil(self.board["width"] / 2) - 1)
y = randint(3, self.board["height"] - 3)
self.player = []
vertical = randint(0, 1) == 0
for i in range(3):
point = [x + i, y] if vertical else [x, y + i]
self.player.insert(0, point)
self.lives = lives
def enemy_init(self):
x = randint(math.ceil(self.board["width"] / 2), self.board["width"] - 3)
y = randint(3, self.board["height"] - 3)
self.enemy = []
vertical = randint(0, 1) == 0
for i in range(3):
point = [x + i, y] if vertical else [x, y + i]
self.enemy.insert(0, point)
if self.enemy[0] in self.player[1:-1]:
self.enemy_init() # retry
def generate_food(self):
food = []
while not food:
food = [randint(1, self.board["width"]), randint(1, self.board["height"])]
if food in self.enemy: food = []
elif food in self.player: food = []
self.food = food
def get_enemy_movement(self):
'''
        0 - UP, (-1, 0)
        1 - RIGHT, (0, +1)
        2 - DOWN, (+1, 0)
        3 - LEFT, (0, -1)
'''
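        # Epsilon-greedy chase: with probability enemy_epsilon pick a random
        # direction; otherwise close the gap on the first coordinate
        # (food[0] > head[0] -> key 2, which create_new_point turns into
        # new_point[0] += 1), then on the second coordinate, and fall back to
        # a random move only when the head already sits on the food cell.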
if np.random.random() <= self.enemy_epsilon:
return randint(0, 3)
if self.food[0] > self.enemy[0][0]:
return 2
elif self.food[0] < self.enemy[0][0]:
return 0
elif self.food[1] > self.enemy[0][1]:
return 1
elif self.food[1] < self.enemy[0][1]:
return 3
return randint(0, 3)
def step(self, key):
'''
0 - UP,
1 - RIGHT,
2 - DOWN,
3 - LEFT
:param key:
:return: [lives, score, player, enemy, food]
'''
if self.is_done() :
self.end_game()
if not self.food:
self.generate_food()
self.create_new_point(self.player, key)
self.create_new_point(self.enemy, self.get_enemy_movement())
player_ate = False
if self.food_eaten(self.player):
self.score += FOOD_REWARD
self.generate_food()
player_ate = True
else:
self.remove_last_point(self.player)
self.score -= MOVE_PENALTY
if (not player_ate) and self.food_eaten(self.enemy):
self.generate_food()
else:
self.remove_last_point(self.enemy)
self.check_collisions()
if not self.food:
self.generate_food()
return self.generate_observations()
def create_new_point(self, snake, key):
new_point = [snake[0][0], snake[0][1]]
if key == 0: # UP
new_point[0] -= 1
elif key == 1: # RIGHT
new_point[1] += 1
elif key == 2: # DOWN
new_point[0] += 1
elif key == 3: # LEFT
new_point[1] -= 1
snake.insert(0, new_point)
def food_eaten(self, snake):
return self.food in snake
def remove_last_point(self, snake):
snake.pop()
def check_collisions(self):
state = 0
# 0 -> no collision,
# 1 -> player collision,
# 2 -> enemy collision
player_collided = False
enemy_collided = False
if (self.player[0][0] == 0 or
self.player[0][0] == self.board["width"] or
self.player[0][1] == 0 or
self.player[0][1] == self.board["height"] or
self.player[0] in self.player[1:-1] or
self.player[0] in self.enemy):
player_collided = True
if (self.enemy[0][0] == 0 or
self.enemy[0][0] == self.board["width"] or
self.enemy[0][1] == 0 or
self.enemy[0][1] == self.board["height"] or
self.enemy[0] in self.player or
self.enemy[0] in self.enemy[1:-1]):
enemy_collided = True
if player_collided:
self.lives -= 1
if not self.is_done():
self.player_init(self.lives)
if enemy_collided:
self.enemy_init() # enemy moves randomly but has infinite lives
def generate_observations(self):
'''
:return: [lives, score, player, enemy, food]
'''
return self.lives, self.score, self.player, self.enemy, self.food
'''Methods for Rendering the game'''
def render_init(self):
pygame.init()
self.clock = pygame.time.Clock()
self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)
self.surface = pygame.Surface(self.screen.get_size())
self.surface = self.surface.convert()
drawGrid(self.surface)
self.myfont = pygame.font.SysFont("bahnschrift", 20)
def step_render(self, key):
'''
:return: [lives, score, player, enemy, food]
'''
self.clock.tick(3)
drawGrid(self.surface)
if not self.food:
self.generate_food()
_lives, _score, _player, _enemy, _food = self.step(key)
self.draw_snake(self.player, self.surface, SNAKE_COLOR[0], SNAKE_HEAD_COLOR[0])
self.draw_snake(self.enemy, self.surface, SNAKE_COLOR[1], SNAKE_HEAD_COLOR[1])
if not self.food:
self.generate_food()
self.draw_food(self.surface, FOOD_COLOR)
self.screen.blit(self.surface, (0, 0))
text1 = self.myfont.render("Score: {0} Lives: {1}".format(round(self.score, 2), self.lives), True, (250, 250, 250))
# text2 = myfont.render("Score AI {0}".format(enemy.score), 1, (250, 250, 250))
self.screen.blit(text1, (5, 10))
# screen.blit(text2, (SCREEN_WIDTH - 120, 10))
pygame.display.update()
return _lives, _score, _player, _enemy, _food
def draw_snake(self, snake, surface, color, head_color):
drew_head = False
for p in snake:
curr_color = color
if not drew_head:
curr_color = head_color
drew_head = True
r = pygame.Rect((p[0]*GRIDSIZE, p[1]*GRIDSIZE), (GRIDSIZE, GRIDSIZE))
pygame.draw.rect(surface, curr_color, r)
pygame.draw.rect(surface, SQUARE_COLOR, r, 1)
def draw_food(self, surface, color):
r = pygame.Rect((self.food[0] * GRIDSIZE, self.food[1] * GRIDSIZE), (GRIDSIZE, GRIDSIZE))
pygame.draw.rect(surface, color, r)
pygame.draw.rect(surface, SQUARE_COLOR, r, 1)
def is_done(self):
return self.lives <= 0
def render_destroy(self):
print("Snake Player Final Score:", self.score)
def end_game(self):
if self.gui: self.render_destroy()
raise Exception("Game over")
|
flexible
|
{
"blob_id": "3bb408f2b2ac63a2555258c05844881ccdfc5057",
"index": 5428,
"step-1": "<mask token>\n\n\nclass SnakeGame:\n\n def __init__(self, board_width=10, board_height=10, gui=False,\n enemy_epsilon=0.1):\n self.score = 0\n self.board = {'width': board_width, 'height': board_height}\n self.gui = gui\n self.lives = LIVES\n self.player = []\n self.enemy = []\n self.enemy_epsilon = enemy_epsilon\n self.food = []\n <mask token>\n <mask token>\n <mask token>\n\n def generate_food(self):\n food = []\n while not food:\n food = [randint(1, self.board['width']), randint(1, self.board[\n 'height'])]\n if food in self.enemy:\n food = []\n elif food in self.player:\n food = []\n self.food = food\n <mask token>\n <mask token>\n\n def create_new_point(self, snake, key):\n new_point = [snake[0][0], snake[0][1]]\n if key == 0:\n new_point[0] -= 1\n elif key == 1:\n new_point[1] += 1\n elif key == 2:\n new_point[0] += 1\n elif key == 3:\n new_point[1] -= 1\n snake.insert(0, new_point)\n\n def food_eaten(self, snake):\n return self.food in snake\n\n def remove_last_point(self, snake):\n snake.pop()\n <mask token>\n\n def generate_observations(self):\n \"\"\"\n :return: [lives, score, player, enemy, food]\n \"\"\"\n return self.lives, self.score, self.player, self.enemy, self.food\n <mask token>\n\n def render_init(self):\n pygame.init()\n self.clock = pygame.time.Clock()\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),\n 0, 32)\n self.surface = pygame.Surface(self.screen.get_size())\n self.surface = self.surface.convert()\n drawGrid(self.surface)\n self.myfont = pygame.font.SysFont('bahnschrift', 20)\n\n def step_render(self, key):\n \"\"\"\n :return: [lives, score, player, enemy, food]\n \"\"\"\n self.clock.tick(3)\n drawGrid(self.surface)\n if not self.food:\n self.generate_food()\n _lives, _score, _player, _enemy, _food = self.step(key)\n self.draw_snake(self.player, self.surface, SNAKE_COLOR[0],\n SNAKE_HEAD_COLOR[0])\n self.draw_snake(self.enemy, self.surface, SNAKE_COLOR[1],\n SNAKE_HEAD_COLOR[1])\n if not self.food:\n self.generate_food()\n self.draw_food(self.surface, FOOD_COLOR)\n self.screen.blit(self.surface, (0, 0))\n text1 = self.myfont.render('Score: {0} Lives: {1}'.format(round(\n self.score, 2), self.lives), True, (250, 250, 250))\n self.screen.blit(text1, (5, 10))\n pygame.display.update()\n return _lives, _score, _player, _enemy, _food\n\n def draw_snake(self, snake, surface, color, head_color):\n drew_head = False\n for p in snake:\n curr_color = color\n if not drew_head:\n curr_color = head_color\n drew_head = True\n r = pygame.Rect((p[0] * GRIDSIZE, p[1] * GRIDSIZE), (GRIDSIZE,\n GRIDSIZE))\n pygame.draw.rect(surface, curr_color, r)\n pygame.draw.rect(surface, SQUARE_COLOR, r, 1)\n\n def draw_food(self, surface, color):\n r = pygame.Rect((self.food[0] * GRIDSIZE, self.food[1] * GRIDSIZE),\n (GRIDSIZE, GRIDSIZE))\n pygame.draw.rect(surface, color, r)\n pygame.draw.rect(surface, SQUARE_COLOR, r, 1)\n <mask token>\n <mask token>\n\n def end_game(self):\n if self.gui:\n self.render_destroy()\n raise Exception('Game over')\n",
"step-2": "<mask token>\n\n\nclass SnakeGame:\n\n def __init__(self, board_width=10, board_height=10, gui=False,\n enemy_epsilon=0.1):\n self.score = 0\n self.board = {'width': board_width, 'height': board_height}\n self.gui = gui\n self.lives = LIVES\n self.player = []\n self.enemy = []\n self.enemy_epsilon = enemy_epsilon\n self.food = []\n\n def start(self):\n \"\"\"\n :return: [lives, score, player, enemy, food]\n \"\"\"\n self.player_init(LIVES)\n self.enemy_init()\n self.generate_food()\n if self.gui:\n self.render_init()\n return self.generate_observations()\n <mask token>\n <mask token>\n\n def generate_food(self):\n food = []\n while not food:\n food = [randint(1, self.board['width']), randint(1, self.board[\n 'height'])]\n if food in self.enemy:\n food = []\n elif food in self.player:\n food = []\n self.food = food\n <mask token>\n <mask token>\n\n def create_new_point(self, snake, key):\n new_point = [snake[0][0], snake[0][1]]\n if key == 0:\n new_point[0] -= 1\n elif key == 1:\n new_point[1] += 1\n elif key == 2:\n new_point[0] += 1\n elif key == 3:\n new_point[1] -= 1\n snake.insert(0, new_point)\n\n def food_eaten(self, snake):\n return self.food in snake\n\n def remove_last_point(self, snake):\n snake.pop()\n\n def check_collisions(self):\n state = 0\n player_collided = False\n enemy_collided = False\n if self.player[0][0] == 0 or self.player[0][0] == self.board['width'\n ] or self.player[0][1] == 0 or self.player[0][1] == self.board[\n 'height'] or self.player[0] in self.player[1:-1] or self.player[0\n ] in self.enemy:\n player_collided = True\n if self.enemy[0][0] == 0 or self.enemy[0][0] == self.board['width'\n ] or self.enemy[0][1] == 0 or self.enemy[0][1] == self.board[\n 'height'] or self.enemy[0] in self.player or self.enemy[0\n ] in self.enemy[1:-1]:\n enemy_collided = True\n if player_collided:\n self.lives -= 1\n if not self.is_done():\n self.player_init(self.lives)\n if enemy_collided:\n self.enemy_init()\n\n def generate_observations(self):\n \"\"\"\n :return: [lives, score, player, enemy, food]\n \"\"\"\n return self.lives, self.score, self.player, self.enemy, self.food\n <mask token>\n\n def render_init(self):\n pygame.init()\n self.clock = pygame.time.Clock()\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),\n 0, 32)\n self.surface = pygame.Surface(self.screen.get_size())\n self.surface = self.surface.convert()\n drawGrid(self.surface)\n self.myfont = pygame.font.SysFont('bahnschrift', 20)\n\n def step_render(self, key):\n \"\"\"\n :return: [lives, score, player, enemy, food]\n \"\"\"\n self.clock.tick(3)\n drawGrid(self.surface)\n if not self.food:\n self.generate_food()\n _lives, _score, _player, _enemy, _food = self.step(key)\n self.draw_snake(self.player, self.surface, SNAKE_COLOR[0],\n SNAKE_HEAD_COLOR[0])\n self.draw_snake(self.enemy, self.surface, SNAKE_COLOR[1],\n SNAKE_HEAD_COLOR[1])\n if not self.food:\n self.generate_food()\n self.draw_food(self.surface, FOOD_COLOR)\n self.screen.blit(self.surface, (0, 0))\n text1 = self.myfont.render('Score: {0} Lives: {1}'.format(round(\n self.score, 2), self.lives), True, (250, 250, 250))\n self.screen.blit(text1, (5, 10))\n pygame.display.update()\n return _lives, _score, _player, _enemy, _food\n\n def draw_snake(self, snake, surface, color, head_color):\n drew_head = False\n for p in snake:\n curr_color = color\n if not drew_head:\n curr_color = head_color\n drew_head = True\n r = pygame.Rect((p[0] * GRIDSIZE, p[1] * GRIDSIZE), (GRIDSIZE,\n GRIDSIZE))\n pygame.draw.rect(surface, curr_color, 
r)\n pygame.draw.rect(surface, SQUARE_COLOR, r, 1)\n\n def draw_food(self, surface, color):\n r = pygame.Rect((self.food[0] * GRIDSIZE, self.food[1] * GRIDSIZE),\n (GRIDSIZE, GRIDSIZE))\n pygame.draw.rect(surface, color, r)\n pygame.draw.rect(surface, SQUARE_COLOR, r, 1)\n <mask token>\n <mask token>\n\n def end_game(self):\n if self.gui:\n self.render_destroy()\n raise Exception('Game over')\n",
"step-3": "<mask token>\n\n\nclass SnakeGame:\n\n def __init__(self, board_width=10, board_height=10, gui=False,\n enemy_epsilon=0.1):\n self.score = 0\n self.board = {'width': board_width, 'height': board_height}\n self.gui = gui\n self.lives = LIVES\n self.player = []\n self.enemy = []\n self.enemy_epsilon = enemy_epsilon\n self.food = []\n\n def start(self):\n \"\"\"\n :return: [lives, score, player, enemy, food]\n \"\"\"\n self.player_init(LIVES)\n self.enemy_init()\n self.generate_food()\n if self.gui:\n self.render_init()\n return self.generate_observations()\n\n def player_init(self, lives=LIVES):\n x = randint(3, math.ceil(self.board['width'] / 2) - 1)\n y = randint(3, self.board['height'] - 3)\n self.player = []\n vertical = randint(0, 1) == 0\n for i in range(3):\n point = [x + i, y] if vertical else [x, y + i]\n self.player.insert(0, point)\n self.lives = lives\n\n def enemy_init(self):\n x = randint(math.ceil(self.board['width'] / 2), self.board['width'] - 3\n )\n y = randint(3, self.board['height'] - 3)\n self.enemy = []\n vertical = randint(0, 1) == 0\n for i in range(3):\n point = [x + i, y] if vertical else [x, y + i]\n self.enemy.insert(0, point)\n if self.enemy[0] in self.player[1:-1]:\n self.enemy_init()\n\n def generate_food(self):\n food = []\n while not food:\n food = [randint(1, self.board['width']), randint(1, self.board[\n 'height'])]\n if food in self.enemy:\n food = []\n elif food in self.player:\n food = []\n self.food = food\n\n def get_enemy_movement(self):\n \"\"\"\n 0 - UP, (-1, 0)\n 1 - RIGHT, (\n 2 - DOWN,\n 3 - LEFT\n \"\"\"\n if np.random.random() <= self.enemy_epsilon:\n return randint(0, 3)\n if self.food[0] > self.enemy[0][0]:\n return 2\n elif self.food[0] < self.enemy[0][0]:\n return 0\n elif self.food[1] > self.enemy[0][1]:\n return 1\n elif self.food[1] < self.enemy[0][1]:\n return 3\n return randint(0, 3)\n\n def step(self, key):\n \"\"\"\n 0 - UP,\n 1 - RIGHT,\n 2 - DOWN,\n 3 - LEFT\n :param key:\n :return: [lives, score, player, enemy, food]\n \"\"\"\n if self.is_done():\n self.end_game()\n if not self.food:\n self.generate_food()\n self.create_new_point(self.player, key)\n self.create_new_point(self.enemy, self.get_enemy_movement())\n player_ate = False\n if self.food_eaten(self.player):\n self.score += FOOD_REWARD\n self.generate_food()\n player_ate = True\n else:\n self.remove_last_point(self.player)\n self.score -= MOVE_PENALTY\n if not player_ate and self.food_eaten(self.enemy):\n self.generate_food()\n else:\n self.remove_last_point(self.enemy)\n self.check_collisions()\n if not self.food:\n self.generate_food()\n return self.generate_observations()\n\n def create_new_point(self, snake, key):\n new_point = [snake[0][0], snake[0][1]]\n if key == 0:\n new_point[0] -= 1\n elif key == 1:\n new_point[1] += 1\n elif key == 2:\n new_point[0] += 1\n elif key == 3:\n new_point[1] -= 1\n snake.insert(0, new_point)\n\n def food_eaten(self, snake):\n return self.food in snake\n\n def remove_last_point(self, snake):\n snake.pop()\n\n def check_collisions(self):\n state = 0\n player_collided = False\n enemy_collided = False\n if self.player[0][0] == 0 or self.player[0][0] == self.board['width'\n ] or self.player[0][1] == 0 or self.player[0][1] == self.board[\n 'height'] or self.player[0] in self.player[1:-1] or self.player[0\n ] in self.enemy:\n player_collided = True\n if self.enemy[0][0] == 0 or self.enemy[0][0] == self.board['width'\n ] or self.enemy[0][1] == 0 or self.enemy[0][1] == self.board[\n 'height'] or self.enemy[0] in self.player or 
self.enemy[0\n ] in self.enemy[1:-1]:\n enemy_collided = True\n if player_collided:\n self.lives -= 1\n if not self.is_done():\n self.player_init(self.lives)\n if enemy_collided:\n self.enemy_init()\n\n def generate_observations(self):\n \"\"\"\n :return: [lives, score, player, enemy, food]\n \"\"\"\n return self.lives, self.score, self.player, self.enemy, self.food\n <mask token>\n\n def render_init(self):\n pygame.init()\n self.clock = pygame.time.Clock()\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),\n 0, 32)\n self.surface = pygame.Surface(self.screen.get_size())\n self.surface = self.surface.convert()\n drawGrid(self.surface)\n self.myfont = pygame.font.SysFont('bahnschrift', 20)\n\n def step_render(self, key):\n \"\"\"\n :return: [lives, score, player, enemy, food]\n \"\"\"\n self.clock.tick(3)\n drawGrid(self.surface)\n if not self.food:\n self.generate_food()\n _lives, _score, _player, _enemy, _food = self.step(key)\n self.draw_snake(self.player, self.surface, SNAKE_COLOR[0],\n SNAKE_HEAD_COLOR[0])\n self.draw_snake(self.enemy, self.surface, SNAKE_COLOR[1],\n SNAKE_HEAD_COLOR[1])\n if not self.food:\n self.generate_food()\n self.draw_food(self.surface, FOOD_COLOR)\n self.screen.blit(self.surface, (0, 0))\n text1 = self.myfont.render('Score: {0} Lives: {1}'.format(round(\n self.score, 2), self.lives), True, (250, 250, 250))\n self.screen.blit(text1, (5, 10))\n pygame.display.update()\n return _lives, _score, _player, _enemy, _food\n\n def draw_snake(self, snake, surface, color, head_color):\n drew_head = False\n for p in snake:\n curr_color = color\n if not drew_head:\n curr_color = head_color\n drew_head = True\n r = pygame.Rect((p[0] * GRIDSIZE, p[1] * GRIDSIZE), (GRIDSIZE,\n GRIDSIZE))\n pygame.draw.rect(surface, curr_color, r)\n pygame.draw.rect(surface, SQUARE_COLOR, r, 1)\n\n def draw_food(self, surface, color):\n r = pygame.Rect((self.food[0] * GRIDSIZE, self.food[1] * GRIDSIZE),\n (GRIDSIZE, GRIDSIZE))\n pygame.draw.rect(surface, color, r)\n pygame.draw.rect(surface, SQUARE_COLOR, r, 1)\n <mask token>\n <mask token>\n\n def end_game(self):\n if self.gui:\n self.render_destroy()\n raise Exception('Game over')\n",
"step-4": "<mask token>\nFOOD_REWARD = 5\nDEATH_PENALTY = 10\nMOVE_PENALTY = 0.1\nLIVES = 5\nSQUARE_COLOR = 80, 80, 80\nSNAKE_HEAD_COLOR = (0, 51, 0), (0, 0, 153), (102, 0, 102)\nSNAKE_COLOR = (154, 205, 50), (50, 50, 250), (50, 0, 250)\nFOOD_COLOR = 255, 69, 0\n\n\nclass SnakeGame:\n\n def __init__(self, board_width=10, board_height=10, gui=False,\n enemy_epsilon=0.1):\n self.score = 0\n self.board = {'width': board_width, 'height': board_height}\n self.gui = gui\n self.lives = LIVES\n self.player = []\n self.enemy = []\n self.enemy_epsilon = enemy_epsilon\n self.food = []\n\n def start(self):\n \"\"\"\n :return: [lives, score, player, enemy, food]\n \"\"\"\n self.player_init(LIVES)\n self.enemy_init()\n self.generate_food()\n if self.gui:\n self.render_init()\n return self.generate_observations()\n\n def player_init(self, lives=LIVES):\n x = randint(3, math.ceil(self.board['width'] / 2) - 1)\n y = randint(3, self.board['height'] - 3)\n self.player = []\n vertical = randint(0, 1) == 0\n for i in range(3):\n point = [x + i, y] if vertical else [x, y + i]\n self.player.insert(0, point)\n self.lives = lives\n\n def enemy_init(self):\n x = randint(math.ceil(self.board['width'] / 2), self.board['width'] - 3\n )\n y = randint(3, self.board['height'] - 3)\n self.enemy = []\n vertical = randint(0, 1) == 0\n for i in range(3):\n point = [x + i, y] if vertical else [x, y + i]\n self.enemy.insert(0, point)\n if self.enemy[0] in self.player[1:-1]:\n self.enemy_init()\n\n def generate_food(self):\n food = []\n while not food:\n food = [randint(1, self.board['width']), randint(1, self.board[\n 'height'])]\n if food in self.enemy:\n food = []\n elif food in self.player:\n food = []\n self.food = food\n\n def get_enemy_movement(self):\n \"\"\"\n 0 - UP, (-1, 0)\n 1 - RIGHT, (\n 2 - DOWN,\n 3 - LEFT\n \"\"\"\n if np.random.random() <= self.enemy_epsilon:\n return randint(0, 3)\n if self.food[0] > self.enemy[0][0]:\n return 2\n elif self.food[0] < self.enemy[0][0]:\n return 0\n elif self.food[1] > self.enemy[0][1]:\n return 1\n elif self.food[1] < self.enemy[0][1]:\n return 3\n return randint(0, 3)\n\n def step(self, key):\n \"\"\"\n 0 - UP,\n 1 - RIGHT,\n 2 - DOWN,\n 3 - LEFT\n :param key:\n :return: [lives, score, player, enemy, food]\n \"\"\"\n if self.is_done():\n self.end_game()\n if not self.food:\n self.generate_food()\n self.create_new_point(self.player, key)\n self.create_new_point(self.enemy, self.get_enemy_movement())\n player_ate = False\n if self.food_eaten(self.player):\n self.score += FOOD_REWARD\n self.generate_food()\n player_ate = True\n else:\n self.remove_last_point(self.player)\n self.score -= MOVE_PENALTY\n if not player_ate and self.food_eaten(self.enemy):\n self.generate_food()\n else:\n self.remove_last_point(self.enemy)\n self.check_collisions()\n if not self.food:\n self.generate_food()\n return self.generate_observations()\n\n def create_new_point(self, snake, key):\n new_point = [snake[0][0], snake[0][1]]\n if key == 0:\n new_point[0] -= 1\n elif key == 1:\n new_point[1] += 1\n elif key == 2:\n new_point[0] += 1\n elif key == 3:\n new_point[1] -= 1\n snake.insert(0, new_point)\n\n def food_eaten(self, snake):\n return self.food in snake\n\n def remove_last_point(self, snake):\n snake.pop()\n\n def check_collisions(self):\n state = 0\n player_collided = False\n enemy_collided = False\n if self.player[0][0] == 0 or self.player[0][0] == self.board['width'\n ] or self.player[0][1] == 0 or self.player[0][1] == self.board[\n 'height'] or self.player[0] in self.player[1:-1] or 
self.player[0\n ] in self.enemy:\n player_collided = True\n if self.enemy[0][0] == 0 or self.enemy[0][0] == self.board['width'\n ] or self.enemy[0][1] == 0 or self.enemy[0][1] == self.board[\n 'height'] or self.enemy[0] in self.player or self.enemy[0\n ] in self.enemy[1:-1]:\n enemy_collided = True\n if player_collided:\n self.lives -= 1\n if not self.is_done():\n self.player_init(self.lives)\n if enemy_collided:\n self.enemy_init()\n\n def generate_observations(self):\n \"\"\"\n :return: [lives, score, player, enemy, food]\n \"\"\"\n return self.lives, self.score, self.player, self.enemy, self.food\n \"\"\"Methods for Rendering the game\"\"\"\n\n def render_init(self):\n pygame.init()\n self.clock = pygame.time.Clock()\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),\n 0, 32)\n self.surface = pygame.Surface(self.screen.get_size())\n self.surface = self.surface.convert()\n drawGrid(self.surface)\n self.myfont = pygame.font.SysFont('bahnschrift', 20)\n\n def step_render(self, key):\n \"\"\"\n :return: [lives, score, player, enemy, food]\n \"\"\"\n self.clock.tick(3)\n drawGrid(self.surface)\n if not self.food:\n self.generate_food()\n _lives, _score, _player, _enemy, _food = self.step(key)\n self.draw_snake(self.player, self.surface, SNAKE_COLOR[0],\n SNAKE_HEAD_COLOR[0])\n self.draw_snake(self.enemy, self.surface, SNAKE_COLOR[1],\n SNAKE_HEAD_COLOR[1])\n if not self.food:\n self.generate_food()\n self.draw_food(self.surface, FOOD_COLOR)\n self.screen.blit(self.surface, (0, 0))\n text1 = self.myfont.render('Score: {0} Lives: {1}'.format(round(\n self.score, 2), self.lives), True, (250, 250, 250))\n self.screen.blit(text1, (5, 10))\n pygame.display.update()\n return _lives, _score, _player, _enemy, _food\n\n def draw_snake(self, snake, surface, color, head_color):\n drew_head = False\n for p in snake:\n curr_color = color\n if not drew_head:\n curr_color = head_color\n drew_head = True\n r = pygame.Rect((p[0] * GRIDSIZE, p[1] * GRIDSIZE), (GRIDSIZE,\n GRIDSIZE))\n pygame.draw.rect(surface, curr_color, r)\n pygame.draw.rect(surface, SQUARE_COLOR, r, 1)\n\n def draw_food(self, surface, color):\n r = pygame.Rect((self.food[0] * GRIDSIZE, self.food[1] * GRIDSIZE),\n (GRIDSIZE, GRIDSIZE))\n pygame.draw.rect(surface, color, r)\n pygame.draw.rect(surface, SQUARE_COLOR, r, 1)\n\n def is_done(self):\n return self.lives <= 0\n\n def render_destroy(self):\n print('Snake Player Final Score:', self.score)\n\n def end_game(self):\n if self.gui:\n self.render_destroy()\n raise Exception('Game over')\n",
"step-5": "import math\n\nimport pygame\nimport numpy as np\nfrom main import Snake, SCREEN_WIDTH, SCREEN_HEIGHT, drawGrid, GRIDSIZE\nfrom random import randint\n\nFOOD_REWARD = 5\nDEATH_PENALTY = 10\nMOVE_PENALTY = 0.1\nLIVES = 5\n\nSQUARE_COLOR = (80,80,80)\nSNAKE_HEAD_COLOR = ((0,51,0), (0,0,153), (102,0,102))\nSNAKE_COLOR = ((154,205,50), (50,50,250), (50,0,250))\nFOOD_COLOR = (255,69,0)\n\nclass SnakeGame:\n def __init__(self, board_width = 10, board_height = 10, gui = False, enemy_epsilon=0.1):\n self.score = 0\n self.board = {'width': board_width, 'height': board_height}\n self.gui = gui\n self.lives = LIVES\n self.player = []\n self.enemy = []\n self.enemy_epsilon = enemy_epsilon\n self.food = []\n\n def start(self):\n '''\n :return: [lives, score, player, enemy, food]\n '''\n self.player_init(LIVES)\n self.enemy_init()\n self.generate_food()\n if self.gui: self.render_init()\n return self.generate_observations()\n\n def player_init(self, lives=LIVES):\n x = randint(3, math.ceil(self.board[\"width\"] / 2) - 1)\n y = randint(3, self.board[\"height\"] - 3)\n self.player = []\n vertical = randint(0, 1) == 0\n for i in range(3):\n point = [x + i, y] if vertical else [x, y + i]\n self.player.insert(0, point)\n self.lives = lives\n\n def enemy_init(self):\n x = randint(math.ceil(self.board[\"width\"] / 2), self.board[\"width\"] - 3)\n y = randint(3, self.board[\"height\"] - 3)\n self.enemy = []\n vertical = randint(0, 1) == 0\n for i in range(3):\n point = [x + i, y] if vertical else [x, y + i]\n self.enemy.insert(0, point)\n\n if self.enemy[0] in self.player[1:-1]:\n self.enemy_init() # retry\n\n def generate_food(self):\n food = []\n while not food:\n food = [randint(1, self.board[\"width\"]), randint(1, self.board[\"height\"])]\n if food in self.enemy: food = []\n elif food in self.player: food = []\n self.food = food\n\n def get_enemy_movement(self):\n '''\n 0 - UP, (-1, 0)\n 1 - RIGHT, (\n 2 - DOWN,\n 3 - LEFT\n '''\n if np.random.random() <= self.enemy_epsilon:\n return randint(0, 3)\n\n if self.food[0] > self.enemy[0][0]:\n return 2\n elif self.food[0] < self.enemy[0][0]:\n return 0\n elif self.food[1] > self.enemy[0][1]:\n return 1\n elif self.food[1] < self.enemy[0][1]:\n return 3\n\n return randint(0, 3)\n\n def step(self, key):\n '''\n 0 - UP,\n 1 - RIGHT,\n 2 - DOWN,\n 3 - LEFT\n :param key:\n :return: [lives, score, player, enemy, food]\n '''\n\n if self.is_done() :\n self.end_game()\n\n if not self.food:\n self.generate_food()\n\n self.create_new_point(self.player, key)\n self.create_new_point(self.enemy, self.get_enemy_movement())\n\n player_ate = False\n if self.food_eaten(self.player):\n self.score += FOOD_REWARD\n self.generate_food()\n player_ate = True\n else:\n self.remove_last_point(self.player)\n self.score -= MOVE_PENALTY\n\n if (not player_ate) and self.food_eaten(self.enemy):\n self.generate_food()\n else:\n self.remove_last_point(self.enemy)\n\n self.check_collisions()\n\n if not self.food:\n self.generate_food()\n\n return self.generate_observations()\n\n def create_new_point(self, snake, key):\n new_point = [snake[0][0], snake[0][1]]\n if key == 0: # UP\n new_point[0] -= 1\n elif key == 1: # RIGHT\n new_point[1] += 1\n elif key == 2: # DOWN\n new_point[0] += 1\n elif key == 3: # LEFT\n new_point[1] -= 1\n snake.insert(0, new_point)\n\n def food_eaten(self, snake):\n return self.food in snake\n\n def remove_last_point(self, snake):\n snake.pop()\n\n\n def check_collisions(self):\n\n state = 0\n # 0 -> no collision,\n # 1 -> player collision,\n # 2 -> enemy 
collision\n\n player_collided = False\n enemy_collided = False\n\n if (self.player[0][0] == 0 or\n self.player[0][0] == self.board[\"width\"] or\n self.player[0][1] == 0 or\n self.player[0][1] == self.board[\"height\"] or\n self.player[0] in self.player[1:-1] or\n self.player[0] in self.enemy):\n player_collided = True\n\n if (self.enemy[0][0] == 0 or\n self.enemy[0][0] == self.board[\"width\"] or\n self.enemy[0][1] == 0 or\n self.enemy[0][1] == self.board[\"height\"] or\n self.enemy[0] in self.player or\n self.enemy[0] in self.enemy[1:-1]):\n enemy_collided = True\n\n if player_collided:\n self.lives -= 1\n if not self.is_done():\n self.player_init(self.lives)\n\n if enemy_collided:\n self.enemy_init() # enemy moves randomly but has infinite lives\n\n def generate_observations(self):\n '''\n :return: [lives, score, player, enemy, food]\n '''\n return self.lives, self.score, self.player, self.enemy, self.food\n\n '''Methods for Rendering the game'''\n\n def render_init(self):\n pygame.init()\n self.clock = pygame.time.Clock()\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)\n\n self.surface = pygame.Surface(self.screen.get_size())\n self.surface = self.surface.convert()\n drawGrid(self.surface)\n self.myfont = pygame.font.SysFont(\"bahnschrift\", 20)\n\n def step_render(self, key):\n '''\n :return: [lives, score, player, enemy, food]\n '''\n self.clock.tick(3)\n drawGrid(self.surface)\n\n if not self.food:\n self.generate_food()\n\n _lives, _score, _player, _enemy, _food = self.step(key)\n\n self.draw_snake(self.player, self.surface, SNAKE_COLOR[0], SNAKE_HEAD_COLOR[0])\n self.draw_snake(self.enemy, self.surface, SNAKE_COLOR[1], SNAKE_HEAD_COLOR[1])\n\n if not self.food:\n self.generate_food()\n\n self.draw_food(self.surface, FOOD_COLOR)\n\n self.screen.blit(self.surface, (0, 0))\n text1 = self.myfont.render(\"Score: {0} Lives: {1}\".format(round(self.score, 2), self.lives), True, (250, 250, 250))\n # text2 = myfont.render(\"Score AI {0}\".format(enemy.score), 1, (250, 250, 250))\n self.screen.blit(text1, (5, 10))\n # screen.blit(text2, (SCREEN_WIDTH - 120, 10))\n pygame.display.update()\n return _lives, _score, _player, _enemy, _food\n\n def draw_snake(self, snake, surface, color, head_color):\n drew_head = False\n for p in snake:\n curr_color = color\n if not drew_head:\n curr_color = head_color\n drew_head = True\n\n r = pygame.Rect((p[0]*GRIDSIZE, p[1]*GRIDSIZE), (GRIDSIZE, GRIDSIZE))\n pygame.draw.rect(surface, curr_color, r)\n pygame.draw.rect(surface, SQUARE_COLOR, r, 1)\n\n def draw_food(self, surface, color):\n r = pygame.Rect((self.food[0] * GRIDSIZE, self.food[1] * GRIDSIZE), (GRIDSIZE, GRIDSIZE))\n pygame.draw.rect(surface, color, r)\n pygame.draw.rect(surface, SQUARE_COLOR, r, 1)\n\n def is_done(self):\n return self.lives <= 0\n\n def render_destroy(self):\n print(\"Snake Player Final Score:\", self.score)\n\n def end_game(self):\n if self.gui: self.render_destroy()\n raise Exception(\"Game over\")",
"step-ids": [
12,
14,
18,
22,
24
]
}
|
[
12,
14,
18,
22,
24
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
##############################################################################
Software Package Risk Analysis Development Environment Specific Work Book View
##############################################################################
"""
#
# rtk.software.__gui.gtk.DevelopmentEnvironment.py is part of The RTK
# Project
#
# All rights reserved.
import sys
# Import modules for localization support.
import gettext
import locale
# Modules required for the GUI.
try:
import pygtk
pygtk.require('2.0')
except ImportError:
sys.exit(1)
try:
import gtk
except ImportError:
sys.exit(1)
try:
import gtk.glade
except ImportError:
sys.exit(1)
# Import other RTK modules.
try:
import Configuration
import gui.gtk.Widgets as Widgets
except ImportError:
import rtk.Configuration as Configuration
import rtk.gui.gtk.Widgets as Widgets
__author__ = 'Andrew Rowland'
__email__ = 'andrew.rowland@reliaqual.com'
__organization__ = 'ReliaQual Associates, LLC'
__copyright__ = 'Copyright 2007 - 2015 Andrew "weibullguy" Rowland'
try:
locale.setlocale(locale.LC_ALL, Configuration.LOCALE)
except locale.Error:
locale.setlocale(locale.LC_ALL, '')
_ = gettext.gettext
class RiskAnalysis(gtk.VPaned):
"""
The Work Book view for analyzing and displaying the risk associated with
the development environment. The attributes of a development environment
Work Book view are:
:ivar list _lst_handler_id: the list of gtk.Widget() signal handler IDs.
:ivar _software_model: the :py:class:`rtk.software.Software.Model` to
display.
"""
def __init__(self):
"""
Method to initialize the development environment risk analysis
questions Work Book page.
"""
gtk.VPaned.__init__(self)
# Define private dictionary attributes.
# Define private list attributes.
self._lst_handler_id = []
# Define private scalar attributes.
self._software_model = None
# Define public dictionary attributes.
# Define public list attributes.
# Define public scalar attributes.
self.chkDevEnvQ1 = Widgets.make_check_button()
self.chkDevEnvQ2 = Widgets.make_check_button()
self.chkDevEnvQ3 = Widgets.make_check_button()
self.chkDevEnvQ4 = Widgets.make_check_button()
self.chkDevEnvQ5 = Widgets.make_check_button()
self.chkDevEnvQ6 = Widgets.make_check_button()
self.chkDevEnvQ7 = Widgets.make_check_button()
self.chkDevEnvQ8 = Widgets.make_check_button()
self.chkDevEnvQ9 = Widgets.make_check_button()
self.chkDevEnvQ10 = Widgets.make_check_button()
self.chkDevEnvQ11 = Widgets.make_check_button()
self.chkDevEnvQ12 = Widgets.make_check_button()
self.chkDevEnvQ13 = Widgets.make_check_button()
self.chkDevEnvQ14 = Widgets.make_check_button()
self.chkDevEnvQ15 = Widgets.make_check_button()
self.chkDevEnvQ16 = Widgets.make_check_button()
self.chkDevEnvQ17 = Widgets.make_check_button()
self.chkDevEnvQ18 = Widgets.make_check_button()
self.chkDevEnvQ19 = Widgets.make_check_button()
self.chkDevEnvQ20 = Widgets.make_check_button()
self.chkDevEnvQ21 = Widgets.make_check_button()
self.chkDevEnvQ22 = Widgets.make_check_button()
self.chkDevEnvQ23 = Widgets.make_check_button()
self.chkDevEnvQ24 = Widgets.make_check_button()
self.chkDevEnvQ25 = Widgets.make_check_button()
self.chkDevEnvQ26 = Widgets.make_check_button()
self.chkDevEnvQ27 = Widgets.make_check_button()
self.chkDevEnvQ28 = Widgets.make_check_button()
self.chkDevEnvQ29 = Widgets.make_check_button()
self.chkDevEnvQ30 = Widgets.make_check_button()
self.chkDevEnvQ31 = Widgets.make_check_button()
self.chkDevEnvQ32 = Widgets.make_check_button()
self.chkDevEnvQ33 = Widgets.make_check_button()
self.chkDevEnvQ34 = Widgets.make_check_button()
self.chkDevEnvQ35 = Widgets.make_check_button()
self.chkDevEnvQ36 = Widgets.make_check_button()
self.chkDevEnvQ37 = Widgets.make_check_button()
self.chkDevEnvQ38 = Widgets.make_check_button()
self.chkDevEnvQ39 = Widgets.make_check_button()
self.chkDevEnvQ40 = Widgets.make_check_button()
self.chkDevEnvQ41 = Widgets.make_check_button()
self.chkDevEnvQ42 = Widgets.make_check_button()
self.chkDevEnvQ43 = Widgets.make_check_button()
# Connect gtk.Widget() signals to callback methods.
self._lst_handler_id.append(
self.chkDevEnvQ1.connect('toggled', self._on_toggled, 0))
self._lst_handler_id.append(
self.chkDevEnvQ2.connect('toggled', self._on_toggled, 1))
self._lst_handler_id.append(
self.chkDevEnvQ3.connect('toggled', self._on_toggled, 2))
self._lst_handler_id.append(
self.chkDevEnvQ4.connect('toggled', self._on_toggled, 3))
self._lst_handler_id.append(
self.chkDevEnvQ5.connect('toggled', self._on_toggled, 4))
self._lst_handler_id.append(
self.chkDevEnvQ6.connect('toggled', self._on_toggled, 5))
self._lst_handler_id.append(
self.chkDevEnvQ7.connect('toggled', self._on_toggled, 6))
self._lst_handler_id.append(
self.chkDevEnvQ8.connect('toggled', self._on_toggled, 7))
self._lst_handler_id.append(
self.chkDevEnvQ9.connect('toggled', self._on_toggled, 8))
self._lst_handler_id.append(
self.chkDevEnvQ10.connect('toggled', self._on_toggled, 9))
self._lst_handler_id.append(
self.chkDevEnvQ11.connect('toggled', self._on_toggled, 10))
self._lst_handler_id.append(
self.chkDevEnvQ12.connect('toggled', self._on_toggled, 11))
self._lst_handler_id.append(
self.chkDevEnvQ13.connect('toggled', self._on_toggled, 12))
self._lst_handler_id.append(
self.chkDevEnvQ14.connect('toggled', self._on_toggled, 13))
self._lst_handler_id.append(
self.chkDevEnvQ15.connect('toggled', self._on_toggled, 14))
self._lst_handler_id.append(
self.chkDevEnvQ16.connect('toggled', self._on_toggled, 15))
self._lst_handler_id.append(
self.chkDevEnvQ17.connect('toggled', self._on_toggled, 16))
self._lst_handler_id.append(
self.chkDevEnvQ18.connect('toggled', self._on_toggled, 17))
self._lst_handler_id.append(
self.chkDevEnvQ19.connect('toggled', self._on_toggled, 18))
self._lst_handler_id.append(
self.chkDevEnvQ20.connect('toggled', self._on_toggled, 19))
self._lst_handler_id.append(
self.chkDevEnvQ21.connect('toggled', self._on_toggled, 20))
self._lst_handler_id.append(
self.chkDevEnvQ22.connect('toggled', self._on_toggled, 21))
self._lst_handler_id.append(
self.chkDevEnvQ23.connect('toggled', self._on_toggled, 22))
self._lst_handler_id.append(
self.chkDevEnvQ24.connect('toggled', self._on_toggled, 23))
self._lst_handler_id.append(
self.chkDevEnvQ25.connect('toggled', self._on_toggled, 24))
self._lst_handler_id.append(
self.chkDevEnvQ26.connect('toggled', self._on_toggled, 25))
self._lst_handler_id.append(
self.chkDevEnvQ27.connect('toggled', self._on_toggled, 26))
self._lst_handler_id.append(
self.chkDevEnvQ28.connect('toggled', self._on_toggled, 27))
self._lst_handler_id.append(
self.chkDevEnvQ29.connect('toggled', self._on_toggled, 28))
self._lst_handler_id.append(
self.chkDevEnvQ30.connect('toggled', self._on_toggled, 29))
self._lst_handler_id.append(
self.chkDevEnvQ31.connect('toggled', self._on_toggled, 30))
self._lst_handler_id.append(
self.chkDevEnvQ32.connect('toggled', self._on_toggled, 31))
self._lst_handler_id.append(
self.chkDevEnvQ33.connect('toggled', self._on_toggled, 32))
self._lst_handler_id.append(
self.chkDevEnvQ34.connect('toggled', self._on_toggled, 33))
self._lst_handler_id.append(
self.chkDevEnvQ35.connect('toggled', self._on_toggled, 34))
self._lst_handler_id.append(
self.chkDevEnvQ36.connect('toggled', self._on_toggled, 35))
self._lst_handler_id.append(
self.chkDevEnvQ37.connect('toggled', self._on_toggled, 36))
self._lst_handler_id.append(
self.chkDevEnvQ38.connect('toggled', self._on_toggled, 37))
self._lst_handler_id.append(
self.chkDevEnvQ39.connect('toggled', self._on_toggled, 38))
self._lst_handler_id.append(
self.chkDevEnvQ40.connect('toggled', self._on_toggled, 39))
self._lst_handler_id.append(
self.chkDevEnvQ41.connect('toggled', self._on_toggled, 40))
self._lst_handler_id.append(
self.chkDevEnvQ42.connect('toggled', self._on_toggled, 41))
self._lst_handler_id.append(
self.chkDevEnvQ43.connect('toggled', self._on_toggled, 42))
def create_risk_analysis_page(self, notebook):
"""
Method to create the development environment risk analysis page and add
it to the risk analysis gtk.Notebook().
:param gtk.Notebook notebook: the gtk.Notebook() instance that will
hold the development environment risk
analysis questions.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# Build-up the containers for the tab. #
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
_hpaned = gtk.HPaned()
self.pack1(_hpaned, resize=True, shrink=True)
# Create the organizational risk pane.
_fixed = gtk.Fixed()
_scrollwindow = gtk.ScrolledWindow()
_scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
_scrollwindow.add_with_viewport(_fixed)
_frame = Widgets.make_frame(label=_(u"Organization"))
_frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)
_frame.add(_scrollwindow)
_hpaned.pack1(_frame, True, True)
_labels = [_(u"1. There are separate design and coding "
u"organizations."),
_(u"2. There is an independent software test "
u"organization."),
_(u"3. There is an independent software quality "
u"assurance organization."),
_(u"4. There is an independent software configuration "
u"management organization."),
_(u"5. There is an independent software verification "
u"and validation organization."),
_(u"6. A structured programming team will develop the "
u"software."),
_(u"7. The educational level of the software team members "
u"is above average."),
_(u"8. The experience level of the software team members "
u"is above average.")]
(_x_pos,
_y_pos) = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)
_x_pos += 125
_fixed.put(self.chkDevEnvQ1, _x_pos, _y_pos[0])
_fixed.put(self.chkDevEnvQ2, _x_pos, _y_pos[1])
_fixed.put(self.chkDevEnvQ3, _x_pos, _y_pos[2])
_fixed.put(self.chkDevEnvQ4, _x_pos, _y_pos[3])
_fixed.put(self.chkDevEnvQ5, _x_pos, _y_pos[4])
_fixed.put(self.chkDevEnvQ6, _x_pos, _y_pos[5])
_fixed.put(self.chkDevEnvQ7, _x_pos, _y_pos[6])
_fixed.put(self.chkDevEnvQ8, _x_pos, _y_pos[7])
# Create the methods risk pane.
_fixed = gtk.Fixed()
_scrollwindow = gtk.ScrolledWindow()
_scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
_scrollwindow.add_with_viewport(_fixed)
_frame = Widgets.make_frame(label=_(u"Methods"))
_frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)
_frame.add(_scrollwindow)
_hpaned.pack2(_frame, True, True)
_labels = [_(u"1. Standards are defined and will be enforced."),
_(u"2. Software will be developed using a higher order "
u"language."),
_(u"3. The development process will include formal "
u"reviews (PDR, CDR, etc.)."),
_(u"4. The development process will include frequent "
u"walkthroughs."),
_(u"5. Development will take a top-down and "
u"structured approach."),
_(u"6. Unit development folders will be used."),
_(u"7. A software development library will be used."),
_(u"8. A formal change and error reporting process "
u"will be used."),
_(u"9. Progress and status will routinely be "
u"reported.")]
(__, _y_pos) = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)
_fixed.put(self.chkDevEnvQ9, _x_pos, _y_pos[0])
_fixed.put(self.chkDevEnvQ10, _x_pos, _y_pos[1])
_fixed.put(self.chkDevEnvQ11, _x_pos, _y_pos[2])
_fixed.put(self.chkDevEnvQ12, _x_pos, _y_pos[3])
_fixed.put(self.chkDevEnvQ13, _x_pos, _y_pos[4])
_fixed.put(self.chkDevEnvQ14, _x_pos, _y_pos[5])
_fixed.put(self.chkDevEnvQ15, _x_pos, _y_pos[6])
_fixed.put(self.chkDevEnvQ16, _x_pos, _y_pos[7])
_fixed.put(self.chkDevEnvQ17, _x_pos, _y_pos[8])
# Create the documentation risk pane.
_hpaned = gtk.HPaned()
self.pack2(_hpaned, resize=True, shrink=True)
_fixed = gtk.Fixed()
_scrollwindow = gtk.ScrolledWindow()
_scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
_scrollwindow.add_with_viewport(_fixed)
_frame = Widgets.make_frame(label=_(u"Documentation"))
_frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)
_frame.add(_scrollwindow)
_hpaned.pack1(_frame, True, True)
_labels = [_(u" 1. System requirements specifications will be "
u"documented."),
_(u" 2. Software requirements specifications will be "
u"documented."),
_(u" 3. Interface design specifications will be "
u"documented."),
_(u" 4. Software design specification will be "
u"documented."),
_(u" 5. Test plans, procedures, and reports will be "
u"documented."),
_(u" 6. The software development plan will be "
u"documented."),
_(u" 7. The software quality assurance plan will be "
u"documented."),
_(u" 8. The software configuration management plan will "
u"be documented."),
_(u" 9. A requirements traceability matrix will be "
u"used."),
_(u"10. The software version description will be "
u"documented."),
_(u"11. All software discrepancies will be "
u"documented.")]
(__, _y_pos) = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)
_fixed.put(self.chkDevEnvQ18, _x_pos, _y_pos[0])
_fixed.put(self.chkDevEnvQ19, _x_pos, _y_pos[1])
_fixed.put(self.chkDevEnvQ20, _x_pos, _y_pos[2])
_fixed.put(self.chkDevEnvQ21, _x_pos, _y_pos[3])
_fixed.put(self.chkDevEnvQ22, _x_pos, _y_pos[4])
_fixed.put(self.chkDevEnvQ23, _x_pos, _y_pos[5])
_fixed.put(self.chkDevEnvQ24, _x_pos, _y_pos[6])
_fixed.put(self.chkDevEnvQ25, _x_pos, _y_pos[7])
_fixed.put(self.chkDevEnvQ26, _x_pos, _y_pos[8])
_fixed.put(self.chkDevEnvQ27, _x_pos, _y_pos[9])
_fixed.put(self.chkDevEnvQ28, _x_pos, _y_pos[10])
# Create the tools and test techniques risk pane.
_fixed = gtk.Fixed()
_scrollwindow = gtk.ScrolledWindow()
_scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
_scrollwindow.add_with_viewport(_fixed)
_frame = Widgets.make_frame(label=_(u"Tools & Test Techniques"))
_frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)
_frame.add(_scrollwindow)
_hpaned.pack2(_frame, True, True)
_labels = [_(u" 1. The software language requirements will be "
u"specified."),
_(u" 2. Formal program design language will be used."),
_(u" 3. Program design graphical techniques "
u"(flowcharts, HIPO, etc.) will be used."),
_(u" 4. Simulation/emulation tools will be used."),
_(u" 5. Configuration management tools will be used."),
_(u" 6. A code auditing tool will be used."),
_(u" 7. A data flow analyzer will be used."),
_(u" 8. A programmer's workbench will be used."),
_(u" 9. Measurement tools will be used."),
_(u"10. Software code reviews will be used."),
_(u"11. Software branch testing will be used."),
_(u"12. Random testing will be used."),
_(u"13. Functional testing will be used."),
_(u"14. Error and anomaly detection testing will be "
u"used."),
_(u"15. Structure analysis will be used.")]
(__, _y_pos) = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)
_fixed.put(self.chkDevEnvQ29, _x_pos, _y_pos[0])
_fixed.put(self.chkDevEnvQ30, _x_pos, _y_pos[1])
_fixed.put(self.chkDevEnvQ31, _x_pos, _y_pos[2])
_fixed.put(self.chkDevEnvQ32, _x_pos, _y_pos[3])
_fixed.put(self.chkDevEnvQ33, _x_pos, _y_pos[4])
_fixed.put(self.chkDevEnvQ34, _x_pos, _y_pos[5])
_fixed.put(self.chkDevEnvQ35, _x_pos, _y_pos[6])
_fixed.put(self.chkDevEnvQ36, _x_pos, _y_pos[7])
_fixed.put(self.chkDevEnvQ37, _x_pos, _y_pos[8])
_fixed.put(self.chkDevEnvQ38, _x_pos, _y_pos[9])
_fixed.put(self.chkDevEnvQ39, _x_pos, _y_pos[10])
_fixed.put(self.chkDevEnvQ40, _x_pos, _y_pos[11])
_fixed.put(self.chkDevEnvQ41, _x_pos, _y_pos[12])
_fixed.put(self.chkDevEnvQ42, _x_pos, _y_pos[13])
_fixed.put(self.chkDevEnvQ43, _x_pos, _y_pos[14])
_label = gtk.Label()
_label.set_markup("<span weight='bold'>" +
_(u"Development\nEnvironment") +
"</span>")
_label.set_alignment(xalign=0.5, yalign=0.5)
_label.set_justify(gtk.JUSTIFY_CENTER)
_label.set_angle(0)
_label.show_all()
_label.set_tooltip_text(_(u"Assesses risk due to the development "
u"environment."))
notebook.insert_page(self, tab_label=_label, position=-1)
return False
def load(self, model):
"""
Method to load the Development Environment Risk Analysis answers.
:param `rtk.software.Software` model: the Software data model to load
the gtk.ToggleButton() from.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
self._software_model = model
self.chkDevEnvQ1.set_active(model.lst_development[0])
self.chkDevEnvQ2.set_active(model.lst_development[1])
self.chkDevEnvQ3.set_active(model.lst_development[2])
self.chkDevEnvQ4.set_active(model.lst_development[3])
self.chkDevEnvQ5.set_active(model.lst_development[4])
self.chkDevEnvQ6.set_active(model.lst_development[5])
self.chkDevEnvQ7.set_active(model.lst_development[6])
self.chkDevEnvQ8.set_active(model.lst_development[7])
self.chkDevEnvQ9.set_active(model.lst_development[8])
self.chkDevEnvQ10.set_active(model.lst_development[9])
self.chkDevEnvQ11.set_active(model.lst_development[10])
self.chkDevEnvQ12.set_active(model.lst_development[11])
self.chkDevEnvQ13.set_active(model.lst_development[12])
self.chkDevEnvQ14.set_active(model.lst_development[13])
self.chkDevEnvQ15.set_active(model.lst_development[14])
self.chkDevEnvQ16.set_active(model.lst_development[15])
self.chkDevEnvQ17.set_active(model.lst_development[16])
self.chkDevEnvQ18.set_active(model.lst_development[17])
self.chkDevEnvQ19.set_active(model.lst_development[18])
self.chkDevEnvQ20.set_active(model.lst_development[19])
self.chkDevEnvQ21.set_active(model.lst_development[20])
self.chkDevEnvQ22.set_active(model.lst_development[21])
self.chkDevEnvQ23.set_active(model.lst_development[22])
self.chkDevEnvQ24.set_active(model.lst_development[23])
self.chkDevEnvQ25.set_active(model.lst_development[24])
self.chkDevEnvQ26.set_active(model.lst_development[25])
self.chkDevEnvQ27.set_active(model.lst_development[26])
self.chkDevEnvQ28.set_active(model.lst_development[27])
self.chkDevEnvQ29.set_active(model.lst_development[28])
self.chkDevEnvQ30.set_active(model.lst_development[29])
self.chkDevEnvQ31.set_active(model.lst_development[30])
self.chkDevEnvQ32.set_active(model.lst_development[31])
self.chkDevEnvQ33.set_active(model.lst_development[32])
self.chkDevEnvQ34.set_active(model.lst_development[33])
self.chkDevEnvQ35.set_active(model.lst_development[34])
self.chkDevEnvQ36.set_active(model.lst_development[35])
self.chkDevEnvQ37.set_active(model.lst_development[36])
self.chkDevEnvQ38.set_active(model.lst_development[37])
self.chkDevEnvQ39.set_active(model.lst_development[38])
self.chkDevEnvQ40.set_active(model.lst_development[39])
self.chkDevEnvQ41.set_active(model.lst_development[40])
self.chkDevEnvQ42.set_active(model.lst_development[41])
self.chkDevEnvQ43.set_active(model.lst_development[42])
return False
def _on_toggled(self, check, index):
"""
Callback method for gtk.CheckButton() 'toggled' event.
:param gtk.CheckButton check: the gtk.CheckButton() that called this
method.
:param int index: the index of the Development Environment question
associated with the gtk.CheckButton() that was
toggled.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
check.handler_block(self._lst_handler_id[index])
self._software_model.lst_development[index] = int(check.get_active())
check.handler_unblock(self._lst_handler_id[index])
return False
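# ---------------------------------------------------------------------------
# Illustrative wiring sketch, not from the RTK sources: it shows how this Work
# Book page is meant to be hooked up -- build the page, add it to a
# risk-analysis gtk.Notebook(), and load a Software data model whose
# lst_development list holds the 43 yes/no answers. DummyModel below is a
# hypothetical stand-in for rtk.software.Software.Model, which is not defined
# in this file; a real RTK session would pass the currently selected Software
# record instead.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class DummyModel(object):
        def __init__(self):
            self.lst_development = [0] * 43

    _notebook = gtk.Notebook()
    _page = RiskAnalysis()
    _page.create_risk_analysis_page(_notebook)  # adds the "Development Environment" tab
    _page.load(DummyModel())                    # push answers into the check buttons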
|
normal
|
{
"blob_id": "327371d373819273a2f77f63e0cedee6950dbc46",
"index": 976,
"step-1": "<mask token>\n\n\nclass RiskAnalysis(gtk.VPaned):\n <mask token>\n <mask token>\n\n def create_risk_analysis_page(self, notebook):\n \"\"\"\n Method to create the development environment risk analysis page and add\n it to the risk analysis gtk.Notebook().\n\n :param gtk.Notebook notebook: the gtk.Notebook() instance that will\n hold the development environment risk\n analysis questions.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n _hpaned = gtk.HPaned()\n self.pack1(_hpaned, resize=True, shrink=True)\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Organization'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack1(_frame, True, True)\n _labels = [_(\n u'1. There are separate design and coding organizations.'), _(\n u'2. There is an independent software test organization.'), _(\n u'3. There is an independent software quality assurance organization.'\n ), _(\n u'4. There is an independent software configuration management organization.'\n ), _(\n u'5. There is an independent software verification and validation organization.'\n ), _(\n u'6. A structured programming team will develop the software.'),\n _(\n u'7. The educational level of the software team members is above average.'\n ), _(\n u'8. The experience level of the software team members is above average.'\n )]\n _x_pos, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _x_pos += 125\n _fixed.put(self.chkDevEnvQ1, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ2, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ3, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ4, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ5, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ6, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ7, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ8, _x_pos, _y_pos[7])\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Methods'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack2(_frame, True, True)\n _labels = [_(u'1. Standards are defined and will be enforced.'), _(\n u'2. Software will be developed using a higher order language.'\n ), _(\n u'3. The development process will include formal reviews (PDR, CDR, etc.).'\n ), _(\n u'4. The development process will include frequent walkthroughs.'\n ), _(\n u'5. Development will take a top-down and structured approach.'\n ), _(u'6. Unit development folders will be used.'), _(\n u'7. A software development library will be used.'), _(\n u'8. A formal change and error reporting process will be used.'\n ), _(u'9. 
Progress and status will routinely be reported.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ9, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ10, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ11, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ12, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ13, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ14, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ15, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ16, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ17, _x_pos, _y_pos[8])\n _hpaned = gtk.HPaned()\n self.pack2(_hpaned, resize=True, shrink=True)\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Documentation'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack1(_frame, True, True)\n _labels = [_(\n u' 1. System requirements specifications will be documented.'),\n _(\n u' 2. Software requirements specifications will be documented.'\n ), _(u' 3. Interface design specifications will be documented.'\n ), _(u' 4. Software design specification will be documented.'),\n _(\n u' 5. Test plans, procedures, and reports will be documented.'),\n _(u' 6. The software development plan will be documented.'), _(\n u' 7. The software quality assurance plan will be documented.'),\n _(\n u' 8. The software configuration management plan will be documented.'\n ), _(u' 9. A requirements traceability matrix will be used.'),\n _(u'10. The software version description will be documented.'),\n _(u'11. All software discrepancies will be documented.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ18, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ19, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ20, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ21, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ22, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ23, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ24, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ25, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ26, _x_pos, _y_pos[8])\n _fixed.put(self.chkDevEnvQ27, _x_pos, _y_pos[9])\n _fixed.put(self.chkDevEnvQ28, _x_pos, _y_pos[10])\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Tools & Test Techniques'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack2(_frame, True, True)\n _labels = [_(\n u' 1. The software language requirements will be specified.'),\n _(u' 2. Formal program design language will be used.'), _(\n u' 3. Program design graphical techniques (flowcharts, HIPO, etc.) will be used.'\n ), _(u' 4. Simulation/emulation tools will be used.'), _(\n u' 5. Configuration management tools will be used.'), _(\n u' 6. A code auditing tool will be used.'), _(\n u' 7. A data flow analyzer will be used.'), _(\n u\" 8. A programmer's workbench will be used.\"), _(\n u' 9. Measurement tools will be used.'), _(\n u'10. Software code reviews will be used.'), _(\n u'11. Software branch testing will be used.'), _(\n u'12. Random testing will be used.'), _(\n u'13. Functional testing will be used.'), _(\n u'14. Error and anomaly detection testing will be used.'), _(\n u'15. 
Structure analysis will be used.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ29, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ30, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ31, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ32, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ33, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ34, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ35, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ36, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ37, _x_pos, _y_pos[8])\n _fixed.put(self.chkDevEnvQ38, _x_pos, _y_pos[9])\n _fixed.put(self.chkDevEnvQ39, _x_pos, _y_pos[10])\n _fixed.put(self.chkDevEnvQ40, _x_pos, _y_pos[11])\n _fixed.put(self.chkDevEnvQ41, _x_pos, _y_pos[12])\n _fixed.put(self.chkDevEnvQ42, _x_pos, _y_pos[13])\n _fixed.put(self.chkDevEnvQ43, _x_pos, _y_pos[14])\n _label = gtk.Label()\n _label.set_markup(\"<span weight='bold'>\" + _(\n u'Development\\nEnvironment') + '</span>')\n _label.set_alignment(xalign=0.5, yalign=0.5)\n _label.set_justify(gtk.JUSTIFY_CENTER)\n _label.set_angle(0)\n _label.show_all()\n _label.set_tooltip_text(_(\n u'Assesses risk due to the development environment.'))\n notebook.insert_page(self, tab_label=_label, position=-1)\n return False\n\n def load(self, model):\n \"\"\"\n Method to load the Development Environment Risk Analysis answers.\n\n :param `rtk.software.Software` model: the Software data model to load\n the gtk.ToggleButton() from.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n self._software_model = model\n self.chkDevEnvQ1.set_active(model.lst_development[0])\n self.chkDevEnvQ2.set_active(model.lst_development[1])\n self.chkDevEnvQ3.set_active(model.lst_development[2])\n self.chkDevEnvQ4.set_active(model.lst_development[3])\n self.chkDevEnvQ5.set_active(model.lst_development[4])\n self.chkDevEnvQ6.set_active(model.lst_development[5])\n self.chkDevEnvQ7.set_active(model.lst_development[6])\n self.chkDevEnvQ8.set_active(model.lst_development[7])\n self.chkDevEnvQ9.set_active(model.lst_development[8])\n self.chkDevEnvQ10.set_active(model.lst_development[9])\n self.chkDevEnvQ11.set_active(model.lst_development[10])\n self.chkDevEnvQ12.set_active(model.lst_development[11])\n self.chkDevEnvQ13.set_active(model.lst_development[12])\n self.chkDevEnvQ14.set_active(model.lst_development[13])\n self.chkDevEnvQ15.set_active(model.lst_development[14])\n self.chkDevEnvQ16.set_active(model.lst_development[15])\n self.chkDevEnvQ17.set_active(model.lst_development[16])\n self.chkDevEnvQ18.set_active(model.lst_development[17])\n self.chkDevEnvQ19.set_active(model.lst_development[18])\n self.chkDevEnvQ20.set_active(model.lst_development[19])\n self.chkDevEnvQ21.set_active(model.lst_development[20])\n self.chkDevEnvQ22.set_active(model.lst_development[21])\n self.chkDevEnvQ23.set_active(model.lst_development[22])\n self.chkDevEnvQ24.set_active(model.lst_development[23])\n self.chkDevEnvQ25.set_active(model.lst_development[24])\n self.chkDevEnvQ26.set_active(model.lst_development[25])\n self.chkDevEnvQ27.set_active(model.lst_development[26])\n self.chkDevEnvQ28.set_active(model.lst_development[27])\n self.chkDevEnvQ29.set_active(model.lst_development[28])\n self.chkDevEnvQ30.set_active(model.lst_development[29])\n self.chkDevEnvQ31.set_active(model.lst_development[30])\n self.chkDevEnvQ32.set_active(model.lst_development[31])\n self.chkDevEnvQ33.set_active(model.lst_development[32])\n 
self.chkDevEnvQ34.set_active(model.lst_development[33])\n self.chkDevEnvQ35.set_active(model.lst_development[34])\n self.chkDevEnvQ36.set_active(model.lst_development[35])\n self.chkDevEnvQ37.set_active(model.lst_development[36])\n self.chkDevEnvQ38.set_active(model.lst_development[37])\n self.chkDevEnvQ39.set_active(model.lst_development[38])\n self.chkDevEnvQ40.set_active(model.lst_development[39])\n self.chkDevEnvQ41.set_active(model.lst_development[40])\n self.chkDevEnvQ42.set_active(model.lst_development[41])\n self.chkDevEnvQ43.set_active(model.lst_development[42])\n return False\n\n def _on_toggled(self, check, index):\n \"\"\"\n Callback method for gtk.CheckButton() 'toggled' event.\n\n :param gtk.CheckButton check: the gtk.CheckButton() that called this\n method.\n :param int index: the index of the Development Environment question\n associated with the gtk.CheckButton() that was\n toggled.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n check.handler_block(self._lst_handler_id[index])\n self._software_model.lst_development[index] = int(check.get_active())\n check.handler_unblock(self._lst_handler_id[index])\n return False\n",
"step-2": "<mask token>\n\n\nclass RiskAnalysis(gtk.VPaned):\n <mask token>\n\n def __init__(self):\n \"\"\"\n Method to initialize the development environment risk analysis\n questions Work Book page.\n \"\"\"\n gtk.VPaned.__init__(self)\n self._lst_handler_id = []\n self._software_model = None\n self.chkDevEnvQ1 = Widgets.make_check_button()\n self.chkDevEnvQ2 = Widgets.make_check_button()\n self.chkDevEnvQ3 = Widgets.make_check_button()\n self.chkDevEnvQ4 = Widgets.make_check_button()\n self.chkDevEnvQ5 = Widgets.make_check_button()\n self.chkDevEnvQ6 = Widgets.make_check_button()\n self.chkDevEnvQ7 = Widgets.make_check_button()\n self.chkDevEnvQ8 = Widgets.make_check_button()\n self.chkDevEnvQ9 = Widgets.make_check_button()\n self.chkDevEnvQ10 = Widgets.make_check_button()\n self.chkDevEnvQ11 = Widgets.make_check_button()\n self.chkDevEnvQ12 = Widgets.make_check_button()\n self.chkDevEnvQ13 = Widgets.make_check_button()\n self.chkDevEnvQ14 = Widgets.make_check_button()\n self.chkDevEnvQ15 = Widgets.make_check_button()\n self.chkDevEnvQ16 = Widgets.make_check_button()\n self.chkDevEnvQ17 = Widgets.make_check_button()\n self.chkDevEnvQ18 = Widgets.make_check_button()\n self.chkDevEnvQ19 = Widgets.make_check_button()\n self.chkDevEnvQ20 = Widgets.make_check_button()\n self.chkDevEnvQ21 = Widgets.make_check_button()\n self.chkDevEnvQ22 = Widgets.make_check_button()\n self.chkDevEnvQ23 = Widgets.make_check_button()\n self.chkDevEnvQ24 = Widgets.make_check_button()\n self.chkDevEnvQ25 = Widgets.make_check_button()\n self.chkDevEnvQ26 = Widgets.make_check_button()\n self.chkDevEnvQ27 = Widgets.make_check_button()\n self.chkDevEnvQ28 = Widgets.make_check_button()\n self.chkDevEnvQ29 = Widgets.make_check_button()\n self.chkDevEnvQ30 = Widgets.make_check_button()\n self.chkDevEnvQ31 = Widgets.make_check_button()\n self.chkDevEnvQ32 = Widgets.make_check_button()\n self.chkDevEnvQ33 = Widgets.make_check_button()\n self.chkDevEnvQ34 = Widgets.make_check_button()\n self.chkDevEnvQ35 = Widgets.make_check_button()\n self.chkDevEnvQ36 = Widgets.make_check_button()\n self.chkDevEnvQ37 = Widgets.make_check_button()\n self.chkDevEnvQ38 = Widgets.make_check_button()\n self.chkDevEnvQ39 = Widgets.make_check_button()\n self.chkDevEnvQ40 = Widgets.make_check_button()\n self.chkDevEnvQ41 = Widgets.make_check_button()\n self.chkDevEnvQ42 = Widgets.make_check_button()\n self.chkDevEnvQ43 = Widgets.make_check_button()\n self._lst_handler_id.append(self.chkDevEnvQ1.connect('toggled',\n self._on_toggled, 0))\n self._lst_handler_id.append(self.chkDevEnvQ2.connect('toggled',\n self._on_toggled, 1))\n self._lst_handler_id.append(self.chkDevEnvQ3.connect('toggled',\n self._on_toggled, 2))\n self._lst_handler_id.append(self.chkDevEnvQ4.connect('toggled',\n self._on_toggled, 3))\n self._lst_handler_id.append(self.chkDevEnvQ5.connect('toggled',\n self._on_toggled, 4))\n self._lst_handler_id.append(self.chkDevEnvQ6.connect('toggled',\n self._on_toggled, 5))\n self._lst_handler_id.append(self.chkDevEnvQ7.connect('toggled',\n self._on_toggled, 6))\n self._lst_handler_id.append(self.chkDevEnvQ8.connect('toggled',\n self._on_toggled, 7))\n self._lst_handler_id.append(self.chkDevEnvQ9.connect('toggled',\n self._on_toggled, 8))\n self._lst_handler_id.append(self.chkDevEnvQ10.connect('toggled',\n self._on_toggled, 9))\n self._lst_handler_id.append(self.chkDevEnvQ11.connect('toggled',\n self._on_toggled, 10))\n self._lst_handler_id.append(self.chkDevEnvQ12.connect('toggled',\n self._on_toggled, 11))\n 
self._lst_handler_id.append(self.chkDevEnvQ13.connect('toggled',\n self._on_toggled, 12))\n self._lst_handler_id.append(self.chkDevEnvQ14.connect('toggled',\n self._on_toggled, 13))\n self._lst_handler_id.append(self.chkDevEnvQ15.connect('toggled',\n self._on_toggled, 14))\n self._lst_handler_id.append(self.chkDevEnvQ16.connect('toggled',\n self._on_toggled, 15))\n self._lst_handler_id.append(self.chkDevEnvQ17.connect('toggled',\n self._on_toggled, 16))\n self._lst_handler_id.append(self.chkDevEnvQ18.connect('toggled',\n self._on_toggled, 17))\n self._lst_handler_id.append(self.chkDevEnvQ19.connect('toggled',\n self._on_toggled, 18))\n self._lst_handler_id.append(self.chkDevEnvQ20.connect('toggled',\n self._on_toggled, 19))\n self._lst_handler_id.append(self.chkDevEnvQ21.connect('toggled',\n self._on_toggled, 20))\n self._lst_handler_id.append(self.chkDevEnvQ22.connect('toggled',\n self._on_toggled, 21))\n self._lst_handler_id.append(self.chkDevEnvQ23.connect('toggled',\n self._on_toggled, 22))\n self._lst_handler_id.append(self.chkDevEnvQ24.connect('toggled',\n self._on_toggled, 23))\n self._lst_handler_id.append(self.chkDevEnvQ25.connect('toggled',\n self._on_toggled, 24))\n self._lst_handler_id.append(self.chkDevEnvQ26.connect('toggled',\n self._on_toggled, 25))\n self._lst_handler_id.append(self.chkDevEnvQ27.connect('toggled',\n self._on_toggled, 26))\n self._lst_handler_id.append(self.chkDevEnvQ28.connect('toggled',\n self._on_toggled, 27))\n self._lst_handler_id.append(self.chkDevEnvQ29.connect('toggled',\n self._on_toggled, 28))\n self._lst_handler_id.append(self.chkDevEnvQ30.connect('toggled',\n self._on_toggled, 29))\n self._lst_handler_id.append(self.chkDevEnvQ31.connect('toggled',\n self._on_toggled, 30))\n self._lst_handler_id.append(self.chkDevEnvQ32.connect('toggled',\n self._on_toggled, 31))\n self._lst_handler_id.append(self.chkDevEnvQ33.connect('toggled',\n self._on_toggled, 32))\n self._lst_handler_id.append(self.chkDevEnvQ34.connect('toggled',\n self._on_toggled, 33))\n self._lst_handler_id.append(self.chkDevEnvQ35.connect('toggled',\n self._on_toggled, 34))\n self._lst_handler_id.append(self.chkDevEnvQ36.connect('toggled',\n self._on_toggled, 35))\n self._lst_handler_id.append(self.chkDevEnvQ37.connect('toggled',\n self._on_toggled, 36))\n self._lst_handler_id.append(self.chkDevEnvQ38.connect('toggled',\n self._on_toggled, 37))\n self._lst_handler_id.append(self.chkDevEnvQ39.connect('toggled',\n self._on_toggled, 38))\n self._lst_handler_id.append(self.chkDevEnvQ40.connect('toggled',\n self._on_toggled, 39))\n self._lst_handler_id.append(self.chkDevEnvQ41.connect('toggled',\n self._on_toggled, 40))\n self._lst_handler_id.append(self.chkDevEnvQ42.connect('toggled',\n self._on_toggled, 41))\n self._lst_handler_id.append(self.chkDevEnvQ43.connect('toggled',\n self._on_toggled, 42))\n\n def create_risk_analysis_page(self, notebook):\n \"\"\"\n Method to create the development environment risk analysis page and add\n it to the risk analysis gtk.Notebook().\n\n :param gtk.Notebook notebook: the gtk.Notebook() instance that will\n hold the development environment risk\n analysis questions.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n _hpaned = gtk.HPaned()\n self.pack1(_hpaned, resize=True, shrink=True)\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = 
Widgets.make_frame(label=_(u'Organization'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack1(_frame, True, True)\n _labels = [_(\n u'1. There are separate design and coding organizations.'), _(\n u'2. There is an independent software test organization.'), _(\n u'3. There is an independent software quality assurance organization.'\n ), _(\n u'4. There is an independent software configuration management organization.'\n ), _(\n u'5. There is an independent software verification and validation organization.'\n ), _(\n u'6. A structured programming team will develop the software.'),\n _(\n u'7. The educational level of the software team members is above average.'\n ), _(\n u'8. The experience level of the software team members is above average.'\n )]\n _x_pos, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _x_pos += 125\n _fixed.put(self.chkDevEnvQ1, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ2, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ3, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ4, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ5, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ6, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ7, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ8, _x_pos, _y_pos[7])\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Methods'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack2(_frame, True, True)\n _labels = [_(u'1. Standards are defined and will be enforced.'), _(\n u'2. Software will be developed using a higher order language.'\n ), _(\n u'3. The development process will include formal reviews (PDR, CDR, etc.).'\n ), _(\n u'4. The development process will include frequent walkthroughs.'\n ), _(\n u'5. Development will take a top-down and structured approach.'\n ), _(u'6. Unit development folders will be used.'), _(\n u'7. A software development library will be used.'), _(\n u'8. A formal change and error reporting process will be used.'\n ), _(u'9. Progress and status will routinely be reported.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ9, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ10, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ11, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ12, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ13, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ14, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ15, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ16, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ17, _x_pos, _y_pos[8])\n _hpaned = gtk.HPaned()\n self.pack2(_hpaned, resize=True, shrink=True)\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Documentation'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack1(_frame, True, True)\n _labels = [_(\n u' 1. System requirements specifications will be documented.'),\n _(\n u' 2. Software requirements specifications will be documented.'\n ), _(u' 3. Interface design specifications will be documented.'\n ), _(u' 4. Software design specification will be documented.'),\n _(\n u' 5. 
Test plans, procedures, and reports will be documented.'),\n _(u' 6. The software development plan will be documented.'), _(\n u' 7. The software quality assurance plan will be documented.'),\n _(\n u' 8. The software configuration management plan will be documented.'\n ), _(u' 9. A requirements traceability matrix will be used.'),\n _(u'10. The software version description will be documented.'),\n _(u'11. All software discrepancies will be documented.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ18, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ19, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ20, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ21, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ22, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ23, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ24, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ25, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ26, _x_pos, _y_pos[8])\n _fixed.put(self.chkDevEnvQ27, _x_pos, _y_pos[9])\n _fixed.put(self.chkDevEnvQ28, _x_pos, _y_pos[10])\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Tools & Test Techniques'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack2(_frame, True, True)\n _labels = [_(\n u' 1. The software language requirements will be specified.'),\n _(u' 2. Formal program design language will be used.'), _(\n u' 3. Program design graphical techniques (flowcharts, HIPO, etc.) will be used.'\n ), _(u' 4. Simulation/emulation tools will be used.'), _(\n u' 5. Configuration management tools will be used.'), _(\n u' 6. A code auditing tool will be used.'), _(\n u' 7. A data flow analyzer will be used.'), _(\n u\" 8. A programmer's workbench will be used.\"), _(\n u' 9. Measurement tools will be used.'), _(\n u'10. Software code reviews will be used.'), _(\n u'11. Software branch testing will be used.'), _(\n u'12. Random testing will be used.'), _(\n u'13. Functional testing will be used.'), _(\n u'14. Error and anomaly detection testing will be used.'), _(\n u'15. 
Structure analysis will be used.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ29, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ30, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ31, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ32, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ33, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ34, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ35, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ36, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ37, _x_pos, _y_pos[8])\n _fixed.put(self.chkDevEnvQ38, _x_pos, _y_pos[9])\n _fixed.put(self.chkDevEnvQ39, _x_pos, _y_pos[10])\n _fixed.put(self.chkDevEnvQ40, _x_pos, _y_pos[11])\n _fixed.put(self.chkDevEnvQ41, _x_pos, _y_pos[12])\n _fixed.put(self.chkDevEnvQ42, _x_pos, _y_pos[13])\n _fixed.put(self.chkDevEnvQ43, _x_pos, _y_pos[14])\n _label = gtk.Label()\n _label.set_markup(\"<span weight='bold'>\" + _(\n u'Development\\nEnvironment') + '</span>')\n _label.set_alignment(xalign=0.5, yalign=0.5)\n _label.set_justify(gtk.JUSTIFY_CENTER)\n _label.set_angle(0)\n _label.show_all()\n _label.set_tooltip_text(_(\n u'Assesses risk due to the development environment.'))\n notebook.insert_page(self, tab_label=_label, position=-1)\n return False\n\n def load(self, model):\n \"\"\"\n Method to load the Development Environment Risk Analysis answers.\n\n :param `rtk.software.Software` model: the Software data model to load\n the gtk.ToggleButton() from.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n self._software_model = model\n self.chkDevEnvQ1.set_active(model.lst_development[0])\n self.chkDevEnvQ2.set_active(model.lst_development[1])\n self.chkDevEnvQ3.set_active(model.lst_development[2])\n self.chkDevEnvQ4.set_active(model.lst_development[3])\n self.chkDevEnvQ5.set_active(model.lst_development[4])\n self.chkDevEnvQ6.set_active(model.lst_development[5])\n self.chkDevEnvQ7.set_active(model.lst_development[6])\n self.chkDevEnvQ8.set_active(model.lst_development[7])\n self.chkDevEnvQ9.set_active(model.lst_development[8])\n self.chkDevEnvQ10.set_active(model.lst_development[9])\n self.chkDevEnvQ11.set_active(model.lst_development[10])\n self.chkDevEnvQ12.set_active(model.lst_development[11])\n self.chkDevEnvQ13.set_active(model.lst_development[12])\n self.chkDevEnvQ14.set_active(model.lst_development[13])\n self.chkDevEnvQ15.set_active(model.lst_development[14])\n self.chkDevEnvQ16.set_active(model.lst_development[15])\n self.chkDevEnvQ17.set_active(model.lst_development[16])\n self.chkDevEnvQ18.set_active(model.lst_development[17])\n self.chkDevEnvQ19.set_active(model.lst_development[18])\n self.chkDevEnvQ20.set_active(model.lst_development[19])\n self.chkDevEnvQ21.set_active(model.lst_development[20])\n self.chkDevEnvQ22.set_active(model.lst_development[21])\n self.chkDevEnvQ23.set_active(model.lst_development[22])\n self.chkDevEnvQ24.set_active(model.lst_development[23])\n self.chkDevEnvQ25.set_active(model.lst_development[24])\n self.chkDevEnvQ26.set_active(model.lst_development[25])\n self.chkDevEnvQ27.set_active(model.lst_development[26])\n self.chkDevEnvQ28.set_active(model.lst_development[27])\n self.chkDevEnvQ29.set_active(model.lst_development[28])\n self.chkDevEnvQ30.set_active(model.lst_development[29])\n self.chkDevEnvQ31.set_active(model.lst_development[30])\n self.chkDevEnvQ32.set_active(model.lst_development[31])\n self.chkDevEnvQ33.set_active(model.lst_development[32])\n 
self.chkDevEnvQ34.set_active(model.lst_development[33])\n self.chkDevEnvQ35.set_active(model.lst_development[34])\n self.chkDevEnvQ36.set_active(model.lst_development[35])\n self.chkDevEnvQ37.set_active(model.lst_development[36])\n self.chkDevEnvQ38.set_active(model.lst_development[37])\n self.chkDevEnvQ39.set_active(model.lst_development[38])\n self.chkDevEnvQ40.set_active(model.lst_development[39])\n self.chkDevEnvQ41.set_active(model.lst_development[40])\n self.chkDevEnvQ42.set_active(model.lst_development[41])\n self.chkDevEnvQ43.set_active(model.lst_development[42])\n return False\n\n def _on_toggled(self, check, index):\n \"\"\"\n Callback method for gtk.CheckButton() 'toggled' event.\n\n :param gtk.CheckButton check: the gtk.CheckButton() that called this\n method.\n :param int index: the index of the Development Environment question\n associated with the gtk.CheckButton() that was\n toggled.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n check.handler_block(self._lst_handler_id[index])\n self._software_model.lst_development[index] = int(check.get_active())\n check.handler_unblock(self._lst_handler_id[index])\n return False\n",
"step-3": "<mask token>\ntry:\n import pygtk\n pygtk.require('2.0')\nexcept ImportError:\n sys.exit(1)\ntry:\n import gtk\nexcept ImportError:\n sys.exit(1)\ntry:\n import gtk.glade\nexcept ImportError:\n sys.exit(1)\ntry:\n import Configuration\n import gui.gtk.Widgets as Widgets\nexcept ImportError:\n import rtk.Configuration as Configuration\n import rtk.gui.gtk.Widgets as Widgets\n<mask token>\ntry:\n locale.setlocale(locale.LC_ALL, Configuration.LOCALE)\nexcept locale.Error:\n locale.setlocale(locale.LC_ALL, '')\n<mask token>\n\n\nclass RiskAnalysis(gtk.VPaned):\n \"\"\"\n The Work Book view for analyzing and displaying the risk associated with\n the development environment. The attributes of a development environment\n Work Book view are:\n\n :ivar list _lst_handler_id: the list of gtk.Widget() signal handler IDs.\n :ivar _software_model: the :py:class:`rtk.software.Software.Model` to\n display.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Method to initialize the development environment risk analysis\n questions Work Book page.\n \"\"\"\n gtk.VPaned.__init__(self)\n self._lst_handler_id = []\n self._software_model = None\n self.chkDevEnvQ1 = Widgets.make_check_button()\n self.chkDevEnvQ2 = Widgets.make_check_button()\n self.chkDevEnvQ3 = Widgets.make_check_button()\n self.chkDevEnvQ4 = Widgets.make_check_button()\n self.chkDevEnvQ5 = Widgets.make_check_button()\n self.chkDevEnvQ6 = Widgets.make_check_button()\n self.chkDevEnvQ7 = Widgets.make_check_button()\n self.chkDevEnvQ8 = Widgets.make_check_button()\n self.chkDevEnvQ9 = Widgets.make_check_button()\n self.chkDevEnvQ10 = Widgets.make_check_button()\n self.chkDevEnvQ11 = Widgets.make_check_button()\n self.chkDevEnvQ12 = Widgets.make_check_button()\n self.chkDevEnvQ13 = Widgets.make_check_button()\n self.chkDevEnvQ14 = Widgets.make_check_button()\n self.chkDevEnvQ15 = Widgets.make_check_button()\n self.chkDevEnvQ16 = Widgets.make_check_button()\n self.chkDevEnvQ17 = Widgets.make_check_button()\n self.chkDevEnvQ18 = Widgets.make_check_button()\n self.chkDevEnvQ19 = Widgets.make_check_button()\n self.chkDevEnvQ20 = Widgets.make_check_button()\n self.chkDevEnvQ21 = Widgets.make_check_button()\n self.chkDevEnvQ22 = Widgets.make_check_button()\n self.chkDevEnvQ23 = Widgets.make_check_button()\n self.chkDevEnvQ24 = Widgets.make_check_button()\n self.chkDevEnvQ25 = Widgets.make_check_button()\n self.chkDevEnvQ26 = Widgets.make_check_button()\n self.chkDevEnvQ27 = Widgets.make_check_button()\n self.chkDevEnvQ28 = Widgets.make_check_button()\n self.chkDevEnvQ29 = Widgets.make_check_button()\n self.chkDevEnvQ30 = Widgets.make_check_button()\n self.chkDevEnvQ31 = Widgets.make_check_button()\n self.chkDevEnvQ32 = Widgets.make_check_button()\n self.chkDevEnvQ33 = Widgets.make_check_button()\n self.chkDevEnvQ34 = Widgets.make_check_button()\n self.chkDevEnvQ35 = Widgets.make_check_button()\n self.chkDevEnvQ36 = Widgets.make_check_button()\n self.chkDevEnvQ37 = Widgets.make_check_button()\n self.chkDevEnvQ38 = Widgets.make_check_button()\n self.chkDevEnvQ39 = Widgets.make_check_button()\n self.chkDevEnvQ40 = Widgets.make_check_button()\n self.chkDevEnvQ41 = Widgets.make_check_button()\n self.chkDevEnvQ42 = Widgets.make_check_button()\n self.chkDevEnvQ43 = Widgets.make_check_button()\n self._lst_handler_id.append(self.chkDevEnvQ1.connect('toggled',\n self._on_toggled, 0))\n self._lst_handler_id.append(self.chkDevEnvQ2.connect('toggled',\n self._on_toggled, 1))\n self._lst_handler_id.append(self.chkDevEnvQ3.connect('toggled',\n self._on_toggled, 
2))\n self._lst_handler_id.append(self.chkDevEnvQ4.connect('toggled',\n self._on_toggled, 3))\n self._lst_handler_id.append(self.chkDevEnvQ5.connect('toggled',\n self._on_toggled, 4))\n self._lst_handler_id.append(self.chkDevEnvQ6.connect('toggled',\n self._on_toggled, 5))\n self._lst_handler_id.append(self.chkDevEnvQ7.connect('toggled',\n self._on_toggled, 6))\n self._lst_handler_id.append(self.chkDevEnvQ8.connect('toggled',\n self._on_toggled, 7))\n self._lst_handler_id.append(self.chkDevEnvQ9.connect('toggled',\n self._on_toggled, 8))\n self._lst_handler_id.append(self.chkDevEnvQ10.connect('toggled',\n self._on_toggled, 9))\n self._lst_handler_id.append(self.chkDevEnvQ11.connect('toggled',\n self._on_toggled, 10))\n self._lst_handler_id.append(self.chkDevEnvQ12.connect('toggled',\n self._on_toggled, 11))\n self._lst_handler_id.append(self.chkDevEnvQ13.connect('toggled',\n self._on_toggled, 12))\n self._lst_handler_id.append(self.chkDevEnvQ14.connect('toggled',\n self._on_toggled, 13))\n self._lst_handler_id.append(self.chkDevEnvQ15.connect('toggled',\n self._on_toggled, 14))\n self._lst_handler_id.append(self.chkDevEnvQ16.connect('toggled',\n self._on_toggled, 15))\n self._lst_handler_id.append(self.chkDevEnvQ17.connect('toggled',\n self._on_toggled, 16))\n self._lst_handler_id.append(self.chkDevEnvQ18.connect('toggled',\n self._on_toggled, 17))\n self._lst_handler_id.append(self.chkDevEnvQ19.connect('toggled',\n self._on_toggled, 18))\n self._lst_handler_id.append(self.chkDevEnvQ20.connect('toggled',\n self._on_toggled, 19))\n self._lst_handler_id.append(self.chkDevEnvQ21.connect('toggled',\n self._on_toggled, 20))\n self._lst_handler_id.append(self.chkDevEnvQ22.connect('toggled',\n self._on_toggled, 21))\n self._lst_handler_id.append(self.chkDevEnvQ23.connect('toggled',\n self._on_toggled, 22))\n self._lst_handler_id.append(self.chkDevEnvQ24.connect('toggled',\n self._on_toggled, 23))\n self._lst_handler_id.append(self.chkDevEnvQ25.connect('toggled',\n self._on_toggled, 24))\n self._lst_handler_id.append(self.chkDevEnvQ26.connect('toggled',\n self._on_toggled, 25))\n self._lst_handler_id.append(self.chkDevEnvQ27.connect('toggled',\n self._on_toggled, 26))\n self._lst_handler_id.append(self.chkDevEnvQ28.connect('toggled',\n self._on_toggled, 27))\n self._lst_handler_id.append(self.chkDevEnvQ29.connect('toggled',\n self._on_toggled, 28))\n self._lst_handler_id.append(self.chkDevEnvQ30.connect('toggled',\n self._on_toggled, 29))\n self._lst_handler_id.append(self.chkDevEnvQ31.connect('toggled',\n self._on_toggled, 30))\n self._lst_handler_id.append(self.chkDevEnvQ32.connect('toggled',\n self._on_toggled, 31))\n self._lst_handler_id.append(self.chkDevEnvQ33.connect('toggled',\n self._on_toggled, 32))\n self._lst_handler_id.append(self.chkDevEnvQ34.connect('toggled',\n self._on_toggled, 33))\n self._lst_handler_id.append(self.chkDevEnvQ35.connect('toggled',\n self._on_toggled, 34))\n self._lst_handler_id.append(self.chkDevEnvQ36.connect('toggled',\n self._on_toggled, 35))\n self._lst_handler_id.append(self.chkDevEnvQ37.connect('toggled',\n self._on_toggled, 36))\n self._lst_handler_id.append(self.chkDevEnvQ38.connect('toggled',\n self._on_toggled, 37))\n self._lst_handler_id.append(self.chkDevEnvQ39.connect('toggled',\n self._on_toggled, 38))\n self._lst_handler_id.append(self.chkDevEnvQ40.connect('toggled',\n self._on_toggled, 39))\n self._lst_handler_id.append(self.chkDevEnvQ41.connect('toggled',\n self._on_toggled, 40))\n 
self._lst_handler_id.append(self.chkDevEnvQ42.connect('toggled',\n self._on_toggled, 41))\n self._lst_handler_id.append(self.chkDevEnvQ43.connect('toggled',\n self._on_toggled, 42))\n\n def create_risk_analysis_page(self, notebook):\n \"\"\"\n Method to create the development environment risk analysis page and add\n it to the risk analysis gtk.Notebook().\n\n :param gtk.Notebook notebook: the gtk.Notebook() instance that will\n hold the development environment risk\n analysis questions.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n _hpaned = gtk.HPaned()\n self.pack1(_hpaned, resize=True, shrink=True)\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Organization'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack1(_frame, True, True)\n _labels = [_(\n u'1. There are separate design and coding organizations.'), _(\n u'2. There is an independent software test organization.'), _(\n u'3. There is an independent software quality assurance organization.'\n ), _(\n u'4. There is an independent software configuration management organization.'\n ), _(\n u'5. There is an independent software verification and validation organization.'\n ), _(\n u'6. A structured programming team will develop the software.'),\n _(\n u'7. The educational level of the software team members is above average.'\n ), _(\n u'8. The experience level of the software team members is above average.'\n )]\n _x_pos, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _x_pos += 125\n _fixed.put(self.chkDevEnvQ1, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ2, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ3, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ4, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ5, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ6, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ7, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ8, _x_pos, _y_pos[7])\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Methods'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack2(_frame, True, True)\n _labels = [_(u'1. Standards are defined and will be enforced.'), _(\n u'2. Software will be developed using a higher order language.'\n ), _(\n u'3. The development process will include formal reviews (PDR, CDR, etc.).'\n ), _(\n u'4. The development process will include frequent walkthroughs.'\n ), _(\n u'5. Development will take a top-down and structured approach.'\n ), _(u'6. Unit development folders will be used.'), _(\n u'7. A software development library will be used.'), _(\n u'8. A formal change and error reporting process will be used.'\n ), _(u'9. 
Progress and status will routinely be reported.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ9, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ10, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ11, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ12, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ13, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ14, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ15, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ16, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ17, _x_pos, _y_pos[8])\n _hpaned = gtk.HPaned()\n self.pack2(_hpaned, resize=True, shrink=True)\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Documentation'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack1(_frame, True, True)\n _labels = [_(\n u' 1. System requirements specifications will be documented.'),\n _(\n u' 2. Software requirements specifications will be documented.'\n ), _(u' 3. Interface design specifications will be documented.'\n ), _(u' 4. Software design specification will be documented.'),\n _(\n u' 5. Test plans, procedures, and reports will be documented.'),\n _(u' 6. The software development plan will be documented.'), _(\n u' 7. The software quality assurance plan will be documented.'),\n _(\n u' 8. The software configuration management plan will be documented.'\n ), _(u' 9. A requirements traceability matrix will be used.'),\n _(u'10. The software version description will be documented.'),\n _(u'11. All software discrepancies will be documented.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ18, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ19, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ20, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ21, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ22, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ23, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ24, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ25, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ26, _x_pos, _y_pos[8])\n _fixed.put(self.chkDevEnvQ27, _x_pos, _y_pos[9])\n _fixed.put(self.chkDevEnvQ28, _x_pos, _y_pos[10])\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Tools & Test Techniques'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack2(_frame, True, True)\n _labels = [_(\n u' 1. The software language requirements will be specified.'),\n _(u' 2. Formal program design language will be used.'), _(\n u' 3. Program design graphical techniques (flowcharts, HIPO, etc.) will be used.'\n ), _(u' 4. Simulation/emulation tools will be used.'), _(\n u' 5. Configuration management tools will be used.'), _(\n u' 6. A code auditing tool will be used.'), _(\n u' 7. A data flow analyzer will be used.'), _(\n u\" 8. A programmer's workbench will be used.\"), _(\n u' 9. Measurement tools will be used.'), _(\n u'10. Software code reviews will be used.'), _(\n u'11. Software branch testing will be used.'), _(\n u'12. Random testing will be used.'), _(\n u'13. Functional testing will be used.'), _(\n u'14. Error and anomaly detection testing will be used.'), _(\n u'15. 
Structure analysis will be used.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ29, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ30, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ31, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ32, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ33, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ34, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ35, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ36, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ37, _x_pos, _y_pos[8])\n _fixed.put(self.chkDevEnvQ38, _x_pos, _y_pos[9])\n _fixed.put(self.chkDevEnvQ39, _x_pos, _y_pos[10])\n _fixed.put(self.chkDevEnvQ40, _x_pos, _y_pos[11])\n _fixed.put(self.chkDevEnvQ41, _x_pos, _y_pos[12])\n _fixed.put(self.chkDevEnvQ42, _x_pos, _y_pos[13])\n _fixed.put(self.chkDevEnvQ43, _x_pos, _y_pos[14])\n _label = gtk.Label()\n _label.set_markup(\"<span weight='bold'>\" + _(\n u'Development\\nEnvironment') + '</span>')\n _label.set_alignment(xalign=0.5, yalign=0.5)\n _label.set_justify(gtk.JUSTIFY_CENTER)\n _label.set_angle(0)\n _label.show_all()\n _label.set_tooltip_text(_(\n u'Assesses risk due to the development environment.'))\n notebook.insert_page(self, tab_label=_label, position=-1)\n return False\n\n def load(self, model):\n \"\"\"\n Method to load the Development Environment Risk Analysis answers.\n\n :param `rtk.software.Software` model: the Software data model to load\n the gtk.ToggleButton() from.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n self._software_model = model\n self.chkDevEnvQ1.set_active(model.lst_development[0])\n self.chkDevEnvQ2.set_active(model.lst_development[1])\n self.chkDevEnvQ3.set_active(model.lst_development[2])\n self.chkDevEnvQ4.set_active(model.lst_development[3])\n self.chkDevEnvQ5.set_active(model.lst_development[4])\n self.chkDevEnvQ6.set_active(model.lst_development[5])\n self.chkDevEnvQ7.set_active(model.lst_development[6])\n self.chkDevEnvQ8.set_active(model.lst_development[7])\n self.chkDevEnvQ9.set_active(model.lst_development[8])\n self.chkDevEnvQ10.set_active(model.lst_development[9])\n self.chkDevEnvQ11.set_active(model.lst_development[10])\n self.chkDevEnvQ12.set_active(model.lst_development[11])\n self.chkDevEnvQ13.set_active(model.lst_development[12])\n self.chkDevEnvQ14.set_active(model.lst_development[13])\n self.chkDevEnvQ15.set_active(model.lst_development[14])\n self.chkDevEnvQ16.set_active(model.lst_development[15])\n self.chkDevEnvQ17.set_active(model.lst_development[16])\n self.chkDevEnvQ18.set_active(model.lst_development[17])\n self.chkDevEnvQ19.set_active(model.lst_development[18])\n self.chkDevEnvQ20.set_active(model.lst_development[19])\n self.chkDevEnvQ21.set_active(model.lst_development[20])\n self.chkDevEnvQ22.set_active(model.lst_development[21])\n self.chkDevEnvQ23.set_active(model.lst_development[22])\n self.chkDevEnvQ24.set_active(model.lst_development[23])\n self.chkDevEnvQ25.set_active(model.lst_development[24])\n self.chkDevEnvQ26.set_active(model.lst_development[25])\n self.chkDevEnvQ27.set_active(model.lst_development[26])\n self.chkDevEnvQ28.set_active(model.lst_development[27])\n self.chkDevEnvQ29.set_active(model.lst_development[28])\n self.chkDevEnvQ30.set_active(model.lst_development[29])\n self.chkDevEnvQ31.set_active(model.lst_development[30])\n self.chkDevEnvQ32.set_active(model.lst_development[31])\n self.chkDevEnvQ33.set_active(model.lst_development[32])\n 
self.chkDevEnvQ34.set_active(model.lst_development[33])\n self.chkDevEnvQ35.set_active(model.lst_development[34])\n self.chkDevEnvQ36.set_active(model.lst_development[35])\n self.chkDevEnvQ37.set_active(model.lst_development[36])\n self.chkDevEnvQ38.set_active(model.lst_development[37])\n self.chkDevEnvQ39.set_active(model.lst_development[38])\n self.chkDevEnvQ40.set_active(model.lst_development[39])\n self.chkDevEnvQ41.set_active(model.lst_development[40])\n self.chkDevEnvQ42.set_active(model.lst_development[41])\n self.chkDevEnvQ43.set_active(model.lst_development[42])\n return False\n\n def _on_toggled(self, check, index):\n \"\"\"\n Callback method for gtk.CheckButton() 'toggled' event.\n\n :param gtk.CheckButton check: the gtk.CheckButton() that called this\n method.\n :param int index: the index of the Development Environment question\n associated with the gtk.CheckButton() that was\n toggled.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n check.handler_block(self._lst_handler_id[index])\n self._software_model.lst_development[index] = int(check.get_active())\n check.handler_unblock(self._lst_handler_id[index])\n return False\n",
"step-4": "<mask token>\nimport sys\nimport gettext\nimport locale\ntry:\n import pygtk\n pygtk.require('2.0')\nexcept ImportError:\n sys.exit(1)\ntry:\n import gtk\nexcept ImportError:\n sys.exit(1)\ntry:\n import gtk.glade\nexcept ImportError:\n sys.exit(1)\ntry:\n import Configuration\n import gui.gtk.Widgets as Widgets\nexcept ImportError:\n import rtk.Configuration as Configuration\n import rtk.gui.gtk.Widgets as Widgets\n__author__ = 'Andrew Rowland'\n__email__ = 'andrew.rowland@reliaqual.com'\n__organization__ = 'ReliaQual Associates, LLC'\n__copyright__ = 'Copyright 2007 - 2015 Andrew \"weibullguy\" Rowland'\ntry:\n locale.setlocale(locale.LC_ALL, Configuration.LOCALE)\nexcept locale.Error:\n locale.setlocale(locale.LC_ALL, '')\n_ = gettext.gettext\n\n\nclass RiskAnalysis(gtk.VPaned):\n \"\"\"\n The Work Book view for analyzing and displaying the risk associated with\n the development environment. The attributes of a development environment\n Work Book view are:\n\n :ivar list _lst_handler_id: the list of gtk.Widget() signal handler IDs.\n :ivar _software_model: the :py:class:`rtk.software.Software.Model` to\n display.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Method to initialize the development environment risk analysis\n questions Work Book page.\n \"\"\"\n gtk.VPaned.__init__(self)\n self._lst_handler_id = []\n self._software_model = None\n self.chkDevEnvQ1 = Widgets.make_check_button()\n self.chkDevEnvQ2 = Widgets.make_check_button()\n self.chkDevEnvQ3 = Widgets.make_check_button()\n self.chkDevEnvQ4 = Widgets.make_check_button()\n self.chkDevEnvQ5 = Widgets.make_check_button()\n self.chkDevEnvQ6 = Widgets.make_check_button()\n self.chkDevEnvQ7 = Widgets.make_check_button()\n self.chkDevEnvQ8 = Widgets.make_check_button()\n self.chkDevEnvQ9 = Widgets.make_check_button()\n self.chkDevEnvQ10 = Widgets.make_check_button()\n self.chkDevEnvQ11 = Widgets.make_check_button()\n self.chkDevEnvQ12 = Widgets.make_check_button()\n self.chkDevEnvQ13 = Widgets.make_check_button()\n self.chkDevEnvQ14 = Widgets.make_check_button()\n self.chkDevEnvQ15 = Widgets.make_check_button()\n self.chkDevEnvQ16 = Widgets.make_check_button()\n self.chkDevEnvQ17 = Widgets.make_check_button()\n self.chkDevEnvQ18 = Widgets.make_check_button()\n self.chkDevEnvQ19 = Widgets.make_check_button()\n self.chkDevEnvQ20 = Widgets.make_check_button()\n self.chkDevEnvQ21 = Widgets.make_check_button()\n self.chkDevEnvQ22 = Widgets.make_check_button()\n self.chkDevEnvQ23 = Widgets.make_check_button()\n self.chkDevEnvQ24 = Widgets.make_check_button()\n self.chkDevEnvQ25 = Widgets.make_check_button()\n self.chkDevEnvQ26 = Widgets.make_check_button()\n self.chkDevEnvQ27 = Widgets.make_check_button()\n self.chkDevEnvQ28 = Widgets.make_check_button()\n self.chkDevEnvQ29 = Widgets.make_check_button()\n self.chkDevEnvQ30 = Widgets.make_check_button()\n self.chkDevEnvQ31 = Widgets.make_check_button()\n self.chkDevEnvQ32 = Widgets.make_check_button()\n self.chkDevEnvQ33 = Widgets.make_check_button()\n self.chkDevEnvQ34 = Widgets.make_check_button()\n self.chkDevEnvQ35 = Widgets.make_check_button()\n self.chkDevEnvQ36 = Widgets.make_check_button()\n self.chkDevEnvQ37 = Widgets.make_check_button()\n self.chkDevEnvQ38 = Widgets.make_check_button()\n self.chkDevEnvQ39 = Widgets.make_check_button()\n self.chkDevEnvQ40 = Widgets.make_check_button()\n self.chkDevEnvQ41 = Widgets.make_check_button()\n self.chkDevEnvQ42 = Widgets.make_check_button()\n self.chkDevEnvQ43 = Widgets.make_check_button()\n 
self._lst_handler_id.append(self.chkDevEnvQ1.connect('toggled',\n self._on_toggled, 0))\n self._lst_handler_id.append(self.chkDevEnvQ2.connect('toggled',\n self._on_toggled, 1))\n self._lst_handler_id.append(self.chkDevEnvQ3.connect('toggled',\n self._on_toggled, 2))\n self._lst_handler_id.append(self.chkDevEnvQ4.connect('toggled',\n self._on_toggled, 3))\n self._lst_handler_id.append(self.chkDevEnvQ5.connect('toggled',\n self._on_toggled, 4))\n self._lst_handler_id.append(self.chkDevEnvQ6.connect('toggled',\n self._on_toggled, 5))\n self._lst_handler_id.append(self.chkDevEnvQ7.connect('toggled',\n self._on_toggled, 6))\n self._lst_handler_id.append(self.chkDevEnvQ8.connect('toggled',\n self._on_toggled, 7))\n self._lst_handler_id.append(self.chkDevEnvQ9.connect('toggled',\n self._on_toggled, 8))\n self._lst_handler_id.append(self.chkDevEnvQ10.connect('toggled',\n self._on_toggled, 9))\n self._lst_handler_id.append(self.chkDevEnvQ11.connect('toggled',\n self._on_toggled, 10))\n self._lst_handler_id.append(self.chkDevEnvQ12.connect('toggled',\n self._on_toggled, 11))\n self._lst_handler_id.append(self.chkDevEnvQ13.connect('toggled',\n self._on_toggled, 12))\n self._lst_handler_id.append(self.chkDevEnvQ14.connect('toggled',\n self._on_toggled, 13))\n self._lst_handler_id.append(self.chkDevEnvQ15.connect('toggled',\n self._on_toggled, 14))\n self._lst_handler_id.append(self.chkDevEnvQ16.connect('toggled',\n self._on_toggled, 15))\n self._lst_handler_id.append(self.chkDevEnvQ17.connect('toggled',\n self._on_toggled, 16))\n self._lst_handler_id.append(self.chkDevEnvQ18.connect('toggled',\n self._on_toggled, 17))\n self._lst_handler_id.append(self.chkDevEnvQ19.connect('toggled',\n self._on_toggled, 18))\n self._lst_handler_id.append(self.chkDevEnvQ20.connect('toggled',\n self._on_toggled, 19))\n self._lst_handler_id.append(self.chkDevEnvQ21.connect('toggled',\n self._on_toggled, 20))\n self._lst_handler_id.append(self.chkDevEnvQ22.connect('toggled',\n self._on_toggled, 21))\n self._lst_handler_id.append(self.chkDevEnvQ23.connect('toggled',\n self._on_toggled, 22))\n self._lst_handler_id.append(self.chkDevEnvQ24.connect('toggled',\n self._on_toggled, 23))\n self._lst_handler_id.append(self.chkDevEnvQ25.connect('toggled',\n self._on_toggled, 24))\n self._lst_handler_id.append(self.chkDevEnvQ26.connect('toggled',\n self._on_toggled, 25))\n self._lst_handler_id.append(self.chkDevEnvQ27.connect('toggled',\n self._on_toggled, 26))\n self._lst_handler_id.append(self.chkDevEnvQ28.connect('toggled',\n self._on_toggled, 27))\n self._lst_handler_id.append(self.chkDevEnvQ29.connect('toggled',\n self._on_toggled, 28))\n self._lst_handler_id.append(self.chkDevEnvQ30.connect('toggled',\n self._on_toggled, 29))\n self._lst_handler_id.append(self.chkDevEnvQ31.connect('toggled',\n self._on_toggled, 30))\n self._lst_handler_id.append(self.chkDevEnvQ32.connect('toggled',\n self._on_toggled, 31))\n self._lst_handler_id.append(self.chkDevEnvQ33.connect('toggled',\n self._on_toggled, 32))\n self._lst_handler_id.append(self.chkDevEnvQ34.connect('toggled',\n self._on_toggled, 33))\n self._lst_handler_id.append(self.chkDevEnvQ35.connect('toggled',\n self._on_toggled, 34))\n self._lst_handler_id.append(self.chkDevEnvQ36.connect('toggled',\n self._on_toggled, 35))\n self._lst_handler_id.append(self.chkDevEnvQ37.connect('toggled',\n self._on_toggled, 36))\n self._lst_handler_id.append(self.chkDevEnvQ38.connect('toggled',\n self._on_toggled, 37))\n self._lst_handler_id.append(self.chkDevEnvQ39.connect('toggled',\n 
self._on_toggled, 38))\n self._lst_handler_id.append(self.chkDevEnvQ40.connect('toggled',\n self._on_toggled, 39))\n self._lst_handler_id.append(self.chkDevEnvQ41.connect('toggled',\n self._on_toggled, 40))\n self._lst_handler_id.append(self.chkDevEnvQ42.connect('toggled',\n self._on_toggled, 41))\n self._lst_handler_id.append(self.chkDevEnvQ43.connect('toggled',\n self._on_toggled, 42))\n\n def create_risk_analysis_page(self, notebook):\n \"\"\"\n Method to create the development environment risk analysis page and add\n it to the risk analysis gtk.Notebook().\n\n :param gtk.Notebook notebook: the gtk.Notebook() instance that will\n hold the development environment risk\n analysis questions.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n _hpaned = gtk.HPaned()\n self.pack1(_hpaned, resize=True, shrink=True)\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Organization'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack1(_frame, True, True)\n _labels = [_(\n u'1. There are separate design and coding organizations.'), _(\n u'2. There is an independent software test organization.'), _(\n u'3. There is an independent software quality assurance organization.'\n ), _(\n u'4. There is an independent software configuration management organization.'\n ), _(\n u'5. There is an independent software verification and validation organization.'\n ), _(\n u'6. A structured programming team will develop the software.'),\n _(\n u'7. The educational level of the software team members is above average.'\n ), _(\n u'8. The experience level of the software team members is above average.'\n )]\n _x_pos, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _x_pos += 125\n _fixed.put(self.chkDevEnvQ1, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ2, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ3, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ4, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ5, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ6, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ7, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ8, _x_pos, _y_pos[7])\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Methods'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack2(_frame, True, True)\n _labels = [_(u'1. Standards are defined and will be enforced.'), _(\n u'2. Software will be developed using a higher order language.'\n ), _(\n u'3. The development process will include formal reviews (PDR, CDR, etc.).'\n ), _(\n u'4. The development process will include frequent walkthroughs.'\n ), _(\n u'5. Development will take a top-down and structured approach.'\n ), _(u'6. Unit development folders will be used.'), _(\n u'7. A software development library will be used.'), _(\n u'8. A formal change and error reporting process will be used.'\n ), _(u'9. 
Progress and status will routinely be reported.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ9, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ10, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ11, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ12, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ13, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ14, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ15, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ16, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ17, _x_pos, _y_pos[8])\n _hpaned = gtk.HPaned()\n self.pack2(_hpaned, resize=True, shrink=True)\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Documentation'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack1(_frame, True, True)\n _labels = [_(\n u' 1. System requirements specifications will be documented.'),\n _(\n u' 2. Software requirements specifications will be documented.'\n ), _(u' 3. Interface design specifications will be documented.'\n ), _(u' 4. Software design specification will be documented.'),\n _(\n u' 5. Test plans, procedures, and reports will be documented.'),\n _(u' 6. The software development plan will be documented.'), _(\n u' 7. The software quality assurance plan will be documented.'),\n _(\n u' 8. The software configuration management plan will be documented.'\n ), _(u' 9. A requirements traceability matrix will be used.'),\n _(u'10. The software version description will be documented.'),\n _(u'11. All software discrepancies will be documented.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ18, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ19, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ20, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ21, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ22, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ23, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ24, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ25, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ26, _x_pos, _y_pos[8])\n _fixed.put(self.chkDevEnvQ27, _x_pos, _y_pos[9])\n _fixed.put(self.chkDevEnvQ28, _x_pos, _y_pos[10])\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Tools & Test Techniques'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack2(_frame, True, True)\n _labels = [_(\n u' 1. The software language requirements will be specified.'),\n _(u' 2. Formal program design language will be used.'), _(\n u' 3. Program design graphical techniques (flowcharts, HIPO, etc.) will be used.'\n ), _(u' 4. Simulation/emulation tools will be used.'), _(\n u' 5. Configuration management tools will be used.'), _(\n u' 6. A code auditing tool will be used.'), _(\n u' 7. A data flow analyzer will be used.'), _(\n u\" 8. A programmer's workbench will be used.\"), _(\n u' 9. Measurement tools will be used.'), _(\n u'10. Software code reviews will be used.'), _(\n u'11. Software branch testing will be used.'), _(\n u'12. Random testing will be used.'), _(\n u'13. Functional testing will be used.'), _(\n u'14. Error and anomaly detection testing will be used.'), _(\n u'15. 
Structure analysis will be used.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ29, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ30, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ31, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ32, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ33, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ34, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ35, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ36, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ37, _x_pos, _y_pos[8])\n _fixed.put(self.chkDevEnvQ38, _x_pos, _y_pos[9])\n _fixed.put(self.chkDevEnvQ39, _x_pos, _y_pos[10])\n _fixed.put(self.chkDevEnvQ40, _x_pos, _y_pos[11])\n _fixed.put(self.chkDevEnvQ41, _x_pos, _y_pos[12])\n _fixed.put(self.chkDevEnvQ42, _x_pos, _y_pos[13])\n _fixed.put(self.chkDevEnvQ43, _x_pos, _y_pos[14])\n _label = gtk.Label()\n _label.set_markup(\"<span weight='bold'>\" + _(\n u'Development\\nEnvironment') + '</span>')\n _label.set_alignment(xalign=0.5, yalign=0.5)\n _label.set_justify(gtk.JUSTIFY_CENTER)\n _label.set_angle(0)\n _label.show_all()\n _label.set_tooltip_text(_(\n u'Assesses risk due to the development environment.'))\n notebook.insert_page(self, tab_label=_label, position=-1)\n return False\n\n def load(self, model):\n \"\"\"\n Method to load the Development Environment Risk Analysis answers.\n\n :param `rtk.software.Software` model: the Software data model to load\n the gtk.ToggleButton() from.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n self._software_model = model\n self.chkDevEnvQ1.set_active(model.lst_development[0])\n self.chkDevEnvQ2.set_active(model.lst_development[1])\n self.chkDevEnvQ3.set_active(model.lst_development[2])\n self.chkDevEnvQ4.set_active(model.lst_development[3])\n self.chkDevEnvQ5.set_active(model.lst_development[4])\n self.chkDevEnvQ6.set_active(model.lst_development[5])\n self.chkDevEnvQ7.set_active(model.lst_development[6])\n self.chkDevEnvQ8.set_active(model.lst_development[7])\n self.chkDevEnvQ9.set_active(model.lst_development[8])\n self.chkDevEnvQ10.set_active(model.lst_development[9])\n self.chkDevEnvQ11.set_active(model.lst_development[10])\n self.chkDevEnvQ12.set_active(model.lst_development[11])\n self.chkDevEnvQ13.set_active(model.lst_development[12])\n self.chkDevEnvQ14.set_active(model.lst_development[13])\n self.chkDevEnvQ15.set_active(model.lst_development[14])\n self.chkDevEnvQ16.set_active(model.lst_development[15])\n self.chkDevEnvQ17.set_active(model.lst_development[16])\n self.chkDevEnvQ18.set_active(model.lst_development[17])\n self.chkDevEnvQ19.set_active(model.lst_development[18])\n self.chkDevEnvQ20.set_active(model.lst_development[19])\n self.chkDevEnvQ21.set_active(model.lst_development[20])\n self.chkDevEnvQ22.set_active(model.lst_development[21])\n self.chkDevEnvQ23.set_active(model.lst_development[22])\n self.chkDevEnvQ24.set_active(model.lst_development[23])\n self.chkDevEnvQ25.set_active(model.lst_development[24])\n self.chkDevEnvQ26.set_active(model.lst_development[25])\n self.chkDevEnvQ27.set_active(model.lst_development[26])\n self.chkDevEnvQ28.set_active(model.lst_development[27])\n self.chkDevEnvQ29.set_active(model.lst_development[28])\n self.chkDevEnvQ30.set_active(model.lst_development[29])\n self.chkDevEnvQ31.set_active(model.lst_development[30])\n self.chkDevEnvQ32.set_active(model.lst_development[31])\n self.chkDevEnvQ33.set_active(model.lst_development[32])\n 
self.chkDevEnvQ34.set_active(model.lst_development[33])\n self.chkDevEnvQ35.set_active(model.lst_development[34])\n self.chkDevEnvQ36.set_active(model.lst_development[35])\n self.chkDevEnvQ37.set_active(model.lst_development[36])\n self.chkDevEnvQ38.set_active(model.lst_development[37])\n self.chkDevEnvQ39.set_active(model.lst_development[38])\n self.chkDevEnvQ40.set_active(model.lst_development[39])\n self.chkDevEnvQ41.set_active(model.lst_development[40])\n self.chkDevEnvQ42.set_active(model.lst_development[41])\n self.chkDevEnvQ43.set_active(model.lst_development[42])\n return False\n\n def _on_toggled(self, check, index):\n \"\"\"\n Callback method for gtk.CheckButton() 'toggled' event.\n\n :param gtk.CheckButton check: the gtk.CheckButton() that called this\n method.\n :param int index: the index of the Development Environment question\n associated with the gtk.CheckButton() that was\n toggled.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n check.handler_block(self._lst_handler_id[index])\n self._software_model.lst_development[index] = int(check.get_active())\n check.handler_unblock(self._lst_handler_id[index])\n return False\n",
"step-5": "#!/usr/bin/env python\r\n\"\"\"\r\n##############################################################################\r\nSoftware Package Risk Analysis Development Environment Specific Work Book View\r\n##############################################################################\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n#\r\n# rtk.software.__gui.gtk.DevelopmentEnvironment.py is part of The RTK\r\n# Project\r\n#\r\n# All rights reserved.\r\n\r\nimport sys\r\n\r\n# Import modules for localization support.\r\nimport gettext\r\nimport locale\r\n\r\n# Modules required for the GUI.\r\ntry:\r\n import pygtk\r\n pygtk.require('2.0')\r\nexcept ImportError:\r\n sys.exit(1)\r\ntry:\r\n import gtk\r\nexcept ImportError:\r\n sys.exit(1)\r\ntry:\r\n import gtk.glade\r\nexcept ImportError:\r\n sys.exit(1)\r\n\r\n# Import other RTK modules.\r\ntry:\r\n import Configuration\r\n import gui.gtk.Widgets as Widgets\r\nexcept ImportError:\r\n import rtk.Configuration as Configuration\r\n import rtk.gui.gtk.Widgets as Widgets\r\n\r\n__author__ = 'Andrew Rowland'\r\n__email__ = 'andrew.rowland@reliaqual.com'\r\n__organization__ = 'ReliaQual Associates, LLC'\r\n__copyright__ = 'Copyright 2007 - 2015 Andrew \"weibullguy\" Rowland'\r\n\r\ntry:\r\n locale.setlocale(locale.LC_ALL, Configuration.LOCALE)\r\nexcept locale.Error:\r\n locale.setlocale(locale.LC_ALL, '')\r\n\r\n_ = gettext.gettext\r\n\r\n\r\nclass RiskAnalysis(gtk.VPaned):\r\n \"\"\"\r\n The Work Book view for analyzing and displaying the risk associated with\r\n the development environment. The attributes of a development environment\r\n Work Book view are:\r\n\r\n :ivar list _lst_handler_id: the list of gtk.Widget() signal handler IDs.\r\n :ivar _software_model: the :py:class:`rtk.software.Software.Model` to\r\n display.\r\n \"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"\r\n Method to initialize the development environment risk analysis\r\n questions Work Book page.\r\n \"\"\"\r\n\r\n gtk.VPaned.__init__(self)\r\n\r\n # Define private dictionary attributes.\r\n\r\n # Define private list attributes.\r\n self._lst_handler_id = []\r\n\r\n # Define private scalar attributes.\r\n self._software_model = None\r\n\r\n # Define public dictionary attributes.\r\n\r\n # Define public list attributes.\r\n\r\n # Define public scalar attributes.\r\n self.chkDevEnvQ1 = Widgets.make_check_button()\r\n self.chkDevEnvQ2 = Widgets.make_check_button()\r\n self.chkDevEnvQ3 = Widgets.make_check_button()\r\n self.chkDevEnvQ4 = Widgets.make_check_button()\r\n self.chkDevEnvQ5 = Widgets.make_check_button()\r\n self.chkDevEnvQ6 = Widgets.make_check_button()\r\n self.chkDevEnvQ7 = Widgets.make_check_button()\r\n self.chkDevEnvQ8 = Widgets.make_check_button()\r\n self.chkDevEnvQ9 = Widgets.make_check_button()\r\n self.chkDevEnvQ10 = Widgets.make_check_button()\r\n self.chkDevEnvQ11 = Widgets.make_check_button()\r\n self.chkDevEnvQ12 = Widgets.make_check_button()\r\n self.chkDevEnvQ13 = Widgets.make_check_button()\r\n self.chkDevEnvQ14 = Widgets.make_check_button()\r\n self.chkDevEnvQ15 = Widgets.make_check_button()\r\n self.chkDevEnvQ16 = Widgets.make_check_button()\r\n self.chkDevEnvQ17 = Widgets.make_check_button()\r\n self.chkDevEnvQ18 = Widgets.make_check_button()\r\n self.chkDevEnvQ19 = Widgets.make_check_button()\r\n self.chkDevEnvQ20 = Widgets.make_check_button()\r\n self.chkDevEnvQ21 = Widgets.make_check_button()\r\n self.chkDevEnvQ22 = Widgets.make_check_button()\r\n self.chkDevEnvQ23 = Widgets.make_check_button()\r\n self.chkDevEnvQ24 = 
Widgets.make_check_button()\r\n self.chkDevEnvQ25 = Widgets.make_check_button()\r\n self.chkDevEnvQ26 = Widgets.make_check_button()\r\n self.chkDevEnvQ27 = Widgets.make_check_button()\r\n self.chkDevEnvQ28 = Widgets.make_check_button()\r\n self.chkDevEnvQ29 = Widgets.make_check_button()\r\n self.chkDevEnvQ30 = Widgets.make_check_button()\r\n self.chkDevEnvQ31 = Widgets.make_check_button()\r\n self.chkDevEnvQ32 = Widgets.make_check_button()\r\n self.chkDevEnvQ33 = Widgets.make_check_button()\r\n self.chkDevEnvQ34 = Widgets.make_check_button()\r\n self.chkDevEnvQ35 = Widgets.make_check_button()\r\n self.chkDevEnvQ36 = Widgets.make_check_button()\r\n self.chkDevEnvQ37 = Widgets.make_check_button()\r\n self.chkDevEnvQ38 = Widgets.make_check_button()\r\n self.chkDevEnvQ39 = Widgets.make_check_button()\r\n self.chkDevEnvQ40 = Widgets.make_check_button()\r\n self.chkDevEnvQ41 = Widgets.make_check_button()\r\n self.chkDevEnvQ42 = Widgets.make_check_button()\r\n self.chkDevEnvQ43 = Widgets.make_check_button()\r\n\r\n # Connect gtk.Widget() signals to callback methods.\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ1.connect('toggled', self._on_toggled, 0))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ2.connect('toggled', self._on_toggled, 1))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ3.connect('toggled', self._on_toggled, 2))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ4.connect('toggled', self._on_toggled, 3))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ5.connect('toggled', self._on_toggled, 4))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ6.connect('toggled', self._on_toggled, 5))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ7.connect('toggled', self._on_toggled, 6))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ8.connect('toggled', self._on_toggled, 7))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ9.connect('toggled', self._on_toggled, 8))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ10.connect('toggled', self._on_toggled, 9))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ11.connect('toggled', self._on_toggled, 10))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ12.connect('toggled', self._on_toggled, 11))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ13.connect('toggled', self._on_toggled, 12))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ14.connect('toggled', self._on_toggled, 13))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ15.connect('toggled', self._on_toggled, 14))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ16.connect('toggled', self._on_toggled, 15))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ17.connect('toggled', self._on_toggled, 16))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ18.connect('toggled', self._on_toggled, 17))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ19.connect('toggled', self._on_toggled, 18))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ20.connect('toggled', self._on_toggled, 19))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ21.connect('toggled', self._on_toggled, 20))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ22.connect('toggled', self._on_toggled, 21))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ23.connect('toggled', self._on_toggled, 22))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ24.connect('toggled', self._on_toggled, 23))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ25.connect('toggled', self._on_toggled, 24))\r\n self._lst_handler_id.append(\r\n 
self.chkDevEnvQ26.connect('toggled', self._on_toggled, 25))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ27.connect('toggled', self._on_toggled, 26))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ28.connect('toggled', self._on_toggled, 27))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ29.connect('toggled', self._on_toggled, 28))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ30.connect('toggled', self._on_toggled, 29))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ31.connect('toggled', self._on_toggled, 30))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ32.connect('toggled', self._on_toggled, 31))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ33.connect('toggled', self._on_toggled, 32))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ34.connect('toggled', self._on_toggled, 33))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ35.connect('toggled', self._on_toggled, 34))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ36.connect('toggled', self._on_toggled, 35))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ37.connect('toggled', self._on_toggled, 36))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ38.connect('toggled', self._on_toggled, 37))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ39.connect('toggled', self._on_toggled, 38))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ40.connect('toggled', self._on_toggled, 39))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ41.connect('toggled', self._on_toggled, 40))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ42.connect('toggled', self._on_toggled, 41))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ43.connect('toggled', self._on_toggled, 42))\r\n\r\n def create_risk_analysis_page(self, notebook):\r\n \"\"\"\r\n Method to create the development environment risk analysis page and add\r\n it to the risk analysis gtk.Notebook().\r\n\r\n :param gtk.Notebook notebook: the gtk.Notebook() instance that will\r\n hold the development environment risk\r\n analysis questions.\r\n :return: False if successful or True if an error is encountered.\r\n :rtype: bool\r\n \"\"\"\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Build-up the containers for the tab. #\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n _hpaned = gtk.HPaned()\r\n self.pack1(_hpaned, resize=True, shrink=True)\r\n\r\n # Create the organizational risk pane.\r\n _fixed = gtk.Fixed()\r\n\r\n _scrollwindow = gtk.ScrolledWindow()\r\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\r\n _scrollwindow.add_with_viewport(_fixed)\r\n\r\n _frame = Widgets.make_frame(label=_(u\"Organization\"))\r\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\r\n _frame.add(_scrollwindow)\r\n\r\n _hpaned.pack1(_frame, True, True)\r\n\r\n _labels = [_(u\"1. There are separate design and coding \"\r\n u\"organizations.\"),\r\n _(u\"2. There is an independent software test \"\r\n u\"organization.\"),\r\n _(u\"3. There is an independent software quality \"\r\n u\"assurance organization.\"),\r\n _(u\"4. There is an independent software configuration \"\r\n u\"management organization.\"),\r\n _(u\"5. There is an independent software verification \"\r\n u\"and validation organization.\"),\r\n _(u\"6. A structured programming team will develop the \"\r\n u\"software.\"),\r\n _(u\"7. The educational level of the software team members \"\r\n u\"is above average.\"),\r\n _(u\"8. 
The experience level of the software team members \"\r\n u\"is above average.\")]\r\n (_x_pos,\r\n _y_pos) = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\r\n _x_pos += 125\r\n\r\n _fixed.put(self.chkDevEnvQ1, _x_pos, _y_pos[0])\r\n _fixed.put(self.chkDevEnvQ2, _x_pos, _y_pos[1])\r\n _fixed.put(self.chkDevEnvQ3, _x_pos, _y_pos[2])\r\n _fixed.put(self.chkDevEnvQ4, _x_pos, _y_pos[3])\r\n _fixed.put(self.chkDevEnvQ5, _x_pos, _y_pos[4])\r\n _fixed.put(self.chkDevEnvQ6, _x_pos, _y_pos[5])\r\n _fixed.put(self.chkDevEnvQ7, _x_pos, _y_pos[6])\r\n _fixed.put(self.chkDevEnvQ8, _x_pos, _y_pos[7])\r\n\r\n # Create the methods risk pane.\r\n _fixed = gtk.Fixed()\r\n\r\n _scrollwindow = gtk.ScrolledWindow()\r\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\r\n _scrollwindow.add_with_viewport(_fixed)\r\n\r\n _frame = Widgets.make_frame(label=_(u\"Methods\"))\r\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\r\n _frame.add(_scrollwindow)\r\n\r\n _hpaned.pack2(_frame, True, True)\r\n\r\n _labels = [_(u\"1. Standards are defined and will be enforced.\"),\r\n _(u\"2. Software will be developed using a higher order \"\r\n u\"language.\"),\r\n _(u\"3. The development process will include formal \"\r\n u\"reviews (PDR, CDR, etc.).\"),\r\n _(u\"4. The development process will include frequent \"\r\n u\"walkthroughs.\"),\r\n _(u\"5. Development will take a top-down and \"\r\n u\"structured approach.\"),\r\n _(u\"6. Unit development folders will be used.\"),\r\n _(u\"7. A software development library will be used.\"),\r\n _(u\"8. A formal change and error reporting process \"\r\n u\"will be used.\"),\r\n _(u\"9. Progress and status will routinely be \"\r\n u\"reported.\")]\r\n (__, _y_pos) = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\r\n\r\n _fixed.put(self.chkDevEnvQ9, _x_pos, _y_pos[0])\r\n _fixed.put(self.chkDevEnvQ10, _x_pos, _y_pos[1])\r\n _fixed.put(self.chkDevEnvQ11, _x_pos, _y_pos[2])\r\n _fixed.put(self.chkDevEnvQ12, _x_pos, _y_pos[3])\r\n _fixed.put(self.chkDevEnvQ13, _x_pos, _y_pos[4])\r\n _fixed.put(self.chkDevEnvQ14, _x_pos, _y_pos[5])\r\n _fixed.put(self.chkDevEnvQ15, _x_pos, _y_pos[6])\r\n _fixed.put(self.chkDevEnvQ16, _x_pos, _y_pos[7])\r\n _fixed.put(self.chkDevEnvQ17, _x_pos, _y_pos[8])\r\n\r\n # Create the documentation risk pane.\r\n _hpaned = gtk.HPaned()\r\n self.pack2(_hpaned, resize=True, shrink=True)\r\n\r\n _fixed = gtk.Fixed()\r\n\r\n _scrollwindow = gtk.ScrolledWindow()\r\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\r\n _scrollwindow.add_with_viewport(_fixed)\r\n\r\n _frame = Widgets.make_frame(label=_(u\"Documentation\"))\r\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\r\n _frame.add(_scrollwindow)\r\n\r\n _hpaned.pack1(_frame, True, True)\r\n\r\n _labels = [_(u\" 1. System requirements specifications will be \"\r\n u\"documented.\"),\r\n _(u\" 2. Software requirements specifications will be \"\r\n u\"documented.\"),\r\n _(u\" 3. Interface design specifications will be \"\r\n u\"documented.\"),\r\n _(u\" 4. Software design specification will be \"\r\n u\"documented.\"),\r\n _(u\" 5. Test plans, procedures, and reports will be \"\r\n u\"documented.\"),\r\n _(u\" 6. The software development plan will be \"\r\n u\"documented.\"),\r\n _(u\" 7. The software quality assurance plan will be \"\r\n u\"documented.\"),\r\n _(u\" 8. The software configuration management plan will \"\r\n u\"be documented.\"),\r\n _(u\" 9. A requirements traceability matrix will be \"\r\n u\"used.\"),\r\n _(u\"10. 
The software version description will be \"\r\n u\"documented.\"),\r\n _(u\"11. All software discrepancies will be \"\r\n u\"documented.\")]\r\n (__, _y_pos) = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\r\n\r\n _fixed.put(self.chkDevEnvQ18, _x_pos, _y_pos[0])\r\n _fixed.put(self.chkDevEnvQ19, _x_pos, _y_pos[1])\r\n _fixed.put(self.chkDevEnvQ20, _x_pos, _y_pos[2])\r\n _fixed.put(self.chkDevEnvQ21, _x_pos, _y_pos[3])\r\n _fixed.put(self.chkDevEnvQ22, _x_pos, _y_pos[4])\r\n _fixed.put(self.chkDevEnvQ23, _x_pos, _y_pos[5])\r\n _fixed.put(self.chkDevEnvQ24, _x_pos, _y_pos[6])\r\n _fixed.put(self.chkDevEnvQ25, _x_pos, _y_pos[7])\r\n _fixed.put(self.chkDevEnvQ26, _x_pos, _y_pos[8])\r\n _fixed.put(self.chkDevEnvQ27, _x_pos, _y_pos[9])\r\n _fixed.put(self.chkDevEnvQ28, _x_pos, _y_pos[10])\r\n\r\n # Create the tools and test techniques risk pane.\r\n _fixed = gtk.Fixed()\r\n\r\n _scrollwindow = gtk.ScrolledWindow()\r\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\r\n _scrollwindow.add_with_viewport(_fixed)\r\n\r\n _frame = Widgets.make_frame(label=_(u\"Tools & Test Techniques\"))\r\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\r\n _frame.add(_scrollwindow)\r\n\r\n _hpaned.pack2(_frame, True, True)\r\n\r\n _labels = [_(u\" 1. The software language requirements will be \"\r\n u\"specified.\"),\r\n _(u\" 2. Formal program design language will be used.\"),\r\n _(u\" 3. Program design graphical techniques \"\r\n u\"(flowcharts, HIPO, etc.) will be used.\"),\r\n _(u\" 4. Simulation/emulation tools will be used.\"),\r\n _(u\" 5. Configuration management tools will be used.\"),\r\n _(u\" 6. A code auditing tool will be used.\"),\r\n _(u\" 7. A data flow analyzer will be used.\"),\r\n _(u\" 8. A programmer's workbench will be used.\"),\r\n _(u\" 9. Measurement tools will be used.\"),\r\n _(u\"10. Software code reviews will be used.\"),\r\n _(u\"11. Software branch testing will be used.\"),\r\n _(u\"12. Random testing will be used.\"),\r\n _(u\"13. Functional testing will be used.\"),\r\n _(u\"14. Error and anomaly detection testing will be \"\r\n u\"used.\"),\r\n _(u\"15. 
Structure analysis will be used.\")]\r\n (__, _y_pos) = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\r\n\r\n _fixed.put(self.chkDevEnvQ29, _x_pos, _y_pos[0])\r\n _fixed.put(self.chkDevEnvQ30, _x_pos, _y_pos[1])\r\n _fixed.put(self.chkDevEnvQ31, _x_pos, _y_pos[2])\r\n _fixed.put(self.chkDevEnvQ32, _x_pos, _y_pos[3])\r\n _fixed.put(self.chkDevEnvQ33, _x_pos, _y_pos[4])\r\n _fixed.put(self.chkDevEnvQ34, _x_pos, _y_pos[5])\r\n _fixed.put(self.chkDevEnvQ35, _x_pos, _y_pos[6])\r\n _fixed.put(self.chkDevEnvQ36, _x_pos, _y_pos[7])\r\n _fixed.put(self.chkDevEnvQ37, _x_pos, _y_pos[8])\r\n _fixed.put(self.chkDevEnvQ38, _x_pos, _y_pos[9])\r\n _fixed.put(self.chkDevEnvQ39, _x_pos, _y_pos[10])\r\n _fixed.put(self.chkDevEnvQ40, _x_pos, _y_pos[11])\r\n _fixed.put(self.chkDevEnvQ41, _x_pos, _y_pos[12])\r\n _fixed.put(self.chkDevEnvQ42, _x_pos, _y_pos[13])\r\n _fixed.put(self.chkDevEnvQ43, _x_pos, _y_pos[14])\r\n\r\n _label = gtk.Label()\r\n _label.set_markup(\"<span weight='bold'>\" +\r\n _(u\"Development\\nEnvironment\") +\r\n \"</span>\")\r\n _label.set_alignment(xalign=0.5, yalign=0.5)\r\n _label.set_justify(gtk.JUSTIFY_CENTER)\r\n _label.set_angle(0)\r\n _label.show_all()\r\n _label.set_tooltip_text(_(u\"Assesses risk due to the development \"\r\n u\"environment.\"))\r\n notebook.insert_page(self, tab_label=_label, position=-1)\r\n\r\n return False\r\n\r\n def load(self, model):\r\n \"\"\"\r\n Method to load the Development Environment Risk Analysis answers.\r\n\r\n :param `rtk.software.Software` model: the Software data model to load\r\n the gtk.ToggleButton() from.\r\n :return: False if successful or True if an error is encountered.\r\n :rtype: bool\r\n \"\"\"\r\n\r\n self._software_model = model\r\n\r\n self.chkDevEnvQ1.set_active(model.lst_development[0])\r\n self.chkDevEnvQ2.set_active(model.lst_development[1])\r\n self.chkDevEnvQ3.set_active(model.lst_development[2])\r\n self.chkDevEnvQ4.set_active(model.lst_development[3])\r\n self.chkDevEnvQ5.set_active(model.lst_development[4])\r\n self.chkDevEnvQ6.set_active(model.lst_development[5])\r\n self.chkDevEnvQ7.set_active(model.lst_development[6])\r\n self.chkDevEnvQ8.set_active(model.lst_development[7])\r\n self.chkDevEnvQ9.set_active(model.lst_development[8])\r\n self.chkDevEnvQ10.set_active(model.lst_development[9])\r\n self.chkDevEnvQ11.set_active(model.lst_development[10])\r\n self.chkDevEnvQ12.set_active(model.lst_development[11])\r\n self.chkDevEnvQ13.set_active(model.lst_development[12])\r\n self.chkDevEnvQ14.set_active(model.lst_development[13])\r\n self.chkDevEnvQ15.set_active(model.lst_development[14])\r\n self.chkDevEnvQ16.set_active(model.lst_development[15])\r\n self.chkDevEnvQ17.set_active(model.lst_development[16])\r\n self.chkDevEnvQ18.set_active(model.lst_development[17])\r\n self.chkDevEnvQ19.set_active(model.lst_development[18])\r\n self.chkDevEnvQ20.set_active(model.lst_development[19])\r\n self.chkDevEnvQ21.set_active(model.lst_development[20])\r\n self.chkDevEnvQ22.set_active(model.lst_development[21])\r\n self.chkDevEnvQ23.set_active(model.lst_development[22])\r\n self.chkDevEnvQ24.set_active(model.lst_development[23])\r\n self.chkDevEnvQ25.set_active(model.lst_development[24])\r\n self.chkDevEnvQ26.set_active(model.lst_development[25])\r\n self.chkDevEnvQ27.set_active(model.lst_development[26])\r\n self.chkDevEnvQ28.set_active(model.lst_development[27])\r\n self.chkDevEnvQ29.set_active(model.lst_development[28])\r\n self.chkDevEnvQ30.set_active(model.lst_development[29])\r\n 
self.chkDevEnvQ31.set_active(model.lst_development[30])\r\n self.chkDevEnvQ32.set_active(model.lst_development[31])\r\n self.chkDevEnvQ33.set_active(model.lst_development[32])\r\n self.chkDevEnvQ34.set_active(model.lst_development[33])\r\n self.chkDevEnvQ35.set_active(model.lst_development[34])\r\n self.chkDevEnvQ36.set_active(model.lst_development[35])\r\n self.chkDevEnvQ37.set_active(model.lst_development[36])\r\n self.chkDevEnvQ38.set_active(model.lst_development[37])\r\n self.chkDevEnvQ39.set_active(model.lst_development[38])\r\n self.chkDevEnvQ40.set_active(model.lst_development[39])\r\n self.chkDevEnvQ41.set_active(model.lst_development[40])\r\n self.chkDevEnvQ42.set_active(model.lst_development[41])\r\n self.chkDevEnvQ43.set_active(model.lst_development[42])\r\n\r\n return False\r\n\r\n def _on_toggled(self, check, index):\r\n \"\"\"\r\n Callback method for gtk.CheckButton() 'toggled' event.\r\n\r\n :param gtk.CheckButton check: the gtk.CheckButton() that called this\r\n method.\r\n :param int index: the index of the Development Environment question\r\n associated with the gtk.CheckButton() that was\r\n toggled.\r\n :return: False if successful or True if an error is encountered.\r\n :rtype: bool\r\n \"\"\"\r\n\r\n check.handler_block(self._lst_handler_id[index])\r\n\r\n self._software_model.lst_development[index] = int(check.get_active())\r\n\r\n check.handler_unblock(self._lst_handler_id[index])\r\n\r\n return False\r\n",
"step-ids": [
4,
5,
7,
9,
10
]
}
|
[
4,
5,
7,
9,
10
] |
# -*- coding: utf-8 -*-
import logging
from django.contrib.auth import authenticate, login as django_login, logout as django_logout
from django.contrib.auth.models import User
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.db.utils import IntegrityError
from django.shortcuts import redirect, render
from django.utils.translation import gettext_lazy as _
from keymanager.settings import PAGE_SIZE
from .forms import LoginForm
from .forms import UserCreateForm, UserEditForm
from utils.filters import require_superuser
LOG = logging.getLogger(__name__)
def require_superuser_or_self(func):
    def check(request, user_id):
        # user_id is captured from the URL as a string, so normalise both
        # sides to str before comparing.
        if request.user.is_superuser or \
                str(user_id) == str(request.user.id):
            return func(request, user_id)
        return render(request, "403.html")
    return check
@require_superuser
def index(request):
template_name = "users/index.html"
msg = ""
try:
users = User.objects.exclude(id=request.user.id)
    except Exception:
msg = _("Unable to list users.")
LOG.error(msg)
users = []
paginator = Paginator(users, PAGE_SIZE)
page = request.GET.get('page')
try:
users = paginator.page(page)
except PageNotAnInteger:
users = paginator.page(1)
except EmptyPage:
users = paginator.page(paginator.num_pages)
return render(request, template_name, {"users": users, "message": msg})
@require_superuser
def create(request):
template_name = "users/create_user.html"
msg = ""
user_form = UserCreateForm()
if request.method == "POST":
user_form = UserCreateForm(request.POST)
if user_form.is_valid():
try:
new_user = User.objects.create_user(
request.POST['username'],
request.POST['email'],
request.POST['password'])
new_user.save()
                msg = _('Successfully created user "%s"') % \
                    user_form.cleaned_data['username'].encode("utf-8")
LOG.info(msg)
except IntegrityError:
msg = _("User already exist, please try another username.")
LOG.error(msg)
            except Exception:
msg = _('Unable to create user "%s"') % \
user_form.cleaned_data['username'].encode("utf-8")
LOG.error(msg)
return render(request, template_name, {"user_form": user_form,
"message": msg})
@require_superuser
def delete(request, user_id):
try:
User.objects.get(id=user_id).delete()
except Exception:
msg = _("Unable to delete user(%s)") % user_id
LOG.error(msg)
    # user_id from the URL is a string; normalise before comparing.
    if str(user_id) == str(request.user.id):
logout(request)
return redirect(reverse('users:index'))
@require_superuser
def deactivate(request, user_id):
try:
user = User.objects.get(id=user_id)
user.is_active = False
user.save()
    except Exception:
msg = _("Unable to deactivate user(%s)") % user_id
LOG.error(msg)
    if str(user_id) == str(request.user.id):
logout(request)
return redirect(reverse('users:index'))
@require_superuser
def activate(request, user_id):
try:
user = User.objects.get(id=user_id)
user.is_active = True
user.save()
    except Exception:
msg = _("Unable to activate user(%s)") % user_id
LOG.error(msg)
    if str(user_id) == str(request.user.id):
logout(request)
return redirect(reverse('users:index'))
@require_superuser_or_self
def edit(request, user_id):
template_name = "users/update_user.html"
msg = ""
user = User.objects.get(id=user_id)
user_form = UserEditForm(initial={"username": user.username,
"email": user.email})
if request.method == "POST":
user_form = UserEditForm(request.POST)
if user_form.is_valid():
username = request.POST['username']
email = request.POST['email']
password = request.POST['password']
if username:
user.username = username
if email:
user.email = email
if password:
user.set_password(password)
user.save()
            msg = _('Successfully updated user "%s"') % username.encode("utf-8")
LOG.info(msg)
return render(request, template_name, {"user_id": user_id,
"user_form": user_form,
"message": msg})
def login(request):
template_name = 'auth/login.html'
msg = ""
if request.user.is_authenticated():
return redirect(reverse("keys:index"))
    form = LoginForm()
    if request.method == "POST":
        # Bind the submitted data to the same variable that is rendered
        # below, so validation errors are not silently dropped.
        form = LoginForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']
            password = form.cleaned_data["password"]
            user = authenticate(username=username, password=password)
            if user:
                if user.is_active:
                    django_login(request, user)
                    msg = _("%s logged in successfully.") % \
                        username.encode('utf-8')
                    LOG.info(msg)
                    return redirect(reverse('keys:index'))
            msg = _("Invalid username or password.")
            LOG.error(msg)
return render(request, template_name, {"user_form": form,
"message": msg})
def logout(request):
django_logout(request)
return redirect(reverse("index"))
|
normal
|
{
"blob_id": "b739a5d359b4d1c0323c7cd8234e4fe5eb9f3fcb",
"index": 6286,
"step-1": "<mask token>\n\n\n@require_superuser\ndef index(request):\n template_name = 'users/index.html'\n msg = ''\n try:\n users = User.objects.exclude(id=request.user.id)\n except:\n msg = _('Unable to list users.')\n LOG.error(msg)\n users = []\n paginator = Paginator(users, PAGE_SIZE)\n page = request.GET.get('page')\n try:\n users = paginator.page(page)\n except PageNotAnInteger:\n users = paginator.page(1)\n except EmptyPage:\n users = paginator.page(paginator.num_pages)\n return render(request, template_name, {'users': users, 'message': msg})\n\n\n@require_superuser\ndef create(request):\n template_name = 'users/create_user.html'\n msg = ''\n user_form = UserCreateForm()\n if request.method == 'POST':\n user_form = UserCreateForm(request.POST)\n if user_form.is_valid():\n try:\n new_user = User.objects.create_user(request.POST['username'\n ], request.POST['email'], request.POST['password'])\n new_user.save()\n msg = _('Success create user \"%s\"') % user_form.cleaned_data[\n 'username'].encode('utf-8')\n LOG.info(msg)\n except IntegrityError:\n msg = _('User already exist, please try another username.')\n LOG.error(msg)\n except:\n msg = _('Unable to create user \"%s\"') % user_form.cleaned_data[\n 'username'].encode('utf-8')\n LOG.error(msg)\n return render(request, template_name, {'user_form': user_form,\n 'message': msg})\n\n\n@require_superuser\ndef delete(request, user_id):\n try:\n User.objects.get(id=user_id).delete()\n except Exception:\n msg = _('Unable to delete user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef deactivate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = False\n user.save()\n except:\n msg = _('Unable to deactivate user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef activate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = True\n user.save()\n except:\n msg = _('Unable to activate user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser_or_self\ndef edit(request, user_id):\n template_name = 'users/update_user.html'\n msg = ''\n user = User.objects.get(id=user_id)\n user_form = UserEditForm(initial={'username': user.username, 'email':\n user.email})\n if request.method == 'POST':\n user_form = UserEditForm(request.POST)\n if user_form.is_valid():\n username = request.POST['username']\n email = request.POST['email']\n password = request.POST['password']\n if username:\n user.username = username\n if email:\n user.email = email\n if password:\n user.set_password(password)\n user.save()\n msg = _('Success updated user \"%s\"') % username.encode('utf-8')\n LOG.info(msg)\n return render(request, template_name, {'user_id': user_id, 'user_form':\n user_form, 'message': msg})\n\n\ndef login(request):\n template_name = 'auth/login.html'\n msg = ''\n if request.user.is_authenticated():\n return redirect(reverse('keys:index'))\n form = LoginForm\n if request.method == 'POST':\n login_form = LoginForm(request.POST)\n if login_form.is_valid():\n username = login_form.cleaned_data['username']\n password = login_form.cleaned_data['password']\n user = authenticate(username=username, password=password)\n if user:\n if user.is_active:\n django_login(request, user)\n msg = _('%s logged in 
successfully.') % username.encode(\n 'utf-8')\n LOG.info(msg)\n return redirect(reverse('keys:index'))\n msg = _('Invalid username or password.')\n LOG.error(msg)\n return render(request, template_name, {'user_form': form, 'message': msg})\n\n\ndef logout(request):\n django_logout(request)\n return redirect(reverse('index'))\n",
"step-2": "<mask token>\n\n\ndef require_superuser_or_self(func):\n\n def check(request, user_id):\n if request.user.is_superuser or user_id.encode('utf-8') == str(request\n .user.id):\n return func(request, user_id)\n return render(request, '403.html')\n return check\n\n\n@require_superuser\ndef index(request):\n template_name = 'users/index.html'\n msg = ''\n try:\n users = User.objects.exclude(id=request.user.id)\n except:\n msg = _('Unable to list users.')\n LOG.error(msg)\n users = []\n paginator = Paginator(users, PAGE_SIZE)\n page = request.GET.get('page')\n try:\n users = paginator.page(page)\n except PageNotAnInteger:\n users = paginator.page(1)\n except EmptyPage:\n users = paginator.page(paginator.num_pages)\n return render(request, template_name, {'users': users, 'message': msg})\n\n\n@require_superuser\ndef create(request):\n template_name = 'users/create_user.html'\n msg = ''\n user_form = UserCreateForm()\n if request.method == 'POST':\n user_form = UserCreateForm(request.POST)\n if user_form.is_valid():\n try:\n new_user = User.objects.create_user(request.POST['username'\n ], request.POST['email'], request.POST['password'])\n new_user.save()\n msg = _('Success create user \"%s\"') % user_form.cleaned_data[\n 'username'].encode('utf-8')\n LOG.info(msg)\n except IntegrityError:\n msg = _('User already exist, please try another username.')\n LOG.error(msg)\n except:\n msg = _('Unable to create user \"%s\"') % user_form.cleaned_data[\n 'username'].encode('utf-8')\n LOG.error(msg)\n return render(request, template_name, {'user_form': user_form,\n 'message': msg})\n\n\n@require_superuser\ndef delete(request, user_id):\n try:\n User.objects.get(id=user_id).delete()\n except Exception:\n msg = _('Unable to delete user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef deactivate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = False\n user.save()\n except:\n msg = _('Unable to deactivate user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef activate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = True\n user.save()\n except:\n msg = _('Unable to activate user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser_or_self\ndef edit(request, user_id):\n template_name = 'users/update_user.html'\n msg = ''\n user = User.objects.get(id=user_id)\n user_form = UserEditForm(initial={'username': user.username, 'email':\n user.email})\n if request.method == 'POST':\n user_form = UserEditForm(request.POST)\n if user_form.is_valid():\n username = request.POST['username']\n email = request.POST['email']\n password = request.POST['password']\n if username:\n user.username = username\n if email:\n user.email = email\n if password:\n user.set_password(password)\n user.save()\n msg = _('Success updated user \"%s\"') % username.encode('utf-8')\n LOG.info(msg)\n return render(request, template_name, {'user_id': user_id, 'user_form':\n user_form, 'message': msg})\n\n\ndef login(request):\n template_name = 'auth/login.html'\n msg = ''\n if request.user.is_authenticated():\n return redirect(reverse('keys:index'))\n form = LoginForm\n if request.method == 'POST':\n login_form = LoginForm(request.POST)\n if login_form.is_valid():\n 
username = login_form.cleaned_data['username']\n password = login_form.cleaned_data['password']\n user = authenticate(username=username, password=password)\n if user:\n if user.is_active:\n django_login(request, user)\n msg = _('%s logged in successfully.') % username.encode(\n 'utf-8')\n LOG.info(msg)\n return redirect(reverse('keys:index'))\n msg = _('Invalid username or password.')\n LOG.error(msg)\n return render(request, template_name, {'user_form': form, 'message': msg})\n\n\ndef logout(request):\n django_logout(request)\n return redirect(reverse('index'))\n",
"step-3": "<mask token>\nLOG = logging.getLogger(__name__)\n\n\ndef require_superuser_or_self(func):\n\n def check(request, user_id):\n if request.user.is_superuser or user_id.encode('utf-8') == str(request\n .user.id):\n return func(request, user_id)\n return render(request, '403.html')\n return check\n\n\n@require_superuser\ndef index(request):\n template_name = 'users/index.html'\n msg = ''\n try:\n users = User.objects.exclude(id=request.user.id)\n except:\n msg = _('Unable to list users.')\n LOG.error(msg)\n users = []\n paginator = Paginator(users, PAGE_SIZE)\n page = request.GET.get('page')\n try:\n users = paginator.page(page)\n except PageNotAnInteger:\n users = paginator.page(1)\n except EmptyPage:\n users = paginator.page(paginator.num_pages)\n return render(request, template_name, {'users': users, 'message': msg})\n\n\n@require_superuser\ndef create(request):\n template_name = 'users/create_user.html'\n msg = ''\n user_form = UserCreateForm()\n if request.method == 'POST':\n user_form = UserCreateForm(request.POST)\n if user_form.is_valid():\n try:\n new_user = User.objects.create_user(request.POST['username'\n ], request.POST['email'], request.POST['password'])\n new_user.save()\n msg = _('Success create user \"%s\"') % user_form.cleaned_data[\n 'username'].encode('utf-8')\n LOG.info(msg)\n except IntegrityError:\n msg = _('User already exist, please try another username.')\n LOG.error(msg)\n except:\n msg = _('Unable to create user \"%s\"') % user_form.cleaned_data[\n 'username'].encode('utf-8')\n LOG.error(msg)\n return render(request, template_name, {'user_form': user_form,\n 'message': msg})\n\n\n@require_superuser\ndef delete(request, user_id):\n try:\n User.objects.get(id=user_id).delete()\n except Exception:\n msg = _('Unable to delete user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef deactivate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = False\n user.save()\n except:\n msg = _('Unable to deactivate user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef activate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = True\n user.save()\n except:\n msg = _('Unable to activate user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser_or_self\ndef edit(request, user_id):\n template_name = 'users/update_user.html'\n msg = ''\n user = User.objects.get(id=user_id)\n user_form = UserEditForm(initial={'username': user.username, 'email':\n user.email})\n if request.method == 'POST':\n user_form = UserEditForm(request.POST)\n if user_form.is_valid():\n username = request.POST['username']\n email = request.POST['email']\n password = request.POST['password']\n if username:\n user.username = username\n if email:\n user.email = email\n if password:\n user.set_password(password)\n user.save()\n msg = _('Success updated user \"%s\"') % username.encode('utf-8')\n LOG.info(msg)\n return render(request, template_name, {'user_id': user_id, 'user_form':\n user_form, 'message': msg})\n\n\ndef login(request):\n template_name = 'auth/login.html'\n msg = ''\n if request.user.is_authenticated():\n return redirect(reverse('keys:index'))\n form = LoginForm\n if request.method == 'POST':\n login_form = LoginForm(request.POST)\n 
if login_form.is_valid():\n username = login_form.cleaned_data['username']\n password = login_form.cleaned_data['password']\n user = authenticate(username=username, password=password)\n if user:\n if user.is_active:\n django_login(request, user)\n msg = _('%s logged in successfully.') % username.encode(\n 'utf-8')\n LOG.info(msg)\n return redirect(reverse('keys:index'))\n msg = _('Invalid username or password.')\n LOG.error(msg)\n return render(request, template_name, {'user_form': form, 'message': msg})\n\n\ndef logout(request):\n django_logout(request)\n return redirect(reverse('index'))\n",
"step-4": "import logging\nfrom django.contrib.auth import authenticate, login as django_login, logout as django_logout\nfrom django.contrib.auth.models import User\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.core.urlresolvers import reverse\nfrom django.db.utils import IntegrityError\nfrom django.shortcuts import redirect, render\nfrom django.utils.translation import gettext_lazy as _\nfrom keymanager.settings import PAGE_SIZE\nfrom .forms import LoginForm\nfrom .forms import UserCreateForm, UserEditForm\nfrom utils.filters import require_superuser\nLOG = logging.getLogger(__name__)\n\n\ndef require_superuser_or_self(func):\n\n def check(request, user_id):\n if request.user.is_superuser or user_id.encode('utf-8') == str(request\n .user.id):\n return func(request, user_id)\n return render(request, '403.html')\n return check\n\n\n@require_superuser\ndef index(request):\n template_name = 'users/index.html'\n msg = ''\n try:\n users = User.objects.exclude(id=request.user.id)\n except:\n msg = _('Unable to list users.')\n LOG.error(msg)\n users = []\n paginator = Paginator(users, PAGE_SIZE)\n page = request.GET.get('page')\n try:\n users = paginator.page(page)\n except PageNotAnInteger:\n users = paginator.page(1)\n except EmptyPage:\n users = paginator.page(paginator.num_pages)\n return render(request, template_name, {'users': users, 'message': msg})\n\n\n@require_superuser\ndef create(request):\n template_name = 'users/create_user.html'\n msg = ''\n user_form = UserCreateForm()\n if request.method == 'POST':\n user_form = UserCreateForm(request.POST)\n if user_form.is_valid():\n try:\n new_user = User.objects.create_user(request.POST['username'\n ], request.POST['email'], request.POST['password'])\n new_user.save()\n msg = _('Success create user \"%s\"') % user_form.cleaned_data[\n 'username'].encode('utf-8')\n LOG.info(msg)\n except IntegrityError:\n msg = _('User already exist, please try another username.')\n LOG.error(msg)\n except:\n msg = _('Unable to create user \"%s\"') % user_form.cleaned_data[\n 'username'].encode('utf-8')\n LOG.error(msg)\n return render(request, template_name, {'user_form': user_form,\n 'message': msg})\n\n\n@require_superuser\ndef delete(request, user_id):\n try:\n User.objects.get(id=user_id).delete()\n except Exception:\n msg = _('Unable to delete user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef deactivate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = False\n user.save()\n except:\n msg = _('Unable to deactivate user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef activate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = True\n user.save()\n except:\n msg = _('Unable to activate user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser_or_self\ndef edit(request, user_id):\n template_name = 'users/update_user.html'\n msg = ''\n user = User.objects.get(id=user_id)\n user_form = UserEditForm(initial={'username': user.username, 'email':\n user.email})\n if request.method == 'POST':\n user_form = UserEditForm(request.POST)\n if user_form.is_valid():\n username = request.POST['username']\n email = request.POST['email']\n password = 
request.POST['password']\n if username:\n user.username = username\n if email:\n user.email = email\n if password:\n user.set_password(password)\n user.save()\n msg = _('Success updated user \"%s\"') % username.encode('utf-8')\n LOG.info(msg)\n return render(request, template_name, {'user_id': user_id, 'user_form':\n user_form, 'message': msg})\n\n\ndef login(request):\n template_name = 'auth/login.html'\n msg = ''\n if request.user.is_authenticated():\n return redirect(reverse('keys:index'))\n form = LoginForm\n if request.method == 'POST':\n login_form = LoginForm(request.POST)\n if login_form.is_valid():\n username = login_form.cleaned_data['username']\n password = login_form.cleaned_data['password']\n user = authenticate(username=username, password=password)\n if user:\n if user.is_active:\n django_login(request, user)\n msg = _('%s logged in successfully.') % username.encode(\n 'utf-8')\n LOG.info(msg)\n return redirect(reverse('keys:index'))\n msg = _('Invalid username or password.')\n LOG.error(msg)\n return render(request, template_name, {'user_form': form, 'message': msg})\n\n\ndef logout(request):\n django_logout(request)\n return redirect(reverse('index'))\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport logging\n\nfrom django.contrib.auth import authenticate, login as django_login, logout as django_logout\nfrom django.contrib.auth.models import User\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.core.urlresolvers import reverse\nfrom django.db.utils import IntegrityError\nfrom django.shortcuts import redirect, render\nfrom django.utils.translation import gettext_lazy as _\n\nfrom keymanager.settings import PAGE_SIZE\n\nfrom .forms import LoginForm\nfrom .forms import UserCreateForm, UserEditForm\nfrom utils.filters import require_superuser\n\n\nLOG = logging.getLogger(__name__)\n\n\ndef require_superuser_or_self(func):\n def check(request, user_id):\n if request.user.is_superuser or \\\n user_id.encode(\"utf-8\") == str(request.user.id):\n return func(request, user_id)\n\n return render(request, \"403.html\")\n return check\n\n\n@require_superuser\ndef index(request):\n template_name = \"users/index.html\"\n msg = \"\"\n\n try:\n users = User.objects.exclude(id=request.user.id)\n except:\n msg = _(\"Unable to list users.\")\n LOG.error(msg)\n users = []\n\n paginator = Paginator(users, PAGE_SIZE)\n page = request.GET.get('page')\n try:\n users = paginator.page(page)\n except PageNotAnInteger:\n users = paginator.page(1)\n except EmptyPage:\n users = paginator.page(paginator.num_pages)\n\n return render(request, template_name, {\"users\": users, \"message\": msg})\n\n\n@require_superuser\ndef create(request):\n template_name = \"users/create_user.html\"\n msg = \"\"\n user_form = UserCreateForm()\n\n if request.method == \"POST\":\n user_form = UserCreateForm(request.POST)\n if user_form.is_valid():\n try:\n new_user = User.objects.create_user(\n request.POST['username'],\n request.POST['email'],\n request.POST['password'])\n new_user.save()\n msg = _('Success create user \"%s\"') % \\\n user_form.cleaned_data['username'].encode(\"utf-8\")\n LOG.info(msg)\n except IntegrityError:\n msg = _(\"User already exist, please try another username.\")\n LOG.error(msg)\n except:\n msg = _('Unable to create user \"%s\"') % \\\n user_form.cleaned_data['username'].encode(\"utf-8\")\n LOG.error(msg)\n\n return render(request, template_name, {\"user_form\": user_form,\n \"message\": msg})\n\n\n@require_superuser\ndef delete(request, user_id):\n try:\n User.objects.get(id=user_id).delete()\n except Exception:\n msg = _(\"Unable to delete user(%s)\") % user_id\n LOG.error(msg)\n\n if user_id == request.user.id:\n logout(request)\n\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef deactivate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = False\n user.save()\n except:\n msg = _(\"Unable to deactivate user(%s)\") % user_id\n LOG.error(msg)\n\n if user_id == request.user.id:\n logout(request)\n\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef activate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = True\n user.save()\n except:\n msg = _(\"Unable to activate user(%s)\") % user_id\n LOG.error(msg)\n\n if user_id == request.user.id:\n logout(request)\n\n return redirect(reverse('users:index'))\n\n\n@require_superuser_or_self\ndef edit(request, user_id):\n template_name = \"users/update_user.html\"\n msg = \"\"\n user = User.objects.get(id=user_id)\n user_form = UserEditForm(initial={\"username\": user.username,\n \"email\": user.email})\n\n if request.method == \"POST\":\n user_form = UserEditForm(request.POST)\n if 
user_form.is_valid():\n username = request.POST['username']\n email = request.POST['email']\n password = request.POST['password']\n if username:\n user.username = username\n if email:\n user.email = email\n if password:\n user.set_password(password)\n user.save()\n msg = _('Success updated user \"%s\"') % username.encode(\"utf-8\")\n LOG.info(msg)\n return render(request, template_name, {\"user_id\": user_id,\n \"user_form\": user_form,\n \"message\": msg})\n\n\ndef login(request):\n template_name = 'auth/login.html'\n msg = \"\"\n if request.user.is_authenticated():\n return redirect(reverse(\"keys:index\"))\n\n form = LoginForm\n\n if request.method == \"POST\":\n login_form = LoginForm(request.POST)\n if login_form.is_valid():\n username = login_form.cleaned_data['username']\n password = login_form.cleaned_data[\"password\"]\n user = authenticate(username=username, password=password)\n if user:\n if user.is_active:\n django_login(request, user)\n msg = _(\"%s logged in successfully.\") % \\\n username.encode('utf-8')\n LOG.info(msg)\n return redirect(reverse('keys:index'))\n msg = _(\"Invalid username or password.\")\n LOG.error(msg)\n\n return render(request, template_name, {\"user_form\": form,\n \"message\": msg})\n\n\ndef logout(request):\n django_logout(request)\n return redirect(reverse(\"index\"))",
"step-ids": [
8,
9,
10,
11,
12
]
}
|
[
8,
9,
10,
11,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
client.request(method='POST', url='/', body=post_data.encode('utf-8'),
headers=head_dict)
<|reserved_special_token_0|>
client.close()
print(content)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
client = http.client.HTTPConnection('127.0.0.1:9000')
post_data = {'usertag': 'test', 'password': '123456', 'code':
"print('Hello Web')"}
head_dict = {'Content-Type': 'application/x-www-form-urlencoded'}
post_data = urlencode(post_data)
client.request(method='POST', url='/', body=post_data.encode('utf-8'),
headers=head_dict)
resp = client.getresponse()
content = resp.read().decode('utf-8')
client.close()
print(content)
<|reserved_special_token_1|>
import http.client
from urllib.parse import urlencode
client = http.client.HTTPConnection('127.0.0.1:9000')
post_data = {'usertag': 'test', 'password': '123456', 'code':
"print('Hello Web')"}
head_dict = {'Content-Type': 'application/x-www-form-urlencoded'}
post_data = urlencode(post_data)
client.request(method='POST', url='/', body=post_data.encode('utf-8'),
headers=head_dict)
resp = client.getresponse()
content = resp.read().decode('utf-8')
client.close()
print(content)
<|reserved_special_token_1|>
import http.client
from urllib.parse import urlencode

# Connect to the local test server.
client = http.client.HTTPConnection("127.0.0.1:9000")

# Form fields for the POST request.
post_data = {
    "usertag": "test",
    "password": '123456',
    'code': "print('Hello Web')"
}
head_dict = {'Content-Type': 'application/x-www-form-urlencoded'}

# Form-encode the payload, then send it as the request body.
post_data = urlencode(post_data)
client.request(method="POST", url='/',
               body=post_data.encode('utf-8'),
               headers=head_dict)

# Read the response and release the connection.
resp = client.getresponse()
content = resp.read().decode("utf-8")
client.close()
print(content)
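
# --- Illustrative addition (editor's sketch, not in the original) ---
# The same POST issued via urllib.request from the standard library; it
# reuses the already-encoded post_data and head_dict defined above.
from urllib.request import Request, urlopen

req = Request('http://127.0.0.1:9000/', data=post_data.encode('utf-8'),
              headers=head_dict, method='POST')
with urlopen(req) as resp2:
    print(resp2.read().decode('utf-8'))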
|
flexible
|
{
"blob_id": "ee1ce3ea4b31246703530478d6550b0c8866197e",
"index": 1190,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nclient.request(method='POST', url='/', body=post_data.encode('utf-8'),\n headers=head_dict)\n<mask token>\nclient.close()\nprint(content)\n",
"step-3": "<mask token>\nclient = http.client.HTTPConnection('127.0.0.1:9000')\npost_data = {'usertag': 'test', 'password': '123456', 'code':\n \"print('Hello Web')\"}\nhead_dict = {'Content-Type': 'application/x-www-form-urlencoded'}\npost_data = urlencode(post_data)\nclient.request(method='POST', url='/', body=post_data.encode('utf-8'),\n headers=head_dict)\nresp = client.getresponse()\ncontent = resp.read().decode('utf-8')\nclient.close()\nprint(content)\n",
"step-4": "import http.client\nfrom urllib.parse import urlencode\nclient = http.client.HTTPConnection('127.0.0.1:9000')\npost_data = {'usertag': 'test', 'password': '123456', 'code':\n \"print('Hello Web')\"}\nhead_dict = {'Content-Type': 'application/x-www-form-urlencoded'}\npost_data = urlencode(post_data)\nclient.request(method='POST', url='/', body=post_data.encode('utf-8'),\n headers=head_dict)\nresp = client.getresponse()\ncontent = resp.read().decode('utf-8')\nclient.close()\nprint(content)\n",
"step-5": "import http.client\nfrom urllib.parse import urlencode\nclient = http.client.HTTPConnection(\"127.0.0.1:9000\")\npost_data = {\n \"usertag\": \"test\",\n \"password\": '123456',\n 'code': \"print('Hello Web')\"\n}\nhead_dict = {'Content-Type': 'application/x-www-form-urlencoded'}\npost_data = urlencode(post_data)\nclient.request(method=\"POST\", url='/',\n body=post_data.encode('utf-8'),\n headers=head_dict)\nresp = client.getresponse()\ncontent = resp.read().decode(\"utf-8\")\nclient.close()\nprint(content)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def longest_substring(string1, string2):
mat = np.zeros(shape=(len(string1), len(string2)))
for x in range(len(string1)):
for y in range(len(string2)):
if x == 0 or y == 0:
if string1[x] == string2[y]:
mat[x, y] = 1
elif string1[x] == string2[y]:
mat[x, y] = mat[x - 1, y - 1] + 1
agmx = np.argmax(mat)
iofagmx = np.unravel_index(agmx, mat.shape)
numbofstr = int(np.max(mat))
endstring = string1[iofagmx[0] - numbofstr + 1:iofagmx[0] + 1]
return endstring
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def longest_substring(string1, string2):
mat = np.zeros(shape=(len(string1), len(string2)))
for x in range(len(string1)):
for y in range(len(string2)):
if x == 0 or y == 0:
if string1[x] == string2[y]:
mat[x, y] = 1
elif string1[x] == string2[y]:
mat[x, y] = mat[x - 1, y - 1] + 1
agmx = np.argmax(mat)
iofagmx = np.unravel_index(agmx, mat.shape)
numbofstr = int(np.max(mat))
endstring = string1[iofagmx[0] - numbofstr + 1:iofagmx[0] + 1]
return endstring
if __name__ == '__main__':
assert longest_substring('jsanad', 'anasc') == 'ana'
assert longest_substring('ilovebioinformatics', 'icantwaitformax'
) == 'forma'
assert longest_substring('ironmansaregreat', 'triathlonforever') == 'on'
assert longest_substring('ihatewalking', 'nobikenolife') == 'i'
assert longest_substring('gofaster', 'govegan') == 'go'
<|reserved_special_token_1|>
import numpy as np
def longest_substring(string1, string2):
mat = np.zeros(shape=(len(string1), len(string2)))
for x in range(len(string1)):
for y in range(len(string2)):
if x == 0 or y == 0:
if string1[x] == string2[y]:
mat[x, y] = 1
elif string1[x] == string2[y]:
mat[x, y] = mat[x - 1, y - 1] + 1
agmx = np.argmax(mat)
iofagmx = np.unravel_index(agmx, mat.shape)
numbofstr = int(np.max(mat))
endstring = string1[iofagmx[0] - numbofstr + 1:iofagmx[0] + 1]
return endstring
if __name__ == '__main__':
assert longest_substring('jsanad', 'anasc') == 'ana'
assert longest_substring('ilovebioinformatics', 'icantwaitformax'
) == 'forma'
assert longest_substring('ironmansaregreat', 'triathlonforever') == 'on'
assert longest_substring('ihatewalking', 'nobikenolife') == 'i'
assert longest_substring('gofaster', 'govegan') == 'go'
<|reserved_special_token_1|>
import numpy as np
#1
def longest_substring(string1,string2):
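    # mat[x, y] holds the length of the common substring of string1 and string2
    # ending at positions x and y; the longest run found anywhere is the answer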
mat=np.zeros(shape=(len(string1),len(string2)))
for x in range(len(string1)):
for y in range(len(string2)):
if x==0 or y==0:
if string1[x]==string2[y]:
mat[x,y]=1
else:
if string1[x]==string2[y]:
mat[x,y]=mat[x-1,y-1]+1
agmx=np.argmax(mat)
iofagmx=np.unravel_index(agmx,mat.shape)
numbofstr=int(np.max(mat))
endstring=string1[iofagmx[0]-numbofstr+1:iofagmx[0]+1]
return endstring
if __name__ == '__main__':
assert longest_substring("jsanad","anasc") == "ana"
assert longest_substring("ilovebioinformatics","icantwaitformax") == "forma"
assert longest_substring("ironmansaregreat","triathlonforever") == "on"
assert longest_substring("ihatewalking","nobikenolife") == "i"
assert longest_substring("gofaster","govegan") == "go"
|
flexible
|
{
"blob_id": "6bb7dafea73aff7aca9b0ddc1393e4db6fcf0151",
"index": 4828,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef longest_substring(string1, string2):\n mat = np.zeros(shape=(len(string1), len(string2)))\n for x in range(len(string1)):\n for y in range(len(string2)):\n if x == 0 or y == 0:\n if string1[x] == string2[y]:\n mat[x, y] = 1\n elif string1[x] == string2[y]:\n mat[x, y] = mat[x - 1, y - 1] + 1\n agmx = np.argmax(mat)\n iofagmx = np.unravel_index(agmx, mat.shape)\n numbofstr = int(np.max(mat))\n endstring = string1[iofagmx[0] - numbofstr + 1:iofagmx[0] + 1]\n return endstring\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef longest_substring(string1, string2):\n mat = np.zeros(shape=(len(string1), len(string2)))\n for x in range(len(string1)):\n for y in range(len(string2)):\n if x == 0 or y == 0:\n if string1[x] == string2[y]:\n mat[x, y] = 1\n elif string1[x] == string2[y]:\n mat[x, y] = mat[x - 1, y - 1] + 1\n agmx = np.argmax(mat)\n iofagmx = np.unravel_index(agmx, mat.shape)\n numbofstr = int(np.max(mat))\n endstring = string1[iofagmx[0] - numbofstr + 1:iofagmx[0] + 1]\n return endstring\n\n\nif __name__ == '__main__':\n assert longest_substring('jsanad', 'anasc') == 'ana'\n assert longest_substring('ilovebioinformatics', 'icantwaitformax'\n ) == 'forma'\n assert longest_substring('ironmansaregreat', 'triathlonforever') == 'on'\n assert longest_substring('ihatewalking', 'nobikenolife') == 'i'\n assert longest_substring('gofaster', 'govegan') == 'go'\n",
"step-4": "import numpy as np\n\n\ndef longest_substring(string1, string2):\n mat = np.zeros(shape=(len(string1), len(string2)))\n for x in range(len(string1)):\n for y in range(len(string2)):\n if x == 0 or y == 0:\n if string1[x] == string2[y]:\n mat[x, y] = 1\n elif string1[x] == string2[y]:\n mat[x, y] = mat[x - 1, y - 1] + 1\n agmx = np.argmax(mat)\n iofagmx = np.unravel_index(agmx, mat.shape)\n numbofstr = int(np.max(mat))\n endstring = string1[iofagmx[0] - numbofstr + 1:iofagmx[0] + 1]\n return endstring\n\n\nif __name__ == '__main__':\n assert longest_substring('jsanad', 'anasc') == 'ana'\n assert longest_substring('ilovebioinformatics', 'icantwaitformax'\n ) == 'forma'\n assert longest_substring('ironmansaregreat', 'triathlonforever') == 'on'\n assert longest_substring('ihatewalking', 'nobikenolife') == 'i'\n assert longest_substring('gofaster', 'govegan') == 'go'\n",
"step-5": "import numpy as np\n#1\ndef longest_substring(string1,string2):\n mat=np.zeros(shape=(len(string1),len(string2)))\n for x in range(len(string1)):\n for y in range(len(string2)):\n if x==0 or y==0:\n if string1[x]==string2[y]:\n mat[x,y]=1\n else:\n if string1[x]==string2[y]:\n mat[x,y]=mat[x-1,y-1]+1\n agmx=np.argmax(mat)\n iofagmx=np.unravel_index(agmx,mat.shape)\n numbofstr=int(np.max(mat))\n endstring=string1[iofagmx[0]-numbofstr+1:iofagmx[0]+1]\n return endstring\n \nif __name__ == '__main__':\n assert longest_substring(\"jsanad\",\"anasc\") == \"ana\"\n assert longest_substring(\"ilovebioinformatics\",\"icantwaitformax\") == \"forma\"\n assert longest_substring(\"ironmansaregreat\",\"triathlonforever\") == \"on\"\n assert longest_substring(\"ihatewalking\",\"nobikenolife\") == \"i\"\n assert longest_substring(\"gofaster\",\"govegan\") == \"go\" \n \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def run_main():
"""
Main function to process user input and then generate the description files for each run
:return: exit code -- 0 on success, 1 otherwise
"""
parser = argparse.ArgumentParser(description=
'Scan a run directory and create files to ')
parser.add_argument('--run-directory', dest='run_directory', action=
'store', default='', help='path to directory with xed files to process'
)
args = parser.parse_args(sys.argv[1:])
if not os.path.isdir(args.run_directory):
sys.stderr.write('{0} is not a directory, exiting\n'.format(args.
run_directory))
return 1
run_name = os.path.abspath(args.run_directory)
if os.path.basename(run_name):
run_name = os.path.basename(run_name)
else:
run_name = os.path.split(run_name)[0].split('/')[-1]
if not os.path.exists('info'):
os.mkdir('info')
for directory in os.listdir(args.run_directory):
if not os.path.isdir(os.path.join(args.run_directory, directory)):
continue
csv_filename = 'info/{0}_{1}_files.csv'.format(run_name, directory)
entries = glob.glob(os.path.join(args.run_directory, directory,
'*.xed'))
if len(entries) == 0:
continue
with open(csv_filename, 'w') as file_obj:
csv_writer = csv.writer(file_obj)
csv_writer.writerow(['Run', 'Data Set', 'File'])
for entry in entries:
uri = ('srm://ceph-se.osgconnect.net:8443/srm/v2/' +
'server?SFN=/cephfs/srm/xenon/' + entry.replace(
'/xenon/', ''))
csv_writer.writerow([run_name, directory, uri])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def run_main():
"""
Main function to process user input and then generate the description files for each run
:return: exit code -- 0 on success, 1 otherwise
"""
parser = argparse.ArgumentParser(description=
'Scan a run directory and create files to ')
parser.add_argument('--run-directory', dest='run_directory', action=
'store', default='', help='path to directory with xed files to process'
)
args = parser.parse_args(sys.argv[1:])
if not os.path.isdir(args.run_directory):
sys.stderr.write('{0} is not a directory, exiting\n'.format(args.
run_directory))
return 1
run_name = os.path.abspath(args.run_directory)
if os.path.basename(run_name):
run_name = os.path.basename(run_name)
else:
run_name = os.path.split(run_name)[0].split('/')[-1]
if not os.path.exists('info'):
os.mkdir('info')
for directory in os.listdir(args.run_directory):
if not os.path.isdir(os.path.join(args.run_directory, directory)):
continue
csv_filename = 'info/{0}_{1}_files.csv'.format(run_name, directory)
entries = glob.glob(os.path.join(args.run_directory, directory,
'*.xed'))
if len(entries) == 0:
continue
with open(csv_filename, 'w') as file_obj:
csv_writer = csv.writer(file_obj)
csv_writer.writerow(['Run', 'Data Set', 'File'])
for entry in entries:
uri = ('srm://ceph-se.osgconnect.net:8443/srm/v2/' +
'server?SFN=/cephfs/srm/xenon/' + entry.replace(
'/xenon/', ''))
csv_writer.writerow([run_name, directory, uri])
if __name__ == '__main__':
sys.exit(run_main())
<|reserved_special_token_1|>
import argparse
import csv
import glob
import os
import sys
def run_main():
"""
Main function to process user input and then generate the description files for each run
:return: exit code -- 0 on success, 1 otherwise
"""
parser = argparse.ArgumentParser(description=
'Scan a run directory and create files to ')
parser.add_argument('--run-directory', dest='run_directory', action=
'store', default='', help='path to directory with xed files to process'
)
args = parser.parse_args(sys.argv[1:])
if not os.path.isdir(args.run_directory):
sys.stderr.write('{0} is not a directory, exiting\n'.format(args.
run_directory))
return 1
run_name = os.path.abspath(args.run_directory)
if os.path.basename(run_name):
run_name = os.path.basename(run_name)
else:
run_name = os.path.split(run_name)[0].split('/')[-1]
if not os.path.exists('info'):
os.mkdir('info')
for directory in os.listdir(args.run_directory):
if not os.path.isdir(os.path.join(args.run_directory, directory)):
continue
csv_filename = 'info/{0}_{1}_files.csv'.format(run_name, directory)
entries = glob.glob(os.path.join(args.run_directory, directory,
'*.xed'))
if len(entries) == 0:
continue
with open(csv_filename, 'w') as file_obj:
csv_writer = csv.writer(file_obj)
csv_writer.writerow(['Run', 'Data Set', 'File'])
for entry in entries:
uri = ('srm://ceph-se.osgconnect.net:8443/srm/v2/' +
'server?SFN=/cephfs/srm/xenon/' + entry.replace(
'/xenon/', ''))
csv_writer.writerow([run_name, directory, uri])
if __name__ == '__main__':
sys.exit(run_main())
<|reserved_special_token_1|>
#!/usr/bin/env python
import argparse
import csv
import glob
import os
import sys
def run_main():
"""
Main function to process user input and then generate the description files for each run
:return: exit code -- 0 on success, 1 otherwise
"""
parser = argparse.ArgumentParser(description="Scan a run directory and create files to ")
parser.add_argument('--run-directory', dest='run_directory',
action='store', default='',
help='path to directory with xed files to process')
args = parser.parse_args(sys.argv[1:])
if not os.path.isdir(args.run_directory):
sys.stderr.write("{0} is not a directory, exiting\n".format(args.run_directory))
return 1
run_name = os.path.abspath(args.run_directory)
if os.path.basename(run_name):
run_name = os.path.basename(run_name)
else:
run_name = os.path.split(run_name)[0].split('/')[-1]
if not os.path.exists('info'):
os.mkdir('info')
for directory in os.listdir(args.run_directory):
if not os.path.isdir(os.path.join(args.run_directory, directory)):
continue
csv_filename = "info/{0}_{1}_files.csv".format(run_name, directory)
entries = glob.glob(os.path.join(args.run_directory, directory, '*.xed'))
if len(entries) == 0:
continue
with open(csv_filename, 'w') as file_obj:
csv_writer = csv.writer(file_obj)
csv_writer.writerow(['Run', 'Data Set', 'File'])
for entry in entries:
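                # build the SRM URI for this xed file on the Ceph storage endpoint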
uri = "srm://ceph-se.osgconnect.net:8443/srm/v2/" + \
"server?SFN=/cephfs/srm/xenon/" + \
entry.replace('/xenon/', '')
csv_writer.writerow([run_name, directory, uri])
if __name__ == '__main__':
sys.exit(run_main())
|
flexible
|
{
"blob_id": "6e6c6c5795e8723a86ae5dfc8f40df57d3dd10f7",
"index": 3336,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef run_main():\n \"\"\"\n Main function to process user input and then generate the description files for each run\n\n :return: exit code -- 0 on success, 1 otherwise\n \"\"\"\n parser = argparse.ArgumentParser(description=\n 'Scan a run directory and create files to ')\n parser.add_argument('--run-directory', dest='run_directory', action=\n 'store', default='', help='path to directory with xed files to process'\n )\n args = parser.parse_args(sys.argv[1:])\n if not os.path.isdir(args.run_directory):\n sys.stderr.write('{0} is not a directory, exiting\\n'.format(args.\n run_directory))\n return 1\n run_name = os.path.abspath(args.run_directory)\n if os.path.basename(run_name):\n run_name = os.path.basename(run_name)\n else:\n run_name = os.path.split(run_name)[0].split('/')[-1]\n if not os.path.exists('info'):\n os.mkdir('info')\n for directory in os.listdir(args.run_directory):\n if not os.path.isdir(os.path.join(args.run_directory, directory)):\n continue\n csv_filename = 'info/{0}_{1}_files.csv'.format(run_name, directory)\n entries = glob.glob(os.path.join(args.run_directory, directory,\n '*.xed'))\n if len(entries) == 0:\n continue\n with open(csv_filename, 'w') as file_obj:\n csv_writer = csv.writer(file_obj)\n csv_writer.writerow(['Run', 'Data Set', 'File'])\n for entry in entries:\n uri = ('srm://ceph-se.osgconnect.net:8443/srm/v2/' +\n 'server?SFN=/cephfs/srm/xenon/' + entry.replace(\n '/xenon/', ''))\n csv_writer.writerow([run_name, directory, uri])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef run_main():\n \"\"\"\n Main function to process user input and then generate the description files for each run\n\n :return: exit code -- 0 on success, 1 otherwise\n \"\"\"\n parser = argparse.ArgumentParser(description=\n 'Scan a run directory and create files to ')\n parser.add_argument('--run-directory', dest='run_directory', action=\n 'store', default='', help='path to directory with xed files to process'\n )\n args = parser.parse_args(sys.argv[1:])\n if not os.path.isdir(args.run_directory):\n sys.stderr.write('{0} is not a directory, exiting\\n'.format(args.\n run_directory))\n return 1\n run_name = os.path.abspath(args.run_directory)\n if os.path.basename(run_name):\n run_name = os.path.basename(run_name)\n else:\n run_name = os.path.split(run_name)[0].split('/')[-1]\n if not os.path.exists('info'):\n os.mkdir('info')\n for directory in os.listdir(args.run_directory):\n if not os.path.isdir(os.path.join(args.run_directory, directory)):\n continue\n csv_filename = 'info/{0}_{1}_files.csv'.format(run_name, directory)\n entries = glob.glob(os.path.join(args.run_directory, directory,\n '*.xed'))\n if len(entries) == 0:\n continue\n with open(csv_filename, 'w') as file_obj:\n csv_writer = csv.writer(file_obj)\n csv_writer.writerow(['Run', 'Data Set', 'File'])\n for entry in entries:\n uri = ('srm://ceph-se.osgconnect.net:8443/srm/v2/' +\n 'server?SFN=/cephfs/srm/xenon/' + entry.replace(\n '/xenon/', ''))\n csv_writer.writerow([run_name, directory, uri])\n\n\nif __name__ == '__main__':\n sys.exit(run_main())\n",
"step-4": "import argparse\nimport csv\nimport glob\nimport os\nimport sys\n\n\ndef run_main():\n \"\"\"\n Main function to process user input and then generate the description files for each run\n\n :return: exit code -- 0 on success, 1 otherwise\n \"\"\"\n parser = argparse.ArgumentParser(description=\n 'Scan a run directory and create files to ')\n parser.add_argument('--run-directory', dest='run_directory', action=\n 'store', default='', help='path to directory with xed files to process'\n )\n args = parser.parse_args(sys.argv[1:])\n if not os.path.isdir(args.run_directory):\n sys.stderr.write('{0} is not a directory, exiting\\n'.format(args.\n run_directory))\n return 1\n run_name = os.path.abspath(args.run_directory)\n if os.path.basename(run_name):\n run_name = os.path.basename(run_name)\n else:\n run_name = os.path.split(run_name)[0].split('/')[-1]\n if not os.path.exists('info'):\n os.mkdir('info')\n for directory in os.listdir(args.run_directory):\n if not os.path.isdir(os.path.join(args.run_directory, directory)):\n continue\n csv_filename = 'info/{0}_{1}_files.csv'.format(run_name, directory)\n entries = glob.glob(os.path.join(args.run_directory, directory,\n '*.xed'))\n if len(entries) == 0:\n continue\n with open(csv_filename, 'w') as file_obj:\n csv_writer = csv.writer(file_obj)\n csv_writer.writerow(['Run', 'Data Set', 'File'])\n for entry in entries:\n uri = ('srm://ceph-se.osgconnect.net:8443/srm/v2/' +\n 'server?SFN=/cephfs/srm/xenon/' + entry.replace(\n '/xenon/', ''))\n csv_writer.writerow([run_name, directory, uri])\n\n\nif __name__ == '__main__':\n sys.exit(run_main())\n",
"step-5": "#!/usr/bin/env python\n\nimport argparse\nimport csv\nimport glob\nimport os\nimport sys\n\n\ndef run_main():\n \"\"\"\n Main function to process user input and then generate the description files for each run\n\n :return: exit code -- 0 on success, 1 otherwise\n \"\"\"\n\n parser = argparse.ArgumentParser(description=\"Scan a run directory and create files to \")\n parser.add_argument('--run-directory', dest='run_directory',\n action='store', default='',\n help='path to directory with xed files to process')\n args = parser.parse_args(sys.argv[1:])\n\n if not os.path.isdir(args.run_directory):\n sys.stderr.write(\"{0} is not a directory, exiting\\n\".format(args.run_directory))\n return 1\n run_name = os.path.abspath(args.run_directory)\n\n if os.path.basename(run_name):\n run_name = os.path.basename(run_name)\n else:\n run_name = os.path.split(run_name)[0].split('/')[-1]\n\n if not os.path.exists('info'):\n os.mkdir('info')\n\n for directory in os.listdir(args.run_directory):\n if not os.path.isdir(os.path.join(args.run_directory, directory)):\n continue\n csv_filename = \"info/{0}_{1}_files.csv\".format(run_name, directory)\n entries = glob.glob(os.path.join(args.run_directory, directory, '*.xed'))\n if len(entries) == 0:\n continue\n with open(csv_filename, 'w') as file_obj:\n csv_writer = csv.writer(file_obj)\n csv_writer.writerow(['Run', 'Data Set', 'File'])\n for entry in entries:\n uri = \"srm://ceph-se.osgconnect.net:8443/srm/v2/\" + \\\n \"server?SFN=/cephfs/srm/xenon/\" + \\\n entry.replace('/xenon/', '')\n csv_writer.writerow([run_name, directory, uri])\n\n\nif __name__ == '__main__':\n sys.exit(run_main())\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from typing import Dict, List, Sequence, Iterable, Tuple
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.common.file_utils import cached_path
import logging
from overrides import overrides
import itertools
from allennlp.data.tokenizers import Token
from allennlp.data.fields import ListField, TextField, SequenceLabelField, Field, MetadataField, SpanField, LabelField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
logger = logging.getLogger(__name__)
polar_dict = {
"1": "Ture",
"0": "False"
}
@DatasetReader.register("bertclassification")
class ClassificationReader(DatasetReader):
def __init__(
self,
token_indexers: Dict[str, TokenIndexer] = None,
lazy: bool = False
) -> None:
super().__init__(lazy)
self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
@overrides
def _read(self, file_path: str) -> Iterable[Instance]:
file_path = cached_path(file_path)
with open(file_path, "r") as data_file:
logger.info("Reading instances from lines in file at: %s", file_path)
for line in data_file:
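                # each line is "<label>,<text>"; the text is tokenized character by character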
polar, sent = line.strip().split(",")
tokens = [Token(token) for token in sent]
yield self.text_to_instance(tokens, polar)
def text_to_instance(
self,
tokens:List[Token],
polar
) -> Instance:
sequence = TextField(tokens, self._token_indexers)
instance_fields: Dict[str, Field] = {'tokens': sequence}
instance_fields['label'] = LabelField(polar)
return Instance(instance_fields)
|
normal
|
{
"blob_id": "21172985bf36302f6b0b2101e353d9fbcafb0673",
"index": 6653,
"step-1": "<mask token>\n\n\n@DatasetReader.register('bertclassification')\nclass ClassificationReader(DatasetReader):\n <mask token>\n\n @overrides\n def _read(self, file_path: str) ->Iterable[Instance]:\n file_path = cached_path(file_path)\n with open(file_path, 'r') as data_file:\n logger.info('Reading instances from lines in file at: %s',\n file_path)\n for line in data_file:\n polar, sent = line.strip().split(',')\n tokens = [Token(token) for token in sent]\n yield self.text_to_instance(tokens, polar)\n\n def text_to_instance(self, tokens: List[Token], polar) ->Instance:\n sequence = TextField(tokens, self._token_indexers)\n instance_fields: Dict[str, Field] = {'tokens': sequence}\n instance_fields['label'] = LabelField(polar)\n return Instance(instance_fields)\n",
"step-2": "<mask token>\n\n\n@DatasetReader.register('bertclassification')\nclass ClassificationReader(DatasetReader):\n\n def __init__(self, token_indexers: Dict[str, TokenIndexer]=None, lazy:\n bool=False) ->None:\n super().__init__(lazy)\n self._token_indexers = token_indexers or {'tokens':\n SingleIdTokenIndexer()}\n\n @overrides\n def _read(self, file_path: str) ->Iterable[Instance]:\n file_path = cached_path(file_path)\n with open(file_path, 'r') as data_file:\n logger.info('Reading instances from lines in file at: %s',\n file_path)\n for line in data_file:\n polar, sent = line.strip().split(',')\n tokens = [Token(token) for token in sent]\n yield self.text_to_instance(tokens, polar)\n\n def text_to_instance(self, tokens: List[Token], polar) ->Instance:\n sequence = TextField(tokens, self._token_indexers)\n instance_fields: Dict[str, Field] = {'tokens': sequence}\n instance_fields['label'] = LabelField(polar)\n return Instance(instance_fields)\n",
"step-3": "<mask token>\nlogger = logging.getLogger(__name__)\npolar_dict = {'1': 'Ture', '0': 'False'}\n\n\n@DatasetReader.register('bertclassification')\nclass ClassificationReader(DatasetReader):\n\n def __init__(self, token_indexers: Dict[str, TokenIndexer]=None, lazy:\n bool=False) ->None:\n super().__init__(lazy)\n self._token_indexers = token_indexers or {'tokens':\n SingleIdTokenIndexer()}\n\n @overrides\n def _read(self, file_path: str) ->Iterable[Instance]:\n file_path = cached_path(file_path)\n with open(file_path, 'r') as data_file:\n logger.info('Reading instances from lines in file at: %s',\n file_path)\n for line in data_file:\n polar, sent = line.strip().split(',')\n tokens = [Token(token) for token in sent]\n yield self.text_to_instance(tokens, polar)\n\n def text_to_instance(self, tokens: List[Token], polar) ->Instance:\n sequence = TextField(tokens, self._token_indexers)\n instance_fields: Dict[str, Field] = {'tokens': sequence}\n instance_fields['label'] = LabelField(polar)\n return Instance(instance_fields)\n",
"step-4": "from typing import Dict, List, Sequence, Iterable, Tuple\nfrom allennlp.data.dataset_readers.dataset_reader import DatasetReader\nfrom allennlp.data.instance import Instance\nfrom allennlp.common.file_utils import cached_path\nimport logging\nfrom overrides import overrides\nimport itertools\nfrom allennlp.data.tokenizers import Token\nfrom allennlp.data.fields import ListField, TextField, SequenceLabelField, Field, MetadataField, SpanField, LabelField\nfrom allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer\nlogger = logging.getLogger(__name__)\npolar_dict = {'1': 'Ture', '0': 'False'}\n\n\n@DatasetReader.register('bertclassification')\nclass ClassificationReader(DatasetReader):\n\n def __init__(self, token_indexers: Dict[str, TokenIndexer]=None, lazy:\n bool=False) ->None:\n super().__init__(lazy)\n self._token_indexers = token_indexers or {'tokens':\n SingleIdTokenIndexer()}\n\n @overrides\n def _read(self, file_path: str) ->Iterable[Instance]:\n file_path = cached_path(file_path)\n with open(file_path, 'r') as data_file:\n logger.info('Reading instances from lines in file at: %s',\n file_path)\n for line in data_file:\n polar, sent = line.strip().split(',')\n tokens = [Token(token) for token in sent]\n yield self.text_to_instance(tokens, polar)\n\n def text_to_instance(self, tokens: List[Token], polar) ->Instance:\n sequence = TextField(tokens, self._token_indexers)\n instance_fields: Dict[str, Field] = {'tokens': sequence}\n instance_fields['label'] = LabelField(polar)\n return Instance(instance_fields)\n",
"step-5": "from typing import Dict, List, Sequence, Iterable, Tuple\nfrom allennlp.data.dataset_readers.dataset_reader import DatasetReader\nfrom allennlp.data.instance import Instance\nfrom allennlp.common.file_utils import cached_path\nimport logging\nfrom overrides import overrides\nimport itertools\nfrom allennlp.data.tokenizers import Token\nfrom allennlp.data.fields import ListField, TextField, SequenceLabelField, Field, MetadataField, SpanField, LabelField\nfrom allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer\n\nlogger = logging.getLogger(__name__)\npolar_dict = {\n \"1\": \"Ture\",\n \"0\": \"False\"\n}\n\n@DatasetReader.register(\"bertclassification\")\nclass ClassificationReader(DatasetReader):\n def __init__(\n self,\n token_indexers: Dict[str, TokenIndexer] = None,\n lazy: bool = False\n ) -> None:\n super().__init__(lazy)\n self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}\n \n @overrides\n def _read(self, file_path: str) -> Iterable[Instance]:\n file_path = cached_path(file_path)\n\n with open(file_path, \"r\") as data_file:\n logger.info(\"Reading instances from lines in file at: %s\", file_path)\n for line in data_file:\n polar, sent = line.strip().split(\",\")\n tokens = [Token(token) for token in sent]\n yield self.text_to_instance(tokens, polar)\n \n def text_to_instance(\n self,\n tokens:List[Token],\n polar\n ) -> Instance:\n sequence = TextField(tokens, self._token_indexers)\n instance_fields: Dict[str, Field] = {'tokens': sequence}\n instance_fields['label'] = LabelField(polar)\n return Instance(instance_fields) \n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# ARGS:
# 1: total train reviews
# 2: number of iterations (for csv output)
# 3: size of vector
# 4: good/bad sizes
# import dependencies
from gensim import utils
from gensim.models.doc2vec import LabeledSentence
from gensim.models import Doc2Vec
from matplotlib import pyplot as plt
from sklearn.manifold import TSNE
from sklearn.feature_extraction.text import CountVectorizer
from random import shuffle
from sklearn.linear_model import LogisticRegression
from yelp_labeled_line_sentence import YelpLabeledLineSentence
from imdb_labeled_line_sentence import IMDBLabeledLineSentence
from sklearn.linear_model import SGDClassifier
import numpy
import json
import time
import os
import sys
import csv
dirname = os.path.dirname(__file__)
def compute_accuracy(model, good, bad):
    # allocate feature-vector and label arrays for the 25000 training reviews
train_arrays = numpy.zeros((25000, 400))
train_labels = numpy.zeros(25000)
# create a logistic regression classifier
classifier = LogisticRegression()
# take our train reviews from the model, and put them in array, good reviews first, bad reviews second half of array
    for i in range(25000 // 2):
prefix_train_pos = 'good_' + str(i)
prefix_train_neg = 'bad_' + str(i)
pos_review = model.docvecs[prefix_train_pos]
neg_review = model.docvecs[prefix_train_neg]
train_arrays[i] = pos_review
train_labels[i] = 1
        train_arrays[(25000 // 2) + i] = neg_review
        train_labels[(25000 // 2) + i] = 0
classifier.fit(train_arrays, train_labels)
# take our test reviews from the model, and put them in array, good reviews first, bad reviews second half of array
# for each review, we'll infer the review's vector against our model
test_arrays_good = numpy.zeros((12500, 400))
test_ratings_good = numpy.zeros(12500)
test_labels_good = numpy.zeros(12500)
test_arrays_bad = numpy.zeros((12500, 400))
test_ratings_bad = numpy.zeros(12500)
test_labels_bad = numpy.zeros(12500)
test_arrays = numpy.zeros((25000, 400))
test_rating = numpy.zeros(25000)
test_labels = numpy.zeros(25000)
good_correct = 0
good_total = 0
bad_correct = 0
bad_total = 0
for i, review in enumerate(good):
test_arrays[i] = model.infer_vector(review[0])
test_labels[i] = 1
if(classifier.predict([test_arrays[i]]) == 1):
good_correct += 1
# test_ratings_good[i] = review[1][2]
for i, review in enumerate(bad):
test_arrays[i + 12500] = model.infer_vector(review[0])
test_labels[i + 12500] = 0
if(classifier.predict([test_arrays[i + 12500]]) == 0):
bad_correct += 1
# test_ratings_bad[i] = review[1][2]
# print the accuracy of our classifier
# accuracy=classifier.score(test_arrays_good, test_labels_good) * 100
# print("Classifier reports a {}% accuracy for good reviews".format(accuracy))
#
# accuracy=classifier.score(test_arrays_bad, test_labels_bad) * 100
# print("Classifier reports a {}% accuracy for bad reviews".format(accuracy))
#
accuracy=classifier.score(test_arrays, test_labels) * 100
print("Classifier reports a {}% accuracy".format(accuracy))
print("{} Good correctly identified".format(good_correct))
print("{} Bad correctly identified".format(bad_correct))
# for dim in range(1, int(sys.argv[3])):
# # plot probability of review being good vs feature vector value
# plt.scatter(test_arrays_good[:,dim], classifier.predict_proba(test_arrays_good)[:,1], color='green')
# plt.scatter(test_arrays_bad[:,dim], classifier.predict_proba(test_arrays_bad)[:,1], color='red')
#
# plt.ylabel('Probability of Review Being Good')
# plt.xlabel('dim={}'.format(dim))
# plt.show()
# # reduce the n-dimensional feature vector to n=1 using t-SNE
# tsne = TSNE(n_components=1)
# test_arrays_tsne_good = tsne.fit_transform(test_arrays_good)
# test_arrays_tsne_bad = tsne.fit_transform(test_arrays_bad)
#
# # plot probability of review being good vs feature vector value
# plt.scatter(test_arrays_tsne_good, classifier.predict_proba(test_arrays_good)[:,1], color='green')
# plt.scatter(test_arrays_tsne_bad, classifier.predict_proba(test_arrays_bad)[:,1], color='red')
#
# plt.ylabel('Probability of Review Being Good')
# plt.xlabel('t-SNE reduced feature vector (dim=1)')
# plt.show()
# # reduce the n-dimensional feature vector to n=1 using t-SNE
# tsne = TSNE(n_components=2)
# test_arrays_tsne_good = tsne.fit_transform(test_arrays_good)
# test_arrays_tsne_bad = tsne.fit_transform(test_arrays_bad)
#
# # plot feature vectors against each other
# plt.scatter(test_arrays_tsne_good[:,0], test_arrays_tsne_good[:,1], color='green')
# plt.scatter(test_arrays_tsne_bad[:,0], test_arrays_tsne_bad[:,1], color='red')
#
# plt.ylabel('x1')
# plt.xlabel('x2')
# plt.show()
yelp_model = Doc2Vec.load(os.path.join(dirname,'models/yelp_model.d2v'))
# imdb_model = Doc2Vec.load(os.path.join(dirname,'models/imdb_model.d2v'))
# create an array of LabeledLineSentences for previously unseen
# good and bad reviews
# this does some basic formatting of the text as well to make it more
# digestible by gensim and sklearn
yelp_sources_good = YelpLabeledLineSentence(os.path.join(dirname, '../data/review.json'), 'good', 12500)
yelp_sources_bad = YelpLabeledLineSentence(os.path.join(dirname, '../data/review.json'), 'bad', 12500)
# imdb_sources_good = IMDBLabeledLineSentence({os.path.join(dirname, '../data/aclImdb/test/pos'):'good'})
# imdb_sources_bad = IMDBLabeledLineSentence({os.path.join(dirname, '../data/aclImdb/test/neg'):'bad'})
compute_accuracy(yelp_model, yelp_sources_good, yelp_sources_bad)
# compute_accuracy(imdb_model, imdb_sources_good, imdb_sources_bad)
|
normal
|
{
"blob_id": "95015c467dd6371f575fb5535fe652a914650ef1",
"index": 2016,
"step-1": "<mask token>\n\n\ndef compute_accuracy(model, good, bad):\n train_arrays = numpy.zeros((25000, 400))\n train_labels = numpy.zeros(25000)\n classifier = LogisticRegression()\n for i in range(25000 / 2):\n prefix_train_pos = 'good_' + str(i)\n prefix_train_neg = 'bad_' + str(i)\n pos_review = model.docvecs[prefix_train_pos]\n neg_review = model.docvecs[prefix_train_neg]\n train_arrays[i] = pos_review\n train_labels[i] = 1\n train_arrays[25000 / 2 + i] = neg_review\n train_labels[25000 / 2 + i] = 0\n classifier.fit(train_arrays, train_labels)\n test_arrays_good = numpy.zeros((12500, 400))\n test_ratings_good = numpy.zeros(12500)\n test_labels_good = numpy.zeros(12500)\n test_arrays_bad = numpy.zeros((12500, 400))\n test_ratings_bad = numpy.zeros(12500)\n test_labels_bad = numpy.zeros(12500)\n test_arrays = numpy.zeros((25000, 400))\n test_rating = numpy.zeros(25000)\n test_labels = numpy.zeros(25000)\n good_correct = 0\n good_total = 0\n bad_correct = 0\n bad_total = 0\n for i, review in enumerate(good):\n test_arrays[i] = model.infer_vector(review[0])\n test_labels[i] = 1\n if classifier.predict([test_arrays[i]]) == 1:\n good_correct += 1\n for i, review in enumerate(bad):\n test_arrays[i + 12500] = model.infer_vector(review[0])\n test_labels[i + 12500] = 0\n if classifier.predict([test_arrays[i + 12500]]) == 0:\n bad_correct += 1\n accuracy = classifier.score(test_arrays, test_labels) * 100\n print('Classifier reports a {}% accuracy'.format(accuracy))\n print('{} Good correctly identified'.format(good_correct))\n print('{} Bad correctly identified'.format(bad_correct))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef compute_accuracy(model, good, bad):\n train_arrays = numpy.zeros((25000, 400))\n train_labels = numpy.zeros(25000)\n classifier = LogisticRegression()\n for i in range(25000 / 2):\n prefix_train_pos = 'good_' + str(i)\n prefix_train_neg = 'bad_' + str(i)\n pos_review = model.docvecs[prefix_train_pos]\n neg_review = model.docvecs[prefix_train_neg]\n train_arrays[i] = pos_review\n train_labels[i] = 1\n train_arrays[25000 / 2 + i] = neg_review\n train_labels[25000 / 2 + i] = 0\n classifier.fit(train_arrays, train_labels)\n test_arrays_good = numpy.zeros((12500, 400))\n test_ratings_good = numpy.zeros(12500)\n test_labels_good = numpy.zeros(12500)\n test_arrays_bad = numpy.zeros((12500, 400))\n test_ratings_bad = numpy.zeros(12500)\n test_labels_bad = numpy.zeros(12500)\n test_arrays = numpy.zeros((25000, 400))\n test_rating = numpy.zeros(25000)\n test_labels = numpy.zeros(25000)\n good_correct = 0\n good_total = 0\n bad_correct = 0\n bad_total = 0\n for i, review in enumerate(good):\n test_arrays[i] = model.infer_vector(review[0])\n test_labels[i] = 1\n if classifier.predict([test_arrays[i]]) == 1:\n good_correct += 1\n for i, review in enumerate(bad):\n test_arrays[i + 12500] = model.infer_vector(review[0])\n test_labels[i + 12500] = 0\n if classifier.predict([test_arrays[i + 12500]]) == 0:\n bad_correct += 1\n accuracy = classifier.score(test_arrays, test_labels) * 100\n print('Classifier reports a {}% accuracy'.format(accuracy))\n print('{} Good correctly identified'.format(good_correct))\n print('{} Bad correctly identified'.format(bad_correct))\n\n\n<mask token>\ncompute_accuracy(yelp_model, yelp_sources_good, yelp_sources_bad)\n",
"step-3": "<mask token>\ndirname = os.path.dirname(__file__)\n\n\ndef compute_accuracy(model, good, bad):\n train_arrays = numpy.zeros((25000, 400))\n train_labels = numpy.zeros(25000)\n classifier = LogisticRegression()\n for i in range(25000 / 2):\n prefix_train_pos = 'good_' + str(i)\n prefix_train_neg = 'bad_' + str(i)\n pos_review = model.docvecs[prefix_train_pos]\n neg_review = model.docvecs[prefix_train_neg]\n train_arrays[i] = pos_review\n train_labels[i] = 1\n train_arrays[25000 / 2 + i] = neg_review\n train_labels[25000 / 2 + i] = 0\n classifier.fit(train_arrays, train_labels)\n test_arrays_good = numpy.zeros((12500, 400))\n test_ratings_good = numpy.zeros(12500)\n test_labels_good = numpy.zeros(12500)\n test_arrays_bad = numpy.zeros((12500, 400))\n test_ratings_bad = numpy.zeros(12500)\n test_labels_bad = numpy.zeros(12500)\n test_arrays = numpy.zeros((25000, 400))\n test_rating = numpy.zeros(25000)\n test_labels = numpy.zeros(25000)\n good_correct = 0\n good_total = 0\n bad_correct = 0\n bad_total = 0\n for i, review in enumerate(good):\n test_arrays[i] = model.infer_vector(review[0])\n test_labels[i] = 1\n if classifier.predict([test_arrays[i]]) == 1:\n good_correct += 1\n for i, review in enumerate(bad):\n test_arrays[i + 12500] = model.infer_vector(review[0])\n test_labels[i + 12500] = 0\n if classifier.predict([test_arrays[i + 12500]]) == 0:\n bad_correct += 1\n accuracy = classifier.score(test_arrays, test_labels) * 100\n print('Classifier reports a {}% accuracy'.format(accuracy))\n print('{} Good correctly identified'.format(good_correct))\n print('{} Bad correctly identified'.format(bad_correct))\n\n\nyelp_model = Doc2Vec.load(os.path.join(dirname, 'models/yelp_model.d2v'))\nyelp_sources_good = YelpLabeledLineSentence(os.path.join(dirname,\n '../data/review.json'), 'good', 12500)\nyelp_sources_bad = YelpLabeledLineSentence(os.path.join(dirname,\n '../data/review.json'), 'bad', 12500)\ncompute_accuracy(yelp_model, yelp_sources_good, yelp_sources_bad)\n",
"step-4": "from gensim import utils\nfrom gensim.models.doc2vec import LabeledSentence\nfrom gensim.models import Doc2Vec\nfrom matplotlib import pyplot as plt\nfrom sklearn.manifold import TSNE\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom random import shuffle\nfrom sklearn.linear_model import LogisticRegression\nfrom yelp_labeled_line_sentence import YelpLabeledLineSentence\nfrom imdb_labeled_line_sentence import IMDBLabeledLineSentence\nfrom sklearn.linear_model import SGDClassifier\nimport numpy\nimport json\nimport time\nimport os\nimport sys\nimport csv\ndirname = os.path.dirname(__file__)\n\n\ndef compute_accuracy(model, good, bad):\n train_arrays = numpy.zeros((25000, 400))\n train_labels = numpy.zeros(25000)\n classifier = LogisticRegression()\n for i in range(25000 / 2):\n prefix_train_pos = 'good_' + str(i)\n prefix_train_neg = 'bad_' + str(i)\n pos_review = model.docvecs[prefix_train_pos]\n neg_review = model.docvecs[prefix_train_neg]\n train_arrays[i] = pos_review\n train_labels[i] = 1\n train_arrays[25000 / 2 + i] = neg_review\n train_labels[25000 / 2 + i] = 0\n classifier.fit(train_arrays, train_labels)\n test_arrays_good = numpy.zeros((12500, 400))\n test_ratings_good = numpy.zeros(12500)\n test_labels_good = numpy.zeros(12500)\n test_arrays_bad = numpy.zeros((12500, 400))\n test_ratings_bad = numpy.zeros(12500)\n test_labels_bad = numpy.zeros(12500)\n test_arrays = numpy.zeros((25000, 400))\n test_rating = numpy.zeros(25000)\n test_labels = numpy.zeros(25000)\n good_correct = 0\n good_total = 0\n bad_correct = 0\n bad_total = 0\n for i, review in enumerate(good):\n test_arrays[i] = model.infer_vector(review[0])\n test_labels[i] = 1\n if classifier.predict([test_arrays[i]]) == 1:\n good_correct += 1\n for i, review in enumerate(bad):\n test_arrays[i + 12500] = model.infer_vector(review[0])\n test_labels[i + 12500] = 0\n if classifier.predict([test_arrays[i + 12500]]) == 0:\n bad_correct += 1\n accuracy = classifier.score(test_arrays, test_labels) * 100\n print('Classifier reports a {}% accuracy'.format(accuracy))\n print('{} Good correctly identified'.format(good_correct))\n print('{} Bad correctly identified'.format(bad_correct))\n\n\nyelp_model = Doc2Vec.load(os.path.join(dirname, 'models/yelp_model.d2v'))\nyelp_sources_good = YelpLabeledLineSentence(os.path.join(dirname,\n '../data/review.json'), 'good', 12500)\nyelp_sources_bad = YelpLabeledLineSentence(os.path.join(dirname,\n '../data/review.json'), 'bad', 12500)\ncompute_accuracy(yelp_model, yelp_sources_good, yelp_sources_bad)\n",
"step-5": "# ARGS:\n# 1: total train reviews\n# 2: number of iterations (for csv output)\n# 3: size of vector\n# 4: good/bad sizes\n\n# import dependencies\nfrom gensim import utils\nfrom gensim.models.doc2vec import LabeledSentence\nfrom gensim.models import Doc2Vec\nfrom matplotlib import pyplot as plt\nfrom sklearn.manifold import TSNE\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom random import shuffle\nfrom sklearn.linear_model import LogisticRegression\nfrom yelp_labeled_line_sentence import YelpLabeledLineSentence\nfrom imdb_labeled_line_sentence import IMDBLabeledLineSentence\nfrom sklearn.linear_model import SGDClassifier\nimport numpy\nimport json\nimport time\nimport os\nimport sys\nimport csv\n\ndirname = os.path.dirname(__file__)\n\ndef compute_accuracy(model, good, bad):\n # load our doc2vec model that we trained\n\n\n # take our train reviews from the model, and put them in array, good reviews first, bad reviews second half of array\n train_arrays = numpy.zeros((25000, 400))\n train_labels = numpy.zeros(25000)\n\n # create a logistic regression classifier\n classifier = LogisticRegression()\n\n # take our train reviews from the model, and put them in array, good reviews first, bad reviews second half of array\n for i in range((25000/2)):\n prefix_train_pos = 'good_' + str(i)\n prefix_train_neg = 'bad_' + str(i)\n\n pos_review = model.docvecs[prefix_train_pos]\n neg_review = model.docvecs[prefix_train_neg]\n\n train_arrays[i] = pos_review\n train_labels[i] = 1\n\n train_arrays[(25000/2) + i] = neg_review\n train_labels[(25000/2) + i] = 0\n\n classifier.fit(train_arrays, train_labels)\n\n\n # take our test reviews from the model, and put them in array, good reviews first, bad reviews second half of array\n # for each review, we'll infer the review's vector against our model\n\n test_arrays_good = numpy.zeros((12500, 400))\n test_ratings_good = numpy.zeros(12500)\n test_labels_good = numpy.zeros(12500)\n\n test_arrays_bad = numpy.zeros((12500, 400))\n test_ratings_bad = numpy.zeros(12500)\n test_labels_bad = numpy.zeros(12500)\n\n test_arrays = numpy.zeros((25000, 400))\n test_rating = numpy.zeros(25000)\n test_labels = numpy.zeros(25000)\n\n good_correct = 0\n good_total = 0\n bad_correct = 0\n bad_total = 0\n\n for i, review in enumerate(good):\n test_arrays[i] = model.infer_vector(review[0])\n test_labels[i] = 1\n if(classifier.predict([test_arrays[i]]) == 1):\n good_correct += 1\n # test_ratings_good[i] = review[1][2]\n\n for i, review in enumerate(bad):\n test_arrays[i + 12500] = model.infer_vector(review[0])\n test_labels[i + 12500] = 0\n if(classifier.predict([test_arrays[i + 12500]]) == 0):\n bad_correct += 1\n\n # test_ratings_bad[i] = review[1][2]\n\n # print the accuracy of our classifier\n # accuracy=classifier.score(test_arrays_good, test_labels_good) * 100\n # print(\"Classifier reports a {}% accuracy for good reviews\".format(accuracy))\n #\n # accuracy=classifier.score(test_arrays_bad, test_labels_bad) * 100\n # print(\"Classifier reports a {}% accuracy for bad reviews\".format(accuracy))\n #\n accuracy=classifier.score(test_arrays, test_labels) * 100\n print(\"Classifier reports a {}% accuracy\".format(accuracy))\n\n\n print(\"{} Good correctly identified\".format(good_correct))\n print(\"{} Bad correctly identified\".format(bad_correct))\n\n # for dim in range(1, int(sys.argv[3])):\n # # plot probability of review being good vs feature vector value\n # plt.scatter(test_arrays_good[:,dim], classifier.predict_proba(test_arrays_good)[:,1], 
color='green')\n # plt.scatter(test_arrays_bad[:,dim], classifier.predict_proba(test_arrays_bad)[:,1], color='red')\n #\n # plt.ylabel('Probability of Review Being Good')\n # plt.xlabel('dim={}'.format(dim))\n # plt.show()\n\n # # reduce the n-dimensional feature vector to n=1 using t-SNE\n # tsne = TSNE(n_components=1)\n # test_arrays_tsne_good = tsne.fit_transform(test_arrays_good)\n # test_arrays_tsne_bad = tsne.fit_transform(test_arrays_bad)\n #\n # # plot probability of review being good vs feature vector value\n # plt.scatter(test_arrays_tsne_good, classifier.predict_proba(test_arrays_good)[:,1], color='green')\n # plt.scatter(test_arrays_tsne_bad, classifier.predict_proba(test_arrays_bad)[:,1], color='red')\n #\n # plt.ylabel('Probability of Review Being Good')\n # plt.xlabel('t-SNE reduced feature vector (dim=1)')\n # plt.show()\n\n # # reduce the n-dimensional feature vector to n=1 using t-SNE\n # tsne = TSNE(n_components=2)\n # test_arrays_tsne_good = tsne.fit_transform(test_arrays_good)\n # test_arrays_tsne_bad = tsne.fit_transform(test_arrays_bad)\n #\n # # plot feature vectors against each other\n # plt.scatter(test_arrays_tsne_good[:,0], test_arrays_tsne_good[:,1], color='green')\n # plt.scatter(test_arrays_tsne_bad[:,0], test_arrays_tsne_bad[:,1], color='red')\n #\n # plt.ylabel('x1')\n # plt.xlabel('x2')\n # plt.show()\n\n\nyelp_model = Doc2Vec.load(os.path.join(dirname,'models/yelp_model.d2v'))\n# imdb_model = Doc2Vec.load(os.path.join(dirname,'models/imdb_model.d2v'))\n\n# create an array of LabeledLineSentences for previously unseen\n# good and bad reviews\n# this does some basic formatting of the text as well to make it more\n# digestible by gensim and sklearn\nyelp_sources_good = YelpLabeledLineSentence(os.path.join(dirname, '../data/review.json'), 'good', 12500)\nyelp_sources_bad = YelpLabeledLineSentence(os.path.join(dirname, '../data/review.json'), 'bad', 12500)\n\n# imdb_sources_good = IMDBLabeledLineSentence({os.path.join(dirname, '../data/aclImdb/test/pos'):'good'})\n# imdb_sources_bad = IMDBLabeledLineSentence({os.path.join(dirname, '../data/aclImdb/test/neg'):'bad'})\n\ncompute_accuracy(yelp_model, yelp_sources_good, yelp_sources_bad)\n# compute_accuracy(imdb_model, imdb_sources_good, imdb_sources_bad)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for _ in stack_numbers:
stacks.append([])
for line in stacks_input_lines[:-1]:
for stack_index, i in enumerate(range(1, len(line), 4)):
crate = line[i]
if crate != ' ':
stacks[stack_index].insert(0, crate)
for instruction in instructions.strip().split('\n'):
_move, crate_count, _from, from_stack_index, _to, to_stack_index = (
instruction.split())
crate_count = int(crate_count)
from_stack_index = int(from_stack_index) - 1
to_stack_index = int(to_stack_index) - 1
crates = stacks[from_stack_index][-crate_count:]
stacks[from_stack_index] = stacks[from_stack_index][:-crate_count]
stacks[to_stack_index].extend(reversed(crates))
<|reserved_special_token_0|>
for stack in stacks:
result += stack[-1]
print(result)
<|reserved_special_token_1|>
input = open('input').read()
stacks_input, instructions = input.split('\n\n')
stacks_input_lines = stacks_input.split('\n')
stack_numbers = map(int, stacks_input_lines[-1].split())
stacks = []
for _ in stack_numbers:
stacks.append([])
for line in stacks_input_lines[:-1]:
for stack_index, i in enumerate(range(1, len(line), 4)):
crate = line[i]
if crate != ' ':
stacks[stack_index].insert(0, crate)
for instruction in instructions.strip().split('\n'):
_move, crate_count, _from, from_stack_index, _to, to_stack_index = (
instruction.split())
crate_count = int(crate_count)
from_stack_index = int(from_stack_index) - 1
to_stack_index = int(to_stack_index) - 1
crates = stacks[from_stack_index][-crate_count:]
stacks[from_stack_index] = stacks[from_stack_index][:-crate_count]
stacks[to_stack_index].extend(reversed(crates))
result = ''
for stack in stacks:
result += stack[-1]
print(result)
<|reserved_special_token_1|>
input = open('input').read()
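# the input has two sections separated by a blank line: the crate-stack drawing
# and the list of "move <n> from <a> to <b>" instructions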
stacks_input, instructions = input.split('\n\n')
stacks_input_lines = stacks_input.split('\n')
stack_numbers = map(int, stacks_input_lines[-1].split())
stacks = []
for _ in stack_numbers:
stacks.append([])
for line in stacks_input_lines[:-1]:
for stack_index, i in enumerate(range(1, len(line), 4)):
crate = line[i]
if crate != ' ':
stacks[stack_index].insert(0, crate)
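# crates move one at a time, so a multi-crate move arrives on the target stack
# in reverse order (hence reversed() below)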
for instruction in instructions.strip().split('\n'):
_move, crate_count, _from, from_stack_index, _to, to_stack_index = instruction.split()
crate_count = int(crate_count)
from_stack_index = int(from_stack_index) - 1
to_stack_index = int(to_stack_index) - 1
crates = stacks[from_stack_index][-crate_count:]
stacks[from_stack_index] = stacks[from_stack_index][:-crate_count]
stacks[to_stack_index].extend(reversed(crates))
result = ''
for stack in stacks:
result += stack[-1]
print(result)
|
flexible
|
{
"blob_id": "4927a440093e822250af25dfd6a2ce62d7cc099e",
"index": 8786,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _ in stack_numbers:\n stacks.append([])\nfor line in stacks_input_lines[:-1]:\n for stack_index, i in enumerate(range(1, len(line), 4)):\n crate = line[i]\n if crate != ' ':\n stacks[stack_index].insert(0, crate)\nfor instruction in instructions.strip().split('\\n'):\n _move, crate_count, _from, from_stack_index, _to, to_stack_index = (\n instruction.split())\n crate_count = int(crate_count)\n from_stack_index = int(from_stack_index) - 1\n to_stack_index = int(to_stack_index) - 1\n crates = stacks[from_stack_index][-crate_count:]\n stacks[from_stack_index] = stacks[from_stack_index][:-crate_count]\n stacks[to_stack_index].extend(reversed(crates))\n<mask token>\nfor stack in stacks:\n result += stack[-1]\nprint(result)\n",
"step-3": "input = open('input').read()\nstacks_input, instructions = input.split('\\n\\n')\nstacks_input_lines = stacks_input.split('\\n')\nstack_numbers = map(int, stacks_input_lines[-1].split())\nstacks = []\nfor _ in stack_numbers:\n stacks.append([])\nfor line in stacks_input_lines[:-1]:\n for stack_index, i in enumerate(range(1, len(line), 4)):\n crate = line[i]\n if crate != ' ':\n stacks[stack_index].insert(0, crate)\nfor instruction in instructions.strip().split('\\n'):\n _move, crate_count, _from, from_stack_index, _to, to_stack_index = (\n instruction.split())\n crate_count = int(crate_count)\n from_stack_index = int(from_stack_index) - 1\n to_stack_index = int(to_stack_index) - 1\n crates = stacks[from_stack_index][-crate_count:]\n stacks[from_stack_index] = stacks[from_stack_index][:-crate_count]\n stacks[to_stack_index].extend(reversed(crates))\nresult = ''\nfor stack in stacks:\n result += stack[-1]\nprint(result)\n",
"step-4": "input = open('input').read()\n\nstacks_input, instructions = input.split('\\n\\n')\nstacks_input_lines = stacks_input.split('\\n')\nstack_numbers = map(int, stacks_input_lines[-1].split())\nstacks = []\nfor _ in stack_numbers:\n stacks.append([])\nfor line in stacks_input_lines[:-1]:\n for stack_index, i in enumerate(range(1, len(line), 4)):\n crate = line[i]\n if crate != ' ':\n stacks[stack_index].insert(0, crate)\n\nfor instruction in instructions.strip().split('\\n'):\n _move, crate_count, _from, from_stack_index, _to, to_stack_index = instruction.split()\n crate_count = int(crate_count)\n from_stack_index = int(from_stack_index) - 1\n to_stack_index = int(to_stack_index) - 1\n crates = stacks[from_stack_index][-crate_count:]\n stacks[from_stack_index] = stacks[from_stack_index][:-crate_count]\n stacks[to_stack_index].extend(reversed(crates))\n\nresult = ''\nfor stack in stacks:\n result += stack[-1]\n\nprint(result)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def test_relative_path(session_app_data, monkeypatch):
sys_executable = Path(PythonInfo.current_system(app_data=
session_app_data).system_executable)
cwd = sys_executable.parents[1]
monkeypatch.chdir(str(cwd))
relative = str(sys_executable.relative_to(cwd))
result = get_interpreter(relative, [], session_app_data)
assert result is not None
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.mark.skipif(not fs_supports_symlink(), reason='symlink not supported')
@pytest.mark.parametrize('case', ['mixed', 'lower', 'upper'])
def test_discovery_via_path(monkeypatch, case, tmp_path, caplog,
session_app_data):
caplog.set_level(logging.DEBUG)
current = PythonInfo.current_system(session_app_data)
core = (
f"somethingVeryCryptic{'.'.join(str(i) for i in current.version_info[0:3])}"
)
name = 'somethingVeryCryptic'
if case == 'lower':
name = name.lower()
elif case == 'upper':
name = name.upper()
exe_name = (
f"{name}{current.version_info.major}{'.exe' if sys.platform == 'win32' else ''}"
)
target = tmp_path / current.install_path('scripts')
target.mkdir(parents=True)
executable = target / exe_name
os.symlink(sys.executable, str(executable))
pyvenv_cfg = Path(sys.executable).parents[1] / 'pyvenv.cfg'
if pyvenv_cfg.exists():
(target / pyvenv_cfg.name).write_bytes(pyvenv_cfg.read_bytes())
new_path = os.pathsep.join([str(target), *os.environ.get('PATH', '').
split(os.pathsep)])
monkeypatch.setenv('PATH', new_path)
interpreter = get_interpreter(core, [])
assert interpreter is not None
def test_discovery_via_path_not_found(tmp_path, monkeypatch):
monkeypatch.setenv('PATH', str(tmp_path))
interpreter = get_interpreter(uuid4().hex, [])
assert interpreter is None
def test_relative_path(session_app_data, monkeypatch):
sys_executable = Path(PythonInfo.current_system(app_data=
session_app_data).system_executable)
cwd = sys_executable.parents[1]
monkeypatch.chdir(str(cwd))
relative = str(sys_executable.relative_to(cwd))
result = get_interpreter(relative, [], session_app_data)
assert result is not None
<|reserved_special_token_0|>
def test_discovery_fallback_ok(session_app_data, caplog):
caplog.set_level(logging.DEBUG)
builtin = Builtin(Namespace(app_data=session_app_data, try_first_with=[
], python=['magic-one', sys.executable], env=os.environ))
result = builtin.run()
assert result is not None, caplog.text
assert result.executable == sys.executable, caplog.text
assert 'accepted' in caplog.text
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.mark.skipif(not fs_supports_symlink(), reason='symlink not supported')
@pytest.mark.parametrize('case', ['mixed', 'lower', 'upper'])
def test_discovery_via_path(monkeypatch, case, tmp_path, caplog,
session_app_data):
caplog.set_level(logging.DEBUG)
current = PythonInfo.current_system(session_app_data)
core = (
f"somethingVeryCryptic{'.'.join(str(i) for i in current.version_info[0:3])}"
)
name = 'somethingVeryCryptic'
if case == 'lower':
name = name.lower()
elif case == 'upper':
name = name.upper()
exe_name = (
f"{name}{current.version_info.major}{'.exe' if sys.platform == 'win32' else ''}"
)
target = tmp_path / current.install_path('scripts')
target.mkdir(parents=True)
executable = target / exe_name
os.symlink(sys.executable, str(executable))
pyvenv_cfg = Path(sys.executable).parents[1] / 'pyvenv.cfg'
if pyvenv_cfg.exists():
(target / pyvenv_cfg.name).write_bytes(pyvenv_cfg.read_bytes())
new_path = os.pathsep.join([str(target), *os.environ.get('PATH', '').
split(os.pathsep)])
monkeypatch.setenv('PATH', new_path)
interpreter = get_interpreter(core, [])
assert interpreter is not None
def test_discovery_via_path_not_found(tmp_path, monkeypatch):
monkeypatch.setenv('PATH', str(tmp_path))
interpreter = get_interpreter(uuid4().hex, [])
assert interpreter is None
def test_relative_path(session_app_data, monkeypatch):
sys_executable = Path(PythonInfo.current_system(app_data=
session_app_data).system_executable)
cwd = sys_executable.parents[1]
monkeypatch.chdir(str(cwd))
relative = str(sys_executable.relative_to(cwd))
result = get_interpreter(relative, [], session_app_data)
assert result is not None
def test_discovery_fallback_fail(session_app_data, caplog):
caplog.set_level(logging.DEBUG)
builtin = Builtin(Namespace(app_data=session_app_data, try_first_with=[
], python=['magic-one', 'magic-two'], env=os.environ))
result = builtin.run()
assert result is None
assert 'accepted' not in caplog.text
def test_discovery_fallback_ok(session_app_data, caplog):
caplog.set_level(logging.DEBUG)
builtin = Builtin(Namespace(app_data=session_app_data, try_first_with=[
], python=['magic-one', sys.executable], env=os.environ))
result = builtin.run()
assert result is not None, caplog.text
assert result.executable == sys.executable, caplog.text
assert 'accepted' in caplog.text
<|reserved_special_token_1|>
from __future__ import annotations
import logging
import os
import sys
from argparse import Namespace
from pathlib import Path
from uuid import uuid4
import pytest
from virtualenv.discovery.builtin import Builtin, get_interpreter
from virtualenv.discovery.py_info import PythonInfo
from virtualenv.info import fs_supports_symlink
@pytest.mark.skipif(not fs_supports_symlink(), reason='symlink not supported')
@pytest.mark.parametrize('case', ['mixed', 'lower', 'upper'])
def test_discovery_via_path(monkeypatch, case, tmp_path, caplog,
session_app_data):
caplog.set_level(logging.DEBUG)
current = PythonInfo.current_system(session_app_data)
core = (
f"somethingVeryCryptic{'.'.join(str(i) for i in current.version_info[0:3])}"
)
name = 'somethingVeryCryptic'
if case == 'lower':
name = name.lower()
elif case == 'upper':
name = name.upper()
exe_name = (
f"{name}{current.version_info.major}{'.exe' if sys.platform == 'win32' else ''}"
)
target = tmp_path / current.install_path('scripts')
target.mkdir(parents=True)
executable = target / exe_name
os.symlink(sys.executable, str(executable))
pyvenv_cfg = Path(sys.executable).parents[1] / 'pyvenv.cfg'
if pyvenv_cfg.exists():
(target / pyvenv_cfg.name).write_bytes(pyvenv_cfg.read_bytes())
new_path = os.pathsep.join([str(target), *os.environ.get('PATH', '').
split(os.pathsep)])
monkeypatch.setenv('PATH', new_path)
interpreter = get_interpreter(core, [])
assert interpreter is not None
def test_discovery_via_path_not_found(tmp_path, monkeypatch):
monkeypatch.setenv('PATH', str(tmp_path))
interpreter = get_interpreter(uuid4().hex, [])
assert interpreter is None
def test_relative_path(session_app_data, monkeypatch):
sys_executable = Path(PythonInfo.current_system(app_data=
session_app_data).system_executable)
cwd = sys_executable.parents[1]
monkeypatch.chdir(str(cwd))
relative = str(sys_executable.relative_to(cwd))
result = get_interpreter(relative, [], session_app_data)
assert result is not None
def test_discovery_fallback_fail(session_app_data, caplog):
caplog.set_level(logging.DEBUG)
builtin = Builtin(Namespace(app_data=session_app_data, try_first_with=[
], python=['magic-one', 'magic-two'], env=os.environ))
result = builtin.run()
assert result is None
assert 'accepted' not in caplog.text
def test_discovery_fallback_ok(session_app_data, caplog):
caplog.set_level(logging.DEBUG)
builtin = Builtin(Namespace(app_data=session_app_data, try_first_with=[
], python=['magic-one', sys.executable], env=os.environ))
result = builtin.run()
assert result is not None, caplog.text
assert result.executable == sys.executable, caplog.text
assert 'accepted' in caplog.text
<|reserved_special_token_1|>
from __future__ import annotations
import logging
import os
import sys
from argparse import Namespace
from pathlib import Path
from uuid import uuid4
import pytest
from virtualenv.discovery.builtin import Builtin, get_interpreter
from virtualenv.discovery.py_info import PythonInfo
from virtualenv.info import fs_supports_symlink
@pytest.mark.skipif(not fs_supports_symlink(), reason="symlink not supported")
@pytest.mark.parametrize("case", ["mixed", "lower", "upper"])
def test_discovery_via_path(monkeypatch, case, tmp_path, caplog, session_app_data):
caplog.set_level(logging.DEBUG)
current = PythonInfo.current_system(session_app_data)
core = f"somethingVeryCryptic{'.'.join(str(i) for i in current.version_info[0:3])}"
name = "somethingVeryCryptic"
if case == "lower":
name = name.lower()
elif case == "upper":
name = name.upper()
exe_name = f"{name}{current.version_info.major}{'.exe' if sys.platform == 'win32' else ''}"
target = tmp_path / current.install_path("scripts")
target.mkdir(parents=True)
executable = target / exe_name
os.symlink(sys.executable, str(executable))
pyvenv_cfg = Path(sys.executable).parents[1] / "pyvenv.cfg"
if pyvenv_cfg.exists():
(target / pyvenv_cfg.name).write_bytes(pyvenv_cfg.read_bytes())
new_path = os.pathsep.join([str(target), *os.environ.get("PATH", "").split(os.pathsep)])
monkeypatch.setenv("PATH", new_path)
interpreter = get_interpreter(core, [])
assert interpreter is not None
def test_discovery_via_path_not_found(tmp_path, monkeypatch):
monkeypatch.setenv("PATH", str(tmp_path))
interpreter = get_interpreter(uuid4().hex, [])
assert interpreter is None
def test_relative_path(session_app_data, monkeypatch):
sys_executable = Path(PythonInfo.current_system(app_data=session_app_data).system_executable)
cwd = sys_executable.parents[1]
monkeypatch.chdir(str(cwd))
relative = str(sys_executable.relative_to(cwd))
result = get_interpreter(relative, [], session_app_data)
assert result is not None
def test_discovery_fallback_fail(session_app_data, caplog):
caplog.set_level(logging.DEBUG)
builtin = Builtin(
Namespace(app_data=session_app_data, try_first_with=[], python=["magic-one", "magic-two"], env=os.environ),
)
result = builtin.run()
assert result is None
assert "accepted" not in caplog.text
def test_discovery_fallback_ok(session_app_data, caplog):
caplog.set_level(logging.DEBUG)
builtin = Builtin(
Namespace(app_data=session_app_data, try_first_with=[], python=["magic-one", sys.executable], env=os.environ),
)
result = builtin.run()
assert result is not None, caplog.text
assert result.executable == sys.executable, caplog.text
assert "accepted" in caplog.text
|
flexible
|
{
"blob_id": "55d4f4bba2b72ec93cb883527d2a9c2ebe8ec337",
"index": 4910,
"step-1": "<mask token>\n\n\ndef test_relative_path(session_app_data, monkeypatch):\n sys_executable = Path(PythonInfo.current_system(app_data=\n session_app_data).system_executable)\n cwd = sys_executable.parents[1]\n monkeypatch.chdir(str(cwd))\n relative = str(sys_executable.relative_to(cwd))\n result = get_interpreter(relative, [], session_app_data)\n assert result is not None\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@pytest.mark.skipif(not fs_supports_symlink(), reason='symlink not supported')\n@pytest.mark.parametrize('case', ['mixed', 'lower', 'upper'])\ndef test_discovery_via_path(monkeypatch, case, tmp_path, caplog,\n session_app_data):\n caplog.set_level(logging.DEBUG)\n current = PythonInfo.current_system(session_app_data)\n core = (\n f\"somethingVeryCryptic{'.'.join(str(i) for i in current.version_info[0:3])}\"\n )\n name = 'somethingVeryCryptic'\n if case == 'lower':\n name = name.lower()\n elif case == 'upper':\n name = name.upper()\n exe_name = (\n f\"{name}{current.version_info.major}{'.exe' if sys.platform == 'win32' else ''}\"\n )\n target = tmp_path / current.install_path('scripts')\n target.mkdir(parents=True)\n executable = target / exe_name\n os.symlink(sys.executable, str(executable))\n pyvenv_cfg = Path(sys.executable).parents[1] / 'pyvenv.cfg'\n if pyvenv_cfg.exists():\n (target / pyvenv_cfg.name).write_bytes(pyvenv_cfg.read_bytes())\n new_path = os.pathsep.join([str(target), *os.environ.get('PATH', '').\n split(os.pathsep)])\n monkeypatch.setenv('PATH', new_path)\n interpreter = get_interpreter(core, [])\n assert interpreter is not None\n\n\ndef test_discovery_via_path_not_found(tmp_path, monkeypatch):\n monkeypatch.setenv('PATH', str(tmp_path))\n interpreter = get_interpreter(uuid4().hex, [])\n assert interpreter is None\n\n\ndef test_relative_path(session_app_data, monkeypatch):\n sys_executable = Path(PythonInfo.current_system(app_data=\n session_app_data).system_executable)\n cwd = sys_executable.parents[1]\n monkeypatch.chdir(str(cwd))\n relative = str(sys_executable.relative_to(cwd))\n result = get_interpreter(relative, [], session_app_data)\n assert result is not None\n\n\n<mask token>\n\n\ndef test_discovery_fallback_ok(session_app_data, caplog):\n caplog.set_level(logging.DEBUG)\n builtin = Builtin(Namespace(app_data=session_app_data, try_first_with=[\n ], python=['magic-one', sys.executable], env=os.environ))\n result = builtin.run()\n assert result is not None, caplog.text\n assert result.executable == sys.executable, caplog.text\n assert 'accepted' in caplog.text\n",
"step-3": "<mask token>\n\n\n@pytest.mark.skipif(not fs_supports_symlink(), reason='symlink not supported')\n@pytest.mark.parametrize('case', ['mixed', 'lower', 'upper'])\ndef test_discovery_via_path(monkeypatch, case, tmp_path, caplog,\n session_app_data):\n caplog.set_level(logging.DEBUG)\n current = PythonInfo.current_system(session_app_data)\n core = (\n f\"somethingVeryCryptic{'.'.join(str(i) for i in current.version_info[0:3])}\"\n )\n name = 'somethingVeryCryptic'\n if case == 'lower':\n name = name.lower()\n elif case == 'upper':\n name = name.upper()\n exe_name = (\n f\"{name}{current.version_info.major}{'.exe' if sys.platform == 'win32' else ''}\"\n )\n target = tmp_path / current.install_path('scripts')\n target.mkdir(parents=True)\n executable = target / exe_name\n os.symlink(sys.executable, str(executable))\n pyvenv_cfg = Path(sys.executable).parents[1] / 'pyvenv.cfg'\n if pyvenv_cfg.exists():\n (target / pyvenv_cfg.name).write_bytes(pyvenv_cfg.read_bytes())\n new_path = os.pathsep.join([str(target), *os.environ.get('PATH', '').\n split(os.pathsep)])\n monkeypatch.setenv('PATH', new_path)\n interpreter = get_interpreter(core, [])\n assert interpreter is not None\n\n\ndef test_discovery_via_path_not_found(tmp_path, monkeypatch):\n monkeypatch.setenv('PATH', str(tmp_path))\n interpreter = get_interpreter(uuid4().hex, [])\n assert interpreter is None\n\n\ndef test_relative_path(session_app_data, monkeypatch):\n sys_executable = Path(PythonInfo.current_system(app_data=\n session_app_data).system_executable)\n cwd = sys_executable.parents[1]\n monkeypatch.chdir(str(cwd))\n relative = str(sys_executable.relative_to(cwd))\n result = get_interpreter(relative, [], session_app_data)\n assert result is not None\n\n\ndef test_discovery_fallback_fail(session_app_data, caplog):\n caplog.set_level(logging.DEBUG)\n builtin = Builtin(Namespace(app_data=session_app_data, try_first_with=[\n ], python=['magic-one', 'magic-two'], env=os.environ))\n result = builtin.run()\n assert result is None\n assert 'accepted' not in caplog.text\n\n\ndef test_discovery_fallback_ok(session_app_data, caplog):\n caplog.set_level(logging.DEBUG)\n builtin = Builtin(Namespace(app_data=session_app_data, try_first_with=[\n ], python=['magic-one', sys.executable], env=os.environ))\n result = builtin.run()\n assert result is not None, caplog.text\n assert result.executable == sys.executable, caplog.text\n assert 'accepted' in caplog.text\n",
"step-4": "from __future__ import annotations\nimport logging\nimport os\nimport sys\nfrom argparse import Namespace\nfrom pathlib import Path\nfrom uuid import uuid4\nimport pytest\nfrom virtualenv.discovery.builtin import Builtin, get_interpreter\nfrom virtualenv.discovery.py_info import PythonInfo\nfrom virtualenv.info import fs_supports_symlink\n\n\n@pytest.mark.skipif(not fs_supports_symlink(), reason='symlink not supported')\n@pytest.mark.parametrize('case', ['mixed', 'lower', 'upper'])\ndef test_discovery_via_path(monkeypatch, case, tmp_path, caplog,\n session_app_data):\n caplog.set_level(logging.DEBUG)\n current = PythonInfo.current_system(session_app_data)\n core = (\n f\"somethingVeryCryptic{'.'.join(str(i) for i in current.version_info[0:3])}\"\n )\n name = 'somethingVeryCryptic'\n if case == 'lower':\n name = name.lower()\n elif case == 'upper':\n name = name.upper()\n exe_name = (\n f\"{name}{current.version_info.major}{'.exe' if sys.platform == 'win32' else ''}\"\n )\n target = tmp_path / current.install_path('scripts')\n target.mkdir(parents=True)\n executable = target / exe_name\n os.symlink(sys.executable, str(executable))\n pyvenv_cfg = Path(sys.executable).parents[1] / 'pyvenv.cfg'\n if pyvenv_cfg.exists():\n (target / pyvenv_cfg.name).write_bytes(pyvenv_cfg.read_bytes())\n new_path = os.pathsep.join([str(target), *os.environ.get('PATH', '').\n split(os.pathsep)])\n monkeypatch.setenv('PATH', new_path)\n interpreter = get_interpreter(core, [])\n assert interpreter is not None\n\n\ndef test_discovery_via_path_not_found(tmp_path, monkeypatch):\n monkeypatch.setenv('PATH', str(tmp_path))\n interpreter = get_interpreter(uuid4().hex, [])\n assert interpreter is None\n\n\ndef test_relative_path(session_app_data, monkeypatch):\n sys_executable = Path(PythonInfo.current_system(app_data=\n session_app_data).system_executable)\n cwd = sys_executable.parents[1]\n monkeypatch.chdir(str(cwd))\n relative = str(sys_executable.relative_to(cwd))\n result = get_interpreter(relative, [], session_app_data)\n assert result is not None\n\n\ndef test_discovery_fallback_fail(session_app_data, caplog):\n caplog.set_level(logging.DEBUG)\n builtin = Builtin(Namespace(app_data=session_app_data, try_first_with=[\n ], python=['magic-one', 'magic-two'], env=os.environ))\n result = builtin.run()\n assert result is None\n assert 'accepted' not in caplog.text\n\n\ndef test_discovery_fallback_ok(session_app_data, caplog):\n caplog.set_level(logging.DEBUG)\n builtin = Builtin(Namespace(app_data=session_app_data, try_first_with=[\n ], python=['magic-one', sys.executable], env=os.environ))\n result = builtin.run()\n assert result is not None, caplog.text\n assert result.executable == sys.executable, caplog.text\n assert 'accepted' in caplog.text\n",
"step-5": "from __future__ import annotations\n\nimport logging\nimport os\nimport sys\nfrom argparse import Namespace\nfrom pathlib import Path\nfrom uuid import uuid4\n\nimport pytest\n\nfrom virtualenv.discovery.builtin import Builtin, get_interpreter\nfrom virtualenv.discovery.py_info import PythonInfo\nfrom virtualenv.info import fs_supports_symlink\n\n\n@pytest.mark.skipif(not fs_supports_symlink(), reason=\"symlink not supported\")\n@pytest.mark.parametrize(\"case\", [\"mixed\", \"lower\", \"upper\"])\ndef test_discovery_via_path(monkeypatch, case, tmp_path, caplog, session_app_data):\n caplog.set_level(logging.DEBUG)\n current = PythonInfo.current_system(session_app_data)\n core = f\"somethingVeryCryptic{'.'.join(str(i) for i in current.version_info[0:3])}\"\n name = \"somethingVeryCryptic\"\n if case == \"lower\":\n name = name.lower()\n elif case == \"upper\":\n name = name.upper()\n exe_name = f\"{name}{current.version_info.major}{'.exe' if sys.platform == 'win32' else ''}\"\n target = tmp_path / current.install_path(\"scripts\")\n target.mkdir(parents=True)\n executable = target / exe_name\n os.symlink(sys.executable, str(executable))\n pyvenv_cfg = Path(sys.executable).parents[1] / \"pyvenv.cfg\"\n if pyvenv_cfg.exists():\n (target / pyvenv_cfg.name).write_bytes(pyvenv_cfg.read_bytes())\n new_path = os.pathsep.join([str(target), *os.environ.get(\"PATH\", \"\").split(os.pathsep)])\n monkeypatch.setenv(\"PATH\", new_path)\n interpreter = get_interpreter(core, [])\n\n assert interpreter is not None\n\n\ndef test_discovery_via_path_not_found(tmp_path, monkeypatch):\n monkeypatch.setenv(\"PATH\", str(tmp_path))\n interpreter = get_interpreter(uuid4().hex, [])\n assert interpreter is None\n\n\ndef test_relative_path(session_app_data, monkeypatch):\n sys_executable = Path(PythonInfo.current_system(app_data=session_app_data).system_executable)\n cwd = sys_executable.parents[1]\n monkeypatch.chdir(str(cwd))\n relative = str(sys_executable.relative_to(cwd))\n result = get_interpreter(relative, [], session_app_data)\n assert result is not None\n\n\ndef test_discovery_fallback_fail(session_app_data, caplog):\n caplog.set_level(logging.DEBUG)\n builtin = Builtin(\n Namespace(app_data=session_app_data, try_first_with=[], python=[\"magic-one\", \"magic-two\"], env=os.environ),\n )\n\n result = builtin.run()\n assert result is None\n\n assert \"accepted\" not in caplog.text\n\n\ndef test_discovery_fallback_ok(session_app_data, caplog):\n caplog.set_level(logging.DEBUG)\n builtin = Builtin(\n Namespace(app_data=session_app_data, try_first_with=[], python=[\"magic-one\", sys.executable], env=os.environ),\n )\n\n result = builtin.run()\n assert result is not None, caplog.text\n assert result.executable == sys.executable, caplog.text\n\n assert \"accepted\" in caplog.text\n",
"step-ids": [
1,
4,
5,
6,
7
]
}
|
[
1,
4,
5,
6,
7
] |
'''
You're playing a casino dice game. You roll a die once. If you reroll, you earn the amount equal to the number on your second roll; otherwise, you earn the amount equal to the number on your first roll.
Assuming you adopt a profit-maximizing strategy, what would be the expected amount of money you would win?
This question was asked in a data scientist interview at Tinder.
'''
import numpy as np
for threshold in range(1, 6):
rolls = np.random.randint(1, 7, size=10**7)
rerolls = np.random.randint(1, 7, size=10**7)
avg_roll = np.mean(np.where(rolls <= threshold, rerolls, rolls))
print(f'Rerolling all {threshold}s and below yields an average roll of {avg_roll}.')
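# --- Editorial addition, not part of the original snippet: a quick analytic
# cross-check of the simulation above. A reroll is worth E[die] = 3.5 on
# average, so the profit-maximizing rule is to reroll whenever the first roll
# is 3 or less; the expected winnings are then (3/6)*3.5 + (4+5+6)/6 = 4.25,
# which the Monte Carlo estimate for threshold 3 should approach.
analytic = {t: (t / 6) * 3.5 + sum(range(t + 1, 7)) / 6 for t in range(6)}
best_threshold = max(analytic, key=analytic.get)
print(f'Analytic optimum: reroll {best_threshold}s and below, expected win {analytic[best_threshold]:.2f}.')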
|
normal
|
{
"blob_id": "e5d704541acd0f68a7885d7323118e1552e064c9",
"index": 6170,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor threshold in range(1, 6):\n rolls = np.random.randint(1, 7, size=10 ** 7)\n rerolls = np.random.randint(1, 7, size=10 ** 7)\n avg_roll = np.mean(np.where(rolls <= threshold, rerolls, rolls))\n print(\n f'Rerolling all {threshold}s and below yields an average roll of {avg_roll}.'\n )\n",
"step-3": "<mask token>\nimport numpy as np\nfor threshold in range(1, 6):\n rolls = np.random.randint(1, 7, size=10 ** 7)\n rerolls = np.random.randint(1, 7, size=10 ** 7)\n avg_roll = np.mean(np.where(rolls <= threshold, rerolls, rolls))\n print(\n f'Rerolling all {threshold}s and below yields an average roll of {avg_roll}.'\n )\n",
"step-4": "'''\nYou're playing casino dice game. You roll a die once. If you reroll, you earn the amount equal to the number on your second roll otherwise, you earn the amount equal to the number on your first roll.\n\nAssuming you adopt a profit-maximizing strategy, what would be the expected amount of money you would win?\n\nThis question was asked in a data scientist interview at Tinder.\n'''\n\nimport numpy as np\n\nfor threshold in range(1, 6):\n rolls = np.random.randint(1, 7, size=10**7)\n rerolls = np.random.randint(1, 7, size=10**7)\n avg_roll = np.mean(np.where(rolls <= threshold, rerolls, rolls))\n print(f'Rerolling all {threshold}s and below yields an average roll of {avg_roll}.')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def __line_into_col__(line):
tokens = dl_style_transfer.workspace.data_helpers.clean_str(line).split(' '
)
for wor in tokens:
if col.get(wor) is None:
col[wor] = 1
else:
col[wor] = col[wor] + 1
<|reserved_special_token_0|>
def vocab_length():
return voc_len
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def __line_into_col__(line):
tokens = dl_style_transfer.workspace.data_helpers.clean_str(line).split(' '
)
for wor in tokens:
if col.get(wor) is None:
col[wor] = 1
else:
col[wor] = col[wor] + 1
<|reserved_special_token_0|>
def get_small_bag():
bag = []
for sent in sents:
sbag = []
for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent
).split(' '):
sbag.append(word_to_ind[wor])
bag.append(sbag)
return bag
<|reserved_special_token_0|>
def get_ryans_strange_input():
vec = []
for l in sents:
vec.append(dl_style_transfer.workspace.data_helpers.clean_str(l))
return np.array([word_to_ind[i] for l in vec for i in l.split(' ')])
def vocab_length():
return voc_len
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def __line_into_col__(line):
tokens = dl_style_transfer.workspace.data_helpers.clean_str(line).split(' '
)
for wor in tokens:
if col.get(wor) is None:
col[wor] = 1
else:
col[wor] = col[wor] + 1
<|reserved_special_token_0|>
def get_small_bag():
bag = []
for sent in sents:
sbag = []
for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent
).split(' '):
sbag.append(word_to_ind[wor])
bag.append(sbag)
return bag
def get_bag():
bag = np.zeros(shape)
for j, sent in enumerate(sents):
for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent
).split(' '):
bag[j, word_to_ind[wor]] = bag[j, word_to_ind[wor]] + 1
return np.log(1 + bag) / np.max(np.log(1 + bag), axis=1)
def string_to_vec(string):
tokens = dl_style_transfer.workspace.data_helpers.clean_str(string).split(
' ')
vec = np.zeros(voc_len)
for wor in tokens:
vec[word_to_ind[wor]] = vec[word_to_ind[wor]] + 1
return vec
def get_ryans_strange_input():
vec = []
for l in sents:
vec.append(dl_style_transfer.workspace.data_helpers.clean_str(l))
return np.array([word_to_ind[i] for l in vec for i in l.split(' ')])
def vocab_length():
return voc_len
<|reserved_special_token_1|>
<|reserved_special_token_0|>
here = os.path.dirname(os.path.abspath(__file__))
sents = list(open(os.path.join(here, 'yelp_sentences.txt'))) + list(open(os
.path.join(here, 'shake_sentences.txt')))
thresh = 5
col = dict()
word_to_ind = dict()
ind_to_word = dict()
def __line_into_col__(line):
tokens = dl_style_transfer.workspace.data_helpers.clean_str(line).split(' '
)
for wor in tokens:
if col.get(wor) is None:
col[wor] = 1
else:
col[wor] = col[wor] + 1
for l in sents:
__line_into_col__(l)
lis = list(col.items())
lis.sort(key=lambda count: count[1], reverse=True)
for i, word in enumerate(lis):
word_to_ind[word[0]] = i
ind_to_word[i] = word[0]
voc_len = len(word_to_ind)
shape = len(sents), voc_len
def get_small_bag():
bag = []
for sent in sents:
sbag = []
for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent
).split(' '):
sbag.append(word_to_ind[wor])
bag.append(sbag)
return bag
def get_bag():
bag = np.zeros(shape)
for j, sent in enumerate(sents):
for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent
).split(' '):
bag[j, word_to_ind[wor]] = bag[j, word_to_ind[wor]] + 1
return np.log(1 + bag) / np.max(np.log(1 + bag), axis=1)
def string_to_vec(string):
tokens = dl_style_transfer.workspace.data_helpers.clean_str(string).split(
' ')
vec = np.zeros(voc_len)
for wor in tokens:
vec[word_to_ind[wor]] = vec[word_to_ind[wor]] + 1
return vec
def get_ryans_strange_input():
vec = []
for l in sents:
vec.append(dl_style_transfer.workspace.data_helpers.clean_str(l))
return np.array([word_to_ind[i] for l in vec for i in l.split(' ')])
def vocab_length():
return voc_len
<|reserved_special_token_1|>
import numpy as np
import dl_style_transfer.workspace.data_helpers
import os
here = os.path.dirname(os.path.abspath(__file__))
sents = list(open(os.path.join(here, 'yelp_sentences.txt'))) + list(open(os.path.join(here, 'shake_sentences.txt')))
thresh = 5
col = dict()
word_to_ind = dict()
ind_to_word = dict()
def __line_into_col__(line):
tokens = dl_style_transfer.workspace.data_helpers.clean_str(line).split(" ")
for wor in tokens:
if col.get(wor) is None:
col[wor] = 1
else:
col[wor] = col[wor] + 1
for l in sents:
__line_into_col__(l)
lis = list(col.items())
lis.sort(key=lambda count: count[1], reverse=True)
for i, word in enumerate(lis):
word_to_ind[word[0]] = i
ind_to_word[i] = word[0]
voc_len = len(word_to_ind)
shape = (len(sents), voc_len)
def get_small_bag():
bag = []
for sent in sents:
sbag =[]
for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent).split(" "):
sbag.append(word_to_ind[wor])
bag.append(sbag)
return bag
def get_bag():
bag = np.zeros(shape)
for j,sent in enumerate(sents):
for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent).split(" "):
bag[j, word_to_ind[wor]] = bag[j, word_to_ind[wor]] + 1
return np.log(1 + bag) / np.max(np.log(1 + bag), axis=1)
def string_to_vec(string):
tokens = dl_style_transfer.workspace.data_helpers.clean_str(string).split(" ")
vec = np.zeros(voc_len)
for wor in tokens:
vec[word_to_ind[wor]] = vec[word_to_ind[wor]] + 1
return vec
def get_ryans_strange_input():
vec = []
for l in sents:
vec.append(dl_style_transfer.workspace.data_helpers.clean_str(l))
return np.array([word_to_ind[i] for l in vec for i in l.split(" ")])
def vocab_length():
return voc_len
|
flexible
|
{
"blob_id": "2317a2fff493588ad6cc3a4ac2b600fbf1c5583c",
"index": 8594,
"step-1": "<mask token>\n\n\ndef __line_into_col__(line):\n tokens = dl_style_transfer.workspace.data_helpers.clean_str(line).split(' '\n )\n for wor in tokens:\n if col.get(wor) is None:\n col[wor] = 1\n else:\n col[wor] = col[wor] + 1\n\n\n<mask token>\n\n\ndef vocab_length():\n return voc_len\n",
"step-2": "<mask token>\n\n\ndef __line_into_col__(line):\n tokens = dl_style_transfer.workspace.data_helpers.clean_str(line).split(' '\n )\n for wor in tokens:\n if col.get(wor) is None:\n col[wor] = 1\n else:\n col[wor] = col[wor] + 1\n\n\n<mask token>\n\n\ndef get_small_bag():\n bag = []\n for sent in sents:\n sbag = []\n for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent\n ).split(' '):\n sbag.append(word_to_ind[wor])\n bag.append(sbag)\n return bag\n\n\n<mask token>\n\n\ndef get_ryans_strange_input():\n vec = []\n for l in sents:\n vec.append(dl_style_transfer.workspace.data_helpers.clean_str(l))\n return np.array([word_to_ind[i] for l in vec for i in l.split(' ')])\n\n\ndef vocab_length():\n return voc_len\n",
"step-3": "<mask token>\n\n\ndef __line_into_col__(line):\n tokens = dl_style_transfer.workspace.data_helpers.clean_str(line).split(' '\n )\n for wor in tokens:\n if col.get(wor) is None:\n col[wor] = 1\n else:\n col[wor] = col[wor] + 1\n\n\n<mask token>\n\n\ndef get_small_bag():\n bag = []\n for sent in sents:\n sbag = []\n for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent\n ).split(' '):\n sbag.append(word_to_ind[wor])\n bag.append(sbag)\n return bag\n\n\ndef get_bag():\n bag = np.zeros(shape)\n for j, sent in enumerate(sents):\n for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent\n ).split(' '):\n bag[j, word_to_ind[wor]] = bag[j, word_to_ind[wor]] + 1\n return np.log(1 + bag) / np.max(np.log(1 + bag), axis=1)\n\n\ndef string_to_vec(string):\n tokens = dl_style_transfer.workspace.data_helpers.clean_str(string).split(\n ' ')\n vec = np.zeros(voc_len)\n for wor in tokens:\n vec[word_to_ind[wor]] = vec[word_to_ind[wor]] + 1\n return vec\n\n\ndef get_ryans_strange_input():\n vec = []\n for l in sents:\n vec.append(dl_style_transfer.workspace.data_helpers.clean_str(l))\n return np.array([word_to_ind[i] for l in vec for i in l.split(' ')])\n\n\ndef vocab_length():\n return voc_len\n",
"step-4": "<mask token>\nhere = os.path.dirname(os.path.abspath(__file__))\nsents = list(open(os.path.join(here, 'yelp_sentences.txt'))) + list(open(os\n .path.join(here, 'shake_sentences.txt')))\nthresh = 5\ncol = dict()\nword_to_ind = dict()\nind_to_word = dict()\n\n\ndef __line_into_col__(line):\n tokens = dl_style_transfer.workspace.data_helpers.clean_str(line).split(' '\n )\n for wor in tokens:\n if col.get(wor) is None:\n col[wor] = 1\n else:\n col[wor] = col[wor] + 1\n\n\nfor l in sents:\n __line_into_col__(l)\nlis = list(col.items())\nlis.sort(key=lambda count: count[1], reverse=True)\nfor i, word in enumerate(lis):\n word_to_ind[word[0]] = i\n ind_to_word[i] = word[0]\nvoc_len = len(word_to_ind)\nshape = len(sents), voc_len\n\n\ndef get_small_bag():\n bag = []\n for sent in sents:\n sbag = []\n for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent\n ).split(' '):\n sbag.append(word_to_ind[wor])\n bag.append(sbag)\n return bag\n\n\ndef get_bag():\n bag = np.zeros(shape)\n for j, sent in enumerate(sents):\n for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent\n ).split(' '):\n bag[j, word_to_ind[wor]] = bag[j, word_to_ind[wor]] + 1\n return np.log(1 + bag) / np.max(np.log(1 + bag), axis=1)\n\n\ndef string_to_vec(string):\n tokens = dl_style_transfer.workspace.data_helpers.clean_str(string).split(\n ' ')\n vec = np.zeros(voc_len)\n for wor in tokens:\n vec[word_to_ind[wor]] = vec[word_to_ind[wor]] + 1\n return vec\n\n\ndef get_ryans_strange_input():\n vec = []\n for l in sents:\n vec.append(dl_style_transfer.workspace.data_helpers.clean_str(l))\n return np.array([word_to_ind[i] for l in vec for i in l.split(' ')])\n\n\ndef vocab_length():\n return voc_len\n",
"step-5": "import numpy as np\nimport dl_style_transfer.workspace.data_helpers\nimport os\n\n\nhere = os.path.dirname(os.path.abspath(__file__))\n\nsents = list(open(os.path.join(here, 'yelp_sentences.txt'))) + list(open(os.path.join(here, 'shake_sentences.txt')))\n\nthresh = 5\n\ncol = dict()\nword_to_ind = dict()\nind_to_word = dict()\n\n\ndef __line_into_col__(line):\n tokens = dl_style_transfer.workspace.data_helpers.clean_str(line).split(\" \")\n for wor in tokens:\n if col.get(wor) is None:\n col[wor] = 1\n else:\n col[wor] = col[wor] + 1\n\n\nfor l in sents:\n __line_into_col__(l)\n\nlis = list(col.items())\nlis.sort(key=lambda count: count[1], reverse=True)\nfor i, word in enumerate(lis):\n word_to_ind[word[0]] = i\n ind_to_word[i] = word[0]\n\nvoc_len = len(word_to_ind)\n\nshape = (len(sents), voc_len)\n\ndef get_small_bag():\n\tbag = []\n\tfor sent in sents:\n\t\tsbag =[]\n\t\tfor wor in dl_style_transfer.workspace.data_helpers.clean_str(sent).split(\" \"):\n\t\t\tsbag.append(word_to_ind[wor])\n\t\tbag.append(sbag)\n\treturn bag\n\t\t\t\n\t\t\t\n\ndef get_bag():\n bag = np.zeros(shape)\n for j,sent in enumerate(sents):\n for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent).split(\" \"):\n bag[j, word_to_ind[wor]] = bag[j, word_to_ind[wor]] + 1\n return np.log(1 + bag) / np.max(np.log(1 + bag), axis=1)\n\n\ndef string_to_vec(string):\n tokens = dl_style_transfer.workspace.data_helpers.clean_str(string).split(\" \")\n vec = np.zeros(voc_len)\n for wor in tokens:\n vec[word_to_ind[wor]] = vec[word_to_ind[wor]] + 1\n return vec\n\n\ndef get_ryans_strange_input():\n vec = []\n for l in sents:\n vec.append(dl_style_transfer.workspace.data_helpers.clean_str(l))\n return np.array([word_to_ind[i] for l in vec for i in l.split(\" \")])\n\n\ndef vocab_length():\n return voc_len\n",
"step-ids": [
2,
4,
6,
8,
10
]
}
|
[
2,
4,
6,
8,
10
] |
<|reserved_special_token_0|>
class WRITE_TO_FILE(tarr.compiler_base.Instruction):
@property
def __name__(self):
return 'POINT OF INTEREST - WRITE("{}")'.format(self.filename)
def __init__(self, filename, formatter=format_data):
self.format = formatter
self.filename = filename
def run(self, runner, data):
with open(self.filename, 'ab', buffering=0) as f:
f.write(self.format(data) + '\n')
return data
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class WRITE_TO_FILE(tarr.compiler_base.Instruction):
@property
def __name__(self):
return 'POINT OF INTEREST - WRITE("{}")'.format(self.filename)
def __init__(self, filename, formatter=format_data):
self.format = formatter
self.filename = filename
def run(self, runner, data):
with open(self.filename, 'ab', buffering=0) as f:
f.write(self.format(data) + '\n')
return data
def clone(self):
return self.__class__(filename=self.filename, formatter=self.format)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def format_data(data):
return '{0.id}: {0.payload}'.format(data)
class WRITE_TO_FILE(tarr.compiler_base.Instruction):
@property
def __name__(self):
return 'POINT OF INTEREST - WRITE("{}")'.format(self.filename)
def __init__(self, filename, formatter=format_data):
self.format = formatter
self.filename = filename
def run(self, runner, data):
with open(self.filename, 'ab', buffering=0) as f:
f.write(self.format(data) + '\n')
return data
def clone(self):
return self.__class__(filename=self.filename, formatter=self.format)
<|reserved_special_token_1|>
import tarr.compiler_base
def format_data(data):
return '{0.id}: {0.payload}'.format(data)
class WRITE_TO_FILE(tarr.compiler_base.Instruction):
@property
def __name__(self):
return 'POINT OF INTEREST - WRITE("{}")'.format(self.filename)
def __init__(self, filename, formatter=format_data):
self.format = formatter
self.filename = filename
def run(self, runner, data):
with open(self.filename, 'ab', buffering=0) as f:
f.write(self.format(data) + '\n')
return data
def clone(self):
return self.__class__(filename=self.filename, formatter=self.format)
<|reserved_special_token_1|>
# drop data to file filter
import tarr.compiler_base
def format_data(data):
return '{0.id}: {0.payload}'.format(data)
class WRITE_TO_FILE(tarr.compiler_base.Instruction):
@property
def __name__(self):
return 'POINT OF INTEREST - WRITE("{}")'.format(self.filename)
def __init__(self, filename, formatter=format_data):
self.format = formatter
self.filename = filename
def run(self, runner, data):
# NOTE: we need to do writing in UNBUFFERED mode (buffering=0)
# as potentially there are other processes writing to the same file
# *NOW*
with open(self.filename, 'ab', buffering=0) as f:
f.write(self.format(data) + '\n')
return data
def clone(self):
return self.__class__(filename=self.filename, formatter=self.format)
|
flexible
|
{
"blob_id": "75393d39b147097a7ac1d82938ac102491ea9441",
"index": 8469,
"step-1": "<mask token>\n\n\nclass WRITE_TO_FILE(tarr.compiler_base.Instruction):\n\n @property\n def __name__(self):\n return 'POINT OF INTEREST - WRITE(\"{}\")'.format(self.filename)\n\n def __init__(self, filename, formatter=format_data):\n self.format = formatter\n self.filename = filename\n\n def run(self, runner, data):\n with open(self.filename, 'ab', buffering=0) as f:\n f.write(self.format(data) + '\\n')\n return data\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass WRITE_TO_FILE(tarr.compiler_base.Instruction):\n\n @property\n def __name__(self):\n return 'POINT OF INTEREST - WRITE(\"{}\")'.format(self.filename)\n\n def __init__(self, filename, formatter=format_data):\n self.format = formatter\n self.filename = filename\n\n def run(self, runner, data):\n with open(self.filename, 'ab', buffering=0) as f:\n f.write(self.format(data) + '\\n')\n return data\n\n def clone(self):\n return self.__class__(filename=self.filename, formatter=self.format)\n",
"step-3": "<mask token>\n\n\ndef format_data(data):\n return '{0.id}: {0.payload}'.format(data)\n\n\nclass WRITE_TO_FILE(tarr.compiler_base.Instruction):\n\n @property\n def __name__(self):\n return 'POINT OF INTEREST - WRITE(\"{}\")'.format(self.filename)\n\n def __init__(self, filename, formatter=format_data):\n self.format = formatter\n self.filename = filename\n\n def run(self, runner, data):\n with open(self.filename, 'ab', buffering=0) as f:\n f.write(self.format(data) + '\\n')\n return data\n\n def clone(self):\n return self.__class__(filename=self.filename, formatter=self.format)\n",
"step-4": "import tarr.compiler_base\n\n\ndef format_data(data):\n return '{0.id}: {0.payload}'.format(data)\n\n\nclass WRITE_TO_FILE(tarr.compiler_base.Instruction):\n\n @property\n def __name__(self):\n return 'POINT OF INTEREST - WRITE(\"{}\")'.format(self.filename)\n\n def __init__(self, filename, formatter=format_data):\n self.format = formatter\n self.filename = filename\n\n def run(self, runner, data):\n with open(self.filename, 'ab', buffering=0) as f:\n f.write(self.format(data) + '\\n')\n return data\n\n def clone(self):\n return self.__class__(filename=self.filename, formatter=self.format)\n",
"step-5": "# drop data to file filter\nimport tarr.compiler_base\n\n\ndef format_data(data):\n return '{0.id}: {0.payload}'.format(data)\n\n\nclass WRITE_TO_FILE(tarr.compiler_base.Instruction):\n\n @property\n def __name__(self):\n return 'POINT OF INTEREST - WRITE(\"{}\")'.format(self.filename)\n\n def __init__(self, filename, formatter=format_data):\n self.format = formatter\n self.filename = filename\n\n def run(self, runner, data):\n # NOTE: we need to do writing in UNBUFFERED mode (buffering=0)\n # as potentially there are other processes writing to the same file\n # *NOW*\n with open(self.filename, 'ab', buffering=0) as f:\n f.write(self.format(data) + '\\n')\n return data\n\n def clone(self):\n return self.__class__(filename=self.filename, formatter=self.format)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class _ab_test_plotting(_ab_test_utils):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def plot_positive_lift(self, variant_one, variant_two):
"""Plot the positive lift pdt between variant_one and variant_two.
Arguments:
variant_one and variant_two should not be the same
variant_one {str} -- should be a value in bucket_col_name.
variant_two {str} -- should be a value in bucket_col_name.
"""
if variant_one == variant_two:
raise ValueError('variant_one and variant_two cannot be the same')
if variant_one not in self.posteriors.keys(
) or variant_two not in self.posteriors.keys():
raise ValueError('Variants must only be a value in column {}'.
format(self.bucket_col_name))
if (variant_one != self.control_bucket_name and variant_two != self
.control_bucket_name):
if not self.compare_variants:
raise RuntimeError(
'Compare_variants must be set to true in order to compare {0} and {1}'
.format(variant_one, variant_two))
if variant_one in self.lift.keys() and variant_two in self.lift[
variant_one].keys():
self._plot_positive_lift(numerator_name=variant_one,
denominator_name=variant_two)
else:
self._plot_positive_lift(numerator_name=variant_two,
denominator_name=variant_one)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class _ab_test_plotting(_ab_test_utils):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _plot_ecdf(self, numerator_name, denominator_name):
"""Plot the empirical cumulative distribution function.
This is a private function. For a public interface, see
plot_ecdf().
Arguments:
numerator_name {str} -- The name of the numerator in the lift
calculation.
            denominator_name {str} -- The name of the denominator in the lift
calculation.
"""
x = self.ecdf[numerator_name][denominator_name]['x']
y = self.ecdf[numerator_name][denominator_name]['y']
lower_bound = x[y.index(min(y, key=lambda x: abs(x - self.
confidence_level)))]
median = x[y.index(min(y, key=lambda x: abs(x - 0.5)))]
upper_bound = x[y.index(min(y, key=lambda x: abs(x - (1 - self.
confidence_level))))]
sns.lineplot(x=x, y=y)
ci = 1 - self.confidence_level
title = (
'Median Lift was {0:.2%}, with a {1:.0%} CI of [{2:.2%}, {3:.2%}]'
.format(median, ci, lower_bound, upper_bound))
title = self._format_title(title)
plt.title(title)
plt.xlabel('Lift')
plt.ylabel('Cumulative Probability')
plt.axvline(x=lower_bound, linestyle='dotted', color='black')
plt.axvline(x=median, linestyle='dotted', color='black')
plt.axvline(x=upper_bound, linestyle='dotted', color='black')
sns.despine(left=True)
locs, labels = plt.xticks()
labels = self._format_axis_as_percent(locs, labels)
plt.xticks(locs, labels=labels)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def plot_posteriors(self, variants=[]):
"""Plot the PDFs of the posterior distributions.
Arguments:
variants {list} -- List of variant names to be plotted.
        If variants is not set, all are plotted; otherwise, the variants
in the list are plotted. Variants must only have items in
bucket_col_name (default: {[]}).
"""
if variants != []:
for var in variants:
if var not in self.posteriors.keys():
raise ValueError(
'Variants must only be a value in bucket_col_name')
self._plot_posteriors(variants)
def plot_positive_lift(self, variant_one, variant_two):
"""Plot the positive lift pdt between variant_one and variant_two.
Arguments:
variant_one and variant_two should not be the same
variant_one {str} -- should be a value in bucket_col_name.
variant_two {str} -- should be a value in bucket_col_name.
"""
if variant_one == variant_two:
raise ValueError('variant_one and variant_two cannot be the same')
if variant_one not in self.posteriors.keys(
) or variant_two not in self.posteriors.keys():
raise ValueError('Variants must only be a value in column {}'.
format(self.bucket_col_name))
if (variant_one != self.control_bucket_name and variant_two != self
.control_bucket_name):
if not self.compare_variants:
raise RuntimeError(
'Compare_variants must be set to true in order to compare {0} and {1}'
.format(variant_one, variant_two))
if variant_one in self.lift.keys() and variant_two in self.lift[
variant_one].keys():
self._plot_positive_lift(numerator_name=variant_one,
denominator_name=variant_two)
else:
self._plot_positive_lift(numerator_name=variant_two,
denominator_name=variant_one)
def plot_ecdf(self, variant_one, variant_two):
"""Plot the empirical cdf for the lift b/w variant_one and variant_two.
Arguments:
variant_one {str} -- should be a value in bucket_col_name.
variant_two {str} -- should be a value in bucket_col_name.
"""
if variant_one == variant_two:
raise ValueError('variant_one and variant_two cannot be the same')
if variant_one not in self.posteriors.keys(
) or variant_two not in self.posteriors.keys():
raise ValueError('Variants must only be a value in column {}'.
format(self.bucket_col_name))
if variant_one in self.ecdf.keys() and variant_two in self.ecdf[
variant_one].keys():
self._plot_ecdf(numerator_name=variant_one, denominator_name=
variant_two)
plt.ylabel('Cumulative Lift: {0} vs {1}'.format(variant_two,
variant_one))
else:
self._plot_ecdf(numerator_name=variant_two, denominator_name=
variant_one)
plt.ylabel('Cumulative Lift: {0} vs {1}'.format(variant_one,
variant_two))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class _ab_test_plotting(_ab_test_utils):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _plot_positive_lift(self, numerator_name, denominator_name):
"""Plot the lift vector as a kernel density estimation.
This is a private function. For a public interface, see
plot_positive_lift().
Arguments:
numerator_name {str} -- The name of the numerator in the lift
calculation.
            denominator_name {str} -- The name of the denominator in the lift
calculation.
"""
lift = self.lift[numerator_name][denominator_name]
ax = sns.kdeplot(lift, shade=True)
line = ax.get_lines()[0]
x, y = line.get_data()
mask = x > 0
x, y = x[mask], y[mask]
ax.fill_between(x, y1=y, alpha=0.5, facecolor='red')
if len(self.variant_bucket_names) > 1:
title = numerator_name + ' vs ' + denominator_name
ax.set_ylabel(title, rotation=0, fontstyle='italic')
plt.axvline(x=0, linestyle='dotted', color='black')
plt.xlabel('Lift')
percent_positive_lift = sum(i > 0 for i in lift) / len(lift)
title = '{0} had {1:.2%} probability of positive lift'.format(self.
metric, percent_positive_lift)
title = self._format_title(title)
plt.title(title)
sns.despine(left=True)
plt.yticks([], [])
locs, labels = plt.xticks()
labels = self._format_axis_as_percent(locs, labels)
plt.xticks(locs, labels=labels)
def _plot_ecdf(self, numerator_name, denominator_name):
"""Plot the empirical cumulative distribution function.
This is a private function. For a public interface, see
plot_ecdf().
Arguments:
numerator_name {str} -- The name of the numerator in the lift
calculation.
            denominator_name {str} -- The name of the denominator in the lift
calculation.
"""
x = self.ecdf[numerator_name][denominator_name]['x']
y = self.ecdf[numerator_name][denominator_name]['y']
lower_bound = x[y.index(min(y, key=lambda x: abs(x - self.
confidence_level)))]
median = x[y.index(min(y, key=lambda x: abs(x - 0.5)))]
upper_bound = x[y.index(min(y, key=lambda x: abs(x - (1 - self.
confidence_level))))]
sns.lineplot(x=x, y=y)
ci = 1 - self.confidence_level
title = (
'Median Lift was {0:.2%}, with a {1:.0%} CI of [{2:.2%}, {3:.2%}]'
.format(median, ci, lower_bound, upper_bound))
title = self._format_title(title)
plt.title(title)
plt.xlabel('Lift')
plt.ylabel('Cumulative Probability')
plt.axvline(x=lower_bound, linestyle='dotted', color='black')
plt.axvline(x=median, linestyle='dotted', color='black')
plt.axvline(x=upper_bound, linestyle='dotted', color='black')
sns.despine(left=True)
locs, labels = plt.xticks()
labels = self._format_axis_as_percent(locs, labels)
plt.xticks(locs, labels=labels)
<|reserved_special_token_0|>
def _calc_lift(self):
"""Calculate the lift of the variants over the others."""
for key, val in self.posteriors.items():
if key == self.control_bucket_name:
continue
lift_over_control = np.divide(val.get_posterior_sample(), self.
posteriors[self.control_bucket_name].get_posterior_sample()
) - 1
if key not in self.lift.keys():
self.lift[key] = {}
self.lift[key][self.control_bucket_name] = lift_over_control
else:
self.lift[key][self.control_bucket_name] = lift_over_control
if self.debug:
percent_positive_lift = sum(i > 0 for i in lift_over_control
) / len(lift_over_control)
print('percent positive lift for {0} over {1} = {2:.2%}'.
format(key, self.control_bucket_name,
percent_positive_lift))
if self.compare_variants:
comparisons = list(range(0, len(self.variant_bucket_names)))
combs = combinations(comparisons, 2)
for combination in combs:
denom = self.posteriors[self.variant_bucket_names[
combination[0]]]
num = self.posteriors[self.variant_bucket_names[combination[1]]
]
lift = np.divide(num.get_posterior_sample(), denom.
get_posterior_sample()) - 1
if num.get_variant_name() not in self.lift.keys():
self.lift[num.get_variant_name()] = {}
self.lift[num.get_variant_name()][denom.get_variant_name()
] = lift
else:
self.lift[num.get_variant_name()][denom.get_variant_name()
] = lift
if self.debug:
percent_positive_lift = sum(i > 0 for i in lift) / len(lift
)
print('percent positive lift for {0} over {1} = {2:.2%}'
.format(num.get_variant_name(), denom.
get_variant_name(), percent_positive_lift))
def plot_posteriors(self, variants=[]):
"""Plot the PDFs of the posterior distributions.
Arguments:
variants {list} -- List of variant names to be plotted.
If variants is not set, all are plotted, otherwise, the variants
in the list are plotted. Variants must only have items in
bucket_col_name (default: {[]}).
"""
if variants != []:
for var in variants:
if var not in self.posteriors.keys():
raise ValueError(
'Variants must only be a value in bucket_col_name')
self._plot_posteriors(variants)
def plot_positive_lift(self, variant_one, variant_two):
"""Plot the positive lift pdt between variant_one and variant_two.
Arguments:
variant_one and variant_two should not be the same
variant_one {str} -- should be a value in bucket_col_name.
variant_two {str} -- should be a value in bucket_col_name.
"""
if variant_one == variant_two:
raise ValueError('variant_one and variant_two cannot be the same')
if variant_one not in self.posteriors.keys(
) or variant_two not in self.posteriors.keys():
raise ValueError('Variants must only be a value in column {}'.
format(self.bucket_col_name))
if (variant_one != self.control_bucket_name and variant_two != self
.control_bucket_name):
if not self.compare_variants:
raise RuntimeError(
'Compare_variants must be set to true in order to compare {0} and {1}'
.format(variant_one, variant_two))
if variant_one in self.lift.keys() and variant_two in self.lift[
variant_one].keys():
self._plot_positive_lift(numerator_name=variant_one,
denominator_name=variant_two)
else:
self._plot_positive_lift(numerator_name=variant_two,
denominator_name=variant_one)
def plot_ecdf(self, variant_one, variant_two):
"""Plot the empirical cdf for the lift b/w variant_one and variant_two.
Arguments:
variant_one {str} -- should be a value in bucket_col_name.
variant_two {str} -- should be a value in bucket_col_name.
"""
if variant_one == variant_two:
raise ValueError('variant_one and variant_two cannot be the same')
if variant_one not in self.posteriors.keys(
) or variant_two not in self.posteriors.keys():
raise ValueError('Variants must only be a value in column {}'.
format(self.bucket_col_name))
if variant_one in self.ecdf.keys() and variant_two in self.ecdf[
variant_one].keys():
self._plot_ecdf(numerator_name=variant_one, denominator_name=
variant_two)
plt.ylabel('Cumulative Lift: {0} vs {1}'.format(variant_two,
variant_one))
else:
self._plot_ecdf(numerator_name=variant_two, denominator_name=
variant_one)
plt.ylabel('Cumulative Lift: {0} vs {1}'.format(variant_one,
variant_two))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class _ab_test_plotting(_ab_test_utils):
"""Provide Funcs for class to plot Bayesian charts."""
def _plot_posteriors(self, variants=[]):
"""Plot KDE of the posterior samples.
This is a private function. For a public interface, see
plot_posteriors().
Keyword Arguments:
variants {list} -- which variants to plot. If empty, all are
            plotted. Otherwise, they must be contained in raw_data
(default: {[]}).
"""
if variants == []:
variants = list(self.posteriors.keys())
for variant in variants:
sns.kdeplot(self.posteriors[variant].get_posterior_sample(),
shade=True, color=self.posteriors[variant].get_color())
plt.legend(labels=variants, loc='upper right')
if self.prior_function == 'beta':
plt.xlabel('Conversion Rate')
elif self.prior_function == 'log-normal' or self.prior_function == 'normal':
plt.xlabel(self.metric)
sns.despine(left=True)
plt.yticks([], [])
title = 'Distribution(s) for {0} for {1}'.format(self.
_stringify_variants(variants), self.metric)
title = self._format_title(title)
plt.title(title)
if self.prior_function == 'beta':
locs, labels = plt.xticks()
labels = self._format_axis_as_percent(locs, labels)
plt.xticks(locs, labels=labels)
def _plot_positive_lift(self, numerator_name, denominator_name):
"""Plot the lift vector as a kernel density estimation.
This is a private function. For a public interface, see
plot_positive_lift().
Arguments:
numerator_name {str} -- The name of the numerator in the lift
calculation.
            denominator_name {str} -- The name of the denominator in the lift
calculation.
"""
lift = self.lift[numerator_name][denominator_name]
ax = sns.kdeplot(lift, shade=True)
line = ax.get_lines()[0]
x, y = line.get_data()
mask = x > 0
x, y = x[mask], y[mask]
ax.fill_between(x, y1=y, alpha=0.5, facecolor='red')
if len(self.variant_bucket_names) > 1:
title = numerator_name + ' vs ' + denominator_name
ax.set_ylabel(title, rotation=0, fontstyle='italic')
plt.axvline(x=0, linestyle='dotted', color='black')
plt.xlabel('Lift')
percent_positive_lift = sum(i > 0 for i in lift) / len(lift)
title = '{0} had {1:.2%} probability of positive lift'.format(self.
metric, percent_positive_lift)
title = self._format_title(title)
plt.title(title)
sns.despine(left=True)
plt.yticks([], [])
locs, labels = plt.xticks()
labels = self._format_axis_as_percent(locs, labels)
plt.xticks(locs, labels=labels)
def _plot_ecdf(self, numerator_name, denominator_name):
"""Plot the empirical cumulative distribution function.
This is a private function. For a public interface, see
plot_ecdf().
Arguments:
numerator_name {str} -- The name of the numerator in the lift
calculation.
            denominator_name {str} -- The name of the denominator in the lift
calculation.
"""
x = self.ecdf[numerator_name][denominator_name]['x']
y = self.ecdf[numerator_name][denominator_name]['y']
lower_bound = x[y.index(min(y, key=lambda x: abs(x - self.
confidence_level)))]
median = x[y.index(min(y, key=lambda x: abs(x - 0.5)))]
upper_bound = x[y.index(min(y, key=lambda x: abs(x - (1 - self.
confidence_level))))]
sns.lineplot(x=x, y=y)
ci = 1 - self.confidence_level
title = (
'Median Lift was {0:.2%}, with a {1:.0%} CI of [{2:.2%}, {3:.2%}]'
.format(median, ci, lower_bound, upper_bound))
title = self._format_title(title)
plt.title(title)
plt.xlabel('Lift')
plt.ylabel('Cumulative Probability')
plt.axvline(x=lower_bound, linestyle='dotted', color='black')
plt.axvline(x=median, linestyle='dotted', color='black')
plt.axvline(x=upper_bound, linestyle='dotted', color='black')
sns.despine(left=True)
locs, labels = plt.xticks()
labels = self._format_axis_as_percent(locs, labels)
plt.xticks(locs, labels=labels)
def _calc_ecdf(self):
"""Calculate the empirical CDFs and set member var."""
for numerator, vals in self.lift.items():
for denominator, lift in vals.items():
raw_data = np.array(lift)
cdfx = np.sort(np.unique(lift))
x_values = np.linspace(start=min(cdfx), stop=max(cdfx), num
=len(cdfx))
size_data = raw_data.size
y_values = []
for i in x_values:
temp = raw_data[raw_data <= i]
value = temp.size / size_data
y_values.append(value)
temp = {}
temp['x'] = x_values
temp['y'] = y_values
if numerator not in self.ecdf.keys():
self.ecdf[numerator] = {}
self.ecdf[numerator][denominator] = temp
else:
self.ecdf[numerator][denominator] = temp
def _calc_lift(self):
"""Calculate the lift of the variants over the others."""
for key, val in self.posteriors.items():
if key == self.control_bucket_name:
continue
lift_over_control = np.divide(val.get_posterior_sample(), self.
posteriors[self.control_bucket_name].get_posterior_sample()
) - 1
if key not in self.lift.keys():
self.lift[key] = {}
self.lift[key][self.control_bucket_name] = lift_over_control
else:
self.lift[key][self.control_bucket_name] = lift_over_control
if self.debug:
percent_positive_lift = sum(i > 0 for i in lift_over_control
) / len(lift_over_control)
print('percent positive lift for {0} over {1} = {2:.2%}'.
format(key, self.control_bucket_name,
percent_positive_lift))
if self.compare_variants:
comparisons = list(range(0, len(self.variant_bucket_names)))
combs = combinations(comparisons, 2)
for combination in combs:
denom = self.posteriors[self.variant_bucket_names[
combination[0]]]
num = self.posteriors[self.variant_bucket_names[combination[1]]
]
lift = np.divide(num.get_posterior_sample(), denom.
get_posterior_sample()) - 1
if num.get_variant_name() not in self.lift.keys():
self.lift[num.get_variant_name()] = {}
self.lift[num.get_variant_name()][denom.get_variant_name()
] = lift
else:
self.lift[num.get_variant_name()][denom.get_variant_name()
] = lift
if self.debug:
percent_positive_lift = sum(i > 0 for i in lift) / len(lift
)
print('percent positive lift for {0} over {1} = {2:.2%}'
.format(num.get_variant_name(), denom.
get_variant_name(), percent_positive_lift))
def plot_posteriors(self, variants=[]):
"""Plot the PDFs of the posterior distributions.
Arguments:
variants {list} -- List of variant names to be plotted.
        If variants is not set, all are plotted; otherwise, the variants
in the list are plotted. Variants must only have items in
bucket_col_name (default: {[]}).
"""
if variants != []:
for var in variants:
if var not in self.posteriors.keys():
raise ValueError(
'Variants must only be a value in bucket_col_name')
self._plot_posteriors(variants)
def plot_positive_lift(self, variant_one, variant_two):
"""Plot the positive lift pdt between variant_one and variant_two.
Arguments:
variant_one and variant_two should not be the same
variant_one {str} -- should be a value in bucket_col_name.
variant_two {str} -- should be a value in bucket_col_name.
"""
if variant_one == variant_two:
raise ValueError('variant_one and variant_two cannot be the same')
if variant_one not in self.posteriors.keys(
) or variant_two not in self.posteriors.keys():
raise ValueError('Variants must only be a value in column {}'.
format(self.bucket_col_name))
if (variant_one != self.control_bucket_name and variant_two != self
.control_bucket_name):
if not self.compare_variants:
raise RuntimeError(
'Compare_variants must be set to true in order to compare {0} and {1}'
.format(variant_one, variant_two))
if variant_one in self.lift.keys() and variant_two in self.lift[
variant_one].keys():
self._plot_positive_lift(numerator_name=variant_one,
denominator_name=variant_two)
else:
self._plot_positive_lift(numerator_name=variant_two,
denominator_name=variant_one)
def plot_ecdf(self, variant_one, variant_two):
"""Plot the empirical cdf for the lift b/w variant_one and variant_two.
Arguments:
variant_one {str} -- should be a value in bucket_col_name.
variant_two {str} -- should be a value in bucket_col_name.
"""
if variant_one == variant_two:
raise ValueError('variant_one and variant_two cannot be the same')
if variant_one not in self.posteriors.keys(
) or variant_two not in self.posteriors.keys():
raise ValueError('Variants must only be a value in column {}'.
format(self.bucket_col_name))
if variant_one in self.ecdf.keys() and variant_two in self.ecdf[
variant_one].keys():
self._plot_ecdf(numerator_name=variant_one, denominator_name=
variant_two)
plt.ylabel('Cumulative Lift: {0} vs {1}'.format(variant_two,
variant_one))
else:
self._plot_ecdf(numerator_name=variant_two, denominator_name=
variant_one)
plt.ylabel('Cumulative Lift: {0} vs {1}'.format(variant_one,
variant_two))
<|reserved_special_token_1|>
"""Plotting functionality for ab_test_model."""
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from itertools import combinations
from ._ab_test_model_utils import _ab_test_utils
# pylint: disable=no-member
class _ab_test_plotting(_ab_test_utils):
"""Provide Funcs for class to plot Bayesian charts."""
def _plot_posteriors(self, variants=[]):
"""Plot KDE of the posterior samples.
This is a private function. For a public interface, see
plot_posteriors().
Keyword Arguments:
variants {list} -- which variants to plot. If empty, all are
            plotted. Otherwise, they must be contained in raw_data
(default: {[]}).
"""
if variants == []:
variants = list(self.posteriors.keys())
for variant in variants:
sns.kdeplot(self.posteriors[variant].get_posterior_sample(),
shade=True,
color=self.posteriors[variant].get_color())
plt.legend(labels=variants, loc='upper right')
if self.prior_function == 'beta':
plt.xlabel('Conversion Rate')
elif (self.prior_function == 'log-normal'
or self.prior_function == 'normal'):
plt.xlabel(self.metric)
sns.despine(left=True)
plt.yticks([], [])
title = 'Distribution(s) for {0} for {1}'.format(
self._stringify_variants(variants),
self.metric)
title = self._format_title(title)
plt.title(title)
if self.prior_function == 'beta':
locs, labels = plt.xticks()
labels = self._format_axis_as_percent(locs, labels)
plt.xticks(locs, labels=labels)
def _plot_positive_lift(self, numerator_name, denominator_name):
"""Plot the lift vector as a kernel density estimation.
This is a private function. For a public interface, see
plot_positive_lift().
Arguments:
numerator_name {str} -- The name of the numerator in the lift
calculation.
            denominator_name {str} -- The name of the denominator in the lift
calculation.
"""
lift = self.lift[numerator_name][denominator_name]
ax = sns.kdeplot(lift, shade=True)
line = ax.get_lines()[0]
x, y = line.get_data()
mask = x > 0
x, y = x[mask], y[mask]
ax.fill_between(x, y1=y, alpha=0.5, facecolor='red')
if len(self.variant_bucket_names) > 1:
title = numerator_name + ' vs ' + denominator_name
ax.set_ylabel(title, rotation=0, fontstyle='italic')
plt.axvline(x=0, linestyle='dotted', color='black')
plt.xlabel('Lift')
percent_positive_lift = sum(i > 0 for i in lift) / len(lift)
title = '{0} had {1:.2%} probability of positive lift'.format(
self.metric,
percent_positive_lift)
title = self._format_title(title)
plt.title(title)
sns.despine(left=True)
plt.yticks([], [])
locs, labels = plt.xticks()
labels = self._format_axis_as_percent(locs, labels)
plt.xticks(locs, labels=labels)

    def _plot_ecdf(self, numerator_name, denominator_name):
"""Plot the empirical cumulative distribution function.
This is a private function. For a public interface, see
plot_ecdf().
Arguments:
numerator_name {str} -- The name of the numerator in the lift
calculation.
            denominator_name {str} -- The name of the denominator in the lift
calculation.
"""
x = self.ecdf[numerator_name][denominator_name]['x']
y = self.ecdf[numerator_name][denominator_name]['y']
        # Pick the lift values whose cumulative probabilities are closest to
        # the lower tail, the median and the upper tail.
        lower_bound = x[y.index(min(
            y, key=lambda v: abs(v - self.confidence_level)))]
        median = x[y.index(min(y, key=lambda v: abs(v - 0.5)))]
        upper_bound = x[y.index(min(
            y, key=lambda v: abs(v - (1 - self.confidence_level))))]
sns.lineplot(x=x, y=y)
ci = 1 - self.confidence_level
title = ('Median Lift was {0:.2%}, with a '
'{1:.0%} CI of [{2:.2%}, {3:.2%}]'.format(median,
ci,
lower_bound,
upper_bound))
title = self._format_title(title)
plt.title(title)
plt.xlabel('Lift')
plt.ylabel('Cumulative Probability')
plt.axvline(x=lower_bound, linestyle='dotted', color='black')
plt.axvline(x=median, linestyle='dotted', color='black')
plt.axvline(x=upper_bound, linestyle='dotted', color='black')
sns.despine(left=True)
locs, labels = plt.xticks()
labels = self._format_axis_as_percent(locs, labels)
plt.xticks(locs, labels=labels)

    def _calc_ecdf(self):
"""Calculate the empirical CDFs and set member var."""
for numerator, vals in self.lift.items():
for denominator, lift in vals.items():
raw_data = np.array(lift)
cdfx = np.sort(np.unique(lift))
x_values = np.linspace(start=min(cdfx),
stop=max(cdfx),
num=len(cdfx))
size_data = raw_data.size
y_values = []
for i in x_values:
temp = raw_data[raw_data <= i]
value = temp.size / size_data
y_values.append(value)
temp = {}
temp['x'] = x_values
temp['y'] = y_values
                # Both branches stored the same value; setdefault collapses them.
                self.ecdf.setdefault(numerator, {})[denominator] = temp

    def _calc_lift(self):
"""Calculate the lift of the variants over the others."""
for key, val in self.posteriors.items():
if key == self.control_bucket_name:
continue
lift_over_control = np.divide(val.get_posterior_sample(),
self.posteriors[
self.control_bucket_name]
.get_posterior_sample()) - 1
            self.lift.setdefault(key, {})[
                self.control_bucket_name] = lift_over_control
if self.debug:
percent_positive_lift = sum(i > 0 for i in
lift_over_control) / \
len(lift_over_control)
print('percent positive lift for {0} over {1} = {2:.2%}'
.format(key, self.control_bucket_name,
percent_positive_lift))
if self.compare_variants:
            comparisons = range(len(self.variant_bucket_names))
combs = combinations(comparisons, 2)
for combination in combs:
denom = self.posteriors[
self.variant_bucket_names[combination[0]]]
num = self.posteriors[
self.variant_bucket_names[combination[1]]]
lift = np.divide(num.get_posterior_sample(),
denom.get_posterior_sample()) - 1
                self.lift.setdefault(num.get_variant_name(), {})[
                    denom.get_variant_name()] = lift
if self.debug:
percent_positive_lift = sum(i > 0 for i in lift) \
/ len(lift)
print('percent positive lift for {0} over {1} = {2:.2%}'
.format(num.get_variant_name(),
denom.get_variant_name(),
percent_positive_lift))

    def plot_posteriors(self, variants=[]):
"""Plot the PDFs of the posterior distributions.
Arguments:
variants {list} -- List of variant names to be plotted.
If variants is not set, all are plotted, otherwise, the variants
in the list are plotted. Variants must only have items in
bucket_col_name (default: {[]}).
"""
if variants != []:
for var in variants:
if var not in self.posteriors.keys():
raise ValueError(('Variants must only be a value in '
'bucket_col_name'))
self._plot_posteriors(variants)

    def plot_positive_lift(self, variant_one, variant_two):
        """Plot the positive lift PDF between variant_one and variant_two.
Arguments:
            variant_one {str} -- should be a value in bucket_col_name.
            variant_two {str} -- should be a value in bucket_col_name.
            variant_one and variant_two must not be the same.
"""
if variant_one == variant_two:
raise ValueError('variant_one and variant_two cannot be the same')
if variant_one not in self.posteriors.keys() or \
variant_two not in self.posteriors.keys():
raise ValueError(('Variants must only be a value in column '
'{}'.format(self.bucket_col_name)))
if variant_one != self.control_bucket_name and \
variant_two != self.control_bucket_name:
if not self.compare_variants:
raise RuntimeError('Compare_variants must be set to true in '
'order to compare {0} and {1}'
.format(variant_one, variant_two))
if variant_one in self.lift.keys() and \
variant_two in self.lift[variant_one].keys():
self._plot_positive_lift(numerator_name=variant_one,
denominator_name=variant_two)
else:
self._plot_positive_lift(numerator_name=variant_two,
denominator_name=variant_one)

    def plot_ecdf(self, variant_one, variant_two):
        """Plot the empirical CDF of lift between variant_one and variant_two.
Arguments:
variant_one {str} -- should be a value in bucket_col_name.
variant_two {str} -- should be a value in bucket_col_name.
"""
if variant_one == variant_two:
raise ValueError('variant_one and variant_two cannot be the same')
if variant_one not in self.posteriors.keys() or \
variant_two not in self.posteriors.keys():
raise ValueError(('Variants must only be a value in column '
'{}'.format(self.bucket_col_name)))
if variant_one in self.ecdf.keys() and \
variant_two in self.ecdf[variant_one].keys():
self._plot_ecdf(numerator_name=variant_one,
denominator_name=variant_two)
plt.ylabel('Cumulative Lift: {0} vs {1}'
.format(variant_two, variant_one))
else:
self._plot_ecdf(numerator_name=variant_two,
denominator_name=variant_one)
plt.ylabel('Cumulative Lift: {0} vs {1}'
.format(variant_one, variant_two))
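
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the lift and
# empirical-CDF arithmetic from _calc_lift, _calc_ecdf and _plot_ecdf,
# reproduced on synthetic posterior samples. The Beta parameters, sample size
# and alpha below are assumptions chosen for the example, not values taken
# from the model.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    control = rng.beta(100, 900, size=2000)   # e.g. posterior conversion rate
    variant = rng.beta(110, 890, size=2000)

    # Lift over control, exactly as _calc_lift computes it.
    lift = np.divide(variant, control) - 1
    print('P(lift > 0) = {:.2%}'.format((lift > 0).mean()))

    # Empirical CDF as in _calc_ecdf: fraction of samples <= each grid point.
    cdfx = np.sort(np.unique(lift))
    x_values = np.linspace(start=cdfx.min(), stop=cdfx.max(), num=len(cdfx))
    y_values = np.array([(lift <= xi).mean() for xi in x_values])

    # Interval endpoints the way _plot_ecdf reads them off: the x whose
    # cumulative probability is closest to alpha, 0.5 and 1 - alpha. Because
    # the CDF is evaluated on a uniform grid rather than on the sample points,
    # the bounds are approximations at the grid's resolution.
    alpha = 0.05   # assumed confidence_level
    lower = x_values[np.argmin(np.abs(y_values - alpha))]
    median = x_values[np.argmin(np.abs(y_values - 0.5))]
    upper = x_values[np.argmin(np.abs(y_values - (1 - alpha)))]
    print('median lift {:.2%}, interval [{:.2%}, {:.2%}]'.format(
        median, lower, upper))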
"""StereoBM: InAccel Coral wrapper for the Xilinx Vitis Vision stereoBM kernel."""
import inaccel.coral as inaccel
import numpy as np
import time


class StereoBM:
def __init__(self, cameraMA_l=None, cameraMA_r=None, distC_l=None,
distC_r=None, irA_l=None, irA_r=None, bm_state=None):
        # Allocate shared buffers for the rectification camera parameters and
        # the block-matching (bm_state) configuration.
        with inaccel.allocator:
if cameraMA_l is None:
self.cameraMA_l_fl = np.array([933.173, 0.0, 663.451, 0.0,
933.173, 377.015, 0.0, 0.0, 1.0], dtype=np.float32)
else:
self.cameraMA_l_fl = np.array(cameraMA_l, dtype=np.float32)
if cameraMA_r is None:
self.cameraMA_r_fl = np.array([933.467, 0.0, 678.297, 0.0,
933.467, 359.623, 0.0, 0.0, 1.0], dtype=np.float32)
else:
self.cameraMA_r_fl = np.array(cameraMA_r, dtype=np.float32)
if distC_l is None:
self.distC_l_fl = np.array([-0.169398, 0.0227329, 0.0, 0.0,
0.0], dtype=np.float32)
else:
self.distC_l_fl = np.array(distC_l, dtype=np.float32)
if distC_r is None:
self.distC_r_fl = np.array([-0.170581, 0.0249444, 0.0, 0.0,
0.0], dtype=np.float32)
else:
self.distC_r_fl = np.array(distC_r, dtype=np.float32)
if irA_l is None:
self.irA_l_fl = np.array([0.0011976323, -1.9e-09, -
0.8153011732, 7e-10, 0.0011976994, -0.4422348617,
1.26839e-05, 1.064e-07, 0.9913820905], dtype=np.float32)
else:
self.irA_l_fl = np.array(irA_l, dtype=np.float32)
if irA_r is None:
self.irA_r_fl = np.array([0.0011976994, 0.0, -0.8047567905,
-0.0, 0.0011976994, -0.4420566166, -0.0, -1.064e-07,
1.0000392898], dtype=np.float32)
else:
self.irA_r_fl = np.array(irA_r, dtype=np.float32)
if bm_state is None:
self.bm_state_arr = np.array([0, 15, 31, 15, 0, 48, 20, 15,
16, 3, 0], dtype=np.int32)
else:
self.bm_state_arr = np.array(bm_state, dtype=np.int32)

    def runAsync(self, left_img, right_img):
        # Timestamp in microseconds; lastruntime() reports the elapsed time.
        self.m_runStartTime = int(round(time.time() * 1000000))
if left_img is None:
raise RuntimeError('Invalid left image')
if right_img is None:
raise RuntimeError('Invalid right image')
        if (left_img.shape[0] != right_img.shape[0]
                or left_img.shape[1] != right_img.shape[1]):
raise RuntimeError('Image sizes differ')
        # Allocate and initialize the input and output buffers.
        rows = np.int32(left_img.shape[0])
cols = np.int32(left_img.shape[1])
with inaccel.allocator:
self.left_mat = np.array(left_img)
self.right_mat = np.array(right_img)
self.disp_mat = np.ndarray((rows, cols), dtype=np.uint16)
        # Create a request for the stereoBM accelerator and bind its arguments.
        req = inaccel.request('com.xilinx.vitis.vision.stereoBM')
req.arg(self.left_mat).arg(self.right_mat).arg(self.disp_mat)
req.arg(self.cameraMA_l_fl).arg(self.cameraMA_r_fl)
req.arg(self.distC_l_fl).arg(self.distC_r_fl)
req.arg(self.irA_l_fl).arg(self.irA_r_fl)
req.arg(self.bm_state_arr)
req.arg(rows).arg(cols)
self.response = inaccel.submit(req)

    def wait(self):
        # Block until the accelerator request completes.
        self.response.result()
        # Scale the raw disparity map to an 8-bit image: 48 matches the
        # disparity range configured in bm_state_arr, and the division by 16
        # converts from the kernel's fixed-point output.
        disp_mat_scaled = (self.disp_mat.view(np.ndarray) * (256.0 / 48.0) /
                           16.0).astype(np.uint8)
self.m_runEndTime = int(round(time.time() * 1000000))
return disp_mat_scaled

    def run(self, left_img, right_img):
        # Convenience wrapper: submit the request and block for the result.
        self.runAsync(left_img, right_img)
return self.wait()

    def lastruntime(self):
        # Elapsed time of the last run, in microseconds.
        duration = self.m_runEndTime - self.m_runStartTime
return duration
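
# ---------------------------------------------------------------------------
# Illustrative driver (not part of the original class). It assumes an InAccel-
# enabled host with the com.xilinx.vitis.vision.stereoBM accelerator deployed,
# and uses OpenCV only to load a grayscale stereo pair; the file names are
# placeholders.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import cv2

    left = cv2.imread('left.png', cv2.IMREAD_GRAYSCALE)
    right = cv2.imread('right.png', cv2.IMREAD_GRAYSCALE)

    sbm = StereoBM()                   # default calibration and bm_state
    disparity = sbm.run(left, right)   # submit the request and block for it
    print('accelerator time: {} us'.format(sbm.lastruntime()))
    cv2.imwrite('disparity.png', disparity)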
lastruntime(self):\n\t\tduration = self.m_runEndTime - self.m_runStartTime\n\t\treturn duration\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# Generated by Django 3.2.4 on 2021-06-16 13:41
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('FAQ', '0004_auto_20210616_1253'),
]
operations = [
migrations.RemoveField(
model_name='question',
name='link',
),
migrations.RemoveField(
model_name='question',
name='photo',
),
migrations.AlterField(
model_name='question',
name='answer',
field=ckeditor.fields.RichTextField(),
),
]
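# Hedged sketch (an assumption for illustration, not part of this app's code):
# after the operations above are applied, the FAQ Question model would expose
# roughly the following among its remaining fields:
#
#     from django.db import models
#     from ckeditor.fields import RichTextField
#
#     class Question(models.Model):
#         answer = RichTextField()
#
# i.e. the old `link` and `photo` fields are gone and `answer` is rich text.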
|
normal
|
{
"blob_id": "a4c4a5cc63c345d1fa8cbf426f7857a0f3d4357f",
"index": 8360,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('FAQ', '0004_auto_20210616_1253')]\n operations = [migrations.RemoveField(model_name='question', name='link'\n ), migrations.RemoveField(model_name='question', name='photo'),\n migrations.AlterField(model_name='question', name='answer', field=\n ckeditor.fields.RichTextField())]\n",
"step-4": "import ckeditor.fields\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('FAQ', '0004_auto_20210616_1253')]\n operations = [migrations.RemoveField(model_name='question', name='link'\n ), migrations.RemoveField(model_name='question', name='photo'),\n migrations.AlterField(model_name='question', name='answer', field=\n ckeditor.fields.RichTextField())]\n",
"step-5": "# Generated by Django 3.2.4 on 2021-06-16 13:41\n\nimport ckeditor.fields\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('FAQ', '0004_auto_20210616_1253'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='question',\n name='link',\n ),\n migrations.RemoveField(\n model_name='question',\n name='photo',\n ),\n migrations.AlterField(\n model_name='question',\n name='answer',\n field=ckeditor.fields.RichTextField(),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def findNearestPoint(points, no_used, src):
dest = src
minDist = sys.float_info.max
for i in range(len(points)):
if no_used[i] and i != src:
dist = utils.length(points[src], points[i])
if dist < minDist:
dest = i
minDist = dist
return dest, minDist
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def findNearestPoint(points, no_used, src):
dest = src
minDist = sys.float_info.max
for i in range(len(points)):
if no_used[i] and i != src:
dist = utils.length(points[src], points[i])
if dist < minDist:
dest = i
minDist = dist
return dest, minDist
def solve(points):
    tour = [0 for i in range(len(points))]
    no_used = [True for i in range(len(points))]
totalDist = 0.0
src = 0
no_used[0] = False
for i in range(1, len(points)):
dest, minDist = findNearestPoint(points, no_used, src)
tour[i] = dest
no_used[dest] = False
src = dest
totalDist += minDist
return totalDist + utils.length(points[tour[-1]], points[tour[0]]), tour
<|reserved_special_token_1|>
import sys
import utils
def findNearestPoint(points, no_used, src):
dest = src
minDist = sys.float_info.max
for i in range(len(points)):
if no_used[i] and i != src:
dist = utils.length(points[src], points[i])
if dist < minDist:
dest = i
minDist = dist
return dest, minDist
def solve(points):
    tour = [0 for i in range(len(points))]
    no_used = [True for i in range(len(points))]
totalDist = 0.0
src = 0
no_used[0] = False
for i in range(1, len(points)):
dest, minDist = findNearestPoint(points, no_used, src)
tour[i] = dest
no_used[dest] = False
src = dest
totalDist += minDist
return totalDist + utils.length(points[tour[-1]], points[tour[0]]), tour
<|reserved_special_token_1|>
import sys
import utils
#import random
def findNearestPoint(points, no_used, src):
    # If no unused point is found, dest stays src and minDist stays at float max
dest = src
minDist = sys.float_info.max
for i in range(len(points)):
        if no_used[i] and i != src:
            dist = utils.length(points[src], points[i])
            if dist < minDist:
                dest = i
                minDist = dist
return dest, minDist
def solve(points):
#get an initial tour by NearestPoint method
tour = [0 for i in range(len(points))]
no_used = [True for i in range(len(points))]
totalDist = 0.0
# src =int( random.random()*(len(points)-1))
# no_used[src] = False
# tour[0]=src
src =0
no_used[0] = False
for i in range(1, len(points)):
dest, minDist = findNearestPoint(points, no_used, src) #find Nearest Point
tour[i] = dest
no_used[dest] = False #have been used
src = dest
totalDist += minDist
#plus distance between last point and initial point
return totalDist + utils.length(points[tour[-1]], points[tour[0]]), tour
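# Hedged usage sketch (not in the original file): assuming utils.length returns
# the Euclidean distance between two (x, y) points, the greedy tour is built as:
#
#     points = [(0.0, 0.0), (3.0, 0.0), (3.0, 4.0)]
#     total, tour = solve(points)
#     # tour == [0, 1, 2]; total == 3 + 4 + 5 == 12.0 for this right triangle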
|
flexible
|
{
"blob_id": "943db90aa7721ddad3d7f5103c4d398fbf4e143b",
"index": 2768,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef findNearestPoint(points, no_used, src):\n dest = src\n minDist = sys.float_info.max\n for i in range(len(points)):\n if no_used[i] and i != src:\n dist = utils.length(points[src], points[i])\n if dist < minDist:\n dest = i\n minDist = dist\n return dest, minDist\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef findNearestPoint(points, no_used, src):\n dest = src\n minDist = sys.float_info.max\n for i in range(len(points)):\n if no_used[i] and i != src:\n dist = utils.length(points[src], points[i])\n if dist < minDist:\n dest = i\n minDist = dist\n return dest, minDist\n\n\ndef solve(points):\n tour = [(0) for i in range(len(points))]\n no_used = [(True) for i in range(len(points))]\n totalDist = 0.0\n src = 0\n no_used[0] = False\n for i in range(1, len(points)):\n dest, minDist = findNearestPoint(points, no_used, src)\n tour[i] = dest\n no_used[dest] = False\n src = dest\n totalDist += minDist\n return totalDist + utils.length(points[tour[-1]], points[tour[0]]), tour\n",
"step-4": "import sys\nimport utils\n\n\ndef findNearestPoint(points, no_used, src):\n dest = src\n minDist = sys.float_info.max\n for i in range(len(points)):\n if no_used[i] and i != src:\n dist = utils.length(points[src], points[i])\n if dist < minDist:\n dest = i\n minDist = dist\n return dest, minDist\n\n\ndef solve(points):\n tour = [(0) for i in range(len(points))]\n no_used = [(True) for i in range(len(points))]\n totalDist = 0.0\n src = 0\n no_used[0] = False\n for i in range(1, len(points)):\n dest, minDist = findNearestPoint(points, no_used, src)\n tour[i] = dest\n no_used[dest] = False\n src = dest\n totalDist += minDist\n return totalDist + utils.length(points[tour[-1]], points[tour[0]]), tour\n",
"step-5": "import sys\nimport utils\n#import random\n\ndef findNearestPoint(points,no_used , src):\n # If no nearest point found, return max.\n \n dest = src\n minDist = sys.float_info.max\n \n for i in range(len(points)):\n if no_used[i] and i!=src:\n\n \n dist = utils.length(points[src], points[i]) \n if dist < minDist:\n dest =i\n minDist = dist \n \n\n return dest, minDist\n \ndef solve(points):\n #get an initial tour by NearestPoint method\n tour = [0 for i in range(len(points))]\n no_used = [True for i in range(len(points))]\n totalDist = 0.0\n \n# src =int( random.random()*(len(points)-1))\n# no_used[src] = False\n# tour[0]=src\n src =0\n no_used[0] = False\n \n for i in range(1, len(points)):\n dest, minDist = findNearestPoint(points, no_used, src) #find Nearest Point\n tour[i] = dest\n no_used[dest] = False #have been used\n src = dest\n totalDist += minDist\n #plus distance between last point and initial point\n return totalDist + utils.length(points[tour[-1]], points[tour[0]]), tour\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class projectile(pygame.sprite.Sprite):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class enemy(pygame.sprite.Sprite):
im = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))
im2 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))
im3 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))
imageList = [im, im2, im3]
def __init__(self, x, y):
pygame.sprite.Sprite.__init__(self)
self.x = x
self.y = y
self.hitbox = self.x, self.y, 60, 60
self.vel = 6
self.imageRandom = random.choice(self.imageList)
def draw(self, win):
self.move_enemy()
win.blit(self.imageRandom, (self.x, self.y))
def move_enemy(self):
if self.vel > 0:
if self.y + self.vel < 560:
self.y += self.vel
self.hitbox = self.x, self.y, 60, 60
else:
self.vel = self.vel * -1
elif self.y - self.vel > 10:
self.y += self.vel
self.hitbox = self.x, self.y, 60, 60
else:
self.vel = self.vel * -1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class projectile(pygame.sprite.Sprite):
def __init__(self, x, y, ux, uy):
pygame.sprite.Sprite.__init__(self)
self.x = x + 30
self.y = y
self.startX = self.x
self.startY = self.y
self.horVel = ux
self.verVel = uy
self.color = random.choice(colorList)
self.bulletTime = 0.0
self.status = 1
def update(self):
global maxHeight
global maxHeightPos
global landingPos
global ranges
global trace
if self.y <= screen_height:
self.bulletTime += timeStep
self.x = self.horVel * self.bulletTime * pixelRatio + self.startX
self.y = -(self.verVel * self.bulletTime + 0.5 * accel * self.
bulletTime ** 2) * pixelRatio + self.startY
trace.append([self.x, self.y])
if self.x >= screen_width:
self.status = 0
if self.y < 0:
self.status = 0
else:
self.status = 0
pygame.display.update()
def draw(self, win):
pygame.draw.circle(win, self.color, (round(self.x), round(self.y)), 6)
for t in traceShow:
pygame.draw.circle(win, self.color, (round(t[0]), round(t[1])), 1)
class enemy(pygame.sprite.Sprite):
im = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))
im2 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))
im3 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))
imageList = [im, im2, im3]
def __init__(self, x, y):
pygame.sprite.Sprite.__init__(self)
self.x = x
self.y = y
self.hitbox = self.x, self.y, 60, 60
self.vel = 6
self.imageRandom = random.choice(self.imageList)
def draw(self, win):
self.move_enemy()
win.blit(self.imageRandom, (self.x, self.y))
def move_enemy(self):
if self.vel > 0:
if self.y + self.vel < 560:
self.y += self.vel
self.hitbox = self.x, self.y, 60, 60
else:
self.vel = self.vel * -1
elif self.y - self.vel > 10:
self.y += self.vel
self.hitbox = self.x, self.y, 60, 60
else:
self.vel = self.vel * -1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class player(pygame.sprite.Sprite):
image = pygame.image.load(os.path.join(path, 'Gallery', 'life.png'))
def __init__(self, x, y):
pygame.sprite.Sprite.__init__(self)
self.x = x
self.y = y
self.move = 10
def draw(self, win):
win.blit(self.image, (self.x, self.y))
class projectile(pygame.sprite.Sprite):
def __init__(self, x, y, ux, uy):
pygame.sprite.Sprite.__init__(self)
self.x = x + 30
self.y = y
self.startX = self.x
self.startY = self.y
self.horVel = ux
self.verVel = uy
self.color = random.choice(colorList)
self.bulletTime = 0.0
self.status = 1
def update(self):
global maxHeight
global maxHeightPos
global landingPos
global ranges
global trace
if self.y <= screen_height:
self.bulletTime += timeStep
self.x = self.horVel * self.bulletTime * pixelRatio + self.startX
self.y = -(self.verVel * self.bulletTime + 0.5 * accel * self.
bulletTime ** 2) * pixelRatio + self.startY
trace.append([self.x, self.y])
if self.x >= screen_width:
self.status = 0
if self.y < 0:
self.status = 0
else:
self.status = 0
pygame.display.update()
def draw(self, win):
pygame.draw.circle(win, self.color, (round(self.x), round(self.y)), 6)
for t in traceShow:
pygame.draw.circle(win, self.color, (round(t[0]), round(t[1])), 1)
class enemy(pygame.sprite.Sprite):
im = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))
im2 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))
im3 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))
imageList = [im, im2, im3]
def __init__(self, x, y):
pygame.sprite.Sprite.__init__(self)
self.x = x
self.y = y
self.hitbox = self.x, self.y, 60, 60
self.vel = 6
self.imageRandom = random.choice(self.imageList)
def draw(self, win):
self.move_enemy()
win.blit(self.imageRandom, (self.x, self.y))
def move_enemy(self):
if self.vel > 0:
if self.y + self.vel < 560:
self.y += self.vel
self.hitbox = self.x, self.y, 60, 60
else:
self.vel = self.vel * -1
elif self.y - self.vel > 10:
self.y += self.vel
self.hitbox = self.x, self.y, 60, 60
else:
self.vel = self.vel * -1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class player(pygame.sprite.Sprite):
image = pygame.image.load(os.path.join(path, 'Gallery', 'life.png'))
def __init__(self, x, y):
pygame.sprite.Sprite.__init__(self)
self.x = x
self.y = y
self.move = 10
def draw(self, win):
win.blit(self.image, (self.x, self.y))
class projectile(pygame.sprite.Sprite):
def __init__(self, x, y, ux, uy):
pygame.sprite.Sprite.__init__(self)
self.x = x + 30
self.y = y
self.startX = self.x
self.startY = self.y
self.horVel = ux
self.verVel = uy
self.color = random.choice(colorList)
self.bulletTime = 0.0
self.status = 1
def update(self):
global maxHeight
global maxHeightPos
global landingPos
global ranges
global trace
if self.y <= screen_height:
self.bulletTime += timeStep
self.x = self.horVel * self.bulletTime * pixelRatio + self.startX
self.y = -(self.verVel * self.bulletTime + 0.5 * accel * self.
bulletTime ** 2) * pixelRatio + self.startY
trace.append([self.x, self.y])
if self.x >= screen_width:
self.status = 0
if self.y < 0:
self.status = 0
else:
self.status = 0
pygame.display.update()
def draw(self, win):
pygame.draw.circle(win, self.color, (round(self.x), round(self.y)), 6)
for t in traceShow:
pygame.draw.circle(win, self.color, (round(t[0]), round(t[1])), 1)
class enemy(pygame.sprite.Sprite):
im = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))
im2 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))
im3 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))
imageList = [im, im2, im3]
def __init__(self, x, y):
pygame.sprite.Sprite.__init__(self)
self.x = x
self.y = y
self.hitbox = self.x, self.y, 60, 60
self.vel = 6
self.imageRandom = random.choice(self.imageList)
def draw(self, win):
self.move_enemy()
win.blit(self.imageRandom, (self.x, self.y))
def move_enemy(self):
if self.vel > 0:
if self.y + self.vel < 560:
self.y += self.vel
self.hitbox = self.x, self.y, 60, 60
else:
self.vel = self.vel * -1
elif self.y - self.vel > 10:
self.y += self.vel
self.hitbox = self.x, self.y, 60, 60
else:
self.vel = self.vel * -1
def display(s):
win.blit(background, (0, 0))
player1.draw(win)
Monster1.draw(win)
Monster2.draw(win)
Monster3.draw(win)
score = font.render('Score : ' + str(s), 1, (0, 0, 0))
win.blit(score, (430, 30))
for bullet in bullets:
bullet.draw(win)
pygame.display.update()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import pygame
import os
import random
#Vx = float(input("Input Vx : "))
#Vy = float(input("Input Vy : "))
Vx = 20
Vy = 20
#GEOMETRY
screen_width = 1000
screen_height = 600
FPS = 30
#COLOR
BLUE = (0, 0, 255)
BLACK = (0, 0, 0)
GREEN = (204, 153, 255)  # despite the name, this RGB value is a light purple
RED = (255, 0, 0)
WHITE = (155, 25, 0)  # despite the name, this RGB value is a dark red
colorList = [BLUE, BLACK, GREEN, RED, WHITE]
#Initialize pygame
pygame.init()
path = os.path.dirname(__file__)
img_path = os.path.join(path, 'Gallery')
background = pygame.image.load('Gallery/parallax.png')
background = pygame.transform.scale(background, [screen_width, screen_height])
win = pygame.display.set_mode([screen_width, screen_height])
pygame.display.set_caption("Stateczek shoot Projectile")
clock = pygame.time.Clock()
pixelRatio = 10
accel = -9.81
timeStep = 1 / FPS
font = pygame.font.SysFont('comic', 50, False, False)
#####CREATE SPRITE#####
class player(pygame.sprite.Sprite):
    image = pygame.image.load(os.path.join(path, 'Gallery', 'life.png'))  # load the player sprite image
    def __init__(self, x, y):  # set up the instance attributes
pygame.sprite.Sprite.__init__(self)
self.x = x
self.y = y
self.move = 10
def draw(self, win):
win.blit(self.image, (self.x, self.y))
#####CREATE PROJECTILE SHOOT#####
class projectile(pygame.sprite.Sprite):
def __init__(self, x, y, ux, uy):
pygame.sprite.Sprite.__init__(self)
self.x = x + 30
self.y = y
self.startX = self.x
self.startY = self.y
self.horVel = ux
self.verVel = uy
self.color = random.choice(colorList)
self.bulletTime = 0.0
self.status = 1
def update(self):
global maxHeight
global maxHeightPos
global landingPos
global ranges
global trace
if self.y <= screen_height:
self.bulletTime += timeStep
self.x = (self.horVel * self.bulletTime) * pixelRatio + self.startX
self.y = -(self.verVel * self.bulletTime + 0.5 * accel * (
self.bulletTime ** 2)) * pixelRatio + self.startY
trace.append([self.x, self.y])
if self.x >= screen_width:
self.status = 0
if self.y < 0:
self.status = 0
        else:  # the bullet has hit the ground
self.status = 0
pygame.display.update()
def draw(self, win):
pygame.draw.circle(win, self.color, (round(self.x), round(self.y)), 6)
for t in traceShow:
pygame.draw.circle(win, self.color, (round(t[0]), round(t[1])), 1)
#####CREATE ENEMYS#####
class enemy(pygame.sprite.Sprite):
im = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))
im2 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))
im3 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))
imageList = [im, im2, im3]
def __init__(self, x, y):
pygame.sprite.Sprite.__init__(self)
self.x = x
self.y = y
self.hitbox = (self.x, self.y, 60, 60)
self.vel = 6
self.imageRandom = random.choice(self.imageList)
def draw(self, win):
self.move_enemy()
win.blit(self.imageRandom, (self.x, self.y))
def move_enemy(self):
if self.vel > 0:
if self.y + self.vel < 560:
self.y += self.vel
self.hitbox = (self.x, self.y, 60, 60)
else:
self.vel = self.vel * -1
else:
if self.y - self.vel > 10:
self.y += self.vel
self.hitbox = (self.x, self.y, 60, 60)
else:
self.vel = self.vel * -1
#####FUNCTION SHOW DISPLAY####
def display(s):
win.blit(background, (0, 0))
player1.draw(win)
Monster1.draw(win)
Monster2.draw(win)
Monster3.draw(win)
score = font.render('Score : ' + str(s), 1, (0, 0, 0))
win.blit(score, (430, 30))
for bullet in bullets:
bullet.draw(win)
pygame.display.update()
# mainloop
Y = 300
X = 30
X1 = random.randint(500, 590)
X2 = random.randint(660, 760)
X3 = random.randint(830, 900)
Y1 = random.randint(60, 720)
Y2 = random.randint(40, 720)
Y3 = random.randint(60, 720)
player1 = player(X, Y)
Monster1 = enemy(X1, Y1)
Monster2 = enemy(X2, Y2)
Monster3 = enemy(X3, Y3)
bullets = []
trace = []
traceShow = []
color = []
resetTrace = False
shootStage = 0
showText = 0
maxHeight = 0
ranges = 0
r = 1
s = 0
### START ###
runing = True
while runing:
clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
runing = False
keys = pygame.key.get_pressed()
if keys[pygame.K_UP]:
if player1.y > 0:
player1.y -= player1.move
else:
player1.y = 0
if keys[pygame.K_DOWN]:
if player1.y < screen_height-30:
player1.y += player1.move
print(player1.y)
else:
player1.y = screen_height-30
print(player1.y)
if keys[pygame.K_RIGHT]:
if player1.x < screen_width-540:
player1.x += player1.move
print(player1.x)
else:
player1.x = screen_width-540
print(player1.x)
if keys[pygame.K_LEFT]:
if player1.x > 0:
player1.x -= player1.move
else:
player1.x = 0
if keys[pygame.K_SPACE]:
if shootStage == 0:
bullets.append(projectile(player1.x, player1.y, Vx, Vy))
shootStage = 1
trace.clear()
for bullet in bullets:
bullet.update()
traceShow = trace
if bullet.y - 5 < Monster1.hitbox[1] + Monster1.hitbox[3] and bullet.y + 5 > Monster1.hitbox[1]:
if bullet.x + 5 > Monster1.hitbox[0] and bullet.x - 5 < Monster1.hitbox[0] + Monster1.hitbox[2]:
bullet.status = 0
X1 = random.randint(500, 590)
Y1 = random.randint(60, 720)
Monster1 = enemy(X1, Y1)
s += 1
if bullet.y - 5 < Monster2.hitbox[1] + Monster2.hitbox[3] and bullet.y + 5 > Monster2.hitbox[1]:
if bullet.x + 5 > Monster2.hitbox[0] and bullet.x - 5 < Monster2.hitbox[0] + Monster2.hitbox[2]:
bullet.status = 0
X2 = random.randint(660, 760)
Y2 = random.randint(60, 720)
Monster2 = enemy(X2, Y2)
s += 1
if bullet.y - 5 < Monster3.hitbox[1] + Monster3.hitbox[3] and bullet.y + 5 > Monster3.hitbox[
1]:
if bullet.x + 5 > Monster3.hitbox[0] and bullet.x - 5 < Monster3.hitbox[0] + Monster3.hitbox[
2]:
bullet.status = 0
X3 = random.randint(830, 900)
Y3 = random.randint(60, 720)
Monster3 = enemy(X3, Y3)
s += 1
if bullet.status == 0:
shootStage = 0
bullets.pop(bullets.index(bullet))
display(s)
pygame.display.update()
pygame.quit()
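# Reading aid (not original code): with accel = -9.81 and timeStep = 1/FPS, each
# bullet in projectile.update() follows the standard kinematics
#     x(t) = x0 + ux * t * pixelRatio
#     y(t) = y0 - (uy * t + 0.5 * a * t**2) * pixelRatio
# where the minus sign flips physics "up" into pygame's downward y axis. For
# example, with ux = uy = 20 the apex is reached at t = uy / 9.81 ~ 2.04 s, at a
# height of uy**2 / (2 * 9.81) ~ 20.4 m, i.e. about 204 pixels at pixelRatio = 10.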
|
flexible
|
{
"blob_id": "0dd5511c0e39f113c46785be78a898e79bc45a21",
"index": 5188,
"step-1": "<mask token>\n\n\nclass projectile(pygame.sprite.Sprite):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass enemy(pygame.sprite.Sprite):\n im = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\n im2 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\n im3 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\n imageList = [im, im2, im3]\n\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.x = x\n self.y = y\n self.hitbox = self.x, self.y, 60, 60\n self.vel = 6\n self.imageRandom = random.choice(self.imageList)\n\n def draw(self, win):\n self.move_enemy()\n win.blit(self.imageRandom, (self.x, self.y))\n\n def move_enemy(self):\n if self.vel > 0:\n if self.y + self.vel < 560:\n self.y += self.vel\n self.hitbox = self.x, self.y, 60, 60\n else:\n self.vel = self.vel * -1\n elif self.y - self.vel > 10:\n self.y += self.vel\n self.hitbox = self.x, self.y, 60, 60\n else:\n self.vel = self.vel * -1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass projectile(pygame.sprite.Sprite):\n\n def __init__(self, x, y, ux, uy):\n pygame.sprite.Sprite.__init__(self)\n self.x = x + 30\n self.y = y\n self.startX = self.x\n self.startY = self.y\n self.horVel = ux\n self.verVel = uy\n self.color = random.choice(colorList)\n self.bulletTime = 0.0\n self.status = 1\n\n def update(self):\n global maxHeight\n global maxHeightPos\n global landingPos\n global ranges\n global trace\n if self.y <= screen_height:\n self.bulletTime += timeStep\n self.x = self.horVel * self.bulletTime * pixelRatio + self.startX\n self.y = -(self.verVel * self.bulletTime + 0.5 * accel * self.\n bulletTime ** 2) * pixelRatio + self.startY\n trace.append([self.x, self.y])\n if self.x >= screen_width:\n self.status = 0\n if self.y < 0:\n self.status = 0\n else:\n self.status = 0\n pygame.display.update()\n\n def draw(self, win):\n pygame.draw.circle(win, self.color, (round(self.x), round(self.y)), 6)\n for t in traceShow:\n pygame.draw.circle(win, self.color, (round(t[0]), round(t[1])), 1)\n\n\nclass enemy(pygame.sprite.Sprite):\n im = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\n im2 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\n im3 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\n imageList = [im, im2, im3]\n\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.x = x\n self.y = y\n self.hitbox = self.x, self.y, 60, 60\n self.vel = 6\n self.imageRandom = random.choice(self.imageList)\n\n def draw(self, win):\n self.move_enemy()\n win.blit(self.imageRandom, (self.x, self.y))\n\n def move_enemy(self):\n if self.vel > 0:\n if self.y + self.vel < 560:\n self.y += self.vel\n self.hitbox = self.x, self.y, 60, 60\n else:\n self.vel = self.vel * -1\n elif self.y - self.vel > 10:\n self.y += self.vel\n self.hitbox = self.x, self.y, 60, 60\n else:\n self.vel = self.vel * -1\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass player(pygame.sprite.Sprite):\n image = pygame.image.load(os.path.join(path, 'Gallery', 'life.png'))\n\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.x = x\n self.y = y\n self.move = 10\n\n def draw(self, win):\n win.blit(self.image, (self.x, self.y))\n\n\nclass projectile(pygame.sprite.Sprite):\n\n def __init__(self, x, y, ux, uy):\n pygame.sprite.Sprite.__init__(self)\n self.x = x + 30\n self.y = y\n self.startX = self.x\n self.startY = self.y\n self.horVel = ux\n self.verVel = uy\n self.color = random.choice(colorList)\n self.bulletTime = 0.0\n self.status = 1\n\n def update(self):\n global maxHeight\n global maxHeightPos\n global landingPos\n global ranges\n global trace\n if self.y <= screen_height:\n self.bulletTime += timeStep\n self.x = self.horVel * self.bulletTime * pixelRatio + self.startX\n self.y = -(self.verVel * self.bulletTime + 0.5 * accel * self.\n bulletTime ** 2) * pixelRatio + self.startY\n trace.append([self.x, self.y])\n if self.x >= screen_width:\n self.status = 0\n if self.y < 0:\n self.status = 0\n else:\n self.status = 0\n pygame.display.update()\n\n def draw(self, win):\n pygame.draw.circle(win, self.color, (round(self.x), round(self.y)), 6)\n for t in traceShow:\n pygame.draw.circle(win, self.color, (round(t[0]), round(t[1])), 1)\n\n\nclass enemy(pygame.sprite.Sprite):\n im = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\n im2 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\n im3 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\n imageList = [im, im2, im3]\n\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.x = x\n self.y = y\n self.hitbox = self.x, self.y, 60, 60\n self.vel = 6\n self.imageRandom = random.choice(self.imageList)\n\n def draw(self, win):\n self.move_enemy()\n win.blit(self.imageRandom, (self.x, self.y))\n\n def move_enemy(self):\n if self.vel > 0:\n if self.y + self.vel < 560:\n self.y += self.vel\n self.hitbox = self.x, self.y, 60, 60\n else:\n self.vel = self.vel * -1\n elif self.y - self.vel > 10:\n self.y += self.vel\n self.hitbox = self.x, self.y, 60, 60\n else:\n self.vel = self.vel * -1\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass player(pygame.sprite.Sprite):\n image = pygame.image.load(os.path.join(path, 'Gallery', 'life.png'))\n\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.x = x\n self.y = y\n self.move = 10\n\n def draw(self, win):\n win.blit(self.image, (self.x, self.y))\n\n\nclass projectile(pygame.sprite.Sprite):\n\n def __init__(self, x, y, ux, uy):\n pygame.sprite.Sprite.__init__(self)\n self.x = x + 30\n self.y = y\n self.startX = self.x\n self.startY = self.y\n self.horVel = ux\n self.verVel = uy\n self.color = random.choice(colorList)\n self.bulletTime = 0.0\n self.status = 1\n\n def update(self):\n global maxHeight\n global maxHeightPos\n global landingPos\n global ranges\n global trace\n if self.y <= screen_height:\n self.bulletTime += timeStep\n self.x = self.horVel * self.bulletTime * pixelRatio + self.startX\n self.y = -(self.verVel * self.bulletTime + 0.5 * accel * self.\n bulletTime ** 2) * pixelRatio + self.startY\n trace.append([self.x, self.y])\n if self.x >= screen_width:\n self.status = 0\n if self.y < 0:\n self.status = 0\n else:\n self.status = 0\n pygame.display.update()\n\n def draw(self, win):\n pygame.draw.circle(win, self.color, (round(self.x), round(self.y)), 6)\n for t in traceShow:\n pygame.draw.circle(win, self.color, (round(t[0]), round(t[1])), 1)\n\n\nclass enemy(pygame.sprite.Sprite):\n im = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\n im2 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\n im3 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\n imageList = [im, im2, im3]\n\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.x = x\n self.y = y\n self.hitbox = self.x, self.y, 60, 60\n self.vel = 6\n self.imageRandom = random.choice(self.imageList)\n\n def draw(self, win):\n self.move_enemy()\n win.blit(self.imageRandom, (self.x, self.y))\n\n def move_enemy(self):\n if self.vel > 0:\n if self.y + self.vel < 560:\n self.y += self.vel\n self.hitbox = self.x, self.y, 60, 60\n else:\n self.vel = self.vel * -1\n elif self.y - self.vel > 10:\n self.y += self.vel\n self.hitbox = self.x, self.y, 60, 60\n else:\n self.vel = self.vel * -1\n\n\ndef display(s):\n win.blit(background, (0, 0))\n player1.draw(win)\n Monster1.draw(win)\n Monster2.draw(win)\n Monster3.draw(win)\n score = font.render('Score : ' + str(s), 1, (0, 0, 0))\n win.blit(score, (430, 30))\n for bullet in bullets:\n bullet.draw(win)\n pygame.display.update()\n\n\n<mask token>\n",
"step-5": "import pygame\r\nimport os\r\nimport random\r\n\r\n\r\n#Vx = float(input(\"Input Vx : \"))\r\n#Vy = float(input(\"Input Vy : \"))\r\nVx = 20\r\nVy = 20\r\n\r\n#GEOMETRY\r\nscreen_width = 1000\r\nscreen_height = 600\r\nFPS = 30\r\n\r\n#COLOR\r\nBLUE = (0, 0, 255)\r\nBLACK = (0, 0, 0)\r\nGREEN = (204, 153, 255)\r\nRED = (255, 0, 0)\r\nWHITE = (155, 25, 0)\r\ncolorList = [BLUE, BLACK, GREEN, RED, WHITE]\r\n\r\n#Initialize pygame\r\npygame.init()\r\npath = os.path.dirname(__file__)\r\nimg_path = os.path.join(path, 'Gallery')\r\nbackground = pygame.image.load('Gallery/parallax.png')\r\nbackground = pygame.transform.scale(background, [screen_width, screen_height])\r\nwin = pygame.display.set_mode([screen_width, screen_height])\r\npygame.display.set_caption(\"Stateczek shoot Projectile\")\r\nclock = pygame.time.Clock()\r\n\r\npixelRatio = 10\r\naccel = -9.81\r\ntimeStep = 1 / FPS\r\nfont = pygame.font.SysFont('comic', 50, False, False)\r\n\r\n#####CREATE SPRITE#####\r\nclass player(pygame.sprite.Sprite):\r\n image = pygame.image.load(os.path.join(path, 'Gallery', 'life.png')) # เรียกรูปตัวละครมาเก็บในตัวแปร\r\n def __init__(self, x, y): # ฟังก์ชั่นนี้เอาไว้กำหนดตัวแปร\r\n pygame.sprite.Sprite.__init__(self)\r\n self.x = x\r\n self.y = y\r\n self.move = 10\r\n\r\n def draw(self, win):\r\n win.blit(self.image, (self.x, self.y))\r\n\r\n#####CREATE PROJECTILE SHOOT#####\r\nclass projectile(pygame.sprite.Sprite):\r\n def __init__(self, x, y, ux, uy):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.x = x + 30\r\n self.y = y\r\n self.startX = self.x\r\n self.startY = self.y\r\n self.horVel = ux\r\n self.verVel = uy\r\n self.color = random.choice(colorList)\r\n self.bulletTime = 0.0\r\n self.status = 1\r\n\r\n def update(self):\r\n global maxHeight\r\n global maxHeightPos\r\n global landingPos\r\n global ranges\r\n global trace\r\n if self.y <= screen_height:\r\n self.bulletTime += timeStep\r\n self.x = (self.horVel * self.bulletTime) * pixelRatio + self.startX\r\n self.y = -(self.verVel * self.bulletTime + 0.5 * accel * (\r\n self.bulletTime ** 2)) * pixelRatio + self.startY\r\n\r\n trace.append([self.x, self.y])\r\n if self.x >= screen_width:\r\n self.status = 0\r\n if self.y < 0:\r\n self.status = 0\r\n else: # กระสุนลงพื้น\r\n self.status = 0\r\n\r\n pygame.display.update()\r\n\r\n def draw(self, win):\r\n pygame.draw.circle(win, self.color, (round(self.x), round(self.y)), 6)\r\n for t in traceShow:\r\n pygame.draw.circle(win, self.color, (round(t[0]), round(t[1])), 1)\r\n\r\n\r\n#####CREATE ENEMYS#####\r\nclass enemy(pygame.sprite.Sprite):\r\n im = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\r\n im2 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\r\n im3 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\r\n imageList = [im, im2, im3]\r\n\r\n def __init__(self, x, y):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.x = x\r\n self.y = y\r\n self.hitbox = (self.x, self.y, 60, 60)\r\n self.vel = 6\r\n self.imageRandom = random.choice(self.imageList)\r\n\r\n def draw(self, win):\r\n self.move_enemy()\r\n win.blit(self.imageRandom, (self.x, self.y))\r\n\r\n def move_enemy(self):\r\n if self.vel > 0:\r\n if self.y + self.vel < 560:\r\n self.y += self.vel\r\n self.hitbox = (self.x, self.y, 60, 60)\r\n else:\r\n self.vel = self.vel * -1\r\n else:\r\n if self.y - self.vel > 10:\r\n self.y += self.vel\r\n self.hitbox = (self.x, self.y, 60, 60)\r\n else:\r\n self.vel = self.vel * -1\r\n\r\n\r\n#####FUNCTION SHOW DISPLAY####\r\ndef 
display(s):\r\n win.blit(background, (0, 0))\r\n player1.draw(win)\r\n Monster1.draw(win)\r\n Monster2.draw(win)\r\n Monster3.draw(win)\r\n score = font.render('Score : ' + str(s), 1, (0, 0, 0))\r\n win.blit(score, (430, 30))\r\n for bullet in bullets:\r\n bullet.draw(win)\r\n pygame.display.update()\r\n\r\n\r\n# mainloop\r\nY = 300\r\nX = 30\r\nX1 = random.randint(500, 590)\r\nX2 = random.randint(660, 760)\r\nX3 = random.randint(830, 900)\r\nY1 = random.randint(60, 720)\r\nY2 = random.randint(40, 720)\r\nY3 = random.randint(60, 720)\r\n\r\nplayer1 = player(X, Y)\r\nMonster1 = enemy(X1, Y1)\r\nMonster2 = enemy(X2, Y2)\r\nMonster3 = enemy(X3, Y3)\r\n\r\nbullets = []\r\ntrace = []\r\ntraceShow = []\r\ncolor = []\r\nresetTrace = False\r\nshootStage = 0\r\nshowText = 0\r\nmaxHeight = 0\r\nranges = 0\r\nr = 1\r\ns = 0\r\n\r\n### START ###\r\nruning = True\r\nwhile runing:\r\n clock.tick(FPS)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n runing = False\r\n\r\n keys = pygame.key.get_pressed()\r\n if keys[pygame.K_UP]:\r\n if player1.y > 0:\r\n player1.y -= player1.move\r\n else:\r\n player1.y = 0\r\n\r\n if keys[pygame.K_DOWN]:\r\n if player1.y < screen_height-30:\r\n player1.y += player1.move\r\n print(player1.y)\r\n else:\r\n player1.y = screen_height-30\r\n print(player1.y)\r\n if keys[pygame.K_RIGHT]:\r\n if player1.x < screen_width-540:\r\n player1.x += player1.move\r\n print(player1.x)\r\n else:\r\n player1.x = screen_width-540\r\n print(player1.x)\r\n if keys[pygame.K_LEFT]:\r\n if player1.x > 0:\r\n player1.x -= player1.move\r\n else:\r\n player1.x = 0\r\n\r\n if keys[pygame.K_SPACE]:\r\n if shootStage == 0:\r\n bullets.append(projectile(player1.x, player1.y, Vx, Vy))\r\n shootStage = 1\r\n trace.clear()\r\n\r\n for bullet in bullets:\r\n bullet.update()\r\n traceShow = trace\r\n if bullet.y - 5 < Monster1.hitbox[1] + Monster1.hitbox[3] and bullet.y + 5 > Monster1.hitbox[1]:\r\n if bullet.x + 5 > Monster1.hitbox[0] and bullet.x - 5 < Monster1.hitbox[0] + Monster1.hitbox[2]:\r\n bullet.status = 0\r\n X1 = random.randint(500, 590)\r\n Y1 = random.randint(60, 720)\r\n\r\n Monster1 = enemy(X1, Y1)\r\n s += 1\r\n if bullet.y - 5 < Monster2.hitbox[1] + Monster2.hitbox[3] and bullet.y + 5 > Monster2.hitbox[1]:\r\n if bullet.x + 5 > Monster2.hitbox[0] and bullet.x - 5 < Monster2.hitbox[0] + Monster2.hitbox[2]:\r\n bullet.status = 0\r\n X2 = random.randint(660, 760)\r\n Y2 = random.randint(60, 720)\r\n Monster2 = enemy(X2, Y2)\r\n s += 1\r\n if bullet.y - 5 < Monster3.hitbox[1] + Monster3.hitbox[3] and bullet.y + 5 > Monster3.hitbox[\r\n 1]:\r\n if bullet.x + 5 > Monster3.hitbox[0] and bullet.x - 5 < Monster3.hitbox[0] + Monster3.hitbox[\r\n 2]:\r\n bullet.status = 0\r\n X3 = random.randint(830, 900)\r\n Y3 = random.randint(60, 720)\r\n Monster3 = enemy(X3, Y3)\r\n s += 1\r\n if bullet.status == 0:\r\n shootStage = 0\r\n bullets.pop(bullets.index(bullet))\r\n\r\n display(s)\r\n pygame.display.update()\r\n\r\npygame.quit()\r\n\r\n",
"step-ids": [
6,
9,
13,
14,
18
]
}
|
[
6,
9,
13,
14,
18
] |
#########################################################
# Author: Todd A. Reisel
# Date: 2/24/2003
# Class: StaticTemplateList
#########################################################
from BaseClasses.TemplateList import *;
class StaticTemplateList(TemplateList):
def __init__(self, viewMode = None):
TemplateList.__init__(self, viewMode);
def getList(self):
return [ ["graphical", "interface.html"], ["ada", "interface.html"] ];
def getFeatureName(self):
return "static";
|
normal
|
{
"blob_id": "7de3c0ab2e7c8ac00d37f1dfb5948027cfa7806c",
"index": 5084,
"step-1": "<mask token>\n\n\nclass StaticTemplateList(TemplateList):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass StaticTemplateList(TemplateList):\n\n def __init__(self, viewMode=None):\n TemplateList.__init__(self, viewMode)\n <mask token>\n\n def getFeatureName(self):\n return 'static'\n",
"step-3": "<mask token>\n\n\nclass StaticTemplateList(TemplateList):\n\n def __init__(self, viewMode=None):\n TemplateList.__init__(self, viewMode)\n\n def getList(self):\n return [['graphical', 'interface.html'], ['ada', 'interface.html']]\n\n def getFeatureName(self):\n return 'static'\n",
"step-4": "from BaseClasses.TemplateList import *\n\n\nclass StaticTemplateList(TemplateList):\n\n def __init__(self, viewMode=None):\n TemplateList.__init__(self, viewMode)\n\n def getList(self):\n return [['graphical', 'interface.html'], ['ada', 'interface.html']]\n\n def getFeatureName(self):\n return 'static'\n",
"step-5": "#########################################################\n# Author: Todd A. Reisel\n# Date: 2/24/2003\n# Class: StaticTemplateList\n#########################################################\n\nfrom BaseClasses.TemplateList import *;\n\nclass StaticTemplateList(TemplateList):\n def __init__(self, viewMode = None):\n TemplateList.__init__(self, viewMode);\n \n def getList(self):\n return [ [\"graphical\", \"interface.html\"], [\"ada\", \"interface.html\"] ];\n \n def getFeatureName(self):\n return \"static\";\n \n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
"""
A module to generate simulated 2D time-series SOSS data
Authors: Joe Filippazzo
"""
import os
from pkg_resources import resource_filename
import multiprocessing
import time
from functools import partial
import warnings
import numpy as np
from astropy.io import fits
from bokeh.plotting import figure, show
from hotsoss import utils
from svo_filters import svo
from scipy.interpolate import interp1d
from scipy.ndimage.interpolation import rotate
from scipy.interpolate import interp2d, RectBivariateSpline
try:
import webbpsf
except ImportError:
print("Could not import `webbpsf` package. Functionality limited.")
warnings.simplefilter('ignore')
def calculate_psf_tilts():
"""
Calculate the tilt of the psf at the center of each column
using all binned pixels in the given wavelength calibration file
for both orders and save to file
"""
for order in [1, 2]:
# Get the file
path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)
psf_file = resource_filename('awesimsoss', path)
# Dimensions
subarray = 'SUBSTRIP256'
X = range(2048)
Y = range(256)
# Get the wave map
wave_map = utils.wave_solutions(subarray, order).astype(float)
# Get the y-coordinate of the trace polynomial in this column
# (center of the trace)
coeffs = trace_polynomials(subarray=subarray, order=order)
trace = np.polyval(coeffs, X)
# Interpolate to get the wavelength value at the center
wave = interp2d(X, Y, wave_map)
# Get the wavelength of the trace center in each column
trace_wave = []
for x, y in zip(X, trace):
trace_wave.append(wave(x, y)[0])
# For each column wavelength (defined by the wavelength at
# the trace center) define an isowavelength contour
angles = []
for n, x in enumerate(X):
w = trace_wave[x]
            # Edge cases: trace_wave[x-1] would silently wrap to the last
            # element at x == 0, so guard that case explicitly
            w0 = trace_wave[x - 1] if x > 0 else 0
            try:
                w1 = trace_wave[x + 1]
            except IndexError:
                w1 = 10
# Define the width of the wavelength bin as half-way
# between neighboring points
dw0 = np.mean([w0, w])
dw1 = np.mean([w1, w])
# Get the coordinates of all the pixels in that range
yy, xx = np.where(np.logical_and(wave_map >= dw0, wave_map < dw1))
# Find the angle between the vertical and the tilted wavelength bin
if len(xx) >= 1:
angle = get_angle([xx[-1], yy[-1]], [x, trace[x]])
else:
angle = 0
# Don't flip them upside down
angle = angle % 180
# Add to the array
angles.append(angle)
# Save the file
np.save(psf_file, np.array(angles))
print('Angles saved to', psf_file)
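# Orientation note (not in the original module): calculate_psf_tilts() writes
# files/SOSS_PSF_tilt_order1.npy and files/SOSS_PSF_tilt_order2.npy, each a
# (2048,) array of tilt angles in degrees, one per detector column, which
# psf_tilts() below reads back.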
def nuke_psfs(tilts=True, raw=True, final=True):
"""Generate all the psf cubes from scratch"""
# Calculate the psf tilts
if tilts:
calculate_psf_tilts()
for filt in ['CLEAR', 'F277W']:
# Calculate the raw psfs from WebbPSF
if raw:
generate_SOSS_psfs(filt)
# Generate the rotated and interpolated psfs ready for trace assembly
if final:
SOSS_psf_cube(filt=filt, generate=True)
def generate_SOSS_ldcs(wavelengths, ld_profile, grid_point, model_grid='', subarray='SUBSTRIP256', n_bins=100, plot=False, save=''):
"""
Generate a lookup table of limb darkening coefficients for full
SOSS wavelength range
Parameters
----------
wavelengths: sequence
The wavelengths at which to calculate the LDCs
ld_profile: str
A limb darkening profile name supported by
`ExoCTK.ldc.ldcfit.ld_profile()`
grid_point: dict, sequence
The stellar parameters [Teff, logg, FeH] or stellar model
dictionary from `ExoCTK.modelgrid.ModelGrid.get()`
n_bins: int
The number of bins to break up the grism into
save: str
The path to save to file to
Example
-------
from awesimsoss.sim2D import awesim
lookup = awesim.soss_ldc('quadratic', [3300, 4.5, 0])
"""
try:
from exoctk import modelgrid
from exoctk.limb_darkening import limb_darkening_fit as lf
except ImportError:
return
    # Load the model grid if one wasn't passed in
    if not isinstance(model_grid, modelgrid.ModelGrid):
        model_grid = modelgrid.ModelGrid(os.environ['MODELGRID_DIR'], resolution=700, wave_rng=(0.6, 2.8))
# Get the grid point
if isinstance(grid_point, (list, tuple, np.ndarray)):
grid_point = model_grid.get(*grid_point)
# Abort if no stellar dict
if not isinstance(grid_point, dict):
print('Please provide the grid_point argument as [Teff, logg, FeH] or ExoCTK.modelgrid.ModelGrid.get(Teff, logg, FeH).')
return
# Break the bandpass up into n_bins pieces
bandpass = svo.Filter('NIRISS.GR700XD', n_bins=n_bins, verbose=False)
# Calculate the LDCs
ldc_results = lf.ldc(None, None, None, model_grid, [ld_profile],
bandpass=bandpass, grid_point=grid_point.copy(),
mu_min=0.08, verbose=False)
# Interpolate the LDCs to the desired wavelengths
coeff_table = ldc_results[ld_profile]['coeffs']
coeff_cols = [c for c in coeff_table.colnames if c.startswith('c')]
coeffs = [np.interp(wavelengths, coeff_table['wavelength'], coeff_table[c]) for c in coeff_cols]
return np.array(coeffs).T
def generate_SOSS_psfs(filt):
"""
    Generate a cube of the psf at 100 wavelengths from the minimum to the maximum wavelength
Parameters
----------
filt: str
The filter to use, ['CLEAR', 'F277W']
"""
# Get the file
file = resource_filename('awesimsoss', 'files/SOSS_{}_PSF.fits'.format(filt))
# Get the NIRISS class from webbpsf and set the filter
ns = webbpsf.NIRISS()
ns.filter = filt
ns.pupil_mask = 'GR700XD'
# Get the min and max wavelengths
wavelengths = utils.wave_solutions('SUBSTRIP256').flatten()
wave_min = np.max([ns.SHORT_WAVELENGTH_MIN * 1E6, np.min(wavelengths[wavelengths > 0])])
wave_max = np.min([ns.LONG_WAVELENGTH_MAX * 1E6, np.max(wavelengths[wavelengths > 0])])
# webbpsf.calc_datacube can only handle 100 but that's sufficient
W = np.linspace(wave_min, wave_max, 100)*1E-6
# Calculate the psfs
print("Generating SOSS psfs. This takes about 8 minutes...")
start = time.time()
PSF = ns.calc_datacube(W, oversample=1)[0].data
print("Finished in", time.time()-start)
# Make the HDUList
psfhdu = fits.PrimaryHDU(data=PSF)
wavhdu = fits.ImageHDU(data=W*1E6, name='WAV')
hdulist = fits.HDUList([psfhdu, wavhdu])
# Write the file
hdulist.writeto(file, overwrite=True)
hdulist.close()
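# Hedged usage sketch (requires a working webbpsf install and its data files):
#
#     generate_SOSS_psfs('CLEAR')
#
# writes files/SOSS_CLEAR_PSF.fits with a 100-plane psf cube in the primary HDU
# and a 'WAV' extension holding the 100 wavelengths in microns.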
def get_angle(pf, p0=np.array([0, 0]), pi=None):
"""Compute angle (in degrees) for pf-p0-pi corner
Parameters
----------
pf: sequence
The coordinates of a point on the rotated vector
p0: sequence
The coordinates of the pivot
pi: sequence
The coordinates of the fixed vector
Returns
-------
float
The angle in degrees
"""
if pi is None:
pi = p0 + np.array([0, 1])
v0 = np.array(pf) - np.array(p0)
v1 = np.array(pi) - np.array(p0)
angle = np.math.atan2(np.linalg.det([v0, v1]), np.dot(v0, v1))
angle = np.degrees(angle)
return angle
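# Worked example for get_angle (illustrative, not original code): with the
# default pivot p0 = [0, 0] and fixed vector pi = [0, 1],
#
#     get_angle([1, 1])  # v0 = [1, 1], v1 = [0, 1]; det = 1, dot = 1 -> 45.0
#
# so angles are measured from the vertical, which is why calculate_psf_tilts()
# above normalizes results with angle % 180.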
def get_SOSS_psf(wavelength, filt='CLEAR', psfs=None, cutoff=0.005, plot=False):
"""
    Retrieve the SOSS psf for the given wavelength and
    scale the total flux to 1 (zeroing of pixels below the
    cutoff value is currently disabled below)
Parameters
----------
wavelength: float
The wavelength to retrieve [um]
filt: str
The filter to use, ['CLEAR', 'F277W']
psfs: numpy.interp1d object (optional)
The interpolator
plot: bool
Plot the psf
Returns
-------
np.ndarray
The 2D psf for the input wavelength
"""
if psfs is None:
# Get the file
file = resource_filename('awesimsoss', 'files/SOSS_{}_PSF.fits'.format(filt))
# Load the SOSS psf cube
cube = fits.getdata(file).swapaxes(-1, -2)
wave = fits.getdata(file, ext=1)
        # Initialize interpolator
psfs = interp1d(wave, cube, axis=0, kind=3)
# Check the wavelength
if wavelength < psfs.x[0]:
wavelength = psfs.x[0]
if wavelength > psfs.x[-1]:
wavelength = psfs.x[-1]
# Interpolate and scale psf
psf = psfs(wavelength)
psf *= 1./np.sum(psf)
    # Remove background (currently disabled)
# psf[psf < cutoff] = 0
if plot:
fig = figure()
fig.image([psf], x=0, y=0, dw=psf.shape[0], dh=psf.shape[1])
show(fig)
else:
return psf
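# Hedged usage sketch (assumes files/SOSS_CLEAR_PSF.fits exists):
#
#     psf = get_SOSS_psf(1.5, filt='CLEAR')
#     psf.sum()  # ~1.0, since the psf is renormalized above
#
# Passing a prebuilt `psfs` interpolator avoids re-reading the FITS file when
# calling this in a loop, which is exactly what SOSS_psf_cube() does below.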
def make_frame(psfs):
"""
Generate a frame from an array of psfs
Parameters
----------
psfs: sequence
        An array of subarray-projected psfs of shape (2048, 256, 76)
Returns
-------
np.ndarray
An array of the SOSS psf at 2048 wavelengths for each order
"""
# Empty frame
frame = np.zeros((256, 2124))
# Add each psf
for n, psf in enumerate(psfs):
frame[:, n:n+76] += psf
return frame[:, 38:-38]
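# Shape check for make_frame (a reading aid): each of the 2048 input psfs must
# already be on the subarray, i.e. shape (256, 76), so the padded frame is
# (256, 2124) and the trimmed return value is (256, 2048), matching SUBSTRIP256.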
def psf_lightcurve(psf, ld_coeffs, rp, time, tmodel, plot=False):
"""
Generate a lightcurve for a (76, 76) psf of a given wavelength
Parameters
----------
psf: sequencs
The flux-scaled psf for the given wavelength
ld_coeffs: sequence
The limb darkening coefficients to use
rp: float
The planet radius
time: sequence
The time axis for the TSO
tmodel: batman.transitmodel.TransitModel
The transit model of the planet
plot: bool
Plot the lightcurve
Returns
-------
sequence
A 1D array of the lightcurve with the same length as *t*
Example 1
---------
# No planet
import numpy as np
from awesimsoss.make_trace import psf_lightcurve
psf = np.ones((76, 76))
time = np.linspace(-0.2, 0.2, 200)
lc = psf_lightcurve(psf, None, None, time, None, plot=True)
Example 2
---------
# With a planet
import batman
import numpy as np
import astropy.units as q
from awesimsoss.make_trace import psf_lightcurve
params = batman.TransitParams()
params.t0 = 0. # time of inferior conjunction
params.per = 5.7214742 # orbital period (days)
params.a = 0.0558*q.AU.to(q.R_sun)*0.66 # semi-major axis (in units of stellar radii)
params.inc = 89.8 # orbital inclination (in degrees)
params.ecc = 0. # eccentricity
params.w = 90. # longitude of periastron (in degrees)
params.teff = 3500 # effective temperature of the host star
params.logg = 5 # log surface gravity of the host star
params.feh = 0 # metallicity of the host star
params.limb_dark = 'quadratic' # limb darkening profile to use
params.u = [1, 1] # limb darkening coefficients
    time = np.linspace(-0.2, 0.2, 200)
    psf = np.ones((76, 76))
    tmodel = batman.TransitModel(params, time)
    lc = psf_lightcurve(psf, [0.1, 0.1], 0.05, time, tmodel, plot=True)
"""
# Expand to shape of time axis
flux = np.tile(psf, (len(time), 1, 1))
# If there is a transiting planet...
if ld_coeffs is not None and rp is not None and str(type(tmodel)) == "<class 'batman.transitmodel.TransitModel'>":
# Set the wavelength dependent orbital parameters
tmodel.u = ld_coeffs
tmodel.rp = rp
# Generate the light curve for this pixel
lightcurve = tmodel.light_curve(tmodel)
# Scale the flux with the lightcurve
flux *= lightcurve[:, None, None]
return flux
def psf_tilts(order):
"""
Get the psf tilts for the given order
Parameters
----------
order: int
The order to use, [1, 2]
Returns
-------
np.ndarray
The angle from the vertical of the psf in each of the 2048 columns
"""
if order not in [1, 2]:
raise ValueError('Only orders 1 and 2 are supported.')
# Get the file
path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)
psf_file = resource_filename('awesimsoss', path)
if not os.path.exists(psf_file):
calculate_psf_tilts()
return np.load(psf_file)
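# Hedged usage sketch:
#
#     angles = psf_tilts(1)
#     angles.shape  # (2048,), one tilt angle in degrees per detector column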
def put_psf_on_subarray(psf, y, frame_height=256):
"""Make a 2D SOSS trace from a sequence of psfs and trace center locations
Parameters
----------
psf: sequence
The 2D psf
y: float
The grid y value to place the center of the psf
    frame_height: int
        The height of the output frame in pixels
Returns
-------
np.ndarray
The 2D frame with the interpolated psf
"""
# Create spline generator
dim = psf.shape[0]
mid = (dim - 1.0) / 2.0
    arr = np.arange(dim, dtype=np.float64)  # concrete dtype; the bare np.float alias is gone in modern NumPy
spline = RectBivariateSpline(arr, arr, psf.T, kx=3, ky=3, s=0)
# Create output frame, shifted as necessary
yg, xg = np.indices((frame_height, dim), dtype=np.float64)
yg += mid-y
# Resample onto the subarray
frame = spline.ev(xg, yg)
    # Zero out points that fall outside the original psf grid
extrapol = (((xg < -0.5) | (xg >= dim - 0.5)) | ((yg < -0.5) | (yg >= dim - 0.5)))
frame[extrapol] = 0
return frame
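# Hedged usage sketch for put_psf_on_subarray:
#
#     psf = get_SOSS_psf(1.5)             # (76, 76), assuming the psf file exists
#     col = put_psf_on_subarray(psf, y=100.0)
#     col.shape  # (256, 76): the psf center lands near row 100 of the subarray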
def SOSS_psf_cube(filt='CLEAR', order=1, subarray='SUBSTRIP256', generate=False):
"""
Generate/retrieve a data cube of shape (3, 2048, 76, 76) which is a
76x76 pixel psf for 2048 wavelengths for each trace order. The PSFs
are scaled to unity and rotated to reproduce the trace tilt at each
wavelength then placed on the desired subarray.
Parameters
----------
filt: str
The filter to use, ['CLEAR', 'F277W']
order: int
The trace order
subarray: str
The subarray to use, ['SUBSTRIP96', 'SUBSTRIP256', 'FULL']
generate: bool
Generate a new cube
Returns
-------
np.ndarray
An array of the SOSS psf at 2048 wavelengths for each order
"""
if generate:
print('Coffee time! This takes about 5 minutes.')
# Get the wavelengths
wavelengths = np.mean(utils.wave_solutions(subarray), axis=1)[:2 if filt == 'CLEAR' else 1]
coeffs = trace_polynomials(subarray)
# Get the file
psf_path = 'files/SOSS_{}_PSF.fits'.format(filt)
psf_file = resource_filename('awesimsoss', psf_path)
# Load the SOSS psf cube
cube = fits.getdata(psf_file).swapaxes(-1, -2)
wave = fits.getdata(psf_file, ext=1)
        # Initialize interpolator
psfs = interp1d(wave, cube, axis=0, kind=3)
trace_cols = np.arange(2048)
# Run datacube
for n, wavelength in enumerate(wavelengths):
# Evaluate the trace polynomial in each column to get the y-position of the trace center
trace_centers = np.polyval(coeffs[n], trace_cols)
# Don't calculate order2 for F277W or order 3 for either
if (n == 1 and filt.lower() == 'f277w') or n == 2:
pass
else:
# Get the psf for each column
print('Calculating order {} SOSS psfs for {} filter...'.format(n+1, filt))
start = time.time()
pool = multiprocessing.Pool(8)
func = partial(get_SOSS_psf, filt=filt, psfs=psfs)
raw_psfs = np.array(pool.map(func, wavelength))
pool.close()
pool.join()
del pool
print('Finished in {} seconds.'.format(time.time()-start))
# Get the PSF tilt at each column
angles = psf_tilts(order)
# Rotate the psfs
print('Rotating order {} SOSS psfs for {} filter...'.format(n+1, filt))
start = time.time()
pool = multiprocessing.Pool(8)
func = partial(rotate, reshape=False)
rotated_psfs = np.array(pool.starmap(func, zip(raw_psfs, angles)))
pool.close()
pool.join()
del pool
print('Finished in {} seconds.'.format(time.time()-start))
# Scale psfs to 1
rotated_psfs = np.abs(rotated_psfs)
scale = np.nansum(rotated_psfs, axis=(1, 2))[:, None, None]
rotated_psfs = rotated_psfs/scale
# Split it into 4 chunks to be below Github file size limit
chunks = rotated_psfs.reshape(4, 512, 76, 76)
for N, chunk in enumerate(chunks):
idx0 = N*512
idx1 = idx0+512
centers = trace_centers[idx0:idx1]
# Interpolate the psfs onto the subarray
print('Interpolating chunk {}/4 for order {} SOSS psfs for {} filter onto subarray...'.format(N+1, n+1, filt))
start = time.time()
pool = multiprocessing.Pool(8)
data = zip(chunk, centers)
subarray_psfs = pool.starmap(put_psf_on_subarray, data)
pool.close()
pool.join()
del pool
print('Finished in {} seconds.'.format(time.time()-start))
# Get the filepath
filename = 'files/SOSS_{}_PSF_order{}_{}.npy'.format(filt, n+1, N+1)
file = resource_filename('awesimsoss', filename)
# Delete the file if it exists
if os.path.isfile(file):
os.system('rm {}'.format(file))
# Write the data
np.save(file, np.array(subarray_psfs))
print('Data saved to', file)
else:
# Get the chunked data and concatenate
full_data = []
for chunk in [1, 2, 3, 4]:
path = 'files/SOSS_{}_PSF_order{}_{}.npy'.format(filt, order, chunk)
file = resource_filename('awesimsoss', path)
full_data.append(np.load(file))
return np.concatenate(full_data, axis=0)
"step-ids": [
7,
10,
11,
13,
14
]
}
|
[
7,
10,
11,
13,
14
] |
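

# Usage sketch for the helpers above (added for illustration, not from the
# original module): it assumes the chunked SOSS_*_PSF_order*_*.npy files ship
# with the package, so SOSS_psf_cube() can load them without generate=True.
if __name__ == '__main__':
    # Load the pre-computed per-column subarray PSFs for order 1...
    cube = SOSS_psf_cube(filt='CLEAR', order=1)   # shape (2048, 256, 76)

    # ...and collapse them into a single detector frame
    frame = make_frame(cube)                      # shape (256, 2048)
    print(cube.shape, frame.shape)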
from platypush.message.response import Response
class CameraResponse(Response):
pass
# vim:sw=4:ts=4:et:
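
# Hedged usage sketch (not part of the original file): platypush plugin
# actions typically return a Response subclass with their payload in the
# `output` field. The constructor kwargs below are an assumption based on
# common platypush releases, not a documented contract of this module.
if __name__ == '__main__':
    resp = CameraResponse(output={'device': 0, 'image_file': '/tmp/capture.jpg'})
    print(resp)  # platypush Message subclasses typically stringify to JSON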
import random
def get_ticket():
ticket = ''
s = 'abcdefghijkrmnopqrstuvwxyz1234567890'
for i in range(28):
r_num = random.choice(s)
ticket += r_num
return ticket
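
# Quick usage check, plus a hardened variant (added for illustration, not in
# the original). Note the alphabet above omits 'l' and repeats 'r', which may
# be a typo for 'l'. For security-sensitive tokens, prefer the secrets module.
import secrets
import string


def get_secure_ticket(length=28):
    """Like get_ticket(), but using a CSPRNG (hypothetical helper)."""
    alphabet = string.ascii_lowercase + string.digits
    return ''.join(secrets.choice(alphabet) for _ in range(length))


if __name__ == '__main__':
    print(get_ticket())         # e.g. 'x3k9...' (28 chars)
    print(get_secure_ticket())  # cryptographically secure variant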
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/4/14 14:31
# @Author : lixiaofeng
# @File : page_zaojiao.py
# @Software: PyCharm
# @desc :
from common.basics import Crazy
class Zaojiaopage(Crazy):
"""早教小程序"""
zao_btn_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/cx" and @text="包妈优选"]')
# zao_btn_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/cx" and @text="小小包早教"]')
def click_zao(self):
self.click(self.zao_btn_loc)
def element_zao(self):
return self.find_element(self.zao_btn_loc)
    find_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/d7b" and @text="发现"]')  # "Discover" tab button
def click_find(self):
self.click(self.find_loc)
    title_btn_loc = ('xpath', '//*[@resource-id="android:id/title" and @text="小程序"]')  # "Mini Programs" entry on the Discover page
def click_title_btn(self):
self.click(self.title_btn_loc)
    helper_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/c5" and @text="小程序助手"]')  # Mini Program assistant
def element_helper(self):
return self.find_element(self.helper_loc)
def click_helper(self):
self.click(self.helper_loc)
    small_help_btn_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/cx" and @text="小程序助手"]')  # Mini Program assistant
def click_small_help_btn(self):
self.click(self.small_help_btn_loc)
    small_name_loc = ('xpath', '//*[contains(@text, "包妈优选")]')  # the "包妈优选" mini program
def element_small_name(self):
return self.find_element(self.small_name_loc)
def click_small_name(self):
self.click(self.small_name_loc)
    switching_applet_btn_loc = ('xpath', '//*[contains(@text, "切换小程序")]')  # "Switch mini program"
def click_switching_applet_btn(self):
self.click(self.switching_applet_btn_loc)
    delete_small_btn_loc = ('xpath', '//*[contains(@text, "删除")]')  # delete mini program button
def click_delete_small_btn(self):
self.click(self.delete_small_btn_loc)
edition_btn_loc = ('xpath', '//*[contains(@text, "百宝福利Buy")]')
def element_edition_btn(self):
return self.find_element(self.edition_btn_loc)
delete_small1_btn_loc = ('xpath', '//*[contains(@text, "拖动到此处删除")]')
def element_delete_small1_btn(self):
return self.find_element(self.delete_small1_btn_loc)
    version_btn_loc = ('xpath', '//*[contains(@text, "版本查看")]')  # "View version" button
def click_version_btn(self):
self.click(self.version_btn_loc)
    experience_version_btn_loc = ('xpath', '//*[contains(@text, "6.0.09")]')  # trial (experience) version
def clicks_experience_version_btn(self):
self.clicks(self.experience_version_btn_loc, -1)
    audition_class_btn_loc = ('xpath', '//*[contains(@text, "0元领取10节试听课")]')  # claim 10 free trial lessons
def element_audition_class_btn(self):
return self.find_element(self.audition_class_btn_loc)
def click_audition_class_btn(self):
self.click(self.audition_class_btn_loc)
    wechat_grant_btn_loc = ('xpath', '//*[contains(@text, "微信授权") and @class="android.widget.Button" ]')  # WeChat authorization
def click_wechat_grant_btn(self):
self.click(self.wechat_grant_btn_loc)
def double_click_wechat_grant(self):
self.double_click(self.wechat_grant_btn_loc)
def element_wechat_grant_btn(self):
return self.find_element(self.wechat_grant_btn_loc)
    allow_btn_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/st" and @text="允许"]')  # "Allow" button
def click_allow_btn(self):
self.click(self.allow_btn_loc)
    month_btn_loc = ('xpath', '//*[contains(@text, "2018")]')  # select month
def click_mouth_btn(self):
self.click(self.month_btn_loc)
    sure_btn_loc = ('xpath', '//*[contains(@text, "确定")]')  # "OK" button
def click_sure_btn(self):
self.click(self.sure_btn_loc)
    class_info_loc = ('xpath', '//*[contains(@text, "课程介绍")]')  # course introduction
    # class_info_loc = ('xpath', '//android.widget.FrameLayout/android.view.ViewGroup[0]')  # course introduction (alternative locator)
def class_info_btn(self):
self.click(self.class_info_loc)
    attend_lectures_btn_loc = ('xpath', '//*[contains(@text, "立即听课")]')  # "Attend class now"
def element_attend_lectures_btn(self):
return self.find_element(self.attend_lectures_btn_loc)
def click_attend_lectures_btn(self):
self.click(self.attend_lectures_btn_loc)
    class_btn_loc = ('xpath', '//*[contains(@text, "预备课 预备课")]')  # preparatory lesson entry
def element_class_btn(self):
return self.find_element(self.class_btn_loc)
    get_to_know_btn_loc = ('xpath', '//*[contains(@text, "立即了解正式课 ")]')  # "Learn about the full course now"
def click_get_to_know_btn(self):
self.click(self.get_to_know_btn_loc)
def element_get_to_know_btn(self):
return self.find_element(self.get_to_know_btn_loc)
    sure_buy_btn_loc = ('xpath', '//*[contains(@text, "立即购买")]')  # "Buy now"
def click_sure_buy_btn(self):
self.click(self.sure_buy_btn_loc)
    buy_password_loc = ('id', 'com.tencent.mm:id/cfs')  # payment password input
def input_buy_password(self, paw):
self.send_keys(self.buy_password_loc, paw)
    check_buy_money_loc = ('id', 'com.tencent.mm:id/dlh')  # payment amount text
def text_buy_money(self):
return self.get_text(self.check_buy_money_loc)
    success_btn_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/f8o" and @text="完成"]')  # "Done" button
def click_success_btn(self):
self.click(self.success_btn_loc)
    check_address_btn_loc = ('xpath', '//*[contains(@text, "收货地址:请选择地址")]')  # choose shipping address
def click_check_address_btn(self):
self.click(self.check_address_btn_loc)
def element_check_address_btn(self):
return self.find_element(self.check_address_btn_loc)
    add_address_btn_loc = ('xpath', '//*[contains(@text, "添加地址")]')  # "Add address"
def click_add_address_btn(self):
self.click(self.add_address_btn_loc)
    name_loc = ('xpath', '//*[contains(@text, "请输入你的姓名")]')  # name input field
def input_name_btn(self, name):
self.send_keys(self.name_loc, name)
    phone_btn_loc = ('xpath', '//*[contains(@text, "请填写收件人电话")]')  # recipient phone input
def input_phone_btn(self, phone):
self.send_keys(self.phone_btn_loc, phone)
    region_btn_loc = ('xpath', '//*[contains(@text, "请输入你所在地区")]')  # region input
def click_region_btn(self):
self.click(self.region_btn_loc)
    detailed_address_btn_loc = ('xpath', '//*[contains(@text, "请输入你的详细地址")]')  # detailed address input
def input_detailed_address_btn(self, address):
self.send_keys(self.detailed_address_btn_loc, address)
    save_btn_loc = ('xpath', '//*[contains(@text, "保存")]')  # "Save"
def click_save_btn(self):
self.click(self.save_btn_loc)
    receive_btn_loc = ('xpath', '//*[contains(@text, "立即领取")]')  # "Claim now"
def click_receive_btn(self):
self.click(self.receive_btn_loc)
    addressee_loc = ('xpath', '//*[contains(@text, "收件人:")]')  # addressee entries in the address list
def elements_addressee(self):
return self.find_elements(self.addressee_loc)
def clicks_addressee(self):
self.clicks(self.addressee_loc, 0)
    know_btn_loc = ('xpath', '//*[contains(@text, "知道了")]')  # "Got it" button
def element_know(self):
return self.find_element(self.know_btn_loc)
def click_know(self):
self.click(self.know_btn_loc)
    all_curriculum_btn_loc = ('xpath', '//*[contains(@text, "查看全部课程")]')  # "View all courses"
def element_all_curriculum_btn(self):
return self.find_element(self.all_curriculum_btn_loc)
def click_all_curriculum_btn(self):
self.click(self.all_curriculum_btn_loc)
    curriculum_date_btn_loc = ('xpath', '//*[contains(@text, "2019-0")]')  # historical course pushes
def element_curriculum_date_btn(self):
return self.find_element(self.curriculum_date_btn_loc)
    my_btn_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/ct" and @text="我的"]')  # "Me" tab
def element_my_btn(self):
return self.find_element(self.my_btn_loc)
def click_my(self):
self.click(self.my_btn_loc)
    my_baby_btn_loc = ('xpath', '//*[contains(@text, "我的宝宝")]')  # "My baby"
def click_my_baby(self):
self.click(self.my_baby_btn_loc)
my_baby_title_loc = ('id', 'com.tencent.mm:id/ox')
def text_my_baby_title(self):
return self.get_text(self.my_baby_title_loc)
def elements_title(self):
return self.find_elements(self.my_baby_title_loc)
    new_baby_btn_loc = ('xpath', '//*[contains(@text, "新建宝宝")]')  # "New baby"
def element_new_baby_btn(self):
return self.find_element(self.new_baby_btn_loc)
def click_new_baby_btn(self):
self.click(self.new_baby_btn_loc)
def clicks_new_baby_btn(self, n):
self.clicks(self.new_baby_btn_loc, n)
    get_set_loc = ('xpath', '//*[contains(@text, "预备课 预备课")]')  # preparatory lesson entry
def element_get_set(self):
return self.find_element(self.get_set_loc)
    next_btn_loc = ('xpath', '//*[contains(@text, "下一步")]')  # "Next" button
def click_next(self):
self.click(self.next_btn_loc)
    baby_name_loc = ('xpath', '//*[contains(@text, "请输入宝宝姓名")]')  # baby name input
def inputs_baby_name(self, name, n):
self.sends_keys(self.baby_name_loc, name, n)
    baby_bir_btn_loc = ('xpath', '//*[contains(@text, "宝宝的生日:")]')  # baby's birthday
def click_baby_bir_btn(self):
self.click(self.baby_bir_btn_loc)
    finish_btn_loc = ('xpath', '//*[contains(@text, "完成")]')  # "Done" button
def click_finish_btn(self):
self.click(self.finish_btn_loc)
def clicks_finish_btn(self, n):
self.clicks(self.finish_btn_loc, n)
    my_home_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/ct" and @text="首页"]')  # "Home" tab
def click_my_home(self):
self.click(self.my_home_loc)
def element_my_home(self):
return self.find_element(self.my_home_loc)
    switch_btn_loc = ('xpath', '//*[contains(@text, "切换")]')  # "Switch"
def click_switch_btn(self):
self.click(self.switch_btn_loc)
    baby_bri_loc = ('xpath', '//*[contains(@text, "宝宝生日:")]')  # "Baby birthday:"
def click_baby_bri(self):
self.click(self.baby_bri_loc)
    class_img_btn_loc = ('xpath', '//android.widget.Image')  # course image elements
def clicks_class_img(self):
self.clicks(self.class_img_btn_loc, 0)
    collection_btn_loc = ('xpath', '//*[contains(@text, "收藏")]')  # "Favorite"
def click_collection_btn(self):
self.click(self.collection_btn_loc)
def clicks_collection_btn(self, n):
self.clicks(self.collection_btn_loc, n)
def element_collection_btn(self):
return self.find_element(self.collection_btn_loc)
    write_record_btn_loc = ('xpath', '//*[contains(@text, "写记录") and @class="android.widget.Button" ]')  # "Write record" button
def click_write_record_btn(self):
self.click(self.write_record_btn_loc)
def clicks_write_record_btn(self, n):
self.clicks(self.write_record_btn_loc, n)
    album_btn_loc = ('xpath', '//*[contains(@text, "相册")]')  # album
def click_album_btn(self):
self.click(self.album_btn_loc)
def element_album_btn(self):
return self.find_element(self.album_btn_loc)
    small_video_btn_loc = ('xpath', '//*[contains(@text, "小视频")]')  # short video
def click_small_video_btn(self):
self.click(self.small_video_btn_loc)
def element_small_video_btn(self):
return self.find_element(self.small_video_btn_loc)
    release_btn_loc = ('xpath', '//*[contains(@text, "发布")]')  # "Publish"
def click_release_btn(self):
self.click(self.release_btn_loc)
def clicks_release_btn(self, n):
self.clicks(self.release_btn_loc, n)
    def element_record_info(self, data):  # check whether an element containing the given text can be located
record_info_loc = ('xpath', '//*[contains(@text, "{}")]'.format(data))
record_info = self.find_element(record_info_loc)
if record_info:
return True
else:
return False
    class_name_loc = ('xpath', '//*[contains(@text, "歌曲")]')  # course name
    # class_name_loc = ('xpath', '//*[contains(@text, "歌曲:Head and shoulders")]')  # course name (alternative locator)
def click_class_name(self):
self.click(self.class_name_loc)
def clicks_class_name(self, n):
self.clicks(self.class_name_loc, n)
def elements_class_name(self):
return self.find_elements(self.class_name_loc)
    class_name2_loc = ('xpath', '//*[contains(@text, "一起走")]')  # course name
    # class_name2_loc = ('xpath', '//*[contains(@text, "弹出来的画")]')  # course name (alternative locator)
def click_class2_name(self):
self.click(self.class_name2_loc)
def clicks_class2_name(self, n):
self.clicks(self.class_name2_loc, n)
    write_text_loc = ('xpath', '//*[contains(@text, "0/1000")]')  # record text area (0/1000 counter)
def input_write_text(self, text):
self.send_keys(self.write_text_loc, text)
def inputs_write_text(self, text, n):
self.sends_keys(self.write_text_loc, text, n)
choice_album_loc = ('id', 'com.tencent.mm:id/bpy')
def clicks_choice_album(self, n):
self.clicks(self.choice_album_loc, n)
def elements_choice_album(self):
return self.find_elements(self.choice_album_loc)
    complete_btn_loc = ('id', 'com.tencent.mm:id/ki')  # "Done"
def click_complete_btn(self):
self.click(self.complete_btn_loc)
    my_collection_btn_loc = ('xpath', '//*[contains(@text, "我的收藏")]')  # "My favorites"
def click_my_collection_btn(self):
self.click(self.my_collection_btn_loc)
    my_collection_english_course_btn_loc = ('xpath', '//*[contains(@text, "早教")]')  # early-education English course
def elements_my_collection_english_course_btn(self):
return self.find_elements(self.my_collection_english_course_btn_loc)
    my_collection_game_course_btn_loc = ('xpath', '//*[contains(@text, "宝宝游戏馆")]')  # "Baby game hall"
def elements_my_collection_game_course_btn(self):
return self.find_elements(self.my_collection_game_course_btn_loc)
    my_course_btn_loc = ('xpath', '//*[contains(@text, "我的课程")]')  # "My courses"
def click_my_course_btn(self):
self.click(self.my_course_btn_loc)
    my_course_buy_btn_loc = ('xpath', '//*[contains(@text, "早教核心课年卡")]')  # annual card for the core course
def elements_my_course_buy_btn(self):
return self.find_elements(self.my_course_buy_btn_loc)
    my_order_btn_loc = ('xpath', '//*[contains(@text, "我的订单")]')  # "My orders"
def click_my_order_btn(self):
self.click(self.my_order_btn_loc)
    my_order_card_btn_loc = ('xpath', '//*[contains(@text, "订单编号:")]')  # "Order number:"
def elements_my_order_card_btn(self):
return self.find_elements(self.my_order_card_btn_loc)
    my_record_btn_loc = ('xpath', '//*[contains(@text, "成长记录")]')  # "Growth record"
def click_my_record_btn(self):
self.click(self.my_record_btn_loc)
    my_record_class_btn_loc = ('xpath', '//*[contains(@text, "#")]')  # test English course group (entries start with "#")
def elements_my_record_class_btn(self):
return self.find_elements(self.my_record_class_btn_loc)
back_btn_loc = (
'xpath', '//*[@resource-id="com.tencent.mm:id/on" and @class="android.widget.LinearLayout"]') # 返回按钮
def element_back_btn(self):
return self.find_element(self.back_btn_loc)
def click_back_btn(self):
self.click(self.back_btn_loc)
    reply_5_loc = ('xpath', '//android.widget.Image')  # image shown after replying "5"
def click_reply_5(self):
self.click(self.reply_5_loc)
def elements_reply_5(self):
return self.find_elements(self.reply_5_loc)
    add_to_btn_loc = ('xpath', '//*[contains(@text, "立即添加")]')  # "Add now"
def click_add_to_btn(self):
self.click(self.add_to_btn_loc)
reply_input_5_loc = ('id', 'com.tencent.mm:id/ami')
def input_reply_5(self, num):
self.send_keys(self.reply_input_5_loc, num)
    send_5_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/amp" and @text="发送"]')  # "Send"
def click_send(self):
self.click(self.send_5_loc)
    reply_code_loc = ('id', 'com.tencent.mm:id/ap9')  # QR code returned in the reply
def elements_reply_code(self):
return self.find_elements(self.reply_code_loc)
def clicks_reply_code(self, n):
self.clicks(self.reply_code_loc, n)
    long_code_loc = ('id', 'com.tencent.mm:id/adi')  # long-press QR code
def element_long_code(self):
return self.find_element(self.long_code_loc)
def click_long_code(self):
self.click(self.long_code_loc)
    discern_code_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/cx" and @text="识别图中二维码"]')  # "Recognize QR code in image"
def click_discern_code(self):
self.click(self.discern_code_loc)
    class_group_loc = ('id', 'android:id/text1')  # group chat name
def text_class_group(self):
return self.get_text(self.class_group_loc)
    add_group_chat_loc = ('xpath', '//*[contains(@text, "加入该群聊")]')  # "Join this group chat"
def element_add_group_chat(self):
return self.find_element(self.add_group_chat_loc)
    reply_8_loc = ('xpath', '//android.widget.Image')  # banner after replying "8": reply 8 -> open the official account -> tap the push to see the QR code
def elements_reply_8(self):
return self.find_elements(self.reply_8_loc)
    parent_btn_loc = ('xpath', '//*[contains(@text, "亲爱的家长:")]')  # "Dear parents:"
def element_parent_btn(self):
return self.find_element(self.parent_btn_loc)
    info_btn_loc = ('id', 'com.tencent.mm:id/a8q')  # details
def elements_info_btn(self):
return self.find_elements(self.info_btn_loc)
def clicks_info_btn(self, n):
self.clicks(self.info_btn_loc, n)
    more_games_btn_loc = ('xpath', '//*[contains(@text, "更多亲子游戏")]')  # "More parent-child games"
def click_more_games_btn(self):
self.click(self.more_games_btn_loc)
    look_all_btn_loc = ('xpath', '//*[contains(@text, "查看全部")]')  # "View all"
def click_look_all_btn(self):
self.click(self.look_all_btn_loc)
def element_look_all_btn(self):
return self.find_elements(self.look_all_btn_loc)
    start_fingerprint_buy_loc = ('id', 'com.tencent.mm:id/btp')  # enable-fingerprint-payment dialog text
def text_start_fingerprint_buy(self):
return self.get_text(self.start_fingerprint_buy_loc)
    no_more_reminder_btn_loc = ('id', 'com.tencent.mm:id/btq')  # "Don't remind again"
def click_no_more_reminder_btn(self):
self.click(self.no_more_reminder_btn_loc)
    cancel_btn_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/azz" and @text="取消"]')  # "Cancel"
def click_cancel_btn(self):
self.click(self.cancel_btn_loc)
    usd_password_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/fg4" and @text="使用密码"]')  # "Use password"
def element_usd_password(self):
return self.find_element(self.usd_password_loc)
def click_usd_password(self):
self.click(self.usd_password_loc)
    password_error_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/d8x" and @text="支付密码错误,请重试"]')  # "Wrong payment password, please retry"
def element_password_error(self):
return self.find_element(self.password_error_loc)
    again_btn_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/azz" and @text="重试"]')  # "Retry"
def click_again_btn(self):
self.click(self.again_btn_loc)
    payment_loc = ('id', 'com.tencent.mm:id/fg3')  # "Enter payment password" text
def text_payment(self):
return self.get_text(self.payment_loc)
    typewriting_finish_btn_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/z2" and @text="完成"]')  # "Done" button on the keyboard
def element_typewriting_finish_btn(self):
return self.find_element(self.typewriting_finish_btn_loc)
def click_typewriting_finish_btn(self):
self.click(self.typewriting_finish_btn_loc)
    # Check-in (打卡)
    clock_btn_loc = ('xpath', '//*[contains(@text, "打卡")]')  # "Check in"
def click_clock_btn(self):
self.click(self.clock_btn_loc)
def element_clock_btn(self):
return self.find_element(self.clock_btn_loc)
    no_clock_btn_loc = ('xpath', '//*[contains(@text, "你还未开启打卡")]')  # "You haven't started check-in yet" (com.tencent.mm:id/ox)
def element_no_clock_btn(self):
return self.find_element(self.no_clock_btn_loc)
    get_card_btn_loc = ('xpath', '//*[@text="获取打卡海报" and @class="android.widget.Button"]')  # "Get check-in poster"
def click_get_card_btn(self):
self.click(self.get_card_btn_loc)
    upload_card_btn_loc = ('xpath', '//*[@text="上传截图" and @class="android.widget.Button"]')  # "Upload screenshot"
def click_upload_card_btn(self):
self.click(self.upload_card_btn_loc)
    again_upload_card_btn_loc = ('xpath', '//*[@text="重新上传截图" and @class="android.widget.Button"]')  # "Re-upload screenshot"
def click_again_upload_card_btn(self):
self.click(self.again_upload_card_btn_loc)
    save_img_btn_loc = ('xpath', '//*[@text="保存图片" and @class="android.widget.Button"]')  # "Save image"
def click_save_img_btn(self):
self.click(self.save_img_btn_loc)
    copy_text_btn_loc = ('xpath', '//*[@text="复制发圈文案" and @class="android.widget.Button"]')  # "Copy Moments caption"
def click_copy_text_btn(self):
self.click(self.copy_text_btn_loc)
    copy_format_btn_loc = ('xpath', '//*[contains(@text, "发布朋友圈截图规范")]')  # Moments screenshot guidelines
def element_copy_format_btn(self):
return self.find_element(self.copy_format_btn_loc)
    card_go_btn_loc = ('xpath', '//*[contains(@text, "关闭小程序,去朋友圈打卡截图")]')  # "Close the mini program and screenshot your Moments check-in"
def click_card_go_btn(self):
self.click(self.card_go_btn_loc)
    upload_btn_loc = ('xpath', '//*[@text="上传" and @class="android.widget.Button"]')  # "Upload"
def click_upload_btn(self):
self.click(self.upload_btn_loc)
    today_card_btn_loc = ('xpath', '//*[contains(@text, "今日已提交打卡")]')  # "Check-in already submitted today"
def element_today_card_btn(self):
return self.find_element(self.today_card_btn_loc)
    reset_img_btn_loc = ('xpath', '//*[@text="重新选择截图" and @class="android.widget.Button"]')  # "Re-select screenshot"
def click_reset_img_btn(self):
self.click(self.reset_img_btn_loc)
    generated_loading_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/cx" and @text="正在生成..."]')  # "Generating..."
def element_generated_loading(self):
return self.find_element(self.generated_loading_loc)
    reminder_btn_loc = ('xpath', '//*[contains(@text, "温馨提示")]')  # "Friendly reminder"
def element_reminder_btn(self):
return self.find_element(self.reminder_btn_loc)
    page_expired_loc = ('xpath', '//*[contains(@text, "页面已经过期")]')  # "Page has expired"
def element_page_expired(self):
return self.find_element(self.page_expired_loc)
x_btn_loc = ('id', 'com.tencent.mm:id/kx')
def click_x_btn(self):
self.click(self.x_btn_loc)
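
# Hedged usage sketch (not part of the original page object): it assumes the
# Crazy base class accepts an Appium driver and provides the click /
# find_element / send_keys helpers used above. Capability values are
# placeholders for a local Android emulator running WeChat.
if __name__ == '__main__':
    from appium import webdriver

    caps = {
        'platformName': 'Android',
        'deviceName': 'emulator-5554',
        'appPackage': 'com.tencent.mm',
        'appActivity': '.ui.LauncherUI',
        'noReset': True,
    }
    driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', caps)

    page = Zaojiaopage(driver)
    page.click_find()       # open the "Discover" tab
    page.click_title_btn()  # enter the mini-program list
    page.click_zao()        # launch the target mini program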
input_detailed_address_btn(self, address):\n self.send_keys(self.detailed_address_btn_loc, address)\n <mask token>\n\n def click_save_btn(self):\n self.click(self.save_btn_loc)\n <mask token>\n\n def click_receive_btn(self):\n self.click(self.receive_btn_loc)\n <mask token>\n\n def elements_addressee(self):\n return self.find_elements(self.addressee_loc)\n\n def clicks_addressee(self):\n self.clicks(self.addressee_loc, 0)\n <mask token>\n\n def element_know(self):\n return self.find_element(self.know_btn_loc)\n\n def click_know(self):\n self.click(self.know_btn_loc)\n <mask token>\n\n def element_all_curriculum_btn(self):\n return self.find_element(self.all_curriculum_btn_loc)\n\n def click_all_curriculum_btn(self):\n self.click(self.all_curriculum_btn_loc)\n <mask token>\n\n def element_curriculum_date_btn(self):\n return self.find_element(self.curriculum_date_btn_loc)\n <mask token>\n\n def element_my_btn(self):\n return self.find_element(self.my_btn_loc)\n\n def click_my(self):\n self.click(self.my_btn_loc)\n <mask token>\n\n def click_my_baby(self):\n self.click(self.my_baby_btn_loc)\n <mask token>\n\n def text_my_baby_title(self):\n return self.get_text(self.my_baby_title_loc)\n\n def elements_title(self):\n return self.find_elements(self.my_baby_title_loc)\n <mask token>\n\n def element_new_baby_btn(self):\n return self.find_element(self.new_baby_btn_loc)\n\n def click_new_baby_btn(self):\n self.click(self.new_baby_btn_loc)\n\n def clicks_new_baby_btn(self, n):\n self.clicks(self.new_baby_btn_loc, n)\n <mask token>\n\n def element_get_set(self):\n return self.find_element(self.get_set_loc)\n <mask token>\n\n def click_next(self):\n self.click(self.next_btn_loc)\n <mask token>\n\n def inputs_baby_name(self, name, n):\n self.sends_keys(self.baby_name_loc, name, n)\n <mask token>\n\n def click_baby_bir_btn(self):\n self.click(self.baby_bir_btn_loc)\n <mask token>\n\n def click_finish_btn(self):\n self.click(self.finish_btn_loc)\n\n def clicks_finish_btn(self, n):\n self.clicks(self.finish_btn_loc, n)\n <mask token>\n\n def click_my_home(self):\n self.click(self.my_home_loc)\n\n def element_my_home(self):\n return self.find_element(self.my_home_loc)\n <mask token>\n\n def click_switch_btn(self):\n self.click(self.switch_btn_loc)\n <mask token>\n\n def click_baby_bri(self):\n self.click(self.baby_bri_loc)\n <mask token>\n\n def clicks_class_img(self):\n self.clicks(self.class_img_btn_loc, 0)\n <mask token>\n\n def click_collection_btn(self):\n self.click(self.collection_btn_loc)\n\n def clicks_collection_btn(self, n):\n self.clicks(self.collection_btn_loc, n)\n\n def element_collection_btn(self):\n return self.find_element(self.collection_btn_loc)\n <mask token>\n\n def click_write_record_btn(self):\n self.click(self.write_record_btn_loc)\n\n def clicks_write_record_btn(self, n):\n self.clicks(self.write_record_btn_loc, n)\n <mask token>\n\n def click_album_btn(self):\n self.click(self.album_btn_loc)\n\n def element_album_btn(self):\n return self.find_element(self.album_btn_loc)\n <mask token>\n\n def click_small_video_btn(self):\n self.click(self.small_video_btn_loc)\n\n def element_small_video_btn(self):\n return self.find_element(self.small_video_btn_loc)\n <mask token>\n\n def click_release_btn(self):\n self.click(self.release_btn_loc)\n\n def clicks_release_btn(self, n):\n self.clicks(self.release_btn_loc, n)\n\n def element_record_info(self, data):\n record_info_loc = 'xpath', '//*[contains(@text, \"{}\")]'.format(data)\n record_info = self.find_element(record_info_loc)\n if 
record_info:\n return True\n else:\n return False\n <mask token>\n\n def click_class_name(self):\n self.click(self.class_name_loc)\n\n def clicks_class_name(self, n):\n self.clicks(self.class_name_loc, n)\n\n def elements_class_name(self):\n return self.find_elements(self.class_name_loc)\n <mask token>\n\n def click_class2_name(self):\n self.click(self.class_name2_loc)\n\n def clicks_class2_name(self, n):\n self.clicks(self.class_name2_loc, n)\n <mask token>\n\n def input_write_text(self, text):\n self.send_keys(self.write_text_loc, text)\n\n def inputs_write_text(self, text, n):\n self.sends_keys(self.write_text_loc, text, n)\n <mask token>\n\n def clicks_choice_album(self, n):\n self.clicks(self.choice_album_loc, n)\n\n def elements_choice_album(self):\n return self.find_elements(self.choice_album_loc)\n <mask token>\n\n def click_complete_btn(self):\n self.click(self.complete_btn_loc)\n <mask token>\n\n def click_my_collection_btn(self):\n self.click(self.my_collection_btn_loc)\n <mask token>\n\n def elements_my_collection_english_course_btn(self):\n return self.find_elements(self.my_collection_english_course_btn_loc)\n <mask token>\n\n def elements_my_collection_game_course_btn(self):\n return self.find_elements(self.my_collection_game_course_btn_loc)\n <mask token>\n\n def click_my_course_btn(self):\n self.click(self.my_course_btn_loc)\n <mask token>\n\n def elements_my_course_buy_btn(self):\n return self.find_elements(self.my_course_buy_btn_loc)\n <mask token>\n\n def click_my_order_btn(self):\n self.click(self.my_order_btn_loc)\n <mask token>\n\n def elements_my_order_card_btn(self):\n return self.find_elements(self.my_order_card_btn_loc)\n <mask token>\n\n def click_my_record_btn(self):\n self.click(self.my_record_btn_loc)\n <mask token>\n\n def elements_my_record_class_btn(self):\n return self.find_elements(self.my_record_class_btn_loc)\n <mask token>\n\n def element_back_btn(self):\n return self.find_element(self.back_btn_loc)\n\n def click_back_btn(self):\n self.click(self.back_btn_loc)\n <mask token>\n\n def click_reply_5(self):\n self.click(self.reply_5_loc)\n\n def elements_reply_5(self):\n return self.find_elements(self.reply_5_loc)\n <mask token>\n\n def click_add_to_btn(self):\n self.click(self.add_to_btn_loc)\n <mask token>\n\n def input_reply_5(self, num):\n self.send_keys(self.reply_input_5_loc, num)\n <mask token>\n\n def click_send(self):\n self.click(self.send_5_loc)\n <mask token>\n\n def elements_reply_code(self):\n return self.find_elements(self.reply_code_loc)\n\n def clicks_reply_code(self, n):\n self.clicks(self.reply_code_loc, n)\n <mask token>\n\n def element_long_code(self):\n return self.find_element(self.long_code_loc)\n\n def click_long_code(self):\n self.click(self.long_code_loc)\n <mask token>\n\n def click_discern_code(self):\n self.click(self.discern_code_loc)\n <mask token>\n\n def text_class_group(self):\n return self.get_text(self.class_group_loc)\n <mask token>\n\n def element_add_group_chat(self):\n return self.find_element(self.add_group_chat_loc)\n <mask token>\n\n def elements_reply_8(self):\n return self.find_elements(self.reply_8_loc)\n <mask token>\n\n def element_parent_btn(self):\n return self.find_element(self.parent_btn_loc)\n <mask token>\n\n def elements_info_btn(self):\n return self.find_elements(self.info_btn_loc)\n\n def clicks_info_btn(self, n):\n self.clicks(self.info_btn_loc, n)\n <mask token>\n\n def click_more_games_btn(self):\n self.click(self.more_games_btn_loc)\n <mask token>\n\n def click_look_all_btn(self):\n 
self.click(self.look_all_btn_loc)\n\n def element_look_all_btn(self):\n return self.find_elements(self.look_all_btn_loc)\n <mask token>\n\n def text_start_fingerprint_buy(self):\n return self.get_text(self.start_fingerprint_buy_loc)\n <mask token>\n\n def click_no_more_reminder_btn(self):\n self.click(self.no_more_reminder_btn_loc)\n <mask token>\n\n def click_cancel_btn(self):\n self.click(self.cancel_btn_loc)\n <mask token>\n\n def element_usd_password(self):\n return self.find_element(self.usd_password_loc)\n\n def click_usd_password(self):\n self.click(self.usd_password_loc)\n <mask token>\n\n def element_password_error(self):\n return self.find_element(self.password_error_loc)\n <mask token>\n\n def click_again_btn(self):\n self.click(self.again_btn_loc)\n <mask token>\n\n def text_payment(self):\n return self.get_text(self.payment_loc)\n <mask token>\n\n def element_typewriting_finish_btn(self):\n return self.find_element(self.typewriting_finish_btn_loc)\n\n def click_typewriting_finish_btn(self):\n self.click(self.typewriting_finish_btn_loc)\n <mask token>\n\n def click_clock_btn(self):\n self.click(self.clock_btn_loc)\n\n def element_clock_btn(self):\n return self.find_element(self.clock_btn_loc)\n <mask token>\n\n def element_no_clock_btn(self):\n return self.find_element(self.no_clock_btn_loc)\n <mask token>\n\n def click_get_card_btn(self):\n self.click(self.get_card_btn_loc)\n <mask token>\n\n def click_upload_card_btn(self):\n self.click(self.upload_card_btn_loc)\n <mask token>\n\n def click_again_upload_card_btn(self):\n self.click(self.again_upload_card_btn_loc)\n <mask token>\n\n def click_save_img_btn(self):\n self.click(self.save_img_btn_loc)\n <mask token>\n\n def click_copy_text_btn(self):\n self.click(self.copy_text_btn_loc)\n <mask token>\n\n def element_copy_format_btn(self):\n return self.find_element(self.copy_format_btn_loc)\n <mask token>\n\n def click_card_go_btn(self):\n self.click(self.card_go_btn_loc)\n <mask token>\n\n def click_upload_btn(self):\n self.click(self.upload_btn_loc)\n <mask token>\n\n def element_today_card_btn(self):\n return self.find_element(self.today_card_btn_loc)\n <mask token>\n\n def click_reset_img_btn(self):\n self.click(self.reset_img_btn_loc)\n <mask token>\n\n def element_generated_loading(self):\n return self.find_element(self.generated_loading_loc)\n <mask token>\n\n def element_reminder_btn(self):\n return self.find_element(self.reminder_btn_loc)\n <mask token>\n\n def element_page_expired(self):\n return self.find_element(self.page_expired_loc)\n <mask token>\n\n def click_x_btn(self):\n self.click(self.x_btn_loc)\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/4/14 14:31\n# @Author : lixiaofeng\n# @File : page_zaojiao.py\n# @Software: PyCharm\n# @desc :\n\nfrom common.basics import Crazy\n\n\nclass Zaojiaopage(Crazy):\n \"\"\"早教小程序\"\"\"\n\n zao_btn_loc = ('xpath', '//*[@resource-id=\"com.tencent.mm:id/cx\" and @text=\"包妈优选\"]')\n\n # zao_btn_loc = ('xpath', '//*[@resource-id=\"com.tencent.mm:id/cx\" and @text=\"小小包早教\"]')\n\n def click_zao(self):\n self.click(self.zao_btn_loc)\n\n def element_zao(self):\n return self.find_element(self.zao_btn_loc)\n\n find_loc = ('xpath', '//*[@resource-id=\"com.tencent.mm:id/d7b\" and @text=\"发现\"]') # 发现按钮\n\n def click_find(self):\n self.click(self.find_loc)\n\n title_btn_loc = ('xpath', '//*[@resource-id=\"android:id/title\" and @text=\"小程序\"]') # 发现页小程序按钮\n\n def click_title_btn(self):\n self.click(self.title_btn_loc)\n\n helper_loc = ('xpath', '//*[@resource-id=\"com.tencent.mm:id/c5\" and @text=\"小程序助手\"]') # 小程序助手\n\n def element_helper(self):\n return self.find_element(self.helper_loc)\n\n def click_helper(self):\n self.click(self.helper_loc)\n\n small_help_btn_loc = ('xpath', '//*[@resource-id=\"com.tencent.mm:id/cx\" and @text=\"小程序助手\"]') # 小程序助手\n\n def click_small_help_btn(self):\n self.click(self.small_help_btn_loc)\n\n small_name_loc = ('xpath', '//*[contains(@text, \"包妈优选\")]') # 包妈优选\n\n def element_small_name(self):\n return self.find_element(self.small_name_loc)\n\n def click_small_name(self):\n self.click(self.small_name_loc)\n\n switching_applet_btn_loc = ('xpath', '//*[contains(@text, \"切换小程序\")]') # 切换小程序\n\n def click_switching_applet_btn(self):\n self.click(self.switching_applet_btn_loc)\n\n delete_small_btn_loc = ('xpath', '//*[contains(@text, \"删除\")]') # 删除小程序按钮\n\n def click_delete_small_btn(self):\n self.click(self.delete_small_btn_loc)\n\n edition_btn_loc = ('xpath', '//*[contains(@text, \"百宝福利Buy\")]')\n\n def element_edition_btn(self):\n return self.find_element(self.edition_btn_loc)\n\n delete_small1_btn_loc = ('xpath', '//*[contains(@text, \"拖动到此处删除\")]')\n\n def element_delete_small1_btn(self):\n return self.find_element(self.delete_small1_btn_loc)\n\n version_btn_loc = ('xpath', '//*[contains(@text, \"版本查看\")]') # 版本查看按钮\n\n def click_version_btn(self):\n self.click(self.version_btn_loc)\n\n experience_version_btn_loc = ('xpath', '//*[contains(@text, \"6.0.09\")]') # 体验版\n\n def clicks_experience_version_btn(self):\n self.clicks(self.experience_version_btn_loc, -1)\n\n audition_class_btn_loc = ('xpath', '//*[contains(@text, \"0元领取10节试听课\")]') # 领取试听课\n\n def element_audition_class_btn(self):\n return self.find_element(self.audition_class_btn_loc)\n\n def click_audition_class_btn(self):\n self.click(self.audition_class_btn_loc)\n\n wechat_grant_btn_loc = (('xpath', '//*[contains(@text, \"微信授权\") and @class=\"android.widget.Button\" ]')) # 微信授权\n\n def click_wechat_grant_btn(self):\n self.click(self.wechat_grant_btn_loc)\n\n def double_click_wechat_grant(self):\n self.double_click(self.wechat_grant_btn_loc)\n\n def element_wechat_grant_btn(self):\n return self.find_element(self.wechat_grant_btn_loc)\n\n allow_btn_loc = ('xpath', '//*[@resource-id=\"com.tencent.mm:id/st\" and @text=\"允许\"]') # 完成按钮\n\n def click_allow_btn(self):\n self.click(self.allow_btn_loc)\n\n month_btn_loc = ('xpath', '//*[contains(@text, \"2018\")]') # 选择月份\n\n def click_mouth_btn(self):\n self.click(self.month_btn_loc)\n\n sure_btn_loc = ('xpath', '//*[contains(@text, \"确定\")]') # 确定按钮\n\n def click_sure_btn(self):\n 
self.click(self.sure_btn_loc)\n\n class_info_loc = ('xpath', '//*[contains(@text, \"课程介绍\")]') # 课程介绍\n\n # class_info_loc = ('xpath', '//android.widget.FrameLayout/android.view.ViewGroup[0]') # 课程介绍\n\n def class_info_btn(self):\n self.click(self.class_info_loc)\n\n attend_lectures_btn_loc = ('xpath', '//*[contains(@text, \"立即听课\")]') # 立即听课\n\n def element_attend_lectures_btn(self):\n return self.find_element(self.attend_lectures_btn_loc)\n\n def click_attend_lectures_btn(self):\n self.click(self.attend_lectures_btn_loc)\n\n class_btn_loc = ('xpath', '//*[contains(@text, \"预备课 预备课\")]') # 预备课 预备课\n\n def element_class_btn(self):\n return self.find_element(self.class_btn_loc)\n\n get_to_know_btn_loc = ('xpath', '//*[contains(@text, \"立即了解正式课 \")]') # 立即了解正式课\n\n def click_get_to_know_btn(self):\n self.click(self.get_to_know_btn_loc)\n\n def element_get_to_know_btn(self):\n return self.find_element(self.get_to_know_btn_loc)\n\n sure_buy_btn_loc = ('xpath', '//*[contains(@text, \"立即购买\")]') # 立即购买\n\n def click_sure_buy_btn(self):\n self.click(self.sure_buy_btn_loc)\n\n buy_password_loc = ('id', 'com.tencent.mm:id/cfs') # 输入支付密码\n\n def input_buy_password(self, paw):\n self.send_keys(self.buy_password_loc, paw)\n\n check_buy_money_loc = ('id', 'com.tencent.mm:id/dlh') # 获取支付金额\n\n def text_buy_money(self):\n return self.get_text(self.check_buy_money_loc)\n\n success_btn_loc = ('xpath', '//*[@resource-id=\"com.tencent.mm:id/f8o\" and @text=\"完成\"]') # 完成按钮\n\n def click_success_btn(self):\n self.click(self.success_btn_loc)\n\n check_address_btn_loc = ('xpath', '//*[contains(@text, \"收货地址:请选择地址\")]') # 选择收货地址\n\n def click_check_address_btn(self):\n self.click(self.check_address_btn_loc)\n\n def element_check_address_btn(self):\n return self.find_element(self.check_address_btn_loc)\n\n add_address_btn_loc = ('xpath', '//*[contains(@text, \"添加地址\")]') # 添加地址\n\n def click_add_address_btn(self):\n self.click(self.add_address_btn_loc)\n\n name_loc = ('xpath', '//*[contains(@text, \"请输入你的姓名\")]') # 请输入你的姓名\n\n def input_name_btn(self, name):\n self.send_keys(self.name_loc, name)\n\n phone_btn_loc = ('xpath', '//*[contains(@text, \"请填写收件人电话\")]') # 请填写收件人电话\n\n def input_phone_btn(self, phone):\n self.send_keys(self.phone_btn_loc, phone)\n\n region_btn_loc = ('xpath', '//*[contains(@text, \"请输入你所在地区\")]') # 请输入你所在地区\n\n def click_region_btn(self):\n self.click(self.region_btn_loc)\n\n detailed_address_btn_loc = ('xpath', '//*[contains(@text, \"请输入你的详细地址\")]') # 请输入你的详细地址\n\n def input_detailed_address_btn(self, address):\n self.send_keys(self.detailed_address_btn_loc, address)\n\n save_btn_loc = ('xpath', '//*[contains(@text, \"保存\")]') # 保存\n\n def click_save_btn(self):\n self.click(self.save_btn_loc)\n\n receive_btn_loc = ('xpath', '//*[contains(@text, \"立即领取\")]') # 立即领取\n\n def click_receive_btn(self):\n self.click(self.receive_btn_loc)\n\n addressee_loc = ('xpath', '//*[contains(@text, \"收件人:\")]') # 地址列表是否有地址信息\n\n def elements_addressee(self):\n return self.find_elements(self.addressee_loc)\n\n def clicks_addressee(self):\n self.clicks(self.addressee_loc, 0)\n\n know_btn_loc = ('xpath', '//*[contains(@text, \"知道了\")]') # 地址列表是否有地址信息\n\n def element_know(self):\n return self.find_element(self.know_btn_loc)\n\n def click_know(self):\n self.click(self.know_btn_loc)\n\n all_curriculum_btn_loc = ('xpath', '//*[contains(@text, \"查看全部课程\")]') # 查看全部课程\n\n def element_all_curriculum_btn(self):\n return self.find_element(self.all_curriculum_btn_loc)\n\n def click_all_curriculum_btn(self):\n 
self.click(self.all_curriculum_btn_loc)\n\n curriculum_date_btn_loc = ('xpath', '//*[contains(@text, \"2019-0\")]') # 历史推送\n\n def element_curriculum_date_btn(self):\n return self.find_element(self.curriculum_date_btn_loc)\n\n my_btn_loc = ('xpath', '//*[@resource-id=\"com.tencent.mm:id/ct\" and @text=\"我的\"]') # 我的\n\n def element_my_btn(self):\n return self.find_element(self.my_btn_loc)\n\n def click_my(self):\n self.click(self.my_btn_loc)\n\n my_baby_btn_loc = ('xpath', '//*[contains(@text, \"我的宝宝\")]') # 我的宝宝\n\n def click_my_baby(self):\n self.click(self.my_baby_btn_loc)\n\n my_baby_title_loc = ('id', 'com.tencent.mm:id/ox')\n\n def text_my_baby_title(self):\n return self.get_text(self.my_baby_title_loc)\n\n def elements_title(self):\n return self.find_elements(self.my_baby_title_loc)\n\n new_baby_btn_loc = ('xpath', '//*[contains(@text, \"新建宝宝\")]') # 新建宝宝\n\n def element_new_baby_btn(self):\n return self.find_element(self.new_baby_btn_loc)\n\n def click_new_baby_btn(self):\n self.click(self.new_baby_btn_loc)\n\n def clicks_new_baby_btn(self, n):\n self.clicks(self.new_baby_btn_loc, n)\n\n get_set_loc = ('xpath', '//*[contains(@text, \"预备课 预备课\")]') # 新建宝宝\n\n def element_get_set(self):\n return self.find_element(self.get_set_loc)\n\n next_btn_loc = ('xpath', '//*[contains(@text, \"下一步\")]') # 我的宝宝\n\n def click_next(self):\n self.click(self.next_btn_loc)\n\n baby_name_loc = ('xpath', '//*[contains(@text, \"请输入宝宝姓名\")]') # 请输入宝宝姓名\n\n def inputs_baby_name(self, name, n):\n self.sends_keys(self.baby_name_loc, name, n)\n\n baby_bir_btn_loc = ('xpath', '//*[contains(@text, \"宝宝的生日:\")]') # 宝宝的生日\n\n def click_baby_bir_btn(self):\n self.click(self.baby_bir_btn_loc)\n\n finish_btn_loc = ('xpath', '//*[contains(@text, \"完成\")]') # 完成按钮\n\n def click_finish_btn(self):\n self.click(self.finish_btn_loc)\n\n def clicks_finish_btn(self, n):\n self.clicks(self.finish_btn_loc, n)\n\n my_home_loc = ('xpath', '//*[@resource-id=\"com.tencent.mm:id/ct\" and @text=\"首页\"]') # 首页\n\n def click_my_home(self):\n self.click(self.my_home_loc)\n\n def element_my_home(self):\n return self.find_element(self.my_home_loc)\n\n switch_btn_loc = ('xpath', '//*[contains(@text, \"切换\")]') # 切换\n\n def click_switch_btn(self):\n self.click(self.switch_btn_loc)\n\n baby_bri_loc = ('xpath', '//*[contains(@text, \"宝宝生日:\")]') # 宝宝生日:\n\n def click_baby_bri(self):\n self.click(self.baby_bri_loc)\n\n class_img_btn_loc = ('xpath', 'android.widget.Image')\n\n def clicks_class_img(self):\n self.clicks(self.class_img_btn_loc, 0)\n\n collection_btn_loc = ('xpath', '//*[contains(@text, \"收藏\")]') # 收藏\n\n def click_collection_btn(self):\n self.click(self.collection_btn_loc)\n\n def clicks_collection_btn(self, n):\n self.clicks(self.collection_btn_loc, n)\n\n def element_collection_btn(self):\n return self.find_element(self.collection_btn_loc)\n\n write_record_btn_loc = ('xpath', '//*[contains(@text, \"写记录\") and @class=\"android.widget.Button\" ]') # 写记录按钮\n\n def click_write_record_btn(self):\n self.click(self.write_record_btn_loc)\n\n def clicks_write_record_btn(self, n):\n self.clicks(self.write_record_btn_loc, n)\n\n album_btn_loc = ('xpath', '//*[contains(@text, \"相册\")]') # 相册\n\n def click_album_btn(self):\n self.click(self.album_btn_loc)\n\n def element_album_btn(self):\n return self.find_element(self.album_btn_loc)\n\n small_video_btn_loc = ('xpath', '//*[contains(@text, \"小视频\")]') # 小视频\n\n def click_small_video_btn(self):\n self.click(self.small_video_btn_loc)\n\n def element_small_video_btn(self):\n return 
self.find_element(self.small_video_btn_loc)\n\n release_btn_loc = ('xpath', '//*[contains(@text, \"发布\")]') # 发布\n\n def click_release_btn(self):\n self.click(self.release_btn_loc)\n\n def clicks_release_btn(self, n):\n self.clicks(self.release_btn_loc, n)\n\n def element_record_info(self, data): # 判断是否定位到包含text的元素\n record_info_loc = ('xpath', '//*[contains(@text, \"{}\")]'.format(data))\n record_info = self.find_element(record_info_loc)\n if record_info:\n return True\n else:\n return False\n\n class_name_loc = ('xpath', '//*[contains(@text, \"歌曲\")]') # 课程名称\n\n # class_name_loc = ('xpath', '//*[contains(@text, \"歌曲:Head and shoulders\")]') # 课程名称\n\n def click_class_name(self):\n self.click(self.class_name_loc)\n\n def clicks_class_name(self, n):\n self.clicks(self.class_name_loc, n)\n\n def elements_class_name(self):\n return self.find_elements(self.class_name_loc)\n\n class_name2_loc = ('xpath', '//*[contains(@text, \"一起走\")]') # 课程名称\n\n # class_name2_loc = ('xpath', '//*[contains(@text, \"弹出来的画\")]') # 课程名称\n\n def click_class2_name(self):\n self.click(self.class_name2_loc)\n\n def clicks_class2_name(self, n):\n self.clicks(self.class_name2_loc, n)\n\n write_text_loc = ('xpath', '//*[contains(@text, \"0/1000\")]') # 写记录\n\n def input_write_text(self, text):\n self.send_keys(self.write_text_loc, text)\n\n def inputs_write_text(self, text, n):\n self.sends_keys(self.write_text_loc, text, n)\n\n choice_album_loc = ('id', 'com.tencent.mm:id/bpy')\n\n def clicks_choice_album(self, n):\n self.clicks(self.choice_album_loc, n)\n\n def elements_choice_album(self):\n return self.find_elements(self.choice_album_loc)\n\n complete_btn_loc = ('id', 'com.tencent.mm:id/ki') # 完成\n\n def click_complete_btn(self):\n self.click(self.complete_btn_loc)\n\n my_collection_btn_loc = ('xpath', '//*[contains(@text, \"我的收藏\")]') # 我的收藏\n\n def click_my_collection_btn(self):\n self.click(self.my_collection_btn_loc)\n\n my_collection_english_course_btn_loc = ('xpath', '//*[contains(@text, \"早教\")]') # 早教英语课\n\n def elements_my_collection_english_course_btn(self):\n return self.find_elements(self.my_collection_english_course_btn_loc)\n\n my_collection_game_course_btn_loc = ('xpath', '//*[contains(@text, \"宝宝游戏馆\")]') # 宝宝游戏馆\n\n def elements_my_collection_game_course_btn(self):\n return self.find_elements(self.my_collection_game_course_btn_loc)\n\n my_course_btn_loc = ('xpath', '//*[contains(@text, \"我的课程\")]') # 我的课程\n\n def click_my_course_btn(self):\n self.click(self.my_course_btn_loc)\n\n my_course_buy_btn_loc = ('xpath', '//*[contains(@text, \"早教核心课年卡\")]') # 早教核心课年卡\n\n def elements_my_course_buy_btn(self):\n return self.find_elements(self.my_course_buy_btn_loc)\n\n my_order_btn_loc = ('xpath', '//*[contains(@text, \"我的订单\")]') # 我的订单\n\n def click_my_order_btn(self):\n self.click(self.my_order_btn_loc)\n\n my_order_card_btn_loc = ('xpath', '//*[contains(@text, \"订单编号:\")]') # 订单编号:\n\n def elements_my_order_card_btn(self):\n return self.find_elements(self.my_order_card_btn_loc)\n\n my_record_btn_loc = ('xpath', '//*[contains(@text, \"成长记录\")]') # 成长记录\n\n def click_my_record_btn(self):\n self.click(self.my_record_btn_loc)\n\n my_record_class_btn_loc = ('xpath', '//*[contains(@text, \"#\")]') # # 测试英语课程组\n\n def elements_my_record_class_btn(self):\n return self.find_elements(self.my_record_class_btn_loc)\n\n back_btn_loc = (\n 'xpath', '//*[@resource-id=\"com.tencent.mm:id/on\" and @class=\"android.widget.LinearLayout\"]') # 返回按钮\n\n def element_back_btn(self):\n return 
self.find_element(self.back_btn_loc)\n\n def click_back_btn(self):\n self.click(self.back_btn_loc)\n\n reply_5_loc = ('xpath', '//android.widget.Image') # 回复5\n\n def click_reply_5(self):\n self.click(self.reply_5_loc)\n\n def elements_reply_5(self):\n return self.find_elements(self.reply_5_loc)\n\n add_to_btn_loc = ('xpath', '//*[contains(@text, \"立即添加\")]') # 立即添加\n\n def click_add_to_btn(self):\n self.click(self.add_to_btn_loc)\n\n reply_input_5_loc = ('id', 'com.tencent.mm:id/ami')\n\n def input_reply_5(self, num):\n self.send_keys(self.reply_input_5_loc, num)\n\n send_5_loc = ('xpath', '//*[@resource-id=\"com.tencent.mm:id/amp\" and @text=\"发送\"]') # 发送\n\n def click_send(self):\n self.click(self.send_5_loc)\n\n reply_code_loc = ('id', 'com.tencent.mm:id/ap9') # 获取回复的二维码\n\n def elements_reply_code(self):\n return self.find_elements(self.reply_code_loc)\n\n def clicks_reply_code(self, n):\n self.clicks(self.reply_code_loc, n)\n\n long_code_loc = ('id', 'com.tencent.mm:id/adi') # 长按二维码\n\n def element_long_code(self):\n return self.find_element(self.long_code_loc)\n\n def click_long_code(self):\n self.click(self.long_code_loc)\n\n discern_code_loc = ('xpath', '//*[@resource-id=\"com.tencent.mm:id/cx\" and @text=\"识别图中二维码\"]') # 识别图中二维码\n\n def click_discern_code(self):\n self.click(self.discern_code_loc)\n\n class_group_loc = ('id', 'android:id/text1') # 群名称\n\n def text_class_group(self):\n return self.get_text(self.class_group_loc)\n\n add_group_chat_loc = ('xpath', '//*[contains(@text, \"加入该群聊\")]') # 加入该群聊\n\n def element_add_group_chat(self):\n return self.find_element(self.add_group_chat_loc)\n\n reply_8_loc = ('xpath', '//android.widget.Image') # 回复8的banner 回复8->进公众号->点击推送 看到的二维码\n\n def elements_reply_8(self):\n return self.find_elements(self.reply_8_loc)\n\n parent_btn_loc = ('xpath', '//*[contains(@text, \"亲爱的家长:\")]') # 亲爱的家长:\n\n def element_parent_btn(self):\n return self.find_element(self.parent_btn_loc)\n\n info_btn_loc = ('id', 'com.tencent.mm:id/a8q') # 详情\n\n def elements_info_btn(self):\n return self.find_elements(self.info_btn_loc)\n\n def clicks_info_btn(self, n):\n self.clicks(self.info_btn_loc, n)\n\n more_games_btn_loc = ('xpath', '//*[contains(@text, \"更多亲子游戏\")]') # 更多亲子游戏\n\n def click_more_games_btn(self):\n self.click(self.more_games_btn_loc)\n\n look_all_btn_loc = ('xpath', '//*[contains(@text, \"查看全部\")]') # 查看全部\n\n def click_look_all_btn(self):\n self.click(self.look_all_btn_loc)\n\n def element_look_all_btn(self):\n return self.find_elements(self.look_all_btn_loc)\n\n start_fingerprint_buy_loc = ('id', 'com.tencent.mm:id/btp') # 开启指纹支付弹窗文本 开启指纹支付,支付时可通过验证指纹快速完成付款。\n\n def text_start_fingerprint_buy(self):\n return self.get_text(self.start_fingerprint_buy_loc)\n\n no_more_reminder_btn_loc = ('id', 'com.tencent.mm:id/btq') # 不再提醒\n\n def click_no_more_reminder_btn(self):\n self.click(self.no_more_reminder_btn_loc)\n\n cancel_btn_loc = ('xpath', '//*[@resource-id=\"com.tencent.mm:id/azz\" and @text=\"取消\"]') # 取消\n\n def click_cancel_btn(self):\n self.click(self.cancel_btn_loc)\n\n usd_password_loc = ('xpath', '//*[@resource-id=\"com.tencent.mm:id/fg4\" and @text=\"使用密码\"]') # 使用密码\n\n def element_usd_password(self):\n return self.find_element(self.usd_password_loc)\n\n def click_usd_password(self):\n self.click(self.usd_password_loc)\n\n password_error_loc = ('xpath', '//*[@resource-id=\"com.tencent.mm:id/d8x\" and @text=\"支付密码错误,请重试\"]') # 支付密码错误,请重试\n\n def element_password_error(self):\n return self.find_element(self.password_error_loc)\n\n 
again_btn_loc = ('xpath', '//*[@resource-id=\"com.tencent.mm:id/azz\" and @text=\"重试\"]') # 重试\n\n def click_again_btn(self):\n self.click(self.again_btn_loc)\n\n payment_loc = ('id', 'com.tencent.mm:id/fg3') # 请输入支付密码 文本\n\n def text_payment(self):\n return self.get_text(self.payment_loc)\n\n typewriting_finish_btn_loc = ('xpath', '//*[@resource-id=\"com.tencent.mm:id/z2\" and @text=\"完成\"]') # 输入法上的完成按钮\n\n def element_typewriting_finish_btn(self):\n return self.find_element(self.typewriting_finish_btn_loc)\n\n def click_typewriting_finish_btn(self):\n self.click(self.typewriting_finish_btn_loc)\n\n # 打卡\n\n clock_btn_loc = ('xpath', '//*[contains(@text, \"打卡\")]') # 打卡\n\n def click_clock_btn(self):\n self.click(self.clock_btn_loc)\n\n def element_clock_btn(self):\n return self.find_element(self.clock_btn_loc)\n\n # com.tencent.mm:id/ox\n\n no_clock_btn_loc = ('xpath', '//*[contains(@text, \"你还未开启打卡\")]') # 你还未开启打卡\n\n def element_no_clock_btn(self):\n return self.find_element(self.no_clock_btn_loc)\n\n get_card_btn_loc = ('xpath', '//*[@text=\"获取打卡海报\" and @class=\"android.widget.Button\"]') # 获取打卡海报\n\n def click_get_card_btn(self):\n self.click(self.get_card_btn_loc)\n\n upload_card_btn_loc = ('xpath', '//*[@text=\"上传截图\" and @class=\"android.widget.Button\"]') # 上传截图\n\n def click_upload_card_btn(self):\n self.click(self.upload_card_btn_loc)\n\n again_upload_card_btn_loc = ('xpath', '//*[@text=\"重新上传截图\" and @class=\"android.widget.Button\"]') # 重新上传截图\n\n def click_again_upload_card_btn(self):\n self.click(self.again_upload_card_btn_loc)\n\n save_img_btn_loc = ('xpath', '//*[@text=\"保存图片\" and @class=\"android.widget.Button\"]') # 保存图片\n\n def click_save_img_btn(self):\n self.click(self.save_img_btn_loc)\n\n copy_text_btn_loc = ('xpath', '//*[@text=\"复制发圈文案\" and @class=\"android.widget.Button\"]') # 复制发圈文案\n\n def click_copy_text_btn(self):\n self.click(self.copy_text_btn_loc)\n\n copy_format_btn_loc = ('xpath', '//*[contains(@text, \"发布朋友圈截图规范\")]') # 发布朋友圈截图规范\n\n def element_copy_format_btn(self):\n return self.find_element(self.copy_format_btn_loc)\n\n card_go_btn_loc = ('xpath', '//*[contains(@text, \"关闭小程序,去朋友圈打卡截图\")]') # 关闭小程序,去朋友圈打卡截图\n\n def click_card_go_btn(self):\n self.click(self.card_go_btn_loc)\n\n upload_btn_loc = ('xpath', '//*[@text=\"上传\" and @class=\"android.widget.Button\"]') # 上传\n\n def click_upload_btn(self):\n self.click(self.upload_btn_loc)\n\n today_card_btn_loc = ('xpath', '//*[contains(@text, \"今日已提交打卡\")]') # 今日已提交打卡\n\n def element_today_card_btn(self):\n return self.find_element(self.today_card_btn_loc)\n\n reset_img_btn_loc = ('xpath', '//*[@text=\"重新选择截图\" and @class=\"android.widget.Button\"]') # 重新选择截图\n\n def click_reset_img_btn(self):\n self.click(self.reset_img_btn_loc)\n\n generated_loading_loc = ('xpath', '//*[@resource-id=\"com.tencent.mm:id/cx\" and @text=\"正在生成...\"]') # 正在生成...\n\n def element_generated_loading(self):\n return self.find_element(self.generated_loading_loc)\n\n reminder_btn_loc = ('xpath', '//*[contains(@text, \"温馨提示\")]') # 温馨提示\n\n def element_reminder_btn(self):\n return self.find_element(self.reminder_btn_loc)\n\n page_expired_loc = ('xpath', '//*[contains(@text, \"页面已经过期\")]') # 页面已经过期\n\n def element_page_expired(self):\n return self.find_element(self.page_expired_loc)\n\n x_btn_loc = ('id', 'com.tencent.mm:id/kx')\n\n def click_x_btn(self):\n self.click(self.x_btn_loc)\n",
"step-ids": [
73,
89,
121,
148,
152
]
}
|
[
73,
89,
121,
148,
152
] |
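The Zaojiaopage record above stores every locator as a (strategy, value) tuple and routes all driver interaction through a Crazy base class that the record itself does not show. A minimal sketch of the wrapper such tuples imply; the constructor and method bodies here are assumptions, not the real common.basics.Crazy:

class Crazy:
    # Hypothetical stand-in for common.basics.Crazy, which is not in the record.
    def __init__(self, driver):
        self.driver = driver

    def find_element(self, loc):
        by, value = loc  # e.g. ('xpath', '//*[contains(@text, "打卡")]')
        return self.driver.find_element(by, value)

    def click(self, loc):
        self.find_element(loc).click()

Both Selenium 4 and the Appium Python client accept the plain 'xpath' strategy string, which is why the locator tuples can be unpacked straight into find_element.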
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def select_from_model(dataframe):
X = dataframe.iloc[:, :-1]
y = dataframe.iloc[:, -1]
np.random.seed(9)
model = RandomForestClassifier()
sfm = SelectFromModel(model)
sfm = sfm.fit(X, y)
feature_idx = sfm.get_support()
feature_name = X.columns[feature_idx]
return list(feature_name)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
data = pd.read_csv('data/house_prices_multivariate.csv')
def select_from_model(dataframe):
X = dataframe.iloc[:, :-1]
y = dataframe.iloc[:, -1]
np.random.seed(9)
model = RandomForestClassifier()
sfm = SelectFromModel(model)
sfm = sfm.fit(X, y)
feature_idx = sfm.get_support()
feature_name = X.columns[feature_idx]
return list(feature_name)
<|reserved_special_token_1|>
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
import numpy as np
data = pd.read_csv('data/house_prices_multivariate.csv')
def select_from_model(dataframe):
X = dataframe.iloc[:, :-1]
y = dataframe.iloc[:, -1]
np.random.seed(9)
model = RandomForestClassifier()
sfm = SelectFromModel(model)
sfm = sfm.fit(X, y)
feature_idx = sfm.get_support()
feature_name = X.columns[feature_idx]
return list(feature_name)
<|reserved_special_token_1|>
# Default imports
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
import numpy as np
data = pd.read_csv('data/house_prices_multivariate.csv')
# Your solution code here
def select_from_model(dataframe):
X = dataframe.iloc[:, :-1]
y = dataframe.iloc[:, -1]
np.random.seed(9)
model = RandomForestClassifier()
sfm = SelectFromModel(model)
sfm = sfm.fit(X, y)
feature_idx = sfm.get_support()
feature_name = X.columns[feature_idx]
return list(feature_name)
|
flexible
|
{
"blob_id": "d6791c8122129a46631582e7d9339ea08bd2e92b",
"index": 3183,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef select_from_model(dataframe):\n X = dataframe.iloc[:, :-1]\n y = dataframe.iloc[:, -1]\n np.random.seed(9)\n model = RandomForestClassifier()\n sfm = SelectFromModel(model)\n sfm = sfm.fit(X, y)\n feature_idx = sfm.get_support()\n feature_name = X.columns[feature_idx]\n return list(feature_name)\n",
"step-3": "<mask token>\ndata = pd.read_csv('data/house_prices_multivariate.csv')\n\n\ndef select_from_model(dataframe):\n X = dataframe.iloc[:, :-1]\n y = dataframe.iloc[:, -1]\n np.random.seed(9)\n model = RandomForestClassifier()\n sfm = SelectFromModel(model)\n sfm = sfm.fit(X, y)\n feature_idx = sfm.get_support()\n feature_name = X.columns[feature_idx]\n return list(feature_name)\n",
"step-4": "from sklearn.feature_selection import SelectFromModel\nfrom sklearn.ensemble import RandomForestClassifier\nimport pandas as pd\nimport numpy as np\ndata = pd.read_csv('data/house_prices_multivariate.csv')\n\n\ndef select_from_model(dataframe):\n X = dataframe.iloc[:, :-1]\n y = dataframe.iloc[:, -1]\n np.random.seed(9)\n model = RandomForestClassifier()\n sfm = SelectFromModel(model)\n sfm = sfm.fit(X, y)\n feature_idx = sfm.get_support()\n feature_name = X.columns[feature_idx]\n return list(feature_name)\n",
"step-5": "# Default imports\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.ensemble import RandomForestClassifier\nimport pandas as pd\nimport numpy as np\n\ndata = pd.read_csv('data/house_prices_multivariate.csv')\n\n\n# Your solution code here\n\ndef select_from_model(dataframe):\n X = dataframe.iloc[:, :-1]\n y = dataframe.iloc[:, -1]\n np.random.seed(9)\n model = RandomForestClassifier()\n\n sfm = SelectFromModel(model)\n sfm = sfm.fit(X, y)\n\n feature_idx = sfm.get_support()\n feature_name = X.columns[feature_idx]\n\n return list(feature_name)\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
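For a quick end-to-end check of the step-5 solution in the record above, a minimal sketch; it assumes the data/house_prices_multivariate.csv referenced by the record is available locally with the target in its last column, and it mirrors the record's choice of a classifier for price data:

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel

data = pd.read_csv('data/house_prices_multivariate.csv')
X, y = data.iloc[:, :-1], data.iloc[:, -1]
np.random.seed(9)
sfm = SelectFromModel(RandomForestClassifier()).fit(X, y)
print(list(X.columns[sfm.get_support()]))  # the column names the selector keeps
print(sfm.threshold_)                      # the mean-importance cutoff applied

With the default threshold, SelectFromModel keeps exactly the columns whose feature_importances_ exceed the mean importance, which is what get_support() reports.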
<|reserved_special_token_0|>
class State(DocumentTemplate):
_key = ValueHashKey()
country: 'Country'
name: str
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Address(DocumentTemplate):
<|reserved_special_token_0|>
city: 'City'
coordinates: List['Coordinates']
postal_code: str
street: str
class Brewery(DocumentTemplate):
_key = RandomKey()
address_of: 'Address'
name: str
phone: str
type_of: 'Brewery_Type'
website_url: str
class Brewery_Type(EnumTemplate):
micro = ()
nano = ()
regional = ()
brewpub = ()
large = ()
planning = ()
bar = ()
contract = ()
proprietor = ()
closed = ()
taproom = ()
class City(DocumentTemplate):
_key = ValueHashKey()
name: str
state: 'State'
class Coordinates(DocumentTemplate):
_key = RandomKey()
latitude: float
longitude: float
class Country(DocumentTemplate):
_key = ValueHashKey()
name: str
class State(DocumentTemplate):
_key = ValueHashKey()
country: 'Country'
name: str
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Address(DocumentTemplate):
_subdocument = []
city: 'City'
coordinates: List['Coordinates']
postal_code: str
street: str
class Brewery(DocumentTemplate):
_key = RandomKey()
address_of: 'Address'
name: str
phone: str
type_of: 'Brewery_Type'
website_url: str
class Brewery_Type(EnumTemplate):
micro = ()
nano = ()
regional = ()
brewpub = ()
large = ()
planning = ()
bar = ()
contract = ()
proprietor = ()
closed = ()
taproom = ()
class City(DocumentTemplate):
_key = ValueHashKey()
name: str
state: 'State'
class Coordinates(DocumentTemplate):
_key = RandomKey()
latitude: float
longitude: float
class Country(DocumentTemplate):
_key = ValueHashKey()
name: str
class State(DocumentTemplate):
_key = ValueHashKey()
country: 'Country'
name: str
<|reserved_special_token_1|>
from typing import List
from terminusdb_client.woqlschema import DocumentTemplate, EnumTemplate, RandomKey, ValueHashKey
class Address(DocumentTemplate):
_subdocument = []
city: 'City'
coordinates: List['Coordinates']
postal_code: str
street: str
class Brewery(DocumentTemplate):
_key = RandomKey()
address_of: 'Address'
name: str
phone: str
type_of: 'Brewery_Type'
website_url: str
class Brewery_Type(EnumTemplate):
micro = ()
nano = ()
regional = ()
brewpub = ()
large = ()
planning = ()
bar = ()
contract = ()
proprietor = ()
closed = ()
taproom = ()
class City(DocumentTemplate):
_key = ValueHashKey()
name: str
state: 'State'
class Coordinates(DocumentTemplate):
_key = RandomKey()
latitude: float
longitude: float
class Country(DocumentTemplate):
_key = ValueHashKey()
name: str
class State(DocumentTemplate):
_key = ValueHashKey()
country: 'Country'
name: str
<|reserved_special_token_1|>
####
# This is the script for storing the schema of your TerminusDB
# database for your project.
# Use 'terminusdb commit' to commit changes to the database and
# use 'terminusdb sync' to change this file according to
# the existing database schema
####
from typing import List
from terminusdb_client.woqlschema import (
DocumentTemplate,
EnumTemplate,
RandomKey,
ValueHashKey,
)
class Address(DocumentTemplate):
_subdocument = []
city: "City"
coordinates: List["Coordinates"]
postal_code: str
street: str
class Brewery(DocumentTemplate):
_key = RandomKey()
address_of: "Address"
name: str
phone: str
type_of: "Brewery_Type"
website_url: str
class Brewery_Type(EnumTemplate):
micro = ()
nano = ()
regional = ()
brewpub = ()
large = ()
planning = ()
bar = ()
contract = ()
proprietor = ()
closed = ()
taproom = ()
class City(DocumentTemplate):
_key = ValueHashKey()
name: str
state: "State"
class Coordinates(DocumentTemplate):
_key = RandomKey()
latitude: float
longitude: float
class Country(DocumentTemplate):
_key = ValueHashKey()
name: str
class State(DocumentTemplate):
_key = ValueHashKey()
country: "Country"
name: str
|
flexible
|
{
"blob_id": "f702cdef3782ddc96244f3cf8e2026581d60baa9",
"index": 1537,
"step-1": "<mask token>\n\n\nclass State(DocumentTemplate):\n _key = ValueHashKey()\n country: 'Country'\n name: str\n",
"step-2": "<mask token>\n\n\nclass Address(DocumentTemplate):\n <mask token>\n city: 'City'\n coordinates: List['Coordinates']\n postal_code: str\n street: str\n\n\nclass Brewery(DocumentTemplate):\n _key = RandomKey()\n address_of: 'Address'\n name: str\n phone: str\n type_of: 'Brewery_Type'\n website_url: str\n\n\nclass Brewery_Type(EnumTemplate):\n micro = ()\n nano = ()\n regional = ()\n brewpub = ()\n large = ()\n planning = ()\n bar = ()\n contract = ()\n proprietor = ()\n closed = ()\n taproom = ()\n\n\nclass City(DocumentTemplate):\n _key = ValueHashKey()\n name: str\n state: 'State'\n\n\nclass Coordinates(DocumentTemplate):\n _key = RandomKey()\n latitude: float\n longitude: float\n\n\nclass Country(DocumentTemplate):\n _key = ValueHashKey()\n name: str\n\n\nclass State(DocumentTemplate):\n _key = ValueHashKey()\n country: 'Country'\n name: str\n",
"step-3": "<mask token>\n\n\nclass Address(DocumentTemplate):\n _subdocument = []\n city: 'City'\n coordinates: List['Coordinates']\n postal_code: str\n street: str\n\n\nclass Brewery(DocumentTemplate):\n _key = RandomKey()\n address_of: 'Address'\n name: str\n phone: str\n type_of: 'Brewery_Type'\n website_url: str\n\n\nclass Brewery_Type(EnumTemplate):\n micro = ()\n nano = ()\n regional = ()\n brewpub = ()\n large = ()\n planning = ()\n bar = ()\n contract = ()\n proprietor = ()\n closed = ()\n taproom = ()\n\n\nclass City(DocumentTemplate):\n _key = ValueHashKey()\n name: str\n state: 'State'\n\n\nclass Coordinates(DocumentTemplate):\n _key = RandomKey()\n latitude: float\n longitude: float\n\n\nclass Country(DocumentTemplate):\n _key = ValueHashKey()\n name: str\n\n\nclass State(DocumentTemplate):\n _key = ValueHashKey()\n country: 'Country'\n name: str\n",
"step-4": "from typing import List\nfrom terminusdb_client.woqlschema import DocumentTemplate, EnumTemplate, RandomKey, ValueHashKey\n\n\nclass Address(DocumentTemplate):\n _subdocument = []\n city: 'City'\n coordinates: List['Coordinates']\n postal_code: str\n street: str\n\n\nclass Brewery(DocumentTemplate):\n _key = RandomKey()\n address_of: 'Address'\n name: str\n phone: str\n type_of: 'Brewery_Type'\n website_url: str\n\n\nclass Brewery_Type(EnumTemplate):\n micro = ()\n nano = ()\n regional = ()\n brewpub = ()\n large = ()\n planning = ()\n bar = ()\n contract = ()\n proprietor = ()\n closed = ()\n taproom = ()\n\n\nclass City(DocumentTemplate):\n _key = ValueHashKey()\n name: str\n state: 'State'\n\n\nclass Coordinates(DocumentTemplate):\n _key = RandomKey()\n latitude: float\n longitude: float\n\n\nclass Country(DocumentTemplate):\n _key = ValueHashKey()\n name: str\n\n\nclass State(DocumentTemplate):\n _key = ValueHashKey()\n country: 'Country'\n name: str\n",
"step-5": "####\n# This is the script for storing the schema of your TerminusDB\n# database for your project.\n# Use 'terminusdb commit' to commit changes to the database and\n# use 'terminusdb sync' to change this file according to\n# the exsisting database schema\n####\n\nfrom typing import List\n\nfrom terminusdb_client.woqlschema import (\n DocumentTemplate,\n EnumTemplate,\n RandomKey,\n ValueHashKey,\n)\n\n\nclass Address(DocumentTemplate):\n _subdocument = []\n city: \"City\"\n coordinates: List[\"Coordinates\"]\n postal_code: str\n street: str\n\n\nclass Brewery(DocumentTemplate):\n _key = RandomKey()\n address_of: \"Address\"\n name: str\n phone: str\n type_of: \"Brewery_Type\"\n website_url: str\n\n\nclass Brewery_Type(EnumTemplate):\n micro = ()\n nano = ()\n regional = ()\n brewpub = ()\n large = ()\n planning = ()\n bar = ()\n contract = ()\n proprietor = ()\n closed = ()\n taproom = ()\n\n\nclass City(DocumentTemplate):\n _key = ValueHashKey()\n name: str\n state: \"State\"\n\n\nclass Coordinates(DocumentTemplate):\n _key = RandomKey()\n latitude: float\n longitude: float\n\n\nclass Country(DocumentTemplate):\n _key = ValueHashKey()\n name: str\n\n\nclass State(DocumentTemplate):\n _key = ValueHashKey()\n country: \"Country\"\n name: str\n",
"step-ids": [
2,
13,
14,
15,
16
]
}
|
[
2,
13,
14,
15,
16
] |
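A rough usage sketch for the schema templates in the record above; the endpoint, database name, and client calls are assumptions about typical terminusdb_client usage, not something the record states:

from terminusdb_client import WOQLClient

client = WOQLClient('http://localhost:6363')  # assumed local server
client.connect(db='brewery')                  # assumed database name

us = Country(name='United States')            # instances of the templates above
ca = State(country=us, name='California')
client.insert_document([us, ca])

Per the record's own header comments, the schema itself would first be pushed with 'terminusdb commit' before instance documents like these are inserted.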
import json
import boto3
import os
from helper import getEC2Regions, sendDataToSNS, OPTOUT_TAG, SNS_NOTIFICATION_IIAS_EC2
def getEC2FilteredRegionalInstanceInfo(region):
    ec2RegionalClient = boto3.client('ec2', region_name=region)
paginator = ec2RegionalClient.get_paginator('describe_instances')
page_iterator = paginator.paginate()
allEC2Instances = []
for result in page_iterator:
for reservation in result['Reservations']:
for instance in reservation['Instances']:
allEC2Instances.append({'InstanceId': instance['InstanceId'] , 'Tags': instance.get('Tags',[])})
return excludeOptedOutEC2Instances(allEC2Instances)
def isOptedOutEC2Instance(instanceInfo):
    # An instance is opted out of IIAS scope when it carries the opt-out tag set to 'True'.
    return any(d['Key'] == '{}'.format(OPTOUT_TAG) and d['Value'] == 'True' for d in instanceInfo['Tags'])

def excludeOptedOutEC2Instances(ec2Instances):
    filteredEC2InstanceIdList = []
    for instanceInfo in ec2Instances:
        if isOptedOutEC2Instance(instanceInfo):
            print('Excluding instance {}'.format(instanceInfo))
        else:
            filteredEC2InstanceIdList.append(instanceInfo['InstanceId'])
    return filteredEC2InstanceIdList
def gatherEC2Info():
regionList = getEC2Regions()
ec2RegionDict = {}
for region in regionList:
regionalInstances = getEC2FilteredRegionalInstanceInfo(region)
        if len(regionalInstances) > 0:
            ec2RegionDict[region] = regionalInstances
return ec2RegionDict
def handler(event, context):
ec2RegionalInfo = gatherEC2Info()
    if len(ec2RegionalInfo.keys()) != 0:
print('Sending following ec2 info for CW : {}'.format(ec2RegionalInfo))
messageAttributes = {
'notificationFor': {
'DataType': 'String',
'StringValue': SNS_NOTIFICATION_IIAS_EC2
}
}
sendDataToSNS(ec2RegionalInfo,messageAttributes)
else:
print('No new EC2 instances in IIAS scope')
|
normal
|
{
"blob_id": "d5f1601d11eb54e6c3dafab0137ec8f2358bb568",
"index": 4101,
"step-1": "<mask token>\n\n\ndef getEC2FilteredRegionalInstanceInfo(region):\n ec2RegionalClient = boto3.client('ec2', region_name=region)\n paginator = ec2RegionalClient.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n allEC2Instances = []\n for result in page_iterator:\n for reservation in result['Reservations']:\n for instance in reservation['Instances']:\n allEC2Instances.append({'InstanceId': instance['InstanceId'\n ], 'Tags': instance.get('Tags', [])})\n return excludeOptedOutEC2Instances(allEC2Instances)\n\n\ndef isOutputedOutEC2Instance(instanceInfo):\n if any(d['Key'] == '{}'.format(OPTOUT_TAG) and d['Value'] == 'True' for\n d in instanceInfo['Tags']):\n return True\n\n\n<mask token>\n\n\ndef handler(event, context):\n ec2RegionalInfo = gatherEC2Info()\n if len(ec2RegionalInfo.keys()) != 0:\n print('Sending following ec2 info for CW : {}'.format(ec2RegionalInfo))\n messageAttributes = {'notificationFor': {'DataType': 'String',\n 'StringValue': SNS_NOTIFICATION_IIAS_EC2}}\n sendDataToSNS(ec2RegionalInfo, messageAttributes)\n else:\n print('No new EC2 instances in IIAS scope')\n",
"step-2": "<mask token>\n\n\ndef getEC2FilteredRegionalInstanceInfo(region):\n ec2RegionalClient = boto3.client('ec2', region_name=region)\n paginator = ec2RegionalClient.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n allEC2Instances = []\n for result in page_iterator:\n for reservation in result['Reservations']:\n for instance in reservation['Instances']:\n allEC2Instances.append({'InstanceId': instance['InstanceId'\n ], 'Tags': instance.get('Tags', [])})\n return excludeOptedOutEC2Instances(allEC2Instances)\n\n\ndef isOutputedOutEC2Instance(instanceInfo):\n if any(d['Key'] == '{}'.format(OPTOUT_TAG) and d['Value'] == 'True' for\n d in instanceInfo['Tags']):\n return True\n\n\n<mask token>\n\n\ndef gatherEC2Info():\n regionList = getEC2Regions()\n ec2RegionDict = {}\n for region in regionList:\n regionalInstances = getEC2FilteredRegionalInstanceInfo(region)\n if len(regionalInstances) > 0:\n ec2RegionDict[region] = regionalInstances\n return ec2RegionDict\n\n\ndef handler(event, context):\n ec2RegionalInfo = gatherEC2Info()\n if len(ec2RegionalInfo.keys()) != 0:\n print('Sending following ec2 info for CW : {}'.format(ec2RegionalInfo))\n messageAttributes = {'notificationFor': {'DataType': 'String',\n 'StringValue': SNS_NOTIFICATION_IIAS_EC2}}\n sendDataToSNS(ec2RegionalInfo, messageAttributes)\n else:\n print('No new EC2 instances in IIAS scope')\n",
"step-3": "<mask token>\n\n\ndef getEC2FilteredRegionalInstanceInfo(region):\n ec2RegionalClient = boto3.client('ec2', region_name=region)\n paginator = ec2RegionalClient.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n allEC2Instances = []\n for result in page_iterator:\n for reservation in result['Reservations']:\n for instance in reservation['Instances']:\n allEC2Instances.append({'InstanceId': instance['InstanceId'\n ], 'Tags': instance.get('Tags', [])})\n return excludeOptedOutEC2Instances(allEC2Instances)\n\n\ndef isOutputedOutEC2Instance(instanceInfo):\n if any(d['Key'] == '{}'.format(OPTOUT_TAG) and d['Value'] == 'True' for\n d in instanceInfo['Tags']):\n return True\n\n\ndef excludeOptedOutEC2Instances(ec2Instances):\n filteredEC2InstanceIdList = []\n for instanceInfo in ec2Instances:\n if isOutputedOutEC2Instance(instanceInfo):\n print('Exlcuding instance {}'.format(instanceInfo))\n else:\n filteredEC2InstanceIdList.append(instanceInfo['InstanceId'])\n return filteredEC2InstanceIdList\n\n\ndef gatherEC2Info():\n regionList = getEC2Regions()\n ec2RegionDict = {}\n for region in regionList:\n regionalInstances = getEC2FilteredRegionalInstanceInfo(region)\n if len(regionalInstances) > 0:\n ec2RegionDict[region] = regionalInstances\n return ec2RegionDict\n\n\ndef handler(event, context):\n ec2RegionalInfo = gatherEC2Info()\n if len(ec2RegionalInfo.keys()) != 0:\n print('Sending following ec2 info for CW : {}'.format(ec2RegionalInfo))\n messageAttributes = {'notificationFor': {'DataType': 'String',\n 'StringValue': SNS_NOTIFICATION_IIAS_EC2}}\n sendDataToSNS(ec2RegionalInfo, messageAttributes)\n else:\n print('No new EC2 instances in IIAS scope')\n",
"step-4": "import json\nimport boto3\nimport os\nfrom helper import getEC2Regions, sendDataToSNS, OPTOUT_TAG, SNS_NOTIFICATION_IIAS_EC2\n\n\ndef getEC2FilteredRegionalInstanceInfo(region):\n ec2RegionalClient = boto3.client('ec2', region_name=region)\n paginator = ec2RegionalClient.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n allEC2Instances = []\n for result in page_iterator:\n for reservation in result['Reservations']:\n for instance in reservation['Instances']:\n allEC2Instances.append({'InstanceId': instance['InstanceId'\n ], 'Tags': instance.get('Tags', [])})\n return excludeOptedOutEC2Instances(allEC2Instances)\n\n\ndef isOutputedOutEC2Instance(instanceInfo):\n if any(d['Key'] == '{}'.format(OPTOUT_TAG) and d['Value'] == 'True' for\n d in instanceInfo['Tags']):\n return True\n\n\ndef excludeOptedOutEC2Instances(ec2Instances):\n filteredEC2InstanceIdList = []\n for instanceInfo in ec2Instances:\n if isOutputedOutEC2Instance(instanceInfo):\n print('Exlcuding instance {}'.format(instanceInfo))\n else:\n filteredEC2InstanceIdList.append(instanceInfo['InstanceId'])\n return filteredEC2InstanceIdList\n\n\ndef gatherEC2Info():\n regionList = getEC2Regions()\n ec2RegionDict = {}\n for region in regionList:\n regionalInstances = getEC2FilteredRegionalInstanceInfo(region)\n if len(regionalInstances) > 0:\n ec2RegionDict[region] = regionalInstances\n return ec2RegionDict\n\n\ndef handler(event, context):\n ec2RegionalInfo = gatherEC2Info()\n if len(ec2RegionalInfo.keys()) != 0:\n print('Sending following ec2 info for CW : {}'.format(ec2RegionalInfo))\n messageAttributes = {'notificationFor': {'DataType': 'String',\n 'StringValue': SNS_NOTIFICATION_IIAS_EC2}}\n sendDataToSNS(ec2RegionalInfo, messageAttributes)\n else:\n print('No new EC2 instances in IIAS scope')\n",
"step-5": "import json\nimport boto3\nimport os\nfrom helper import getEC2Regions, sendDataToSNS, OPTOUT_TAG, SNS_NOTIFICATION_IIAS_EC2\n\n\ndef getEC2FilteredRegionalInstanceInfo(region):\n ec2RegionalClient = boto3.client('ec2', region_name = region)\n paginator = ec2RegionalClient.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n allEC2Instances = []\n for result in page_iterator:\n for reservation in result['Reservations']:\n for instance in reservation['Instances']:\n allEC2Instances.append({'InstanceId': instance['InstanceId'] , 'Tags': instance.get('Tags',[])})\n return excludeOptedOutEC2Instances(allEC2Instances)\n\ndef isOutputedOutEC2Instance(instanceInfo):\n if any( (d['Key'] == '{}'.format(OPTOUT_TAG) and d['Value'] == 'True') for d in instanceInfo['Tags']):\n return True\n\n \ndef excludeOptedOutEC2Instances(ec2Instances):\n filteredEC2InstanceIdList = []\n for instanceInfo in ec2Instances:\n if isOutputedOutEC2Instance(instanceInfo):\n print('Exlcuding instance {}'.format(instanceInfo))\n else:\n filteredEC2InstanceIdList.append(instanceInfo['InstanceId'])\n return filteredEC2InstanceIdList\n \n \ndef gatherEC2Info():\n regionList = getEC2Regions()\n ec2RegionDict = {}\n for region in regionList:\n regionalInstances = getEC2FilteredRegionalInstanceInfo(region)\n if len(regionalInstances)>0:\n ec2RegionDict[region]=regionalInstances\n return ec2RegionDict\n\n\n \ndef handler(event, context):\n ec2RegionalInfo = gatherEC2Info()\n if len(ec2RegionalInfo.keys())!=0:\n print('Sending following ec2 info for CW : {}'.format(ec2RegionalInfo))\n messageAttributes = {\n 'notificationFor': {\n 'DataType': 'String',\n 'StringValue': SNS_NOTIFICATION_IIAS_EC2\n }\n }\n sendDataToSNS(ec2RegionalInfo,messageAttributes)\n else:\n print('No new EC2 instances in IIAS scope')",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#coding=utf-8
from django import template
from classytags.helpers import InclusionTag
from classytags.core import Tag, Options
from classytags.arguments import Argument
from ratings.models import RatedItem
from blogs.permissions import Permissions
class RatingBlock(InclusionTag):
name = 'rating'
template = 'ratings/rating.html'
options = Options(
Argument('obj', required=True),
)
def get_context(self, context, obj):
if not hasattr(obj, '_meta') or not hasattr(obj, 'pk'):
raise ValueError("Ожидался экземпляр django.models.Model, а получили %s." % type(obj))
can_vote = True
if 'user' in context and\
getattr(obj, 'permissions', None) and\
isinstance(obj.permissions, Permissions) and\
hasattr(obj.permissions, 'can_vote'):
can_vote = obj.permissions.can_vote(context['user'])
return {
'content_type': str(obj._meta),
'obj_pk': obj.pk,
'can_vote': can_vote,
'score': RatedItem.objects.score_for_obj(obj),
}
register = template.Library()
register.tag(RatingBlock)
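
# Illustrative template usage (hypothetical; the actual {% load %} name depends on
# this module's filename, which is not shown here):
#
#     {% load rating_tags %}
#     {% rating article %}
#
# InclusionTag then renders 'ratings/rating.html' with the context returned by
# get_context(), so `article` must be a saved Django model instance.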
|
normal
|
{
"blob_id": "1a05817c4c16f2d9234e504b0c98f9c9ae2dc3f7",
"index": 1525,
"step-1": "<mask token>\n\n\nclass RatingBlock(InclusionTag):\n name = 'rating'\n template = 'ratings/rating.html'\n options = Options(Argument('obj', required=True))\n\n def get_context(self, context, obj):\n if not hasattr(obj, '_meta') or not hasattr(obj, 'pk'):\n raise ValueError(\n 'Ожидался экземпляр django.models.Model, а получили %s.' %\n type(obj))\n can_vote = True\n if 'user' in context and getattr(obj, 'permissions', None\n ) and isinstance(obj.permissions, Permissions) and hasattr(obj.\n permissions, 'can_vote'):\n can_vote = obj.permissions.can_vote(context['user'])\n return {'content_type': str(obj._meta), 'obj_pk': obj.pk,\n 'can_vote': can_vote, 'score': RatedItem.objects.score_for_obj(obj)\n }\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RatingBlock(InclusionTag):\n name = 'rating'\n template = 'ratings/rating.html'\n options = Options(Argument('obj', required=True))\n\n def get_context(self, context, obj):\n if not hasattr(obj, '_meta') or not hasattr(obj, 'pk'):\n raise ValueError(\n 'Ожидался экземпляр django.models.Model, а получили %s.' %\n type(obj))\n can_vote = True\n if 'user' in context and getattr(obj, 'permissions', None\n ) and isinstance(obj.permissions, Permissions) and hasattr(obj.\n permissions, 'can_vote'):\n can_vote = obj.permissions.can_vote(context['user'])\n return {'content_type': str(obj._meta), 'obj_pk': obj.pk,\n 'can_vote': can_vote, 'score': RatedItem.objects.score_for_obj(obj)\n }\n\n\n<mask token>\nregister.tag(RatingBlock)\n",
"step-3": "<mask token>\n\n\nclass RatingBlock(InclusionTag):\n name = 'rating'\n template = 'ratings/rating.html'\n options = Options(Argument('obj', required=True))\n\n def get_context(self, context, obj):\n if not hasattr(obj, '_meta') or not hasattr(obj, 'pk'):\n raise ValueError(\n 'Ожидался экземпляр django.models.Model, а получили %s.' %\n type(obj))\n can_vote = True\n if 'user' in context and getattr(obj, 'permissions', None\n ) and isinstance(obj.permissions, Permissions) and hasattr(obj.\n permissions, 'can_vote'):\n can_vote = obj.permissions.can_vote(context['user'])\n return {'content_type': str(obj._meta), 'obj_pk': obj.pk,\n 'can_vote': can_vote, 'score': RatedItem.objects.score_for_obj(obj)\n }\n\n\nregister = template.Library()\nregister.tag(RatingBlock)\n",
"step-4": "from django import template\nfrom classytags.helpers import InclusionTag\nfrom classytags.core import Tag, Options\nfrom classytags.arguments import Argument\nfrom ratings.models import RatedItem\nfrom blogs.permissions import Permissions\n\n\nclass RatingBlock(InclusionTag):\n name = 'rating'\n template = 'ratings/rating.html'\n options = Options(Argument('obj', required=True))\n\n def get_context(self, context, obj):\n if not hasattr(obj, '_meta') or not hasattr(obj, 'pk'):\n raise ValueError(\n 'Ожидался экземпляр django.models.Model, а получили %s.' %\n type(obj))\n can_vote = True\n if 'user' in context and getattr(obj, 'permissions', None\n ) and isinstance(obj.permissions, Permissions) and hasattr(obj.\n permissions, 'can_vote'):\n can_vote = obj.permissions.can_vote(context['user'])\n return {'content_type': str(obj._meta), 'obj_pk': obj.pk,\n 'can_vote': can_vote, 'score': RatedItem.objects.score_for_obj(obj)\n }\n\n\nregister = template.Library()\nregister.tag(RatingBlock)\n",
"step-5": "#coding=utf-8\nfrom django import template\n\nfrom classytags.helpers import InclusionTag\nfrom classytags.core import Tag, Options\nfrom classytags.arguments import Argument\n\nfrom ratings.models import RatedItem\nfrom blogs.permissions import Permissions\n\nclass RatingBlock(InclusionTag):\n name = 'rating'\n template = 'ratings/rating.html'\n\n options = Options(\n Argument('obj', required=True),\n )\n\n def get_context(self, context, obj):\n if not hasattr(obj, '_meta') or not hasattr(obj, 'pk'):\n raise ValueError(\"Ожидался экземпляр django.models.Model, а получили %s.\" % type(obj))\n can_vote = True\n if 'user' in context and\\\n getattr(obj, 'permissions', None) and\\\n isinstance(obj.permissions, Permissions) and\\\n hasattr(obj.permissions, 'can_vote'):\n can_vote = obj.permissions.can_vote(context['user'])\n\n\n return {\n 'content_type': str(obj._meta),\n 'obj_pk': obj.pk,\n 'can_vote': can_vote,\n 'score': RatedItem.objects.score_for_obj(obj),\n }\n\n\nregister = template.Library()\nregister.tag(RatingBlock)",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from pynput.keyboard import Listener
import logging
import daemon
import socket
import thread
logging.basicConfig(format="%(asctime)s:%(message)s")
file_logger = logging.FileHandler("/home/user0308/logger.log", "a")
logger = logging.getLogger()
logger.addHandler(file_logger)
logger.setLevel(logging.DEBUG)
def press(key):
logging.info(key)
def work():
with Listener(on_press = press) as listener:
listener.join()
with daemon.DaemonContext(files_preserve=[file_logger.stream.fileno()]):
work()
|
normal
|
{
"blob_id": "3dc2d9a5e37ce1f546c0478de5a0bb777238ad00",
"index": 4306,
"step-1": "<mask token>\n\n\ndef press(key):\n logging.info(key)\n\n\ndef work():\n with Listener(on_press=press) as listener:\n listener.join()\n\n\n<mask token>\n",
"step-2": "<mask token>\nlogging.basicConfig(format='%(asctime)s:%(message)s')\n<mask token>\nlogger.addHandler(file_logger)\nlogger.setLevel(logging.DEBUG)\n\n\ndef press(key):\n logging.info(key)\n\n\ndef work():\n with Listener(on_press=press) as listener:\n listener.join()\n\n\nwith daemon.DaemonContext(files_preserve=[file_logger.stream.fileno()]):\n work()\n",
"step-3": "<mask token>\nlogging.basicConfig(format='%(asctime)s:%(message)s')\nfile_logger = logging.FileHandler('/home/user0308/logger.log', 'a')\nlogger = logging.getLogger()\nlogger.addHandler(file_logger)\nlogger.setLevel(logging.DEBUG)\n\n\ndef press(key):\n logging.info(key)\n\n\ndef work():\n with Listener(on_press=press) as listener:\n listener.join()\n\n\nwith daemon.DaemonContext(files_preserve=[file_logger.stream.fileno()]):\n work()\n",
"step-4": "from pynput.keyboard import Listener\nimport logging\nimport daemon\nimport socket\nimport thread\nlogging.basicConfig(format='%(asctime)s:%(message)s')\nfile_logger = logging.FileHandler('/home/user0308/logger.log', 'a')\nlogger = logging.getLogger()\nlogger.addHandler(file_logger)\nlogger.setLevel(logging.DEBUG)\n\n\ndef press(key):\n logging.info(key)\n\n\ndef work():\n with Listener(on_press=press) as listener:\n listener.join()\n\n\nwith daemon.DaemonContext(files_preserve=[file_logger.stream.fileno()]):\n work()\n",
"step-5": "from pynput.keyboard import Listener\nimport logging\nimport daemon\nimport socket\nimport thread\n\nlogging.basicConfig(format=\"%(asctime)s:%(message)s\")\nfile_logger = logging.FileHandler(\"/home/user0308/logger.log\", \"a\")\nlogger = logging.getLogger()\nlogger.addHandler(file_logger)\nlogger.setLevel(logging.DEBUG)\n\n\ndef press(key):\n logging.info(key)\n\ndef work():\n\twith Listener(on_press = press) as listener:\n \tlistener.join()\n\nwith daemon.DaemonContext(files_preserve=[file_logger.stream.fileno()]):\n\t\twork()\t\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.fixture(scope='session', autouse=True)
def set_up(request):
""" conftest.py set_up - the first to start.... """
print('\nSETUP before all tests')
request.addfinalizer(tear_down)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def tear_down():
""" conftest.py tear_down - the last to go.... """
print('\nTEARDOWN after all tests')
@pytest.fixture(scope='session', autouse=True)
def set_up(request):
""" conftest.py set_up - the first to start.... """
print('\nSETUP before all tests')
request.addfinalizer(tear_down)
<|reserved_special_token_1|>
import pytest
import requests
def tear_down():
""" conftest.py tear_down - the last to go.... """
print('\nTEARDOWN after all tests')
@pytest.fixture(scope='session', autouse=True)
def set_up(request):
""" conftest.py set_up - the first to start.... """
print('\nSETUP before all tests')
request.addfinalizer(tear_down)
<|reserved_special_token_1|>
# content of conftest.py
# adapted from http://pytest.org/latest/example/special.html
import pytest
import requests
def tear_down():
''' conftest.py tear_down - the last to go.... '''
print("\nTEARDOWN after all tests")
@pytest.fixture(scope="session", autouse=True)
def set_up(request):
''' conftest.py set_up - the first to start.... '''
print("\nSETUP before all tests")
request.addfinalizer(tear_down)
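
# Illustrative companion test (hypothetical; not part of the original conftest.py).
# Because set_up is session-scoped and autouse=True, it runs once before the first
# test and registers tear_down to run once after the last one, e.g.:
#
#     # test_smoke.py
#     def test_smoke():
#         assert True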
|
flexible
|
{
"blob_id": "816b1a932208a4525230dd886adf8c67dec3af3e",
"index": 349,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@pytest.fixture(scope='session', autouse=True)\ndef set_up(request):\n \"\"\" conftest.py set_up - the first to start.... \"\"\"\n print('\\nSETUP before all tests')\n request.addfinalizer(tear_down)\n",
"step-3": "<mask token>\n\n\ndef tear_down():\n \"\"\" conftest.py tear_down - the last to go.... \"\"\"\n print('\\nTEARDOWN after all tests')\n\n\n@pytest.fixture(scope='session', autouse=True)\ndef set_up(request):\n \"\"\" conftest.py set_up - the first to start.... \"\"\"\n print('\\nSETUP before all tests')\n request.addfinalizer(tear_down)\n",
"step-4": "import pytest\nimport requests\n\n\ndef tear_down():\n \"\"\" conftest.py tear_down - the last to go.... \"\"\"\n print('\\nTEARDOWN after all tests')\n\n\n@pytest.fixture(scope='session', autouse=True)\ndef set_up(request):\n \"\"\" conftest.py set_up - the first to start.... \"\"\"\n print('\\nSETUP before all tests')\n request.addfinalizer(tear_down)\n",
"step-5": "# content of conftest.py\n# adapted from http://pytest.org/latest/example/special.html\n\nimport pytest\nimport requests\n\ndef tear_down():\n ''' conftest.py tear_down - the last to go.... '''\n print(\"\\nTEARDOWN after all tests\")\n \n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef set_up(request):\n ''' conftest.py set_up - the first to start.... '''\n\n print(\"\\nSETUP before all tests\")\n request.addfinalizer(tear_down)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
from dataclasses import dataclass
from dotenv import load_dotenv
from fastapi.security import OAuth2PasswordBearer
from passlib.context import CryptContext
load_dotenv()
@dataclass
class Settings:
SECRET_KEY = os.getenv("SECRET_KEY", "mysecret")
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 30
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
settings = Settings()
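
# Illustrative usage (not part of the original module): the CryptContext configured
# above hashes and verifies passwords, e.g. during signup and login.
if __name__ == "__main__":
    hashed = settings.pwd_context.hash("example-password")          # bcrypt hash
    print(settings.pwd_context.verify("example-password", hashed))  # True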
|
normal
|
{
"blob_id": "a1c5d86a3f042d9e5ba522726191c8aeb9b738ed",
"index": 8018,
"step-1": "<mask token>\n\n\n@dataclass\nclass Settings:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\nload_dotenv()\n\n\n@dataclass\nclass Settings:\n SECRET_KEY = os.getenv('SECRET_KEY', 'mysecret')\n ALGORITHM = 'HS256'\n ACCESS_TOKEN_EXPIRE_MINUTES = 30\n pwd_context = CryptContext(schemes=['bcrypt'], deprecated='auto')\n oauth2_scheme = OAuth2PasswordBearer(tokenUrl='token')\n\n\n<mask token>\n",
"step-3": "<mask token>\nload_dotenv()\n\n\n@dataclass\nclass Settings:\n SECRET_KEY = os.getenv('SECRET_KEY', 'mysecret')\n ALGORITHM = 'HS256'\n ACCESS_TOKEN_EXPIRE_MINUTES = 30\n pwd_context = CryptContext(schemes=['bcrypt'], deprecated='auto')\n oauth2_scheme = OAuth2PasswordBearer(tokenUrl='token')\n\n\nsettings = Settings()\n",
"step-4": "import os\nfrom dataclasses import dataclass\nfrom dotenv import load_dotenv\nfrom fastapi.security import OAuth2PasswordBearer\nfrom passlib.context import CryptContext\nload_dotenv()\n\n\n@dataclass\nclass Settings:\n SECRET_KEY = os.getenv('SECRET_KEY', 'mysecret')\n ALGORITHM = 'HS256'\n ACCESS_TOKEN_EXPIRE_MINUTES = 30\n pwd_context = CryptContext(schemes=['bcrypt'], deprecated='auto')\n oauth2_scheme = OAuth2PasswordBearer(tokenUrl='token')\n\n\nsettings = Settings()\n",
"step-5": "import os\nfrom dataclasses import dataclass\n\nfrom dotenv import load_dotenv\nfrom fastapi.security import OAuth2PasswordBearer\nfrom passlib.context import CryptContext\n\nload_dotenv()\n\n\n@dataclass\nclass Settings:\n SECRET_KEY = os.getenv(\"SECRET_KEY\", \"mysecret\")\n ALGORITHM = \"HS256\"\n ACCESS_TOKEN_EXPIRE_MINUTES = 30\n\n pwd_context = CryptContext(schemes=[\"bcrypt\"], deprecated=\"auto\")\n oauth2_scheme = OAuth2PasswordBearer(tokenUrl=\"token\")\n\n\nsettings = Settings()\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
def solution(name):
Len = len(name)
nameList = [name[i] for i in range(Len)]
nameField = ['A' for i in range(Len)]
answer = 0
    # forward-only pass
for i in range(Len):
a = ord(nameField[i])
b = ord(nameList[i])
        if b-a <= 13 : # within half the alphabet: just add the distance
            answer += b-a
        else : # more than half: wrap around, so 26 minus the distance
            answer += 26 - (b-a)
        nameField[i] = nameList[i] # the target name may already contain "A"s,
        if nameField == nameList : # so update this slot and compare the whole name
            break
        answer +=1 # movement cost ++
dap = answer
    # forward pass + backward pass
t = (int)(Len/2)
    for i in range(t): # 0 .. length/2
nameField = ['A' for i in range(Len)]
answer = i
        for j in range(i+1): # forward
a = ord(nameField[j])
b = ord(nameList[j])
if b-a <= 13 :
answer += b-a
else :
answer += 26 - (b-a)
nameField[j] = nameList[j]
if nameField == nameList :
break
answer +=1
        for j in range(Len-1,i,-1): # backward
a = ord(nameField[j])
b = ord(nameList[j])
if b-a <= 13 :
answer += b-a
else :
answer += 26 - (b-a)
nameField[j] = nameList[j]
if nameField == nameList :
break
answer +=1
dap = min(dap,answer)
return dap
'''
Lots of duplicated code that I wanted to pull into a helper function... pass!
Compare the cost of the pure forward pass with
the cost of going forward only up to 0..length/2 and then sweeping backward,
checking at every step whether the desired name has been completed.
Find the minimum cost and return it!!
'''
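
# Illustrative sanity check (hypothetical input; not part of the original submission).
# For "JAZ" the alphabet cost is 9 (A->J) + 0 + 1 (Z is one step back from A) and the
# cheapest cursor route is a single move left, so the expected total is 11.
# print(solution("JAZ"))  # -> 11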
|
normal
|
{
"blob_id": "8766003a85b1ed83927988df147b0b3004cb91f9",
"index": 7691,
"step-1": "<mask token>\n",
"step-2": "def solution(name):\n Len = len(name)\n nameList = [name[i] for i in range(Len)]\n nameField = ['A' for i in range(Len)]\n answer = 0\n for i in range(Len):\n a = ord(nameField[i])\n b = ord(nameList[i])\n if b - a <= 13:\n answer += b - a\n else:\n answer += 26 - (b - a)\n nameField[i] = nameList[i]\n if nameField == nameList:\n break\n answer += 1\n dap = answer\n t = int(Len / 2)\n for i in range(t):\n nameField = ['A' for i in range(Len)]\n answer = i\n for j in range(i + 1):\n a = ord(nameField[j])\n b = ord(nameList[j])\n if b - a <= 13:\n answer += b - a\n else:\n answer += 26 - (b - a)\n nameField[j] = nameList[j]\n if nameField == nameList:\n break\n answer += 1\n for j in range(Len - 1, i, -1):\n a = ord(nameField[j])\n b = ord(nameList[j])\n if b - a <= 13:\n answer += b - a\n else:\n answer += 26 - (b - a)\n nameField[j] = nameList[j]\n if nameField == nameList:\n break\n answer += 1\n dap = min(dap, answer)\n return dap\n\n\n<mask token>\n",
"step-3": "def solution(name):\n Len = len(name)\n nameList = [name[i] for i in range(Len)]\n nameField = ['A' for i in range(Len)]\n answer = 0\n \n # 정방향\n for i in range(Len):\n a = ord(nameField[i])\n b = ord(nameList[i])\n if b-a <= 13 : # 절반 이하면 그냥 더하고\n answer += b-a\n else : # 절반 넘으면 26에서 빼기\n answer += 26 - (b-a)\n \n nameField[i] = nameList[i] # name \"A\"가 들어간게 있을수도 있으니\n if nameField == nameList : # 값 바꿔주고 전체 체크!!\n break\n\n answer +=1 # 이동가중치 ++\n dap = answer\n \n # 정방향 + 역방향\n t = (int)(Len/2)\n for i in range(t): # 0~전체길이/2\n nameField = ['A' for i in range(Len)]\n answer = i\n \n for j in range(i+1): #정방향\n a = ord(nameField[j])\n b = ord(nameList[j])\n if b-a <= 13 :\n answer += b-a\n else :\n answer += 26 - (b-a)\n\n nameField[j] = nameList[j]\n if nameField == nameList :\n break\n\n answer +=1\n \n\n for j in range(Len-1,i,-1): #역방향\n a = ord(nameField[j])\n b = ord(nameList[j])\n if b-a <= 13 :\n answer += b-a\n else :\n answer += 26 - (b-a)\n\n nameField[j] = nameList[j]\n if nameField == nameList :\n break\n\n answer +=1\n \n dap = min(dap,answer)\n \n return dap\n\n\n'''\n중복코드로 많아 함수로 빼고싶었지만..패쓰!\n\n정방향의 가중치와\n정방향으로 0~길이/2 만큼까지 가고 + 역방향 가면서\n원하는 name만들어졌는지 계속 체크!\n\n최소가중치를 구해서 출력!!\n'''",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def inverse_rescale(y):
return tf.round(tf.multiply(tf.add(y, 1), 127.5))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def data_rescale(x):
return tf.subtract(tf.divide(x, 127.5), 1)
def inverse_rescale(y):
return tf.round(tf.multiply(tf.add(y, 1), 127.5))
<|reserved_special_token_1|>
import tensorflow as tf
def data_rescale(x):
return tf.subtract(tf.divide(x, 127.5), 1)
def inverse_rescale(y):
return tf.round(tf.multiply(tf.add(y, 1), 127.5))
|
flexible
|
{
"blob_id": "1a09b38838f40c4c6049da8e6a72ba3d56806c07",
"index": 3703,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef inverse_rescale(y):\n return tf.round(tf.multiply(tf.add(y, 1), 127.5))\n",
"step-3": "<mask token>\n\n\ndef data_rescale(x):\n return tf.subtract(tf.divide(x, 127.5), 1)\n\n\ndef inverse_rescale(y):\n return tf.round(tf.multiply(tf.add(y, 1), 127.5))\n",
"step-4": "import tensorflow as tf\n\n\ndef data_rescale(x):\n return tf.subtract(tf.divide(x, 127.5), 1)\n\n\ndef inverse_rescale(y):\n return tf.round(tf.multiply(tf.add(y, 1), 127.5))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class coreGetHome(TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_200_template_home(self):
self.assertEqual(200, self.resp.status_code)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class coreGetHome(TestCase):
def setUp(self):
self.resp = self.client.get(r('core:core_home'))
<|reserved_special_token_0|>
def test_200_template_home(self):
self.assertEqual(200, self.resp.status_code)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class coreGetHome(TestCase):
def setUp(self):
self.resp = self.client.get(r('core:core_home'))
def test_template_home(self):
self.assertTemplateUsed(self.resp, 'index.html')
def test_200_template_home(self):
self.assertEqual(200, self.resp.status_code)
<|reserved_special_token_1|>
from django.shortcuts import resolve_url as r
from django.test import TestCase
class coreGetHome(TestCase):
def setUp(self):
self.resp = self.client.get(r('core:core_home'))
def test_template_home(self):
self.assertTemplateUsed(self.resp, 'index.html')
def test_200_template_home(self):
self.assertEqual(200, self.resp.status_code)
|
flexible
|
{
"blob_id": "d20e41dd7054ff133be264bebf13e4e218710ae5",
"index": 933,
"step-1": "<mask token>\n\n\nclass coreGetHome(TestCase):\n <mask token>\n <mask token>\n\n def test_200_template_home(self):\n self.assertEqual(200, self.resp.status_code)\n",
"step-2": "<mask token>\n\n\nclass coreGetHome(TestCase):\n\n def setUp(self):\n self.resp = self.client.get(r('core:core_home'))\n <mask token>\n\n def test_200_template_home(self):\n self.assertEqual(200, self.resp.status_code)\n",
"step-3": "<mask token>\n\n\nclass coreGetHome(TestCase):\n\n def setUp(self):\n self.resp = self.client.get(r('core:core_home'))\n\n def test_template_home(self):\n self.assertTemplateUsed(self.resp, 'index.html')\n\n def test_200_template_home(self):\n self.assertEqual(200, self.resp.status_code)\n",
"step-4": "from django.shortcuts import resolve_url as r\nfrom django.test import TestCase\n\n\nclass coreGetHome(TestCase):\n\n def setUp(self):\n self.resp = self.client.get(r('core:core_home'))\n\n def test_template_home(self):\n self.assertTemplateUsed(self.resp, 'index.html')\n\n def test_200_template_home(self):\n self.assertEqual(200, self.resp.status_code)\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class QueueOutputMJPEG(object):
def __init__(self, queue, finished):
self.queue = queue
self.finished = finished
self.stream = io.BytesIO()
def write(self, buf):
if buf.startswith(b'\xff\xd8'):
size = self.stream.tell()
if size:
self.stream.seek(0)
if self.queue.empty():
self.queue.put(self.stream.read(size))
self.stream.seek(0)
self.stream.write(buf)
def flush(self):
self.queue.close()
self.queue.join_thread()
self.finished.set()
class QueueOutputH264(object):
def __init__(self, queue, finished):
self.queue = queue
self.finished = finished
self.stream = io.BytesIO()
def write(self, buf):
if True:
size = self.stream.tell()
if size:
self.stream.seek(0)
if self.queue.empty():
self.queue.put(self.stream.read(size))
self.stream.seek(0)
self.stream.write(buf)
def flush(self):
self.queue.close()
self.queue.join_thread()
self.finished.set()
<|reserved_special_token_0|>
class StreamingHandler(server.BaseHTTPRequestHandler):
def do_GET(self):
if '/data.html' in self.path:
strprops = 'ffffd9'
if not self.server.DetectQueue.empty():
props = self.server.DetectQueue.get(False)
pcnt = 0
for prop in props:
strprops += (
'Coord = ({0:4f}, {1:4f}, {2:4f}, {3:4f}. ID = {4:d}\n'
.format(prop['coord'][0], prop['coord'][1], prop[
'coord'][2], prop['coord'][3], pcnt))
pcnt += 1
strprops += 'ffaaee'
content = strprops.encode('utf-8')
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(content))
self.end_headers()
self.wfile.write(content)
elif '/stream.mjpg' in self.path:
self.send_response(200)
self.send_header('Age', 0)
self.send_header('Cache-Control', 'no-cache, private')
self.send_header('Pragma', 'no-cache')
self.send_header('Content-Type',
'multipart/x-mixed-replace; boundary=FRAME')
self.end_headers()
while self.server.MJPEGQueue.empty():
pass
buf = io.BytesIO(self.server.MJPEGQueue.get())
try:
st = time.monotonic()
cnt = 1
fps = 0
ospid = os.getpid()
while True:
if not self.server.MJPEGQueue.empty():
buf = io.BytesIO(self.server.MJPEGQueue.get(False))
if cnt >= 20:
fps = cnt / (time.monotonic() - st)
st = time.monotonic()
cnt = 1
print('%d: Streaming MJPEG at %dFPS' % (ospid, fps)
)
else:
cnt += 1
self.wfile.write(b'--FRAME\r\n')
self.send_header('Content-Type', 'image/jpeg')
self.send_header('Content-Length', len(buf.getvalue()))
self.end_headers()
self.wfile.write(buf.getvalue())
self.wfile.write(b'\r\r')
except Exception as e:
print('Removed streaming clients from MJPEG %s: %s', self.
client_address, str(e))
else:
try:
st2 = time.monotonic()
cnt2 = 1
fps2 = 0
ospid2 = os.getpid()
while True:
if not self.server.H264Queue.empty():
frame = io.BytesIO(self.server.H264Queue.get(False))
buf = frame
if cnt2 >= 20:
fps2 = cnt2 / (time.monotonic() - st2)
st2 = time.monotonic()
cnt2 = 1
print('%d: Streaming H264 at %dFPS' % (ospid2,
fps2))
else:
cnt2 += 1
self.wfile.write(buf.getvalue())
except Exception as e:
print('Removed streaming clients from H264 %s: %s', self.
client_address, str(e))
class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):
allow_reuse_address = True
daemon_threads = True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class QueueOutputMJPEG(object):
def __init__(self, queue, finished):
self.queue = queue
self.finished = finished
self.stream = io.BytesIO()
def write(self, buf):
if buf.startswith(b'\xff\xd8'):
size = self.stream.tell()
if size:
self.stream.seek(0)
if self.queue.empty():
self.queue.put(self.stream.read(size))
self.stream.seek(0)
self.stream.write(buf)
def flush(self):
self.queue.close()
self.queue.join_thread()
self.finished.set()
class QueueOutputH264(object):
def __init__(self, queue, finished):
self.queue = queue
self.finished = finished
self.stream = io.BytesIO()
def write(self, buf):
if True:
size = self.stream.tell()
if size:
self.stream.seek(0)
if self.queue.empty():
self.queue.put(self.stream.read(size))
self.stream.seek(0)
self.stream.write(buf)
def flush(self):
self.queue.close()
self.queue.join_thread()
self.finished.set()
<|reserved_special_token_0|>
class StreamingHandler(server.BaseHTTPRequestHandler):
def do_GET(self):
if '/data.html' in self.path:
strprops = 'ffffd9'
if not self.server.DetectQueue.empty():
props = self.server.DetectQueue.get(False)
pcnt = 0
for prop in props:
strprops += (
'Coord = ({0:4f}, {1:4f}, {2:4f}, {3:4f}. ID = {4:d}\n'
.format(prop['coord'][0], prop['coord'][1], prop[
'coord'][2], prop['coord'][3], pcnt))
pcnt += 1
strprops += 'ffaaee'
content = strprops.encode('utf-8')
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(content))
self.end_headers()
self.wfile.write(content)
elif '/stream.mjpg' in self.path:
self.send_response(200)
self.send_header('Age', 0)
self.send_header('Cache-Control', 'no-cache, private')
self.send_header('Pragma', 'no-cache')
self.send_header('Content-Type',
'multipart/x-mixed-replace; boundary=FRAME')
self.end_headers()
while self.server.MJPEGQueue.empty():
pass
buf = io.BytesIO(self.server.MJPEGQueue.get())
try:
st = time.monotonic()
cnt = 1
fps = 0
ospid = os.getpid()
while True:
if not self.server.MJPEGQueue.empty():
buf = io.BytesIO(self.server.MJPEGQueue.get(False))
if cnt >= 20:
fps = cnt / (time.monotonic() - st)
st = time.monotonic()
cnt = 1
print('%d: Streaming MJPEG at %dFPS' % (ospid, fps)
)
else:
cnt += 1
self.wfile.write(b'--FRAME\r\n')
self.send_header('Content-Type', 'image/jpeg')
self.send_header('Content-Length', len(buf.getvalue()))
self.end_headers()
self.wfile.write(buf.getvalue())
self.wfile.write(b'\r\r')
except Exception as e:
print('Removed streaming clients from MJPEG %s: %s', self.
client_address, str(e))
else:
try:
st2 = time.monotonic()
cnt2 = 1
fps2 = 0
ospid2 = os.getpid()
while True:
if not self.server.H264Queue.empty():
frame = io.BytesIO(self.server.H264Queue.get(False))
buf = frame
if cnt2 >= 20:
fps2 = cnt2 / (time.monotonic() - st2)
st2 = time.monotonic()
cnt2 = 1
print('%d: Streaming H264 at %dFPS' % (ospid2,
fps2))
else:
cnt2 += 1
self.wfile.write(buf.getvalue())
except Exception as e:
print('Removed streaming clients from H264 %s: %s', self.
client_address, str(e))
class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):
allow_reuse_address = True
daemon_threads = True
def server_start(MJPEGQueue, H264Queue, DetectQueue, port, servstop):
try:
address = '', port
server = StreamingServer(address, StreamingHandler)
server.MJPEGQueue = MJPEGQueue
server.DetectQueue = DetectQueue
server.H264Queue = H264Queue
print('Started server')
server.serve_forever()
finally:
servstop.set()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class QueueOutputMJPEG(object):
def __init__(self, queue, finished):
self.queue = queue
self.finished = finished
self.stream = io.BytesIO()
def write(self, buf):
if buf.startswith(b'\xff\xd8'):
size = self.stream.tell()
if size:
self.stream.seek(0)
if self.queue.empty():
self.queue.put(self.stream.read(size))
self.stream.seek(0)
self.stream.write(buf)
def flush(self):
self.queue.close()
self.queue.join_thread()
self.finished.set()
class QueueOutputH264(object):
def __init__(self, queue, finished):
self.queue = queue
self.finished = finished
self.stream = io.BytesIO()
def write(self, buf):
if True:
size = self.stream.tell()
if size:
self.stream.seek(0)
if self.queue.empty():
self.queue.put(self.stream.read(size))
self.stream.seek(0)
self.stream.write(buf)
def flush(self):
self.queue.close()
self.queue.join_thread()
self.finished.set()
def do_capture(queueH264, queueMJPEG, stopCap):
print('Capture started')
with picamera.PiCamera(sensor_mode=2) as camera:
camera.resolution = 1280, 720
camera.framerate = 15
camera.video_stabilization = True
camera.video_denoise = True
camera.vflip = True
camera.sharpness = 20
camera.meter_mode = 'matrix'
camera.awb_mode = 'auto'
camera.saturation = 2
camera.contrast = 10
camera.drc_strength = 'high'
camera.exposure_mode = 'antishake'
camera.exposure_compensation = 3
outputH264 = QueueOutputH264(queueH264, stopCap)
outputMJPEG = QueueOutputMJPEG(queueMJPEG, stopCap)
camera.start_recording(outputH264, format='h264', profile='high',
intra_period=30, sps_timing=True, bitrate=4000000, quality=25,
resize=(420, 234))
camera.start_recording(outputMJPEG, splitter_port=2, format='mjpeg',
resize=(672, 384))
while not stopCap.wait(0):
pass
camera.stop_recording(splitter_port=2)
camera.stop_recording()
time.sleep(0.2)
camera.close()
def do_detection(ImageQueue, RectQueue, finished):
net = cv2.dnn.readNet('pedestrian-detection-adas-002.xml',
'pedestrian-detection-adas-002.bin')
net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)
st = time.monotonic()
cnt = 1
fps = 0
FutureOuts = []
ospid = os.getpid()
while not finished.wait(0):
stream = None
try:
stream = io.BytesIO(ImageQueue.get(False))
except:
pass
if len(FutureOuts) == 3:
stream = None
if not stream is None:
stream.seek(0)
try:
image = Image.open(stream).convert('RGB')
except:
pass
cv_img = np.array(image)
cv_img = cv_img[:, :, ::-1].copy()
blob = cv2.dnn.blobFromImage(cv_img, 1.0, size=(672, 384), mean
=(127.5, 127.5, 127.5), swapRB=False, crop=False)
net.setInput(blob)
FutureOuts.append(net.forwardAsync())
while FutureOuts and FutureOuts[0].wait_for(0):
out1 = FutureOuts[0].get()
if cnt >= 20:
fps = cnt / (time.monotonic() - st)
st = time.monotonic()
cnt = 1
print('%d: Detecting at %FPS' % (ospid, fps))
else:
cnt += 1
props = []
for detection in out1.reshape(-1, 7):
inf = []
obj_type = int(detection[1] - 1)
conf = float(detection[2])
xmin = float(detection[3])
ymin = float(detection[4])
xmax = float(detection[5])
ymax = float(detection[6])
if conf > 0.6:
prop = {'coord': (xmin, ymin, xmax, ymax), 'type':
obj_type, 'conf': conf}
props.append(prop)
if RectQueue.empty():
RectQueue.put(props)
del FutureOuts[0]
class StreamingHandler(server.BaseHTTPRequestHandler):
def do_GET(self):
if '/data.html' in self.path:
strprops = 'ffffd9'
if not self.server.DetectQueue.empty():
props = self.server.DetectQueue.get(False)
pcnt = 0
for prop in props:
strprops += (
'Coord = ({0:4f}, {1:4f}, {2:4f}, {3:4f}. ID = {4:d}\n'
.format(prop['coord'][0], prop['coord'][1], prop[
'coord'][2], prop['coord'][3], pcnt))
pcnt += 1
strprops += 'ffaaee'
content = strprops.encode('utf-8')
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(content))
self.end_headers()
self.wfile.write(content)
elif '/stream.mjpg' in self.path:
self.send_response(200)
self.send_header('Age', 0)
self.send_header('Cache-Control', 'no-cache, private')
self.send_header('Pragma', 'no-cache')
self.send_header('Content-Type',
'multipart/x-mixed-replace; boundary=FRAME')
self.end_headers()
while self.server.MJPEGQueue.empty():
pass
buf = io.BytesIO(self.server.MJPEGQueue.get())
try:
st = time.monotonic()
cnt = 1
fps = 0
ospid = os.getpid()
while True:
if not self.server.MJPEGQueue.empty():
buf = io.BytesIO(self.server.MJPEGQueue.get(False))
if cnt >= 20:
fps = cnt / (time.monotonic() - st)
st = time.monotonic()
cnt = 1
print('%d: Streaming MJPEG at %dFPS' % (ospid, fps)
)
else:
cnt += 1
self.wfile.write(b'--FRAME\r\n')
self.send_header('Content-Type', 'image/jpeg')
self.send_header('Content-Length', len(buf.getvalue()))
self.end_headers()
self.wfile.write(buf.getvalue())
self.wfile.write(b'\r\r')
except Exception as e:
print('Removed streaming clients from MJPEG %s: %s', self.
client_address, str(e))
else:
try:
st2 = time.monotonic()
cnt2 = 1
fps2 = 0
ospid2 = os.getpid()
while True:
if not self.server.H264Queue.empty():
frame = io.BytesIO(self.server.H264Queue.get(False))
buf = frame
if cnt2 >= 20:
fps2 = cnt2 / (time.monotonic() - st2)
st2 = time.monotonic()
cnt2 = 1
print('%d: Streaming H264 at %dFPS' % (ospid2,
fps2))
else:
cnt2 += 1
self.wfile.write(buf.getvalue())
except Exception as e:
print('Removed streaming clients from H264 %s: %s', self.
client_address, str(e))
class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):
allow_reuse_address = True
daemon_threads = True
def server_start(MJPEGQueue, H264Queue, DetectQueue, port, servstop):
try:
address = '', port
server = StreamingServer(address, StreamingHandler)
server.MJPEGQueue = MJPEGQueue
server.DetectQueue = DetectQueue
server.H264Queue = H264Queue
print('Started server')
server.serve_forever()
finally:
servstop.set()
if __name__ == '__main__':
queueH264 = mp.Queue(1)
queueMJPEG = mp.Queue(1)
queueDetectRect = mp.Queue(1)
stopCapture = mp.Event()
queueProcessedLow = mp.Queue(1)
queueProcessedHigh = mp.Queue(1)
ServerStop = mp.Event()
capture_proc = mp.Process(target=do_capture, args=(queueH264,
queueMJPEG, stopCapture), daemon=True)
server_proc = mp.Process(target=server_start, args=(queueMJPEG,
queueH264, queueDetectRect, 8000, stopCapture), daemon=True)
detect_proc = mp.Process(target=do_detection, args=(queueMJPEG,
queueDetectRect, stopCapture), daemon=True)
capture_proc.start()
detect_proc.start()
server_proc.start()
while True:
if stopCapture.is_set():
stopCapture.set()
time.sleep(0.1)
capture_proc.terminate()
server_proc.terminate()
detect_proc.terminate()
proccessing_proc_lores.terminate()
break
time.sleep(1)
<|reserved_special_token_1|>
import os
import io
import time
import multiprocessing as mp
from queue import Empty
import picamera
from PIL import Image
from http import server
import socketserver
import numpy as np
import cv2
class QueueOutputMJPEG(object):
def __init__(self, queue, finished):
self.queue = queue
self.finished = finished
self.stream = io.BytesIO()
def write(self, buf):
if buf.startswith(b'\xff\xd8'):
size = self.stream.tell()
if size:
self.stream.seek(0)
if self.queue.empty():
self.queue.put(self.stream.read(size))
self.stream.seek(0)
self.stream.write(buf)
def flush(self):
self.queue.close()
self.queue.join_thread()
self.finished.set()
class QueueOutputH264(object):
def __init__(self, queue, finished):
self.queue = queue
self.finished = finished
self.stream = io.BytesIO()
def write(self, buf):
if True:
size = self.stream.tell()
if size:
self.stream.seek(0)
if self.queue.empty():
self.queue.put(self.stream.read(size))
self.stream.seek(0)
self.stream.write(buf)
def flush(self):
self.queue.close()
self.queue.join_thread()
self.finished.set()
def do_capture(queueH264, queueMJPEG, stopCap):
print('Capture started')
with picamera.PiCamera(sensor_mode=2) as camera:
camera.resolution = 1280, 720
camera.framerate = 15
camera.video_stabilization = True
camera.video_denoise = True
camera.vflip = True
camera.sharpness = 20
camera.meter_mode = 'matrix'
camera.awb_mode = 'auto'
camera.saturation = 2
camera.contrast = 10
camera.drc_strength = 'high'
camera.exposure_mode = 'antishake'
camera.exposure_compensation = 3
outputH264 = QueueOutputH264(queueH264, stopCap)
outputMJPEG = QueueOutputMJPEG(queueMJPEG, stopCap)
camera.start_recording(outputH264, format='h264', profile='high',
intra_period=30, sps_timing=True, bitrate=4000000, quality=25,
resize=(420, 234))
camera.start_recording(outputMJPEG, splitter_port=2, format='mjpeg',
resize=(672, 384))
while not stopCap.wait(0):
pass
camera.stop_recording(splitter_port=2)
camera.stop_recording()
time.sleep(0.2)
camera.close()
def do_detection(ImageQueue, RectQueue, finished):
net = cv2.dnn.readNet('pedestrian-detection-adas-002.xml',
'pedestrian-detection-adas-002.bin')
net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)
st = time.monotonic()
cnt = 1
fps = 0
FutureOuts = []
ospid = os.getpid()
while not finished.wait(0):
stream = None
try:
stream = io.BytesIO(ImageQueue.get(False))
except:
pass
if len(FutureOuts) == 3:
stream = None
if not stream is None:
stream.seek(0)
try:
image = Image.open(stream).convert('RGB')
except:
pass
cv_img = np.array(image)
cv_img = cv_img[:, :, ::-1].copy()
blob = cv2.dnn.blobFromImage(cv_img, 1.0, size=(672, 384), mean
=(127.5, 127.5, 127.5), swapRB=False, crop=False)
net.setInput(blob)
FutureOuts.append(net.forwardAsync())
while FutureOuts and FutureOuts[0].wait_for(0):
out1 = FutureOuts[0].get()
if cnt >= 20:
fps = cnt / (time.monotonic() - st)
st = time.monotonic()
cnt = 1
print('%d: Detecting at %FPS' % (ospid, fps))
else:
cnt += 1
props = []
for detection in out1.reshape(-1, 7):
inf = []
obj_type = int(detection[1] - 1)
conf = float(detection[2])
xmin = float(detection[3])
ymin = float(detection[4])
xmax = float(detection[5])
ymax = float(detection[6])
if conf > 0.6:
prop = {'coord': (xmin, ymin, xmax, ymax), 'type':
obj_type, 'conf': conf}
props.append(prop)
if RectQueue.empty():
RectQueue.put(props)
del FutureOuts[0]
class StreamingHandler(server.BaseHTTPRequestHandler):
def do_GET(self):
if '/data.html' in self.path:
strprops = 'ffffd9'
if not self.server.DetectQueue.empty():
props = self.server.DetectQueue.get(False)
pcnt = 0
for prop in props:
strprops += (
'Coord = ({0:4f}, {1:4f}, {2:4f}, {3:4f}. ID = {4:d}\n'
.format(prop['coord'][0], prop['coord'][1], prop[
'coord'][2], prop['coord'][3], pcnt))
pcnt += 1
strprops += 'ffaaee'
content = strprops.encode('utf-8')
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(content))
self.end_headers()
self.wfile.write(content)
elif '/stream.mjpg' in self.path:
self.send_response(200)
self.send_header('Age', 0)
self.send_header('Cache-Control', 'no-cache, private')
self.send_header('Pragma', 'no-cache')
self.send_header('Content-Type',
'multipart/x-mixed-replace; boundary=FRAME')
self.end_headers()
while self.server.MJPEGQueue.empty():
pass
buf = io.BytesIO(self.server.MJPEGQueue.get())
try:
st = time.monotonic()
cnt = 1
fps = 0
ospid = os.getpid()
while True:
if not self.server.MJPEGQueue.empty():
buf = io.BytesIO(self.server.MJPEGQueue.get(False))
if cnt >= 20:
fps = cnt / (time.monotonic() - st)
st = time.monotonic()
cnt = 1
print('%d: Streaming MJPEG at %dFPS' % (ospid, fps)
)
else:
cnt += 1
self.wfile.write(b'--FRAME\r\n')
self.send_header('Content-Type', 'image/jpeg')
self.send_header('Content-Length', len(buf.getvalue()))
self.end_headers()
self.wfile.write(buf.getvalue())
self.wfile.write(b'\r\r')
except Exception as e:
print('Removed streaming clients from MJPEG %s: %s', self.
client_address, str(e))
else:
try:
st2 = time.monotonic()
cnt2 = 1
fps2 = 0
ospid2 = os.getpid()
while True:
if not self.server.H264Queue.empty():
frame = io.BytesIO(self.server.H264Queue.get(False))
buf = frame
if cnt2 >= 20:
fps2 = cnt2 / (time.monotonic() - st2)
st2 = time.monotonic()
cnt2 = 1
print('%d: Streaming H264 at %dFPS' % (ospid2,
fps2))
else:
cnt2 += 1
self.wfile.write(buf.getvalue())
except Exception as e:
print('Removed streaming clients from H264 %s: %s', self.
client_address, str(e))
class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):
allow_reuse_address = True
daemon_threads = True
def server_start(MJPEGQueue, H264Queue, DetectQueue, port, servstop):
try:
address = '', port
server = StreamingServer(address, StreamingHandler)
server.MJPEGQueue = MJPEGQueue
server.DetectQueue = DetectQueue
server.H264Queue = H264Queue
print('Started server')
server.serve_forever()
finally:
servstop.set()
if __name__ == '__main__':
queueH264 = mp.Queue(1)
queueMJPEG = mp.Queue(1)
queueDetectRect = mp.Queue(1)
stopCapture = mp.Event()
queueProcessedLow = mp.Queue(1)
queueProcessedHigh = mp.Queue(1)
ServerStop = mp.Event()
capture_proc = mp.Process(target=do_capture, args=(queueH264,
queueMJPEG, stopCapture), daemon=True)
server_proc = mp.Process(target=server_start, args=(queueMJPEG,
queueH264, queueDetectRect, 8000, stopCapture), daemon=True)
detect_proc = mp.Process(target=do_detection, args=(queueMJPEG,
queueDetectRect, stopCapture), daemon=True)
capture_proc.start()
detect_proc.start()
server_proc.start()
while True:
if stopCapture.is_set():
stopCapture.set()
time.sleep(0.1)
capture_proc.terminate()
server_proc.terminate()
detect_proc.terminate()
proccessing_proc_lores.terminate()
break
time.sleep(1)
<|reserved_special_token_1|>
import os
import io
import time
import multiprocessing as mp
from queue import Empty
import picamera
from PIL import Image
from http import server
import socketserver
import numpy as np
import cv2
class QueueOutputMJPEG(object):
def __init__(self, queue, finished):
self.queue = queue
self.finished = finished
self.stream = io.BytesIO()
def write(self, buf):
if buf.startswith(b'\xff\xd8'):
# New frame, put the last frame's data in the queue
size = self.stream.tell()
if size:
self.stream.seek(0)
if self.queue.empty():
self.queue.put(self.stream.read(size))
self.stream.seek(0)
self.stream.write(buf)
def flush(self):
self.queue.close()
self.queue.join_thread()
self.finished.set()
class QueueOutputH264(object):
def __init__(self, queue, finished):
self.queue = queue
self.finished = finished
self.stream = io.BytesIO()
def write(self, buf):
if True:
size = self.stream.tell()
if size:
self.stream.seek(0)
if self.queue.empty():
self.queue.put(self.stream.read(size))
self.stream.seek(0)
self.stream.write(buf)
def flush(self):
self.queue.close()
self.queue.join_thread()
self.finished.set()
def do_capture(queueH264, queueMJPEG, stopCap):
print('Capture started')
with picamera.PiCamera(sensor_mode=2) as camera:
camera.resolution=(1280, 720)
camera.framerate=15
camera.video_stabilization = True
camera.video_denoise = True
camera.vflip = True
camera.sharpness = 20
camera.meter_mode = 'matrix'
camera.awb_mode = 'auto'
camera.saturation = 2
camera.contrast = 10
camera.drc_strength = 'high'
camera.exposure_mode = 'antishake'
camera.exposure_compensation = 3
outputH264 = QueueOutputH264(queueH264, stopCap)
outputMJPEG = QueueOutputMJPEG(queueMJPEG, stopCap)
camera.start_recording(outputH264, format='h264', profile='high', intra_period=30, sps_timing=True, bitrate=4000000, quality=25, resize=(420,234))
camera.start_recording(outputMJPEG, splitter_port=2, format='mjpeg', resize=(672,384))
while not stopCap.wait(0): #camera.wait_recording(100)
pass
camera.stop_recording(splitter_port=2)
camera.stop_recording()
time.sleep(0.2)
camera.close()
def do_detection(ImageQueue, RectQueue, finished):
net = cv2.dnn.readNet('pedestrian-detection-adas-002.xml', 'pedestrian-detection-adas-002.bin')
net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)
st = time.monotonic()
cnt = 1
fps = 0
FutureOuts = []
ospid = os.getpid()
while not finished.wait(0):
stream = None
try:
stream = io.BytesIO(ImageQueue.get(False))
except:
pass
if len(FutureOuts) == 3:
stream = None
if not stream is None:
stream.seek(0)
try:
image = Image.open(stream).convert('RGB')
except:
pass
cv_img = np.array(image)
cv_img = cv_img[:, :, ::-1].copy()
blob = cv2.dnn.blobFromImage(cv_img, 1.0, size=(672,384),\
mean=(127.5, 127.5, 127.5), swapRB=False, crop=False)
net.setInput(blob)
FutureOuts.append(net.forwardAsync())
while FutureOuts and FutureOuts[0].wait_for(0):
out1 = FutureOuts[0].get()
if cnt >= 20:
fps = cnt/(time.monotonic() - st)
st = time.monotonic()
cnt = 1
print('%d: Detecting at %FPS' % (ospid, fps))
else:
cnt += 1
props = []
for detection in out1.reshape(-1,7):
inf = []
obj_type = int(detection[1]-1)
conf = float(detection[2])
xmin = float(detection[3])
ymin = float(detection[4])
xmax = float(detection[5])
ymax = float(detection[6])
if conf > 0.6:
prop = {'coord': (xmin, ymin, xmax, ymax), 'type': obj_type, 'conf': conf}
props.append(prop)
if RectQueue.empty():
RectQueue.put(props)
del FutureOuts[0]
class StreamingHandler(server.BaseHTTPRequestHandler):
def do_GET(self):
if '/data.html' in self.path:
strprops = "ffffd9"
if not self.server.DetectQueue.empty():
props = self.server.DetectQueue.get(False)
pcnt = 0
for prop in props:
strprops += 'Coord = ({0:4f}, {1:4f}, {2:4f}, {3:4f}. ID = {4:d}\n'.format(
prop['coord'][0], prop['coord'][1], prop['coord'][2], prop['coord'][3], pcnt)
pcnt += 1
strprops += "ffaaee"
content = strprops.encode('utf-8')
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(content))
self.end_headers()
self.wfile.write(content)
elif '/stream.mjpg' in self.path:
self.send_response(200)
self.send_header('Age', 0)
self.send_header('Cache-Control', 'no-cache, private')
self.send_header('Pragma', 'no-cache')
self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=FRAME')
self.end_headers()
while self.server.MJPEGQueue.empty():
pass
buf = io.BytesIO(self.server.MJPEGQueue.get())
try:
st = time.monotonic()
cnt = 1
fps = 0
ospid = os.getpid()
while True:
if not self.server.MJPEGQueue.empty():
buf = io.BytesIO(self.server.MJPEGQueue.get(False))
if cnt >= 20:
fps = cnt/(time.monotonic() - st)
st = time.monotonic()
cnt = 1
print('%d: Streaming MJPEG at %dFPS' % (ospid, fps))
else:
cnt += 1
self.wfile.write(b'--FRAME\r\n')
self.send_header('Content-Type', 'image/jpeg')
self.send_header('Content-Length', len(buf.getvalue()))
self.end_headers()
self.wfile.write(buf.getvalue())
self.wfile.write(b'\r\r')
except Exception as e:
print('Removed streaming clients from MJPEG %s: %s', self.client_address, str(e))
else:
#self.send_response(200)
#self.send_header('Age', 0)
#self.send_header('Cache-Control', 'no-cache, private')
#self.send_header('Pragma', 'no-cache')
#self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=FRAME')
#self.end_headers()
try:
st2 = time.monotonic()
cnt2 = 1
fps2 = 0
ospid2 = os.getpid()
while True:
if not self.server.H264Queue.empty():
frame = io.BytesIO(self.server.H264Queue.get(False))
buf = frame
if cnt2 >= 20:
fps2 = cnt2/(time.monotonic() - st2)
st2 = time.monotonic()
cnt2 = 1
print('%d: Streaming H264 at %dFPS' % (ospid2, fps2))
else:
cnt2 += 1
self.wfile.write(buf.getvalue())
#self.wfile.write(b'\r\r')
except Exception as e:
print('Removed streaming clients from H264 %s: %s', self.client_address, str(e))
# else:
# self.send_error(404)
# self.end_headers()
class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):
allow_reuse_address = True
daemon_threads = True
def server_start(MJPEGQueue, H264Queue, DetectQueue, port, servstop):
try:
address = ('', port)
server = StreamingServer(address, StreamingHandler)
server.MJPEGQueue = MJPEGQueue
server.DetectQueue = DetectQueue
server.H264Queue = H264Queue
print('Started server')
server.serve_forever()
finally:
servstop.set()
if __name__ == '__main__':
queueH264 = mp.Queue(1)
queueMJPEG = mp.Queue(1)
queueDetectRect = mp.Queue(1)
stopCapture = mp.Event()
queueProcessedLow = mp.Queue(1)
queueProcessedHigh = mp.Queue(1)
ServerStop = mp.Event()
capture_proc = mp.Process(target=do_capture, args=(queueH264, queueMJPEG, stopCapture), daemon=True)
server_proc = mp.Process(target=server_start, args=(queueMJPEG, queueH264, queueDetectRect, 8000, stopCapture), daemon=True)
detect_proc = mp.Process(target=do_detection, args=(queueMJPEG, queueDetectRect, stopCapture), daemon=True)
capture_proc.start()
detect_proc.start()
server_proc.start()
while True:
if stopCapture.is_set():
stopCapture.set()
time.sleep(0.1)
capture_proc.terminate()
server_proc.terminate()
detect_proc.terminate()
proccessing_proc_lores.terminate()
break
time.sleep(1)
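
# Illustrative client-side check (not part of the original script): with the server
# running on the Pi, the MJPEG preview is served at
#     http://<pi-address>:8000/stream.mjpg
# the latest detection boxes at
#     http://<pi-address>:8000/data.html
# and any other path returns the raw H.264 stream; <pi-address> is a placeholder for
# the device's hostname or IP.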
|
flexible
|
{
"blob_id": "ffd034eb5f0482c027dcc344bddb01b90249511c",
"index": 3198,
"step-1": "<mask token>\n\n\nclass QueueOutputMJPEG(object):\n\n def __init__(self, queue, finished):\n self.queue = queue\n self.finished = finished\n self.stream = io.BytesIO()\n\n def write(self, buf):\n if buf.startswith(b'\\xff\\xd8'):\n size = self.stream.tell()\n if size:\n self.stream.seek(0)\n if self.queue.empty():\n self.queue.put(self.stream.read(size))\n self.stream.seek(0)\n self.stream.write(buf)\n\n def flush(self):\n self.queue.close()\n self.queue.join_thread()\n self.finished.set()\n\n\nclass QueueOutputH264(object):\n\n def __init__(self, queue, finished):\n self.queue = queue\n self.finished = finished\n self.stream = io.BytesIO()\n\n def write(self, buf):\n if True:\n size = self.stream.tell()\n if size:\n self.stream.seek(0)\n if self.queue.empty():\n self.queue.put(self.stream.read(size))\n self.stream.seek(0)\n self.stream.write(buf)\n\n def flush(self):\n self.queue.close()\n self.queue.join_thread()\n self.finished.set()\n\n\n<mask token>\n\n\nclass StreamingHandler(server.BaseHTTPRequestHandler):\n\n def do_GET(self):\n if '/data.html' in self.path:\n strprops = 'ffffd9'\n if not self.server.DetectQueue.empty():\n props = self.server.DetectQueue.get(False)\n pcnt = 0\n for prop in props:\n strprops += (\n 'Coord = ({0:4f}, {1:4f}, {2:4f}, {3:4f}. ID = {4:d}\\n'\n .format(prop['coord'][0], prop['coord'][1], prop[\n 'coord'][2], prop['coord'][3], pcnt))\n pcnt += 1\n strprops += 'ffaaee'\n content = strprops.encode('utf-8')\n self.send_response(200)\n self.send_header('Content-Type', 'text/html')\n self.send_header('Content-Length', len(content))\n self.end_headers()\n self.wfile.write(content)\n elif '/stream.mjpg' in self.path:\n self.send_response(200)\n self.send_header('Age', 0)\n self.send_header('Cache-Control', 'no-cache, private')\n self.send_header('Pragma', 'no-cache')\n self.send_header('Content-Type',\n 'multipart/x-mixed-replace; boundary=FRAME')\n self.end_headers()\n while self.server.MJPEGQueue.empty():\n pass\n buf = io.BytesIO(self.server.MJPEGQueue.get())\n try:\n st = time.monotonic()\n cnt = 1\n fps = 0\n ospid = os.getpid()\n while True:\n if not self.server.MJPEGQueue.empty():\n buf = io.BytesIO(self.server.MJPEGQueue.get(False))\n if cnt >= 20:\n fps = cnt / (time.monotonic() - st)\n st = time.monotonic()\n cnt = 1\n print('%d: Streaming MJPEG at %dFPS' % (ospid, fps)\n )\n else:\n cnt += 1\n self.wfile.write(b'--FRAME\\r\\n')\n self.send_header('Content-Type', 'image/jpeg')\n self.send_header('Content-Length', len(buf.getvalue()))\n self.end_headers()\n self.wfile.write(buf.getvalue())\n self.wfile.write(b'\\r\\r')\n except Exception as e:\n print('Removed streaming clients from MJPEG %s: %s', self.\n client_address, str(e))\n else:\n try:\n st2 = time.monotonic()\n cnt2 = 1\n fps2 = 0\n ospid2 = os.getpid()\n while True:\n if not self.server.H264Queue.empty():\n frame = io.BytesIO(self.server.H264Queue.get(False))\n buf = frame\n if cnt2 >= 20:\n fps2 = cnt2 / (time.monotonic() - st2)\n st2 = time.monotonic()\n cnt2 = 1\n print('%d: Streaming H264 at %dFPS' % (ospid2,\n fps2))\n else:\n cnt2 += 1\n self.wfile.write(buf.getvalue())\n except Exception as e:\n print('Removed streaming clients from H264 %s: %s', self.\n client_address, str(e))\n\n\nclass StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):\n allow_reuse_address = True\n daemon_threads = True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass QueueOutputMJPEG(object):\n\n def __init__(self, queue, finished):\n self.queue = queue\n self.finished = finished\n self.stream = io.BytesIO()\n\n def write(self, buf):\n if buf.startswith(b'\\xff\\xd8'):\n size = self.stream.tell()\n if size:\n self.stream.seek(0)\n if self.queue.empty():\n self.queue.put(self.stream.read(size))\n self.stream.seek(0)\n self.stream.write(buf)\n\n def flush(self):\n self.queue.close()\n self.queue.join_thread()\n self.finished.set()\n\n\nclass QueueOutputH264(object):\n\n def __init__(self, queue, finished):\n self.queue = queue\n self.finished = finished\n self.stream = io.BytesIO()\n\n def write(self, buf):\n if True:\n size = self.stream.tell()\n if size:\n self.stream.seek(0)\n if self.queue.empty():\n self.queue.put(self.stream.read(size))\n self.stream.seek(0)\n self.stream.write(buf)\n\n def flush(self):\n self.queue.close()\n self.queue.join_thread()\n self.finished.set()\n\n\n<mask token>\n\n\nclass StreamingHandler(server.BaseHTTPRequestHandler):\n\n def do_GET(self):\n if '/data.html' in self.path:\n strprops = 'ffffd9'\n if not self.server.DetectQueue.empty():\n props = self.server.DetectQueue.get(False)\n pcnt = 0\n for prop in props:\n strprops += (\n 'Coord = ({0:4f}, {1:4f}, {2:4f}, {3:4f}. ID = {4:d}\\n'\n .format(prop['coord'][0], prop['coord'][1], prop[\n 'coord'][2], prop['coord'][3], pcnt))\n pcnt += 1\n strprops += 'ffaaee'\n content = strprops.encode('utf-8')\n self.send_response(200)\n self.send_header('Content-Type', 'text/html')\n self.send_header('Content-Length', len(content))\n self.end_headers()\n self.wfile.write(content)\n elif '/stream.mjpg' in self.path:\n self.send_response(200)\n self.send_header('Age', 0)\n self.send_header('Cache-Control', 'no-cache, private')\n self.send_header('Pragma', 'no-cache')\n self.send_header('Content-Type',\n 'multipart/x-mixed-replace; boundary=FRAME')\n self.end_headers()\n while self.server.MJPEGQueue.empty():\n pass\n buf = io.BytesIO(self.server.MJPEGQueue.get())\n try:\n st = time.monotonic()\n cnt = 1\n fps = 0\n ospid = os.getpid()\n while True:\n if not self.server.MJPEGQueue.empty():\n buf = io.BytesIO(self.server.MJPEGQueue.get(False))\n if cnt >= 20:\n fps = cnt / (time.monotonic() - st)\n st = time.monotonic()\n cnt = 1\n print('%d: Streaming MJPEG at %dFPS' % (ospid, fps)\n )\n else:\n cnt += 1\n self.wfile.write(b'--FRAME\\r\\n')\n self.send_header('Content-Type', 'image/jpeg')\n self.send_header('Content-Length', len(buf.getvalue()))\n self.end_headers()\n self.wfile.write(buf.getvalue())\n self.wfile.write(b'\\r\\r')\n except Exception as e:\n print('Removed streaming clients from MJPEG %s: %s', self.\n client_address, str(e))\n else:\n try:\n st2 = time.monotonic()\n cnt2 = 1\n fps2 = 0\n ospid2 = os.getpid()\n while True:\n if not self.server.H264Queue.empty():\n frame = io.BytesIO(self.server.H264Queue.get(False))\n buf = frame\n if cnt2 >= 20:\n fps2 = cnt2 / (time.monotonic() - st2)\n st2 = time.monotonic()\n cnt2 = 1\n print('%d: Streaming H264 at %dFPS' % (ospid2,\n fps2))\n else:\n cnt2 += 1\n self.wfile.write(buf.getvalue())\n except Exception as e:\n print('Removed streaming clients from H264 %s: %s', self.\n client_address, str(e))\n\n\nclass StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):\n allow_reuse_address = True\n daemon_threads = True\n\n\ndef server_start(MJPEGQueue, H264Queue, DetectQueue, port, servstop):\n try:\n address = '', port\n server = StreamingServer(address, StreamingHandler)\n 
server.MJPEGQueue = MJPEGQueue\n server.DetectQueue = DetectQueue\n server.H264Queue = H264Queue\n print('Started server')\n server.serve_forever()\n finally:\n servstop.set()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass QueueOutputMJPEG(object):\n\n def __init__(self, queue, finished):\n self.queue = queue\n self.finished = finished\n self.stream = io.BytesIO()\n\n def write(self, buf):\n if buf.startswith(b'\\xff\\xd8'):\n size = self.stream.tell()\n if size:\n self.stream.seek(0)\n if self.queue.empty():\n self.queue.put(self.stream.read(size))\n self.stream.seek(0)\n self.stream.write(buf)\n\n def flush(self):\n self.queue.close()\n self.queue.join_thread()\n self.finished.set()\n\n\nclass QueueOutputH264(object):\n\n def __init__(self, queue, finished):\n self.queue = queue\n self.finished = finished\n self.stream = io.BytesIO()\n\n def write(self, buf):\n if True:\n size = self.stream.tell()\n if size:\n self.stream.seek(0)\n if self.queue.empty():\n self.queue.put(self.stream.read(size))\n self.stream.seek(0)\n self.stream.write(buf)\n\n def flush(self):\n self.queue.close()\n self.queue.join_thread()\n self.finished.set()\n\n\ndef do_capture(queueH264, queueMJPEG, stopCap):\n print('Capture started')\n with picamera.PiCamera(sensor_mode=2) as camera:\n camera.resolution = 1280, 720\n camera.framerate = 15\n camera.video_stabilization = True\n camera.video_denoise = True\n camera.vflip = True\n camera.sharpness = 20\n camera.meter_mode = 'matrix'\n camera.awb_mode = 'auto'\n camera.saturation = 2\n camera.contrast = 10\n camera.drc_strength = 'high'\n camera.exposure_mode = 'antishake'\n camera.exposure_compensation = 3\n outputH264 = QueueOutputH264(queueH264, stopCap)\n outputMJPEG = QueueOutputMJPEG(queueMJPEG, stopCap)\n camera.start_recording(outputH264, format='h264', profile='high',\n intra_period=30, sps_timing=True, bitrate=4000000, quality=25,\n resize=(420, 234))\n camera.start_recording(outputMJPEG, splitter_port=2, format='mjpeg',\n resize=(672, 384))\n while not stopCap.wait(0):\n pass\n camera.stop_recording(splitter_port=2)\n camera.stop_recording()\n time.sleep(0.2)\n camera.close()\n\n\ndef do_detection(ImageQueue, RectQueue, finished):\n net = cv2.dnn.readNet('pedestrian-detection-adas-002.xml',\n 'pedestrian-detection-adas-002.bin')\n net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)\n st = time.monotonic()\n cnt = 1\n fps = 0\n FutureOuts = []\n ospid = os.getpid()\n while not finished.wait(0):\n stream = None\n try:\n stream = io.BytesIO(ImageQueue.get(False))\n except:\n pass\n if len(FutureOuts) == 3:\n stream = None\n if not stream is None:\n stream.seek(0)\n try:\n image = Image.open(stream).convert('RGB')\n except:\n pass\n cv_img = np.array(image)\n cv_img = cv_img[:, :, ::-1].copy()\n blob = cv2.dnn.blobFromImage(cv_img, 1.0, size=(672, 384), mean\n =(127.5, 127.5, 127.5), swapRB=False, crop=False)\n net.setInput(blob)\n FutureOuts.append(net.forwardAsync())\n while FutureOuts and FutureOuts[0].wait_for(0):\n out1 = FutureOuts[0].get()\n if cnt >= 20:\n fps = cnt / (time.monotonic() - st)\n st = time.monotonic()\n cnt = 1\n print('%d: Detecting at %FPS' % (ospid, fps))\n else:\n cnt += 1\n props = []\n for detection in out1.reshape(-1, 7):\n inf = []\n obj_type = int(detection[1] - 1)\n conf = float(detection[2])\n xmin = float(detection[3])\n ymin = float(detection[4])\n xmax = float(detection[5])\n ymax = float(detection[6])\n if conf > 0.6:\n prop = {'coord': (xmin, ymin, xmax, ymax), 'type':\n obj_type, 'conf': conf}\n props.append(prop)\n if RectQueue.empty():\n RectQueue.put(props)\n del FutureOuts[0]\n\n\nclass StreamingHandler(server.BaseHTTPRequestHandler):\n\n def do_GET(self):\n if '/data.html' in self.path:\n strprops = 
'ffffd9'\n if not self.server.DetectQueue.empty():\n props = self.server.DetectQueue.get(False)\n pcnt = 0\n for prop in props:\n strprops += (\n 'Coord = ({0:4f}, {1:4f}, {2:4f}, {3:4f}. ID = {4:d}\\n'\n .format(prop['coord'][0], prop['coord'][1], prop[\n 'coord'][2], prop['coord'][3], pcnt))\n pcnt += 1\n strprops += 'ffaaee'\n content = strprops.encode('utf-8')\n self.send_response(200)\n self.send_header('Content-Type', 'text/html')\n self.send_header('Content-Length', len(content))\n self.end_headers()\n self.wfile.write(content)\n elif '/stream.mjpg' in self.path:\n self.send_response(200)\n self.send_header('Age', 0)\n self.send_header('Cache-Control', 'no-cache, private')\n self.send_header('Pragma', 'no-cache')\n self.send_header('Content-Type',\n 'multipart/x-mixed-replace; boundary=FRAME')\n self.end_headers()\n while self.server.MJPEGQueue.empty():\n pass\n buf = io.BytesIO(self.server.MJPEGQueue.get())\n try:\n st = time.monotonic()\n cnt = 1\n fps = 0\n ospid = os.getpid()\n while True:\n if not self.server.MJPEGQueue.empty():\n buf = io.BytesIO(self.server.MJPEGQueue.get(False))\n if cnt >= 20:\n fps = cnt / (time.monotonic() - st)\n st = time.monotonic()\n cnt = 1\n print('%d: Streaming MJPEG at %dFPS' % (ospid, fps)\n )\n else:\n cnt += 1\n self.wfile.write(b'--FRAME\\r\\n')\n self.send_header('Content-Type', 'image/jpeg')\n self.send_header('Content-Length', len(buf.getvalue()))\n self.end_headers()\n self.wfile.write(buf.getvalue())\n self.wfile.write(b'\\r\\r')\n except Exception as e:\n print('Removed streaming clients from MJPEG %s: %s', self.\n client_address, str(e))\n else:\n try:\n st2 = time.monotonic()\n cnt2 = 1\n fps2 = 0\n ospid2 = os.getpid()\n while True:\n if not self.server.H264Queue.empty():\n frame = io.BytesIO(self.server.H264Queue.get(False))\n buf = frame\n if cnt2 >= 20:\n fps2 = cnt2 / (time.monotonic() - st2)\n st2 = time.monotonic()\n cnt2 = 1\n print('%d: Streaming H264 at %dFPS' % (ospid2,\n fps2))\n else:\n cnt2 += 1\n self.wfile.write(buf.getvalue())\n except Exception as e:\n print('Removed streaming clients from H264 %s: %s', self.\n client_address, str(e))\n\n\nclass StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):\n allow_reuse_address = True\n daemon_threads = True\n\n\ndef server_start(MJPEGQueue, H264Queue, DetectQueue, port, servstop):\n try:\n address = '', port\n server = StreamingServer(address, StreamingHandler)\n server.MJPEGQueue = MJPEGQueue\n server.DetectQueue = DetectQueue\n server.H264Queue = H264Queue\n print('Started server')\n server.serve_forever()\n finally:\n servstop.set()\n\n\nif __name__ == '__main__':\n queueH264 = mp.Queue(1)\n queueMJPEG = mp.Queue(1)\n queueDetectRect = mp.Queue(1)\n stopCapture = mp.Event()\n queueProcessedLow = mp.Queue(1)\n queueProcessedHigh = mp.Queue(1)\n ServerStop = mp.Event()\n capture_proc = mp.Process(target=do_capture, args=(queueH264,\n queueMJPEG, stopCapture), daemon=True)\n server_proc = mp.Process(target=server_start, args=(queueMJPEG,\n queueH264, queueDetectRect, 8000, stopCapture), daemon=True)\n detect_proc = mp.Process(target=do_detection, args=(queueMJPEG,\n queueDetectRect, stopCapture), daemon=True)\n capture_proc.start()\n detect_proc.start()\n server_proc.start()\n while True:\n if stopCapture.is_set():\n stopCapture.set()\n time.sleep(0.1)\n capture_proc.terminate()\n server_proc.terminate()\n detect_proc.terminate()\n proccessing_proc_lores.terminate()\n break\n time.sleep(1)\n",
"step-4": "import os\nimport io\nimport time\nimport multiprocessing as mp\nfrom queue import Empty\nimport picamera\nfrom PIL import Image\nfrom http import server\nimport socketserver\nimport numpy as np\nimport cv2\n\n\nclass QueueOutputMJPEG(object):\n\n def __init__(self, queue, finished):\n self.queue = queue\n self.finished = finished\n self.stream = io.BytesIO()\n\n def write(self, buf):\n if buf.startswith(b'\\xff\\xd8'):\n size = self.stream.tell()\n if size:\n self.stream.seek(0)\n if self.queue.empty():\n self.queue.put(self.stream.read(size))\n self.stream.seek(0)\n self.stream.write(buf)\n\n def flush(self):\n self.queue.close()\n self.queue.join_thread()\n self.finished.set()\n\n\nclass QueueOutputH264(object):\n\n def __init__(self, queue, finished):\n self.queue = queue\n self.finished = finished\n self.stream = io.BytesIO()\n\n def write(self, buf):\n if True:\n size = self.stream.tell()\n if size:\n self.stream.seek(0)\n if self.queue.empty():\n self.queue.put(self.stream.read(size))\n self.stream.seek(0)\n self.stream.write(buf)\n\n def flush(self):\n self.queue.close()\n self.queue.join_thread()\n self.finished.set()\n\n\ndef do_capture(queueH264, queueMJPEG, stopCap):\n print('Capture started')\n with picamera.PiCamera(sensor_mode=2) as camera:\n camera.resolution = 1280, 720\n camera.framerate = 15\n camera.video_stabilization = True\n camera.video_denoise = True\n camera.vflip = True\n camera.sharpness = 20\n camera.meter_mode = 'matrix'\n camera.awb_mode = 'auto'\n camera.saturation = 2\n camera.contrast = 10\n camera.drc_strength = 'high'\n camera.exposure_mode = 'antishake'\n camera.exposure_compensation = 3\n outputH264 = QueueOutputH264(queueH264, stopCap)\n outputMJPEG = QueueOutputMJPEG(queueMJPEG, stopCap)\n camera.start_recording(outputH264, format='h264', profile='high',\n intra_period=30, sps_timing=True, bitrate=4000000, quality=25,\n resize=(420, 234))\n camera.start_recording(outputMJPEG, splitter_port=2, format='mjpeg',\n resize=(672, 384))\n while not stopCap.wait(0):\n pass\n camera.stop_recording(splitter_port=2)\n camera.stop_recording()\n time.sleep(0.2)\n camera.close()\n\n\ndef do_detection(ImageQueue, RectQueue, finished):\n net = cv2.dnn.readNet('pedestrian-detection-adas-002.xml',\n 'pedestrian-detection-adas-002.bin')\n net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)\n st = time.monotonic()\n cnt = 1\n fps = 0\n FutureOuts = []\n ospid = os.getpid()\n while not finished.wait(0):\n stream = None\n try:\n stream = io.BytesIO(ImageQueue.get(False))\n except:\n pass\n if len(FutureOuts) == 3:\n stream = None\n if not stream is None:\n stream.seek(0)\n try:\n image = Image.open(stream).convert('RGB')\n except:\n pass\n cv_img = np.array(image)\n cv_img = cv_img[:, :, ::-1].copy()\n blob = cv2.dnn.blobFromImage(cv_img, 1.0, size=(672, 384), mean\n =(127.5, 127.5, 127.5), swapRB=False, crop=False)\n net.setInput(blob)\n FutureOuts.append(net.forwardAsync())\n while FutureOuts and FutureOuts[0].wait_for(0):\n out1 = FutureOuts[0].get()\n if cnt >= 20:\n fps = cnt / (time.monotonic() - st)\n st = time.monotonic()\n cnt = 1\n print('%d: Detecting at %FPS' % (ospid, fps))\n else:\n cnt += 1\n props = []\n for detection in out1.reshape(-1, 7):\n inf = []\n obj_type = int(detection[1] - 1)\n conf = float(detection[2])\n xmin = float(detection[3])\n ymin = float(detection[4])\n xmax = float(detection[5])\n ymax = float(detection[6])\n if conf > 0.6:\n prop = {'coord': (xmin, ymin, xmax, ymax), 'type':\n obj_type, 'conf': conf}\n props.append(prop)\n 
if RectQueue.empty():\n RectQueue.put(props)\n del FutureOuts[0]\n\n\nclass StreamingHandler(server.BaseHTTPRequestHandler):\n\n def do_GET(self):\n if '/data.html' in self.path:\n strprops = 'ffffd9'\n if not self.server.DetectQueue.empty():\n props = self.server.DetectQueue.get(False)\n pcnt = 0\n for prop in props:\n strprops += (\n 'Coord = ({0:4f}, {1:4f}, {2:4f}, {3:4f}. ID = {4:d}\\n'\n .format(prop['coord'][0], prop['coord'][1], prop[\n 'coord'][2], prop['coord'][3], pcnt))\n pcnt += 1\n strprops += 'ffaaee'\n content = strprops.encode('utf-8')\n self.send_response(200)\n self.send_header('Content-Type', 'text/html')\n self.send_header('Content-Length', len(content))\n self.end_headers()\n self.wfile.write(content)\n elif '/stream.mjpg' in self.path:\n self.send_response(200)\n self.send_header('Age', 0)\n self.send_header('Cache-Control', 'no-cache, private')\n self.send_header('Pragma', 'no-cache')\n self.send_header('Content-Type',\n 'multipart/x-mixed-replace; boundary=FRAME')\n self.end_headers()\n while self.server.MJPEGQueue.empty():\n pass\n buf = io.BytesIO(self.server.MJPEGQueue.get())\n try:\n st = time.monotonic()\n cnt = 1\n fps = 0\n ospid = os.getpid()\n while True:\n if not self.server.MJPEGQueue.empty():\n buf = io.BytesIO(self.server.MJPEGQueue.get(False))\n if cnt >= 20:\n fps = cnt / (time.monotonic() - st)\n st = time.monotonic()\n cnt = 1\n print('%d: Streaming MJPEG at %dFPS' % (ospid, fps)\n )\n else:\n cnt += 1\n self.wfile.write(b'--FRAME\\r\\n')\n self.send_header('Content-Type', 'image/jpeg')\n self.send_header('Content-Length', len(buf.getvalue()))\n self.end_headers()\n self.wfile.write(buf.getvalue())\n self.wfile.write(b'\\r\\r')\n except Exception as e:\n print('Removed streaming clients from MJPEG %s: %s', self.\n client_address, str(e))\n else:\n try:\n st2 = time.monotonic()\n cnt2 = 1\n fps2 = 0\n ospid2 = os.getpid()\n while True:\n if not self.server.H264Queue.empty():\n frame = io.BytesIO(self.server.H264Queue.get(False))\n buf = frame\n if cnt2 >= 20:\n fps2 = cnt2 / (time.monotonic() - st2)\n st2 = time.monotonic()\n cnt2 = 1\n print('%d: Streaming H264 at %dFPS' % (ospid2,\n fps2))\n else:\n cnt2 += 1\n self.wfile.write(buf.getvalue())\n except Exception as e:\n print('Removed streaming clients from H264 %s: %s', self.\n client_address, str(e))\n\n\nclass StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):\n allow_reuse_address = True\n daemon_threads = True\n\n\ndef server_start(MJPEGQueue, H264Queue, DetectQueue, port, servstop):\n try:\n address = '', port\n server = StreamingServer(address, StreamingHandler)\n server.MJPEGQueue = MJPEGQueue\n server.DetectQueue = DetectQueue\n server.H264Queue = H264Queue\n print('Started server')\n server.serve_forever()\n finally:\n servstop.set()\n\n\nif __name__ == '__main__':\n queueH264 = mp.Queue(1)\n queueMJPEG = mp.Queue(1)\n queueDetectRect = mp.Queue(1)\n stopCapture = mp.Event()\n queueProcessedLow = mp.Queue(1)\n queueProcessedHigh = mp.Queue(1)\n ServerStop = mp.Event()\n capture_proc = mp.Process(target=do_capture, args=(queueH264,\n queueMJPEG, stopCapture), daemon=True)\n server_proc = mp.Process(target=server_start, args=(queueMJPEG,\n queueH264, queueDetectRect, 8000, stopCapture), daemon=True)\n detect_proc = mp.Process(target=do_detection, args=(queueMJPEG,\n queueDetectRect, stopCapture), daemon=True)\n capture_proc.start()\n detect_proc.start()\n server_proc.start()\n while True:\n if stopCapture.is_set():\n stopCapture.set()\n time.sleep(0.1)\n 
capture_proc.terminate()\n server_proc.terminate()\n detect_proc.terminate()\n proccessing_proc_lores.terminate()\n break\n time.sleep(1)\n",
"step-5": "import os\nimport io\nimport time\nimport multiprocessing as mp\nfrom queue import Empty\nimport picamera\nfrom PIL import Image\nfrom http import server\nimport socketserver\nimport numpy as np\nimport cv2\n\nclass QueueOutputMJPEG(object):\n def __init__(self, queue, finished):\n self.queue = queue\n self.finished = finished\n self.stream = io.BytesIO()\n\n def write(self, buf):\n if buf.startswith(b'\\xff\\xd8'):\n # New frame, put the last frame's data in the queue\n size = self.stream.tell()\n if size:\n self.stream.seek(0)\n if self.queue.empty():\n self.queue.put(self.stream.read(size))\n self.stream.seek(0)\n self.stream.write(buf)\n\n def flush(self):\n self.queue.close()\n self.queue.join_thread()\n self.finished.set()\n\nclass QueueOutputH264(object):\n def __init__(self, queue, finished):\n self.queue = queue\n self.finished = finished\n self.stream = io.BytesIO()\n\n def write(self, buf):\n if True:\n size = self.stream.tell()\n if size:\n self.stream.seek(0)\n if self.queue.empty():\n self.queue.put(self.stream.read(size))\n self.stream.seek(0)\n self.stream.write(buf)\n\n def flush(self):\n self.queue.close()\n self.queue.join_thread()\n self.finished.set()\n\ndef do_capture(queueH264, queueMJPEG, stopCap):\n print('Capture started')\n with picamera.PiCamera(sensor_mode=2) as camera:\n camera.resolution=(1280, 720)\n camera.framerate=15\n camera.video_stabilization = True\n camera.video_denoise = True\n camera.vflip = True\n camera.sharpness = 20\n camera.meter_mode = 'matrix'\n camera.awb_mode = 'auto'\n camera.saturation = 2\n camera.contrast = 10\n camera.drc_strength = 'high'\n camera.exposure_mode = 'antishake'\n camera.exposure_compensation = 3\n outputH264 = QueueOutputH264(queueH264, stopCap)\n outputMJPEG = QueueOutputMJPEG(queueMJPEG, stopCap)\n camera.start_recording(outputH264, format='h264', profile='high', intra_period=30, sps_timing=True, bitrate=4000000, quality=25, resize=(420,234))\n camera.start_recording(outputMJPEG, splitter_port=2, format='mjpeg', resize=(672,384))\n while not stopCap.wait(0): #camera.wait_recording(100)\n pass\n camera.stop_recording(splitter_port=2)\n camera.stop_recording()\n time.sleep(0.2)\n camera.close()\n\ndef do_detection(ImageQueue, RectQueue, finished):\n net = cv2.dnn.readNet('pedestrian-detection-adas-002.xml', 'pedestrian-detection-adas-002.bin')\n net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)\n st = time.monotonic()\n cnt = 1\n fps = 0\n FutureOuts = []\n ospid = os.getpid()\n while not finished.wait(0):\n stream = None\n try:\n stream = io.BytesIO(ImageQueue.get(False))\n except:\n pass\n if len(FutureOuts) == 3:\n stream = None\n if not stream is None:\n stream.seek(0)\n try:\n image = Image.open(stream).convert('RGB')\n except:\n pass\n cv_img = np.array(image)\n cv_img = cv_img[:, :, ::-1].copy()\n blob = cv2.dnn.blobFromImage(cv_img, 1.0, size=(672,384),\\\n mean=(127.5, 127.5, 127.5), swapRB=False, crop=False)\n net.setInput(blob)\n FutureOuts.append(net.forwardAsync())\n while FutureOuts and FutureOuts[0].wait_for(0):\n out1 = FutureOuts[0].get()\n if cnt >= 20:\n fps = cnt/(time.monotonic() - st)\n st = time.monotonic()\n cnt = 1\n print('%d: Detecting at %FPS' % (ospid, fps))\n else:\n cnt += 1\n props = []\n for detection in out1.reshape(-1,7):\n inf = []\n obj_type = int(detection[1]-1)\n conf = float(detection[2])\n xmin = float(detection[3])\n ymin = float(detection[4])\n xmax = float(detection[5])\n ymax = float(detection[6])\n if conf > 0.6:\n prop = {'coord': (xmin, ymin, xmax, ymax), 
'type': obj_type, 'conf': conf}\n props.append(prop)\n if RectQueue.empty():\n RectQueue.put(props)\n del FutureOuts[0]\n\nclass StreamingHandler(server.BaseHTTPRequestHandler):\n def do_GET(self):\n if '/data.html' in self.path:\n strprops = \"ffffd9\"\n if not self.server.DetectQueue.empty():\n props = self.server.DetectQueue.get(False)\n pcnt = 0\n for prop in props:\n strprops += 'Coord = ({0:4f}, {1:4f}, {2:4f}, {3:4f}. ID = {4:d}\\n'.format(\n prop['coord'][0], prop['coord'][1], prop['coord'][2], prop['coord'][3], pcnt)\n pcnt += 1\n strprops += \"ffaaee\"\n content = strprops.encode('utf-8')\n self.send_response(200)\n self.send_header('Content-Type', 'text/html')\n self.send_header('Content-Length', len(content))\n self.end_headers()\n self.wfile.write(content)\n elif '/stream.mjpg' in self.path:\n self.send_response(200)\n self.send_header('Age', 0)\n self.send_header('Cache-Control', 'no-cache, private')\n self.send_header('Pragma', 'no-cache')\n self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=FRAME')\n self.end_headers()\n while self.server.MJPEGQueue.empty():\n pass\n buf = io.BytesIO(self.server.MJPEGQueue.get())\n try:\n st = time.monotonic()\n cnt = 1\n fps = 0\n ospid = os.getpid()\n while True:\n if not self.server.MJPEGQueue.empty():\n buf = io.BytesIO(self.server.MJPEGQueue.get(False))\n if cnt >= 20:\n fps = cnt/(time.monotonic() - st)\n st = time.monotonic()\n cnt = 1\n print('%d: Streaming MJPEG at %dFPS' % (ospid, fps))\n else:\n cnt += 1\n self.wfile.write(b'--FRAME\\r\\n')\n self.send_header('Content-Type', 'image/jpeg')\n self.send_header('Content-Length', len(buf.getvalue()))\n self.end_headers()\n self.wfile.write(buf.getvalue())\n self.wfile.write(b'\\r\\r')\n except Exception as e:\n print('Removed streaming clients from MJPEG %s: %s', self.client_address, str(e))\n else:\n #self.send_response(200)\n #self.send_header('Age', 0)\n #self.send_header('Cache-Control', 'no-cache, private')\n #self.send_header('Pragma', 'no-cache')\n #self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=FRAME')\n #self.end_headers()\n try:\n st2 = time.monotonic()\n cnt2 = 1\n fps2 = 0\n ospid2 = os.getpid()\n while True:\n if not self.server.H264Queue.empty():\n frame = io.BytesIO(self.server.H264Queue.get(False))\n buf = frame\n if cnt2 >= 20:\n fps2 = cnt2/(time.monotonic() - st2)\n st2 = time.monotonic()\n cnt2 = 1\n print('%d: Streaming H264 at %dFPS' % (ospid2, fps2))\n else:\n cnt2 += 1\n self.wfile.write(buf.getvalue())\n #self.wfile.write(b'\\r\\r')\n except Exception as e:\n print('Removed streaming clients from H264 %s: %s', self.client_address, str(e))\n # else:\n # self.send_error(404)\n # self.end_headers()\n\nclass StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):\n allow_reuse_address = True\n daemon_threads = True\n\ndef server_start(MJPEGQueue, H264Queue, DetectQueue, port, servstop):\n try:\n address = ('', port)\n server = StreamingServer(address, StreamingHandler)\n server.MJPEGQueue = MJPEGQueue\n server.DetectQueue = DetectQueue\n server.H264Queue = H264Queue\n print('Started server')\n server.serve_forever()\n finally:\n servstop.set()\n\nif __name__ == '__main__':\n queueH264 = mp.Queue(1)\n queueMJPEG = mp.Queue(1)\n queueDetectRect = mp.Queue(1)\n stopCapture = mp.Event()\n queueProcessedLow = mp.Queue(1)\n queueProcessedHigh = mp.Queue(1)\n ServerStop = mp.Event()\n capture_proc = mp.Process(target=do_capture, args=(queueH264, queueMJPEG, stopCapture), daemon=True)\n server_proc = 
mp.Process(target=server_start, args=(queueMJPEG, queueH264, queueDetectRect, 8000, stopCapture), daemon=True)\n detect_proc = mp.Process(target=do_detection, args=(queueMJPEG, queueDetectRect, stopCapture), daemon=True)\n\n capture_proc.start()\n detect_proc.start()\n server_proc.start()\n\n while True:\n if stopCapture.is_set():\n stopCapture.set()\n time.sleep(0.1)\n capture_proc.terminate()\n server_proc.terminate()\n detect_proc.terminate()\n proccessing_proc_lores.terminate()\n break\n time.sleep(1)\n\n",
"step-ids": [
12,
13,
16,
17,
18
]
}
|
[
12,
13,
16,
17,
18
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_sendto_cli_runs_ok():
runner = CliRunner()
result = runner.invoke(cli, ['sendto'])
assert result.exit_code == 0
<|reserved_special_token_1|>
from click.testing import CliRunner
from apitest.actions.cli import cli
def test_sendto_cli_runs_ok():
runner = CliRunner()
result = runner.invoke(cli, ['sendto'])
assert result.exit_code == 0
<|reserved_special_token_1|>
from click.testing import CliRunner
from apitest.actions.cli import cli
def test_sendto_cli_runs_ok():
runner = CliRunner()
result = runner.invoke(cli, ["sendto"])
assert result.exit_code == 0
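
# A small follow-up check (illustrative sketch, reusing the imports above):
# click attaches a --help option to every command by default, so the
# sub-command's help screen should also exit cleanly. The asserted substring
# assumes click's standard "Usage:" banner.
def test_sendto_cli_help_runs_ok():
    runner = CliRunner()
    result = runner.invoke(cli, ["sendto", "--help"])

    assert result.exit_code == 0
    assert "Usage" in result.output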
|
flexible
|
{
"blob_id": "7537deb4560e880365b23a99584d0b1f8fa3daf4",
"index": 5675,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_sendto_cli_runs_ok():\n runner = CliRunner()\n result = runner.invoke(cli, ['sendto'])\n assert result.exit_code == 0\n",
"step-3": "from click.testing import CliRunner\nfrom apitest.actions.cli import cli\n\n\ndef test_sendto_cli_runs_ok():\n runner = CliRunner()\n result = runner.invoke(cli, ['sendto'])\n assert result.exit_code == 0\n",
"step-4": "from click.testing import CliRunner\nfrom apitest.actions.cli import cli\n\n\ndef test_sendto_cli_runs_ok():\n runner = CliRunner()\n result = runner.invoke(cli, [\"sendto\"])\n \n assert result.exit_code == 0\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if x > 0 and y > 0:
print('1')
elif x > 0 and y < 0:
print('4')
elif x < 0 and y > 0:
print('2')
else:
print('3')
<|reserved_special_token_1|>
x = int(input())
y = int(input())
if x > 0 and y > 0:
print('1')
elif x > 0 and y < 0:
print('4')
elif x < 0 and y > 0:
print('2')
else:
print('3')
<|reserved_special_token_1|>
#14681
#Write a program that reads a point's coordinates and determines which quadrant the point lies in. Assume the x and y coordinates are each either positive or negative (never zero).
x = int(input())
y = int(input())
if(x>0 and y>0):
print("1")
elif(x>0 and y<0):
print("4")
elif(x<0 and y>0):
print("2")
else:
print("3")
|
flexible
|
{
"blob_id": "e9908e32204da8973f06d98430fc660c90b5e303",
"index": 3987,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif x > 0 and y > 0:\n print('1')\nelif x > 0 and y < 0:\n print('4')\nelif x < 0 and y > 0:\n print('2')\nelse:\n print('3')\n",
"step-3": "x = int(input())\ny = int(input())\nif x > 0 and y > 0:\n print('1')\nelif x > 0 and y < 0:\n print('4')\nelif x < 0 and y > 0:\n print('2')\nelse:\n print('3')\n",
"step-4": "#14681\n#점의 좌표를 입력받아 그 점이 어느 사분면에 속하는지 알아내는 프로그램을 작성하시오. 단, x좌표와 y좌표는 모두 양수나 음수라고 가정한다.\n\nx = int(input())\ny = int(input())\n\nif(x>0 and y>0):\n print(\"1\")\nelif(x>0 and y<0):\n print(\"4\")\nelif(x<0 and y>0):\n print(\"2\")\nelse:\n print(\"3\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(0, y):
list.append(randint(1, 10))
<|reserved_special_token_0|>
print(f'Original list: {list}')
print(f'New list: {new}')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
list = []
y = int(input('Enter the number of elements in the list>>> '))
for i in range(0, y):
list.append(randint(1, 10))
new = [el for num, el in enumerate(list) if list[num - 1] < list[num]]
print(f'Original list: {list}')
print(f'New list: {new}')
<|reserved_special_token_1|>
from random import randint
list = []
y = int(input('Enter the number of elements in the list>>> '))
for i in range(0, y):
list.append(randint(1, 10))
new = [el for num, el in enumerate(list) if list[num - 1] < list[num]]
print(f'Original list: {list}')
print(f'New list: {new}')
<|reserved_special_token_1|>
# A list of numbers is given.
# Print the elements of the original list
# whose values are greater than the previous element.
from random import randint
list = []
y = int(input("Введите количество элементов в списке>>> "))
for i in range(0, y):
list.append(randint(1, 10))
new = [el for num, el in enumerate(list) if list[num - 1] < list[num]]
print(f"Исходный список: {list}")
print(f"Новый список список: {new}")
|
flexible
|
{
"blob_id": "bfc4f5e90b7c22a29d33ae9b4a5edfb6086d79f4",
"index": 2344,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(0, y):\n list.append(randint(1, 10))\n<mask token>\nprint(f'Исходный список: {list}')\nprint(f'Новый список список: {new}')\n",
"step-3": "<mask token>\nlist = []\ny = int(input('Введите количество элементов в списке>>> '))\nfor i in range(0, y):\n list.append(randint(1, 10))\nnew = [el for num, el in enumerate(list) if list[num - 1] < list[num]]\nprint(f'Исходный список: {list}')\nprint(f'Новый список список: {new}')\n",
"step-4": "from random import randint\nlist = []\ny = int(input('Введите количество элементов в списке>>> '))\nfor i in range(0, y):\n list.append(randint(1, 10))\nnew = [el for num, el in enumerate(list) if list[num - 1] < list[num]]\nprint(f'Исходный список: {list}')\nprint(f'Новый список список: {new}')\n",
"step-5": "# Представлен список чисел.\n# Необходимо вывести элементы исходного списка,\n# значения которых больше предыдущего элемента.\nfrom random import randint\n\nlist = []\ny = int(input(\"Введите количество элементов в списке>>> \"))\nfor i in range(0, y):\n list.append(randint(1, 10))\n\nnew = [el for num, el in enumerate(list) if list[num - 1] < list[num]]\nprint(f\"Исходный список: {list}\")\nprint(f\"Новый список список: {new}\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def parse_doc_line(line):
parsed = re.search('\\d[\\d\\s]+\\d', line)
return 'empty' if parsed is None else parsed[0]
def get_roc_point(clf, x_set, y_set, threshold):
loo = LeaveOneOut()
vectorizer = CountVectorizer(ngram_range=n_gram_range)
roc_predictions = np.empty(0)
answers = np.empty(0)
i = 1
for train_index, test_index in loo.split(x_set):
x_train = [obj for partition in x_set[train_index] for obj in partition
]
x_test = [obj for partition in x_set[test_index] for obj in partition]
x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()
x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(
x_test):]
y_train, y_test = y_set[train_index], y_set[test_index]
clf.fit(x_train, y_train.flatten())
answers = np.append(answers, y_test)
roc_predictions = np.append(roc_predictions, [('spmsg' if
prediction[0] <= threshold else 'legit') for prediction in clf.
predict_proba(x_test)])
print(f'Finished iteration {i} / 10')
i += 1
(true_negatives_, true_positives_, false_negatives_, false_positives_
) = 0, 0, 0, 0
for prediction, answer in zip(roc_predictions, answers):
if prediction == 'spmsg':
if answer == 'spmsg':
true_positives_ += 1
else:
false_positives_ += 1
elif answer == 'legit':
true_negatives_ += 1
else:
false_negatives_ += 1
roc_point_ = 1 - true_negatives_ / (true_negatives_ + false_positives_
), true_positives_ / (true_positives_ + false_negatives_)
return roc_point_
def get_cv_score(clf, x_set, y_set):
loo = LeaveOneOut()
vectorizer = CountVectorizer(ngram_range=n_gram_range)
predictions = np.empty(0)
answers = np.empty(0)
i = 1
for train_index, test_index in loo.split(x_set):
x_train = [obj for partition in x_set[train_index] for obj in partition
]
x_test = [obj for partition in x_set[test_index] for obj in partition]
x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()
x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(
x_test):]
y_train, y_test = y_set[train_index], y_set[test_index]
clf.fit(x_train, y_train.flatten())
predictions = np.append(predictions, clf.predict(x_test))
answers = np.append(answers, y_test)
print(f'Finished iteration {i} / 10')
i += 1
(true_negatives_, true_positives_, false_negatives_, false_positives_
) = 0, 0, 0, 0
for prediction, answer in zip(predictions, answers):
if prediction == 'spmsg':
if answer == 'spmsg':
true_positives_ += 1
else:
false_positives_ += 1
elif answer == 'legit':
true_negatives_ += 1
else:
false_negatives_ += 1
f1_result = f1_score(answers, predictions, average='macro')
return (f1_result, true_negatives_, true_positives_, false_negatives_,
false_positives_)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parse_doc_line(line):
parsed = re.search('\\d[\\d\\s]+\\d', line)
return 'empty' if parsed is None else parsed[0]
def get_roc_point(clf, x_set, y_set, threshold):
loo = LeaveOneOut()
vectorizer = CountVectorizer(ngram_range=n_gram_range)
roc_predictions = np.empty(0)
answers = np.empty(0)
i = 1
for train_index, test_index in loo.split(x_set):
x_train = [obj for partition in x_set[train_index] for obj in partition
]
x_test = [obj for partition in x_set[test_index] for obj in partition]
x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()
x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(
x_test):]
y_train, y_test = y_set[train_index], y_set[test_index]
clf.fit(x_train, y_train.flatten())
answers = np.append(answers, y_test)
roc_predictions = np.append(roc_predictions, [('spmsg' if
prediction[0] <= threshold else 'legit') for prediction in clf.
predict_proba(x_test)])
print(f'Finished iteration {i} / 10')
i += 1
(true_negatives_, true_positives_, false_negatives_, false_positives_
) = 0, 0, 0, 0
for prediction, answer in zip(roc_predictions, answers):
if prediction == 'spmsg':
if answer == 'spmsg':
true_positives_ += 1
else:
false_positives_ += 1
elif answer == 'legit':
true_negatives_ += 1
else:
false_negatives_ += 1
roc_point_ = 1 - true_negatives_ / (true_negatives_ + false_positives_
), true_positives_ / (true_positives_ + false_negatives_)
return roc_point_
def get_cv_score(clf, x_set, y_set):
loo = LeaveOneOut()
vectorizer = CountVectorizer(ngram_range=n_gram_range)
predictions = np.empty(0)
answers = np.empty(0)
i = 1
for train_index, test_index in loo.split(x_set):
x_train = [obj for partition in x_set[train_index] for obj in partition
]
x_test = [obj for partition in x_set[test_index] for obj in partition]
x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()
x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(
x_test):]
y_train, y_test = y_set[train_index], y_set[test_index]
clf.fit(x_train, y_train.flatten())
predictions = np.append(predictions, clf.predict(x_test))
answers = np.append(answers, y_test)
print(f'Finished iteration {i} / 10')
i += 1
(true_negatives_, true_positives_, false_negatives_, false_positives_
) = 0, 0, 0, 0
for prediction, answer in zip(predictions, answers):
if prediction == 'spmsg':
if answer == 'spmsg':
true_positives_ += 1
else:
false_positives_ += 1
elif answer == 'legit':
true_negatives_ += 1
else:
false_negatives_ += 1
f1_result = f1_score(answers, predictions, average='macro')
return (f1_result, true_negatives_, true_positives_, false_negatives_,
false_positives_)
<|reserved_special_token_0|>
for part in range(1, 11):
parts_X.append([])
parts_Y.append([])
for file in listdir(f'messages/part{part}'):
f = open(f'messages/part{part}/{file}', 'r')
one = parse_doc_line(f.readline())
f.readline()
two = parse_doc_line(f.readline())
curr_obj = one + ' ' + two
parts_Y[-1].append(re.findall('\\D+', file)[0])
parts_X[-1].append(curr_obj)
f.close()
<|reserved_special_token_0|>
for thresh in range(0, 11):
roc_points.append(get_roc_point(MultinomialNB(alpha=alpha_smoothing),
np.array(parts_X), np.array(parts_Y), thresh / 10))
<|reserved_special_token_0|>
for lambda_ratio in lambda_ratios:
(f1, true_negatives, true_positives, false_negatives, false_positives) = (
get_cv_score(MultinomialNB(class_prior=(lambda_ratio, 1), alpha=
alpha_smoothing), np.array(parts_X), np.array(parts_Y)))
print(
f"""F1 score: {f1}
True negatives: {true_negatives}
True positives: {true_positives}
False negatives: {false_negatives}
False positives: {false_positives}"""
)
f1_points.append(f1)
true_positives_list.append(true_positives)
false_positives_list.append(false_positives)
true_negatives_list.append(true_negatives)
false_negatives_list.append(false_negatives)
<|reserved_special_token_0|>
plts[0].margins(0.0)
plts[0].set_ylim(ymin=0)
plts[0].plot([point[0] for point in roc_points], [point[1] for point in
roc_points])
plts[0].set_ylabel('Roc Curve')
plts[1].set_xscale('log')
plts[1].plot(lambda_ratios, f1_points, '-b')
plts[1].set_ylabel('F1 score')
plts[1].set_xlim(xmin=1)
plts[2].set_xscale('log')
plts[2].set_yscale('log')
plts[2].plot(lambda_ratios, true_positives_list, '-r', label='True positives')
plts[2].plot(lambda_ratios, false_positives_list, '-g', label='False positives'
)
plts[2].plot(lambda_ratios, true_negatives_list, '-b', label='True negatives')
plts[2].plot(lambda_ratios, false_negatives_list, '-y', label='False negatives'
)
plts[2].legend(loc='upper right')
plts[2].set_xlabel('Lambda_legit / Lambda_spam')
plts[2].set_xlim(xmin=1)
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
n_gram_range = 1, 1
alpha_smoothing = 1e-10
lambdas_best = [1e+190, 1]
def parse_doc_line(line):
parsed = re.search('\\d[\\d\\s]+\\d', line)
return 'empty' if parsed is None else parsed[0]
def get_roc_point(clf, x_set, y_set, threshold):
loo = LeaveOneOut()
vectorizer = CountVectorizer(ngram_range=n_gram_range)
roc_predictions = np.empty(0)
answers = np.empty(0)
i = 1
for train_index, test_index in loo.split(x_set):
x_train = [obj for partition in x_set[train_index] for obj in partition
]
x_test = [obj for partition in x_set[test_index] for obj in partition]
x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()
x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(
x_test):]
y_train, y_test = y_set[train_index], y_set[test_index]
clf.fit(x_train, y_train.flatten())
answers = np.append(answers, y_test)
roc_predictions = np.append(roc_predictions, [('spmsg' if
prediction[0] <= threshold else 'legit') for prediction in clf.
predict_proba(x_test)])
print(f'Finished iteration {i} / 10')
i += 1
(true_negatives_, true_positives_, false_negatives_, false_positives_
) = 0, 0, 0, 0
for prediction, answer in zip(roc_predictions, answers):
if prediction == 'spmsg':
if answer == 'spmsg':
true_positives_ += 1
else:
false_positives_ += 1
elif answer == 'legit':
true_negatives_ += 1
else:
false_negatives_ += 1
roc_point_ = 1 - true_negatives_ / (true_negatives_ + false_positives_
), true_positives_ / (true_positives_ + false_negatives_)
return roc_point_
def get_cv_score(clf, x_set, y_set):
loo = LeaveOneOut()
vectorizer = CountVectorizer(ngram_range=n_gram_range)
predictions = np.empty(0)
answers = np.empty(0)
i = 1
for train_index, test_index in loo.split(x_set):
x_train = [obj for partition in x_set[train_index] for obj in partition
]
x_test = [obj for partition in x_set[test_index] for obj in partition]
x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()
x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(
x_test):]
y_train, y_test = y_set[train_index], y_set[test_index]
clf.fit(x_train, y_train.flatten())
predictions = np.append(predictions, clf.predict(x_test))
answers = np.append(answers, y_test)
print(f'Finished iteration {i} / 10')
i += 1
(true_negatives_, true_positives_, false_negatives_, false_positives_
) = 0, 0, 0, 0
for prediction, answer in zip(predictions, answers):
if prediction == 'spmsg':
if answer == 'spmsg':
true_positives_ += 1
else:
false_positives_ += 1
elif answer == 'legit':
true_negatives_ += 1
else:
false_negatives_ += 1
f1_result = f1_score(answers, predictions, average='macro')
return (f1_result, true_negatives_, true_positives_, false_negatives_,
false_positives_)
parts_X = []
parts_Y = []
for part in range(1, 11):
parts_X.append([])
parts_Y.append([])
for file in listdir(f'messages/part{part}'):
f = open(f'messages/part{part}/{file}', 'r')
one = parse_doc_line(f.readline())
f.readline()
two = parse_doc_line(f.readline())
curr_obj = one + ' ' + two
parts_Y[-1].append(re.findall('\\D+', file)[0])
parts_X[-1].append(curr_obj)
f.close()
roc_points = []
for thresh in range(0, 11):
roc_points.append(get_roc_point(MultinomialNB(alpha=alpha_smoothing),
np.array(parts_X), np.array(parts_Y), thresh / 10))
f1_points = []
true_positives_list = []
false_positives_list = []
true_negatives_list = []
false_negatives_list = []
lambda_ratios = [1, 100000.0, 10000000000.0, 1e+20, 1e+40, 1e+80, 1e+160,
1e+190]
for lambda_ratio in lambda_ratios:
(f1, true_negatives, true_positives, false_negatives, false_positives) = (
get_cv_score(MultinomialNB(class_prior=(lambda_ratio, 1), alpha=
alpha_smoothing), np.array(parts_X), np.array(parts_Y)))
print(
f"""F1 score: {f1}
True negatives: {true_negatives}
True positives: {true_positives}
False negatives: {false_negatives}
False positives: {false_positives}"""
)
f1_points.append(f1)
true_positives_list.append(true_positives)
false_positives_list.append(false_positives)
true_negatives_list.append(true_negatives)
false_negatives_list.append(false_negatives)
fig, plts = plt.subplots(3)
plts[0].margins(0.0)
plts[0].set_ylim(ymin=0)
plts[0].plot([point[0] for point in roc_points], [point[1] for point in
roc_points])
plts[0].set_ylabel('Roc Curve')
plts[1].set_xscale('log')
plts[1].plot(lambda_ratios, f1_points, '-b')
plts[1].set_ylabel('F1 score')
plts[1].set_xlim(xmin=1)
plts[2].set_xscale('log')
plts[2].set_yscale('log')
plts[2].plot(lambda_ratios, true_positives_list, '-r', label='True positives')
plts[2].plot(lambda_ratios, false_positives_list, '-g', label='False positives'
)
plts[2].plot(lambda_ratios, true_negatives_list, '-b', label='True negatives')
plts[2].plot(lambda_ratios, false_negatives_list, '-y', label='False negatives'
)
plts[2].legend(loc='upper right')
plts[2].set_xlabel('Lambda_legit / Lambda_spam')
plts[2].set_xlim(xmin=1)
plt.show()
<|reserved_special_token_1|>
from os import listdir
import re
import numpy as np
from sklearn.metrics import f1_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import LeaveOneOut
import matplotlib.pyplot as plt
n_gram_range = 1, 1
alpha_smoothing = 1e-10
lambdas_best = [1e+190, 1]
def parse_doc_line(line):
parsed = re.search('\\d[\\d\\s]+\\d', line)
return 'empty' if parsed is None else parsed[0]
def get_roc_point(clf, x_set, y_set, threshold):
loo = LeaveOneOut()
vectorizer = CountVectorizer(ngram_range=n_gram_range)
roc_predictions = np.empty(0)
answers = np.empty(0)
i = 1
for train_index, test_index in loo.split(x_set):
x_train = [obj for partition in x_set[train_index] for obj in partition
]
x_test = [obj for partition in x_set[test_index] for obj in partition]
x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()
x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(
x_test):]
y_train, y_test = y_set[train_index], y_set[test_index]
clf.fit(x_train, y_train.flatten())
answers = np.append(answers, y_test)
roc_predictions = np.append(roc_predictions, [('spmsg' if
prediction[0] <= threshold else 'legit') for prediction in clf.
predict_proba(x_test)])
print(f'Finished iteration {i} / 10')
i += 1
(true_negatives_, true_positives_, false_negatives_, false_positives_
) = 0, 0, 0, 0
for prediction, answer in zip(roc_predictions, answers):
if prediction == 'spmsg':
if answer == 'spmsg':
true_positives_ += 1
else:
false_positives_ += 1
elif answer == 'legit':
true_negatives_ += 1
else:
false_negatives_ += 1
roc_point_ = 1 - true_negatives_ / (true_negatives_ + false_positives_
), true_positives_ / (true_positives_ + false_negatives_)
return roc_point_
def get_cv_score(clf, x_set, y_set):
loo = LeaveOneOut()
vectorizer = CountVectorizer(ngram_range=n_gram_range)
predictions = np.empty(0)
answers = np.empty(0)
i = 1
for train_index, test_index in loo.split(x_set):
x_train = [obj for partition in x_set[train_index] for obj in partition
]
x_test = [obj for partition in x_set[test_index] for obj in partition]
x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()
x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(
x_test):]
y_train, y_test = y_set[train_index], y_set[test_index]
clf.fit(x_train, y_train.flatten())
predictions = np.append(predictions, clf.predict(x_test))
answers = np.append(answers, y_test)
print(f'Finished iteration {i} / 10')
i += 1
(true_negatives_, true_positives_, false_negatives_, false_positives_
) = 0, 0, 0, 0
for prediction, answer in zip(predictions, answers):
if prediction == 'spmsg':
if answer == 'spmsg':
true_positives_ += 1
else:
false_positives_ += 1
elif answer == 'legit':
true_negatives_ += 1
else:
false_negatives_ += 1
f1_result = f1_score(answers, predictions, average='macro')
return (f1_result, true_negatives_, true_positives_, false_negatives_,
false_positives_)
parts_X = []
parts_Y = []
for part in range(1, 11):
parts_X.append([])
parts_Y.append([])
for file in listdir(f'messages/part{part}'):
f = open(f'messages/part{part}/{file}', 'r')
one = parse_doc_line(f.readline())
f.readline()
two = parse_doc_line(f.readline())
curr_obj = one + ' ' + two
parts_Y[-1].append(re.findall('\\D+', file)[0])
parts_X[-1].append(curr_obj)
f.close()
roc_points = []
for thresh in range(0, 11):
roc_points.append(get_roc_point(MultinomialNB(alpha=alpha_smoothing),
np.array(parts_X), np.array(parts_Y), thresh / 10))
f1_points = []
true_positives_list = []
false_positives_list = []
true_negatives_list = []
false_negatives_list = []
lambda_ratios = [1, 100000.0, 10000000000.0, 1e+20, 1e+40, 1e+80, 1e+160,
1e+190]
for lambda_ratio in lambda_ratios:
(f1, true_negatives, true_positives, false_negatives, false_positives) = (
get_cv_score(MultinomialNB(class_prior=(lambda_ratio, 1), alpha=
alpha_smoothing), np.array(parts_X), np.array(parts_Y)))
print(
f"""F1 score: {f1}
True negatives: {true_negatives}
True positives: {true_positives}
False negatives: {false_negatives}
False positives: {false_positives}"""
)
f1_points.append(f1)
true_positives_list.append(true_positives)
false_positives_list.append(false_positives)
true_negatives_list.append(true_negatives)
false_negatives_list.append(false_negatives)
fig, plts = plt.subplots(3)
plts[0].margins(0.0)
plts[0].set_ylim(ymin=0)
plts[0].plot([point[0] for point in roc_points], [point[1] for point in
roc_points])
plts[0].set_ylabel('Roc Curve')
plts[1].set_xscale('log')
plts[1].plot(lambda_ratios, f1_points, '-b')
plts[1].set_ylabel('F1 score')
plts[1].set_xlim(xmin=1)
plts[2].set_xscale('log')
plts[2].set_yscale('log')
plts[2].plot(lambda_ratios, true_positives_list, '-r', label='True positives')
plts[2].plot(lambda_ratios, false_positives_list, '-g', label='False positives'
)
plts[2].plot(lambda_ratios, true_negatives_list, '-b', label='True negatives')
plts[2].plot(lambda_ratios, false_negatives_list, '-y', label='False negatives'
)
plts[2].legend(loc='upper right')
plts[2].set_xlabel('Lambda_legit / Lambda_spam')
plts[2].set_xlim(xmin=1)
plt.show()
<|reserved_special_token_1|>
from os import listdir
import re
import numpy as np
from sklearn.metrics import f1_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import LeaveOneOut
import matplotlib.pyplot as plt
n_gram_range = (1, 1)
alpha_smoothing = 1e-10
lambdas_best = [1e190, 1]
def parse_doc_line(line):
parsed = re.search(r'\d[\d\s]+\d', line)
return "empty" if parsed is None else parsed[0]
def get_roc_point(clf, x_set, y_set, threshold):
loo = LeaveOneOut()
vectorizer = CountVectorizer(ngram_range=n_gram_range)
roc_predictions = np.empty(0)
answers = np.empty(0)
i = 1
for train_index, test_index in loo.split(x_set):
x_train = [obj for partition in x_set[train_index] for obj in partition]
x_test = [obj for partition in x_set[test_index] for obj in partition]
x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()
x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(x_test):]
y_train, y_test = y_set[train_index], y_set[test_index]
clf.fit(x_train, y_train.flatten())
answers = np.append(answers, y_test)
roc_predictions = np.append(roc_predictions,
['spmsg' if prediction[0] <= threshold else 'legit' for prediction in
clf.predict_proba(x_test)])
print(f'Finished iteration {i} / 10')
i += 1
true_negatives_, true_positives_, false_negatives_, false_positives_ = 0, 0, 0, 0
for prediction, answer in zip(roc_predictions, answers):
if prediction == 'spmsg':
if answer == 'spmsg':
true_positives_ += 1
else:
false_positives_ += 1
else:
if answer == 'legit':
true_negatives_ += 1
else:
false_negatives_ += 1
roc_point_ = (
1 - (true_negatives_ / (true_negatives_ + false_positives_)),
true_positives_ / (true_positives_ + false_negatives_))
return roc_point_
def get_cv_score(clf, x_set, y_set):
loo = LeaveOneOut()
vectorizer = CountVectorizer(ngram_range=n_gram_range)
predictions = np.empty(0)
answers = np.empty(0)
i = 1
for train_index, test_index in loo.split(x_set):
x_train = [obj for partition in x_set[train_index] for obj in partition]
x_test = [obj for partition in x_set[test_index] for obj in partition]
x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()
x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(x_test):]
y_train, y_test = y_set[train_index], y_set[test_index]
clf.fit(x_train, y_train.flatten())
predictions = np.append(predictions, clf.predict(x_test))
answers = np.append(answers, y_test)
print(f'Finished iteration {i} / 10')
i += 1
true_negatives_, true_positives_, false_negatives_, false_positives_ = 0, 0, 0, 0
for prediction, answer in zip(predictions, answers):
if prediction == 'spmsg':
if answer == 'spmsg':
true_positives_ += 1
else:
false_positives_ += 1
else:
if answer == 'legit':
true_negatives_ += 1
else:
false_negatives_ += 1
f1_result = f1_score(answers, predictions, average='macro')
return f1_result, true_negatives_, true_positives_, false_negatives_, false_positives_
parts_X = []
parts_Y = []
for part in range(1, 11):
parts_X.append([])
parts_Y.append([])
for file in listdir(f'messages/part{part}'):
f = open(f'messages/part{part}/{file}', "r")
one = parse_doc_line(f.readline())
f.readline()
two = parse_doc_line(f.readline())
curr_obj = one + " " + two
parts_Y[-1].append(re.findall(r'\D+', file)[0])
parts_X[-1].append(curr_obj)
f.close()
roc_points = []
for thresh in range(0, 11):
roc_points.append(get_roc_point(
MultinomialNB(alpha=alpha_smoothing), np.array(parts_X), np.array(parts_Y), thresh / 10))
f1_points = []
true_positives_list = []
false_positives_list = []
true_negatives_list = []
false_negatives_list = []
lambda_ratios = [1, 1e5, 1e10, 1e20, 1e40, 1e80, 1e160, 1e190]
for lambda_ratio in lambda_ratios:
f1, true_negatives, true_positives, false_negatives, false_positives = get_cv_score(
MultinomialNB(class_prior=(lambda_ratio, 1), alpha=alpha_smoothing), np.array(parts_X), np.array(parts_Y))
print(f'F1 score: {f1}\n True negatives: {true_negatives}\n True positives: {true_positives}\n False negatives: '
f'{false_negatives}\n False positives: {false_positives}')
f1_points.append(f1)
true_positives_list.append(true_positives)
false_positives_list.append(false_positives)
true_negatives_list.append(true_negatives)
false_negatives_list.append(false_negatives)
fig, plts = plt.subplots(3)
plts[0].margins(0.0)
plts[0].set_ylim(ymin=0)
plts[0].plot([point[0] for point in roc_points], [point[1] for point in roc_points])
plts[0].set_ylabel('Roc Curve')
plts[1].set_xscale('log')
plts[1].plot(lambda_ratios, f1_points, '-b')
plts[1].set_ylabel('F1 score')
plts[1].set_xlim(xmin=1)
plts[2].set_xscale('log')
plts[2].set_yscale('log')
plts[2].plot(lambda_ratios, true_positives_list, '-r', label='True positives')
plts[2].plot(lambda_ratios, false_positives_list, '-g', label='False positives')
plts[2].plot(lambda_ratios, true_negatives_list, '-b', label='True negatives')
plts[2].plot(lambda_ratios, false_negatives_list, '-y', label='False negatives')
plts[2].legend(loc="upper right")
plts[2].set_xlabel('Lambda_legit / Lambda_spam')
plts[2].set_xlim(xmin=1)
plt.show()
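
# Caveat on the cross-validation loops above (illustrative sketch, not a
# drop-in replacement): CountVectorizer is fit on the concatenation of the
# training and held-out folds, so the held-out fold's vocabulary leaks into
# the feature space. A leakage-free variant fits on the training fold only
# and reuses that fitted vocabulary to transform the held-out fold:
def vectorize_fold(train_docs, test_docs, ngram_range=(1, 1)):
    vectorizer = CountVectorizer(ngram_range=ngram_range)
    x_train_vec = vectorizer.fit_transform(train_docs).toarray()
    # transform() only counts tokens seen during fit, so unseen held-out
    # tokens are dropped instead of becoming features
    x_test_vec = vectorizer.transform(test_docs).toarray()
    return x_train_vec, x_test_vec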
|
flexible
|
{
"blob_id": "8bb67317ede277e03e8cbdefefeffa3d206ece65",
"index": 9434,
"step-1": "<mask token>\n\n\ndef parse_doc_line(line):\n parsed = re.search('\\\\d[\\\\d\\\\s]+\\\\d', line)\n return 'empty' if parsed is None else parsed[0]\n\n\ndef get_roc_point(clf, x_set, y_set, threshold):\n loo = LeaveOneOut()\n vectorizer = CountVectorizer(ngram_range=n_gram_range)\n roc_predictions = np.empty(0)\n answers = np.empty(0)\n i = 1\n for train_index, test_index in loo.split(x_set):\n x_train = [obj for partition in x_set[train_index] for obj in partition\n ]\n x_test = [obj for partition in x_set[test_index] for obj in partition]\n x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()\n x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(\n x_test):]\n y_train, y_test = y_set[train_index], y_set[test_index]\n clf.fit(x_train, y_train.flatten())\n answers = np.append(answers, y_test)\n roc_predictions = np.append(roc_predictions, [('spmsg' if \n prediction[0] <= threshold else 'legit') for prediction in clf.\n predict_proba(x_test)])\n print(f'Finished iteration {i} / 10')\n i += 1\n (true_negatives_, true_positives_, false_negatives_, false_positives_\n ) = 0, 0, 0, 0\n for prediction, answer in zip(roc_predictions, answers):\n if prediction == 'spmsg':\n if answer == 'spmsg':\n true_positives_ += 1\n else:\n false_positives_ += 1\n elif answer == 'legit':\n true_negatives_ += 1\n else:\n false_negatives_ += 1\n roc_point_ = 1 - true_negatives_ / (true_negatives_ + false_positives_\n ), true_positives_ / (true_positives_ + false_negatives_)\n return roc_point_\n\n\ndef get_cv_score(clf, x_set, y_set):\n loo = LeaveOneOut()\n vectorizer = CountVectorizer(ngram_range=n_gram_range)\n predictions = np.empty(0)\n answers = np.empty(0)\n i = 1\n for train_index, test_index in loo.split(x_set):\n x_train = [obj for partition in x_set[train_index] for obj in partition\n ]\n x_test = [obj for partition in x_set[test_index] for obj in partition]\n x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()\n x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(\n x_test):]\n y_train, y_test = y_set[train_index], y_set[test_index]\n clf.fit(x_train, y_train.flatten())\n predictions = np.append(predictions, clf.predict(x_test))\n answers = np.append(answers, y_test)\n print(f'Finished iteration {i} / 10')\n i += 1\n (true_negatives_, true_positives_, false_negatives_, false_positives_\n ) = 0, 0, 0, 0\n for prediction, answer in zip(predictions, answers):\n if prediction == 'spmsg':\n if answer == 'spmsg':\n true_positives_ += 1\n else:\n false_positives_ += 1\n elif answer == 'legit':\n true_negatives_ += 1\n else:\n false_negatives_ += 1\n f1_result = f1_score(answers, predictions, average='macro')\n return (f1_result, true_negatives_, true_positives_, false_negatives_,\n false_positives_)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_doc_line(line):\n parsed = re.search('\\\\d[\\\\d\\\\s]+\\\\d', line)\n return 'empty' if parsed is None else parsed[0]\n\n\ndef get_roc_point(clf, x_set, y_set, threshold):\n loo = LeaveOneOut()\n vectorizer = CountVectorizer(ngram_range=n_gram_range)\n roc_predictions = np.empty(0)\n answers = np.empty(0)\n i = 1\n for train_index, test_index in loo.split(x_set):\n x_train = [obj for partition in x_set[train_index] for obj in partition\n ]\n x_test = [obj for partition in x_set[test_index] for obj in partition]\n x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()\n x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(\n x_test):]\n y_train, y_test = y_set[train_index], y_set[test_index]\n clf.fit(x_train, y_train.flatten())\n answers = np.append(answers, y_test)\n roc_predictions = np.append(roc_predictions, [('spmsg' if \n prediction[0] <= threshold else 'legit') for prediction in clf.\n predict_proba(x_test)])\n print(f'Finished iteration {i} / 10')\n i += 1\n (true_negatives_, true_positives_, false_negatives_, false_positives_\n ) = 0, 0, 0, 0\n for prediction, answer in zip(roc_predictions, answers):\n if prediction == 'spmsg':\n if answer == 'spmsg':\n true_positives_ += 1\n else:\n false_positives_ += 1\n elif answer == 'legit':\n true_negatives_ += 1\n else:\n false_negatives_ += 1\n roc_point_ = 1 - true_negatives_ / (true_negatives_ + false_positives_\n ), true_positives_ / (true_positives_ + false_negatives_)\n return roc_point_\n\n\ndef get_cv_score(clf, x_set, y_set):\n loo = LeaveOneOut()\n vectorizer = CountVectorizer(ngram_range=n_gram_range)\n predictions = np.empty(0)\n answers = np.empty(0)\n i = 1\n for train_index, test_index in loo.split(x_set):\n x_train = [obj for partition in x_set[train_index] for obj in partition\n ]\n x_test = [obj for partition in x_set[test_index] for obj in partition]\n x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()\n x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(\n x_test):]\n y_train, y_test = y_set[train_index], y_set[test_index]\n clf.fit(x_train, y_train.flatten())\n predictions = np.append(predictions, clf.predict(x_test))\n answers = np.append(answers, y_test)\n print(f'Finished iteration {i} / 10')\n i += 1\n (true_negatives_, true_positives_, false_negatives_, false_positives_\n ) = 0, 0, 0, 0\n for prediction, answer in zip(predictions, answers):\n if prediction == 'spmsg':\n if answer == 'spmsg':\n true_positives_ += 1\n else:\n false_positives_ += 1\n elif answer == 'legit':\n true_negatives_ += 1\n else:\n false_negatives_ += 1\n f1_result = f1_score(answers, predictions, average='macro')\n return (f1_result, true_negatives_, true_positives_, false_negatives_,\n false_positives_)\n\n\n<mask token>\nfor part in range(1, 11):\n parts_X.append([])\n parts_Y.append([])\n for file in listdir(f'messages/part{part}'):\n f = open(f'messages/part{part}/{file}', 'r')\n one = parse_doc_line(f.readline())\n f.readline()\n two = parse_doc_line(f.readline())\n curr_obj = one + ' ' + two\n parts_Y[-1].append(re.findall('\\\\D+', file)[0])\n parts_X[-1].append(curr_obj)\n f.close()\n<mask token>\nfor thresh in range(0, 11):\n roc_points.append(get_roc_point(MultinomialNB(alpha=alpha_smoothing),\n np.array(parts_X), np.array(parts_Y), thresh / 10))\n<mask token>\nfor lambda_ratio in lambda_ratios:\n (f1, true_negatives, true_positives, false_negatives, false_positives) = (\n get_cv_score(MultinomialNB(class_prior=(lambda_ratio, 1), 
alpha=\n alpha_smoothing), np.array(parts_X), np.array(parts_Y)))\n print(\n f\"\"\"F1 score: {f1}\n True negatives: {true_negatives}\n True positives: {true_positives}\n False negatives: {false_negatives}\n False positives: {false_positives}\"\"\"\n )\n f1_points.append(f1)\n true_positives_list.append(true_positives)\n false_positives_list.append(false_positives)\n true_negatives_list.append(true_negatives)\n false_negatives_list.append(false_negatives)\n<mask token>\nplts[0].margins(0.0)\nplts[0].set_ylim(ymin=0)\nplts[0].plot([point[0] for point in roc_points], [point[1] for point in\n roc_points])\nplts[0].set_ylabel('Roc Curve')\nplts[1].set_xscale('log')\nplts[1].plot(lambda_ratios, f1_points, '-b')\nplts[1].set_ylabel('F1 score')\nplts[1].set_xlim(xmin=1)\nplts[2].set_xscale('log')\nplts[2].set_yscale('log')\nplts[2].plot(lambda_ratios, true_positives_list, '-r', label='True positives')\nplts[2].plot(lambda_ratios, false_positives_list, '-g', label='False positives'\n )\nplts[2].plot(lambda_ratios, true_negatives_list, '-b', label='True negatives')\nplts[2].plot(lambda_ratios, false_negatives_list, '-y', label='False negatives'\n )\nplts[2].legend(loc='upper right')\nplts[2].set_xlabel('Lambda_legit / Lambda_spam')\nplts[2].set_xlim(xmin=1)\nplt.show()\n",
"step-3": "<mask token>\nn_gram_range = 1, 1\nalpha_smoothing = 1e-10\nlambdas_best = [1e+190, 1]\n\n\ndef parse_doc_line(line):\n parsed = re.search('\\\\d[\\\\d\\\\s]+\\\\d', line)\n return 'empty' if parsed is None else parsed[0]\n\n\ndef get_roc_point(clf, x_set, y_set, threshold):\n loo = LeaveOneOut()\n vectorizer = CountVectorizer(ngram_range=n_gram_range)\n roc_predictions = np.empty(0)\n answers = np.empty(0)\n i = 1\n for train_index, test_index in loo.split(x_set):\n x_train = [obj for partition in x_set[train_index] for obj in partition\n ]\n x_test = [obj for partition in x_set[test_index] for obj in partition]\n x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()\n x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(\n x_test):]\n y_train, y_test = y_set[train_index], y_set[test_index]\n clf.fit(x_train, y_train.flatten())\n answers = np.append(answers, y_test)\n roc_predictions = np.append(roc_predictions, [('spmsg' if \n prediction[0] <= threshold else 'legit') for prediction in clf.\n predict_proba(x_test)])\n print(f'Finished iteration {i} / 10')\n i += 1\n (true_negatives_, true_positives_, false_negatives_, false_positives_\n ) = 0, 0, 0, 0\n for prediction, answer in zip(roc_predictions, answers):\n if prediction == 'spmsg':\n if answer == 'spmsg':\n true_positives_ += 1\n else:\n false_positives_ += 1\n elif answer == 'legit':\n true_negatives_ += 1\n else:\n false_negatives_ += 1\n roc_point_ = 1 - true_negatives_ / (true_negatives_ + false_positives_\n ), true_positives_ / (true_positives_ + false_negatives_)\n return roc_point_\n\n\ndef get_cv_score(clf, x_set, y_set):\n loo = LeaveOneOut()\n vectorizer = CountVectorizer(ngram_range=n_gram_range)\n predictions = np.empty(0)\n answers = np.empty(0)\n i = 1\n for train_index, test_index in loo.split(x_set):\n x_train = [obj for partition in x_set[train_index] for obj in partition\n ]\n x_test = [obj for partition in x_set[test_index] for obj in partition]\n x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()\n x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(\n x_test):]\n y_train, y_test = y_set[train_index], y_set[test_index]\n clf.fit(x_train, y_train.flatten())\n predictions = np.append(predictions, clf.predict(x_test))\n answers = np.append(answers, y_test)\n print(f'Finished iteration {i} / 10')\n i += 1\n (true_negatives_, true_positives_, false_negatives_, false_positives_\n ) = 0, 0, 0, 0\n for prediction, answer in zip(predictions, answers):\n if prediction == 'spmsg':\n if answer == 'spmsg':\n true_positives_ += 1\n else:\n false_positives_ += 1\n elif answer == 'legit':\n true_negatives_ += 1\n else:\n false_negatives_ += 1\n f1_result = f1_score(answers, predictions, average='macro')\n return (f1_result, true_negatives_, true_positives_, false_negatives_,\n false_positives_)\n\n\nparts_X = []\nparts_Y = []\nfor part in range(1, 11):\n parts_X.append([])\n parts_Y.append([])\n for file in listdir(f'messages/part{part}'):\n f = open(f'messages/part{part}/{file}', 'r')\n one = parse_doc_line(f.readline())\n f.readline()\n two = parse_doc_line(f.readline())\n curr_obj = one + ' ' + two\n parts_Y[-1].append(re.findall('\\\\D+', file)[0])\n parts_X[-1].append(curr_obj)\n f.close()\nroc_points = []\nfor thresh in range(0, 11):\n roc_points.append(get_roc_point(MultinomialNB(alpha=alpha_smoothing),\n np.array(parts_X), np.array(parts_Y), thresh / 10))\nf1_points = []\ntrue_positives_list = []\nfalse_positives_list = []\ntrue_negatives_list = 
[]\nfalse_negatives_list = []\nlambda_ratios = [1, 100000.0, 10000000000.0, 1e+20, 1e+40, 1e+80, 1e+160, \n 1e+190]\nfor lambda_ratio in lambda_ratios:\n (f1, true_negatives, true_positives, false_negatives, false_positives) = (\n get_cv_score(MultinomialNB(class_prior=(lambda_ratio, 1), alpha=\n alpha_smoothing), np.array(parts_X), np.array(parts_Y)))\n print(\n f\"\"\"F1 score: {f1}\n True negatives: {true_negatives}\n True positives: {true_positives}\n False negatives: {false_negatives}\n False positives: {false_positives}\"\"\"\n )\n f1_points.append(f1)\n true_positives_list.append(true_positives)\n false_positives_list.append(false_positives)\n true_negatives_list.append(true_negatives)\n false_negatives_list.append(false_negatives)\nfig, plts = plt.subplots(3)\nplts[0].margins(0.0)\nplts[0].set_ylim(ymin=0)\nplts[0].plot([point[0] for point in roc_points], [point[1] for point in\n roc_points])\nplts[0].set_ylabel('Roc Curve')\nplts[1].set_xscale('log')\nplts[1].plot(lambda_ratios, f1_points, '-b')\nplts[1].set_ylabel('F1 score')\nplts[1].set_xlim(xmin=1)\nplts[2].set_xscale('log')\nplts[2].set_yscale('log')\nplts[2].plot(lambda_ratios, true_positives_list, '-r', label='True positives')\nplts[2].plot(lambda_ratios, false_positives_list, '-g', label='False positives'\n )\nplts[2].plot(lambda_ratios, true_negatives_list, '-b', label='True negatives')\nplts[2].plot(lambda_ratios, false_negatives_list, '-y', label='False negatives'\n )\nplts[2].legend(loc='upper right')\nplts[2].set_xlabel('Lambda_legit / Lambda_spam')\nplts[2].set_xlim(xmin=1)\nplt.show()\n",
"step-4": "from os import listdir\nimport re\nimport numpy as np\nfrom sklearn.metrics import f1_score\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import LeaveOneOut\nimport matplotlib.pyplot as plt\nn_gram_range = 1, 1\nalpha_smoothing = 1e-10\nlambdas_best = [1e+190, 1]\n\n\ndef parse_doc_line(line):\n parsed = re.search('\\\\d[\\\\d\\\\s]+\\\\d', line)\n return 'empty' if parsed is None else parsed[0]\n\n\ndef get_roc_point(clf, x_set, y_set, threshold):\n loo = LeaveOneOut()\n vectorizer = CountVectorizer(ngram_range=n_gram_range)\n roc_predictions = np.empty(0)\n answers = np.empty(0)\n i = 1\n for train_index, test_index in loo.split(x_set):\n x_train = [obj for partition in x_set[train_index] for obj in partition\n ]\n x_test = [obj for partition in x_set[test_index] for obj in partition]\n x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()\n x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(\n x_test):]\n y_train, y_test = y_set[train_index], y_set[test_index]\n clf.fit(x_train, y_train.flatten())\n answers = np.append(answers, y_test)\n roc_predictions = np.append(roc_predictions, [('spmsg' if \n prediction[0] <= threshold else 'legit') for prediction in clf.\n predict_proba(x_test)])\n print(f'Finished iteration {i} / 10')\n i += 1\n (true_negatives_, true_positives_, false_negatives_, false_positives_\n ) = 0, 0, 0, 0\n for prediction, answer in zip(roc_predictions, answers):\n if prediction == 'spmsg':\n if answer == 'spmsg':\n true_positives_ += 1\n else:\n false_positives_ += 1\n elif answer == 'legit':\n true_negatives_ += 1\n else:\n false_negatives_ += 1\n roc_point_ = 1 - true_negatives_ / (true_negatives_ + false_positives_\n ), true_positives_ / (true_positives_ + false_negatives_)\n return roc_point_\n\n\ndef get_cv_score(clf, x_set, y_set):\n loo = LeaveOneOut()\n vectorizer = CountVectorizer(ngram_range=n_gram_range)\n predictions = np.empty(0)\n answers = np.empty(0)\n i = 1\n for train_index, test_index in loo.split(x_set):\n x_train = [obj for partition in x_set[train_index] for obj in partition\n ]\n x_test = [obj for partition in x_set[test_index] for obj in partition]\n x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()\n x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(\n x_test):]\n y_train, y_test = y_set[train_index], y_set[test_index]\n clf.fit(x_train, y_train.flatten())\n predictions = np.append(predictions, clf.predict(x_test))\n answers = np.append(answers, y_test)\n print(f'Finished iteration {i} / 10')\n i += 1\n (true_negatives_, true_positives_, false_negatives_, false_positives_\n ) = 0, 0, 0, 0\n for prediction, answer in zip(predictions, answers):\n if prediction == 'spmsg':\n if answer == 'spmsg':\n true_positives_ += 1\n else:\n false_positives_ += 1\n elif answer == 'legit':\n true_negatives_ += 1\n else:\n false_negatives_ += 1\n f1_result = f1_score(answers, predictions, average='macro')\n return (f1_result, true_negatives_, true_positives_, false_negatives_,\n false_positives_)\n\n\nparts_X = []\nparts_Y = []\nfor part in range(1, 11):\n parts_X.append([])\n parts_Y.append([])\n for file in listdir(f'messages/part{part}'):\n f = open(f'messages/part{part}/{file}', 'r')\n one = parse_doc_line(f.readline())\n f.readline()\n two = parse_doc_line(f.readline())\n curr_obj = one + ' ' + two\n parts_Y[-1].append(re.findall('\\\\D+', file)[0])\n parts_X[-1].append(curr_obj)\n 
f.close()\nroc_points = []\nfor thresh in range(0, 11):\n roc_points.append(get_roc_point(MultinomialNB(alpha=alpha_smoothing),\n np.array(parts_X), np.array(parts_Y), thresh / 10))\nf1_points = []\ntrue_positives_list = []\nfalse_positives_list = []\ntrue_negatives_list = []\nfalse_negatives_list = []\nlambda_ratios = [1, 100000.0, 10000000000.0, 1e+20, 1e+40, 1e+80, 1e+160, \n 1e+190]\nfor lambda_ratio in lambda_ratios:\n (f1, true_negatives, true_positives, false_negatives, false_positives) = (\n get_cv_score(MultinomialNB(class_prior=(lambda_ratio, 1), alpha=\n alpha_smoothing), np.array(parts_X), np.array(parts_Y)))\n print(\n f\"\"\"F1 score: {f1}\n True negatives: {true_negatives}\n True positives: {true_positives}\n False negatives: {false_negatives}\n False positives: {false_positives}\"\"\"\n )\n f1_points.append(f1)\n true_positives_list.append(true_positives)\n false_positives_list.append(false_positives)\n true_negatives_list.append(true_negatives)\n false_negatives_list.append(false_negatives)\nfig, plts = plt.subplots(3)\nplts[0].margins(0.0)\nplts[0].set_ylim(ymin=0)\nplts[0].plot([point[0] for point in roc_points], [point[1] for point in\n roc_points])\nplts[0].set_ylabel('Roc Curve')\nplts[1].set_xscale('log')\nplts[1].plot(lambda_ratios, f1_points, '-b')\nplts[1].set_ylabel('F1 score')\nplts[1].set_xlim(xmin=1)\nplts[2].set_xscale('log')\nplts[2].set_yscale('log')\nplts[2].plot(lambda_ratios, true_positives_list, '-r', label='True positives')\nplts[2].plot(lambda_ratios, false_positives_list, '-g', label='False positives'\n )\nplts[2].plot(lambda_ratios, true_negatives_list, '-b', label='True negatives')\nplts[2].plot(lambda_ratios, false_negatives_list, '-y', label='False negatives'\n )\nplts[2].legend(loc='upper right')\nplts[2].set_xlabel('Lambda_legit / Lambda_spam')\nplts[2].set_xlim(xmin=1)\nplt.show()\n",
"step-5": "from os import listdir\nimport re\nimport numpy as np\nfrom sklearn.metrics import f1_score\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import LeaveOneOut\nimport matplotlib.pyplot as plt\n\nn_gram_range = (1, 1)\nalpha_smoothing = 1e-10\nlambdas_best = [1e190, 1]\n\n\ndef parse_doc_line(line):\n parsed = re.search(r'\\d[\\d\\s]+\\d', line)\n return \"empty\" if parsed is None else parsed[0]\n\n\ndef get_roc_point(clf, x_set, y_set, threshold):\n loo = LeaveOneOut()\n vectorizer = CountVectorizer(ngram_range=n_gram_range)\n roc_predictions = np.empty(0)\n answers = np.empty(0)\n\n i = 1\n for train_index, test_index in loo.split(x_set):\n x_train = [obj for partition in x_set[train_index] for obj in partition]\n x_test = [obj for partition in x_set[test_index] for obj in partition]\n x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()\n x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(x_test):]\n y_train, y_test = y_set[train_index], y_set[test_index]\n clf.fit(x_train, y_train.flatten())\n answers = np.append(answers, y_test)\n roc_predictions = np.append(roc_predictions,\n ['spmsg' if prediction[0] <= threshold else 'legit' for prediction in\n clf.predict_proba(x_test)])\n print(f'Finished iteration {i} / 10')\n i += 1\n\n true_negatives_, true_positives_, false_negatives_, false_positives_ = 0, 0, 0, 0\n for prediction, answer in zip(roc_predictions, answers):\n if prediction == 'spmsg':\n if answer == 'spmsg':\n true_positives_ += 1\n else:\n false_positives_ += 1\n else:\n if answer == 'legit':\n true_negatives_ += 1\n else:\n false_negatives_ += 1\n roc_point_ = (\n 1 - (true_negatives_ / (true_negatives_ + false_positives_)),\n true_positives_ / (true_positives_ + false_negatives_))\n return roc_point_\n\n\ndef get_cv_score(clf, x_set, y_set):\n loo = LeaveOneOut()\n vectorizer = CountVectorizer(ngram_range=n_gram_range)\n predictions = np.empty(0)\n answers = np.empty(0)\n\n i = 1\n for train_index, test_index in loo.split(x_set):\n x_train = [obj for partition in x_set[train_index] for obj in partition]\n x_test = [obj for partition in x_set[test_index] for obj in partition]\n x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()\n x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(x_test):]\n y_train, y_test = y_set[train_index], y_set[test_index]\n clf.fit(x_train, y_train.flatten())\n predictions = np.append(predictions, clf.predict(x_test))\n answers = np.append(answers, y_test)\n print(f'Finished iteration {i} / 10')\n i += 1\n\n true_negatives_, true_positives_, false_negatives_, false_positives_ = 0, 0, 0, 0\n for prediction, answer in zip(predictions, answers):\n if prediction == 'spmsg':\n if answer == 'spmsg':\n true_positives_ += 1\n else:\n false_positives_ += 1\n else:\n if answer == 'legit':\n true_negatives_ += 1\n else:\n false_negatives_ += 1\n f1_result = f1_score(answers, predictions, average='macro')\n return f1_result, true_negatives_, true_positives_, false_negatives_, false_positives_\n\n\nparts_X = []\nparts_Y = []\n\nfor part in range(1, 11):\n parts_X.append([])\n parts_Y.append([])\n for file in listdir(f'messages/part{part}'):\n f = open(f'messages/part{part}/{file}', \"r\")\n one = parse_doc_line(f.readline())\n f.readline()\n two = parse_doc_line(f.readline())\n curr_obj = one + \" \" + two\n parts_Y[-1].append(re.findall(r'\\D+', file)[0])\n parts_X[-1].append(curr_obj)\n 
f.close()\n\nroc_points = []\nfor thresh in range(0, 11):\n roc_points.append(get_roc_point(\n MultinomialNB(alpha=alpha_smoothing), np.array(parts_X), np.array(parts_Y), thresh / 10))\n\nf1_points = []\ntrue_positives_list = []\nfalse_positives_list = []\ntrue_negatives_list = []\nfalse_negatives_list = []\nlambda_ratios = [1, 1e5, 1e10, 1e20, 1e40, 1e80, 1e160, 1e190]\nfor lambda_ratio in lambda_ratios:\n f1, true_negatives, true_positives, false_negatives, false_positives = get_cv_score(\n MultinomialNB(class_prior=(lambda_ratio, 1), alpha=alpha_smoothing), np.array(parts_X), np.array(parts_Y))\n print(f'F1 score: {f1}\\n True negatives: {true_negatives}\\n True positives: {true_positives}\\n False negatives: '\n f'{false_negatives}\\n False positives: {false_positives}')\n f1_points.append(f1)\n true_positives_list.append(true_positives)\n false_positives_list.append(false_positives)\n true_negatives_list.append(true_negatives)\n false_negatives_list.append(false_negatives)\n\nfig, plts = plt.subplots(3)\nplts[0].margins(0.0)\nplts[0].set_ylim(ymin=0)\nplts[0].plot([point[0] for point in roc_points], [point[1] for point in roc_points])\nplts[0].set_ylabel('Roc Curve')\n\nplts[1].set_xscale('log')\nplts[1].plot(lambda_ratios, f1_points, '-b')\nplts[1].set_ylabel('F1 score')\nplts[1].set_xlim(xmin=1)\n\nplts[2].set_xscale('log')\nplts[2].set_yscale('log')\nplts[2].plot(lambda_ratios, true_positives_list, '-r', label='True positives')\nplts[2].plot(lambda_ratios, false_positives_list, '-g', label='False positives')\nplts[2].plot(lambda_ratios, true_negatives_list, '-b', label='True negatives')\nplts[2].plot(lambda_ratios, false_negatives_list, '-y', label='False negatives')\nplts[2].legend(loc=\"upper right\")\nplts[2].set_xlabel('Lambda_legit / Lambda_spam')\nplts[2].set_xlim(xmin=1)\nplt.show()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def create_file(out_path, ref_path):
os.makedirs(out_path, exist_ok=True)
copyfile(os.path.join(ref_path, 'attributes.json'), os.path.join(
out_path, 'attributes.json'))
def copy_to_scratch(in_path, out_path, out_key):
if out_key in z5py.File(out_path, 'r'):
return
in_key = 'setup0/timepoint0/s0'
copytree(os.path.join(in_path, in_key), os.path.join(out_path, out_key))
def prepare_scratch():
os.makedirs(SCRATCH, exist_ok=True)
for name in ('rat', 'human'):
for split in ('train', 'val', 'test'):
print('Copying', name, split)
out_path = os.path.join(SCRATCH, f'{name}_{split}.n5')
raw_path = os.path.join(ROOT, f'{name}_{split}', 'images',
'local', 'em-raw.n5')
create_file(out_path, raw_path)
copy_to_scratch(raw_path, out_path, 'raw')
label_path = os.path.join(ROOT, f'{name}_{split}', 'images',
'local', 'em-mitos.n5')
if os.path.exists(label_path):
copy_to_scratch(label_path, out_path, 'labels')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def create_file(out_path, ref_path):
os.makedirs(out_path, exist_ok=True)
copyfile(os.path.join(ref_path, 'attributes.json'), os.path.join(
out_path, 'attributes.json'))
def copy_to_scratch(in_path, out_path, out_key):
if out_key in z5py.File(out_path, 'r'):
return
in_key = 'setup0/timepoint0/s0'
copytree(os.path.join(in_path, in_key), os.path.join(out_path, out_key))
def prepare_scratch():
os.makedirs(SCRATCH, exist_ok=True)
for name in ('rat', 'human'):
for split in ('train', 'val', 'test'):
print('Copying', name, split)
out_path = os.path.join(SCRATCH, f'{name}_{split}.n5')
raw_path = os.path.join(ROOT, f'{name}_{split}', 'images',
'local', 'em-raw.n5')
create_file(out_path, raw_path)
copy_to_scratch(raw_path, out_path, 'raw')
label_path = os.path.join(ROOT, f'{name}_{split}', 'images',
'local', 'em-mitos.n5')
if os.path.exists(label_path):
copy_to_scratch(label_path, out_path, 'labels')
def make_small_volume():
in_path = './data/human_train.n5'
f = z5py.File(in_path, 'r')
ds_r = f['raw']
ds_l = f['labels']
halo = [32, 256, 256]
shape = ds_r.shape
bb = tuple(slice(sh // 2 - ha, sh // 2 + ha) for sh, ha in zip(shape, halo)
)
raw = ds_r[bb]
labels = ds_l[bb]
out_path = './data/small.n5'
with z5py.File(out_path, 'a') as f:
f.create_dataset('raw', data=raw, compression='gzip', chunks=ds_r.
chunks)
f.create_dataset('labels', data=labels, compression='gzip', chunks=
ds_l.chunks)
if __name__ == '__main__':
prepare_scratch()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ROOT = '/g/kreshuk/pape/Work/data/mito_em/data'
SCRATCH = '/scratch/pape/mito_em/data'
def create_file(out_path, ref_path):
os.makedirs(out_path, exist_ok=True)
copyfile(os.path.join(ref_path, 'attributes.json'), os.path.join(
out_path, 'attributes.json'))
def copy_to_scratch(in_path, out_path, out_key):
if out_key in z5py.File(out_path, 'r'):
return
in_key = 'setup0/timepoint0/s0'
copytree(os.path.join(in_path, in_key), os.path.join(out_path, out_key))
def prepare_scratch():
os.makedirs(SCRATCH, exist_ok=True)
for name in ('rat', 'human'):
for split in ('train', 'val', 'test'):
print('Copying', name, split)
out_path = os.path.join(SCRATCH, f'{name}_{split}.n5')
raw_path = os.path.join(ROOT, f'{name}_{split}', 'images',
'local', 'em-raw.n5')
create_file(out_path, raw_path)
copy_to_scratch(raw_path, out_path, 'raw')
label_path = os.path.join(ROOT, f'{name}_{split}', 'images',
'local', 'em-mitos.n5')
if os.path.exists(label_path):
copy_to_scratch(label_path, out_path, 'labels')
def make_small_volume():
in_path = './data/human_train.n5'
f = z5py.File(in_path, 'r')
ds_r = f['raw']
ds_l = f['labels']
halo = [32, 256, 256]
shape = ds_r.shape
bb = tuple(slice(sh // 2 - ha, sh // 2 + ha) for sh, ha in zip(shape, halo)
)
raw = ds_r[bb]
labels = ds_l[bb]
out_path = './data/small.n5'
with z5py.File(out_path, 'a') as f:
f.create_dataset('raw', data=raw, compression='gzip', chunks=ds_r.
chunks)
f.create_dataset('labels', data=labels, compression='gzip', chunks=
ds_l.chunks)
if __name__ == '__main__':
prepare_scratch()
<|reserved_special_token_1|>
import os
import z5py
from shutil import copytree, copyfile
ROOT = '/g/kreshuk/pape/Work/data/mito_em/data'
SCRATCH = '/scratch/pape/mito_em/data'
def create_file(out_path, ref_path):
os.makedirs(out_path, exist_ok=True)
copyfile(os.path.join(ref_path, 'attributes.json'), os.path.join(
out_path, 'attributes.json'))
def copy_to_scratch(in_path, out_path, out_key):
if out_key in z5py.File(out_path, 'r'):
return
in_key = 'setup0/timepoint0/s0'
copytree(os.path.join(in_path, in_key), os.path.join(out_path, out_key))
def prepare_scratch():
os.makedirs(SCRATCH, exist_ok=True)
for name in ('rat', 'human'):
for split in ('train', 'val', 'test'):
print('Copying', name, split)
out_path = os.path.join(SCRATCH, f'{name}_{split}.n5')
raw_path = os.path.join(ROOT, f'{name}_{split}', 'images',
'local', 'em-raw.n5')
create_file(out_path, raw_path)
copy_to_scratch(raw_path, out_path, 'raw')
label_path = os.path.join(ROOT, f'{name}_{split}', 'images',
'local', 'em-mitos.n5')
if os.path.exists(label_path):
copy_to_scratch(label_path, out_path, 'labels')
def make_small_volume():
in_path = './data/human_train.n5'
f = z5py.File(in_path, 'r')
ds_r = f['raw']
ds_l = f['labels']
halo = [32, 256, 256]
shape = ds_r.shape
bb = tuple(slice(sh // 2 - ha, sh // 2 + ha) for sh, ha in zip(shape, halo)
)
raw = ds_r[bb]
labels = ds_l[bb]
out_path = './data/small.n5'
with z5py.File(out_path, 'a') as f:
f.create_dataset('raw', data=raw, compression='gzip', chunks=ds_r.
chunks)
f.create_dataset('labels', data=labels, compression='gzip', chunks=
ds_l.chunks)
if __name__ == '__main__':
prepare_scratch()
<|reserved_special_token_1|>
import os
import z5py
from shutil import copytree, copyfile
ROOT = '/g/kreshuk/pape/Work/data/mito_em/data'
SCRATCH = '/scratch/pape/mito_em/data'
def create_file(out_path, ref_path):
os.makedirs(out_path, exist_ok=True)
copyfile(
os.path.join(ref_path, 'attributes.json'),
os.path.join(out_path, 'attributes.json')
)
def copy_to_scratch(in_path, out_path, out_key):
if out_key in z5py.File(out_path, 'r'):
return
in_key = 'setup0/timepoint0/s0'
copytree(
os.path.join(in_path, in_key),
os.path.join(out_path, out_key)
)
# copy training, test and val data to scratch
def prepare_scratch():
os.makedirs(SCRATCH, exist_ok=True)
for name in ('rat', 'human'):
for split in ('train', 'val', 'test'):
print("Copying", name, split)
out_path = os.path.join(SCRATCH, f'{name}_{split}.n5')
raw_path = os.path.join(ROOT, f'{name}_{split}', 'images', 'local', 'em-raw.n5')
create_file(out_path, raw_path)
copy_to_scratch(raw_path, out_path, 'raw')
label_path = os.path.join(ROOT, f'{name}_{split}', 'images', 'local', 'em-mitos.n5')
if os.path.exists(label_path):
copy_to_scratch(label_path, out_path, 'labels')
def make_small_volume():
in_path = './data/human_train.n5'
f = z5py.File(in_path, 'r')
ds_r = f['raw']
ds_l = f['labels']
halo = [32, 256, 256]
shape = ds_r.shape
bb = tuple(slice(sh // 2 - ha, sh // 2 + ha) for sh, ha in zip(shape, halo))
raw = ds_r[bb]
labels = ds_l[bb]
out_path = './data/small.n5'
with z5py.File(out_path, 'a') as f:
f.create_dataset('raw', data=raw, compression='gzip', chunks=ds_r.chunks)
f.create_dataset('labels', data=labels, compression='gzip', chunks=ds_l.chunks)
if __name__ == '__main__':
prepare_scratch()
# make_small_volume()
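# Worked example (added, hypothetical shape): for a (100, 1024, 1024) volume and
# halo = [32, 256, 256], the bounding box built in make_small_volume() is
# (slice(18, 82), slice(256, 768), slice(256, 768)), i.e. a 64 x 512 x 512
# crop taken symmetrically around the volume centre.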
|
flexible
|
{
"blob_id": "9d3db4ca5bf964c68e9778a3625c842e74bf9dbd",
"index": 1228,
"step-1": "<mask token>\n\n\ndef create_file(out_path, ref_path):\n os.makedirs(out_path, exist_ok=True)\n copyfile(os.path.join(ref_path, 'attributes.json'), os.path.join(\n out_path, 'attributes.json'))\n\n\ndef copy_to_scratch(in_path, out_path, out_key):\n if out_key in z5py.File(out_path, 'r'):\n return\n in_key = 'setup0/timepoint0/s0'\n copytree(os.path.join(in_path, in_key), os.path.join(out_path, out_key))\n\n\ndef prepare_scratch():\n os.makedirs(SCRATCH, exist_ok=True)\n for name in ('rat', 'human'):\n for split in ('train', 'val', 'test'):\n print('Copying', name, split)\n out_path = os.path.join(SCRATCH, f'{name}_{split}.n5')\n raw_path = os.path.join(ROOT, f'{name}_{split}', 'images',\n 'local', 'em-raw.n5')\n create_file(out_path, raw_path)\n copy_to_scratch(raw_path, out_path, 'raw')\n label_path = os.path.join(ROOT, f'{name}_{split}', 'images',\n 'local', 'em-mitos.n5')\n if os.path.exists(label_path):\n copy_to_scratch(label_path, out_path, 'labels')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_file(out_path, ref_path):\n os.makedirs(out_path, exist_ok=True)\n copyfile(os.path.join(ref_path, 'attributes.json'), os.path.join(\n out_path, 'attributes.json'))\n\n\ndef copy_to_scratch(in_path, out_path, out_key):\n if out_key in z5py.File(out_path, 'r'):\n return\n in_key = 'setup0/timepoint0/s0'\n copytree(os.path.join(in_path, in_key), os.path.join(out_path, out_key))\n\n\ndef prepare_scratch():\n os.makedirs(SCRATCH, exist_ok=True)\n for name in ('rat', 'human'):\n for split in ('train', 'val', 'test'):\n print('Copying', name, split)\n out_path = os.path.join(SCRATCH, f'{name}_{split}.n5')\n raw_path = os.path.join(ROOT, f'{name}_{split}', 'images',\n 'local', 'em-raw.n5')\n create_file(out_path, raw_path)\n copy_to_scratch(raw_path, out_path, 'raw')\n label_path = os.path.join(ROOT, f'{name}_{split}', 'images',\n 'local', 'em-mitos.n5')\n if os.path.exists(label_path):\n copy_to_scratch(label_path, out_path, 'labels')\n\n\ndef make_small_volume():\n in_path = './data/human_train.n5'\n f = z5py.File(in_path, 'r')\n ds_r = f['raw']\n ds_l = f['labels']\n halo = [32, 256, 256]\n shape = ds_r.shape\n bb = tuple(slice(sh // 2 - ha, sh // 2 + ha) for sh, ha in zip(shape, halo)\n )\n raw = ds_r[bb]\n labels = ds_l[bb]\n out_path = './data/small.n5'\n with z5py.File(out_path, 'a') as f:\n f.create_dataset('raw', data=raw, compression='gzip', chunks=ds_r.\n chunks)\n f.create_dataset('labels', data=labels, compression='gzip', chunks=\n ds_l.chunks)\n\n\nif __name__ == '__main__':\n prepare_scratch()\n",
"step-3": "<mask token>\nROOT = '/g/kreshuk/pape/Work/data/mito_em/data'\nSCRATCH = '/scratch/pape/mito_em/data'\n\n\ndef create_file(out_path, ref_path):\n os.makedirs(out_path, exist_ok=True)\n copyfile(os.path.join(ref_path, 'attributes.json'), os.path.join(\n out_path, 'attributes.json'))\n\n\ndef copy_to_scratch(in_path, out_path, out_key):\n if out_key in z5py.File(out_path, 'r'):\n return\n in_key = 'setup0/timepoint0/s0'\n copytree(os.path.join(in_path, in_key), os.path.join(out_path, out_key))\n\n\ndef prepare_scratch():\n os.makedirs(SCRATCH, exist_ok=True)\n for name in ('rat', 'human'):\n for split in ('train', 'val', 'test'):\n print('Copying', name, split)\n out_path = os.path.join(SCRATCH, f'{name}_{split}.n5')\n raw_path = os.path.join(ROOT, f'{name}_{split}', 'images',\n 'local', 'em-raw.n5')\n create_file(out_path, raw_path)\n copy_to_scratch(raw_path, out_path, 'raw')\n label_path = os.path.join(ROOT, f'{name}_{split}', 'images',\n 'local', 'em-mitos.n5')\n if os.path.exists(label_path):\n copy_to_scratch(label_path, out_path, 'labels')\n\n\ndef make_small_volume():\n in_path = './data/human_train.n5'\n f = z5py.File(in_path, 'r')\n ds_r = f['raw']\n ds_l = f['labels']\n halo = [32, 256, 256]\n shape = ds_r.shape\n bb = tuple(slice(sh // 2 - ha, sh // 2 + ha) for sh, ha in zip(shape, halo)\n )\n raw = ds_r[bb]\n labels = ds_l[bb]\n out_path = './data/small.n5'\n with z5py.File(out_path, 'a') as f:\n f.create_dataset('raw', data=raw, compression='gzip', chunks=ds_r.\n chunks)\n f.create_dataset('labels', data=labels, compression='gzip', chunks=\n ds_l.chunks)\n\n\nif __name__ == '__main__':\n prepare_scratch()\n",
"step-4": "import os\nimport z5py\nfrom shutil import copytree, copyfile\nROOT = '/g/kreshuk/pape/Work/data/mito_em/data'\nSCRATCH = '/scratch/pape/mito_em/data'\n\n\ndef create_file(out_path, ref_path):\n os.makedirs(out_path, exist_ok=True)\n copyfile(os.path.join(ref_path, 'attributes.json'), os.path.join(\n out_path, 'attributes.json'))\n\n\ndef copy_to_scratch(in_path, out_path, out_key):\n if out_key in z5py.File(out_path, 'r'):\n return\n in_key = 'setup0/timepoint0/s0'\n copytree(os.path.join(in_path, in_key), os.path.join(out_path, out_key))\n\n\ndef prepare_scratch():\n os.makedirs(SCRATCH, exist_ok=True)\n for name in ('rat', 'human'):\n for split in ('train', 'val', 'test'):\n print('Copying', name, split)\n out_path = os.path.join(SCRATCH, f'{name}_{split}.n5')\n raw_path = os.path.join(ROOT, f'{name}_{split}', 'images',\n 'local', 'em-raw.n5')\n create_file(out_path, raw_path)\n copy_to_scratch(raw_path, out_path, 'raw')\n label_path = os.path.join(ROOT, f'{name}_{split}', 'images',\n 'local', 'em-mitos.n5')\n if os.path.exists(label_path):\n copy_to_scratch(label_path, out_path, 'labels')\n\n\ndef make_small_volume():\n in_path = './data/human_train.n5'\n f = z5py.File(in_path, 'r')\n ds_r = f['raw']\n ds_l = f['labels']\n halo = [32, 256, 256]\n shape = ds_r.shape\n bb = tuple(slice(sh // 2 - ha, sh // 2 + ha) for sh, ha in zip(shape, halo)\n )\n raw = ds_r[bb]\n labels = ds_l[bb]\n out_path = './data/small.n5'\n with z5py.File(out_path, 'a') as f:\n f.create_dataset('raw', data=raw, compression='gzip', chunks=ds_r.\n chunks)\n f.create_dataset('labels', data=labels, compression='gzip', chunks=\n ds_l.chunks)\n\n\nif __name__ == '__main__':\n prepare_scratch()\n",
"step-5": "import os\nimport z5py\nfrom shutil import copytree, copyfile\n\nROOT = '/g/kreshuk/pape/Work/data/mito_em/data'\nSCRATCH = '/scratch/pape/mito_em/data'\n\n\ndef create_file(out_path, ref_path):\n os.makedirs(out_path, exist_ok=True)\n copyfile(\n os.path.join(ref_path, 'attributes.json'),\n os.path.join(out_path, 'attributes.json')\n )\n\n\ndef copy_to_scratch(in_path, out_path, out_key):\n if out_key in z5py.File(out_path, 'r'):\n return\n\n in_key = 'setup0/timepoint0/s0'\n copytree(\n os.path.join(in_path, in_key),\n os.path.join(out_path, out_key)\n )\n\n\n# copy training, test and val data to scratch\ndef prepare_scratch():\n os.makedirs(SCRATCH, exist_ok=True)\n\n for name in ('rat', 'human'):\n for split in ('train', 'val', 'test'):\n print(\"Copying\", name, split)\n out_path = os.path.join(SCRATCH, f'{name}_{split}.n5')\n\n raw_path = os.path.join(ROOT, f'{name}_{split}', 'images', 'local', 'em-raw.n5')\n create_file(out_path, raw_path)\n copy_to_scratch(raw_path, out_path, 'raw')\n\n label_path = os.path.join(ROOT, f'{name}_{split}', 'images', 'local', 'em-mitos.n5')\n if os.path.exists(label_path):\n copy_to_scratch(label_path, out_path, 'labels')\n\n\ndef make_small_volume():\n in_path = './data/human_train.n5'\n f = z5py.File(in_path, 'r')\n ds_r = f['raw']\n ds_l = f['labels']\n\n halo = [32, 256, 256]\n shape = ds_r.shape\n bb = tuple(slice(sh // 2 - ha, sh // 2 + ha) for sh, ha in zip(shape, halo))\n\n raw = ds_r[bb]\n labels = ds_l[bb]\n\n out_path = './data/small.n5'\n with z5py.File(out_path, 'a') as f:\n f.create_dataset('raw', data=raw, compression='gzip', chunks=ds_r.chunks)\n f.create_dataset('labels', data=labels, compression='gzip', chunks=ds_l.chunks)\n\n\nif __name__ == '__main__':\n prepare_scratch()\n # make_small_volume()\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
plt.plot([1, 2, 3, 4, 5], [1, 2, 3, 4, 5], 'go-', label='line 1', linewidth=2)
plt.plot([1, 2, 3, 4, 5], [1, 4, 9, 16, 25], 'rs--', label='line 2',
linewidth=4)
plt.axis([0, 6, 0, 26])
plt.legend(loc='upper right')
plt.show()
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
plt.plot([1, 2, 3, 4, 5], [1, 2, 3, 4, 5], 'go-', label='line 1', linewidth=2)
plt.plot([1, 2, 3, 4, 5], [1, 4, 9, 16, 25], 'rs--', label='line 2',
linewidth=4)
plt.axis([0, 6, 0, 26])
plt.legend(loc='upper right')
plt.show()
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
plt.plot([1, 2, 3, 4, 5], [1, 2, 3, 4, 5],
'go-', label='line 1', linewidth=2)
plt.plot([1, 2, 3, 4, 5], [1, 4, 9, 16, 25],
'rs--', label='line 2', linewidth=4)
plt.axis([0, 6, 0, 26])
plt.legend(loc="upper right")
plt.show()
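# Note (added): the format strings above bundle color, marker and line style;
# 'go-' draws green circles joined by a solid line and 'rs--' draws red squares
# joined by a dashed line. The first call could equivalently be written with
# explicit keyword arguments:
#     plt.plot([1, 2, 3, 4, 5], [1, 2, 3, 4, 5],
#              color='g', marker='o', linestyle='-', label='line 1', linewidth=2)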
|
flexible
|
{
"blob_id": "7eeba06e78bd1e7139b1706574c4d040465d4566",
"index": 4178,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.plot([1, 2, 3, 4, 5], [1, 2, 3, 4, 5], 'go-', label='line 1', linewidth=2)\nplt.plot([1, 2, 3, 4, 5], [1, 4, 9, 16, 25], 'rs--', label='line 2',\n linewidth=4)\nplt.axis([0, 6, 0, 26])\nplt.legend(loc='upper right')\nplt.show()\n",
"step-3": "import matplotlib.pyplot as plt\nplt.plot([1, 2, 3, 4, 5], [1, 2, 3, 4, 5], 'go-', label='line 1', linewidth=2)\nplt.plot([1, 2, 3, 4, 5], [1, 4, 9, 16, 25], 'rs--', label='line 2',\n linewidth=4)\nplt.axis([0, 6, 0, 26])\nplt.legend(loc='upper right')\nplt.show()\n",
"step-4": "import matplotlib.pyplot as plt\n\nplt.plot([1, 2, 3, 4, 5], [1, 2, 3, 4, 5],\n 'go-', label='line 1', linewidth=2)\n\nplt.plot([1, 2, 3, 4, 5], [1, 4, 9, 16, 25],\n 'rs--', label='line 2', linewidth=4)\n\nplt.axis([0, 6, 0, 26])\nplt.legend(loc=\"upper right\")\nplt.show()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def flatten(l):
return [j for i in l for j in i]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def flatten(l):
return [j for i in l for j in i]
def filter_sequences_by_len_from_fasta(file, max_len):
with open(file) as handle:
return [str(record.seq) for record in SeqIO.parse(handle, 'fasta') if
len(record.seq) <= max_len]
<|reserved_special_token_1|>
from Bio import SeqIO
def flatten(l):
return [j for i in l for j in i]
def filter_sequences_by_len_from_fasta(file, max_len):
with open(file) as handle:
return [str(record.seq) for record in SeqIO.parse(handle, 'fasta') if
len(record.seq) <= max_len]
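# Usage sketch (added; 'example.fasta' is a hypothetical path): keep sequences
# of at most 500 residues from one file, then use flatten() to concatenate
# several such per-file lists into one.
# short_seqs = filter_sequences_by_len_from_fasta('example.fasta', max_len=500)
# all_seqs = flatten([short_seqs, short_seqs])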
|
flexible
|
{
"blob_id": "1fdb9db4c1c8b83c72eeb34f10ef9d289b43b79f",
"index": 3166,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef flatten(l):\n return [j for i in l for j in i]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef flatten(l):\n return [j for i in l for j in i]\n\n\ndef filter_sequences_by_len_from_fasta(file, max_len):\n with open(file) as handle:\n return [str(record.seq) for record in SeqIO.parse(handle, 'fasta') if\n len(record.seq) <= max_len]\n",
"step-4": "from Bio import SeqIO\n\n\ndef flatten(l):\n return [j for i in l for j in i]\n\n\ndef filter_sequences_by_len_from_fasta(file, max_len):\n with open(file) as handle:\n return [str(record.seq) for record in SeqIO.parse(handle, 'fasta') if\n len(record.seq) <= max_len]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
input = open('input').read()
stacks_input, instructions = input.split('\n\n')
stacks_input_lines = stacks_input.split('\n')
stack_numbers = map(int, stacks_input_lines[-1].split())
stacks = []
for _ in stack_numbers:
stacks.append([])
for line in stacks_input_lines[:-1]:
    # Crate letters sit at columns 1, 5, 9, ... (every 4th character of the line).
    for stack_index, i in enumerate(range(1, len(line), 4)):
        crate = line[i]
        if crate != ' ':
            stacks[stack_index].insert(0, crate)
for instruction in instructions.strip().split('\n'):
_move, crate_count, _from, from_stack_index, _to, to_stack_index = instruction.split()
crate_count = int(crate_count)
from_stack_index = int(from_stack_index) - 1
to_stack_index = int(to_stack_index) - 1
crates = stacks[from_stack_index][-crate_count:]
stacks[from_stack_index] = stacks[from_stack_index][:-crate_count]
stacks[to_stack_index].extend(reversed(crates))
result = ''
for stack in stacks:
result += stack[-1]
print(result)
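# Worked trace (added, hypothetical input): with stacks [['Z', 'N'], ['M', 'C', 'D'], ['P']]
# and the instruction "move 3 from 2 to 1", the slice takes ['M', 'C', 'D'] off
# stack 2 and extend(reversed(...)) pushes them as 'D', 'C', 'M', giving
# [['Z', 'N', 'D', 'C', 'M'], [], ['P']]; the final answer concatenates the top
# crate of every stack.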
|
normal
|
{
"blob_id": "4927a440093e822250af25dfd6a2ce62d7cc099e",
"index": 8786,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _ in stack_numbers:\n stacks.append([])\nfor line in stacks_input_lines[:-1]:\n for stack_index, i in enumerate(range(1, len(line), 4)):\n crate = line[i]\n if crate != ' ':\n stacks[stack_index].insert(0, crate)\nfor instruction in instructions.strip().split('\\n'):\n _move, crate_count, _from, from_stack_index, _to, to_stack_index = (\n instruction.split())\n crate_count = int(crate_count)\n from_stack_index = int(from_stack_index) - 1\n to_stack_index = int(to_stack_index) - 1\n crates = stacks[from_stack_index][-crate_count:]\n stacks[from_stack_index] = stacks[from_stack_index][:-crate_count]\n stacks[to_stack_index].extend(reversed(crates))\n<mask token>\nfor stack in stacks:\n result += stack[-1]\nprint(result)\n",
"step-3": "input = open('input').read()\nstacks_input, instructions = input.split('\\n\\n')\nstacks_input_lines = stacks_input.split('\\n')\nstack_numbers = map(int, stacks_input_lines[-1].split())\nstacks = []\nfor _ in stack_numbers:\n stacks.append([])\nfor line in stacks_input_lines[:-1]:\n for stack_index, i in enumerate(range(1, len(line), 4)):\n crate = line[i]\n if crate != ' ':\n stacks[stack_index].insert(0, crate)\nfor instruction in instructions.strip().split('\\n'):\n _move, crate_count, _from, from_stack_index, _to, to_stack_index = (\n instruction.split())\n crate_count = int(crate_count)\n from_stack_index = int(from_stack_index) - 1\n to_stack_index = int(to_stack_index) - 1\n crates = stacks[from_stack_index][-crate_count:]\n stacks[from_stack_index] = stacks[from_stack_index][:-crate_count]\n stacks[to_stack_index].extend(reversed(crates))\nresult = ''\nfor stack in stacks:\n result += stack[-1]\nprint(result)\n",
"step-4": "input = open('input').read()\n\nstacks_input, instructions = input.split('\\n\\n')\nstacks_input_lines = stacks_input.split('\\n')\nstack_numbers = map(int, stacks_input_lines[-1].split())\nstacks = []\nfor _ in stack_numbers:\n stacks.append([])\nfor line in stacks_input_lines[:-1]:\n for stack_index, i in enumerate(range(1, len(line), 4)):\n crate = line[i]\n if crate != ' ':\n stacks[stack_index].insert(0, crate)\n\nfor instruction in instructions.strip().split('\\n'):\n _move, crate_count, _from, from_stack_index, _to, to_stack_index = instruction.split()\n crate_count = int(crate_count)\n from_stack_index = int(from_stack_index) - 1\n to_stack_index = int(to_stack_index) - 1\n crates = stacks[from_stack_index][-crate_count:]\n stacks[from_stack_index] = stacks[from_stack_index][:-crate_count]\n stacks[to_stack_index].extend(reversed(crates))\n\nresult = ''\nfor stack in stacks:\n result += stack[-1]\n\nprint(result)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
st.write('hi')
<|reserved_special_token_1|>
import streamlit as st
st.write('hi')
|
flexible
|
{
"blob_id": "62ca95a871c16191fb8f56213646e8173f400630",
"index": 8017,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nst.write('hi')\n",
"step-3": "import streamlit as st\nst.write('hi')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class Rank:
class Stats(object):
"""Holds info used to calculate amount of xp a player gets"""
post_likes = 0
post_dislikes = 0
comment_likes = 0
comment_dislikes = 0
usage = 0
class Interval(object):
"""A class representing an interval. It is always [a, b)."""
def __init__(self, a, b):
self.a = a
self.b = b
def contains(self, n):
            return self.a <= n < self.b
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def from_model(self):
pass
def from_proto(self):
pass
def to_model(self):
pass
def to_proto(self):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Rank:
class Stats(object):
"""Holds info used to calculate amount of xp a player gets"""
post_likes = 0
post_dislikes = 0
comment_likes = 0
comment_dislikes = 0
usage = 0
class Interval(object):
"""A class representing an interval. It is always [a, b)."""
def __init__(self, a, b):
self.a = a
self.b = b
def contains(self, n):
            return self.a <= n < self.b
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _calculate_level(self):
        return math.sqrt(self.LEVEL_RATE * self._xp)
def from_model(self):
pass
def from_proto(self):
pass
def to_model(self):
pass
def to_proto(self):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Rank:
class Stats(object):
"""Holds info used to calculate amount of xp a player gets"""
post_likes = 0
post_dislikes = 0
comment_likes = 0
comment_dislikes = 0
usage = 0
class Interval(object):
"""A class representing an interval. It is always [a, b)."""
def __init__(self, a, b):
self.a = a
self.b = b
def contains(self, n):
            return self.a <= n < self.b
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def consume_stats(self, stats):
        total_arr = [self.STAT_WORTH['post_likes'] * stats.post_likes,
            self.STAT_WORTH['post_dislikes'] * stats.post_dislikes,
            self.STAT_WORTH['comment_likes'] * stats.comment_likes,
            self.STAT_WORTH['comment_dislikes'] * stats.comment_dislikes,
            self.STAT_WORTH['usage'] * stats.usage]
        self._xp = sum(total_arr)
        self._level = self._calculate_level()
    def _calculate_level(self):
        return math.sqrt(self.LEVEL_RATE * self._xp)
def from_model(self):
pass
def from_proto(self):
pass
def to_model(self):
pass
def to_proto(self):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Rank:
class Stats(object):
"""Holds info used to calculate amount of xp a player gets"""
post_likes = 0
post_dislikes = 0
comment_likes = 0
comment_dislikes = 0
usage = 0
class Interval(object):
"""A class representing an interval. It is always [a, b)."""
def __init__(self, a, b):
self.a = a
self.b = b
def contains(self, n):
            return self.a <= n < self.b
XP_INTERVALS = [Interval(0, 100), Interval(100, 250), Interval(250,
1000), Interval(100, 250), Interval(100, 250), Interval(100, 250),
Interval(100, 250), Interval(100, 250), Interval(100, 250),
Interval(100, 250), Interval(100, 250), Interval(100, 250),
Interval(100, 250), Interval(100, 250)]
STAT_WORTH = {'post_likes': 1, 'post_dislikes': -1, 'comment_likes': 1,
'comment_dislikes': -1, 'usage': 1}
LEVEL_RATE = 0.2
def __init__(self):
self._xp = 0
self._level = 0
self._label = ''
def consume_stats(self, stats):
        total_arr = [self.STAT_WORTH['post_likes'] * stats.post_likes,
            self.STAT_WORTH['post_dislikes'] * stats.post_dislikes,
            self.STAT_WORTH['comment_likes'] * stats.comment_likes,
            self.STAT_WORTH['comment_dislikes'] * stats.comment_dislikes,
            self.STAT_WORTH['usage'] * stats.usage]
        self._xp = sum(total_arr)
        self._level = self._calculate_level()
    def _calculate_level(self):
        return math.sqrt(self.LEVEL_RATE * self._xp)
def from_model(self):
pass
def from_proto(self):
pass
def to_model(self):
pass
def to_proto(self):
pass
<|reserved_special_token_1|>
import math
class Rank:
class Stats(object):
'''Holds info used to calculate amount of xp a player gets'''
post_likes = 0
post_dislikes = 0
comment_likes = 0
comment_dislikes = 0
usage = 0
class Interval(object):
'''A class representing an interval. It is always [a, b).'''
def __init__(self, a, b):
self.a = a
self.b = b
def contains(self, n):
            return self.a <= n < self.b
# Each index in this array corresponds to the level for that xp interval.
XP_INTERVALS = [
Interval(0, 100),
Interval(100, 250),
Interval(250, 1000),
Interval(100, 250),
Interval(100, 250),
Interval(100, 250),
Interval(100, 250),
Interval(100, 250),
Interval(100, 250),
Interval(100, 250),
Interval(100, 250),
Interval(100, 250),
Interval(100, 250),
Interval(100, 250),
]
STAT_WORTH = {
'post_likes': 1,
'post_dislikes': -1,
'comment_likes': 1,
'comment_dislikes': -1,
'usage': 1
}
    # Tweaks how far apart the levels are. For example, the closer to zero
    # this is, the further apart the levels.
LEVEL_RATE = 0.2
def __init__(self):
self._xp = 0
self._level = 0
self._label = ''
def consume_stats(self, stats):
total_arr = [
            self.STAT_WORTH['post_likes']*stats.post_likes,
            self.STAT_WORTH['post_dislikes']*stats.post_dislikes,
            self.STAT_WORTH['comment_likes']*stats.comment_likes,
            self.STAT_WORTH['comment_dislikes']*stats.comment_dislikes,
            self.STAT_WORTH['usage']*stats.usage,
        ]
        self._xp = sum(total_arr)
        self._level = self._calculate_level()
    def _calculate_level(self):
        return math.sqrt(self.LEVEL_RATE*self._xp)
def from_model(self):
pass
def from_proto(self):
pass
def to_model(self):
pass
def to_proto(self):
pass
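# Usage sketch (added, illustrative numbers only): with the worth table above,
# 10 net post likes, 5 net comment likes and 5 usage points give xp = 20 and
# level = sqrt(0.2 * 20) = 2.0.
# stats = Rank.Stats()
# stats.post_likes, stats.comment_likes, stats.usage = 10, 5, 5
# rank = Rank()
# rank.consume_stats(stats)   # rank._xp == 20, rank._level == 2.0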
|
flexible
|
{
"blob_id": "cd0b55e163851344273ad020d434cc8662083d19",
"index": 6593,
"step-1": "<mask token>\n\n\nclass Rank:\n\n\n class Stats(object):\n \"\"\"Holds info used to calculate amount of xp a player gets\"\"\"\n post_likes = 0\n post_dislikes = 0\n comment_likes = 0\n comment_dislikes = 0\n usage = 0\n\n\n class Interval(object):\n \"\"\"A class representing an interval. It is always [a, b).\"\"\"\n\n def __init__(self, a, b):\n self.a = a\n self.b = b\n\n def contains(self, n):\n return self.a >= n and n < b\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def from_model(self):\n pass\n\n def from_proto(self):\n pass\n\n def to_model(self):\n pass\n\n def to_proto(self):\n pass\n",
"step-2": "<mask token>\n\n\nclass Rank:\n\n\n class Stats(object):\n \"\"\"Holds info used to calculate amount of xp a player gets\"\"\"\n post_likes = 0\n post_dislikes = 0\n comment_likes = 0\n comment_dislikes = 0\n usage = 0\n\n\n class Interval(object):\n \"\"\"A class representing an interval. It is always [a, b).\"\"\"\n\n def __init__(self, a, b):\n self.a = a\n self.b = b\n\n def contains(self, n):\n return self.a >= n and n < b\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _calculate_level(self):\n return math.sqrt(LEVEL_RATE * self._xp)\n\n def from_model(self):\n pass\n\n def from_proto(self):\n pass\n\n def to_model(self):\n pass\n\n def to_proto(self):\n pass\n",
"step-3": "<mask token>\n\n\nclass Rank:\n\n\n class Stats(object):\n \"\"\"Holds info used to calculate amount of xp a player gets\"\"\"\n post_likes = 0\n post_dislikes = 0\n comment_likes = 0\n comment_dislikes = 0\n usage = 0\n\n\n class Interval(object):\n \"\"\"A class representing an interval. It is always [a, b).\"\"\"\n\n def __init__(self, a, b):\n self.a = a\n self.b = b\n\n def contains(self, n):\n return self.a >= n and n < b\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def consume_stats(self, stats):\n total_arr = [STAT_WORTH['post_likes'] * stats.post_likes, \n STAT_WORTH['post_dislikes'] * stats.post_dislikes, STAT_WORTH[\n 'comment_likes'] * stats.comment_likes, STAT_WORTH[\n 'comment_dislikes'] * stats.comment_dislikes, STAT_WORTH[\n 'usage'] * stats.usage]\n self._xp = sum(total_arr)\n self._level = self._calculate_level()\n\n def _calculate_level(self):\n return math.sqrt(LEVEL_RATE * self._xp)\n\n def from_model(self):\n pass\n\n def from_proto(self):\n pass\n\n def to_model(self):\n pass\n\n def to_proto(self):\n pass\n",
"step-4": "<mask token>\n\n\nclass Rank:\n\n\n class Stats(object):\n \"\"\"Holds info used to calculate amount of xp a player gets\"\"\"\n post_likes = 0\n post_dislikes = 0\n comment_likes = 0\n comment_dislikes = 0\n usage = 0\n\n\n class Interval(object):\n \"\"\"A class representing an interval. It is always [a, b).\"\"\"\n\n def __init__(self, a, b):\n self.a = a\n self.b = b\n\n def contains(self, n):\n return self.a >= n and n < b\n XP_INTERVALS = [Interval(0, 100), Interval(100, 250), Interval(250, \n 1000), Interval(100, 250), Interval(100, 250), Interval(100, 250),\n Interval(100, 250), Interval(100, 250), Interval(100, 250),\n Interval(100, 250), Interval(100, 250), Interval(100, 250),\n Interval(100, 250), Interval(100, 250)]\n STAT_WORTH = {'post_likes': 1, 'post_dislikes': -1, 'comment_likes': 1,\n 'comment_dislikes': -1, 'usage': 1}\n LEVEL_RATE = 0.2\n\n def __init__(self):\n self._xp = 0\n self._level = 0\n self._label = ''\n\n def consume_stats(self, stats):\n total_arr = [STAT_WORTH['post_likes'] * stats.post_likes, \n STAT_WORTH['post_dislikes'] * stats.post_dislikes, STAT_WORTH[\n 'comment_likes'] * stats.comment_likes, STAT_WORTH[\n 'comment_dislikes'] * stats.comment_dislikes, STAT_WORTH[\n 'usage'] * stats.usage]\n self._xp = sum(total_arr)\n self._level = self._calculate_level()\n\n def _calculate_level(self):\n return math.sqrt(LEVEL_RATE * self._xp)\n\n def from_model(self):\n pass\n\n def from_proto(self):\n pass\n\n def to_model(self):\n pass\n\n def to_proto(self):\n pass\n",
"step-5": "import math\n\nclass Rank:\n\n class Stats(object):\n '''Holds info used to calculate amount of xp a player gets'''\n post_likes = 0\n post_dislikes = 0\n comment_likes = 0\n comment_dislikes = 0\n usage = 0\n\n class Interval(object):\n '''A class representing an interval. It is always [a, b).'''\n def __init__(self, a, b):\n self.a = a\n self.b = b\n\n def contains(self, n):\n return self.a >= n and n < b\n\n # Each index in this array corresponds to the level for that xp interval.\n XP_INTERVALS = [\n Interval(0, 100),\n Interval(100, 250),\n Interval(250, 1000),\n Interval(100, 250),\n Interval(100, 250),\n Interval(100, 250),\n Interval(100, 250),\n Interval(100, 250),\n Interval(100, 250),\n Interval(100, 250),\n Interval(100, 250),\n Interval(100, 250),\n Interval(100, 250),\n Interval(100, 250),\n ]\n\n STAT_WORTH = {\n 'post_likes': 1,\n 'post_dislikes': -1,\n 'comment_likes': 1,\n 'comment_dislikes': -1,\n 'usage': 1\n }\n\n # Tweaks how far apart each of the levels are. For example, the closer to\n # zero this is, the further apart the levels.\n LEVEL_RATE = 0.2\n\n def __init__(self):\n self._xp = 0\n self._level = 0\n self._label = ''\n\n def consume_stats(self, stats):\n total_arr = [\n STAT_WORTH['post_likes']*stats.post_likes,\n STAT_WORTH['post_dislikes']*stats.post_dislikes,\n STAT_WORTH['comment_likes']*stats.comment_likes,\n STAT_WORTH['comment_dislikes']*stats.comment_dislikes,\n STAT_WORTH['usage']*stats.usage,\n ]\n self._xp = sum(total_arr)\n self._level = self._calculate_level()\n\n def _calculate_level(self):\n return math.sqrt(LEVEL_RATE*self._xp)\n\n def from_model(self):\n pass\n\n def from_proto(self):\n pass\n\n def to_model(self):\n pass\n\n def to_proto(self):\n pass\n",
"step-ids": [
5,
6,
7,
9,
11
]
}
|
[
5,
6,
7,
9,
11
] |
<|reserved_special_token_0|>
class channel(gr.hier_block2):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def set_k(self, k):
self.k = k
self.channels_fading_model_0.set_K(self.k)
def get_tchannel(self):
return self.tchannel
def set_tchannel(self, tchannel):
self.tchannel = tchannel
self.Multiplexer_mux_0.set_sel(self.tchannel)
def get_voltage(self):
return self.voltage
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class channel(gr.hier_block2):
<|reserved_special_token_0|>
def get_k(self):
return self.k
def set_k(self, k):
self.k = k
self.channels_fading_model_0.set_K(self.k)
def get_tchannel(self):
return self.tchannel
def set_tchannel(self, tchannel):
self.tchannel = tchannel
self.Multiplexer_mux_0.set_sel(self.tchannel)
def get_voltage(self):
return self.voltage
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class channel(gr.hier_block2):
def __init__(self, k=4.0, tchannel=1, voltage=0):
gr.hier_block2.__init__(self, 'channel', gr.io_signature(1, 1, gr.
sizeof_float * 1), gr.io_signature(1, 1, gr.sizeof_float * 1))
self.k = k
self.tchannel = tchannel
self.voltage = voltage
self.channels_fading_model_0_0 = channels.fading_model(8, 5 / 32000,
False, 4.0, 0)
self.channels_fading_model_0 = channels.fading_model(8, 5 / 32000,
True, k, 0)
self.blocks_float_to_complex_0_0 = blocks.float_to_complex(1)
self.blocks_float_to_complex_0 = blocks.float_to_complex(1)
self.blocks_complex_to_float_0_0_0 = blocks.complex_to_float(1)
self.blocks_complex_to_float_0 = blocks.complex_to_float(1)
self.blocks_add_xx_0 = blocks.add_vff(1)
self.analog_noise_source_x_0 = analog.noise_source_f(analog.
GR_GAUSSIAN, voltage, 0)
self.Multiplexer_mux_0 = Multiplexer.mux(tchannel)
self.connect((self.Multiplexer_mux_0, 0), (self, 0))
self.connect((self.analog_noise_source_x_0, 0), (self.
blocks_add_xx_0, 0))
self.connect((self.blocks_add_xx_0, 0), (self.Multiplexer_mux_0, 0))
self.connect((self.blocks_complex_to_float_0, 0), (self.
Multiplexer_mux_0, 2))
self.connect((self.blocks_complex_to_float_0_0_0, 0), (self.
Multiplexer_mux_0, 1))
self.connect((self.blocks_float_to_complex_0, 0), (self.
channels_fading_model_0_0, 0))
self.connect((self.blocks_float_to_complex_0_0, 0), (self.
channels_fading_model_0, 0))
self.connect((self.channels_fading_model_0, 0), (self.
blocks_complex_to_float_0_0_0, 0))
self.connect((self.channels_fading_model_0_0, 0), (self.
blocks_complex_to_float_0, 0))
self.connect((self, 0), (self.blocks_add_xx_0, 1))
self.connect((self, 0), (self.blocks_float_to_complex_0, 0))
self.connect((self, 0), (self.blocks_float_to_complex_0_0, 0))
def get_k(self):
return self.k
def set_k(self, k):
self.k = k
self.channels_fading_model_0.set_K(self.k)
def get_tchannel(self):
return self.tchannel
def set_tchannel(self, tchannel):
self.tchannel = tchannel
self.Multiplexer_mux_0.set_sel(self.tchannel)
def get_voltage(self):
return self.voltage
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class channel(gr.hier_block2):
def __init__(self, k=4.0, tchannel=1, voltage=0):
gr.hier_block2.__init__(self, 'channel', gr.io_signature(1, 1, gr.
sizeof_float * 1), gr.io_signature(1, 1, gr.sizeof_float * 1))
self.k = k
self.tchannel = tchannel
self.voltage = voltage
self.channels_fading_model_0_0 = channels.fading_model(8, 5 / 32000,
False, 4.0, 0)
self.channels_fading_model_0 = channels.fading_model(8, 5 / 32000,
True, k, 0)
self.blocks_float_to_complex_0_0 = blocks.float_to_complex(1)
self.blocks_float_to_complex_0 = blocks.float_to_complex(1)
self.blocks_complex_to_float_0_0_0 = blocks.complex_to_float(1)
self.blocks_complex_to_float_0 = blocks.complex_to_float(1)
self.blocks_add_xx_0 = blocks.add_vff(1)
self.analog_noise_source_x_0 = analog.noise_source_f(analog.
GR_GAUSSIAN, voltage, 0)
self.Multiplexer_mux_0 = Multiplexer.mux(tchannel)
self.connect((self.Multiplexer_mux_0, 0), (self, 0))
self.connect((self.analog_noise_source_x_0, 0), (self.
blocks_add_xx_0, 0))
self.connect((self.blocks_add_xx_0, 0), (self.Multiplexer_mux_0, 0))
self.connect((self.blocks_complex_to_float_0, 0), (self.
Multiplexer_mux_0, 2))
self.connect((self.blocks_complex_to_float_0_0_0, 0), (self.
Multiplexer_mux_0, 1))
self.connect((self.blocks_float_to_complex_0, 0), (self.
channels_fading_model_0_0, 0))
self.connect((self.blocks_float_to_complex_0_0, 0), (self.
channels_fading_model_0, 0))
self.connect((self.channels_fading_model_0, 0), (self.
blocks_complex_to_float_0_0_0, 0))
self.connect((self.channels_fading_model_0_0, 0), (self.
blocks_complex_to_float_0, 0))
self.connect((self, 0), (self.blocks_add_xx_0, 1))
self.connect((self, 0), (self.blocks_float_to_complex_0, 0))
self.connect((self, 0), (self.blocks_float_to_complex_0_0, 0))
def get_k(self):
return self.k
def set_k(self, k):
self.k = k
self.channels_fading_model_0.set_K(self.k)
def get_tchannel(self):
return self.tchannel
def set_tchannel(self, tchannel):
self.tchannel = tchannel
self.Multiplexer_mux_0.set_sel(self.tchannel)
def get_voltage(self):
return self.voltage
def set_voltage(self, voltage):
self.voltage = voltage
self.analog_noise_source_x_0.set_amplitude(self.voltage)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: channel
# Author: Maria Camila Herrera Ramos
# Generated: Thu Aug 2 18:09:17 2018
##################################################
from gnuradio import analog
from gnuradio import blocks
from gnuradio import channels
from gnuradio import gr
from gnuradio.filter import firdes
import Multiplexer
class channel(gr.hier_block2):
def __init__(self, k=4.0, tchannel=1, voltage=0):
gr.hier_block2.__init__(
self, "channel",
gr.io_signature(1, 1, gr.sizeof_float*1),
gr.io_signature(1, 1, gr.sizeof_float*1),
)
##################################################
# Parameters
##################################################
self.k = k
self.tchannel = tchannel
self.voltage = voltage
##################################################
# Blocks
##################################################
self.channels_fading_model_0_0 = channels.fading_model( 8, 5/32000, False, 4.0, 0 )
self.channels_fading_model_0 = channels.fading_model( 8, 5/32000, True, k, 0 )
self.blocks_float_to_complex_0_0 = blocks.float_to_complex(1)
self.blocks_float_to_complex_0 = blocks.float_to_complex(1)
self.blocks_complex_to_float_0_0_0 = blocks.complex_to_float(1)
self.blocks_complex_to_float_0 = blocks.complex_to_float(1)
self.blocks_add_xx_0 = blocks.add_vff(1)
self.analog_noise_source_x_0 = analog.noise_source_f(analog.GR_GAUSSIAN, voltage, 0)
self.Multiplexer_mux_0 = Multiplexer.mux(tchannel)
##################################################
# Connections
##################################################
self.connect((self.Multiplexer_mux_0, 0), (self, 0))
self.connect((self.analog_noise_source_x_0, 0), (self.blocks_add_xx_0, 0))
self.connect((self.blocks_add_xx_0, 0), (self.Multiplexer_mux_0, 0))
self.connect((self.blocks_complex_to_float_0, 0), (self.Multiplexer_mux_0, 2))
self.connect((self.blocks_complex_to_float_0_0_0, 0), (self.Multiplexer_mux_0, 1))
self.connect((self.blocks_float_to_complex_0, 0), (self.channels_fading_model_0_0, 0))
self.connect((self.blocks_float_to_complex_0_0, 0), (self.channels_fading_model_0, 0))
self.connect((self.channels_fading_model_0, 0), (self.blocks_complex_to_float_0_0_0, 0))
self.connect((self.channels_fading_model_0_0, 0), (self.blocks_complex_to_float_0, 0))
self.connect((self, 0), (self.blocks_add_xx_0, 1))
self.connect((self, 0), (self.blocks_float_to_complex_0, 0))
self.connect((self, 0), (self.blocks_float_to_complex_0_0, 0))
def get_k(self):
return self.k
def set_k(self, k):
self.k = k
self.channels_fading_model_0.set_K(self.k)
def get_tchannel(self):
return self.tchannel
def set_tchannel(self, tchannel):
self.tchannel = tchannel
self.Multiplexer_mux_0.set_sel(self.tchannel)
def get_voltage(self):
return self.voltage
def set_voltage(self, voltage):
self.voltage = voltage
self.analog_noise_source_x_0.set_amplitude(self.voltage)
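# Usage sketch (added, not generated by GRC; assumes GNU Radio 3.7 and the
# custom Multiplexer OOT module are installed): wire the hier block between a
# float source and a sink inside a top block. tchannel selects which of the
# three impairment paths (noise adder or one of the two fading models) reaches
# the output.
# from gnuradio import gr, analog, blocks
# tb = gr.top_block()
# src = analog.sig_source_f(32000, analog.GR_COS_WAVE, 1000, 1.0, 0)
# head = blocks.head(gr.sizeof_float, 32000)      # stop after 32000 samples
# chan = channel(k=4.0, tchannel=1, voltage=0.1)
# snk = blocks.vector_sink_f()
# tb.connect(src, head, chan, snk)
# tb.run()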
|
flexible
|
{
"blob_id": "8adf25fbffc14d6927d665931e54a7d699a3b439",
"index": 6202,
"step-1": "<mask token>\n\n\nclass channel(gr.hier_block2):\n <mask token>\n <mask token>\n\n def set_k(self, k):\n self.k = k\n self.channels_fading_model_0.set_K(self.k)\n\n def get_tchannel(self):\n return self.tchannel\n\n def set_tchannel(self, tchannel):\n self.tchannel = tchannel\n self.Multiplexer_mux_0.set_sel(self.tchannel)\n\n def get_voltage(self):\n return self.voltage\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass channel(gr.hier_block2):\n <mask token>\n\n def get_k(self):\n return self.k\n\n def set_k(self, k):\n self.k = k\n self.channels_fading_model_0.set_K(self.k)\n\n def get_tchannel(self):\n return self.tchannel\n\n def set_tchannel(self, tchannel):\n self.tchannel = tchannel\n self.Multiplexer_mux_0.set_sel(self.tchannel)\n\n def get_voltage(self):\n return self.voltage\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass channel(gr.hier_block2):\n\n def __init__(self, k=4.0, tchannel=1, voltage=0):\n gr.hier_block2.__init__(self, 'channel', gr.io_signature(1, 1, gr.\n sizeof_float * 1), gr.io_signature(1, 1, gr.sizeof_float * 1))\n self.k = k\n self.tchannel = tchannel\n self.voltage = voltage\n self.channels_fading_model_0_0 = channels.fading_model(8, 5 / 32000,\n False, 4.0, 0)\n self.channels_fading_model_0 = channels.fading_model(8, 5 / 32000, \n True, k, 0)\n self.blocks_float_to_complex_0_0 = blocks.float_to_complex(1)\n self.blocks_float_to_complex_0 = blocks.float_to_complex(1)\n self.blocks_complex_to_float_0_0_0 = blocks.complex_to_float(1)\n self.blocks_complex_to_float_0 = blocks.complex_to_float(1)\n self.blocks_add_xx_0 = blocks.add_vff(1)\n self.analog_noise_source_x_0 = analog.noise_source_f(analog.\n GR_GAUSSIAN, voltage, 0)\n self.Multiplexer_mux_0 = Multiplexer.mux(tchannel)\n self.connect((self.Multiplexer_mux_0, 0), (self, 0))\n self.connect((self.analog_noise_source_x_0, 0), (self.\n blocks_add_xx_0, 0))\n self.connect((self.blocks_add_xx_0, 0), (self.Multiplexer_mux_0, 0))\n self.connect((self.blocks_complex_to_float_0, 0), (self.\n Multiplexer_mux_0, 2))\n self.connect((self.blocks_complex_to_float_0_0_0, 0), (self.\n Multiplexer_mux_0, 1))\n self.connect((self.blocks_float_to_complex_0, 0), (self.\n channels_fading_model_0_0, 0))\n self.connect((self.blocks_float_to_complex_0_0, 0), (self.\n channels_fading_model_0, 0))\n self.connect((self.channels_fading_model_0, 0), (self.\n blocks_complex_to_float_0_0_0, 0))\n self.connect((self.channels_fading_model_0_0, 0), (self.\n blocks_complex_to_float_0, 0))\n self.connect((self, 0), (self.blocks_add_xx_0, 1))\n self.connect((self, 0), (self.blocks_float_to_complex_0, 0))\n self.connect((self, 0), (self.blocks_float_to_complex_0_0, 0))\n\n def get_k(self):\n return self.k\n\n def set_k(self, k):\n self.k = k\n self.channels_fading_model_0.set_K(self.k)\n\n def get_tchannel(self):\n return self.tchannel\n\n def set_tchannel(self, tchannel):\n self.tchannel = tchannel\n self.Multiplexer_mux_0.set_sel(self.tchannel)\n\n def get_voltage(self):\n return self.voltage\n <mask token>\n",
"step-4": "<mask token>\n\n\nclass channel(gr.hier_block2):\n\n def __init__(self, k=4.0, tchannel=1, voltage=0):\n gr.hier_block2.__init__(self, 'channel', gr.io_signature(1, 1, gr.\n sizeof_float * 1), gr.io_signature(1, 1, gr.sizeof_float * 1))\n self.k = k\n self.tchannel = tchannel\n self.voltage = voltage\n self.channels_fading_model_0_0 = channels.fading_model(8, 5 / 32000,\n False, 4.0, 0)\n self.channels_fading_model_0 = channels.fading_model(8, 5 / 32000, \n True, k, 0)\n self.blocks_float_to_complex_0_0 = blocks.float_to_complex(1)\n self.blocks_float_to_complex_0 = blocks.float_to_complex(1)\n self.blocks_complex_to_float_0_0_0 = blocks.complex_to_float(1)\n self.blocks_complex_to_float_0 = blocks.complex_to_float(1)\n self.blocks_add_xx_0 = blocks.add_vff(1)\n self.analog_noise_source_x_0 = analog.noise_source_f(analog.\n GR_GAUSSIAN, voltage, 0)\n self.Multiplexer_mux_0 = Multiplexer.mux(tchannel)\n self.connect((self.Multiplexer_mux_0, 0), (self, 0))\n self.connect((self.analog_noise_source_x_0, 0), (self.\n blocks_add_xx_0, 0))\n self.connect((self.blocks_add_xx_0, 0), (self.Multiplexer_mux_0, 0))\n self.connect((self.blocks_complex_to_float_0, 0), (self.\n Multiplexer_mux_0, 2))\n self.connect((self.blocks_complex_to_float_0_0_0, 0), (self.\n Multiplexer_mux_0, 1))\n self.connect((self.blocks_float_to_complex_0, 0), (self.\n channels_fading_model_0_0, 0))\n self.connect((self.blocks_float_to_complex_0_0, 0), (self.\n channels_fading_model_0, 0))\n self.connect((self.channels_fading_model_0, 0), (self.\n blocks_complex_to_float_0_0_0, 0))\n self.connect((self.channels_fading_model_0_0, 0), (self.\n blocks_complex_to_float_0, 0))\n self.connect((self, 0), (self.blocks_add_xx_0, 1))\n self.connect((self, 0), (self.blocks_float_to_complex_0, 0))\n self.connect((self, 0), (self.blocks_float_to_complex_0_0, 0))\n\n def get_k(self):\n return self.k\n\n def set_k(self, k):\n self.k = k\n self.channels_fading_model_0.set_K(self.k)\n\n def get_tchannel(self):\n return self.tchannel\n\n def set_tchannel(self, tchannel):\n self.tchannel = tchannel\n self.Multiplexer_mux_0.set_sel(self.tchannel)\n\n def get_voltage(self):\n return self.voltage\n\n def set_voltage(self, voltage):\n self.voltage = voltage\n self.analog_noise_source_x_0.set_amplitude(self.voltage)\n",
"step-5": "# -*- coding: utf-8 -*-\n##################################################\n# GNU Radio Python Flow Graph\n# Title: channel\n# Author: Maria Camila Herrera Ramos\n# Generated: Thu Aug 2 18:09:17 2018\n##################################################\n\n\nfrom gnuradio import analog\nfrom gnuradio import blocks\nfrom gnuradio import channels\nfrom gnuradio import gr\nfrom gnuradio.filter import firdes\nimport Multiplexer\n\n\nclass channel(gr.hier_block2):\n\n def __init__(self, k=4.0, tchannel=1, voltage=0):\n gr.hier_block2.__init__(\n self, \"channel\",\n gr.io_signature(1, 1, gr.sizeof_float*1),\n gr.io_signature(1, 1, gr.sizeof_float*1),\n )\n\n ##################################################\n # Parameters\n ##################################################\n self.k = k\n self.tchannel = tchannel\n self.voltage = voltage\n\n ##################################################\n # Blocks\n ##################################################\n self.channels_fading_model_0_0 = channels.fading_model( 8, 5/32000, False, 4.0, 0 )\n self.channels_fading_model_0 = channels.fading_model( 8, 5/32000, True, k, 0 )\n self.blocks_float_to_complex_0_0 = blocks.float_to_complex(1)\n self.blocks_float_to_complex_0 = blocks.float_to_complex(1)\n self.blocks_complex_to_float_0_0_0 = blocks.complex_to_float(1)\n self.blocks_complex_to_float_0 = blocks.complex_to_float(1)\n self.blocks_add_xx_0 = blocks.add_vff(1)\n self.analog_noise_source_x_0 = analog.noise_source_f(analog.GR_GAUSSIAN, voltage, 0)\n self.Multiplexer_mux_0 = Multiplexer.mux(tchannel)\n\n ##################################################\n # Connections\n ##################################################\n self.connect((self.Multiplexer_mux_0, 0), (self, 0))\n self.connect((self.analog_noise_source_x_0, 0), (self.blocks_add_xx_0, 0))\n self.connect((self.blocks_add_xx_0, 0), (self.Multiplexer_mux_0, 0))\n self.connect((self.blocks_complex_to_float_0, 0), (self.Multiplexer_mux_0, 2))\n self.connect((self.blocks_complex_to_float_0_0_0, 0), (self.Multiplexer_mux_0, 1))\n self.connect((self.blocks_float_to_complex_0, 0), (self.channels_fading_model_0_0, 0))\n self.connect((self.blocks_float_to_complex_0_0, 0), (self.channels_fading_model_0, 0))\n self.connect((self.channels_fading_model_0, 0), (self.blocks_complex_to_float_0_0_0, 0))\n self.connect((self.channels_fading_model_0_0, 0), (self.blocks_complex_to_float_0, 0))\n self.connect((self, 0), (self.blocks_add_xx_0, 1))\n self.connect((self, 0), (self.blocks_float_to_complex_0, 0))\n self.connect((self, 0), (self.blocks_float_to_complex_0_0, 0))\n\n def get_k(self):\n return self.k\n\n def set_k(self, k):\n self.k = k\n self.channels_fading_model_0.set_K(self.k)\n\n def get_tchannel(self):\n return self.tchannel\n\n def set_tchannel(self, tchannel):\n self.tchannel = tchannel\n self.Multiplexer_mux_0.set_sel(self.tchannel)\n\n def get_voltage(self):\n return self.voltage\n\n def set_voltage(self, voltage):\n self.voltage = voltage\n self.analog_noise_source_x_0.set_amplitude(self.voltage)\n",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(grade)
print(total)
print(avg)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
total = totalMarks(85, 67, 56, 45, 78)
avg = average(total)
grade = findGrade(avg)
print(grade)
print(total)
print(avg)
<|reserved_special_token_1|>
from functiona import *
total = totalMarks(85, 67, 56, 45, 78)
avg = average(total)
grade = findGrade(avg)
print(grade)
print(total)
print(avg)
|
flexible
|
{
"blob_id": "05f77472625e902b66c4a97a4c640835826bd494",
"index": 3635,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(grade)\nprint(total)\nprint(avg)\n",
"step-3": "<mask token>\ntotal = totalMarks(85, 67, 56, 45, 78)\navg = average(total)\ngrade = findGrade(avg)\nprint(grade)\nprint(total)\nprint(avg)\n",
"step-4": "from functiona import *\ntotal = totalMarks(85, 67, 56, 45, 78)\navg = average(total)\ngrade = findGrade(avg)\nprint(grade)\nprint(total)\nprint(avg)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app_name = 'orders'
urlpatterns = [path('checkout', views.order_checkout_view, name=
'orders-checkout')]
<|reserved_special_token_1|>
from django.urls import path
from . import views
app_name = 'orders'
urlpatterns = [path('checkout', views.order_checkout_view, name=
'orders-checkout')]
<|reserved_special_token_1|>
from django.urls import path
from . import views
app_name = 'orders'
urlpatterns = [
path('checkout' , views.order_checkout_view , name='orders-checkout') ,
]
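A small illustrative sketch (assuming this urlconf is included by the project urls.py): with app_name set, the route is reversed through the "orders" namespace.

from django.urls import reverse

checkout_url = reverse('orders:orders-checkout')
# e.g. resolves to '/orders/checkout' if the project does: path('orders/', include('orders.urls'))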
|
flexible
|
{
"blob_id": "031f668fbf75b54ec874a59f53c60ceca53779cf",
"index": 8942,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp_name = 'orders'\nurlpatterns = [path('checkout', views.order_checkout_view, name=\n 'orders-checkout')]\n",
"step-3": "from django.urls import path\nfrom . import views\napp_name = 'orders'\nurlpatterns = [path('checkout', views.order_checkout_view, name=\n 'orders-checkout')]\n",
"step-4": "from django.urls import path\n\nfrom . import views\n\napp_name = 'orders'\nurlpatterns = [\n path('checkout' , views.order_checkout_view , name='orders-checkout') ,\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Created by MechAviv
# [Maestra Fiametta] | [9390220]
# Commerci Republic : San Commerci
if sm.hasItem(4310100, 1):
sm.setSpeakerID(9390220)
sm.sendSayOkay("You can't start your voyage until you finish the tutorial quest!")
else:
sm.setSpeakerID(9390220)
sm.sendNext("What? You threw away the coins without finishing the tutorial? (Sighs) I suppose I can give you some more coins so that you can complete the tutorial.")
sm.setSpeakerID(9390220)
sm.sendSay("Just remember, you can't trade without gold!")
sm.giveItem(4310100, 10)
sm.setSpeakerID(9390220)
sm.sendPrev("Check to make sure there you have coins in your inventory.")
|
normal
|
{
"blob_id": "c4b9fdba9e9eeccc52999dab9232302f159c882a",
"index": 588,
"step-1": "<mask token>\n",
"step-2": "if sm.hasItem(4310100, 1):\n sm.setSpeakerID(9390220)\n sm.sendSayOkay(\n \"You can't start your voyage until you finish the tutorial quest!\")\nelse:\n sm.setSpeakerID(9390220)\n sm.sendNext(\n 'What? You threw away the coins without finishing the tutorial? (Sighs) I suppose I can give you some more coins so that you can complete the tutorial.'\n )\n sm.setSpeakerID(9390220)\n sm.sendSay(\"Just remember, you can't trade without gold!\")\n sm.giveItem(4310100, 10)\n sm.setSpeakerID(9390220)\n sm.sendPrev('Check to make sure there you have coins in your inventory.')\n",
"step-3": "# Created by MechAviv\n# [Maestra Fiametta] | [9390220]\n# Commerci Republic : San Commerci\nif sm.hasItem(4310100, 1):\n sm.setSpeakerID(9390220)\n sm.sendSayOkay(\"You can't start your voyage until you finish the tutorial quest!\")\nelse:\n sm.setSpeakerID(9390220)\n sm.sendNext(\"What? You threw away the coins without finishing the tutorial? (Sighs) I suppose I can give you some more coins so that you can complete the tutorial.\")\n\n\n sm.setSpeakerID(9390220)\n sm.sendSay(\"Just remember, you can't trade without gold!\")\n\n\n sm.giveItem(4310100, 10)\n sm.setSpeakerID(9390220)\n sm.sendPrev(\"Check to make sure there you have coins in your inventory.\")",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class Lang:
def __init__(self):
super(Lang, self).__init__()
self.word2index = {}
self.word2count = {}
self.index2word = {}
self.n_words = 0
def index_words(self, sentence):
for word in sentence:
self.index_word(word)
def index_word(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
<|reserved_special_token_0|>
def check_graph(G):
total_nodes = len(G.nodes)
no_emb_nodes = 0
nodes_to_delete = []
for node_str in G.nodes:
try:
emb = G.node[node_str]['emb']
except:
no_emb_nodes += 1
nodes_to_delete.append(node_str)
print('%d Nodes and %d missing nodes in G ' % (total_nodes, no_emb_nodes))
G.remove_nodes_from(nodes_to_delete)
return G
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Lang:
def __init__(self):
super(Lang, self).__init__()
self.word2index = {}
self.word2count = {}
self.index2word = {}
self.n_words = 0
def index_words(self, sentence):
for word in sentence:
self.index_word(word)
def index_word(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
def gen_embeddings(vocab, file, emb_size, emb_dim):
"""
Generate an initial embedding matrix for word_dict.
If an embedding file is not given or a word is not in the embedding file,
a randomly initialized vector will be used.
"""
embeddings = np.zeros((vocab.n_words, emb_size))
print('Embeddings: %d x %d' % (vocab.n_words, emb_size))
if file is not None:
print('Loading embedding file: %s' % file)
pre_trained = 0
for line in open(file).readlines():
sp = line.split()
if len(sp) == emb_dim + 1:
if sp[0] in vocab.word2index:
pre_trained += 1
embeddings[vocab.word2index[sp[0]]] = [float(x) for x in
sp[1:]]
else:
print(sp[0])
print('Pre-trained: %d (%.2f%%)' % (pre_trained, pre_trained *
100.0 / vocab.n_words))
return embeddings
def process_raw_abstracts(vocab):
with open(raw_save_path, 'r', encoding='utf8') as f:
for line in tqdm(f, total=13304586):
tokens = nltk.tokenize.word_tokenize(line)
tokens = [token for token in tokens if not token in nltk_stopwords]
vocab.index_words(tokens)
<|reserved_special_token_0|>
def check_graph(G):
total_nodes = len(G.nodes)
no_emb_nodes = 0
nodes_to_delete = []
for node_str in G.nodes:
try:
emb = G.node[node_str]['emb']
except:
no_emb_nodes += 1
nodes_to_delete.append(node_str)
print('%d Nodes and %d missing nodes in G ' % (total_nodes, no_emb_nodes))
G.remove_nodes_from(nodes_to_delete)
return G
def process_line(G, line, vocab=None):
try:
fos = data['fos']
abstract = data['indexed_abstract']
paper_id = data['id']
references_id = data['references']
text = list(abstract['InvertedIndex'].keys())
text = ' '.join(text)
if args.process_raw:
with open(raw_save_path, 'a+') as f:
f.write(text)
f.write('\n')
"""Create Node Embedding if Node doesn't exist """
if vocab is not None:
tokens = nltk.tokenize.word_tokenize(text)
tokens = [token for token in tokens if not token in nltk_stopwords]
node_emb = get_node_embed(tokens, vocab)
for field in fos:
name = field['name']
for ref in references_id:
G.add_edge(paper_id, ref)
G.node[paper_id]['emb'] = node_emb
except:
return G
return G
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Lang:
def __init__(self):
super(Lang, self).__init__()
self.word2index = {}
self.word2count = {}
self.index2word = {}
self.n_words = 0
def index_words(self, sentence):
for word in sentence:
self.index_word(word)
def index_word(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
def gen_embeddings(vocab, file, emb_size, emb_dim):
"""
Generate an initial embedding matrix for word_dict.
If an embedding file is not given or a word is not in the embedding file,
a randomly initialized vector will be used.
"""
embeddings = np.zeros((vocab.n_words, emb_size))
print('Embeddings: %d x %d' % (vocab.n_words, emb_size))
if file is not None:
print('Loading embedding file: %s' % file)
pre_trained = 0
for line in open(file).readlines():
sp = line.split()
if len(sp) == emb_dim + 1:
if sp[0] in vocab.word2index:
pre_trained += 1
embeddings[vocab.word2index[sp[0]]] = [float(x) for x in
sp[1:]]
else:
print(sp[0])
print('Pre-trained: %d (%.2f%%)' % (pre_trained, pre_trained *
100.0 / vocab.n_words))
return embeddings
def process_raw_abstracts(vocab):
with open(raw_save_path, 'r', encoding='utf8') as f:
for line in tqdm(f, total=13304586):
tokens = nltk.tokenize.word_tokenize(line)
tokens = [token for token in tokens if not token in nltk_stopwords]
vocab.index_words(tokens)
def get_node_embed(text, vocab):
sum_embed = 0
for word in text:
embed = embeddings[vocab.word2index[word]]
sum_embed += embed
return sum_embed
def check_graph(G):
total_nodes = len(G.nodes)
no_emb_nodes = 0
nodes_to_delete = []
for node_str in G.nodes:
try:
emb = G.node[node_str]['emb']
except:
no_emb_nodes += 1
nodes_to_delete.append(node_str)
print('%d Nodes and %d missing nodes in G ' % (total_nodes, no_emb_nodes))
G.remove_nodes_from(nodes_to_delete)
return G
def process_line(G, line, vocab=None):
try:
fos = data['fos']
abstract = data['indexed_abstract']
paper_id = data['id']
references_id = data['references']
text = list(abstract['InvertedIndex'].keys())
text = ' '.join(text)
if args.process_raw:
with open(raw_save_path, 'a+') as f:
f.write(text)
f.write('\n')
"""Create Node Embedding if Node doesn't exist """
if vocab is not None:
tokens = nltk.tokenize.word_tokenize(text)
tokens = [token for token in tokens if not token in nltk_stopwords]
node_emb = get_node_embed(tokens, vocab)
for field in fos:
name = field['name']
for ref in references_id:
G.add_edge(paper_id, ref)
G.node[paper_id]['emb'] = node_emb
except:
return G
return G
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
nltk.download('stopwords')
nltk_stopwords = nltk.corpus.stopwords.words('english')
data_path = '/home/joey.bose/dblp_papers_v11.txt'
save_path_base = '/home/joey.bose/aminer_data/'
load_path_rank_base = '/home/joey.bose/aminer_data_ranked/fos/'
save_path_graph_base = '/home/joey.bose/aminer_data_ranked/graphs/'
raw_save_path = '/home/joey.bose/aminer_data_ranked/aminer_raw.txt'
spacy_nlp = spacy.load('en_core_web_sm')
glove_path = (
'/home/joey.bose/docker_temp/meta-graph/meta-graph/glove.840B.300d.txt')
class Lang:
def __init__(self):
super(Lang, self).__init__()
self.word2index = {}
self.word2count = {}
self.index2word = {}
self.n_words = 0
def index_words(self, sentence):
for word in sentence:
self.index_word(word)
def index_word(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
def gen_embeddings(vocab, file, emb_size, emb_dim):
"""
Generate an initial embedding matrix for word_dict.
If an embedding file is not given or a word is not in the embedding file,
a randomly initialized vector will be used.
"""
embeddings = np.zeros((vocab.n_words, emb_size))
print('Embeddings: %d x %d' % (vocab.n_words, emb_size))
if file is not None:
print('Loading embedding file: %s' % file)
pre_trained = 0
for line in open(file).readlines():
sp = line.split()
if len(sp) == emb_dim + 1:
if sp[0] in vocab.word2index:
pre_trained += 1
embeddings[vocab.word2index[sp[0]]] = [float(x) for x in
sp[1:]]
else:
print(sp[0])
print('Pre-trained: %d (%.2f%%)' % (pre_trained, pre_trained *
100.0 / vocab.n_words))
return embeddings
def process_raw_abstracts(vocab):
with open(raw_save_path, 'r', encoding='utf8') as f:
for line in tqdm(f, total=13304586):
tokens = nltk.tokenize.word_tokenize(line)
tokens = [token for token in tokens if not token in nltk_stopwords]
vocab.index_words(tokens)
def get_node_embed(text, vocab):
sum_embed = 0
for word in text:
embed = embeddings[vocab.word2index[word]]
sum_embed += embed
return sum_embed
def check_graph(G):
total_nodes = len(G.nodes)
no_emb_nodes = 0
nodes_to_delete = []
for node_str in G.nodes:
try:
emb = G.node[node_str]['emb']
except:
no_emb_nodes += 1
nodes_to_delete.append(node_str)
print('%d Nodes and %d missing nodes in G ' % (total_nodes, no_emb_nodes))
G.remove_nodes_from(nodes_to_delete)
return G
def process_line(G, line, vocab=None):
try:
fos = data['fos']
abstract = data['indexed_abstract']
paper_id = data['id']
references_id = data['references']
text = list(abstract['InvertedIndex'].keys())
text = ' '.join(text)
if args.process_raw:
with open(raw_save_path, 'a+') as f:
f.write(text)
f.write('\n')
"""Create Node Embedding if Node doesn't exist """
if vocab is not None:
tokens = nltk.tokenize.word_tokenize(text)
tokens = [token for token in tokens if not token in nltk_stopwords]
node_emb = get_node_embed(tokens, vocab)
for field in fos:
name = field['name']
for ref in references_id:
G.add_edge(paper_id, ref)
G.node[paper_id]['emb'] = node_emb
except:
return G
return G
if __name__ == '__main__':
"""
Create Aminer-Citation v-11 Graphs
"""
parser = argparse.ArgumentParser()
parser.add_argument('--topk', type=int, default='100')
parser.add_argument('--process_raw', action='store_true', default=False,
help='Process Raw Data')
parser.add_argument('--make_vocab', action='store_true', default=False,
help='Create Vocab from the raw abstract data')
args = parser.parse_args()
onlyfiles = [f for f in listdir(load_path_rank_base) if isfile(join(
load_path_rank_base, f))]
vocab = Lang()
if args.make_vocab:
process_raw_abstracts(vocab)
joblib.dump(vocab, 'aminer_100_vocab.pkl')
print('Done generating vocab')
embeddings = gen_embeddings(vocab, file=glove_path, emb_size=300,
emb_dim=300)
joblib.dump(embeddings, 'aminer_100_embed.pkl')
print('Done')
exit()
else:
vocab = joblib.load('aminer_100_vocab.pkl')
embeddings = joblib.load('aminer_100_embed.pkl')
for i, file_ in tqdm(enumerate(onlyfiles), total=len(onlyfiles)):
file_path = load_path_rank_base + file_
G = nx.Graph()
with open(file_path, 'r', encoding='utf8') as f:
for line in f:
data = json.loads(line)
G = process_line(G, data, vocab)
G = check_graph(G)
print('%s has %d Nodes and %d edges' % (file_, len(G), len(G.edges)))
if not os.path.exists(save_path_graph_base):
os.mkdir(save_path_graph_base)
save_path_graph = save_path_graph_base + file_.split('.')[0
] + '_graph.pkl'
nx.write_gpickle(G, save_path_graph)
<|reserved_special_token_1|>
import json
import os
import ipdb
from tqdm import tqdm
import argparse
from os import listdir
from os.path import isfile, join
import pickle
import joblib
from collections import Counter
from shutil import copyfile
import networkx as nx
import spacy
import nltk
import numpy as np
nltk.download('stopwords')
nltk_stopwords = nltk.corpus.stopwords.words('english')
data_path = '/home/joey.bose/dblp_papers_v11.txt'
save_path_base = '/home/joey.bose/aminer_data/'
load_path_rank_base = '/home/joey.bose/aminer_data_ranked/fos/'
save_path_graph_base = '/home/joey.bose/aminer_data_ranked/graphs/'
raw_save_path = '/home/joey.bose/aminer_data_ranked/aminer_raw.txt'
spacy_nlp = spacy.load('en_core_web_sm')
glove_path = '/home/joey.bose/docker_temp/meta-graph/meta-graph/glove.840B.300d.txt'
class Lang:
def __init__(self):
super(Lang, self).__init__()
self.word2index = {}
self.word2count = {}
self.index2word = {}
self.n_words = 0 # Count default tokens
def index_words(self, sentence):
for word in sentence:
self.index_word(word)
def index_word(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
def gen_embeddings(vocab, file, emb_size, emb_dim):
"""
Generate an initial embedding matrix for word_dict.
If an embedding file is not given or a word is not in the embedding file,
a randomly initialized vector will be used.
"""
# embeddings = np.random.randn(vocab.n_words, emb_size) * 0.01
embeddings = np.zeros((vocab.n_words, emb_size))
print('Embeddings: %d x %d' % (vocab.n_words, emb_size))
if file is not None:
print('Loading embedding file: %s' % file)
pre_trained = 0
for line in open(file).readlines():
sp = line.split()
if(len(sp) == emb_dim + 1):
if sp[0] in vocab.word2index:
pre_trained += 1
embeddings[vocab.word2index[sp[0]]] = [float(x) for x in sp[1:]]
else:
print(sp[0])
print('Pre-trained: %d (%.2f%%)' % (pre_trained, pre_trained * 100.0 / vocab.n_words))
return embeddings
def process_raw_abstracts(vocab):
with open(raw_save_path,"r",encoding="utf8") as f:
for line in tqdm(f,total=13304586):
tokens = nltk.tokenize.word_tokenize(line)
tokens = [token for token in tokens if not token in nltk_stopwords]
vocab.index_words(tokens)
def get_node_embed(text,vocab):
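    # Note: words absent from vocab.word2index raise KeyError here; the bare
    # except in process_line() swallows that, so such papers are silently skipped.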
sum_embed = 0
for word in text:
embed = embeddings[vocab.word2index[word]]
sum_embed += embed
return sum_embed
def check_graph(G):
total_nodes = len(G.nodes)
no_emb_nodes = 0
nodes_to_delete = []
for node_str in G.nodes:
try:
emb = G.node[node_str]['emb']
except:
no_emb_nodes += 1
nodes_to_delete.append(node_str)
print("%d Nodes and %d missing nodes in G " %(total_nodes, no_emb_nodes))
G.remove_nodes_from(nodes_to_delete)
return G
def process_line(G, line, vocab=None):
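    # Note: the body reads the module-level `data` (set in the __main__ loop) rather
    # than the `line` parameter; the caller passes the same object, so behaviour is unchanged.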
try:
fos = data['fos']
abstract = data['indexed_abstract']
paper_id = data['id']
references_id = data['references']
text = list(abstract['InvertedIndex'].keys())
text =" ".join(text)
if args.process_raw:
with open(raw_save_path,"a+") as f:
f.write(text)
f.write('\n')
'''Create Node Embedding if Node doesn't exist '''
if vocab is not None:
tokens = nltk.tokenize.word_tokenize(text)
tokens = [token for token in tokens if not token in nltk_stopwords]
node_emb = get_node_embed(tokens,vocab)
for field in fos:
name = field['name']
for ref in references_id:
G.add_edge(paper_id, ref)
G.node[paper_id]['emb'] = node_emb
except:
return G
return G
if __name__ == '__main__':
"""
Create Aminer-Citation v-11 Graphs
"""
parser = argparse.ArgumentParser()
parser.add_argument('--topk', type=int, default='100')
parser.add_argument("--process_raw", action="store_true", default=False,
help='Process Raw Data')
parser.add_argument("--make_vocab", action="store_true", default=False,
help='Create Vocab from the raw abstract data')
args = parser.parse_args()
onlyfiles = [f for f in listdir(load_path_rank_base) if isfile(join(load_path_rank_base, f))]
vocab = Lang()
if args.make_vocab:
process_raw_abstracts(vocab)
joblib.dump(vocab, "aminer_100_vocab.pkl")
print("Done generating vocab")
embeddings = gen_embeddings(vocab,file=glove_path,emb_size=300,emb_dim=300)
joblib.dump(embeddings, "aminer_100_embed.pkl")
print("Done")
exit()
else:
vocab = joblib.load("aminer_100_vocab.pkl")
embeddings = joblib.load("aminer_100_embed.pkl")
for i, file_ in tqdm(enumerate(onlyfiles),total=len(onlyfiles)):
file_path = load_path_rank_base + file_
G = nx.Graph()
with open(file_path,'r', encoding="utf8") as f:
for line in f:
data = json.loads(line)
G = process_line(G,data,vocab)
G = check_graph(G)
print("%s has %d Nodes and %d edges" %(file_,len(G),len(G.edges)))
if not os.path.exists(save_path_graph_base):
os.mkdir(save_path_graph_base)
save_path_graph = save_path_graph_base + file_.split('.')[0] + '_graph.pkl'
nx.write_gpickle(G,save_path_graph)
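A sketch of how the written pickles could be read back (the reader side is an assumption, not part of this script; the file name below is hypothetical); node embeddings are stored under the 'emb' attribute:

import networkx as nx

G = nx.read_gpickle('/home/joey.bose/aminer_data_ranked/graphs/machine_learning_graph.pkl')
print(len(G), len(G.edges))
paper_id = next(iter(G.nodes))
emb = G.node[paper_id]['emb']  # 300-d summed GloVe vector attached by process_line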
|
flexible
|
{
"blob_id": "2da7892722afde5a6f87e3bd6d5763c895ac96c9",
"index": 284,
"step-1": "<mask token>\n\n\nclass Lang:\n\n def __init__(self):\n super(Lang, self).__init__()\n self.word2index = {}\n self.word2count = {}\n self.index2word = {}\n self.n_words = 0\n\n def index_words(self, sentence):\n for word in sentence:\n self.index_word(word)\n\n def index_word(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\n\n<mask token>\n\n\ndef check_graph(G):\n total_nodes = len(G.nodes)\n no_emb_nodes = 0\n nodes_to_delete = []\n for node_str in G.nodes:\n try:\n emb = G.node[node_str]['emb']\n except:\n no_emb_nodes += 1\n nodes_to_delete.append(node_str)\n print('%d Nodes and %d missing nodes in G ' % (total_nodes, no_emb_nodes))\n G.remove_nodes_from(nodes_to_delete)\n return G\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Lang:\n\n def __init__(self):\n super(Lang, self).__init__()\n self.word2index = {}\n self.word2count = {}\n self.index2word = {}\n self.n_words = 0\n\n def index_words(self, sentence):\n for word in sentence:\n self.index_word(word)\n\n def index_word(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\n\ndef gen_embeddings(vocab, file, emb_size, emb_dim):\n \"\"\"\n Generate an initial embedding matrix for word_dict.\n If an embedding file is not given or a word is not in the embedding file,\n a randomly initialized vector will be used.\n \"\"\"\n embeddings = np.zeros((vocab.n_words, emb_size))\n print('Embeddings: %d x %d' % (vocab.n_words, emb_size))\n if file is not None:\n print('Loading embedding file: %s' % file)\n pre_trained = 0\n for line in open(file).readlines():\n sp = line.split()\n if len(sp) == emb_dim + 1:\n if sp[0] in vocab.word2index:\n pre_trained += 1\n embeddings[vocab.word2index[sp[0]]] = [float(x) for x in\n sp[1:]]\n else:\n print(sp[0])\n print('Pre-trained: %d (%.2f%%)' % (pre_trained, pre_trained * \n 100.0 / vocab.n_words))\n return embeddings\n\n\ndef process_raw_abstracts(vocab):\n with open(raw_save_path, 'r', encoding='utf8') as f:\n for line in tqdm(f, total=13304586):\n tokens = nltk.tokenize.word_tokenize(line)\n tokens = [token for token in tokens if not token in nltk_stopwords]\n vocab.index_words(tokens)\n\n\n<mask token>\n\n\ndef check_graph(G):\n total_nodes = len(G.nodes)\n no_emb_nodes = 0\n nodes_to_delete = []\n for node_str in G.nodes:\n try:\n emb = G.node[node_str]['emb']\n except:\n no_emb_nodes += 1\n nodes_to_delete.append(node_str)\n print('%d Nodes and %d missing nodes in G ' % (total_nodes, no_emb_nodes))\n G.remove_nodes_from(nodes_to_delete)\n return G\n\n\ndef process_line(G, line, vocab=None):\n try:\n fos = data['fos']\n abstract = data['indexed_abstract']\n paper_id = data['id']\n references_id = data['references']\n text = list(abstract['InvertedIndex'].keys())\n text = ' '.join(text)\n if args.process_raw:\n with open(raw_save_path, 'a+') as f:\n f.write(text)\n f.write('\\n')\n \"\"\"Create Node Embedding if Node doesn't exist \"\"\"\n if vocab is not None:\n tokens = nltk.tokenize.word_tokenize(text)\n tokens = [token for token in tokens if not token in nltk_stopwords]\n node_emb = get_node_embed(tokens, vocab)\n for field in fos:\n name = field['name']\n for ref in references_id:\n G.add_edge(paper_id, ref)\n G.node[paper_id]['emb'] = node_emb\n except:\n return G\n return G\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Lang:\n\n def __init__(self):\n super(Lang, self).__init__()\n self.word2index = {}\n self.word2count = {}\n self.index2word = {}\n self.n_words = 0\n\n def index_words(self, sentence):\n for word in sentence:\n self.index_word(word)\n\n def index_word(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\n\ndef gen_embeddings(vocab, file, emb_size, emb_dim):\n \"\"\"\n Generate an initial embedding matrix for word_dict.\n If an embedding file is not given or a word is not in the embedding file,\n a randomly initialized vector will be used.\n \"\"\"\n embeddings = np.zeros((vocab.n_words, emb_size))\n print('Embeddings: %d x %d' % (vocab.n_words, emb_size))\n if file is not None:\n print('Loading embedding file: %s' % file)\n pre_trained = 0\n for line in open(file).readlines():\n sp = line.split()\n if len(sp) == emb_dim + 1:\n if sp[0] in vocab.word2index:\n pre_trained += 1\n embeddings[vocab.word2index[sp[0]]] = [float(x) for x in\n sp[1:]]\n else:\n print(sp[0])\n print('Pre-trained: %d (%.2f%%)' % (pre_trained, pre_trained * \n 100.0 / vocab.n_words))\n return embeddings\n\n\ndef process_raw_abstracts(vocab):\n with open(raw_save_path, 'r', encoding='utf8') as f:\n for line in tqdm(f, total=13304586):\n tokens = nltk.tokenize.word_tokenize(line)\n tokens = [token for token in tokens if not token in nltk_stopwords]\n vocab.index_words(tokens)\n\n\ndef get_node_embed(text, vocab):\n sum_embed = 0\n for word in text:\n embed = embeddings[vocab.word2index[word]]\n sum_embed += embed\n return sum_embed\n\n\ndef check_graph(G):\n total_nodes = len(G.nodes)\n no_emb_nodes = 0\n nodes_to_delete = []\n for node_str in G.nodes:\n try:\n emb = G.node[node_str]['emb']\n except:\n no_emb_nodes += 1\n nodes_to_delete.append(node_str)\n print('%d Nodes and %d missing nodes in G ' % (total_nodes, no_emb_nodes))\n G.remove_nodes_from(nodes_to_delete)\n return G\n\n\ndef process_line(G, line, vocab=None):\n try:\n fos = data['fos']\n abstract = data['indexed_abstract']\n paper_id = data['id']\n references_id = data['references']\n text = list(abstract['InvertedIndex'].keys())\n text = ' '.join(text)\n if args.process_raw:\n with open(raw_save_path, 'a+') as f:\n f.write(text)\n f.write('\\n')\n \"\"\"Create Node Embedding if Node doesn't exist \"\"\"\n if vocab is not None:\n tokens = nltk.tokenize.word_tokenize(text)\n tokens = [token for token in tokens if not token in nltk_stopwords]\n node_emb = get_node_embed(tokens, vocab)\n for field in fos:\n name = field['name']\n for ref in references_id:\n G.add_edge(paper_id, ref)\n G.node[paper_id]['emb'] = node_emb\n except:\n return G\n return G\n\n\n<mask token>\n",
"step-4": "<mask token>\nnltk.download('stopwords')\nnltk_stopwords = nltk.corpus.stopwords.words('english')\ndata_path = '/home/joey.bose/dblp_papers_v11.txt'\nsave_path_base = '/home/joey.bose/aminer_data/'\nload_path_rank_base = '/home/joey.bose/aminer_data_ranked/fos/'\nsave_path_graph_base = '/home/joey.bose/aminer_data_ranked/graphs/'\nraw_save_path = '/home/joey.bose/aminer_data_ranked/aminer_raw.txt'\nspacy_nlp = spacy.load('en_core_web_sm')\nglove_path = (\n '/home/joey.bose/docker_temp/meta-graph/meta-graph/glove.840B.300d.txt')\n\n\nclass Lang:\n\n def __init__(self):\n super(Lang, self).__init__()\n self.word2index = {}\n self.word2count = {}\n self.index2word = {}\n self.n_words = 0\n\n def index_words(self, sentence):\n for word in sentence:\n self.index_word(word)\n\n def index_word(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\n\ndef gen_embeddings(vocab, file, emb_size, emb_dim):\n \"\"\"\n Generate an initial embedding matrix for word_dict.\n If an embedding file is not given or a word is not in the embedding file,\n a randomly initialized vector will be used.\n \"\"\"\n embeddings = np.zeros((vocab.n_words, emb_size))\n print('Embeddings: %d x %d' % (vocab.n_words, emb_size))\n if file is not None:\n print('Loading embedding file: %s' % file)\n pre_trained = 0\n for line in open(file).readlines():\n sp = line.split()\n if len(sp) == emb_dim + 1:\n if sp[0] in vocab.word2index:\n pre_trained += 1\n embeddings[vocab.word2index[sp[0]]] = [float(x) for x in\n sp[1:]]\n else:\n print(sp[0])\n print('Pre-trained: %d (%.2f%%)' % (pre_trained, pre_trained * \n 100.0 / vocab.n_words))\n return embeddings\n\n\ndef process_raw_abstracts(vocab):\n with open(raw_save_path, 'r', encoding='utf8') as f:\n for line in tqdm(f, total=13304586):\n tokens = nltk.tokenize.word_tokenize(line)\n tokens = [token for token in tokens if not token in nltk_stopwords]\n vocab.index_words(tokens)\n\n\ndef get_node_embed(text, vocab):\n sum_embed = 0\n for word in text:\n embed = embeddings[vocab.word2index[word]]\n sum_embed += embed\n return sum_embed\n\n\ndef check_graph(G):\n total_nodes = len(G.nodes)\n no_emb_nodes = 0\n nodes_to_delete = []\n for node_str in G.nodes:\n try:\n emb = G.node[node_str]['emb']\n except:\n no_emb_nodes += 1\n nodes_to_delete.append(node_str)\n print('%d Nodes and %d missing nodes in G ' % (total_nodes, no_emb_nodes))\n G.remove_nodes_from(nodes_to_delete)\n return G\n\n\ndef process_line(G, line, vocab=None):\n try:\n fos = data['fos']\n abstract = data['indexed_abstract']\n paper_id = data['id']\n references_id = data['references']\n text = list(abstract['InvertedIndex'].keys())\n text = ' '.join(text)\n if args.process_raw:\n with open(raw_save_path, 'a+') as f:\n f.write(text)\n f.write('\\n')\n \"\"\"Create Node Embedding if Node doesn't exist \"\"\"\n if vocab is not None:\n tokens = nltk.tokenize.word_tokenize(text)\n tokens = [token for token in tokens if not token in nltk_stopwords]\n node_emb = get_node_embed(tokens, vocab)\n for field in fos:\n name = field['name']\n for ref in references_id:\n G.add_edge(paper_id, ref)\n G.node[paper_id]['emb'] = node_emb\n except:\n return G\n return G\n\n\nif __name__ == '__main__':\n \"\"\"\n Create Aminer-Citation v-11 Graphs\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--topk', type=int, default='100')\n 
parser.add_argument('--process_raw', action='store_true', default=False,\n help='Process Raw Data')\n parser.add_argument('--make_vocab', action='store_true', default=False,\n help='Create Vocab from the raw abstract data')\n args = parser.parse_args()\n onlyfiles = [f for f in listdir(load_path_rank_base) if isfile(join(\n load_path_rank_base, f))]\n vocab = Lang()\n if args.make_vocab:\n process_raw_abstracts(vocab)\n joblib.dump(vocab, 'aminer_100_vocab.pkl')\n print('Done generating vocab')\n embeddings = gen_embeddings(vocab, file=glove_path, emb_size=300,\n emb_dim=300)\n joblib.dump(embeddings, 'aminer_100_embed.pkl')\n print('Done')\n exit()\n else:\n vocab = joblib.load('aminer_100_vocab.pkl')\n embeddings = joblib.load('aminer_100_embed.pkl')\n for i, file_ in tqdm(enumerate(onlyfiles), total=len(onlyfiles)):\n file_path = load_path_rank_base + file_\n G = nx.Graph()\n with open(file_path, 'r', encoding='utf8') as f:\n for line in f:\n data = json.loads(line)\n G = process_line(G, data, vocab)\n G = check_graph(G)\n print('%s has %d Nodes and %d edges' % (file_, len(G), len(G.edges)))\n if not os.path.exists(save_path_graph_base):\n os.mkdir(save_path_graph_base)\n save_path_graph = save_path_graph_base + file_.split('.')[0\n ] + '_graph.pkl'\n nx.write_gpickle(G, save_path_graph)\n",
"step-5": "import json\nimport os\nimport ipdb\nfrom tqdm import tqdm\nimport argparse\nfrom os import listdir\nfrom os.path import isfile, join\nimport pickle\nimport joblib\nfrom collections import Counter\nfrom shutil import copyfile\nimport networkx as nx\nimport spacy\nimport nltk\nimport numpy as np\n\nnltk.download('stopwords')\nnltk_stopwords = nltk.corpus.stopwords.words('english')\ndata_path = '/home/joey.bose/dblp_papers_v11.txt'\nsave_path_base = '/home/joey.bose/aminer_data/'\nload_path_rank_base = '/home/joey.bose/aminer_data_ranked/fos/'\nsave_path_graph_base = '/home/joey.bose/aminer_data_ranked/graphs/'\nraw_save_path = '/home/joey.bose/aminer_data_ranked/aminer_raw.txt'\nspacy_nlp = spacy.load('en_core_web_sm')\nglove_path = '/home/joey.bose/docker_temp/meta-graph/meta-graph/glove.840B.300d.txt'\n\nclass Lang:\n def __init__(self):\n super(Lang, self).__init__()\n self.word2index = {}\n self.word2count = {}\n self.index2word = {}\n self.n_words = 0 # Count default tokens\n\n def index_words(self, sentence):\n for word in sentence:\n self.index_word(word)\n\n def index_word(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\ndef gen_embeddings(vocab, file, emb_size, emb_dim):\n \"\"\"\n Generate an initial embedding matrix for word_dict.\n If an embedding file is not given or a word is not in the embedding file,\n a randomly initialized vector will be used.\n \"\"\"\n # embeddings = np.random.randn(vocab.n_words, emb_size) * 0.01\n embeddings = np.zeros((vocab.n_words, emb_size))\n print('Embeddings: %d x %d' % (vocab.n_words, emb_size))\n if file is not None:\n print('Loading embedding file: %s' % file)\n pre_trained = 0\n for line in open(file).readlines():\n sp = line.split()\n if(len(sp) == emb_dim + 1):\n if sp[0] in vocab.word2index:\n pre_trained += 1\n embeddings[vocab.word2index[sp[0]]] = [float(x) for x in sp[1:]]\n else:\n print(sp[0])\n print('Pre-trained: %d (%.2f%%)' % (pre_trained, pre_trained * 100.0 / vocab.n_words))\n return embeddings\n\ndef process_raw_abstracts(vocab):\n with open(raw_save_path,\"r\",encoding=\"utf8\") as f:\n for line in tqdm(f,total=13304586):\n tokens = nltk.tokenize.word_tokenize(line)\n tokens = [token for token in tokens if not token in nltk_stopwords]\n vocab.index_words(tokens)\n\ndef get_node_embed(text,vocab):\n sum_embed = 0\n for word in text:\n embed = embeddings[vocab.word2index[word]]\n sum_embed += embed\n return sum_embed\ndef check_graph(G):\n total_nodes = len(G.nodes)\n no_emb_nodes = 0\n nodes_to_delete = []\n for node_str in G.nodes:\n try:\n emb = G.node[node_str]['emb']\n except:\n no_emb_nodes += 1\n nodes_to_delete.append(node_str)\n print(\"%d Nodes and %d missing nodes in G \" %(total_nodes, no_emb_nodes))\n G.remove_nodes_from(nodes_to_delete)\n return G\n\ndef process_line(G, line, vocab=None):\n try:\n fos = data['fos']\n abstract = data['indexed_abstract']\n paper_id = data['id']\n references_id = data['references']\n text = list(abstract['InvertedIndex'].keys())\n text =\" \".join(text)\n if args.process_raw:\n with open(raw_save_path,\"a+\") as f:\n f.write(text)\n f.write('\\n')\n\n '''Create Node Embedding if Node doesn't exist '''\n if vocab is not None:\n tokens = nltk.tokenize.word_tokenize(text)\n tokens = [token for token in tokens if not token in nltk_stopwords]\n node_emb = get_node_embed(tokens,vocab)\n\n for field in fos:\n name = 
field['name']\n for ref in references_id:\n G.add_edge(paper_id, ref)\n G.node[paper_id]['emb'] = node_emb\n except:\n return G\n\n return G\n\nif __name__ == '__main__':\n \"\"\"\n Create Aminer-Citation v-11 Graphs\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--topk', type=int, default='100')\n parser.add_argument(\"--process_raw\", action=\"store_true\", default=False,\n\t\thelp='Process Raw Data')\n parser.add_argument(\"--make_vocab\", action=\"store_true\", default=False,\n\t\thelp='Create Vocab from the raw abstract data')\n args = parser.parse_args()\n onlyfiles = [f for f in listdir(load_path_rank_base) if isfile(join(load_path_rank_base, f))]\n vocab = Lang()\n if args.make_vocab:\n process_raw_abstracts(vocab)\n joblib.dump(vocab, \"aminer_100_vocab.pkl\")\n print(\"Done generating vocab\")\n embeddings = gen_embeddings(vocab,file=glove_path,emb_size=300,emb_dim=300)\n joblib.dump(embeddings, \"aminer_100_embed.pkl\")\n print(\"Done\")\n exit()\n else:\n vocab = joblib.load(\"aminer_100_vocab.pkl\")\n embeddings = joblib.load(\"aminer_100_embed.pkl\")\n\n for i, file_ in tqdm(enumerate(onlyfiles),total=len(onlyfiles)):\n file_path = load_path_rank_base + file_\n G = nx.Graph()\n with open(file_path,'r', encoding=\"utf8\") as f:\n for line in f:\n data = json.loads(line)\n G = process_line(G,data,vocab)\n G = check_graph(G)\n print(\"%s has %d Nodes and %d edges\" %(file_,len(G),len(G.edges)))\n if not os.path.exists(save_path_graph_base):\n os.mkdir(save_path_graph_base)\n save_path_graph = save_path_graph_base + file_.split('.')[0] + '_graph.pkl'\n nx.write_gpickle(G,save_path_graph)\n\n",
"step-ids": [
5,
8,
9,
11,
13
]
}
|
[
5,
8,
9,
11,
13
] |
import API.enum as enum
import re
class ObjectValidator():
def __init__(self, validationData={}, *args, **kwargs):
self.data = validationData
self.statusCode = 200
self.validationPipeline = []
self.errors = {}
self.invalidFields = []
def flush(self):
self = ObjectValidator()
return self
def setError(self, field, error):
if field not in self.invalidFields:
fieldErrors = self.errors.get(field, [])
if error[0] not in fieldErrors:
self.errors[field] = fieldErrors + [error[0]]
self.statusCode = error[1]
self.invalidFields.append(field)
def getErrors(self):
return self.errors
def validate(self):
for validation in self.validationPipeline:
try:
validation['validator'](validation['data'])
except:
self.setError(validation['data']['field'], enum.Error.INVALID_FIELD_DATA.value)
def addValidation(self, data, validatorFunction):
self.validationPipeline.append({
'data': data,
'validator': validatorFunction
})
def _check_with_authenticationValidator(self, data):
if not data['user'].is_authenticated:
self.setError(data['field'], enum.Error.UNAUTHORIZED.value)
def _check_with_nonDuplicateObjectValidator(self, data):
model = data['model']
if model.objects.filter(**data['filter']):
self.setError(data['field'], enum.Error.DUPLICATE_FIELDS.value)
def _check_with_ObjectExistenceValidator(self, data):
model = data['model']
if not model.objects.filter(**data['filter']):
self.setError(data['field'], enum.Error.GENERIC_OBJECT_NOT_FOUND.value)
def checkNonDuplicateObject(self, field, model, **filter):
self.addValidation({'field': field, 'model': model, 'filter': filter},
self._check_with_nonDuplicateObjectValidator)
return self
def checkObjectExistence(self, field, model, **filter):
self.addValidation({'field': field, 'model': model, 'filter': filter},
self._check_with_ObjectExistenceValidator)
return self
def checkUserAuthentication(self, field, user):
self.addValidation({'field': field, 'user': user},
self._check_with_authenticationValidator)
return self
#\b(?!(\d)\1{3})[13-9]{4}[1346-9][013-9]{5}\b
# postal code validation
class FieldValidator():
def __init__(self, validationData={}, *args, **kwargs):
self.data = validationData
self.validationPipeline = []
self.statusCode = 200
self.errors = {}
self.invalidFields = []
def flush(self):
self = FieldValidator()
def setError(self, field, error):
if field not in self.invalidFields:
fieldErrors = self.errors.get(field, [])
if error[0] not in fieldErrors:
self.errors[field] = fieldErrors + [error[0]]
self.statusCode = error[1]
self.invalidFields.append(field)
def getErrors(self):
return self.errors
def validate(self):
for validation in self.validationPipeline:
try:
validation['validator'](validation['data'])
except:
self.setError(validation['data']['field'], enum.Error.INVALID_FIELD_DATA.value)
return self
def addValidation(self, data, validatorFunction):
if (data['value'] == 'unAssigned') and data['field'] in self.data.keys():
data['value'] = self.data[data['field']]
elif data['value'] == 'unAssigned' and data['field'] not in self.data.keys():
data['value'] = None
self.validationPipeline.append({
'data': data,
'validator': validatorFunction
})
def _check_with_typeValidator(self, data):
if not isinstance(data['value'], data['type']):
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_nationalLegalCodeValidator(self, data):
nationalLegalCode = data['value']
result = 0
validationList = [29, 27, 23, 19, 17, 29, 27, 23, 19, 17]
if len(nationalLegalCode) != 11:
self.setError(data['field'], enum.Error.INVALID_NATIONAL_LEGAL_CODE.value)
return
for i in range(10):
result += (int(nationalLegalCode[-2]) + 2 + int(nationalLegalCode[i])) * validationList[i]
if result % 11 == 10:
reminder = 0
else:
reminder = result % 11
if reminder == int(nationalLegalCode[-1]):
valid = True
else:
valid = False
if valid is False:
self.setError(data['field'], enum.Error.INVALID_NATIONAL_LEGAL_CODE.value)
def _check_with_nationalCodeValidator(self, data):
nCode = data['value']
valid = True
if len(nCode) != 10:
valid = False
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
return
sum = 0
for i in range(9):
sum += int(nCode[i]) * (10 - i)
r = sum % 11
if (r < 2 and r == int(nCode[9])) or r >= 2 and r == 11 - int(nCode[9]):
valid = valid and True
if valid is False:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_officer1NationalCodeValidator(self, data):
nCode = data['value']
valid = True
if len(nCode) != 10:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
return
sum = 0
for i in range(9):
sum += int(nCode[i]) * (10 - i)
r = sum % 11
if (r < 2 and r == int(nCode[9])) or r >= 2 and r == 11 - int(nCode[9]):
valid = valid and True
if valid is False:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_officer2NationalCodeValidator(self, data):
nCode = data['value']
valid = True
if len(nCode) != 10:
valid = False
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
return
sum = 0
for i in range(9):
sum += int(nCode[i]) * (10 - i)
r = sum % 11
if (r < 2 and r == int(nCode[9])) or r >= 2 and r == 11 - int(nCode[9]):
valid = valid and True
if valid is False:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_featuresValidator(self, data):
for i in data['value']:
if i not in ["پلتفرم پرداخت در محل", "باشگاه مشتریان", "درگاه پرداخت اینترنتی"]:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
break
def _check_with_userNameValidator(self, data):
username = re.match(r"^[A-Za-z]+(?:[ _-][A-Za-z0-9]+)*$", data["value"])
if 'admin' in data['value'] or 'zibal' in data['value'] or username is None:
self.setError(data['field'], enum.Error.INVALID_USERNAME.value)
def _check_with_phoneNumberValidator(self, data):
        if data['value'] is None or len(data['value']) < 1:
self.setError(data['field'], enum.Error.PHONE_INCORRECT_TEMPLATE.value)
def _check_with_mobileValidator(self, data):
mobileNumber = data['value']
if mobileNumber is None:
self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)
return
match_object = re.match(r"(^09[0-9]{9}$)", mobileNumber)
if match_object is None or mobileNumber is None:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_emailValidator(self, data):
email = data['value']
if email is None:
self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)
return
match_object = re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email)
if match_object is None or email is None:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_noneValidator(self, data):
if data['value'] is None or data['value'] == "":
self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)
def _check_with_fileValidator(self, data):
file = data['value']
field = data['field']
if file is None:
self.setError(field, enum.Error.EMPTY_INPUT_FIELD.value)
return
elif file.size > enum.Limits.FILE_SIZE_LIMIT.value:
self.setError(field, enum.Error.FILE_SIZE_EXCEED.value)
types = data['options'].get('types', None)
valid = False
if types is not None:
for type in types:
valid = valid or type in file.content_type
if valid is False:
self.setError(field, enum.Error.REQUEST_TYPE_ERROR.value)
def _check_with_IBANValidator(self, data):
iban = data['value']
if len(iban)!=26 or not iban.startswith("IR"):
self.setError(data['field'], enum.Error.IBAN_ERROR.value)
return
code = iban[4:]+iban[:4]
code = code.replace('I','18').replace('R','27')
if int(code)%97!=1:
self.setError(data['field'], enum.Error.IBAN_ERROR.value)
def _check_with_subMerchantBankAccountValidator(self, data):
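        # Note: SubMerchant is not imported in this module; it is assumed to be a Django
        # model available at runtime (a NameError here is caught by validate()'s except).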
if not SubMerchant.objects.filter(idsql=data['value']['userId'], ID=data['value']['subId'], status=1).exists():
self.setError(data['field'], enum.Error.IMPOSSIBLE_BANK_ACCOUNT_DESTINATION.value)
def _check_with_minDataLengthValidator(self, data):
if data['value'] is None or len(data['value']) < data['length']:
self.setError(data['field'], (enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),
enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))
def _check_with_maxDataLengthValidator(self, data):
if data['value'] is None or len(data['value']) > data['length']:
self.setError(data['field'], (enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),
enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))
def _check_with_equalDataLengthValidator(self, data):
if data['value'] is None or len(data['value']) != data['length']:
self.setError(data['field'], (enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),
enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))
def _check_with_inputValidator(self, data):
if data['value'] is None or len(data['value']) < 1:
self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)
def _check_with_IbanTransferable(self, data):
if data['value'][4:7]=='062' and data['value'][-13:-10]=='080':
self.setError(data['field'], enum.Error.NOT_IBAN_TRANSFERABLE.value)
def _check_with_username(self, data):
username = re.match(r"^[a-zA-Z0-9_.-]+$", data["value"])
if username is None:
self.setError(data['field'], enum.Error.INVALID_USERNAME.value)
#############################################################################
def checkType(self, field, type, value="unAssigned"):
self.addValidation({'field': field, 'type': type, 'value': value}, self._check_with_typeValidator)
return self
def checkNationalLegalCode(self, field, code="unAssigned"):
self.addValidation({'field': field, 'value': code}, self._check_with_nationalLegalCodeValidator)
return self
def checkOfficer1NationalCode(self, field, code="unAssigned"):
self.addValidation({'field': field, 'value': code}, self._check_with_officer1NationalCodeValidator)
return self
def checkOfficer2NationalCode(self, field, code="unAssigned"):
self.addValidation({'field': field, 'value': code}, self._check_with_officer2NationalCodeValidator)
return self
def checkNationalCode(self, field, code="unAssigned"):
self.addValidation({'field': field, 'value': code}, self._check_with_nationalCodeValidator)
return self
def checkFeatures(self, field, features="unAssigned"):
self.addValidation({'field': field, 'value': features}, self._check_with_featuresValidator)
return self
def checkUserName(self, field, username="unAssigned"):
self.addValidation({'field': field, 'value': username}, self._check_with_userNameValidator)
return self
def checkPhone(self, field, data="unAssigned"):
self.addValidation({'field': field, 'value': data}, self._check_with_phoneNumberValidator)
return self
def checkMobile(self, field, data="unAssigned"):
self.addValidation({'field': field, 'value': data}, self._check_with_mobileValidator)
return self
def checkEmail(self, field, data="unAssigned"):
self.addValidation({'field': field, 'value': data}, self._check_with_emailValidator)
return self
def checkNotNone(self, field, data="unAssigned"):
self.addValidation({'field': field, 'value': data}, self._check_with_noneValidator)
return self
def checkFile(self, field, data, **options):
self.addValidation({'field': field, 'value': data, 'options': options}, self._check_with_fileValidator)
return self
def checkIBAN(self, field, data="unAssigned"):
self.addValidation({'field': field, 'value': data}, self._check_with_IBANValidator)
return self
def checkBankAccountDestinationForSubmerchant(self, field, userId, subId):
data = {
'userId': userId,
'subId': subId
}
self.addValidation({'field': field, 'value': data}, self._check_with_subMerchantBankAccountValidator)
return self
def checkDataLength(self, field, length,mode='equal', data="unAssigned"):
if mode == 'equal':
validatorFunction = self._check_with_equalDataLengthValidator
if mode == 'min':
validatorFunction = self._check_with_minDataLengthValidator
        if mode == 'max':
            validatorFunction = self._check_with_maxDataLengthValidator
self.addValidation({'field': field, 'value': data, 'length': length}, validatorFunction)
return self
def checkInputData(self, field, data="unAssigned"):
self.addValidation({'field': field, 'value': data}, self._check_with_inputValidator)
return self
def checkTelephone(self, field, data="unAssigned"): ##TODO
self.addValidation({'field': field, 'value': data}, self._check_with_phoneNumberValidator)
return self
def checkIsIbanTransferable(self, field, data="unAssigned"):
self.addValidation({'field': field, 'value': data}, self._check_with_IbanTransferable)
return self
    def checkUsername(self, field, data="unAssigned"):
        self.addValidation({'field': field, 'value': data}, self._check_with_username)
        return self
class DataValidator:
def __init__(self, data={}):
self.fieldValidator = FieldValidator(data)
self.objectValidator = ObjectValidator()
self.errors = {}
self.statusCode = 200
def getValidatorsErrors(self):
self.objectValidator.validate()
self.fieldValidator.validate()
for key in self.fieldValidator.getErrors().keys():
self.errors[key] = self.errors.get(key, []) + self.fieldValidator.getErrors()[key]
self.statusCode = self.fieldValidator.statusCode
for key in self.objectValidator.getErrors().keys():
self.errors[key] = self.errors.get(key, []) + self.objectValidator.getErrors()[key]
self.statusCode = self.objectValidator.statusCode if self.objectValidator.statusCode != 200 else self.statusCode
return self.errors
def generateMessage(self):
messages = []
errorKeys = self.errors.keys()
if 'email' in errorKeys:
messages.append(' آدرس ایمیل نامعتبر است')
if "name" in errorKeys :
messages.append('نام را وارد کنید')
if 'username' in errorKeys:
messages.append('نام کاربری را وارد کنید')
if 'password' in errorKeys:
messages.append('رمز عبور را وارد کنید')
if 'mobile' in errorKeys:
messages.append('تلفن همراه خود را وارد کنید.')
if 'phone' in errorKeys:
messages.append('تلفن ثابت را به فرمت 02122407556 و 11 رقمی وارد کنید')
if 'iban' in errorKeys or 'IBAN' in errorKeys:
messages.append('شماره شبای وارد شده معتبر نیست. 26 کاراکتر و شروع با IR و بدون خط تیره (-) و فاصله')
if 'user' in errorKeys:
messages.append('لطفا وارد شوید')
return messages
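A hedged usage sketch (the field names and values are illustrative, not taken from this file): validations are queued fluently on the two validators and evaluated together via getValidatorsErrors().

validator = DataValidator({'mobile': '09123456789', 'email': 'user@example.com'})
validator.fieldValidator \
    .checkMobile('mobile') \
    .checkEmail('email') \
    .checkDataLength('mobile', 11)
# object-level checks are queued the same way, e.g.:
# validator.objectValidator.checkUserAuthentication('user', request.user)
errors = validator.getValidatorsErrors()
if errors:
    print(validator.generateMessage(), validator.statusCode)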
|
normal
|
{
"blob_id": "e8daf03f987c7512ff245bfbe16c447acd6b5986",
"index": 7574,
"step-1": "<mask token>\n\n\nclass FieldValidator:\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.validationPipeline = []\n self.statusCode = 200\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = FieldValidator()\n\n def setError(self, field, error):\n if field not in self.invalidFields:\n fieldErrors = self.errors.get(field, [])\n if error[0] not in fieldErrors:\n self.errors[field] = fieldErrors + [error[0]]\n self.statusCode = error[1]\n self.invalidFields.append(field)\n\n def getErrors(self):\n return self.errors\n\n def validate(self):\n for validation in self.validationPipeline:\n try:\n validation['validator'](validation['data'])\n except:\n self.setError(validation['data']['field'], enum.Error.\n INVALID_FIELD_DATA.value)\n return self\n\n def addValidation(self, data, validatorFunction):\n if data['value'] == 'unAssigned' and data['field'] in self.data.keys():\n data['value'] = self.data[data['field']]\n elif data['value'] == 'unAssigned' and data['field'\n ] not in self.data.keys():\n data['value'] = None\n self.validationPipeline.append({'data': data, 'validator':\n validatorFunction})\n\n def _check_with_typeValidator(self, data):\n if not isinstance(data['value'], data['type']):\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_nationalLegalCodeValidator(self, data):\n nationalLegalCode = data['value']\n result = 0\n validationList = [29, 27, 23, 19, 17, 29, 27, 23, 19, 17]\n if len(nationalLegalCode) != 11:\n self.setError(data['field'], enum.Error.\n INVALID_NATIONAL_LEGAL_CODE.value)\n return\n for i in range(10):\n result += (int(nationalLegalCode[-2]) + 2 + int(\n nationalLegalCode[i])) * validationList[i]\n if result % 11 == 10:\n reminder = 0\n else:\n reminder = result % 11\n if reminder == int(nationalLegalCode[-1]):\n valid = True\n else:\n valid = False\n if valid is False:\n self.setError(data['field'], enum.Error.\n INVALID_NATIONAL_LEGAL_CODE.value)\n <mask token>\n\n def _check_with_officer1NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_officer2NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_featuresValidator(self, data):\n for i in data['value']:\n if i not in ['پلتفرم پرداخت در محل', 'باشگاه مشتریان',\n 'درگاه پرداخت اینترنتی']:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.\n value)\n break\n\n def _check_with_userNameValidator(self, data):\n username = re.match('^[A-Za-z]+(?:[ _-][A-Za-z0-9]+)*$', data['value'])\n if 'admin' in data['value'] or 'zibal' in data['value'\n ] or username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def _check_with_phoneNumberValidator(self, 
data):\n if data['value'] is None or len(data) < 1:\n self.setError(data['field'], enum.Error.\n PHONE_INCORRECT_TEMPLATE.value)\n <mask token>\n <mask token>\n <mask token>\n\n def _check_with_fileValidator(self, data):\n file = data['value']\n field = data['field']\n if file is None:\n self.setError(field, enum.Error.EMPTY_INPUT_FIELD.value)\n return\n elif file.size > enum.Limits.FILE_SIZE_LIMIT.value:\n self.setError(field, enum.Error.FILE_SIZE_EXCEED.value)\n types = data['options'].get('types', None)\n valid = False\n if types is not None:\n for type in types:\n valid = valid or type in file.content_type\n if valid is False:\n self.setError(field, enum.Error.REQUEST_TYPE_ERROR.value)\n <mask token>\n\n def _check_with_subMerchantBankAccountValidator(self, data):\n if not SubMerchant.objects.filter(idsql=data['value']['userId'], ID\n =data['value']['subId'], status=1).exists():\n self.setError(data['field'], enum.Error.\n IMPOSSIBLE_BANK_ACCOUNT_DESTINATION.value)\n\n def _check_with_minDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) < data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_maxDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) > data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_equalDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) != data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_inputValidator(self, data):\n if data['value'] is None or len(data['value']) < 1:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_IbanTransferable(self, data):\n if data['value'][4:7] == '062' and data['value'][-13:-10] == '080':\n self.setError(data['field'], enum.Error.NOT_IBAN_TRANSFERABLE.value\n )\n\n def _check_with_username(self, data):\n username = re.match('^[a-zA-Z0-9_.-]+$', data['value'])\n if username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def checkType(self, field, type, value='unAssigned'):\n self.addValidation({'field': field, 'type': type, 'value': value},\n self._check_with_typeValidator)\n return self\n\n def checkNationalLegalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_nationalLegalCodeValidator)\n return self\n\n def checkOfficer1NationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_officer1NationalCodeValidator)\n return self\n\n def checkOfficer2NationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_officer2NationalCodeValidator)\n return self\n\n def checkNationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_nationalCodeValidator)\n return self\n\n def checkFeatures(self, field, features='unAssigned'):\n self.addValidation({'field': field, 'value': features}, self.\n _check_with_featuresValidator)\n return self\n <mask token>\n <mask token>\n <mask token>\n\n def checkEmail(self, field, data='unAssigned'):\n 
self.addValidation({'field': field, 'value': data}, self.\n _check_with_emailValidator)\n return self\n\n def checkNotNone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_noneValidator)\n return self\n\n def checkFile(self, field, data, **options):\n self.addValidation({'field': field, 'value': data, 'options':\n options}, self._check_with_fileValidator)\n return self\n\n def checkIBAN(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_IBANValidator)\n return self\n\n def checkBankAccountDestinationForSubmerchant(self, field, userId, subId):\n data = {'userId': userId, 'subId': subId}\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_subMerchantBankAccountValidator)\n return self\n\n def checkDataLength(self, field, length, mode='equal', data='unAssigned'):\n if mode == 'equal':\n validatorFunction = self._check_with_equalDataLengthValidator\n if mode == 'min':\n validatorFunction = self._check_with_minDataLengthValidator\n if mode == 'max':\n validatorFunction = self._check_with_minDataLengthValidator\n self.addValidation({'field': field, 'value': data, 'length': length\n }, validatorFunction)\n return self\n\n def checkInputData(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_inputValidator)\n return self\n <mask token>\n\n def checkIsIbanTransferable(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_IbanTransferable)\n return self\n <mask token>\n\n\nclass DataValidator:\n\n def __init__(self, data={}):\n self.fieldValidator = FieldValidator(data)\n self.objectValidator = ObjectValidator()\n self.errors = {}\n self.statusCode = 200\n\n def getValidatorsErrors(self):\n self.objectValidator.validate()\n self.fieldValidator.validate()\n for key in self.fieldValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []\n ) + self.fieldValidator.getErrors()[key]\n self.statusCode = self.fieldValidator.statusCode\n for key in self.objectValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []\n ) + self.objectValidator.getErrors()[key]\n self.statusCode = (self.objectValidator.statusCode if self.\n objectValidator.statusCode != 200 else self.statusCode)\n return self.errors\n\n def generateMessage(self):\n messages = []\n errorKeys = self.errors.keys()\n if 'email' in errorKeys:\n messages.append(' آدرس ایمیل نامعتبر است')\n if 'name' in errorKeys:\n messages.append('نام را وارد کنید')\n if 'username' in errorKeys:\n messages.append('نام کاربری را وارد کنید')\n if 'password' in errorKeys:\n messages.append('رمز عبور را وارد کنید')\n if 'mobile' in errorKeys:\n messages.append('تلفن همراه خود را وارد کنید.')\n if 'phone' in errorKeys:\n messages.append(\n 'تلفن ثابت را به فرمت 02122407556 و 11 رقمی وارد کنید')\n if 'iban' in errorKeys or 'IBAN' in errorKeys:\n messages.append(\n 'شماره شبای وارد شده معتبر نیست. 26 کاراکتر و شروع با IR و بدون خط تیره (-) و فاصله'\n )\n if 'user' in errorKeys:\n messages.append('لطفا وارد شوید')\n return messages\n",
"step-2": "<mask token>\n\n\nclass ObjectValidator:\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.statusCode = 200\n self.validationPipeline = []\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = ObjectValidator()\n return self\n <mask token>\n <mask token>\n <mask token>\n\n def addValidation(self, data, validatorFunction):\n self.validationPipeline.append({'data': data, 'validator':\n validatorFunction})\n\n def _check_with_authenticationValidator(self, data):\n if not data['user'].is_authenticated:\n self.setError(data['field'], enum.Error.UNAUTHORIZED.value)\n <mask token>\n\n def _check_with_ObjectExistenceValidator(self, data):\n model = data['model']\n if not model.objects.filter(**data['filter']):\n self.setError(data['field'], enum.Error.\n GENERIC_OBJECT_NOT_FOUND.value)\n <mask token>\n\n def checkObjectExistence(self, field, model, **filter):\n self.addValidation({'field': field, 'model': model, 'filter':\n filter}, self._check_with_ObjectExistenceValidator)\n return self\n\n def checkUserAuthentication(self, field, user):\n self.addValidation({'field': field, 'user': user}, self.\n _check_with_authenticationValidator)\n return self\n\n\nclass FieldValidator:\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.validationPipeline = []\n self.statusCode = 200\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = FieldValidator()\n\n def setError(self, field, error):\n if field not in self.invalidFields:\n fieldErrors = self.errors.get(field, [])\n if error[0] not in fieldErrors:\n self.errors[field] = fieldErrors + [error[0]]\n self.statusCode = error[1]\n self.invalidFields.append(field)\n\n def getErrors(self):\n return self.errors\n\n def validate(self):\n for validation in self.validationPipeline:\n try:\n validation['validator'](validation['data'])\n except:\n self.setError(validation['data']['field'], enum.Error.\n INVALID_FIELD_DATA.value)\n return self\n\n def addValidation(self, data, validatorFunction):\n if data['value'] == 'unAssigned' and data['field'] in self.data.keys():\n data['value'] = self.data[data['field']]\n elif data['value'] == 'unAssigned' and data['field'\n ] not in self.data.keys():\n data['value'] = None\n self.validationPipeline.append({'data': data, 'validator':\n validatorFunction})\n\n def _check_with_typeValidator(self, data):\n if not isinstance(data['value'], data['type']):\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_nationalLegalCodeValidator(self, data):\n nationalLegalCode = data['value']\n result = 0\n validationList = [29, 27, 23, 19, 17, 29, 27, 23, 19, 17]\n if len(nationalLegalCode) != 11:\n self.setError(data['field'], enum.Error.\n INVALID_NATIONAL_LEGAL_CODE.value)\n return\n for i in range(10):\n result += (int(nationalLegalCode[-2]) + 2 + int(\n nationalLegalCode[i])) * validationList[i]\n if result % 11 == 10:\n reminder = 0\n else:\n reminder = result % 11\n if reminder == int(nationalLegalCode[-1]):\n valid = True\n else:\n valid = False\n if valid is False:\n self.setError(data['field'], enum.Error.\n INVALID_NATIONAL_LEGAL_CODE.value)\n\n def _check_with_nationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == 
int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_officer1NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_officer2NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_featuresValidator(self, data):\n for i in data['value']:\n if i not in ['پلتفرم پرداخت در محل', 'باشگاه مشتریان',\n 'درگاه پرداخت اینترنتی']:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.\n value)\n break\n\n def _check_with_userNameValidator(self, data):\n username = re.match('^[A-Za-z]+(?:[ _-][A-Za-z0-9]+)*$', data['value'])\n if 'admin' in data['value'] or 'zibal' in data['value'\n ] or username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def _check_with_phoneNumberValidator(self, data):\n if data['value'] is None or len(data) < 1:\n self.setError(data['field'], enum.Error.\n PHONE_INCORRECT_TEMPLATE.value)\n\n def _check_with_mobileValidator(self, data):\n mobileNumber = data['value']\n if mobileNumber is None:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n return\n match_object = re.match('(^09[0-9]{9}$)', mobileNumber)\n if match_object is None or mobileNumber is None:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_emailValidator(self, data):\n email = data['value']\n if email is None:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n return\n match_object = re.match(\n '(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+$)', email)\n if match_object is None or email is None:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_noneValidator(self, data):\n if data['value'] is None or data['value'] == '':\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_fileValidator(self, data):\n file = data['value']\n field = data['field']\n if file is None:\n self.setError(field, enum.Error.EMPTY_INPUT_FIELD.value)\n return\n elif file.size > enum.Limits.FILE_SIZE_LIMIT.value:\n self.setError(field, enum.Error.FILE_SIZE_EXCEED.value)\n types = data['options'].get('types', None)\n valid = False\n if types is not None:\n for type in types:\n valid = valid or type in file.content_type\n if valid is False:\n self.setError(field, enum.Error.REQUEST_TYPE_ERROR.value)\n\n def _check_with_IBANValidator(self, data):\n iban = data['value']\n if len(iban) != 26 or not iban.startswith('IR'):\n self.setError(data['field'], enum.Error.IBAN_ERROR.value)\n return\n code = iban[4:] + iban[:4]\n code = code.replace('I', '18').replace('R', '27')\n if int(code) % 97 != 1:\n 
self.setError(data['field'], enum.Error.IBAN_ERROR.value)\n\n def _check_with_subMerchantBankAccountValidator(self, data):\n if not SubMerchant.objects.filter(idsql=data['value']['userId'], ID\n =data['value']['subId'], status=1).exists():\n self.setError(data['field'], enum.Error.\n IMPOSSIBLE_BANK_ACCOUNT_DESTINATION.value)\n\n def _check_with_minDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) < data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_maxDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) > data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_equalDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) != data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_inputValidator(self, data):\n if data['value'] is None or len(data['value']) < 1:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_IbanTransferable(self, data):\n if data['value'][4:7] == '062' and data['value'][-13:-10] == '080':\n self.setError(data['field'], enum.Error.NOT_IBAN_TRANSFERABLE.value\n )\n\n def _check_with_username(self, data):\n username = re.match('^[a-zA-Z0-9_.-]+$', data['value'])\n if username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def checkType(self, field, type, value='unAssigned'):\n self.addValidation({'field': field, 'type': type, 'value': value},\n self._check_with_typeValidator)\n return self\n\n def checkNationalLegalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_nationalLegalCodeValidator)\n return self\n\n def checkOfficer1NationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_officer1NationalCodeValidator)\n return self\n\n def checkOfficer2NationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_officer2NationalCodeValidator)\n return self\n\n def checkNationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_nationalCodeValidator)\n return self\n\n def checkFeatures(self, field, features='unAssigned'):\n self.addValidation({'field': field, 'value': features}, self.\n _check_with_featuresValidator)\n return self\n\n def checkUserName(self, field, username='unAssigned'):\n self.addValidation({'field': field, 'value': username}, self.\n _check_with_userNameValidator)\n return self\n\n def checkPhone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_phoneNumberValidator)\n return self\n\n def checkMobile(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_mobileValidator)\n return self\n\n def checkEmail(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_emailValidator)\n return self\n\n def checkNotNone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, 
self.\n _check_with_noneValidator)\n return self\n\n def checkFile(self, field, data, **options):\n self.addValidation({'field': field, 'value': data, 'options':\n options}, self._check_with_fileValidator)\n return self\n\n def checkIBAN(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_IBANValidator)\n return self\n\n def checkBankAccountDestinationForSubmerchant(self, field, userId, subId):\n data = {'userId': userId, 'subId': subId}\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_subMerchantBankAccountValidator)\n return self\n\n def checkDataLength(self, field, length, mode='equal', data='unAssigned'):\n if mode == 'equal':\n validatorFunction = self._check_with_equalDataLengthValidator\n if mode == 'min':\n validatorFunction = self._check_with_minDataLengthValidator\n if mode == 'max':\n validatorFunction = self._check_with_minDataLengthValidator\n self.addValidation({'field': field, 'value': data, 'length': length\n }, validatorFunction)\n return self\n\n def checkInputData(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_inputValidator)\n return self\n\n def checkTelephone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_phoneNumberValidator)\n return self\n\n def checkIsIbanTransferable(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_IbanTransferable)\n return self\n\n def checkUsername(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_username())\n\n\nclass DataValidator:\n\n def __init__(self, data={}):\n self.fieldValidator = FieldValidator(data)\n self.objectValidator = ObjectValidator()\n self.errors = {}\n self.statusCode = 200\n\n def getValidatorsErrors(self):\n self.objectValidator.validate()\n self.fieldValidator.validate()\n for key in self.fieldValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []\n ) + self.fieldValidator.getErrors()[key]\n self.statusCode = self.fieldValidator.statusCode\n for key in self.objectValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []\n ) + self.objectValidator.getErrors()[key]\n self.statusCode = (self.objectValidator.statusCode if self.\n objectValidator.statusCode != 200 else self.statusCode)\n return self.errors\n\n def generateMessage(self):\n messages = []\n errorKeys = self.errors.keys()\n if 'email' in errorKeys:\n messages.append(' آدرس ایمیل نامعتبر است')\n if 'name' in errorKeys:\n messages.append('نام را وارد کنید')\n if 'username' in errorKeys:\n messages.append('نام کاربری را وارد کنید')\n if 'password' in errorKeys:\n messages.append('رمز عبور را وارد کنید')\n if 'mobile' in errorKeys:\n messages.append('تلفن همراه خود را وارد کنید.')\n if 'phone' in errorKeys:\n messages.append(\n 'تلفن ثابت را به فرمت 02122407556 و 11 رقمی وارد کنید')\n if 'iban' in errorKeys or 'IBAN' in errorKeys:\n messages.append(\n 'شماره شبای وارد شده معتبر نیست. 26 کاراکتر و شروع با IR و بدون خط تیره (-) و فاصله'\n )\n if 'user' in errorKeys:\n messages.append('لطفا وارد شوید')\n return messages\n",
"step-3": "<mask token>\n\n\nclass ObjectValidator:\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.statusCode = 200\n self.validationPipeline = []\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = ObjectValidator()\n return self\n <mask token>\n\n def getErrors(self):\n return self.errors\n\n def validate(self):\n for validation in self.validationPipeline:\n try:\n validation['validator'](validation['data'])\n except:\n self.setError(validation['data']['field'], enum.Error.\n INVALID_FIELD_DATA.value)\n\n def addValidation(self, data, validatorFunction):\n self.validationPipeline.append({'data': data, 'validator':\n validatorFunction})\n\n def _check_with_authenticationValidator(self, data):\n if not data['user'].is_authenticated:\n self.setError(data['field'], enum.Error.UNAUTHORIZED.value)\n\n def _check_with_nonDuplicateObjectValidator(self, data):\n model = data['model']\n if model.objects.filter(**data['filter']):\n self.setError(data['field'], enum.Error.DUPLICATE_FIELDS.value)\n\n def _check_with_ObjectExistenceValidator(self, data):\n model = data['model']\n if not model.objects.filter(**data['filter']):\n self.setError(data['field'], enum.Error.\n GENERIC_OBJECT_NOT_FOUND.value)\n\n def checkNonDuplicateObject(self, field, model, **filter):\n self.addValidation({'field': field, 'model': model, 'filter':\n filter}, self._check_with_nonDuplicateObjectValidator)\n return self\n\n def checkObjectExistence(self, field, model, **filter):\n self.addValidation({'field': field, 'model': model, 'filter':\n filter}, self._check_with_ObjectExistenceValidator)\n return self\n\n def checkUserAuthentication(self, field, user):\n self.addValidation({'field': field, 'user': user}, self.\n _check_with_authenticationValidator)\n return self\n\n\nclass FieldValidator:\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.validationPipeline = []\n self.statusCode = 200\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = FieldValidator()\n\n def setError(self, field, error):\n if field not in self.invalidFields:\n fieldErrors = self.errors.get(field, [])\n if error[0] not in fieldErrors:\n self.errors[field] = fieldErrors + [error[0]]\n self.statusCode = error[1]\n self.invalidFields.append(field)\n\n def getErrors(self):\n return self.errors\n\n def validate(self):\n for validation in self.validationPipeline:\n try:\n validation['validator'](validation['data'])\n except:\n self.setError(validation['data']['field'], enum.Error.\n INVALID_FIELD_DATA.value)\n return self\n\n def addValidation(self, data, validatorFunction):\n if data['value'] == 'unAssigned' and data['field'] in self.data.keys():\n data['value'] = self.data[data['field']]\n elif data['value'] == 'unAssigned' and data['field'\n ] not in self.data.keys():\n data['value'] = None\n self.validationPipeline.append({'data': data, 'validator':\n validatorFunction})\n\n def _check_with_typeValidator(self, data):\n if not isinstance(data['value'], data['type']):\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_nationalLegalCodeValidator(self, data):\n nationalLegalCode = data['value']\n result = 0\n validationList = [29, 27, 23, 19, 17, 29, 27, 23, 19, 17]\n if len(nationalLegalCode) != 11:\n self.setError(data['field'], enum.Error.\n INVALID_NATIONAL_LEGAL_CODE.value)\n return\n for i in range(10):\n result += (int(nationalLegalCode[-2]) + 2 + int(\n 
nationalLegalCode[i])) * validationList[i]\n if result % 11 == 10:\n reminder = 0\n else:\n reminder = result % 11\n if reminder == int(nationalLegalCode[-1]):\n valid = True\n else:\n valid = False\n if valid is False:\n self.setError(data['field'], enum.Error.\n INVALID_NATIONAL_LEGAL_CODE.value)\n\n def _check_with_nationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_officer1NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_officer2NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_featuresValidator(self, data):\n for i in data['value']:\n if i not in ['پلتفرم پرداخت در محل', 'باشگاه مشتریان',\n 'درگاه پرداخت اینترنتی']:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.\n value)\n break\n\n def _check_with_userNameValidator(self, data):\n username = re.match('^[A-Za-z]+(?:[ _-][A-Za-z0-9]+)*$', data['value'])\n if 'admin' in data['value'] or 'zibal' in data['value'\n ] or username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def _check_with_phoneNumberValidator(self, data):\n if data['value'] is None or len(data) < 1:\n self.setError(data['field'], enum.Error.\n PHONE_INCORRECT_TEMPLATE.value)\n\n def _check_with_mobileValidator(self, data):\n mobileNumber = data['value']\n if mobileNumber is None:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n return\n match_object = re.match('(^09[0-9]{9}$)', mobileNumber)\n if match_object is None or mobileNumber is None:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_emailValidator(self, data):\n email = data['value']\n if email is None:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n return\n match_object = re.match(\n '(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+$)', email)\n if match_object is None or email is None:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_noneValidator(self, data):\n if data['value'] is None or data['value'] == '':\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_fileValidator(self, data):\n file = data['value']\n field = data['field']\n if file is None:\n self.setError(field, enum.Error.EMPTY_INPUT_FIELD.value)\n return\n elif file.size > 
enum.Limits.FILE_SIZE_LIMIT.value:\n self.setError(field, enum.Error.FILE_SIZE_EXCEED.value)\n types = data['options'].get('types', None)\n valid = False\n if types is not None:\n for type in types:\n valid = valid or type in file.content_type\n if valid is False:\n self.setError(field, enum.Error.REQUEST_TYPE_ERROR.value)\n\n def _check_with_IBANValidator(self, data):\n iban = data['value']\n if len(iban) != 26 or not iban.startswith('IR'):\n self.setError(data['field'], enum.Error.IBAN_ERROR.value)\n return\n code = iban[4:] + iban[:4]\n code = code.replace('I', '18').replace('R', '27')\n if int(code) % 97 != 1:\n self.setError(data['field'], enum.Error.IBAN_ERROR.value)\n\n def _check_with_subMerchantBankAccountValidator(self, data):\n if not SubMerchant.objects.filter(idsql=data['value']['userId'], ID\n =data['value']['subId'], status=1).exists():\n self.setError(data['field'], enum.Error.\n IMPOSSIBLE_BANK_ACCOUNT_DESTINATION.value)\n\n def _check_with_minDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) < data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_maxDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) > data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_equalDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) != data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_inputValidator(self, data):\n if data['value'] is None or len(data['value']) < 1:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_IbanTransferable(self, data):\n if data['value'][4:7] == '062' and data['value'][-13:-10] == '080':\n self.setError(data['field'], enum.Error.NOT_IBAN_TRANSFERABLE.value\n )\n\n def _check_with_username(self, data):\n username = re.match('^[a-zA-Z0-9_.-]+$', data['value'])\n if username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def checkType(self, field, type, value='unAssigned'):\n self.addValidation({'field': field, 'type': type, 'value': value},\n self._check_with_typeValidator)\n return self\n\n def checkNationalLegalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_nationalLegalCodeValidator)\n return self\n\n def checkOfficer1NationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_officer1NationalCodeValidator)\n return self\n\n def checkOfficer2NationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_officer2NationalCodeValidator)\n return self\n\n def checkNationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_nationalCodeValidator)\n return self\n\n def checkFeatures(self, field, features='unAssigned'):\n self.addValidation({'field': field, 'value': features}, self.\n _check_with_featuresValidator)\n return self\n\n def checkUserName(self, field, username='unAssigned'):\n self.addValidation({'field': field, 'value': username}, self.\n 
_check_with_userNameValidator)\n return self\n\n def checkPhone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_phoneNumberValidator)\n return self\n\n def checkMobile(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_mobileValidator)\n return self\n\n def checkEmail(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_emailValidator)\n return self\n\n def checkNotNone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_noneValidator)\n return self\n\n def checkFile(self, field, data, **options):\n self.addValidation({'field': field, 'value': data, 'options':\n options}, self._check_with_fileValidator)\n return self\n\n def checkIBAN(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_IBANValidator)\n return self\n\n def checkBankAccountDestinationForSubmerchant(self, field, userId, subId):\n data = {'userId': userId, 'subId': subId}\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_subMerchantBankAccountValidator)\n return self\n\n def checkDataLength(self, field, length, mode='equal', data='unAssigned'):\n if mode == 'equal':\n validatorFunction = self._check_with_equalDataLengthValidator\n if mode == 'min':\n validatorFunction = self._check_with_minDataLengthValidator\n if mode == 'max':\n validatorFunction = self._check_with_minDataLengthValidator\n self.addValidation({'field': field, 'value': data, 'length': length\n }, validatorFunction)\n return self\n\n def checkInputData(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_inputValidator)\n return self\n\n def checkTelephone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_phoneNumberValidator)\n return self\n\n def checkIsIbanTransferable(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_IbanTransferable)\n return self\n\n def checkUsername(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_username())\n\n\nclass DataValidator:\n\n def __init__(self, data={}):\n self.fieldValidator = FieldValidator(data)\n self.objectValidator = ObjectValidator()\n self.errors = {}\n self.statusCode = 200\n\n def getValidatorsErrors(self):\n self.objectValidator.validate()\n self.fieldValidator.validate()\n for key in self.fieldValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []\n ) + self.fieldValidator.getErrors()[key]\n self.statusCode = self.fieldValidator.statusCode\n for key in self.objectValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []\n ) + self.objectValidator.getErrors()[key]\n self.statusCode = (self.objectValidator.statusCode if self.\n objectValidator.statusCode != 200 else self.statusCode)\n return self.errors\n\n def generateMessage(self):\n messages = []\n errorKeys = self.errors.keys()\n if 'email' in errorKeys:\n messages.append(' آدرس ایمیل نامعتبر است')\n if 'name' in errorKeys:\n messages.append('نام را وارد کنید')\n if 'username' in errorKeys:\n messages.append('نام کاربری را وارد کنید')\n if 'password' in errorKeys:\n messages.append('رمز عبور را وارد کنید')\n if 'mobile' in errorKeys:\n messages.append('تلفن همراه خود را وارد کنید.')\n if 'phone' in 
errorKeys:\n messages.append(\n 'تلفن ثابت را به فرمت 02122407556 و 11 رقمی وارد کنید')\n if 'iban' in errorKeys or 'IBAN' in errorKeys:\n messages.append(\n 'شماره شبای وارد شده معتبر نیست. 26 کاراکتر و شروع با IR و بدون خط تیره (-) و فاصله'\n )\n if 'user' in errorKeys:\n messages.append('لطفا وارد شوید')\n return messages\n",
"step-4": "<mask token>\n\n\nclass ObjectValidator:\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.statusCode = 200\n self.validationPipeline = []\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = ObjectValidator()\n return self\n\n def setError(self, field, error):\n if field not in self.invalidFields:\n fieldErrors = self.errors.get(field, [])\n if error[0] not in fieldErrors:\n self.errors[field] = fieldErrors + [error[0]]\n self.statusCode = error[1]\n self.invalidFields.append(field)\n\n def getErrors(self):\n return self.errors\n\n def validate(self):\n for validation in self.validationPipeline:\n try:\n validation['validator'](validation['data'])\n except:\n self.setError(validation['data']['field'], enum.Error.\n INVALID_FIELD_DATA.value)\n\n def addValidation(self, data, validatorFunction):\n self.validationPipeline.append({'data': data, 'validator':\n validatorFunction})\n\n def _check_with_authenticationValidator(self, data):\n if not data['user'].is_authenticated:\n self.setError(data['field'], enum.Error.UNAUTHORIZED.value)\n\n def _check_with_nonDuplicateObjectValidator(self, data):\n model = data['model']\n if model.objects.filter(**data['filter']):\n self.setError(data['field'], enum.Error.DUPLICATE_FIELDS.value)\n\n def _check_with_ObjectExistenceValidator(self, data):\n model = data['model']\n if not model.objects.filter(**data['filter']):\n self.setError(data['field'], enum.Error.\n GENERIC_OBJECT_NOT_FOUND.value)\n\n def checkNonDuplicateObject(self, field, model, **filter):\n self.addValidation({'field': field, 'model': model, 'filter':\n filter}, self._check_with_nonDuplicateObjectValidator)\n return self\n\n def checkObjectExistence(self, field, model, **filter):\n self.addValidation({'field': field, 'model': model, 'filter':\n filter}, self._check_with_ObjectExistenceValidator)\n return self\n\n def checkUserAuthentication(self, field, user):\n self.addValidation({'field': field, 'user': user}, self.\n _check_with_authenticationValidator)\n return self\n\n\nclass FieldValidator:\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.validationPipeline = []\n self.statusCode = 200\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = FieldValidator()\n\n def setError(self, field, error):\n if field not in self.invalidFields:\n fieldErrors = self.errors.get(field, [])\n if error[0] not in fieldErrors:\n self.errors[field] = fieldErrors + [error[0]]\n self.statusCode = error[1]\n self.invalidFields.append(field)\n\n def getErrors(self):\n return self.errors\n\n def validate(self):\n for validation in self.validationPipeline:\n try:\n validation['validator'](validation['data'])\n except:\n self.setError(validation['data']['field'], enum.Error.\n INVALID_FIELD_DATA.value)\n return self\n\n def addValidation(self, data, validatorFunction):\n if data['value'] == 'unAssigned' and data['field'] in self.data.keys():\n data['value'] = self.data[data['field']]\n elif data['value'] == 'unAssigned' and data['field'\n ] not in self.data.keys():\n data['value'] = None\n self.validationPipeline.append({'data': data, 'validator':\n validatorFunction})\n\n def _check_with_typeValidator(self, data):\n if not isinstance(data['value'], data['type']):\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_nationalLegalCodeValidator(self, data):\n nationalLegalCode = data['value']\n result = 0\n validationList = [29, 27, 
23, 19, 17, 29, 27, 23, 19, 17]\n if len(nationalLegalCode) != 11:\n self.setError(data['field'], enum.Error.\n INVALID_NATIONAL_LEGAL_CODE.value)\n return\n for i in range(10):\n result += (int(nationalLegalCode[-2]) + 2 + int(\n nationalLegalCode[i])) * validationList[i]\n if result % 11 == 10:\n reminder = 0\n else:\n reminder = result % 11\n if reminder == int(nationalLegalCode[-1]):\n valid = True\n else:\n valid = False\n if valid is False:\n self.setError(data['field'], enum.Error.\n INVALID_NATIONAL_LEGAL_CODE.value)\n\n def _check_with_nationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_officer1NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_officer2NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_featuresValidator(self, data):\n for i in data['value']:\n if i not in ['پلتفرم پرداخت در محل', 'باشگاه مشتریان',\n 'درگاه پرداخت اینترنتی']:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.\n value)\n break\n\n def _check_with_userNameValidator(self, data):\n username = re.match('^[A-Za-z]+(?:[ _-][A-Za-z0-9]+)*$', data['value'])\n if 'admin' in data['value'] or 'zibal' in data['value'\n ] or username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def _check_with_phoneNumberValidator(self, data):\n if data['value'] is None or len(data) < 1:\n self.setError(data['field'], enum.Error.\n PHONE_INCORRECT_TEMPLATE.value)\n\n def _check_with_mobileValidator(self, data):\n mobileNumber = data['value']\n if mobileNumber is None:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n return\n match_object = re.match('(^09[0-9]{9}$)', mobileNumber)\n if match_object is None or mobileNumber is None:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_emailValidator(self, data):\n email = data['value']\n if email is None:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n return\n match_object = re.match(\n '(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+$)', email)\n if match_object is None or email is None:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_noneValidator(self, data):\n if data['value'] is None or data['value'] == '':\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n 
def _check_with_fileValidator(self, data):\n file = data['value']\n field = data['field']\n if file is None:\n self.setError(field, enum.Error.EMPTY_INPUT_FIELD.value)\n return\n elif file.size > enum.Limits.FILE_SIZE_LIMIT.value:\n self.setError(field, enum.Error.FILE_SIZE_EXCEED.value)\n types = data['options'].get('types', None)\n valid = False\n if types is not None:\n for type in types:\n valid = valid or type in file.content_type\n if valid is False:\n self.setError(field, enum.Error.REQUEST_TYPE_ERROR.value)\n\n def _check_with_IBANValidator(self, data):\n iban = data['value']\n if len(iban) != 26 or not iban.startswith('IR'):\n self.setError(data['field'], enum.Error.IBAN_ERROR.value)\n return\n code = iban[4:] + iban[:4]\n code = code.replace('I', '18').replace('R', '27')\n if int(code) % 97 != 1:\n self.setError(data['field'], enum.Error.IBAN_ERROR.value)\n\n def _check_with_subMerchantBankAccountValidator(self, data):\n if not SubMerchant.objects.filter(idsql=data['value']['userId'], ID\n =data['value']['subId'], status=1).exists():\n self.setError(data['field'], enum.Error.\n IMPOSSIBLE_BANK_ACCOUNT_DESTINATION.value)\n\n def _check_with_minDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) < data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_maxDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) > data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_equalDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) != data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_inputValidator(self, data):\n if data['value'] is None or len(data['value']) < 1:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_IbanTransferable(self, data):\n if data['value'][4:7] == '062' and data['value'][-13:-10] == '080':\n self.setError(data['field'], enum.Error.NOT_IBAN_TRANSFERABLE.value\n )\n\n def _check_with_username(self, data):\n username = re.match('^[a-zA-Z0-9_.-]+$', data['value'])\n if username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def checkType(self, field, type, value='unAssigned'):\n self.addValidation({'field': field, 'type': type, 'value': value},\n self._check_with_typeValidator)\n return self\n\n def checkNationalLegalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_nationalLegalCodeValidator)\n return self\n\n def checkOfficer1NationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_officer1NationalCodeValidator)\n return self\n\n def checkOfficer2NationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_officer2NationalCodeValidator)\n return self\n\n def checkNationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_nationalCodeValidator)\n return self\n\n def checkFeatures(self, field, features='unAssigned'):\n self.addValidation({'field': field, 'value': 
features}, self.\n _check_with_featuresValidator)\n return self\n\n def checkUserName(self, field, username='unAssigned'):\n self.addValidation({'field': field, 'value': username}, self.\n _check_with_userNameValidator)\n return self\n\n def checkPhone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_phoneNumberValidator)\n return self\n\n def checkMobile(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_mobileValidator)\n return self\n\n def checkEmail(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_emailValidator)\n return self\n\n def checkNotNone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_noneValidator)\n return self\n\n def checkFile(self, field, data, **options):\n self.addValidation({'field': field, 'value': data, 'options':\n options}, self._check_with_fileValidator)\n return self\n\n def checkIBAN(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_IBANValidator)\n return self\n\n def checkBankAccountDestinationForSubmerchant(self, field, userId, subId):\n data = {'userId': userId, 'subId': subId}\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_subMerchantBankAccountValidator)\n return self\n\n def checkDataLength(self, field, length, mode='equal', data='unAssigned'):\n if mode == 'equal':\n validatorFunction = self._check_with_equalDataLengthValidator\n if mode == 'min':\n validatorFunction = self._check_with_minDataLengthValidator\n if mode == 'max':\n validatorFunction = self._check_with_minDataLengthValidator\n self.addValidation({'field': field, 'value': data, 'length': length\n }, validatorFunction)\n return self\n\n def checkInputData(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_inputValidator)\n return self\n\n def checkTelephone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_phoneNumberValidator)\n return self\n\n def checkIsIbanTransferable(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_IbanTransferable)\n return self\n\n def checkUsername(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_username())\n\n\nclass DataValidator:\n\n def __init__(self, data={}):\n self.fieldValidator = FieldValidator(data)\n self.objectValidator = ObjectValidator()\n self.errors = {}\n self.statusCode = 200\n\n def getValidatorsErrors(self):\n self.objectValidator.validate()\n self.fieldValidator.validate()\n for key in self.fieldValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []\n ) + self.fieldValidator.getErrors()[key]\n self.statusCode = self.fieldValidator.statusCode\n for key in self.objectValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []\n ) + self.objectValidator.getErrors()[key]\n self.statusCode = (self.objectValidator.statusCode if self.\n objectValidator.statusCode != 200 else self.statusCode)\n return self.errors\n\n def generateMessage(self):\n messages = []\n errorKeys = self.errors.keys()\n if 'email' in errorKeys:\n messages.append(' آدرس ایمیل نامعتبر است')\n if 'name' in errorKeys:\n messages.append('نام را وارد کنید')\n if 'username' in errorKeys:\n messages.append('نام کاربری 
را وارد کنید')\n if 'password' in errorKeys:\n messages.append('رمز عبور را وارد کنید')\n if 'mobile' in errorKeys:\n messages.append('تلفن همراه خود را وارد کنید.')\n if 'phone' in errorKeys:\n messages.append(\n 'تلفن ثابت را به فرمت 02122407556 و 11 رقمی وارد کنید')\n if 'iban' in errorKeys or 'IBAN' in errorKeys:\n messages.append(\n 'شماره شبای وارد شده معتبر نیست. 26 کاراکتر و شروع با IR و بدون خط تیره (-) و فاصله'\n )\n if 'user' in errorKeys:\n messages.append('لطفا وارد شوید')\n return messages\n",
"step-5": "import API.enum as enum\nimport re\n\nclass ObjectValidator():\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.statusCode = 200\n self.validationPipeline = []\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = ObjectValidator()\n return self\n\n def setError(self, field, error):\n if field not in self.invalidFields:\n fieldErrors = self.errors.get(field, [])\n if error[0] not in fieldErrors:\n self.errors[field] = fieldErrors + [error[0]]\n self.statusCode = error[1]\n self.invalidFields.append(field)\n def getErrors(self):\n return self.errors\n\n def validate(self):\n for validation in self.validationPipeline:\n try:\n validation['validator'](validation['data'])\n except:\n self.setError(validation['data']['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def addValidation(self, data, validatorFunction):\n self.validationPipeline.append({\n 'data': data,\n 'validator': validatorFunction\n })\n\n def _check_with_authenticationValidator(self, data):\n if not data['user'].is_authenticated:\n self.setError(data['field'], enum.Error.UNAUTHORIZED.value)\n\n def _check_with_nonDuplicateObjectValidator(self, data):\n model = data['model']\n if model.objects.filter(**data['filter']):\n self.setError(data['field'], enum.Error.DUPLICATE_FIELDS.value)\n\n def _check_with_ObjectExistenceValidator(self, data):\n model = data['model']\n if not model.objects.filter(**data['filter']):\n self.setError(data['field'], enum.Error.GENERIC_OBJECT_NOT_FOUND.value)\n\n def checkNonDuplicateObject(self, field, model, **filter):\n self.addValidation({'field': field, 'model': model, 'filter': filter},\n self._check_with_nonDuplicateObjectValidator)\n return self\n\n def checkObjectExistence(self, field, model, **filter):\n self.addValidation({'field': field, 'model': model, 'filter': filter},\n self._check_with_ObjectExistenceValidator)\n return self\n\n def checkUserAuthentication(self, field, user):\n self.addValidation({'field': field, 'user': user},\n self._check_with_authenticationValidator)\n return self\n\n\n#\\b(?!(\\d)\\1{3})[13-9]{4}[1346-9][013-9]{5}\\b\n# postal code validation\n\n\nclass FieldValidator():\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.validationPipeline = []\n self.statusCode = 200\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = FieldValidator()\n\n def setError(self, field, error):\n if field not in self.invalidFields:\n fieldErrors = self.errors.get(field, [])\n if error[0] not in fieldErrors:\n self.errors[field] = fieldErrors + [error[0]]\n self.statusCode = error[1]\n self.invalidFields.append(field)\n\n def getErrors(self):\n return self.errors\n\n def validate(self):\n for validation in self.validationPipeline:\n try:\n validation['validator'](validation['data'])\n except:\n self.setError(validation['data']['field'], enum.Error.INVALID_FIELD_DATA.value)\n return self\n def addValidation(self, data, validatorFunction):\n if (data['value'] == 'unAssigned') and data['field'] in self.data.keys():\n data['value'] = self.data[data['field']]\n elif data['value'] == 'unAssigned' and data['field'] not in self.data.keys():\n data['value'] = None\n self.validationPipeline.append({\n 'data': data,\n 'validator': validatorFunction\n })\n\n def _check_with_typeValidator(self, data):\n if not isinstance(data['value'], data['type']):\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def 
_check_with_nationalLegalCodeValidator(self, data):\n nationalLegalCode = data['value']\n result = 0\n validationList = [29, 27, 23, 19, 17, 29, 27, 23, 19, 17]\n if len(nationalLegalCode) != 11:\n self.setError(data['field'], enum.Error.INVALID_NATIONAL_LEGAL_CODE.value)\n return\n for i in range(10):\n result += (int(nationalLegalCode[-2]) + 2 + int(nationalLegalCode[i])) * validationList[i]\n if result % 11 == 10:\n reminder = 0\n else:\n reminder = result % 11\n if reminder == int(nationalLegalCode[-1]):\n valid = True\n else:\n valid = False\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_NATIONAL_LEGAL_CODE.value)\n\n def _check_with_nationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if (r < 2 and r == int(nCode[9])) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n def _check_with_officer1NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if (r < 2 and r == int(nCode[9])) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n def _check_with_officer2NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if (r < 2 and r == int(nCode[9])) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_featuresValidator(self, data):\n for i in data['value']:\n if i not in [\"پلتفرم پرداخت در محل\", \"باشگاه مشتریان\", \"درگاه پرداخت اینترنتی\"]:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n break\n\n def _check_with_userNameValidator(self, data):\n username = re.match(r\"^[A-Za-z]+(?:[ _-][A-Za-z0-9]+)*$\", data[\"value\"])\n if 'admin' in data['value'] or 'zibal' in data['value'] or username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def _check_with_phoneNumberValidator(self, data):\n if data['value'] is None or len(data) < 1:\n self.setError(data['field'], enum.Error.PHONE_INCORRECT_TEMPLATE.value)\n\n def _check_with_mobileValidator(self, data):\n mobileNumber = data['value']\n if mobileNumber is None:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n return\n match_object = re.match(r\"(^09[0-9]{9}$)\", mobileNumber)\n if match_object is None or mobileNumber is None:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_emailValidator(self, data):\n email = data['value']\n if email is None:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n return\n match_object = re.match(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\", email)\n if match_object is None or email is None:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_noneValidator(self, 
data):\n if data['value'] is None or data['value'] == \"\":\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_fileValidator(self, data):\n\n file = data['value']\n field = data['field']\n if file is None:\n self.setError(field, enum.Error.EMPTY_INPUT_FIELD.value)\n return\n elif file.size > enum.Limits.FILE_SIZE_LIMIT.value:\n self.setError(field, enum.Error.FILE_SIZE_EXCEED.value)\n types = data['options'].get('types', None)\n valid = False\n if types is not None:\n for type in types:\n valid = valid or type in file.content_type\n if valid is False:\n self.setError(field, enum.Error.REQUEST_TYPE_ERROR.value)\n\n def _check_with_IBANValidator(self, data):\n iban = data['value']\n if len(iban)!=26 or not iban.startswith(\"IR\"):\n self.setError(data['field'], enum.Error.IBAN_ERROR.value)\n return\n code = iban[4:]+iban[:4]\n code = code.replace('I','18').replace('R','27')\n if int(code)%97!=1:\n self.setError(data['field'], enum.Error.IBAN_ERROR.value)\n\n def _check_with_subMerchantBankAccountValidator(self, data):\n if not SubMerchant.objects.filter(idsql=data['value']['userId'], ID=data['value']['subId'], status=1).exists():\n self.setError(data['field'], enum.Error.IMPOSSIBLE_BANK_ACCOUNT_DESTINATION.value)\n\n def _check_with_minDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) < data['length']:\n self.setError(data['field'], (enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_maxDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) > data['length']:\n self.setError(data['field'], (enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_equalDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) != data['length']:\n self.setError(data['field'], (enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_inputValidator(self, data):\n if data['value'] is None or len(data['value']) < 1:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_IbanTransferable(self, data):\n if data['value'][4:7]=='062' and data['value'][-13:-10]=='080':\n self.setError(data['field'], enum.Error.NOT_IBAN_TRANSFERABLE.value)\n\n def _check_with_username(self, data):\n username = re.match(r\"^[a-zA-Z0-9_.-]+$\", data[\"value\"])\n if username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n #############################################################################\n\n def checkType(self, field, type, value=\"unAssigned\"):\n self.addValidation({'field': field, 'type': type, 'value': value}, self._check_with_typeValidator)\n return self\n\n def checkNationalLegalCode(self, field, code=\"unAssigned\"):\n self.addValidation({'field': field, 'value': code}, self._check_with_nationalLegalCodeValidator)\n return self\n\n def checkOfficer1NationalCode(self, field, code=\"unAssigned\"):\n self.addValidation({'field': field, 'value': code}, self._check_with_officer1NationalCodeValidator)\n return self\n\n def checkOfficer2NationalCode(self, field, code=\"unAssigned\"):\n self.addValidation({'field': field, 'value': code}, self._check_with_officer2NationalCodeValidator)\n return self\n\n def checkNationalCode(self, field, code=\"unAssigned\"):\n self.addValidation({'field': 
field, 'value': code}, self._check_with_nationalCodeValidator)\n return self\n\n def checkFeatures(self, field, features=\"unAssigned\"):\n self.addValidation({'field': field, 'value': features}, self._check_with_featuresValidator)\n return self\n\n def checkUserName(self, field, username=\"unAssigned\"):\n self.addValidation({'field': field, 'value': username}, self._check_with_userNameValidator)\n return self\n\n def checkPhone(self, field, data=\"unAssigned\"):\n self.addValidation({'field': field, 'value': data}, self._check_with_phoneNumberValidator)\n return self\n\n def checkMobile(self, field, data=\"unAssigned\"):\n self.addValidation({'field': field, 'value': data}, self._check_with_mobileValidator)\n return self\n\n def checkEmail(self, field, data=\"unAssigned\"):\n self.addValidation({'field': field, 'value': data}, self._check_with_emailValidator)\n return self\n\n def checkNotNone(self, field, data=\"unAssigned\"):\n self.addValidation({'field': field, 'value': data}, self._check_with_noneValidator)\n return self\n\n def checkFile(self, field, data, **options):\n self.addValidation({'field': field, 'value': data, 'options': options}, self._check_with_fileValidator)\n return self\n\n def checkIBAN(self, field, data=\"unAssigned\"):\n self.addValidation({'field': field, 'value': data}, self._check_with_IBANValidator)\n return self\n\n def checkBankAccountDestinationForSubmerchant(self, field, userId, subId):\n data = {\n 'userId': userId,\n 'subId': subId\n }\n self.addValidation({'field': field, 'value': data}, self._check_with_subMerchantBankAccountValidator)\n return self\n\n def checkDataLength(self, field, length,mode='equal', data=\"unAssigned\"):\n if mode == 'equal':\n validatorFunction = self._check_with_equalDataLengthValidator\n if mode == 'min':\n validatorFunction = self._check_with_minDataLengthValidator\n if mode == 'max':\n validatorFunction = self._check_with_minDataLengthValidator\n\n self.addValidation({'field': field, 'value': data, 'length': length}, validatorFunction)\n\n return self\n\n def checkInputData(self, field, data=\"unAssigned\"):\n self.addValidation({'field': field, 'value': data}, self._check_with_inputValidator)\n return self\n\n def checkTelephone(self, field, data=\"unAssigned\"): ##TODO\n self.addValidation({'field': field, 'value': data}, self._check_with_phoneNumberValidator)\n return self\n\n def checkIsIbanTransferable(self, field, data=\"unAssigned\"):\n self.addValidation({'field': field, 'value': data}, self._check_with_IbanTransferable)\n return self\n\n def checkUsername(self, field, data=\"unAssigned\"):\n self.addValidation({'field': field, 'value': data}, self._check_with_username())\n\n\nclass DataValidator:\n\n def __init__(self, data={}):\n self.fieldValidator = FieldValidator(data)\n self.objectValidator = ObjectValidator()\n self.errors = {}\n self.statusCode = 200\n\n def getValidatorsErrors(self):\n self.objectValidator.validate()\n self.fieldValidator.validate()\n for key in self.fieldValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []) + self.fieldValidator.getErrors()[key]\n self.statusCode = self.fieldValidator.statusCode\n for key in self.objectValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []) + self.objectValidator.getErrors()[key]\n self.statusCode = self.objectValidator.statusCode if self.objectValidator.statusCode != 200 else self.statusCode\n return self.errors\n\n def generateMessage(self):\n messages = []\n errorKeys = self.errors.keys()\n if 'email' in 
errorKeys:\n messages.append(' آدرس ایمیل نامعتبر است')\n\n if \"name\" in errorKeys :\n messages.append('نام را وارد کنید')\n\n if 'username' in errorKeys:\n messages.append('نام کاربری را وارد کنید')\n\n if 'password' in errorKeys:\n messages.append('رمز عبور را وارد کنید')\n\n if 'mobile' in errorKeys:\n messages.append('تلفن همراه خود را وارد کنید.')\n\n if 'phone' in errorKeys:\n messages.append('تلفن ثابت را به فرمت 02122407556 و 11 رقمی وارد کنید')\n if 'iban' in errorKeys or 'IBAN' in errorKeys:\n messages.append('شماره شبای وارد شده معتبر نیست. 26 کاراکتر و شروع با IR و بدون خط تیره (-) و فاصله')\n if 'user' in errorKeys:\n messages.append('لطفا وارد شوید')\n\n return messages",
"step-ids": [
40,
58,
62,
63,
65
]
}
|
[
40,
58,
62,
63,
65
] |
from django.test import TestCase
from .models import Post, Category, Tag
# Create your tests here.
class TestPost(TestCase):
def test_str(self):
my_title = Post(title='This is a basic title for a basic test case')
self.assertEquals(str(my_title), 'This is a basic title for a basic test case')
class TestCategory(TestCase):
def test_str(self):
category = Category(name='Test Category')
self.assertEquals(str(category), 'Test Category')
class TestTag(TestCase):
def test_str(self):
tag = Tag(name='Test Tag')
self.assertEquals(str(tag), 'Test Tag')
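# These __str__ tests run under Django's standard test runner, e.g. "python manage.py test".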
|
normal
|
{
"blob_id": "825c9510b055c0fa570f577b1c9616e8bde9c98b",
"index": 7653,
"step-1": "<mask token>\n\n\nclass TestCategory(TestCase):\n\n def test_str(self):\n category = Category(name='Test Category')\n self.assertEquals(str(category), 'Test Category')\n\n\nclass TestTag(TestCase):\n\n def test_str(self):\n tag = Tag(name='Test Tag')\n self.assertEquals(str(tag), 'Test Tag')\n",
"step-2": "<mask token>\n\n\nclass TestPost(TestCase):\n <mask token>\n\n\nclass TestCategory(TestCase):\n\n def test_str(self):\n category = Category(name='Test Category')\n self.assertEquals(str(category), 'Test Category')\n\n\nclass TestTag(TestCase):\n\n def test_str(self):\n tag = Tag(name='Test Tag')\n self.assertEquals(str(tag), 'Test Tag')\n",
"step-3": "<mask token>\n\n\nclass TestPost(TestCase):\n\n def test_str(self):\n my_title = Post(title='This is a basic title for a basic test case')\n self.assertEquals(str(my_title),\n 'This is a basic title for a basic test case')\n\n\nclass TestCategory(TestCase):\n\n def test_str(self):\n category = Category(name='Test Category')\n self.assertEquals(str(category), 'Test Category')\n\n\nclass TestTag(TestCase):\n\n def test_str(self):\n tag = Tag(name='Test Tag')\n self.assertEquals(str(tag), 'Test Tag')\n",
"step-4": "from django.test import TestCase\nfrom .models import Post, Category, Tag\n\n\nclass TestPost(TestCase):\n\n def test_str(self):\n my_title = Post(title='This is a basic title for a basic test case')\n self.assertEquals(str(my_title),\n 'This is a basic title for a basic test case')\n\n\nclass TestCategory(TestCase):\n\n def test_str(self):\n category = Category(name='Test Category')\n self.assertEquals(str(category), 'Test Category')\n\n\nclass TestTag(TestCase):\n\n def test_str(self):\n tag = Tag(name='Test Tag')\n self.assertEquals(str(tag), 'Test Tag')\n",
"step-5": "from django.test import TestCase\n\nfrom .models import Post, Category, Tag\n\n# Create your tests here.\n\nclass TestPost(TestCase):\n\n def test_str(self):\n my_title = Post(title='This is a basic title for a basic test case')\n self.assertEquals(str(my_title), 'This is a basic title for a basic test case')\n\nclass TestCategory(TestCase):\n\n def test_str(self):\n category = Category(name='Test Category')\n self.assertEquals(str(category), 'Test Category')\n\nclass TestTag(TestCase):\n\n def test_str(self):\n tag = Tag(name='Test Tag')\n self.assertEquals(str(tag), 'Test Tag')\n\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import os , sys , time
print("""
███████████████████████████████
█ █
█═╬═════════════════════════╬═█
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░Wi-fi Fucker Tool░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░coded by arda6░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█═╬═════════════════════════╬═█
█ █
███████████████████████████████
""")
pla = sys.platform
if pla == "win32":
win = "Windows"
print(" [!] Your Platform is " +win+ "\n")
elif pla == "darwin":
mac = "MacOs"
print(" [+] Your Platform is " +mac+ "\n")
elif pla == "linux":
mac = "Linux"
print(" [+] Your Platform is " +mac+"\n")
if pla == "win32":
print(" [!] Not Suitable For Tool Windows \n")
time.sleep(3)
exit(" [#] https://www.github/arda6")
print("")
print("""
1) Wep Cracking
2) Wpa2 Cracking
3) Deauth Attack
""")
soru = input("root@eyll:~# ")
if soru == '1':
os.system("python3 main.py")
exit()
elif soru == '2':
os.system("python3 wpa2.py")
elif soru == '3':
os.system("python3 attack.py")
|
normal
|
{
"blob_id": "15eb205e6bd36844fdfc8c05efbc3a3d584c122d",
"index": 7238,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\n \"\"\"\n\n ███████████████████████████████\n █ █\n █═╬═════════════════════════╬═█\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░Wi-fi Fucker Tool░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░coded by arda6░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █═╬═════════════════════════╬═█\n █ █\n ███████████████████████████████\n\n\n\"\"\"\n )\n<mask token>\nif pla == 'win32':\n win = 'Windows'\n print(' [!] Your Platform is ' + win + '\\n')\nelif pla == 'darwin':\n mac = 'MacOs'\n print(' [+] Your Platform is ' + mac + '\\n')\nelif pla == 'linux':\n mac = 'Linux'\n print(' [+] Your Platform is ' + mac + '\\n')\nif pla == 'win32':\n print(' [!] Not Suitable For Tool Windows \\n')\n time.sleep(3)\n exit(' [#] https://www.github/arda6')\nprint('')\nprint(\"\"\"\n\n 1) Wep Cracking\n 2) Wpa2 Cracking\n 3) Deauth Attack\n \n\"\"\")\n<mask token>\nif soru == '1':\n os.system('python3 main.py')\n exit()\nelif soru == '2':\n os.system('python3 wpa2.py')\nelif soru == '3':\n os.system('python3 attack.py')\n",
"step-3": "<mask token>\nprint(\n \"\"\"\n\n ███████████████████████████████\n █ █\n █═╬═════════════════════════╬═█\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░Wi-fi Fucker Tool░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░coded by arda6░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █═╬═════════════════════════╬═█\n █ █\n ███████████████████████████████\n\n\n\"\"\"\n )\npla = sys.platform\nif pla == 'win32':\n win = 'Windows'\n print(' [!] Your Platform is ' + win + '\\n')\nelif pla == 'darwin':\n mac = 'MacOs'\n print(' [+] Your Platform is ' + mac + '\\n')\nelif pla == 'linux':\n mac = 'Linux'\n print(' [+] Your Platform is ' + mac + '\\n')\nif pla == 'win32':\n print(' [!] Not Suitable For Tool Windows \\n')\n time.sleep(3)\n exit(' [#] https://www.github/arda6')\nprint('')\nprint(\"\"\"\n\n 1) Wep Cracking\n 2) Wpa2 Cracking\n 3) Deauth Attack\n \n\"\"\")\nsoru = input('root@eyll:~# ')\nif soru == '1':\n os.system('python3 main.py')\n exit()\nelif soru == '2':\n os.system('python3 wpa2.py')\nelif soru == '3':\n os.system('python3 attack.py')\n",
"step-4": "import os, sys, time\nprint(\n \"\"\"\n\n ███████████████████████████████\n █ █\n █═╬═════════════════════════╬═█\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░Wi-fi Fucker Tool░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░coded by arda6░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █═╬═════════════════════════╬═█\n █ █\n ███████████████████████████████\n\n\n\"\"\"\n )\npla = sys.platform\nif pla == 'win32':\n win = 'Windows'\n print(' [!] Your Platform is ' + win + '\\n')\nelif pla == 'darwin':\n mac = 'MacOs'\n print(' [+] Your Platform is ' + mac + '\\n')\nelif pla == 'linux':\n mac = 'Linux'\n print(' [+] Your Platform is ' + mac + '\\n')\nif pla == 'win32':\n print(' [!] Not Suitable For Tool Windows \\n')\n time.sleep(3)\n exit(' [#] https://www.github/arda6')\nprint('')\nprint(\"\"\"\n\n 1) Wep Cracking\n 2) Wpa2 Cracking\n 3) Deauth Attack\n \n\"\"\")\nsoru = input('root@eyll:~# ')\nif soru == '1':\n os.system('python3 main.py')\n exit()\nelif soru == '2':\n os.system('python3 wpa2.py')\nelif soru == '3':\n os.system('python3 attack.py')\n",
"step-5": "import os , sys , time\r\nprint(\"\"\"\r\n\r\n ███████████████████████████████\r\n █ █\r\n █═╬═════════════════════════╬═█\r\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\r\n █ ║░░░░Wi-fi Fucker Tool░░░░║ █\r\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\r\n █ ║░░░░░coded by arda6░░░░░░║ █\r\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\r\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\r\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\r\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\r\n █═╬═════════════════════════╬═█\r\n █ █\r\n ███████████████████████████████\r\n\r\n\r\n\"\"\")\r\npla = sys.platform\r\nif pla == \"win32\":\r\n win = \"Windows\"\r\n print(\" [!] Your Platform is \" +win+ \"\\n\")\r\nelif pla == \"darwin\":\r\n mac = \"MacOs\"\r\n print(\" [+] Your Platform is \" +mac+ \"\\n\")\r\nelif pla == \"linux\":\r\n mac = \"Linux\"\r\n print(\" [+] Your Platform is \" +mac+\"\\n\")\r\nif pla == \"win32\":\r\n print(\" [!] Not Suitable For Tool Windows \\n\")\r\n time.sleep(3)\r\n exit(\" [#] https://www.github/arda6\")\r\nprint(\"\")\r\nprint(\"\"\"\r\n\r\n 1) Wep Cracking\r\n 2) Wpa2 Cracking\r\n 3) Deauth Attack\r\n \r\n\"\"\")\r\n\r\nsoru = input(\"root@eyll:~# \")\r\nif soru == '1':\r\n os.system(\"python3 main.py\")\r\n exit()\r\nelif soru == '2':\r\n os.system(\"python3 wpa2.py\")\r\nelif soru == '3':\r\n os.system(\"python3 attack.py\")\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import math
import torch
from torch import nn
from d2l import torch as d2l
def masked_softmax(X, valid_lens):
"""通过在最后一个轴上掩蔽元素来执行softmax操作"""
# X:3D张量,valid_lens:1D或2D张量
if valid_lens is None:
return nn.functional.softmax(X, dim=-1)
else:
shape = X.shape
if valid_lens.dim() == 1:
valid_lens = torch.repeat_interleave(valid_lens, shape[1])
else:
valid_lens = valid_lens.reshape(-1)
        # Masked elements on the last axis are replaced with a very large negative value, so their softmax output is 0
X = d2l.sequence_mask(X.reshape(-1, shape[-1]), valid_lens, value=-1e6)
return nn.functional.softmax(X.reshape(shape), dim=-1)
"""测试softmax掩码"""
print(masked_softmax(torch.rand(2, 2, 4), torch.tensor([2, 3])))
print(masked_softmax(torch.rand(2, 2, 4), torch.tensor([[1, 3], [2, 4]])))
class AdditiveAttention(nn.Module):
"""加性注意力"""
def __init__(self, key_size, query_size, num_hiddens, dropout, **kwargs):
super(AdditiveAttention, self).__init__(**kwargs)
self.W_k = nn.Linear(key_size, num_hiddens, bias=False)
self.W_q = nn.Linear(query_size, num_hiddens, bias=False)
self.W_v = nn.Linear(num_hiddens, 1, bias=False)
self.dropout = nn.Dropout(dropout)
def forward(self, queries, keys, values, valid_lens):
queries, keys = self.W_q(queries), self.W_k(keys)
        # After dimension expansion:
        # shape of queries: (batch_size, no. of queries, 1, num_hiddens)
        # shape of keys: (batch_size, 1, no. of key-value pairs, num_hiddens)
        # Sum them up with broadcasting
features = queries.unsqueeze(2) + keys.unsqueeze(1)
features = torch.tanh(features)
        # self.W_v has only one output, so remove the last dimension from the shape.
        # shape of scores: (batch_size, no. of queries, no. of key-value pairs)
scores = self.W_v(features).squeeze(-1)
print("scores:", scores)
self.attention_weights = masked_softmax(scores, valid_lens)
        # shape of values: (batch_size, no. of key-value pairs, the value dimension)
return torch.bmm(self.dropout(self.attention_weights), values)
"""加性注意力函数测试"""
queries, keys = torch.normal(0, 1, (2, 1, 20)), torch.ones((2, 10, 2))
# A mini-batch of values; the two value matrices are identical
values = torch.arange(40, dtype=torch.float32).reshape(1, 10, 4).repeat(2, 1, 1)
valid_lens = torch.tensor([2, 6])
attention = AdditiveAttention(key_size=2, query_size=20, num_hiddens=8, dropout=0.1)
attention.eval()
print(attention(queries, keys, values, valid_lens))
class DotProductAttention(nn.Module):
"""缩放点积注意力"""
def __init__(self, dropout, **kwargs):
super(DotProductAttention, self).__init__(**kwargs)
self.dropout = nn.Dropout(dropout)
    # shape of queries: (batch_size, no. of queries, d)
    # shape of keys: (batch_size, no. of key-value pairs, d)
    # shape of values: (batch_size, no. of key-value pairs, the value dimension)
    # shape of valid_lens: (batch_size,) or (batch_size, no. of queries)
def forward(self, queries, keys, values, valid_lens=None):
d = queries.shape[-1]
        # Set transpose_b=True to swap the last two dimensions of keys
scores = torch.bmm(queries, keys.transpose(1, 2)) / math.sqrt(d)
self.attention_weights = masked_softmax(scores, valid_lens)
return torch.bmm(self.dropout(self.attention_weights), values)
"""缩放点积注意力函数测试"""
queries = torch.normal(0, 1, (2, 1, 2))
attention = DotProductAttention(dropout=0.5)
attention.eval()
print(attention(queries, keys, values, valid_lens))
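# A quick sanity check of the output shapes (a minimal sketch reusing the tensors defined above):
# both modules map queries of shape (batch_size, no. of queries, d_q) and values of shape
# (batch_size, no. of kv pairs, d_v) to an output of shape (batch_size, no. of queries, d_v).
out = attention(queries, keys, values, valid_lens)
assert out.shape == (2, 1, 4)
assert attention.attention_weights.shape == (2, 1, 10)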
|
normal
|
{
"blob_id": "cda01bc7b0ebcfaf010bb87e7d9be34fd310d7a7",
"index": 9626,
"step-1": "<mask token>\n\n\nclass AdditiveAttention(nn.Module):\n <mask token>\n\n def __init__(self, key_size, query_size, num_hiddens, dropout, **kwargs):\n super(AdditiveAttention, self).__init__(**kwargs)\n self.W_k = nn.Linear(key_size, num_hiddens, bias=False)\n self.W_q = nn.Linear(query_size, num_hiddens, bias=False)\n self.W_v = nn.Linear(num_hiddens, 1, bias=False)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, queries, keys, values, valid_lens):\n queries, keys = self.W_q(queries), self.W_k(keys)\n features = queries.unsqueeze(2) + keys.unsqueeze(1)\n features = torch.tanh(features)\n scores = self.W_v(features).squeeze(-1)\n print('scores:', scores)\n self.attention_weights = masked_softmax(scores, valid_lens)\n return torch.bmm(self.dropout(self.attention_weights), values)\n\n\n<mask token>\n\n\nclass DotProductAttention(nn.Module):\n \"\"\"缩放点积注意力\"\"\"\n\n def __init__(self, dropout, **kwargs):\n super(DotProductAttention, self).__init__(**kwargs)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, queries, keys, values, valid_lens=None):\n d = queries.shape[-1]\n scores = torch.bmm(queries, keys.transpose(1, 2)) / math.sqrt(d)\n self.attention_weights = masked_softmax(scores, valid_lens)\n return torch.bmm(self.dropout(self.attention_weights), values)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef masked_softmax(X, valid_lens):\n \"\"\"通过在最后一个轴上掩蔽元素来执行softmax操作\"\"\"\n if valid_lens is None:\n return nn.functional.softmax(X, dim=-1)\n else:\n shape = X.shape\n if valid_lens.dim() == 1:\n valid_lens = torch.repeat_interleave(valid_lens, shape[1])\n else:\n valid_lens = valid_lens.reshape(-1)\n X = d2l.sequence_mask(X.reshape(-1, shape[-1]), valid_lens, value=-\n 1000000.0)\n return nn.functional.softmax(X.reshape(shape), dim=-1)\n\n\n<mask token>\n\n\nclass AdditiveAttention(nn.Module):\n \"\"\"加性注意力\"\"\"\n\n def __init__(self, key_size, query_size, num_hiddens, dropout, **kwargs):\n super(AdditiveAttention, self).__init__(**kwargs)\n self.W_k = nn.Linear(key_size, num_hiddens, bias=False)\n self.W_q = nn.Linear(query_size, num_hiddens, bias=False)\n self.W_v = nn.Linear(num_hiddens, 1, bias=False)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, queries, keys, values, valid_lens):\n queries, keys = self.W_q(queries), self.W_k(keys)\n features = queries.unsqueeze(2) + keys.unsqueeze(1)\n features = torch.tanh(features)\n scores = self.W_v(features).squeeze(-1)\n print('scores:', scores)\n self.attention_weights = masked_softmax(scores, valid_lens)\n return torch.bmm(self.dropout(self.attention_weights), values)\n\n\n<mask token>\n\n\nclass DotProductAttention(nn.Module):\n \"\"\"缩放点积注意力\"\"\"\n\n def __init__(self, dropout, **kwargs):\n super(DotProductAttention, self).__init__(**kwargs)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, queries, keys, values, valid_lens=None):\n d = queries.shape[-1]\n scores = torch.bmm(queries, keys.transpose(1, 2)) / math.sqrt(d)\n self.attention_weights = masked_softmax(scores, valid_lens)\n return torch.bmm(self.dropout(self.attention_weights), values)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef masked_softmax(X, valid_lens):\n \"\"\"通过在最后一个轴上掩蔽元素来执行softmax操作\"\"\"\n if valid_lens is None:\n return nn.functional.softmax(X, dim=-1)\n else:\n shape = X.shape\n if valid_lens.dim() == 1:\n valid_lens = torch.repeat_interleave(valid_lens, shape[1])\n else:\n valid_lens = valid_lens.reshape(-1)\n X = d2l.sequence_mask(X.reshape(-1, shape[-1]), valid_lens, value=-\n 1000000.0)\n return nn.functional.softmax(X.reshape(shape), dim=-1)\n\n\n<mask token>\nprint(masked_softmax(torch.rand(2, 2, 4), torch.tensor([2, 3])))\nprint(masked_softmax(torch.rand(2, 2, 4), torch.tensor([[1, 3], [2, 4]])))\n\n\nclass AdditiveAttention(nn.Module):\n \"\"\"加性注意力\"\"\"\n\n def __init__(self, key_size, query_size, num_hiddens, dropout, **kwargs):\n super(AdditiveAttention, self).__init__(**kwargs)\n self.W_k = nn.Linear(key_size, num_hiddens, bias=False)\n self.W_q = nn.Linear(query_size, num_hiddens, bias=False)\n self.W_v = nn.Linear(num_hiddens, 1, bias=False)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, queries, keys, values, valid_lens):\n queries, keys = self.W_q(queries), self.W_k(keys)\n features = queries.unsqueeze(2) + keys.unsqueeze(1)\n features = torch.tanh(features)\n scores = self.W_v(features).squeeze(-1)\n print('scores:', scores)\n self.attention_weights = masked_softmax(scores, valid_lens)\n return torch.bmm(self.dropout(self.attention_weights), values)\n\n\n<mask token>\nattention.eval()\nprint(attention(queries, keys, values, valid_lens))\n\n\nclass DotProductAttention(nn.Module):\n \"\"\"缩放点积注意力\"\"\"\n\n def __init__(self, dropout, **kwargs):\n super(DotProductAttention, self).__init__(**kwargs)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, queries, keys, values, valid_lens=None):\n d = queries.shape[-1]\n scores = torch.bmm(queries, keys.transpose(1, 2)) / math.sqrt(d)\n self.attention_weights = masked_softmax(scores, valid_lens)\n return torch.bmm(self.dropout(self.attention_weights), values)\n\n\n<mask token>\nattention.eval()\nprint(attention(queries, keys, values, valid_lens))\n",
"step-4": "<mask token>\n\n\ndef masked_softmax(X, valid_lens):\n \"\"\"通过在最后一个轴上掩蔽元素来执行softmax操作\"\"\"\n if valid_lens is None:\n return nn.functional.softmax(X, dim=-1)\n else:\n shape = X.shape\n if valid_lens.dim() == 1:\n valid_lens = torch.repeat_interleave(valid_lens, shape[1])\n else:\n valid_lens = valid_lens.reshape(-1)\n X = d2l.sequence_mask(X.reshape(-1, shape[-1]), valid_lens, value=-\n 1000000.0)\n return nn.functional.softmax(X.reshape(shape), dim=-1)\n\n\n<mask token>\nprint(masked_softmax(torch.rand(2, 2, 4), torch.tensor([2, 3])))\nprint(masked_softmax(torch.rand(2, 2, 4), torch.tensor([[1, 3], [2, 4]])))\n\n\nclass AdditiveAttention(nn.Module):\n \"\"\"加性注意力\"\"\"\n\n def __init__(self, key_size, query_size, num_hiddens, dropout, **kwargs):\n super(AdditiveAttention, self).__init__(**kwargs)\n self.W_k = nn.Linear(key_size, num_hiddens, bias=False)\n self.W_q = nn.Linear(query_size, num_hiddens, bias=False)\n self.W_v = nn.Linear(num_hiddens, 1, bias=False)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, queries, keys, values, valid_lens):\n queries, keys = self.W_q(queries), self.W_k(keys)\n features = queries.unsqueeze(2) + keys.unsqueeze(1)\n features = torch.tanh(features)\n scores = self.W_v(features).squeeze(-1)\n print('scores:', scores)\n self.attention_weights = masked_softmax(scores, valid_lens)\n return torch.bmm(self.dropout(self.attention_weights), values)\n\n\n<mask token>\nqueries, keys = torch.normal(0, 1, (2, 1, 20)), torch.ones((2, 10, 2))\nvalues = torch.arange(40, dtype=torch.float32).reshape(1, 10, 4).repeat(2, 1, 1\n )\nvalid_lens = torch.tensor([2, 6])\nattention = AdditiveAttention(key_size=2, query_size=20, num_hiddens=8,\n dropout=0.1)\nattention.eval()\nprint(attention(queries, keys, values, valid_lens))\n\n\nclass DotProductAttention(nn.Module):\n \"\"\"缩放点积注意力\"\"\"\n\n def __init__(self, dropout, **kwargs):\n super(DotProductAttention, self).__init__(**kwargs)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, queries, keys, values, valid_lens=None):\n d = queries.shape[-1]\n scores = torch.bmm(queries, keys.transpose(1, 2)) / math.sqrt(d)\n self.attention_weights = masked_softmax(scores, valid_lens)\n return torch.bmm(self.dropout(self.attention_weights), values)\n\n\n<mask token>\nqueries = torch.normal(0, 1, (2, 1, 2))\nattention = DotProductAttention(dropout=0.5)\nattention.eval()\nprint(attention(queries, keys, values, valid_lens))\n",
"step-5": "import math\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\n\n\ndef masked_softmax(X, valid_lens):\n \"\"\"通过在最后一个轴上掩蔽元素来执行softmax操作\"\"\"\n # X:3D张量,valid_lens:1D或2D张量\n if valid_lens is None:\n return nn.functional.softmax(X, dim=-1)\n else:\n shape = X.shape\n if valid_lens.dim() == 1:\n valid_lens = torch.repeat_interleave(valid_lens, shape[1])\n else:\n valid_lens = valid_lens.reshape(-1)\n # 最后一轴上被掩蔽的元素使用一个非常大的负值替换,从而其softmax输出为0\n X = d2l.sequence_mask(X.reshape(-1, shape[-1]), valid_lens, value=-1e6)\n return nn.functional.softmax(X.reshape(shape), dim=-1)\n\n\n\"\"\"测试softmax掩码\"\"\"\nprint(masked_softmax(torch.rand(2, 2, 4), torch.tensor([2, 3])))\nprint(masked_softmax(torch.rand(2, 2, 4), torch.tensor([[1, 3], [2, 4]])))\n\n\nclass AdditiveAttention(nn.Module):\n \"\"\"加性注意力\"\"\"\n def __init__(self, key_size, query_size, num_hiddens, dropout, **kwargs):\n super(AdditiveAttention, self).__init__(**kwargs)\n self.W_k = nn.Linear(key_size, num_hiddens, bias=False)\n self.W_q = nn.Linear(query_size, num_hiddens, bias=False)\n self.W_v = nn.Linear(num_hiddens, 1, bias=False)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, queries, keys, values, valid_lens):\n queries, keys = self.W_q(queries), self.W_k(keys)\n # 在维度扩展后,\n # queries的形状:(batch_size,查询的个数,1,num_hidden)\n # keys的形状:(batch_size,1,“键-值”对的个数,num_hiddens)\n # 使用广播方式进行求和\n features = queries.unsqueeze(2) + keys.unsqueeze(1)\n features = torch.tanh(features)\n # self.w_v仅有一个输出,因此从形状中移除最后那个维度。\n # scores的形状:(batch_size,查询的个数,“键-值”对的个数)\n scores = self.W_v(features).squeeze(-1)\n print(\"scores:\", scores)\n self.attention_weights = masked_softmax(scores, valid_lens)\n # values的形状:(batch_size,“键-值”对的个数,值的维度)\n return torch.bmm(self.dropout(self.attention_weights), values)\n\n\n\"\"\"加性注意力函数测试\"\"\"\nqueries, keys = torch.normal(0, 1, (2, 1, 20)), torch.ones((2, 10, 2))\n# values的小批量,两个值矩阵是相同的\nvalues = torch.arange(40, dtype=torch.float32).reshape(1, 10, 4).repeat(2, 1, 1)\nvalid_lens = torch.tensor([2, 6])\n\nattention = AdditiveAttention(key_size=2, query_size=20, num_hiddens=8, dropout=0.1)\nattention.eval()\nprint(attention(queries, keys, values, valid_lens))\n\n\nclass DotProductAttention(nn.Module):\n \"\"\"缩放点积注意力\"\"\"\n def __init__(self, dropout, **kwargs):\n super(DotProductAttention, self).__init__(**kwargs)\n self.dropout = nn.Dropout(dropout)\n\n # queries的形状:(batch_size,查询的个数,d)\n # keys的形状:(batch_size,“键-值”对的个数,d)\n # values的形状:(batch_size,“键-值”对的个数,值的维度)\n # valid_lens的形状:(batch_size,)或者(batch_size,查询的个数)\n def forward(self, queries, keys, values, valid_lens=None):\n d = queries.shape[-1]\n # 设置transpose_b=True为了交换的keys的最后两个维度\n scores = torch.bmm(queries, keys.transpose(1, 2)) / math.sqrt(d)\n self.attention_weights = masked_softmax(scores, valid_lens)\n return torch.bmm(self.dropout(self.attention_weights), values)\n\n\n\"\"\"缩放点积注意力函数测试\"\"\"\nqueries = torch.normal(0, 1, (2, 1, 2))\nattention = DotProductAttention(dropout=0.5)\nattention.eval()\nprint(attention(queries, keys, values, valid_lens))\n\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
7,
9,
10,
11,
13
]
}
|
[
7,
9,
10,
11,
13
] |
###########################################################
# 2019-02-07: removed the marginalized prior
#
###########################################################
import sys,os
import numpy as np
import matplotlib.pylab as plt
from scipy.linalg import eig
from scipy.stats import norm, kstest, normaltest
# use default colors defined by MatPlotlib
colors = [u'#1f77b4', u'#ff7f0e', u'#2ca02c', u'#d62728', u'#9467bd', u'#8c564b']
###########################################################
fig = plt.figure(figsize=(11,4))
###########################################################
# 1) histograms of the normalized dmu(zi). The purpose is
# to show that the mock sample is not too peculiar
###########################################################
def read_jla_mock( mock_filename ):
fp = open(mock_filename,'r')
lines = fp.readlines()
fp.close()
jla = []
for line in lines:
sn = line.split()
temp = []
temp.append(float(sn[1]))
temp.append(float(sn[2]))
temp.append(float(sn[3]))
temp.append(float(sn[4]))
jla.append(temp)
return np.array(jla)
# jla = read_jla_mock('MOCK_JLA_51.txt')
# eos_SP = np.loadtxt('eos_51.txt')
# jla = read_jla_mock('MOCK_JLA_16.txt')
# eos_SP = np.loadtxt('eos_16.txt')
# jla = read_jla_mock('MOCK_JLA_10.txt')
# eos_SP = np.loadtxt('eos_10.txt')
# jla = read_jla_mock('MOCK_JLA_9.txt')
# eos_SP = np.loadtxt('eos_9.txt')
# jla = read_jla_mock('MOCK_JLA_30.txt')
# eos_SP = np.loadtxt('eos_30.txt')
# jla = read_jla_mock('MOCK_JLA_3.txt')
# eos_SP = np.loadtxt('eos_3.txt')
jla = read_jla_mock('MOCK_JLA_40.txt')
eos_SP = np.loadtxt('eos_40.txt')
eos_no_prior = np.loadtxt('eos_no_prior.txt')
eos_no_prior2 = np.loadtxt('eos_no_prior2.txt')
z = jla[:,0]
dmu = (jla[:,1]-jla[:,3])/jla[:,2] # normalize the errors
nbin_all = 15
nbin_1 = 15
nbin_2 = 15
z1 = 0.2
z2 = 0.6
ID1 = (z < z1 )
ID2 = (z >= z2 )
p = round(kstest(dmu,cdf='norm')[1],2)
p1 = round(kstest(dmu[ID1],'norm')[1],2)
p2 = round(kstest(dmu[ID2],'norm')[1],2)
plt.subplot(1,2,1)
ax = plt.gca()
rwidth=0.6
ax.hist(dmu, bins=nbin_all, label=r'ALL ' + r' p = '+str(p), alpha=0.5, rwidth=rwidth, color=colors[0])
ax.hist(dmu[ID1], bins=nbin_1, label=r'$z<' + str(z1) + '$' + r' p = '+str(p1)+'0', alpha=0.7, rwidth=rwidth, color=colors[1])
ax.hist(dmu[ID2], bins=nbin_2, label=r'$z>' + str(z2) + '$' + r' p = '+str(p2), alpha=0.8, rwidth=rwidth, color=colors[2])
ax.set_xlim(-3.5,3.5)
ax.set_xticks([-3,-2,-1,0,1,2,3])
ax.set_xticklabels([-3,-2,-1,0,1,2,3],fontsize=14)
ax.set_xlabel(r'$\widetilde{\Delta\mu}$',fontsize=14)
yticks = [0,50,100,150]
ax.set_ylim(0,170)
ax.set_yticks(yticks)
ax.set_yticklabels(yticks,fontsize=14)
ax.set_ylabel(r'Counts',fontsize=14)
ax.tick_params(axis='both',direction='in')
lgd=ax.legend(loc='upper left',fontsize=13,frameon=False)
texts = lgd.get_texts()
for i in range(len(texts)):
plt.setp(texts[i],color=colors[i])
###########################################################
# 3) reconstructed EoS
###########################################################
plt.subplot(1,2,2)
ax = plt.gca()
a = np.linspace(1,.4,20)
z = 1/a-1
colors=['blue','red','gray']
ax.hlines(-1,xmin=0,xmax=1.5,linestyle='dashed',lw=2,alpha=1,color=colors[0],label=r'Fiducal model')
# EoS result with prior enforced
ax.errorbar(z,eos_SP[:,0],yerr=[eos_SP[:,0]-eos_SP[:,2],eos_SP[:,3]-eos_SP[:,0]],
marker='o',elinewidth=1.5,markersize=4,capsize=3,capthick=2,color=colors[1],label=r'Prior enforced')
# ax.errorbar(z,eos_SP[:,0],yerr=eos_SP[:,1],
# marker='o',elinewidth=1.5,markersize=4,capsize=3,capthick=2,color=colors[1],label=r'Reconstruction')
# EoS result without prior
# ax.errorbar(z,eos_no_prior[:,0],yerr=[eos_no_prior[:,0]-eos_no_prior[:,2],eos_no_prior[:,3]-eos_no_prior[:,0]],
# marker='o',elinewidth=1.5,markersize=4,capsize=3,capthick=2,color=colors[2],label=r'Reconstruction without prior')
# ax.errorbar(z,eos_no_prior[:,0],yerr=eos_no_prior[:,1],
# marker='o',elinewidth=1.5,markersize=4,capsize=3,capthick=2,color=colors[2],label=r'Reconstruction without prior')
ax.plot(z,eos_no_prior[:,0],'--',lw=2.5,color=colors[2])
# ax.fill_between(z,y1=eos_no_prior[:,0]-eos_no_prior[:,1],y2=eos_no_prior[:,0]+eos_no_prior[:,1],
# color=colors[2],alpha=0.5,label=r'Without prior')
ax.fill_between(z,y1=eos_no_prior[:,2],y2=eos_no_prior[:,3],
color=colors[2],label=r'Reconstruction without prior')
# ax.fill_between(z,y1=eos_no_prior2[:,0]-eos_no_prior2[:,1],y2=eos_no_prior2[:,0]+eos_no_prior2[:,1],
# color='g',alpha=0.5,label=r'Reconstruction without prior')
# ax.fill_between(z,y1=eos_no_prior2[:,2],y2=eos_no_prior2[:,3],
# color='g',alpha=0.5,label=r'Reconstruction without prior')
ax.set_xlim(-0.025,1.525)
ax.set_xticks([0,0.25,0.5,0.75,1.0,1.25,1.5])
ax.set_xticklabels([0,0.25,0.5,0.75,1.0,1.25,1.5],fontsize=14)
ax.set_xlabel(r'$z$',fontsize=14)
yticks=[-3,-2,-1,-0]
ax.set_yticks(yticks)
ax.set_yticklabels(yticks,fontsize=14)
ax.set_ylabel(r'$w(z)$',fontsize=14)
# lgd=ax.legend(loc='lower left',frameon=False,fontsize=14)
ax.tick_params(axis='both',direction='in')
# texts = lgd.get_texts()
# for i in range(len(texts)):
# plt.setp(texts[i],color=colors[i])
handles,labels = ax.get_legend_handles_labels()
handles = [handles[0], handles[2], handles[1]]
labels = [labels[0], labels[2], labels[1]]
lgd=ax.legend(handles,labels,loc='lower left',frameon=False,fontsize=14)
# lgd=legend(loc='upper left',frameon=False,fontsize=12)
texts = lgd.get_texts()
cid = [0,2,1]
for i in range(len(texts)):
plt.setp(texts[i],fontsize=14,color=colors[i])
# add reduced chisq
dof = 719
chisq_red = 876.39/dof
ax.text(0.05,-2,r'$\chi^2_{\rm reduced} = '+str(round(chisq_red,2))+'$',fontsize=14,color='r')
###########################################################
# final adjustments ...
plt.subplots_adjust(wspace=0.15,
hspace=0.25,
left=0.065,
right=0.985,
top=0.975,
bottom=0.175)
plt.savefig('example_eos_result.pdf')
plt.show()
|
normal
|
{
"blob_id": "ac35672661e1dd0b97567ae4335f537dc69f98f7",
"index": 6240,
"step-1": "<mask token>\n\n\ndef read_jla_mock(mock_filename):\n fp = open(mock_filename, 'r')\n lines = fp.readlines()\n fp.close()\n jla = []\n for line in lines:\n sn = line.split()\n temp = []\n temp.append(float(sn[1]))\n temp.append(float(sn[2]))\n temp.append(float(sn[3]))\n temp.append(float(sn[4]))\n jla.append(temp)\n return np.array(jla)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_jla_mock(mock_filename):\n fp = open(mock_filename, 'r')\n lines = fp.readlines()\n fp.close()\n jla = []\n for line in lines:\n sn = line.split()\n temp = []\n temp.append(float(sn[1]))\n temp.append(float(sn[2]))\n temp.append(float(sn[3]))\n temp.append(float(sn[4]))\n jla.append(temp)\n return np.array(jla)\n\n\n<mask token>\nplt.subplot(1, 2, 1)\n<mask token>\nax.hist(dmu, bins=nbin_all, label='ALL ' + ' p = ' + str(p), alpha=0.5,\n rwidth=rwidth, color=colors[0])\nax.hist(dmu[ID1], bins=nbin_1, label='$z<' + str(z1) + '$' + ' p = ' + str(\n p1) + '0', alpha=0.7, rwidth=rwidth, color=colors[1])\nax.hist(dmu[ID2], bins=nbin_2, label='$z>' + str(z2) + '$' + ' p = ' + str(\n p2), alpha=0.8, rwidth=rwidth, color=colors[2])\nax.set_xlim(-3.5, 3.5)\nax.set_xticks([-3, -2, -1, 0, 1, 2, 3])\nax.set_xticklabels([-3, -2, -1, 0, 1, 2, 3], fontsize=14)\nax.set_xlabel('$\\\\widetilde{\\\\Delta\\\\mu}$', fontsize=14)\n<mask token>\nax.set_ylim(0, 170)\nax.set_yticks(yticks)\nax.set_yticklabels(yticks, fontsize=14)\nax.set_ylabel('Counts', fontsize=14)\nax.tick_params(axis='both', direction='in')\n<mask token>\nfor i in range(len(texts)):\n plt.setp(texts[i], color=colors[i])\nplt.subplot(1, 2, 2)\n<mask token>\nax.hlines(-1, xmin=0, xmax=1.5, linestyle='dashed', lw=2, alpha=1, color=\n colors[0], label='Fiducal model')\nax.errorbar(z, eos_SP[:, 0], yerr=[eos_SP[:, 0] - eos_SP[:, 2], eos_SP[:, 3\n ] - eos_SP[:, 0]], marker='o', elinewidth=1.5, markersize=4, capsize=3,\n capthick=2, color=colors[1], label='Prior enforced')\nax.plot(z, eos_no_prior[:, 0], '--', lw=2.5, color=colors[2])\nax.fill_between(z, y1=eos_no_prior[:, 2], y2=eos_no_prior[:, 3], color=\n colors[2], label='Reconstruction without prior')\nax.set_xlim(-0.025, 1.525)\nax.set_xticks([0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5])\nax.set_xticklabels([0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5], fontsize=14)\nax.set_xlabel('$z$', fontsize=14)\n<mask token>\nax.set_yticks(yticks)\nax.set_yticklabels(yticks, fontsize=14)\nax.set_ylabel('$w(z)$', fontsize=14)\nax.tick_params(axis='both', direction='in')\n<mask token>\nfor i in range(len(texts)):\n plt.setp(texts[i], fontsize=14, color=colors[i])\n<mask token>\nax.text(0.05, -2, '$\\\\chi^2_{\\\\rm reduced} = ' + str(round(chisq_red, 2)) +\n '$', fontsize=14, color='r')\nplt.subplots_adjust(wspace=0.15, hspace=0.25, left=0.065, right=0.985, top=\n 0.975, bottom=0.175)\nplt.savefig('example_eos_result.pdf')\nplt.show()\n",
"step-3": "<mask token>\ncolors = [u'#1f77b4', u'#ff7f0e', u'#2ca02c', u'#d62728', u'#9467bd',\n u'#8c564b']\nfig = plt.figure(figsize=(11, 4))\n\n\ndef read_jla_mock(mock_filename):\n fp = open(mock_filename, 'r')\n lines = fp.readlines()\n fp.close()\n jla = []\n for line in lines:\n sn = line.split()\n temp = []\n temp.append(float(sn[1]))\n temp.append(float(sn[2]))\n temp.append(float(sn[3]))\n temp.append(float(sn[4]))\n jla.append(temp)\n return np.array(jla)\n\n\njla = read_jla_mock('MOCK_JLA_40.txt')\neos_SP = np.loadtxt('eos_40.txt')\neos_no_prior = np.loadtxt('eos_no_prior.txt')\neos_no_prior2 = np.loadtxt('eos_no_prior2.txt')\nz = jla[:, 0]\ndmu = (jla[:, 1] - jla[:, 3]) / jla[:, 2]\nnbin_all = 15\nnbin_1 = 15\nnbin_2 = 15\nz1 = 0.2\nz2 = 0.6\nID1 = z < z1\nID2 = z >= z2\np = round(kstest(dmu, cdf='norm')[1], 2)\np1 = round(kstest(dmu[ID1], 'norm')[1], 2)\np2 = round(kstest(dmu[ID2], 'norm')[1], 2)\nplt.subplot(1, 2, 1)\nax = plt.gca()\nrwidth = 0.6\nax.hist(dmu, bins=nbin_all, label='ALL ' + ' p = ' + str(p), alpha=0.5,\n rwidth=rwidth, color=colors[0])\nax.hist(dmu[ID1], bins=nbin_1, label='$z<' + str(z1) + '$' + ' p = ' + str(\n p1) + '0', alpha=0.7, rwidth=rwidth, color=colors[1])\nax.hist(dmu[ID2], bins=nbin_2, label='$z>' + str(z2) + '$' + ' p = ' + str(\n p2), alpha=0.8, rwidth=rwidth, color=colors[2])\nax.set_xlim(-3.5, 3.5)\nax.set_xticks([-3, -2, -1, 0, 1, 2, 3])\nax.set_xticklabels([-3, -2, -1, 0, 1, 2, 3], fontsize=14)\nax.set_xlabel('$\\\\widetilde{\\\\Delta\\\\mu}$', fontsize=14)\nyticks = [0, 50, 100, 150]\nax.set_ylim(0, 170)\nax.set_yticks(yticks)\nax.set_yticklabels(yticks, fontsize=14)\nax.set_ylabel('Counts', fontsize=14)\nax.tick_params(axis='both', direction='in')\nlgd = ax.legend(loc='upper left', fontsize=13, frameon=False)\ntexts = lgd.get_texts()\nfor i in range(len(texts)):\n plt.setp(texts[i], color=colors[i])\nplt.subplot(1, 2, 2)\nax = plt.gca()\na = np.linspace(1, 0.4, 20)\nz = 1 / a - 1\ncolors = ['blue', 'red', 'gray']\nax.hlines(-1, xmin=0, xmax=1.5, linestyle='dashed', lw=2, alpha=1, color=\n colors[0], label='Fiducal model')\nax.errorbar(z, eos_SP[:, 0], yerr=[eos_SP[:, 0] - eos_SP[:, 2], eos_SP[:, 3\n ] - eos_SP[:, 0]], marker='o', elinewidth=1.5, markersize=4, capsize=3,\n capthick=2, color=colors[1], label='Prior enforced')\nax.plot(z, eos_no_prior[:, 0], '--', lw=2.5, color=colors[2])\nax.fill_between(z, y1=eos_no_prior[:, 2], y2=eos_no_prior[:, 3], color=\n colors[2], label='Reconstruction without prior')\nax.set_xlim(-0.025, 1.525)\nax.set_xticks([0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5])\nax.set_xticklabels([0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5], fontsize=14)\nax.set_xlabel('$z$', fontsize=14)\nyticks = [-3, -2, -1, -0]\nax.set_yticks(yticks)\nax.set_yticklabels(yticks, fontsize=14)\nax.set_ylabel('$w(z)$', fontsize=14)\nax.tick_params(axis='both', direction='in')\nhandles, labels = ax.get_legend_handles_labels()\nhandles = [handles[0], handles[2], handles[1]]\nlabels = [labels[0], labels[2], labels[1]]\nlgd = ax.legend(handles, labels, loc='lower left', frameon=False, fontsize=14)\ntexts = lgd.get_texts()\ncid = [0, 2, 1]\nfor i in range(len(texts)):\n plt.setp(texts[i], fontsize=14, color=colors[i])\ndof = 719\nchisq_red = 876.39 / dof\nax.text(0.05, -2, '$\\\\chi^2_{\\\\rm reduced} = ' + str(round(chisq_red, 2)) +\n '$', fontsize=14, color='r')\nplt.subplots_adjust(wspace=0.15, hspace=0.25, left=0.065, right=0.985, top=\n 0.975, bottom=0.175)\nplt.savefig('example_eos_result.pdf')\nplt.show()\n",
"step-4": "import sys, os\nimport numpy as np\nimport matplotlib.pylab as plt\nfrom scipy.linalg import eig\nfrom scipy.stats import norm, kstest, normaltest\ncolors = [u'#1f77b4', u'#ff7f0e', u'#2ca02c', u'#d62728', u'#9467bd',\n u'#8c564b']\nfig = plt.figure(figsize=(11, 4))\n\n\ndef read_jla_mock(mock_filename):\n fp = open(mock_filename, 'r')\n lines = fp.readlines()\n fp.close()\n jla = []\n for line in lines:\n sn = line.split()\n temp = []\n temp.append(float(sn[1]))\n temp.append(float(sn[2]))\n temp.append(float(sn[3]))\n temp.append(float(sn[4]))\n jla.append(temp)\n return np.array(jla)\n\n\njla = read_jla_mock('MOCK_JLA_40.txt')\neos_SP = np.loadtxt('eos_40.txt')\neos_no_prior = np.loadtxt('eos_no_prior.txt')\neos_no_prior2 = np.loadtxt('eos_no_prior2.txt')\nz = jla[:, 0]\ndmu = (jla[:, 1] - jla[:, 3]) / jla[:, 2]\nnbin_all = 15\nnbin_1 = 15\nnbin_2 = 15\nz1 = 0.2\nz2 = 0.6\nID1 = z < z1\nID2 = z >= z2\np = round(kstest(dmu, cdf='norm')[1], 2)\np1 = round(kstest(dmu[ID1], 'norm')[1], 2)\np2 = round(kstest(dmu[ID2], 'norm')[1], 2)\nplt.subplot(1, 2, 1)\nax = plt.gca()\nrwidth = 0.6\nax.hist(dmu, bins=nbin_all, label='ALL ' + ' p = ' + str(p), alpha=0.5,\n rwidth=rwidth, color=colors[0])\nax.hist(dmu[ID1], bins=nbin_1, label='$z<' + str(z1) + '$' + ' p = ' + str(\n p1) + '0', alpha=0.7, rwidth=rwidth, color=colors[1])\nax.hist(dmu[ID2], bins=nbin_2, label='$z>' + str(z2) + '$' + ' p = ' + str(\n p2), alpha=0.8, rwidth=rwidth, color=colors[2])\nax.set_xlim(-3.5, 3.5)\nax.set_xticks([-3, -2, -1, 0, 1, 2, 3])\nax.set_xticklabels([-3, -2, -1, 0, 1, 2, 3], fontsize=14)\nax.set_xlabel('$\\\\widetilde{\\\\Delta\\\\mu}$', fontsize=14)\nyticks = [0, 50, 100, 150]\nax.set_ylim(0, 170)\nax.set_yticks(yticks)\nax.set_yticklabels(yticks, fontsize=14)\nax.set_ylabel('Counts', fontsize=14)\nax.tick_params(axis='both', direction='in')\nlgd = ax.legend(loc='upper left', fontsize=13, frameon=False)\ntexts = lgd.get_texts()\nfor i in range(len(texts)):\n plt.setp(texts[i], color=colors[i])\nplt.subplot(1, 2, 2)\nax = plt.gca()\na = np.linspace(1, 0.4, 20)\nz = 1 / a - 1\ncolors = ['blue', 'red', 'gray']\nax.hlines(-1, xmin=0, xmax=1.5, linestyle='dashed', lw=2, alpha=1, color=\n colors[0], label='Fiducal model')\nax.errorbar(z, eos_SP[:, 0], yerr=[eos_SP[:, 0] - eos_SP[:, 2], eos_SP[:, 3\n ] - eos_SP[:, 0]], marker='o', elinewidth=1.5, markersize=4, capsize=3,\n capthick=2, color=colors[1], label='Prior enforced')\nax.plot(z, eos_no_prior[:, 0], '--', lw=2.5, color=colors[2])\nax.fill_between(z, y1=eos_no_prior[:, 2], y2=eos_no_prior[:, 3], color=\n colors[2], label='Reconstruction without prior')\nax.set_xlim(-0.025, 1.525)\nax.set_xticks([0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5])\nax.set_xticklabels([0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5], fontsize=14)\nax.set_xlabel('$z$', fontsize=14)\nyticks = [-3, -2, -1, -0]\nax.set_yticks(yticks)\nax.set_yticklabels(yticks, fontsize=14)\nax.set_ylabel('$w(z)$', fontsize=14)\nax.tick_params(axis='both', direction='in')\nhandles, labels = ax.get_legend_handles_labels()\nhandles = [handles[0], handles[2], handles[1]]\nlabels = [labels[0], labels[2], labels[1]]\nlgd = ax.legend(handles, labels, loc='lower left', frameon=False, fontsize=14)\ntexts = lgd.get_texts()\ncid = [0, 2, 1]\nfor i in range(len(texts)):\n plt.setp(texts[i], fontsize=14, color=colors[i])\ndof = 719\nchisq_red = 876.39 / dof\nax.text(0.05, -2, '$\\\\chi^2_{\\\\rm reduced} = ' + str(round(chisq_red, 2)) +\n '$', fontsize=14, color='r')\nplt.subplots_adjust(wspace=0.15, hspace=0.25, left=0.065, 
right=0.985, top=\n 0.975, bottom=0.175)\nplt.savefig('example_eos_result.pdf')\nplt.show()\n",
"step-5": "\n###########################################################\n# 2019-02-07: 删除了marginalized prior\n#\n###########################################################\n\nimport sys,os\nimport numpy as np\nimport matplotlib.pylab as plt\nfrom scipy.linalg import eig\nfrom scipy.stats import norm, kstest, normaltest\n\n# use default colors defined by MatPlotlib\ncolors = [u'#1f77b4', u'#ff7f0e', u'#2ca02c', u'#d62728', u'#9467bd', u'#8c564b']\n\n###########################################################\n\nfig = plt.figure(figsize=(11,4))\n\n###########################################################\n# 1) histograms of the normalized dmu(zi). The purpose is\n# to show that the mock sample is not too peculiar\n###########################################################\n\ndef read_jla_mock( mock_filename ):\n\tfp = open(mock_filename,'r')\n\tlines = fp.readlines()\n\tfp.close()\n\n\tjla = []\n\tfor line in lines:\n\t\tsn = line.split()\n\t\ttemp = []\n\t\ttemp.append(float(sn[1]))\n\t\ttemp.append(float(sn[2]))\n\t\ttemp.append(float(sn[3]))\n\t\ttemp.append(float(sn[4]))\n\t\tjla.append(temp)\n\n\treturn np.array(jla)\n\n# jla = read_jla_mock('MOCK_JLA_51.txt')\n# eos_SP = np.loadtxt('eos_51.txt')\n\n# jla = read_jla_mock('MOCK_JLA_16.txt')\n# eos_SP = np.loadtxt('eos_16.txt')\n\n# jla = read_jla_mock('MOCK_JLA_10.txt')\n# eos_SP = np.loadtxt('eos_10.txt')\n\n# jla = read_jla_mock('MOCK_JLA_9.txt')\n# eos_SP = np.loadtxt('eos_9.txt')\n\n# jla = read_jla_mock('MOCK_JLA_30.txt')\n# eos_SP = np.loadtxt('eos_30.txt')\n\n# jla = read_jla_mock('MOCK_JLA_3.txt')\n# eos_SP = np.loadtxt('eos_3.txt')\n\njla = read_jla_mock('MOCK_JLA_40.txt')\neos_SP = np.loadtxt('eos_40.txt')\neos_no_prior = np.loadtxt('eos_no_prior.txt')\neos_no_prior2 = np.loadtxt('eos_no_prior2.txt')\n\n\nz = jla[:,0]\ndmu = (jla[:,1]-jla[:,3])/jla[:,2] # normalize the errors\n\nnbin_all = 15\nnbin_1 = 15\nnbin_2 = 15\nz1 = 0.2\nz2 = 0.6\nID1 = (z < z1 )\nID2 = (z >= z2 )\n\np = round(kstest(dmu,cdf='norm')[1],2)\np1 = round(kstest(dmu[ID1],'norm')[1],2)\np2 = round(kstest(dmu[ID2],'norm')[1],2)\n\nplt.subplot(1,2,1)\nax = plt.gca()\n\nrwidth=0.6\nax.hist(dmu, bins=nbin_all, label=r'ALL ' + r' p = '+str(p), alpha=0.5, rwidth=rwidth, color=colors[0])\nax.hist(dmu[ID1], bins=nbin_1, label=r'$z<' + str(z1) + '$' + r' p = '+str(p1)+'0', alpha=0.7, rwidth=rwidth, color=colors[1])\nax.hist(dmu[ID2], bins=nbin_2, label=r'$z>' + str(z2) + '$' + r' p = '+str(p2), alpha=0.8, rwidth=rwidth, color=colors[2])\n\n\nax.set_xlim(-3.5,3.5)\nax.set_xticks([-3,-2,-1,0,1,2,3])\nax.set_xticklabels([-3,-2,-1,0,1,2,3],fontsize=14)\nax.set_xlabel(r'$\\widetilde{\\Delta\\mu}$',fontsize=14)\n\nyticks = [0,50,100,150]\nax.set_ylim(0,170)\nax.set_yticks(yticks)\nax.set_yticklabels(yticks,fontsize=14)\nax.set_ylabel(r'Counts',fontsize=14)\n\nax.tick_params(axis='both',direction='in')\n\nlgd=ax.legend(loc='upper left',fontsize=13,frameon=False)\ntexts = lgd.get_texts()\nfor i in range(len(texts)):\n\tplt.setp(texts[i],color=colors[i])\n\n###########################################################\n# 3) reconstructed EoS\n###########################################################\nplt.subplot(1,2,2)\nax = plt.gca()\n\na = np.linspace(1,.4,20)\nz = 1/a-1\n\ncolors=['blue','red','gray']\nax.hlines(-1,xmin=0,xmax=1.5,linestyle='dashed',lw=2,alpha=1,color=colors[0],label=r'Fiducal model')\n\n# EoS result with prior 
enforced\nax.errorbar(z,eos_SP[:,0],yerr=[eos_SP[:,0]-eos_SP[:,2],eos_SP[:,3]-eos_SP[:,0]],\n\t\t\tmarker='o',elinewidth=1.5,markersize=4,capsize=3,capthick=2,color=colors[1],label=r'Prior enforced')\n\n# ax.errorbar(z,eos_SP[:,0],yerr=eos_SP[:,1],\n# \t\t\tmarker='o',elinewidth=1.5,markersize=4,capsize=3,capthick=2,color=colors[1],label=r'Reconstruction')\n\n# EoS result without prior\n# ax.errorbar(z,eos_no_prior[:,0],yerr=[eos_no_prior[:,0]-eos_no_prior[:,2],eos_no_prior[:,3]-eos_no_prior[:,0]],\n# \t\t\tmarker='o',elinewidth=1.5,markersize=4,capsize=3,capthick=2,color=colors[2],label=r'Reconstruction without prior')\n\n# ax.errorbar(z,eos_no_prior[:,0],yerr=eos_no_prior[:,1],\n# \t\t\tmarker='o',elinewidth=1.5,markersize=4,capsize=3,capthick=2,color=colors[2],label=r'Reconstruction without prior')\n\nax.plot(z,eos_no_prior[:,0],'--',lw=2.5,color=colors[2])\n# ax.fill_between(z,y1=eos_no_prior[:,0]-eos_no_prior[:,1],y2=eos_no_prior[:,0]+eos_no_prior[:,1],\n# \t\t\tcolor=colors[2],alpha=0.5,label=r'Without prior')\n\nax.fill_between(z,y1=eos_no_prior[:,2],y2=eos_no_prior[:,3],\n\t\t\tcolor=colors[2],label=r'Reconstruction without prior')\n\n# ax.fill_between(z,y1=eos_no_prior2[:,0]-eos_no_prior2[:,1],y2=eos_no_prior2[:,0]+eos_no_prior2[:,1],\n# \t\t\tcolor='g',alpha=0.5,label=r'Reconstruction without prior')\n\n# ax.fill_between(z,y1=eos_no_prior2[:,2],y2=eos_no_prior2[:,3],\n# \t\t\tcolor='g',alpha=0.5,label=r'Reconstruction without prior')\n\n\nax.set_xlim(-0.025,1.525)\nax.set_xticks([0,0.25,0.5,0.75,1.0,1.25,1.5])\nax.set_xticklabels([0,0.25,0.5,0.75,1.0,1.25,1.5],fontsize=14)\nax.set_xlabel(r'$z$',fontsize=14)\n\nyticks=[-3,-2,-1,-0]\nax.set_yticks(yticks)\nax.set_yticklabels(yticks,fontsize=14)\nax.set_ylabel(r'$w(z)$',fontsize=14)\n\n# lgd=ax.legend(loc='lower left',frameon=False,fontsize=14)\nax.tick_params(axis='both',direction='in')\n\n\n# texts = lgd.get_texts()\n# for i in range(len(texts)):\n# \tplt.setp(texts[i],color=colors[i])\n\nhandles,labels = ax.get_legend_handles_labels()\nhandles = [handles[0], handles[2], handles[1]]\nlabels = [labels[0], labels[2], labels[1]]\n\nlgd=ax.legend(handles,labels,loc='lower left',frameon=False,fontsize=14)\n# lgd=legend(loc='upper left',frameon=False,fontsize=12)\ntexts = lgd.get_texts()\ncid = [0,2,1]\nfor i in range(len(texts)):\n\tplt.setp(texts[i],fontsize=14,color=colors[i])\n\n\n# add reduced chisq\ndof = 719\nchisq_red = 876.39/dof\nax.text(0.05,-2,r'$\\chi^2_{\\rm reduced} = '+str(round(chisq_red,2))+'$',fontsize=14,color='r')\n\n###########################################################\n# final adjustments ...\nplt.subplots_adjust(wspace=0.15,\n hspace=0.25,\n left=0.065,\n right=0.985,\n top=0.975,\n bottom=0.175)\n\nplt.savefig('example_eos_result.pdf')\nplt.show()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python3
class interceptThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.curPkt = None
self.seq = 0
self.foundUAV = False
def run(self):
sniff(prn=self.interceptPkt, filter='udp port 5556')
def interceptPkt(self, pkt):
if self.foundUAV == False:
print('[*] UAV Found.')
self.foundUAV = True
self.curPkt = pkt
raw = pkt.sprintf('%Raw.load%')
try:
self.seq = int(raw.split(',')[0].split('=')[-1]) + 5
except:
self.seq = 0
def injectCmd(self, cmd):
radio = dup.dupRadio(self.curPkt)
dot11 = dup.dupDot11(self.curPkt)
snap = dup.dupSNAP(self.curPkt)
llc = dup.dupLLC(self.curPkt)
ip = dup.dupIP(self.curPkt)
udp = dup.dupUDP(self.curPkt)
raw = Raw(load=cmd)
injectPkt = radio / dot11 / llc / snap / ip / udp / raw
sendp(injectPkt)
EMER = '290717952'
def emergencyland(self):
spoofSeq = self.seq + 100
watch = 'AT*COMWDG=%i\r'%spoofSeq
toCmd = 'AT*REF=%i,%s\r'% (spoofSeq + 1, EMER)
self.injectCmd(watch)
self.injectCmd(toCmd)
|
normal
|
{
"blob_id": "d9908d1ff155390dcd456dd15f92db03f093089e",
"index": 8146,
"step-1": "#!/usr/bin/env python3\n\nclass interceptThread(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.curPkt = None\n self.seq = 0\n self.foundUAV = False\n def run(self):\n sniff(prn=self.interceptPkt, filter='udp port 5556')\n def interceptPkt(self, pkt):\n if self.foundUAV == False:\n print('[*] UAV Found.')\n self.foundUAV = True\n self.curPkt = pkt\n raw = pkt.sprintf('%Raw.load%')\n try:\n self.seq = int(raw.split(',')[0].split('=')[-1]) + 5\n except:\n self.seq = 0\n def injectCmd(self, cmd):\n radio = dup.dupRadio(self.curPkt)\n dot11 = dup.dupDot11(self.curPkt)\n snap = dup.dupSNAP(self.curPkt)\n llc = dup.dupLLC(self.curPkt)\n ip = dup.dupIP(self.curPkt)\n udp = dup.dupUDP(self.curPkt)\n raw = Raw(load=cmd)\n injectPkt = radio / dot11 / llc / snap / ip / udp / raw\n sendp(injectPkt)\nEMER = '290717952'\n def emergencyland(self):\n spoofSeq = self.seq + 100\n watch = 'AT*COMWDG=%i\\r'%spoofSeq\n toCmd = 'AT*REF=%i,%s\\r'% (spoofSeq + 1, EMER)\n self.injectCmd(watch)\n self.injectCmd(toCmd)\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def filtername(name):
if len(name) > 3:
return name[:3]
elif len(name) < 3:
return name + ' ' * (3 - len(name))
return name
def filternames(names):
re = []
for n in names:
if len(n) != 3:
re += [filtername(n)]
return re
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def filtername(name):
if len(name) > 3:
return name[:3]
elif len(name) < 3:
return name + ' ' * (3 - len(name))
return name
def filternames(names):
re = []
for n in names:
if len(n) != 3:
re += [filtername(n)]
return re
def printsort2(x):
for i in range(len(x) - 1):
for j in range(1 + i, len(x)):
if x[i] > x[j]:
x[i], x[j] = x[j], x[i]
for a in x:
print(a, end=' ')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def getmin(a, b, c):
if a <= b and a <= c:
print(a)
elif b <= a and b <= c:
print(b)
else:
print(c)
def filtername(name):
if len(name) > 3:
return name[:3]
elif len(name) < 3:
return name + ' ' * (3 - len(name))
return name
def filternames(names):
re = []
for n in names:
if len(n) != 3:
re += [filtername(n)]
return re
def printsort2(x):
for i in range(len(x) - 1):
for j in range(1 + i, len(x)):
if x[i] > x[j]:
x[i], x[j] = x[j], x[i]
for a in x:
print(a, end=' ')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def getmin(a, b, c):
if a <= b and a <= c:
print(a)
elif b <= a and b <= c:
print(b)
else:
print(c)
def filtername(name):
if len(name) > 3:
return name[:3]
elif len(name) < 3:
return name + ' ' * (3 - len(name))
return name
def filternames(names):
re = []
for n in names:
if len(n) != 3:
re += [filtername(n)]
return re
def printsort2(x):
for i in range(len(x) - 1):
for j in range(1 + i, len(x)):
if x[i] > x[j]:
x[i], x[j] = x[j], x[i]
for a in x:
print(a, end=' ')
def print_hell(inp):
if '안녕' in inp:
print('Hello')
<|reserved_special_token_1|>
def getmin(a, b, c):
if a <= b and a <= c:
print(a)
elif b <= a and b <= c:
print(b)
else:
print(c)
def filtername(name):
if len(name) > 3:
return name[:3]
elif len(name) < 3:
return name + " " * (3 - len(name))
return name
def filternames(names):
re = []
for n in names:
if len(n) != 3:
re += [filtername(n)]
return re
def printsort2(x):
for i in range(len(x) - 1):
for j in range(1 + i, len(x)):
if x[i] > x[j]:
x[i], x[j] = x[j], x[i]
for a in x:
print(a, end=" ")
def print_hell(inp):
if "안녕" in inp:
print("Hello")
|
flexible
|
{
"blob_id": "917241482dc1f234d5fae9c107a5f21b018fe6d4",
"index": 9843,
"step-1": "<mask token>\n\n\ndef filtername(name):\n if len(name) > 3:\n return name[:3]\n elif len(name) < 3:\n return name + ' ' * (3 - len(name))\n return name\n\n\ndef filternames(names):\n re = []\n for n in names:\n if len(n) != 3:\n re += [filtername(n)]\n return re\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef filtername(name):\n if len(name) > 3:\n return name[:3]\n elif len(name) < 3:\n return name + ' ' * (3 - len(name))\n return name\n\n\ndef filternames(names):\n re = []\n for n in names:\n if len(n) != 3:\n re += [filtername(n)]\n return re\n\n\ndef printsort2(x):\n for i in range(len(x) - 1):\n for j in range(1 + i, len(x)):\n if x[i] > x[j]:\n x[i], x[j] = x[j], x[i]\n for a in x:\n print(a, end=' ')\n\n\n<mask token>\n",
"step-3": "def getmin(a, b, c):\n if a <= b and a <= c:\n print(a)\n elif b <= a and b <= c:\n print(b)\n else:\n print(c)\n\n\ndef filtername(name):\n if len(name) > 3:\n return name[:3]\n elif len(name) < 3:\n return name + ' ' * (3 - len(name))\n return name\n\n\ndef filternames(names):\n re = []\n for n in names:\n if len(n) != 3:\n re += [filtername(n)]\n return re\n\n\ndef printsort2(x):\n for i in range(len(x) - 1):\n for j in range(1 + i, len(x)):\n if x[i] > x[j]:\n x[i], x[j] = x[j], x[i]\n for a in x:\n print(a, end=' ')\n\n\n<mask token>\n",
"step-4": "def getmin(a, b, c):\n if a <= b and a <= c:\n print(a)\n elif b <= a and b <= c:\n print(b)\n else:\n print(c)\n\n\ndef filtername(name):\n if len(name) > 3:\n return name[:3]\n elif len(name) < 3:\n return name + ' ' * (3 - len(name))\n return name\n\n\ndef filternames(names):\n re = []\n for n in names:\n if len(n) != 3:\n re += [filtername(n)]\n return re\n\n\ndef printsort2(x):\n for i in range(len(x) - 1):\n for j in range(1 + i, len(x)):\n if x[i] > x[j]:\n x[i], x[j] = x[j], x[i]\n for a in x:\n print(a, end=' ')\n\n\ndef print_hell(inp):\n if '안녕' in inp:\n print('Hello')\n",
"step-5": "def getmin(a, b, c):\n if a <= b and a <= c:\n print(a)\n elif b <= a and b <= c:\n print(b)\n else:\n print(c)\n\n\ndef filtername(name):\n if len(name) > 3:\n return name[:3]\n elif len(name) < 3:\n return name + \" \" * (3 - len(name))\n return name\n\n\ndef filternames(names):\n re = []\n for n in names:\n if len(n) != 3:\n re += [filtername(n)]\n return re\n\n\ndef printsort2(x):\n for i in range(len(x) - 1):\n for j in range(1 + i, len(x)):\n if x[i] > x[j]:\n x[i], x[j] = x[j], x[i]\n for a in x:\n print(a, end=\" \")\n\n\ndef print_hell(inp):\n if \"안녕\" in inp:\n print(\"Hello\")\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
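A quick usage sketch for the final version of the sample above; the inputs are invented for illustration:

print(repr(filtername('Alexandra')))            # 'Ale' - truncated to 3 chars
print(repr(filtername('Jo')))                   # 'Jo ' - padded to 3 chars
print(filternames(['Jo', 'Alexandra', 'Bob']))  # ['Jo ', 'Ale'] - names already 3 chars long are skipped
printsort2([3, 1, 2])                           # prints: 1 2 3
print_hell('안녕하세요')                          # prints: Hello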
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
random.seed(int(sys.argv[3]))
<|reserved_special_token_0|>
print('%d' % n)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
randmin = int(sys.argv[1])
randmax = int(sys.argv[2])
random.seed(int(sys.argv[3]))
n = random.randint(randmin, randmax)
print('%d' % n)
<|reserved_special_token_1|>
from __future__ import print_function
from __future__ import division
import subprocess
import random
import math
import sys
import string
randmin = int(sys.argv[1])
randmax = int(sys.argv[2])
random.seed(int(sys.argv[3]))
n = random.randint(randmin, randmax)
print('%d' % n)
<|reserved_special_token_1|>
#!/usr/bin/env pypy
from __future__ import print_function
from __future__ import division
import subprocess
import random
import math
import sys
import string
randmin = int(sys.argv[1])
randmax = int(sys.argv[2])
random.seed(int(sys.argv[3]))
n = random.randint(randmin, randmax)
print('%d' % n)
|
flexible
|
{
"blob_id": "83e1c86095de88692d0116f7e32bd485ab381b29",
"index": 7040,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nrandom.seed(int(sys.argv[3]))\n<mask token>\nprint('%d' % n)\n",
"step-3": "<mask token>\nrandmin = int(sys.argv[1])\nrandmax = int(sys.argv[2])\nrandom.seed(int(sys.argv[3]))\nn = random.randint(randmin, randmax)\nprint('%d' % n)\n",
"step-4": "from __future__ import print_function\nfrom __future__ import division\nimport subprocess\nimport random\nimport math\nimport sys\nimport string\nrandmin = int(sys.argv[1])\nrandmax = int(sys.argv[2])\nrandom.seed(int(sys.argv[3]))\nn = random.randint(randmin, randmax)\nprint('%d' % n)\n",
"step-5": "#!/usr/bin/env pypy\n\nfrom __future__ import print_function\nfrom __future__ import division\nimport subprocess\nimport random\nimport math\nimport sys\nimport string\n\nrandmin = int(sys.argv[1])\nrandmax = int(sys.argv[2])\nrandom.seed(int(sys.argv[3]))\n\nn = random.randint(randmin, randmax)\n\nprint('%d' % n)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
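The final version of the script above takes min, max and seed from the command line; a small driver (the filename randgen.py is invented) shows that a fixed seed makes the result reproducible:

import subprocess
for _ in range(2):
    out = subprocess.run(['python3', 'randgen.py', '1', '100', '42'],
                         capture_output=True, text=True).stdout
    print(out.strip())  # prints the same number both times, since the seed is fixed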
import datetime
import calendar
import re
def cardinal(ordinal):
return int(''.join([char for char in ordinal if char.isdigit()]))
def meetup_day(year, month, day_of_week, ordinal):
days = {
0: 'Monday',
1: 'Tuesday',
2: 'Wednesday',
3: 'Thursday',
4: 'Friday',
5: 'Saturday',
6: 'Sunday'
}
possible_days = []
number_of_days = calendar.monthrange(year, month)[1]
days_of_month = [datetime.date(year, month, 1) + datetime.timedelta(days=x) for x in range(0, number_of_days)]
for day in days_of_month:
if days[day.weekday()] == day_of_week:
possible_days.append(day.day)
if ordinal == 'teenth':
for x in possible_days:
if 10 < x < 20:
day_of_month = x
elif ordinal == 'last':
day_of_month = possible_days[-1]
else:
day_of_month = possible_days[cardinal(ordinal)-1]
return datetime.date(year, month, day_of_month)
|
normal
|
{
"blob_id": "d4b1b6bdf125f2791c219b7db579c234eda0a73c",
"index": 9220,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef cardinal(ordinal):\n return int(''.join([char for char in ordinal if char.isdigit()]))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef cardinal(ordinal):\n return int(''.join([char for char in ordinal if char.isdigit()]))\n\n\ndef meetup_day(year, month, day_of_week, ordinal):\n days = {(0): 'Monday', (1): 'Tuesday', (2): 'Wednesday', (3):\n 'Thursday', (4): 'Friday', (5): 'Saturday', (6): 'Sunday'}\n possible_days = []\n number_of_days = calendar.monthrange(year, month)[1]\n days_of_month = [(datetime.date(year, month, 1) + datetime.timedelta(\n days=x)) for x in range(0, number_of_days)]\n for day in days_of_month:\n if days[day.weekday()] == day_of_week:\n possible_days.append(day.day)\n if ordinal == 'teenth':\n for x in possible_days:\n if 10 < x < 20:\n day_of_month = x\n elif ordinal == 'last':\n day_of_month = possible_days[-1]\n else:\n day_of_month = possible_days[cardinal(ordinal) - 1]\n return datetime.date(year, month, day_of_month)\n",
"step-4": "import datetime\nimport calendar\nimport re\n\n\ndef cardinal(ordinal):\n return int(''.join([char for char in ordinal if char.isdigit()]))\n\n\ndef meetup_day(year, month, day_of_week, ordinal):\n days = {(0): 'Monday', (1): 'Tuesday', (2): 'Wednesday', (3):\n 'Thursday', (4): 'Friday', (5): 'Saturday', (6): 'Sunday'}\n possible_days = []\n number_of_days = calendar.monthrange(year, month)[1]\n days_of_month = [(datetime.date(year, month, 1) + datetime.timedelta(\n days=x)) for x in range(0, number_of_days)]\n for day in days_of_month:\n if days[day.weekday()] == day_of_week:\n possible_days.append(day.day)\n if ordinal == 'teenth':\n for x in possible_days:\n if 10 < x < 20:\n day_of_month = x\n elif ordinal == 'last':\n day_of_month = possible_days[-1]\n else:\n day_of_month = possible_days[cardinal(ordinal) - 1]\n return datetime.date(year, month, day_of_month)\n",
"step-5": "import datetime\nimport calendar\nimport re\n\ndef cardinal(ordinal):\n return int(''.join([char for char in ordinal if char.isdigit()]))\n\ndef meetup_day(year, month, day_of_week, ordinal):\n days = {\n 0: 'Monday',\n 1: 'Tuesday',\n 2: 'Wednesday',\n 3: 'Thursday',\n 4: 'Friday',\n 5: 'Saturday',\n 6: 'Sunday'\n }\n\n possible_days = []\n\n number_of_days = calendar.monthrange(year, month)[1]\n\n days_of_month = [datetime.date(year, month, 1) + datetime.timedelta(days=x) for x in range(0, number_of_days)]\n\n for day in days_of_month:\n if days[day.weekday()] == day_of_week:\n possible_days.append(day.day)\n\n if ordinal == 'teenth':\n for x in possible_days:\n if 10 < x < 20:\n day_of_month = x\n elif ordinal == 'last':\n day_of_month = possible_days[-1]\n else:\n day_of_month = possible_days[cardinal(ordinal)-1]\n\n return datetime.date(year, month, day_of_month)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
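A few concrete calls against the sample above (results worked out by hand from the 2013 calendar, so treat them as illustrative):

print(meetup_day(2013, 5, 'Monday', '1st'))     # datetime.date(2013, 5, 6)
print(meetup_day(2013, 5, 'Monday', 'teenth'))  # datetime.date(2013, 5, 13)
print(meetup_day(2013, 5, 'Monday', 'last'))    # datetime.date(2013, 5, 27)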
import random
print(random.choice(['python', 'c++', 'java']))
print(random.choice((1.1, -5, 6, 4, 7)))
|
normal
|
{
"blob_id": "44f18d7e7713073c27fec38f0b847803eceefbc9",
"index": 2687,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(random.choice(['python', 'c++', 'java']))\nprint(random.choice((1.1, -5, 6, 4, 7)))\n",
"step-3": "import random\nprint(random.choice(['python', 'c++', 'java']))\nprint(random.choice((1.1, -5, 6, 4, 7)))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def find_neighbors():
previous_zero_index = -1
count = 0
result = []
for index, value in enumerate(source):
count += 1
if value == 0:
if index == 0:
previous_zero_index = 0
count = 0
result.append(0)
continue
if previous_zero_index == -1:
result[0:index] = reversed(result[0:index])
previous_zero_index = index
count = 0
result.append(0)
continue
result.append(0)
diff = (index - previous_zero_index) // 2
result[index - diff:index] = reversed(result[
previous_zero_index + 1:previous_zero_index + 1 + diff])
previous_zero_index = index
count = 0
continue
result.append(count)
for i in result:
print(i, end=' ')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def find_neighbors():
previous_zero_index = -1
count = 0
result = []
for index, value in enumerate(source):
count += 1
if value == 0:
if index == 0:
previous_zero_index = 0
count = 0
result.append(0)
continue
if previous_zero_index == -1:
result[0:index] = reversed(result[0:index])
previous_zero_index = index
count = 0
result.append(0)
continue
result.append(0)
diff = (index - previous_zero_index) // 2
result[index - diff:index] = reversed(result[
previous_zero_index + 1:previous_zero_index + 1 + diff])
previous_zero_index = index
count = 0
continue
result.append(count)
for i in result:
print(i, end=' ')
find_neighbors()
<|reserved_special_token_1|>
array_length = int(input())
source = [int(x) for x in input().split()]
def find_neighbors():
previous_zero_index = -1
count = 0
result = []
for index, value in enumerate(source):
count += 1
if value == 0:
if index == 0:
previous_zero_index = 0
count = 0
result.append(0)
continue
if previous_zero_index == -1:
result[0:index] = reversed(result[0:index])
previous_zero_index = index
count = 0
result.append(0)
continue
result.append(0)
diff = (index - previous_zero_index) // 2
result[index - diff:index] = reversed(result[
previous_zero_index + 1:previous_zero_index + 1 + diff])
previous_zero_index = index
count = 0
continue
result.append(count)
for i in result:
print(i, end=' ')
find_neighbors()
<|reserved_special_token_1|>
array_length = int(input())
source = [int(x) for x in input().split()]
def find_neighbors():
previous_zero_index = -1
count = 0
result = []
for index, value in enumerate(source):
count += 1
if value == 0:
if index == 0:
previous_zero_index = 0
count = 0
result.append(0)
continue
if previous_zero_index == -1:
result[0: index] = reversed(result[0:index])
previous_zero_index = index
count = 0
result.append(0)
continue
result.append(0)
diff = (index - previous_zero_index) // 2
result[index - diff: index] = reversed(result[previous_zero_index + 1: previous_zero_index + 1 + diff])
previous_zero_index = index
count = 0
continue
result.append(count)
for i in result:
print(i, end=" ")
find_neighbors()
|
flexible
|
{
"blob_id": "6d362b87b595fc59df31d1f0bb561dc83633a2ac",
"index": 9216,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef find_neighbors():\n previous_zero_index = -1\n count = 0\n result = []\n for index, value in enumerate(source):\n count += 1\n if value == 0:\n if index == 0:\n previous_zero_index = 0\n count = 0\n result.append(0)\n continue\n if previous_zero_index == -1:\n result[0:index] = reversed(result[0:index])\n previous_zero_index = index\n count = 0\n result.append(0)\n continue\n result.append(0)\n diff = (index - previous_zero_index) // 2\n result[index - diff:index] = reversed(result[\n previous_zero_index + 1:previous_zero_index + 1 + diff])\n previous_zero_index = index\n count = 0\n continue\n result.append(count)\n for i in result:\n print(i, end=' ')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef find_neighbors():\n previous_zero_index = -1\n count = 0\n result = []\n for index, value in enumerate(source):\n count += 1\n if value == 0:\n if index == 0:\n previous_zero_index = 0\n count = 0\n result.append(0)\n continue\n if previous_zero_index == -1:\n result[0:index] = reversed(result[0:index])\n previous_zero_index = index\n count = 0\n result.append(0)\n continue\n result.append(0)\n diff = (index - previous_zero_index) // 2\n result[index - diff:index] = reversed(result[\n previous_zero_index + 1:previous_zero_index + 1 + diff])\n previous_zero_index = index\n count = 0\n continue\n result.append(count)\n for i in result:\n print(i, end=' ')\n\n\nfind_neighbors()\n",
"step-4": "array_length = int(input())\nsource = [int(x) for x in input().split()]\n\n\ndef find_neighbors():\n previous_zero_index = -1\n count = 0\n result = []\n for index, value in enumerate(source):\n count += 1\n if value == 0:\n if index == 0:\n previous_zero_index = 0\n count = 0\n result.append(0)\n continue\n if previous_zero_index == -1:\n result[0:index] = reversed(result[0:index])\n previous_zero_index = index\n count = 0\n result.append(0)\n continue\n result.append(0)\n diff = (index - previous_zero_index) // 2\n result[index - diff:index] = reversed(result[\n previous_zero_index + 1:previous_zero_index + 1 + diff])\n previous_zero_index = index\n count = 0\n continue\n result.append(count)\n for i in result:\n print(i, end=' ')\n\n\nfind_neighbors()\n",
"step-5": "array_length = int(input())\nsource = [int(x) for x in input().split()]\n\ndef find_neighbors():\n previous_zero_index = -1\n count = 0\n result = []\n for index, value in enumerate(source):\n count += 1\n\n if value == 0:\n if index == 0:\n previous_zero_index = 0\n count = 0\n result.append(0)\n continue\n\n if previous_zero_index == -1:\n result[0: index] = reversed(result[0:index])\n previous_zero_index = index\n count = 0\n result.append(0)\n continue\n\n result.append(0)\n diff = (index - previous_zero_index) // 2\n result[index - diff: index] = reversed(result[previous_zero_index + 1: previous_zero_index + 1 + diff])\n\n previous_zero_index = index\n count = 0\n continue\n\n result.append(count)\n for i in result:\n print(i, end=\" \")\n\nfind_neighbors()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
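The final version of the sample above reads an array length and then the values from stdin, and prints what amounts to each position's distance to the nearest zero. A hypothetical run, assuming the sample is saved as neighbors.py (an invented filename):

import subprocess
out = subprocess.run(['python3', 'neighbors.py'],
                     input='6\n0 1 2 3 0 4\n',
                     capture_output=True, text=True).stdout
print(out)  # expected: 0 1 2 1 0 1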
<|reserved_special_token_0|>
class NeuralNetwork:
def __init__(self, input_size, hidden_size, output_size):
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
reshape = partial(fn.translate, start1=0, stop1=1, start2=-1, stop2=1)
self.weights = {'i-h': fn.apply(np.random.rand(self.hidden_size,
self.input_size), reshape), 'h-o': fn.apply(np.random.rand(self
.output_size, self.hidden_size), reshape), 'h-b': fn.apply(np.
random.rand(self.hidden_size, 1), reshape), 'o-b': fn.apply(np.
random.rand(self.output_size, 1), reshape)}
<|reserved_special_token_0|>
def calculate_layer_gradient_delta(self, layer: np.ndarray, next_layer:
np.ndarray, layer_errors: np.ndarray, learning_rate: float):
layer_gradient = np.multiply(fn.apply(layer, fn.dsigmoid), layer_errors
) * learning_rate
layer_delta = np.matmul(layer_gradient, np.transpose(next_layer))
return layer_gradient, layer_delta
def feed_forward(self, inputs: list) ->np.ndarray:
cu_inputs = np.array([inputs], dtype=np.double).T
hidden_values = self.calculate_layer_values(cu_inputs, 'i-h', 'h-b')
return self.calculate_layer_values(hidden_values, 'h-o', 'o-b')
def train(self, inputs: list, labels: list, learning_rate: float):
cu_inputs = np.asarray([inputs], dtype=np.double).T
cu_labels = np.asarray([labels], dtype=np.double).T
hidden_layer_values = self.calculate_layer_values(cu_inputs, 'i-h',
'h-b')
output_layer_values = self.calculate_layer_values(hidden_layer_values,
'h-o', 'o-b')
output_errors = np.subtract(cu_labels, output_layer_values)
hidden_output_gradient, hidden_output_delta = (self.
calculate_layer_gradient_delta(output_layer_values,
hidden_layer_values, output_errors, learning_rate))
self.weights['h-o'] = np.add(self.weights['h-o'], hidden_output_delta)
self.weights['o-b'] = np.add(self.weights['o-b'],
hidden_output_gradient)
hidden_errors = np.matmul(self.weights['h-o'].T, output_errors)
input_hidden_gradient, input_hidden_delta = (self.
calculate_layer_gradient_delta(hidden_layer_values, cu_inputs,
hidden_errors, learning_rate))
self.weights['i-h'] = np.add(self.weights['i-h'], input_hidden_delta)
self.weights['h-b'] = np.add(self.weights['h-b'], input_hidden_gradient
)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NeuralNetwork:
def __init__(self, input_size, hidden_size, output_size):
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
reshape = partial(fn.translate, start1=0, stop1=1, start2=-1, stop2=1)
self.weights = {'i-h': fn.apply(np.random.rand(self.hidden_size,
self.input_size), reshape), 'h-o': fn.apply(np.random.rand(self
.output_size, self.hidden_size), reshape), 'h-b': fn.apply(np.
random.rand(self.hidden_size, 1), reshape), 'o-b': fn.apply(np.
random.rand(self.output_size, 1), reshape)}
def calculate_layer_values(self, inputs: np.ndarray, layer_key: str,
bias_key: str):
return fn.apply(np.add(np.matmul(self.weights[layer_key], inputs),
self.weights[bias_key]), fn.sigmoid)
def calculate_layer_gradient_delta(self, layer: np.ndarray, next_layer:
np.ndarray, layer_errors: np.ndarray, learning_rate: float):
layer_gradient = np.multiply(fn.apply(layer, fn.dsigmoid), layer_errors
) * learning_rate
layer_delta = np.matmul(layer_gradient, np.transpose(next_layer))
return layer_gradient, layer_delta
def feed_forward(self, inputs: list) ->np.ndarray:
cu_inputs = np.array([inputs], dtype=np.double).T
hidden_values = self.calculate_layer_values(cu_inputs, 'i-h', 'h-b')
return self.calculate_layer_values(hidden_values, 'h-o', 'o-b')
def train(self, inputs: list, labels: list, learning_rate: float):
cu_inputs = np.asarray([inputs], dtype=np.double).T
cu_labels = np.asarray([labels], dtype=np.double).T
hidden_layer_values = self.calculate_layer_values(cu_inputs, 'i-h',
'h-b')
output_layer_values = self.calculate_layer_values(hidden_layer_values,
'h-o', 'o-b')
output_errors = np.subtract(cu_labels, output_layer_values)
hidden_output_gradient, hidden_output_delta = (self.
calculate_layer_gradient_delta(output_layer_values,
hidden_layer_values, output_errors, learning_rate))
self.weights['h-o'] = np.add(self.weights['h-o'], hidden_output_delta)
self.weights['o-b'] = np.add(self.weights['o-b'],
hidden_output_gradient)
hidden_errors = np.matmul(self.weights['h-o'].T, output_errors)
input_hidden_gradient, input_hidden_delta = (self.
calculate_layer_gradient_delta(hidden_layer_values, cu_inputs,
hidden_errors, learning_rate))
self.weights['i-h'] = np.add(self.weights['i-h'], input_hidden_delta)
self.weights['h-b'] = np.add(self.weights['h-b'], input_hidden_gradient
)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NeuralNetwork:
def __init__(self, input_size, hidden_size, output_size):
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
reshape = partial(fn.translate, start1=0, stop1=1, start2=-1, stop2=1)
self.weights = {'i-h': fn.apply(np.random.rand(self.hidden_size,
self.input_size), reshape), 'h-o': fn.apply(np.random.rand(self
.output_size, self.hidden_size), reshape), 'h-b': fn.apply(np.
random.rand(self.hidden_size, 1), reshape), 'o-b': fn.apply(np.
random.rand(self.output_size, 1), reshape)}
def calculate_layer_values(self, inputs: np.ndarray, layer_key: str,
bias_key: str):
return fn.apply(np.add(np.matmul(self.weights[layer_key], inputs),
self.weights[bias_key]), fn.sigmoid)
def calculate_layer_gradient_delta(self, layer: np.ndarray, next_layer:
np.ndarray, layer_errors: np.ndarray, learning_rate: float):
layer_gradient = np.multiply(fn.apply(layer, fn.dsigmoid), layer_errors
) * learning_rate
layer_delta = np.matmul(layer_gradient, np.transpose(next_layer))
return layer_gradient, layer_delta
def feed_forward(self, inputs: list) ->np.ndarray:
cu_inputs = np.array([inputs], dtype=np.double).T
hidden_values = self.calculate_layer_values(cu_inputs, 'i-h', 'h-b')
return self.calculate_layer_values(hidden_values, 'h-o', 'o-b')
def train(self, inputs: list, labels: list, learning_rate: float):
cu_inputs = np.asarray([inputs], dtype=np.double).T
cu_labels = np.asarray([labels], dtype=np.double).T
hidden_layer_values = self.calculate_layer_values(cu_inputs, 'i-h',
'h-b')
output_layer_values = self.calculate_layer_values(hidden_layer_values,
'h-o', 'o-b')
output_errors = np.subtract(cu_labels, output_layer_values)
hidden_output_gradient, hidden_output_delta = (self.
calculate_layer_gradient_delta(output_layer_values,
hidden_layer_values, output_errors, learning_rate))
self.weights['h-o'] = np.add(self.weights['h-o'], hidden_output_delta)
self.weights['o-b'] = np.add(self.weights['o-b'],
hidden_output_gradient)
hidden_errors = np.matmul(self.weights['h-o'].T, output_errors)
input_hidden_gradient, input_hidden_delta = (self.
calculate_layer_gradient_delta(hidden_layer_values, cu_inputs,
hidden_errors, learning_rate))
self.weights['i-h'] = np.add(self.weights['i-h'], input_hidden_delta)
self.weights['h-b'] = np.add(self.weights['h-b'], input_hidden_gradient
)
if __name__ == '__main__':
nn = NeuralNetwork(2, 2, 1)
data_set = [([0, 0], [0]), ([0, 1], [1]), ([1, 0], [1]), ([1, 1], [0])]
start = time.perf_counter()
for _ in range(10000):
rd.shuffle(data_set)
for in_data, t_data in data_set:
nn.train(in_data, t_data, 0.1)
end = time.perf_counter()
print('Took ->>', end - start, 's')
print('-' * 100)
print(nn.feed_forward([0, 1]))
print(nn.feed_forward([1, 0]))
print(nn.feed_forward([0, 0]))
print(nn.feed_forward([1, 1]))
print('-' * 100)
<|reserved_special_token_1|>
from functools import partial
import utils.functions as fn
import random as rd
import numpy as np
import time
class NeuralNetwork:
def __init__(self, input_size, hidden_size, output_size):
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
reshape = partial(fn.translate, start1=0, stop1=1, start2=-1, stop2=1)
self.weights = {'i-h': fn.apply(np.random.rand(self.hidden_size,
self.input_size), reshape), 'h-o': fn.apply(np.random.rand(self
.output_size, self.hidden_size), reshape), 'h-b': fn.apply(np.
random.rand(self.hidden_size, 1), reshape), 'o-b': fn.apply(np.
random.rand(self.output_size, 1), reshape)}
def calculate_layer_values(self, inputs: np.ndarray, layer_key: str,
bias_key: str):
return fn.apply(np.add(np.matmul(self.weights[layer_key], inputs),
self.weights[bias_key]), fn.sigmoid)
def calculate_layer_gradient_delta(self, layer: np.ndarray, next_layer:
np.ndarray, layer_errors: np.ndarray, learning_rate: float):
layer_gradient = np.multiply(fn.apply(layer, fn.dsigmoid), layer_errors
) * learning_rate
layer_delta = np.matmul(layer_gradient, np.transpose(next_layer))
return layer_gradient, layer_delta
def feed_forward(self, inputs: list) ->np.ndarray:
cu_inputs = np.array([inputs], dtype=np.double).T
hidden_values = self.calculate_layer_values(cu_inputs, 'i-h', 'h-b')
return self.calculate_layer_values(hidden_values, 'h-o', 'o-b')
def train(self, inputs: list, labels: list, learning_rate: float):
cu_inputs = np.asarray([inputs], dtype=np.double).T
cu_labels = np.asarray([labels], dtype=np.double).T
hidden_layer_values = self.calculate_layer_values(cu_inputs, 'i-h',
'h-b')
output_layer_values = self.calculate_layer_values(hidden_layer_values,
'h-o', 'o-b')
output_errors = np.subtract(cu_labels, output_layer_values)
hidden_output_gradient, hidden_output_delta = (self.
calculate_layer_gradient_delta(output_layer_values,
hidden_layer_values, output_errors, learning_rate))
self.weights['h-o'] = np.add(self.weights['h-o'], hidden_output_delta)
self.weights['o-b'] = np.add(self.weights['o-b'],
hidden_output_gradient)
hidden_errors = np.matmul(self.weights['h-o'].T, output_errors)
input_hidden_gradient, input_hidden_delta = (self.
calculate_layer_gradient_delta(hidden_layer_values, cu_inputs,
hidden_errors, learning_rate))
self.weights['i-h'] = np.add(self.weights['i-h'], input_hidden_delta)
self.weights['h-b'] = np.add(self.weights['h-b'], input_hidden_gradient
)
if __name__ == '__main__':
nn = NeuralNetwork(2, 2, 1)
data_set = [([0, 0], [0]), ([0, 1], [1]), ([1, 0], [1]), ([1, 1], [0])]
start = time.perf_counter()
for _ in range(10000):
rd.shuffle(data_set)
for in_data, t_data in data_set:
nn.train(in_data, t_data, 0.1)
end = time.perf_counter()
print('Took ->>', end - start, 's')
print('-' * 100)
print(nn.feed_forward([0, 1]))
print(nn.feed_forward([1, 0]))
print(nn.feed_forward([0, 0]))
print(nn.feed_forward([1, 1]))
print('-' * 100)
<|reserved_special_token_1|>
from functools import partial
import utils.functions as fn
import random as rd
import numpy as np
import time
class NeuralNetwork:
def __init__(self, input_size, hidden_size, output_size):
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
reshape = partial(fn.translate, start1=0, stop1=1, start2=-1, stop2=1)
self.weights = {
'i-h': fn.apply(np.random.rand(self.hidden_size, self.input_size), reshape),
'h-o': fn.apply(np.random.rand(self.output_size, self.hidden_size), reshape),
'h-b': fn.apply(np.random.rand(self.hidden_size, 1), reshape),
'o-b': fn.apply(np.random.rand(self.output_size, 1), reshape),
}
def calculate_layer_values(self, inputs: np.ndarray, layer_key: str, bias_key: str):
return fn.apply(np.add(np.matmul(self.weights[layer_key], inputs), self.weights[bias_key]), fn.sigmoid)
def calculate_layer_gradient_delta(self, layer: np.ndarray, next_layer: np.ndarray, layer_errors: np.ndarray, learning_rate: float):
layer_gradient = np.multiply(fn.apply(layer, fn.dsigmoid), layer_errors)*learning_rate
layer_delta = np.matmul(layer_gradient, np.transpose(next_layer))
return layer_gradient, layer_delta
def feed_forward(self, inputs: list) -> np.ndarray:
cu_inputs = np.array([inputs], dtype=np.double).T
hidden_values = self.calculate_layer_values(cu_inputs, 'i-h', 'h-b')
return self.calculate_layer_values(hidden_values, 'h-o', 'o-b')
def train(self, inputs: list, labels: list, learning_rate: float):
cu_inputs = np.asarray([inputs], dtype=np.double).T
cu_labels = np.asarray([labels], dtype=np.double).T
hidden_layer_values = self.calculate_layer_values(cu_inputs, 'i-h', 'h-b')
output_layer_values = self.calculate_layer_values(hidden_layer_values, 'h-o', 'o-b')
output_errors = np.subtract(cu_labels, output_layer_values)
hidden_output_gradient, hidden_output_delta = self.calculate_layer_gradient_delta(
output_layer_values, hidden_layer_values, output_errors, learning_rate)
self.weights['h-o'] = np.add(self.weights['h-o'], hidden_output_delta)
self.weights['o-b'] = np.add(self.weights['o-b'], hidden_output_gradient)
hidden_errors = np.matmul(self.weights['h-o'].T, output_errors)
input_hidden_gradient, input_hidden_delta = self.calculate_layer_gradient_delta(
hidden_layer_values, cu_inputs, hidden_errors, learning_rate)
self.weights['i-h'] = np.add(self.weights['i-h'], input_hidden_delta)
self.weights['h-b'] = np.add(self.weights['h-b'], input_hidden_gradient)
if __name__ == "__main__":
nn = NeuralNetwork(2, 2, 1)
data_set = [
([0, 0], [0]),
([0, 1], [1]),
([1, 0], [1]),
([1, 1], [0]),
]
start = time.perf_counter()
for _ in range(10000):
rd.shuffle(data_set)
for in_data, t_data in data_set:
nn.train(in_data, t_data, 0.1)
end = time.perf_counter()
print("Took ->>", end-start, "s")
print('-'*100)
print(nn.feed_forward([0, 1]))
print(nn.feed_forward([1, 0]))
print(nn.feed_forward([0, 0]))
print(nn.feed_forward([1, 1]))
print('-'*100)
|
flexible
|
{
"blob_id": "f24516d8977b10b1ccece2f8eaec6e08ce0c2e16",
"index": 9689,
"step-1": "<mask token>\n\n\nclass NeuralNetwork:\n\n def __init__(self, input_size, hidden_size, output_size):\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.output_size = output_size\n reshape = partial(fn.translate, start1=0, stop1=1, start2=-1, stop2=1)\n self.weights = {'i-h': fn.apply(np.random.rand(self.hidden_size,\n self.input_size), reshape), 'h-o': fn.apply(np.random.rand(self\n .output_size, self.hidden_size), reshape), 'h-b': fn.apply(np.\n random.rand(self.hidden_size, 1), reshape), 'o-b': fn.apply(np.\n random.rand(self.output_size, 1), reshape)}\n <mask token>\n\n def calculate_layer_gradient_delta(self, layer: np.ndarray, next_layer:\n np.ndarray, layer_errors: np.ndarray, learning_rate: float):\n layer_gradient = np.multiply(fn.apply(layer, fn.dsigmoid), layer_errors\n ) * learning_rate\n layer_delta = np.matmul(layer_gradient, np.transpose(next_layer))\n return layer_gradient, layer_delta\n\n def feed_forward(self, inputs: list) ->np.ndarray:\n cu_inputs = np.array([inputs], dtype=np.double).T\n hidden_values = self.calculate_layer_values(cu_inputs, 'i-h', 'h-b')\n return self.calculate_layer_values(hidden_values, 'h-o', 'o-b')\n\n def train(self, inputs: list, labels: list, learning_rate: float):\n cu_inputs = np.asarray([inputs], dtype=np.double).T\n cu_labels = np.asarray([labels], dtype=np.double).T\n hidden_layer_values = self.calculate_layer_values(cu_inputs, 'i-h',\n 'h-b')\n output_layer_values = self.calculate_layer_values(hidden_layer_values,\n 'h-o', 'o-b')\n output_errors = np.subtract(cu_labels, output_layer_values)\n hidden_output_gradient, hidden_output_delta = (self.\n calculate_layer_gradient_delta(output_layer_values,\n hidden_layer_values, output_errors, learning_rate))\n self.weights['h-o'] = np.add(self.weights['h-o'], hidden_output_delta)\n self.weights['o-b'] = np.add(self.weights['o-b'],\n hidden_output_gradient)\n hidden_errors = np.matmul(self.weights['h-o'].T, output_errors)\n input_hidden_gradient, input_hidden_delta = (self.\n calculate_layer_gradient_delta(hidden_layer_values, cu_inputs,\n hidden_errors, learning_rate))\n self.weights['i-h'] = np.add(self.weights['i-h'], input_hidden_delta)\n self.weights['h-b'] = np.add(self.weights['h-b'], input_hidden_gradient\n )\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass NeuralNetwork:\n\n def __init__(self, input_size, hidden_size, output_size):\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.output_size = output_size\n reshape = partial(fn.translate, start1=0, stop1=1, start2=-1, stop2=1)\n self.weights = {'i-h': fn.apply(np.random.rand(self.hidden_size,\n self.input_size), reshape), 'h-o': fn.apply(np.random.rand(self\n .output_size, self.hidden_size), reshape), 'h-b': fn.apply(np.\n random.rand(self.hidden_size, 1), reshape), 'o-b': fn.apply(np.\n random.rand(self.output_size, 1), reshape)}\n\n def calculate_layer_values(self, inputs: np.ndarray, layer_key: str,\n bias_key: str):\n return fn.apply(np.add(np.matmul(self.weights[layer_key], inputs),\n self.weights[bias_key]), fn.sigmoid)\n\n def calculate_layer_gradient_delta(self, layer: np.ndarray, next_layer:\n np.ndarray, layer_errors: np.ndarray, learning_rate: float):\n layer_gradient = np.multiply(fn.apply(layer, fn.dsigmoid), layer_errors\n ) * learning_rate\n layer_delta = np.matmul(layer_gradient, np.transpose(next_layer))\n return layer_gradient, layer_delta\n\n def feed_forward(self, inputs: list) ->np.ndarray:\n cu_inputs = np.array([inputs], dtype=np.double).T\n hidden_values = self.calculate_layer_values(cu_inputs, 'i-h', 'h-b')\n return self.calculate_layer_values(hidden_values, 'h-o', 'o-b')\n\n def train(self, inputs: list, labels: list, learning_rate: float):\n cu_inputs = np.asarray([inputs], dtype=np.double).T\n cu_labels = np.asarray([labels], dtype=np.double).T\n hidden_layer_values = self.calculate_layer_values(cu_inputs, 'i-h',\n 'h-b')\n output_layer_values = self.calculate_layer_values(hidden_layer_values,\n 'h-o', 'o-b')\n output_errors = np.subtract(cu_labels, output_layer_values)\n hidden_output_gradient, hidden_output_delta = (self.\n calculate_layer_gradient_delta(output_layer_values,\n hidden_layer_values, output_errors, learning_rate))\n self.weights['h-o'] = np.add(self.weights['h-o'], hidden_output_delta)\n self.weights['o-b'] = np.add(self.weights['o-b'],\n hidden_output_gradient)\n hidden_errors = np.matmul(self.weights['h-o'].T, output_errors)\n input_hidden_gradient, input_hidden_delta = (self.\n calculate_layer_gradient_delta(hidden_layer_values, cu_inputs,\n hidden_errors, learning_rate))\n self.weights['i-h'] = np.add(self.weights['i-h'], input_hidden_delta)\n self.weights['h-b'] = np.add(self.weights['h-b'], input_hidden_gradient\n )\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass NeuralNetwork:\n\n def __init__(self, input_size, hidden_size, output_size):\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.output_size = output_size\n reshape = partial(fn.translate, start1=0, stop1=1, start2=-1, stop2=1)\n self.weights = {'i-h': fn.apply(np.random.rand(self.hidden_size,\n self.input_size), reshape), 'h-o': fn.apply(np.random.rand(self\n .output_size, self.hidden_size), reshape), 'h-b': fn.apply(np.\n random.rand(self.hidden_size, 1), reshape), 'o-b': fn.apply(np.\n random.rand(self.output_size, 1), reshape)}\n\n def calculate_layer_values(self, inputs: np.ndarray, layer_key: str,\n bias_key: str):\n return fn.apply(np.add(np.matmul(self.weights[layer_key], inputs),\n self.weights[bias_key]), fn.sigmoid)\n\n def calculate_layer_gradient_delta(self, layer: np.ndarray, next_layer:\n np.ndarray, layer_errors: np.ndarray, learning_rate: float):\n layer_gradient = np.multiply(fn.apply(layer, fn.dsigmoid), layer_errors\n ) * learning_rate\n layer_delta = np.matmul(layer_gradient, np.transpose(next_layer))\n return layer_gradient, layer_delta\n\n def feed_forward(self, inputs: list) ->np.ndarray:\n cu_inputs = np.array([inputs], dtype=np.double).T\n hidden_values = self.calculate_layer_values(cu_inputs, 'i-h', 'h-b')\n return self.calculate_layer_values(hidden_values, 'h-o', 'o-b')\n\n def train(self, inputs: list, labels: list, learning_rate: float):\n cu_inputs = np.asarray([inputs], dtype=np.double).T\n cu_labels = np.asarray([labels], dtype=np.double).T\n hidden_layer_values = self.calculate_layer_values(cu_inputs, 'i-h',\n 'h-b')\n output_layer_values = self.calculate_layer_values(hidden_layer_values,\n 'h-o', 'o-b')\n output_errors = np.subtract(cu_labels, output_layer_values)\n hidden_output_gradient, hidden_output_delta = (self.\n calculate_layer_gradient_delta(output_layer_values,\n hidden_layer_values, output_errors, learning_rate))\n self.weights['h-o'] = np.add(self.weights['h-o'], hidden_output_delta)\n self.weights['o-b'] = np.add(self.weights['o-b'],\n hidden_output_gradient)\n hidden_errors = np.matmul(self.weights['h-o'].T, output_errors)\n input_hidden_gradient, input_hidden_delta = (self.\n calculate_layer_gradient_delta(hidden_layer_values, cu_inputs,\n hidden_errors, learning_rate))\n self.weights['i-h'] = np.add(self.weights['i-h'], input_hidden_delta)\n self.weights['h-b'] = np.add(self.weights['h-b'], input_hidden_gradient\n )\n\n\nif __name__ == '__main__':\n nn = NeuralNetwork(2, 2, 1)\n data_set = [([0, 0], [0]), ([0, 1], [1]), ([1, 0], [1]), ([1, 1], [0])]\n start = time.perf_counter()\n for _ in range(10000):\n rd.shuffle(data_set)\n for in_data, t_data in data_set:\n nn.train(in_data, t_data, 0.1)\n end = time.perf_counter()\n print('Took ->>', end - start, 's')\n print('-' * 100)\n print(nn.feed_forward([0, 1]))\n print(nn.feed_forward([1, 0]))\n print(nn.feed_forward([0, 0]))\n print(nn.feed_forward([1, 1]))\n print('-' * 100)\n",
"step-4": "from functools import partial\nimport utils.functions as fn\nimport random as rd\nimport numpy as np\nimport time\n\n\nclass NeuralNetwork:\n\n def __init__(self, input_size, hidden_size, output_size):\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.output_size = output_size\n reshape = partial(fn.translate, start1=0, stop1=1, start2=-1, stop2=1)\n self.weights = {'i-h': fn.apply(np.random.rand(self.hidden_size,\n self.input_size), reshape), 'h-o': fn.apply(np.random.rand(self\n .output_size, self.hidden_size), reshape), 'h-b': fn.apply(np.\n random.rand(self.hidden_size, 1), reshape), 'o-b': fn.apply(np.\n random.rand(self.output_size, 1), reshape)}\n\n def calculate_layer_values(self, inputs: np.ndarray, layer_key: str,\n bias_key: str):\n return fn.apply(np.add(np.matmul(self.weights[layer_key], inputs),\n self.weights[bias_key]), fn.sigmoid)\n\n def calculate_layer_gradient_delta(self, layer: np.ndarray, next_layer:\n np.ndarray, layer_errors: np.ndarray, learning_rate: float):\n layer_gradient = np.multiply(fn.apply(layer, fn.dsigmoid), layer_errors\n ) * learning_rate\n layer_delta = np.matmul(layer_gradient, np.transpose(next_layer))\n return layer_gradient, layer_delta\n\n def feed_forward(self, inputs: list) ->np.ndarray:\n cu_inputs = np.array([inputs], dtype=np.double).T\n hidden_values = self.calculate_layer_values(cu_inputs, 'i-h', 'h-b')\n return self.calculate_layer_values(hidden_values, 'h-o', 'o-b')\n\n def train(self, inputs: list, labels: list, learning_rate: float):\n cu_inputs = np.asarray([inputs], dtype=np.double).T\n cu_labels = np.asarray([labels], dtype=np.double).T\n hidden_layer_values = self.calculate_layer_values(cu_inputs, 'i-h',\n 'h-b')\n output_layer_values = self.calculate_layer_values(hidden_layer_values,\n 'h-o', 'o-b')\n output_errors = np.subtract(cu_labels, output_layer_values)\n hidden_output_gradient, hidden_output_delta = (self.\n calculate_layer_gradient_delta(output_layer_values,\n hidden_layer_values, output_errors, learning_rate))\n self.weights['h-o'] = np.add(self.weights['h-o'], hidden_output_delta)\n self.weights['o-b'] = np.add(self.weights['o-b'],\n hidden_output_gradient)\n hidden_errors = np.matmul(self.weights['h-o'].T, output_errors)\n input_hidden_gradient, input_hidden_delta = (self.\n calculate_layer_gradient_delta(hidden_layer_values, cu_inputs,\n hidden_errors, learning_rate))\n self.weights['i-h'] = np.add(self.weights['i-h'], input_hidden_delta)\n self.weights['h-b'] = np.add(self.weights['h-b'], input_hidden_gradient\n )\n\n\nif __name__ == '__main__':\n nn = NeuralNetwork(2, 2, 1)\n data_set = [([0, 0], [0]), ([0, 1], [1]), ([1, 0], [1]), ([1, 1], [0])]\n start = time.perf_counter()\n for _ in range(10000):\n rd.shuffle(data_set)\n for in_data, t_data in data_set:\n nn.train(in_data, t_data, 0.1)\n end = time.perf_counter()\n print('Took ->>', end - start, 's')\n print('-' * 100)\n print(nn.feed_forward([0, 1]))\n print(nn.feed_forward([1, 0]))\n print(nn.feed_forward([0, 0]))\n print(nn.feed_forward([1, 1]))\n print('-' * 100)\n",
"step-5": "from functools import partial\n\nimport utils.functions as fn\nimport random as rd\nimport numpy as np\n\nimport time\n\n\nclass NeuralNetwork:\n def __init__(self, input_size, hidden_size, output_size):\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.output_size = output_size\n\n reshape = partial(fn.translate, start1=0, stop1=1, start2=-1, stop2=1)\n self.weights = {\n 'i-h': fn.apply(np.random.rand(self.hidden_size, self.input_size), reshape),\n 'h-o': fn.apply(np.random.rand(self.output_size, self.hidden_size), reshape),\n 'h-b': fn.apply(np.random.rand(self.hidden_size, 1), reshape),\n 'o-b': fn.apply(np.random.rand(self.output_size, 1), reshape),\n }\n\n def calculate_layer_values(self, inputs: np.ndarray, layer_key: str, bias_key: str):\n return fn.apply(np.add(np.matmul(self.weights[layer_key], inputs), self.weights[bias_key]), fn.sigmoid)\n\n def calculate_layer_gradient_delta(self, layer: np.ndarray, next_layer: np.ndarray, layer_errors: np.ndarray, learning_rate: float):\n layer_gradient = np.multiply(fn.apply(layer, fn.dsigmoid), layer_errors)*learning_rate\n layer_delta = np.matmul(layer_gradient, np.transpose(next_layer))\n return layer_gradient, layer_delta\n\n def feed_forward(self, inputs: list) -> np.ndarray:\n cu_inputs = np.array([inputs], dtype=np.double).T\n hidden_values = self.calculate_layer_values(cu_inputs, 'i-h', 'h-b')\n return self.calculate_layer_values(hidden_values, 'h-o', 'o-b')\n\n def train(self, inputs: list, labels: list, learning_rate: float):\n cu_inputs = np.asarray([inputs], dtype=np.double).T\n cu_labels = np.asarray([labels], dtype=np.double).T\n\n hidden_layer_values = self.calculate_layer_values(cu_inputs, 'i-h', 'h-b')\n output_layer_values = self.calculate_layer_values(hidden_layer_values, 'h-o', 'o-b')\n\n output_errors = np.subtract(cu_labels, output_layer_values)\n hidden_output_gradient, hidden_output_delta = self.calculate_layer_gradient_delta(\n output_layer_values, hidden_layer_values, output_errors, learning_rate)\n\n self.weights['h-o'] = np.add(self.weights['h-o'], hidden_output_delta)\n self.weights['o-b'] = np.add(self.weights['o-b'], hidden_output_gradient)\n\n hidden_errors = np.matmul(self.weights['h-o'].T, output_errors)\n\n input_hidden_gradient, input_hidden_delta = self.calculate_layer_gradient_delta(\n hidden_layer_values, cu_inputs, hidden_errors, learning_rate)\n\n self.weights['i-h'] = np.add(self.weights['i-h'], input_hidden_delta)\n self.weights['h-b'] = np.add(self.weights['h-b'], input_hidden_gradient)\n\n\nif __name__ == \"__main__\":\n nn = NeuralNetwork(2, 2, 1)\n data_set = [\n ([0, 0], [0]),\n ([0, 1], [1]),\n ([1, 0], [1]),\n ([1, 1], [0]),\n ]\n\n start = time.perf_counter()\n for _ in range(10000):\n rd.shuffle(data_set)\n for in_data, t_data in data_set:\n nn.train(in_data, t_data, 0.1)\n\n end = time.perf_counter()\n print(\"Took ->>\", end-start, \"s\")\n print('-'*100)\n print(nn.feed_forward([0, 1]))\n print(nn.feed_forward([1, 0]))\n print(nn.feed_forward([0, 0]))\n print(nn.feed_forward([1, 1]))\n print('-'*100)\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
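The sample above imports utils.functions as fn, which is not included in the record. A minimal stub of what that module would need to expose, inferred only from how the calls are used (the original implementation may differ):

# utils/functions.py - inferred stub, not the original module
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def dsigmoid(y):
    # the training code applies this to already-activated layer values,
    # so it is the sigmoid derivative expressed in terms of the output
    return y * (1.0 - y)

def apply(matrix, func):
    # element-wise application of func to a numpy array
    return np.vectorize(func)(matrix)

def translate(value, start1, stop1, start2, stop2):
    # linear re-mapping of value from [start1, stop1] into [start2, stop2]
    return start2 + (value - start1) * (stop2 - start2) / (stop1 - start1)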
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GroupKFold
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_log_error
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import Lasso
def test_lasso():
test = pd.read_csv('./data/test.csv')
building_metadata = pd.read_csv('./data/building_metadata.csv')
weather_test = pd.read_csv('./data/weather_test.csv')
# Sort data for future imputation
test.sort_values(by=['building_id','timestamp'], inplace=True)
# Merging data
test = (test
.merge(building_metadata, on = 'building_id', how='left')
.merge(weather_test, on = ['site_id','timestamp'], how='left'))
del building_metadata
del weather_test
#Add dates variables
test['timestamp'] = pd.to_datetime(test['timestamp'])
test['hour'] = test.timestamp.dt.hour
test['wday'] = test.timestamp.dt.dayofweek
test['week'] = test.timestamp.dt.weekofyear
#Eliminate problematic variables
test.drop(['timestamp','year_built','floor_count','cloud_coverage','site_id','primary_use','wind_direction','square_feet','dew_temperature','sea_level_pressure','wind_speed','precip_depth_1_hr'], inplace=True, axis = 1)
# Imputation
test = test.interpolate()
    test.drop(test[test.hour <= 21].index, inplace=True)  # keep only hours 22-23; collapses the original 22 per-hour drops into one equivalent filter
# One Hot Encoding
encode = OneHotEncoder(categories='auto',drop = 'first')
catego_var = test.loc[:,['building_id','meter']].to_numpy()
catego_var = encode.fit_transform(catego_var).toarray()
encode_names = test.building_id.unique().tolist()[1:] + ['meter_1','meter_2','meter_3']
encode_var = pd.DataFrame(catego_var, columns = encode_names)
test.drop('meter', inplace=True, axis = 1)
test.reset_index(drop=True,inplace=True)
test = test.join(encode_var)
# Add row as set_index
test.set_index('row_id', inplace=True)
return test
#X_train, y_train = train_lasso()
#mod_lasso = Lasso()
#mod_lasso.fit(X_train, y_train)
#print(mod_lasso.coef_)
from joblib import dump, load
mod_lasso = load('mod_lasso.joblib')
X_test = test_lasso()
y_pred = mod_lasso.predict(X_test)
print(X_test.head())
sub = pd.DataFrame(np.maximum(0,y_pred), index = X_test.index, columns = ['meter_reading'])
sub.sort_values(by = 'row_id', inplace = True)
sub.to_csv('./submission12.csv')
|
normal
|
{
"blob_id": "6028b46eab422dea02af24e9cf724fe0d8b3ecc4",
"index": 9531,
"step-1": "<mask token>\n\n\ndef test_lasso():\n test = pd.read_csv('./data/test.csv')\n building_metadata = pd.read_csv('./data/building_metadata.csv')\n weather_test = pd.read_csv('./data/weather_test.csv')\n test.sort_values(by=['building_id', 'timestamp'], inplace=True)\n test = test.merge(building_metadata, on='building_id', how='left').merge(\n weather_test, on=['site_id', 'timestamp'], how='left')\n del building_metadata\n del weather_test\n test['timestamp'] = pd.to_datetime(test['timestamp'])\n test['hour'] = test.timestamp.dt.hour\n test['wday'] = test.timestamp.dt.dayofweek\n test['week'] = test.timestamp.dt.weekofyear\n test.drop(['timestamp', 'year_built', 'floor_count', 'cloud_coverage',\n 'site_id', 'primary_use', 'wind_direction', 'square_feet',\n 'dew_temperature', 'sea_level_pressure', 'wind_speed',\n 'precip_depth_1_hr'], inplace=True, axis=1)\n test = test.interpolate()\n test.drop(test[test.hour == 0].index, inplace=True)\n test.drop(test[test.hour == 1].index, inplace=True)\n test.drop(test[test.hour == 2].index, inplace=True)\n test.drop(test[test.hour == 3].index, inplace=True)\n test.drop(test[test.hour == 4].index, inplace=True)\n test.drop(test[test.hour == 5].index, inplace=True)\n test.drop(test[test.hour == 6].index, inplace=True)\n test.drop(test[test.hour == 7].index, inplace=True)\n test.drop(test[test.hour == 8].index, inplace=True)\n test.drop(test[test.hour == 9].index, inplace=True)\n test.drop(test[test.hour == 10].index, inplace=True)\n test.drop(test[test.hour == 11].index, inplace=True)\n test.drop(test[test.hour == 12].index, inplace=True)\n test.drop(test[test.hour == 13].index, inplace=True)\n test.drop(test[test.hour == 14].index, inplace=True)\n test.drop(test[test.hour == 15].index, inplace=True)\n test.drop(test[test.hour == 16].index, inplace=True)\n test.drop(test[test.hour == 17].index, inplace=True)\n test.drop(test[test.hour == 18].index, inplace=True)\n test.drop(test[test.hour == 19].index, inplace=True)\n test.drop(test[test.hour == 20].index, inplace=True)\n test.drop(test[test.hour == 21].index, inplace=True)\n encode = OneHotEncoder(categories='auto', drop='first')\n catego_var = test.loc[:, ['building_id', 'meter']].to_numpy()\n catego_var = encode.fit_transform(catego_var).toarray()\n encode_names = test.building_id.unique().tolist()[1:] + ['meter_1',\n 'meter_2', 'meter_3']\n encode_var = pd.DataFrame(catego_var, columns=encode_names)\n test.drop('meter', inplace=True, axis=1)\n test.reset_index(drop=True, inplace=True)\n test = test.join(encode_var)\n test.set_index('row_id', inplace=True)\n return test\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_lasso():\n test = pd.read_csv('./data/test.csv')\n building_metadata = pd.read_csv('./data/building_metadata.csv')\n weather_test = pd.read_csv('./data/weather_test.csv')\n test.sort_values(by=['building_id', 'timestamp'], inplace=True)\n test = test.merge(building_metadata, on='building_id', how='left').merge(\n weather_test, on=['site_id', 'timestamp'], how='left')\n del building_metadata\n del weather_test\n test['timestamp'] = pd.to_datetime(test['timestamp'])\n test['hour'] = test.timestamp.dt.hour\n test['wday'] = test.timestamp.dt.dayofweek\n test['week'] = test.timestamp.dt.weekofyear\n test.drop(['timestamp', 'year_built', 'floor_count', 'cloud_coverage',\n 'site_id', 'primary_use', 'wind_direction', 'square_feet',\n 'dew_temperature', 'sea_level_pressure', 'wind_speed',\n 'precip_depth_1_hr'], inplace=True, axis=1)\n test = test.interpolate()\n test.drop(test[test.hour == 0].index, inplace=True)\n test.drop(test[test.hour == 1].index, inplace=True)\n test.drop(test[test.hour == 2].index, inplace=True)\n test.drop(test[test.hour == 3].index, inplace=True)\n test.drop(test[test.hour == 4].index, inplace=True)\n test.drop(test[test.hour == 5].index, inplace=True)\n test.drop(test[test.hour == 6].index, inplace=True)\n test.drop(test[test.hour == 7].index, inplace=True)\n test.drop(test[test.hour == 8].index, inplace=True)\n test.drop(test[test.hour == 9].index, inplace=True)\n test.drop(test[test.hour == 10].index, inplace=True)\n test.drop(test[test.hour == 11].index, inplace=True)\n test.drop(test[test.hour == 12].index, inplace=True)\n test.drop(test[test.hour == 13].index, inplace=True)\n test.drop(test[test.hour == 14].index, inplace=True)\n test.drop(test[test.hour == 15].index, inplace=True)\n test.drop(test[test.hour == 16].index, inplace=True)\n test.drop(test[test.hour == 17].index, inplace=True)\n test.drop(test[test.hour == 18].index, inplace=True)\n test.drop(test[test.hour == 19].index, inplace=True)\n test.drop(test[test.hour == 20].index, inplace=True)\n test.drop(test[test.hour == 21].index, inplace=True)\n encode = OneHotEncoder(categories='auto', drop='first')\n catego_var = test.loc[:, ['building_id', 'meter']].to_numpy()\n catego_var = encode.fit_transform(catego_var).toarray()\n encode_names = test.building_id.unique().tolist()[1:] + ['meter_1',\n 'meter_2', 'meter_3']\n encode_var = pd.DataFrame(catego_var, columns=encode_names)\n test.drop('meter', inplace=True, axis=1)\n test.reset_index(drop=True, inplace=True)\n test = test.join(encode_var)\n test.set_index('row_id', inplace=True)\n return test\n\n\n<mask token>\nprint(X_test.head())\n<mask token>\nsub.sort_values(by='row_id', inplace=True)\nsub.to_csv('./submission12.csv')\n",
"step-3": "<mask token>\n\n\ndef test_lasso():\n test = pd.read_csv('./data/test.csv')\n building_metadata = pd.read_csv('./data/building_metadata.csv')\n weather_test = pd.read_csv('./data/weather_test.csv')\n test.sort_values(by=['building_id', 'timestamp'], inplace=True)\n test = test.merge(building_metadata, on='building_id', how='left').merge(\n weather_test, on=['site_id', 'timestamp'], how='left')\n del building_metadata\n del weather_test\n test['timestamp'] = pd.to_datetime(test['timestamp'])\n test['hour'] = test.timestamp.dt.hour\n test['wday'] = test.timestamp.dt.dayofweek\n test['week'] = test.timestamp.dt.weekofyear\n test.drop(['timestamp', 'year_built', 'floor_count', 'cloud_coverage',\n 'site_id', 'primary_use', 'wind_direction', 'square_feet',\n 'dew_temperature', 'sea_level_pressure', 'wind_speed',\n 'precip_depth_1_hr'], inplace=True, axis=1)\n test = test.interpolate()\n test.drop(test[test.hour == 0].index, inplace=True)\n test.drop(test[test.hour == 1].index, inplace=True)\n test.drop(test[test.hour == 2].index, inplace=True)\n test.drop(test[test.hour == 3].index, inplace=True)\n test.drop(test[test.hour == 4].index, inplace=True)\n test.drop(test[test.hour == 5].index, inplace=True)\n test.drop(test[test.hour == 6].index, inplace=True)\n test.drop(test[test.hour == 7].index, inplace=True)\n test.drop(test[test.hour == 8].index, inplace=True)\n test.drop(test[test.hour == 9].index, inplace=True)\n test.drop(test[test.hour == 10].index, inplace=True)\n test.drop(test[test.hour == 11].index, inplace=True)\n test.drop(test[test.hour == 12].index, inplace=True)\n test.drop(test[test.hour == 13].index, inplace=True)\n test.drop(test[test.hour == 14].index, inplace=True)\n test.drop(test[test.hour == 15].index, inplace=True)\n test.drop(test[test.hour == 16].index, inplace=True)\n test.drop(test[test.hour == 17].index, inplace=True)\n test.drop(test[test.hour == 18].index, inplace=True)\n test.drop(test[test.hour == 19].index, inplace=True)\n test.drop(test[test.hour == 20].index, inplace=True)\n test.drop(test[test.hour == 21].index, inplace=True)\n encode = OneHotEncoder(categories='auto', drop='first')\n catego_var = test.loc[:, ['building_id', 'meter']].to_numpy()\n catego_var = encode.fit_transform(catego_var).toarray()\n encode_names = test.building_id.unique().tolist()[1:] + ['meter_1',\n 'meter_2', 'meter_3']\n encode_var = pd.DataFrame(catego_var, columns=encode_names)\n test.drop('meter', inplace=True, axis=1)\n test.reset_index(drop=True, inplace=True)\n test = test.join(encode_var)\n test.set_index('row_id', inplace=True)\n return test\n\n\n<mask token>\nmod_lasso = load('mod_lasso.joblib')\nX_test = test_lasso()\ny_pred = mod_lasso.predict(X_test)\nprint(X_test.head())\nsub = pd.DataFrame(np.maximum(0, y_pred), index=X_test.index, columns=[\n 'meter_reading'])\nsub.sort_values(by='row_id', inplace=True)\nsub.to_csv('./submission12.csv')\n",
"step-4": "import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GroupKFold\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_log_error\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.linear_model import Lasso\n\n\ndef test_lasso():\n test = pd.read_csv('./data/test.csv')\n building_metadata = pd.read_csv('./data/building_metadata.csv')\n weather_test = pd.read_csv('./data/weather_test.csv')\n test.sort_values(by=['building_id', 'timestamp'], inplace=True)\n test = test.merge(building_metadata, on='building_id', how='left').merge(\n weather_test, on=['site_id', 'timestamp'], how='left')\n del building_metadata\n del weather_test\n test['timestamp'] = pd.to_datetime(test['timestamp'])\n test['hour'] = test.timestamp.dt.hour\n test['wday'] = test.timestamp.dt.dayofweek\n test['week'] = test.timestamp.dt.weekofyear\n test.drop(['timestamp', 'year_built', 'floor_count', 'cloud_coverage',\n 'site_id', 'primary_use', 'wind_direction', 'square_feet',\n 'dew_temperature', 'sea_level_pressure', 'wind_speed',\n 'precip_depth_1_hr'], inplace=True, axis=1)\n test = test.interpolate()\n test.drop(test[test.hour == 0].index, inplace=True)\n test.drop(test[test.hour == 1].index, inplace=True)\n test.drop(test[test.hour == 2].index, inplace=True)\n test.drop(test[test.hour == 3].index, inplace=True)\n test.drop(test[test.hour == 4].index, inplace=True)\n test.drop(test[test.hour == 5].index, inplace=True)\n test.drop(test[test.hour == 6].index, inplace=True)\n test.drop(test[test.hour == 7].index, inplace=True)\n test.drop(test[test.hour == 8].index, inplace=True)\n test.drop(test[test.hour == 9].index, inplace=True)\n test.drop(test[test.hour == 10].index, inplace=True)\n test.drop(test[test.hour == 11].index, inplace=True)\n test.drop(test[test.hour == 12].index, inplace=True)\n test.drop(test[test.hour == 13].index, inplace=True)\n test.drop(test[test.hour == 14].index, inplace=True)\n test.drop(test[test.hour == 15].index, inplace=True)\n test.drop(test[test.hour == 16].index, inplace=True)\n test.drop(test[test.hour == 17].index, inplace=True)\n test.drop(test[test.hour == 18].index, inplace=True)\n test.drop(test[test.hour == 19].index, inplace=True)\n test.drop(test[test.hour == 20].index, inplace=True)\n test.drop(test[test.hour == 21].index, inplace=True)\n encode = OneHotEncoder(categories='auto', drop='first')\n catego_var = test.loc[:, ['building_id', 'meter']].to_numpy()\n catego_var = encode.fit_transform(catego_var).toarray()\n encode_names = test.building_id.unique().tolist()[1:] + ['meter_1',\n 'meter_2', 'meter_3']\n encode_var = pd.DataFrame(catego_var, columns=encode_names)\n test.drop('meter', inplace=True, axis=1)\n test.reset_index(drop=True, inplace=True)\n test = test.join(encode_var)\n test.set_index('row_id', inplace=True)\n return test\n\n\nfrom joblib import dump, load\nmod_lasso = load('mod_lasso.joblib')\nX_test = test_lasso()\ny_pred = mod_lasso.predict(X_test)\nprint(X_test.head())\nsub = pd.DataFrame(np.maximum(0, y_pred), index=X_test.index, columns=[\n 'meter_reading'])\nsub.sort_values(by='row_id', inplace=True)\nsub.to_csv('./submission12.csv')\n",
"step-5": "import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GroupKFold\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_log_error\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.linear_model import Lasso\n\n\ndef test_lasso():\n\n test = pd.read_csv('./data/test.csv')\n building_metadata = pd.read_csv('./data/building_metadata.csv')\n weather_test = pd.read_csv('./data/weather_test.csv')\n\n # Sort data for future imputation\n test.sort_values(by=['building_id','timestamp'], inplace=True)\n\n # Merging data\n test = (test\n .merge(building_metadata, on = 'building_id', how='left')\n .merge(weather_test, on = ['site_id','timestamp'], how='left'))\n\n del building_metadata\n del weather_test\n\n #Add dates variables\n test['timestamp'] = pd.to_datetime(test['timestamp'])\n test['hour'] = test.timestamp.dt.hour\n test['wday'] = test.timestamp.dt.dayofweek\n test['week'] = test.timestamp.dt.weekofyear\n\n #Eliminate problematic variables\n test.drop(['timestamp','year_built','floor_count','cloud_coverage','site_id','primary_use','wind_direction','square_feet','dew_temperature','sea_level_pressure','wind_speed','precip_depth_1_hr'], inplace=True, axis = 1)\n\n # Imputation\n test = test.interpolate()\n test.drop(test[test.hour==0].index, inplace=True)\n test.drop(test[test.hour==1].index, inplace=True)\n test.drop(test[test.hour==2].index, inplace=True)\n test.drop(test[test.hour==3].index, inplace=True)\n test.drop(test[test.hour==4].index, inplace=True)\n test.drop(test[test.hour==5].index, inplace=True)\n test.drop(test[test.hour==6].index, inplace=True)\n test.drop(test[test.hour==7].index, inplace=True)\n test.drop(test[test.hour==8].index, inplace=True)\n test.drop(test[test.hour==9].index, inplace=True)\n test.drop(test[test.hour==10].index, inplace=True)\n test.drop(test[test.hour==11].index, inplace=True)\n test.drop(test[test.hour==12].index, inplace=True)\n test.drop(test[test.hour==13].index, inplace=True)\n test.drop(test[test.hour==14].index, inplace=True)\n test.drop(test[test.hour==15].index, inplace=True)\n test.drop(test[test.hour==16].index, inplace=True)\n test.drop(test[test.hour==17].index, inplace=True)\n test.drop(test[test.hour==18].index, inplace=True)\n test.drop(test[test.hour==19].index, inplace=True)\n test.drop(test[test.hour==20].index, inplace=True)\n test.drop(test[test.hour==21].index, inplace=True)\n\n # One Hot Encoding\n\n encode = OneHotEncoder(categories='auto',drop = 'first')\n catego_var = test.loc[:,['building_id','meter']].to_numpy()\n catego_var = encode.fit_transform(catego_var).toarray()\n encode_names = test.building_id.unique().tolist()[1:] + ['meter_1','meter_2','meter_3']\n encode_var = pd.DataFrame(catego_var, columns = encode_names)\n\n test.drop('meter', inplace=True, axis = 1)\n test.reset_index(drop=True,inplace=True)\n test = test.join(encode_var)\n\n # Add row as set_index\n test.set_index('row_id', inplace=True)\n\n return test\n\n\n\n#X_train, y_train = train_lasso()\n\n#mod_lasso = Lasso()\n#mod_lasso.fit(X_train, y_train)\n\n#print(mod_lasso.coef_)\nfrom joblib import dump, load\nmod_lasso = load('mod_lasso.joblib') \n\n\nX_test = test_lasso()\ny_pred = mod_lasso.predict(X_test)\nprint(X_test.head())\n\nsub = pd.DataFrame(np.maximum(0,y_pred), index = X_test.index, columns = ['meter_reading'])\nsub.sort_values(by = 'row_id', inplace = True)\nsub.to_csv('./submission12.csv')",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from .. import db


class Account(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    acc = db.Column(db.String(50), unique=True)  # TODO adjust length
    pwd = db.Column(db.String(50))  # TODO adjust length
    name = db.Column(db.String(20))
    sex = db.Column(db.SmallInteger)
    idno = db.Column(db.String(20))
    phone = db.Column(db.String(20))
    crttime = db.Column(db.TIMESTAMP)
    crtip = db.Column(db.String(50))
    crtmac = db.Column(db.String(50))
    crtplat = db.Column(db.SmallInteger)
    crtrole = db.Column(db.SmallInteger)
    lasttime = db.Column(db.TIMESTAMP)
    lastip = db.Column(db.String(50))
    lastmac = db.Column(db.String(50))
    lastplat = db.Column(db.SmallInteger)
    lastrole = db.Column(db.SmallInteger)

    transporter = db.relationship('Transporter', uselist=False)
    consignor = db.relationship('Consignor', uselist=False)

    def __init__(self, acc, pwd):
        self.acc = acc
        self.pwd = pwd

    def __repr__(self):
        return '<Account %s %s>' % (str(self.id), self.acc)


class Transporter(db.Model):
    id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)
    d_lic = db.Column(db.String(50))  # TODO length
    v_lic = db.Column(db.String(50))

    account = db.relationship('Account', uselist=False)

    def __init__(self):
        pass

    def __repr__(self):
        return '<Transporter %s>' % str(self.id)


class Consignor(db.Model):
    id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)

    account = db.relationship('Account', uselist=False)
    indents = db.relationship('Indent', lazy='dynamic')

    def __init__(self):
        pass

    def __repr__(self):
        return '<Consignor %s>' % str(self.id)


class Convoy(db.Model):
    id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)

    account = db.relationship('Account', uselist=False)

    def __init__(self):
        pass

    def __repr__(self):
        return '<Convoy %s>' % str(self.id)
|
normal
|
{
"blob_id": "b6824251b1165ca6c66049d40c79fccee6bc7d3a",
"index": 159,
"step-1": "<mask token>\n\n\nclass Consignor(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n account = db.relationship('Account', uselist=False)\n indents = db.relationship('Indent', lazy='dynamic')\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Consignor %s>' % str(self.id)\n\n\nclass Convoy(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n account = db.relationship('Account', uselist=False)\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Convoy %s>' % str(self.id)\n",
"step-2": "<mask token>\n\n\nclass Account(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __repr__(self):\n return '<Account %s %s>' % (str(self.id), self.acc)\n\n\nclass Transporter(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n d_lic = db.Column(db.String(50))\n v_lic = db.Column(db.String(50))\n account = db.relationship('Account', uselist=False)\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Transporter %s>' % str(self.id)\n\n\nclass Consignor(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n account = db.relationship('Account', uselist=False)\n indents = db.relationship('Indent', lazy='dynamic')\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Consignor %s>' % str(self.id)\n\n\nclass Convoy(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n account = db.relationship('Account', uselist=False)\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Convoy %s>' % str(self.id)\n",
"step-3": "<mask token>\n\n\nclass Account(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, acc, pwd):\n self.acc = acc\n self.pwd = pwd\n\n def __repr__(self):\n return '<Account %s %s>' % (str(self.id), self.acc)\n\n\nclass Transporter(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n d_lic = db.Column(db.String(50))\n v_lic = db.Column(db.String(50))\n account = db.relationship('Account', uselist=False)\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Transporter %s>' % str(self.id)\n\n\nclass Consignor(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n account = db.relationship('Account', uselist=False)\n indents = db.relationship('Indent', lazy='dynamic')\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Consignor %s>' % str(self.id)\n\n\nclass Convoy(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n account = db.relationship('Account', uselist=False)\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Convoy %s>' % str(self.id)\n",
"step-4": "<mask token>\n\n\nclass Account(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n acc = db.Column(db.String(50), unique=True)\n pwd = db.Column(db.String(50))\n name = db.Column(db.String(20))\n sex = db.Column(db.SmallInteger)\n idno = db.Column(db.String(20))\n phone = db.Column(db.String(20))\n crttime = db.Column(db.TIMESTAMP)\n crtip = db.Column(db.String(50))\n crtmac = db.Column(db.String(50))\n crtplat = db.Column(db.SmallInteger)\n crtrole = db.Column(db.SmallInteger)\n lasttime = db.Column(db.TIMESTAMP)\n lastip = db.Column(db.String(50))\n lastmac = db.Column(db.String(50))\n lastplat = db.Column(db.SmallInteger)\n lastrole = db.Column(db.SmallInteger)\n transporter = db.relationship('Transporter', uselist=False)\n consignor = db.relationship('Consignor', uselist=False)\n\n def __init__(self, acc, pwd):\n self.acc = acc\n self.pwd = pwd\n\n def __repr__(self):\n return '<Account %s %s>' % (str(self.id), self.acc)\n\n\nclass Transporter(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n d_lic = db.Column(db.String(50))\n v_lic = db.Column(db.String(50))\n account = db.relationship('Account', uselist=False)\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Transporter %s>' % str(self.id)\n\n\nclass Consignor(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n account = db.relationship('Account', uselist=False)\n indents = db.relationship('Indent', lazy='dynamic')\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Consignor %s>' % str(self.id)\n\n\nclass Convoy(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n account = db.relationship('Account', uselist=False)\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Convoy %s>' % str(self.id)\n",
"step-5": "from .. import db\n\n\nclass Account(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n acc = db.Column(db.String(50), unique=True)#TODO 调整长度\n pwd = db.Column(db.String(50))#TODO 调整长度\n name = db.Column(db.String(20))\n sex = db.Column(db.SmallInteger)\n idno = db.Column(db.String(20))\n phone = db.Column(db.String(20))\n crttime = db.Column(db.TIMESTAMP)\n crtip = db.Column(db.String(50))\n crtmac = db.Column(db.String(50))\n crtplat = db.Column(db.SmallInteger)\n crtrole = db.Column(db.SmallInteger)\n lasttime = db.Column(db.TIMESTAMP)\n lastip = db.Column(db.String(50))\n lastmac = db.Column(db.String(50))\n lastplat = db.Column(db.SmallInteger)\n lastrole = db.Column(db.SmallInteger)\n\n transporter = db.relationship('Transporter', uselist=False)\n consignor = db.relationship('Consignor', uselist=False)\n\n def __init__(self, acc, pwd):\n self.acc = acc\n self.pwd = pwd\n\n def __repr__(self):\n return '<Account %s %s>'%(str(self.id), self.acc)\n\n\nclass Transporter(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n d_lic = db.Column(db.String(50)) #TODO 长度\n v_lic = db.Column(db.String(50))\n\n account = db.relationship('Account', uselist=False)\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Transporter %s>'%str(self.id)\n\n\nclass Consignor(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n\n account = db.relationship('Account', uselist=False)\n indents = db.relationship('Indent', lazy='dynamic')\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Consignor %s>'%str(self.id)\n\n\nclass Convoy(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n\n account = db.relationship('Account', uselist=False)\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Convoy %s>'%str(self.id)\n",
"step-ids": [
8,
14,
15,
16,
18
]
}
|
[
8,
14,
15,
16,
18
] |