code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
import logging
import os
import time
import urllib
from collections import namedtuple
from statistics import mean
from urllib.request import urlopen
import bs4
import regex as re
from tika import parser
from scipy.stats import ks_2samp
import config
from TFU.trueformathtml import TrueFormatUpmarkerHTML
from TFU.trueformatpdf2htmlEX import TrueFormatUpmarkerPdf2HTMLEX
from helpers.str_tools import remove_ugly_chars
class PaperReader:
    """ multimedial extractor. it reads text from papers in pdfs, urls, html and other things.
    Formatting of text makes processing harder, text is cluttered up with remarks of the publisher on every page,
    page and line numbers and other stuff, that must be ignored with the processing, especially, when joining the
    texts of different pages, where sentences continue.
    detecting text by comparing to the letter distribution of normal prose to parts of the text extracted.
    """
    def __init__(self, _threshold=0.001, _length_limit=20000):
        """Load the word list and set up the PDF/HTML upmarkers.

        :param _threshold: p-value cutoff for the KS test against normal prose;
            paragraphs scoring at or below it are discarded by ``analyse``.
        :param _length_limit: maximum number of characters ``analyse`` returns.
        """
        with open(config.wordlist, 'r') as f:
            # keep only words of length >= 4 (shorter entries are too noisy to filter on)
            self.wordlist = [w for w in list(f.readlines()) if len(w) >= 4]
        self.tfu_pdf = TrueFormatUpmarkerPdf2HTMLEX()
        self.tfu_html = TrueFormatUpmarkerHTML()
        self.length_limit = _length_limit
        self.threshold = _threshold
        # Reference sample: letter distribution of ordinary English prose,
        # compared against extracted paragraphs via a two-sample KS test.
        self.normal_data = list(
            'used are variants of the predicate calculus. He even says, Lately '
            'those who think they ought to be so regarded seem to be winning. '
            'Under these circumstances, it does seem odd for McDermott to devote '
            'much space to complaining about the logical basis of a book whose '
            'very title proclaims it is about logical foundations. In any '
            'case, given such a title, it wouldnt seem necessary that readers '
            'should be warned that the foundations being explored are not '
            'In competition with this diversity is the idea of a unified model '
            'of inference. The desire for such a model is strong among those '
            'who study declarative representations, and Genesereth and Nilsson '
            'are no exception. As are most of their colleagues, they are drawn '
            'to the model of inference as the derivation of conclusions that '
            'are entailed by a set of beliefs. They wander from this idea in a '
            'few places but not for long. It is not hard to see why: Deduction '
            'is one of the fews kinds of inference for which we have an '
            'interesting general theory. '.lower()
        )
    def just_extract_text_from_html(self, adress):
        """Fetch ``adress`` (URL or local path) and return its visible text.

        :param adress: URL to download, or a local file path as fallback.
        :return str: text content extracted from the parsed HTML.
        """
        logging.info(f"extracting text from {adress}")
        try:
            # BUG FIX: the decoded payload is a plain str, which is not a
            # context manager — the old ``with ... as fdoc`` always raised
            # AttributeError. Also ``parent="lxml"`` was a bogus keyword;
            # BeautifulSoup's parser argument is ``features``.
            markup = urlopen(adress).read().decode('utf-8')
            soup = bs4.BeautifulSoup(markup, features="lxml")
            return self.get_only_real_words(soup.get_text(), self.wordlist)
        except ValueError:
            # urlopen raises ValueError for strings that are not URLs,
            # so fall back to treating ``adress`` as a local file path.
            with open(adress, "r") as fdoc:
                soup = bs4.BeautifulSoup(fdoc, features='lxml')
                return self.get_only_real_words(soup.get_text(), self.wordlist)
    def parse_file_format(self, adress):
        """Convert a pdf/html file into indexed HTML + JSON + plain text.

        Side effects: runs pdf2htmlEX (for pdfs), writes the indexed HTML,
        JSON and txt outputs, copies the HTML into the apache dir, and sets
        ``self.text`` and ``self.paths``.

        :param adress: path ending in 'pdf' or 'html'; anything else is logged
            and ignored.
        :return: None
        """
        if adress.endswith('pdf'):
            paths = self.pdfpath2htmlpaths(adress)
            if config.parse_pdf2htmlEX:
                # NOTE(review): shell interpolation of ``adress`` — a filename
                # containing quotes could break out of the command; consider
                # subprocess.run with an argument list.
                os.system(f"pdf2htmlEX "
                          f"--optimize-text 1 "
                          f"--fit-width {config.reader_width} "
                          f"\"{adress}\" \"{paths.html_before_indexing}\"")
            tfu = self.tfu_pdf
        elif adress.endswith('html'):
            tfu = self.tfu_html
            paths = self.htmlpath2htmlpaths(adress)
            logging.warning("trying with html...")
        else:
            logging.error(f"File '{adress}' could not be processed")
            return None
        tfu.convert_and_index(paths.html_before_indexing, paths.html_after_indexing)
        tfu.save_doc_json(paths.json_path)
        os.system(f"cp \"{paths.html_after_indexing}\" \"{paths.apache_path}\"")
        self.text = " ".join(list(tfu.indexed_words.values()))
        # needed for topic modelling
        with open(paths.txt_path, "w") as f:
            f.write(self.text)
        logging.debug(paths)
        self.paths = paths
        time.sleep(2)
        # BUG FIX: ``self.text[100:]`` logged everything *except* the first 100
        # characters; a preview of the first 100 characters was intended.
        logging.info(f"extracted text: {self.text[:100]}")
        return None
    def load_url(self, adress):
        """Download ``adress`` and extract its text into ``self.text`` via tika.

        :param adress: URL to download.
        """
        response = urllib.request.urlopen(adress)
        data = response.read()  # a `bytes` object
        self.text = parser.from_buffer(data)
    def analyse(self):
        """
        Extracts prose text from the loaded texts, that may contain line numbers somewhere, adresses, journal links etc.
        :return str: prose text
        """
        logging.info("transferring text to CorpusCook...")
        paragraphs = self.text.split('\n\n')
        print("mean length of splitted lines", (mean([len(p) for p in paragraphs])))
        # If TIKA resolved '\n'
        if (mean([len(p) for p in paragraphs])) > 80:
            # re-join words hyphenated across line breaks, then flatten newlines
            paragraphs = [re.sub(r"- *\n", '', p) for p in paragraphs]
            paragraphs = [p.replace('\n', " ") for p in paragraphs]
            paragraphs = [p.replace(';', " ") for p in paragraphs]
            joiner = " "
        else:
            # If TIKA did not
            joiner = " "
        # keep only paragraphs whose letter distribution resembles normal prose
        processed_text = joiner.join([p
                                      for p in paragraphs
                                      if
                                      p and
                                      ks_2samp(self.normal_data, list(p)).pvalue > self.threshold
                                      ]
                                     )
        return processed_text.strip()[:self.length_limit]
    # Bundle of all derived output paths for one processed document.
    DocPaths = namedtuple("DocPaths", ["html_before_indexing",
                                       "html_after_indexing",
                                       "apache_path",
                                       "json_path",
                                       "txt_path"])
    def pdfpath2htmlpaths(self, adress):
        """Derive the output paths for a pdf input file.

        :param adress: path to the source pdf.
        :return DocPaths: all derived output locations.
        """
        # file_extension = os.path.splitext(adress)[1] keep it, but unused
        # path = os.path.dirname(adress)
        filename = os.path.basename(adress)
        html_before_indexing = config.appcorpuscook_docs_document_dir + filename + ".html"
        # sanitize only AFTER the pre-indexing path: pdf2htmlEX writes the
        # unsanitized name, later outputs use the cleaned one
        filename = remove_ugly_chars(filename)
        html_after_indexing = config.appcorpuscook_docs_document_dir + filename + ".pdf2htmlEX.html"
        json_path = config.appcorpuscook_docs_json_dir + filename + ".json"
        txt_path = config.appcorpuscook_docs_txt_dir + filename + ".txt"
        apache_path = config.apache_dir_document + filename + ".html"
        return self.DocPaths(
            html_before_indexing,
            html_after_indexing,
            apache_path,
            json_path,
            txt_path)
    def get_only_real_words(self, text, wordlist):
        """Currently a pass-through; the wordlist filter is intentionally disabled."""
        return text #" ".join([word for word in text.split() if word in wordlist])
    def htmlpath2htmlpaths(self, adress):
        """Derive the output paths for an html input file.

        :param adress: path to the source html.
        :return DocPaths: all derived output locations.
        """
        filename = os.path.basename(adress)
        html_before_indexing = config.appcorpuscook_diff_document_dir + filename
        filename = remove_ugly_chars(filename)
        html_after_indexing = config.appcorpuscook_diff_html_dir + filename + ".pdf2htmlEX.html"
        json_path = config.appcorpuscook_diff_json_dir + filename + ".json"
        txt_path = config.appcorpuscook_docs_txt_dir + filename + ".txt"
        apache_path = config.apache_dir_document + filename + ".html"
        return self.DocPaths(
            html_before_indexing,
            html_after_indexing,
            apache_path,
            json_path,
            txt_path)
|
normal
|
{
"blob_id": "4d2cb3e0bdd331a1de7f07eb0109f02c9cf832a8",
"index": 7441,
"step-1": "<mask token>\n\n\nclass PaperReader:\n <mask token>\n\n def __init__(self, _threshold=0.001, _length_limit=20000):\n with open(config.wordlist, 'r') as f:\n self.wordlist = [w for w in list(f.readlines()) if len(w) >= 4]\n self.tfu_pdf = TrueFormatUpmarkerPdf2HTMLEX()\n self.tfu_html = TrueFormatUpmarkerHTML()\n self.length_limit = _length_limit\n self.threshold = _threshold\n self.normal_data = list(\n 'used are variants of the predicate calculus. He even says, Lately those who think they ought to be so regarded seem to be winning. Under these circumstances, it does seem odd for McDermott to devote much space to complaining about the logical basis of a book whose very title proclaims it is about logical foundations. In any case, given such a title, it wouldnt seem necessary that readers should be warned that the foundations being explored are not In competition with this diversity is the idea of a unified model of inference. The desire for such a model is strong among those who study declarative representations, and Genesereth and Nilsson are no exception. As are most of their colleagues, they are drawn to the model of inference as the derivation of conclusions that are entailed by a set of beliefs. They wander from this idea in a few places but not for long. It is not hard to see why: Deduction is one of the fews kinds of inference for which we have an interesting general theory. 
'\n .lower())\n\n def just_extract_text_from_html(self, adress):\n logging.info(f'extracting text from {adress}')\n try:\n with urlopen(adress).read().decode('utf-8') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, parent='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n except ValueError:\n with open(adress, 'r') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, features='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n\n def parse_file_format(self, adress):\n if adress.endswith('pdf'):\n paths = self.pdfpath2htmlpaths(adress)\n if config.parse_pdf2htmlEX:\n os.system(\n f'pdf2htmlEX --optimize-text 1 --fit-width {config.reader_width} \"{adress}\" \"{paths.html_before_indexing}\"'\n )\n tfu = self.tfu_pdf\n elif adress.endswith('html'):\n tfu = self.tfu_html\n paths = self.htmlpath2htmlpaths(adress)\n logging.warning('trying with html...')\n else:\n logging.error(f\"File '{adress}' could not be processed\")\n return None\n tfu.convert_and_index(paths.html_before_indexing, paths.\n html_after_indexing)\n tfu.save_doc_json(paths.json_path)\n os.system(f'cp \"{paths.html_after_indexing}\" \"{paths.apache_path}\"')\n self.text = ' '.join(list(tfu.indexed_words.values()))\n with open(paths.txt_path, 'w') as f:\n f.write(self.text)\n logging.debug(paths)\n self.paths = paths\n time.sleep(2)\n logging.info(f'extracted text: {self.text[100:]}')\n return None\n\n def load_url(self, adress):\n response = urllib.request.urlopen(adress)\n data = response.read()\n self.text = parser.from_buffer(data)\n\n def analyse(self):\n \"\"\"\n Extracts prose text from the loaded texts, that may contain line numbers somewhere, adresses, journal links etc.\n :return str: prose text\n \"\"\"\n logging.info('transferring text to CorpusCook...')\n paragraphs = self.text.split('\\n\\n')\n print('mean length of splitted lines', mean([len(p) for p in\n paragraphs]))\n if mean([len(p) for p in paragraphs]) > 80:\n paragraphs = [re.sub('- *\\\\n', '', p) for 
p in paragraphs]\n paragraphs = [p.replace('\\n', ' ') for p in paragraphs]\n paragraphs = [p.replace(';', ' ') for p in paragraphs]\n joiner = ' '\n else:\n joiner = ' '\n processed_text = joiner.join([p for p in paragraphs if p and \n ks_2samp(self.normal_data, list(p)).pvalue > self.threshold])\n return processed_text.strip()[:self.length_limit]\n <mask token>\n <mask token>\n\n def get_only_real_words(self, text, wordlist):\n return text\n\n def htmlpath2htmlpaths(self, adress):\n filename = os.path.basename(adress)\n html_before_indexing = (config.appcorpuscook_diff_document_dir +\n filename)\n filename = remove_ugly_chars(filename)\n html_after_indexing = (config.appcorpuscook_diff_html_dir +\n filename + '.pdf2htmlEX.html')\n json_path = config.appcorpuscook_diff_json_dir + filename + '.json'\n txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'\n apache_path = config.apache_dir_document + filename + '.html'\n return self.DocPaths(html_before_indexing, html_after_indexing,\n apache_path, json_path, txt_path)\n",
"step-2": "<mask token>\n\n\nclass PaperReader:\n <mask token>\n\n def __init__(self, _threshold=0.001, _length_limit=20000):\n with open(config.wordlist, 'r') as f:\n self.wordlist = [w for w in list(f.readlines()) if len(w) >= 4]\n self.tfu_pdf = TrueFormatUpmarkerPdf2HTMLEX()\n self.tfu_html = TrueFormatUpmarkerHTML()\n self.length_limit = _length_limit\n self.threshold = _threshold\n self.normal_data = list(\n 'used are variants of the predicate calculus. He even says, Lately those who think they ought to be so regarded seem to be winning. Under these circumstances, it does seem odd for McDermott to devote much space to complaining about the logical basis of a book whose very title proclaims it is about logical foundations. In any case, given such a title, it wouldnt seem necessary that readers should be warned that the foundations being explored are not In competition with this diversity is the idea of a unified model of inference. The desire for such a model is strong among those who study declarative representations, and Genesereth and Nilsson are no exception. As are most of their colleagues, they are drawn to the model of inference as the derivation of conclusions that are entailed by a set of beliefs. They wander from this idea in a few places but not for long. It is not hard to see why: Deduction is one of the fews kinds of inference for which we have an interesting general theory. 
'\n .lower())\n\n def just_extract_text_from_html(self, adress):\n logging.info(f'extracting text from {adress}')\n try:\n with urlopen(adress).read().decode('utf-8') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, parent='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n except ValueError:\n with open(adress, 'r') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, features='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n\n def parse_file_format(self, adress):\n if adress.endswith('pdf'):\n paths = self.pdfpath2htmlpaths(adress)\n if config.parse_pdf2htmlEX:\n os.system(\n f'pdf2htmlEX --optimize-text 1 --fit-width {config.reader_width} \"{adress}\" \"{paths.html_before_indexing}\"'\n )\n tfu = self.tfu_pdf\n elif adress.endswith('html'):\n tfu = self.tfu_html\n paths = self.htmlpath2htmlpaths(adress)\n logging.warning('trying with html...')\n else:\n logging.error(f\"File '{adress}' could not be processed\")\n return None\n tfu.convert_and_index(paths.html_before_indexing, paths.\n html_after_indexing)\n tfu.save_doc_json(paths.json_path)\n os.system(f'cp \"{paths.html_after_indexing}\" \"{paths.apache_path}\"')\n self.text = ' '.join(list(tfu.indexed_words.values()))\n with open(paths.txt_path, 'w') as f:\n f.write(self.text)\n logging.debug(paths)\n self.paths = paths\n time.sleep(2)\n logging.info(f'extracted text: {self.text[100:]}')\n return None\n\n def load_url(self, adress):\n response = urllib.request.urlopen(adress)\n data = response.read()\n self.text = parser.from_buffer(data)\n\n def analyse(self):\n \"\"\"\n Extracts prose text from the loaded texts, that may contain line numbers somewhere, adresses, journal links etc.\n :return str: prose text\n \"\"\"\n logging.info('transferring text to CorpusCook...')\n paragraphs = self.text.split('\\n\\n')\n print('mean length of splitted lines', mean([len(p) for p in\n paragraphs]))\n if mean([len(p) for p in paragraphs]) > 80:\n paragraphs = [re.sub('- *\\\\n', '', p) for 
p in paragraphs]\n paragraphs = [p.replace('\\n', ' ') for p in paragraphs]\n paragraphs = [p.replace(';', ' ') for p in paragraphs]\n joiner = ' '\n else:\n joiner = ' '\n processed_text = joiner.join([p for p in paragraphs if p and \n ks_2samp(self.normal_data, list(p)).pvalue > self.threshold])\n return processed_text.strip()[:self.length_limit]\n DocPaths = namedtuple('DocPaths', ['html_before_indexing',\n 'html_after_indexing', 'apache_path', 'json_path', 'txt_path'])\n\n def pdfpath2htmlpaths(self, adress):\n filename = os.path.basename(adress)\n html_before_indexing = (config.appcorpuscook_docs_document_dir +\n filename + '.html')\n filename = remove_ugly_chars(filename)\n html_after_indexing = (config.appcorpuscook_docs_document_dir +\n filename + '.pdf2htmlEX.html')\n json_path = config.appcorpuscook_docs_json_dir + filename + '.json'\n txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'\n apache_path = config.apache_dir_document + filename + '.html'\n return self.DocPaths(html_before_indexing, html_after_indexing,\n apache_path, json_path, txt_path)\n\n def get_only_real_words(self, text, wordlist):\n return text\n\n def htmlpath2htmlpaths(self, adress):\n filename = os.path.basename(adress)\n html_before_indexing = (config.appcorpuscook_diff_document_dir +\n filename)\n filename = remove_ugly_chars(filename)\n html_after_indexing = (config.appcorpuscook_diff_html_dir +\n filename + '.pdf2htmlEX.html')\n json_path = config.appcorpuscook_diff_json_dir + filename + '.json'\n txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'\n apache_path = config.apache_dir_document + filename + '.html'\n return self.DocPaths(html_before_indexing, html_after_indexing,\n apache_path, json_path, txt_path)\n",
"step-3": "<mask token>\n\n\nclass PaperReader:\n \"\"\" multimedial extractor. it reads text from papers in pdfs, urls, html and other things.\n\n Formatting of text makes processing harder, text is cluttered up with remarks of the punlisher on every page,\n page and line numbers and other stuff, that must be ignored with the processing, especially, when joining the\n texts of different pages, where sentences continue.\n\n detecting text by comparing to the letter distribution of normal prose to parts of the text extracted.\n \"\"\"\n\n def __init__(self, _threshold=0.001, _length_limit=20000):\n with open(config.wordlist, 'r') as f:\n self.wordlist = [w for w in list(f.readlines()) if len(w) >= 4]\n self.tfu_pdf = TrueFormatUpmarkerPdf2HTMLEX()\n self.tfu_html = TrueFormatUpmarkerHTML()\n self.length_limit = _length_limit\n self.threshold = _threshold\n self.normal_data = list(\n 'used are variants of the predicate calculus. He even says, Lately those who think they ought to be so regarded seem to be winning. Under these circumstances, it does seem odd for McDermott to devote much space to complaining about the logical basis of a book whose very title proclaims it is about logical foundations. In any case, given such a title, it wouldnt seem necessary that readers should be warned that the foundations being explored are not In competition with this diversity is the idea of a unified model of inference. The desire for such a model is strong among those who study declarative representations, and Genesereth and Nilsson are no exception. As are most of their colleagues, they are drawn to the model of inference as the derivation of conclusions that are entailed by a set of beliefs. They wander from this idea in a few places but not for long. It is not hard to see why: Deduction is one of the fews kinds of inference for which we have an interesting general theory. 
'\n .lower())\n\n def just_extract_text_from_html(self, adress):\n logging.info(f'extracting text from {adress}')\n try:\n with urlopen(adress).read().decode('utf-8') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, parent='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n except ValueError:\n with open(adress, 'r') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, features='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n\n def parse_file_format(self, adress):\n if adress.endswith('pdf'):\n paths = self.pdfpath2htmlpaths(adress)\n if config.parse_pdf2htmlEX:\n os.system(\n f'pdf2htmlEX --optimize-text 1 --fit-width {config.reader_width} \"{adress}\" \"{paths.html_before_indexing}\"'\n )\n tfu = self.tfu_pdf\n elif adress.endswith('html'):\n tfu = self.tfu_html\n paths = self.htmlpath2htmlpaths(adress)\n logging.warning('trying with html...')\n else:\n logging.error(f\"File '{adress}' could not be processed\")\n return None\n tfu.convert_and_index(paths.html_before_indexing, paths.\n html_after_indexing)\n tfu.save_doc_json(paths.json_path)\n os.system(f'cp \"{paths.html_after_indexing}\" \"{paths.apache_path}\"')\n self.text = ' '.join(list(tfu.indexed_words.values()))\n with open(paths.txt_path, 'w') as f:\n f.write(self.text)\n logging.debug(paths)\n self.paths = paths\n time.sleep(2)\n logging.info(f'extracted text: {self.text[100:]}')\n return None\n\n def load_url(self, adress):\n response = urllib.request.urlopen(adress)\n data = response.read()\n self.text = parser.from_buffer(data)\n\n def analyse(self):\n \"\"\"\n Extracts prose text from the loaded texts, that may contain line numbers somewhere, adresses, journal links etc.\n :return str: prose text\n \"\"\"\n logging.info('transferring text to CorpusCook...')\n paragraphs = self.text.split('\\n\\n')\n print('mean length of splitted lines', mean([len(p) for p in\n paragraphs]))\n if mean([len(p) for p in paragraphs]) > 80:\n paragraphs = [re.sub('- *\\\\n', '', p) for 
p in paragraphs]\n paragraphs = [p.replace('\\n', ' ') for p in paragraphs]\n paragraphs = [p.replace(';', ' ') for p in paragraphs]\n joiner = ' '\n else:\n joiner = ' '\n processed_text = joiner.join([p for p in paragraphs if p and \n ks_2samp(self.normal_data, list(p)).pvalue > self.threshold])\n return processed_text.strip()[:self.length_limit]\n DocPaths = namedtuple('DocPaths', ['html_before_indexing',\n 'html_after_indexing', 'apache_path', 'json_path', 'txt_path'])\n\n def pdfpath2htmlpaths(self, adress):\n filename = os.path.basename(adress)\n html_before_indexing = (config.appcorpuscook_docs_document_dir +\n filename + '.html')\n filename = remove_ugly_chars(filename)\n html_after_indexing = (config.appcorpuscook_docs_document_dir +\n filename + '.pdf2htmlEX.html')\n json_path = config.appcorpuscook_docs_json_dir + filename + '.json'\n txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'\n apache_path = config.apache_dir_document + filename + '.html'\n return self.DocPaths(html_before_indexing, html_after_indexing,\n apache_path, json_path, txt_path)\n\n def get_only_real_words(self, text, wordlist):\n return text\n\n def htmlpath2htmlpaths(self, adress):\n filename = os.path.basename(adress)\n html_before_indexing = (config.appcorpuscook_diff_document_dir +\n filename)\n filename = remove_ugly_chars(filename)\n html_after_indexing = (config.appcorpuscook_diff_html_dir +\n filename + '.pdf2htmlEX.html')\n json_path = config.appcorpuscook_diff_json_dir + filename + '.json'\n txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'\n apache_path = config.apache_dir_document + filename + '.html'\n return self.DocPaths(html_before_indexing, html_after_indexing,\n apache_path, json_path, txt_path)\n",
"step-4": "import logging\nimport os\nimport time\nimport urllib\nfrom collections import namedtuple\nfrom statistics import mean\nfrom urllib.request import urlopen\nimport bs4\nimport regex as re\nfrom tika import parser\nfrom scipy.stats import ks_2samp\nimport config\nfrom TFU.trueformathtml import TrueFormatUpmarkerHTML\nfrom TFU.trueformatpdf2htmlEX import TrueFormatUpmarkerPdf2HTMLEX\nfrom helpers.str_tools import remove_ugly_chars\n\n\nclass PaperReader:\n \"\"\" multimedial extractor. it reads text from papers in pdfs, urls, html and other things.\n\n Formatting of text makes processing harder, text is cluttered up with remarks of the punlisher on every page,\n page and line numbers and other stuff, that must be ignored with the processing, especially, when joining the\n texts of different pages, where sentences continue.\n\n detecting text by comparing to the letter distribution of normal prose to parts of the text extracted.\n \"\"\"\n\n def __init__(self, _threshold=0.001, _length_limit=20000):\n with open(config.wordlist, 'r') as f:\n self.wordlist = [w for w in list(f.readlines()) if len(w) >= 4]\n self.tfu_pdf = TrueFormatUpmarkerPdf2HTMLEX()\n self.tfu_html = TrueFormatUpmarkerHTML()\n self.length_limit = _length_limit\n self.threshold = _threshold\n self.normal_data = list(\n 'used are variants of the predicate calculus. He even says, Lately those who think they ought to be so regarded seem to be winning. Under these circumstances, it does seem odd for McDermott to devote much space to complaining about the logical basis of a book whose very title proclaims it is about logical foundations. In any case, given such a title, it wouldnt seem necessary that readers should be warned that the foundations being explored are not In competition with this diversity is the idea of a unified model of inference. The desire for such a model is strong among those who study declarative representations, and Genesereth and Nilsson are no exception. 
As are most of their colleagues, they are drawn to the model of inference as the derivation of conclusions that are entailed by a set of beliefs. They wander from this idea in a few places but not for long. It is not hard to see why: Deduction is one of the fews kinds of inference for which we have an interesting general theory. '\n .lower())\n\n def just_extract_text_from_html(self, adress):\n logging.info(f'extracting text from {adress}')\n try:\n with urlopen(adress).read().decode('utf-8') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, parent='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n except ValueError:\n with open(adress, 'r') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, features='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n\n def parse_file_format(self, adress):\n if adress.endswith('pdf'):\n paths = self.pdfpath2htmlpaths(adress)\n if config.parse_pdf2htmlEX:\n os.system(\n f'pdf2htmlEX --optimize-text 1 --fit-width {config.reader_width} \"{adress}\" \"{paths.html_before_indexing}\"'\n )\n tfu = self.tfu_pdf\n elif adress.endswith('html'):\n tfu = self.tfu_html\n paths = self.htmlpath2htmlpaths(adress)\n logging.warning('trying with html...')\n else:\n logging.error(f\"File '{adress}' could not be processed\")\n return None\n tfu.convert_and_index(paths.html_before_indexing, paths.\n html_after_indexing)\n tfu.save_doc_json(paths.json_path)\n os.system(f'cp \"{paths.html_after_indexing}\" \"{paths.apache_path}\"')\n self.text = ' '.join(list(tfu.indexed_words.values()))\n with open(paths.txt_path, 'w') as f:\n f.write(self.text)\n logging.debug(paths)\n self.paths = paths\n time.sleep(2)\n logging.info(f'extracted text: {self.text[100:]}')\n return None\n\n def load_url(self, adress):\n response = urllib.request.urlopen(adress)\n data = response.read()\n self.text = parser.from_buffer(data)\n\n def analyse(self):\n \"\"\"\n Extracts prose text from the loaded texts, that may contain line numbers 
somewhere, adresses, journal links etc.\n :return str: prose text\n \"\"\"\n logging.info('transferring text to CorpusCook...')\n paragraphs = self.text.split('\\n\\n')\n print('mean length of splitted lines', mean([len(p) for p in\n paragraphs]))\n if mean([len(p) for p in paragraphs]) > 80:\n paragraphs = [re.sub('- *\\\\n', '', p) for p in paragraphs]\n paragraphs = [p.replace('\\n', ' ') for p in paragraphs]\n paragraphs = [p.replace(';', ' ') for p in paragraphs]\n joiner = ' '\n else:\n joiner = ' '\n processed_text = joiner.join([p for p in paragraphs if p and \n ks_2samp(self.normal_data, list(p)).pvalue > self.threshold])\n return processed_text.strip()[:self.length_limit]\n DocPaths = namedtuple('DocPaths', ['html_before_indexing',\n 'html_after_indexing', 'apache_path', 'json_path', 'txt_path'])\n\n def pdfpath2htmlpaths(self, adress):\n filename = os.path.basename(adress)\n html_before_indexing = (config.appcorpuscook_docs_document_dir +\n filename + '.html')\n filename = remove_ugly_chars(filename)\n html_after_indexing = (config.appcorpuscook_docs_document_dir +\n filename + '.pdf2htmlEX.html')\n json_path = config.appcorpuscook_docs_json_dir + filename + '.json'\n txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'\n apache_path = config.apache_dir_document + filename + '.html'\n return self.DocPaths(html_before_indexing, html_after_indexing,\n apache_path, json_path, txt_path)\n\n def get_only_real_words(self, text, wordlist):\n return text\n\n def htmlpath2htmlpaths(self, adress):\n filename = os.path.basename(adress)\n html_before_indexing = (config.appcorpuscook_diff_document_dir +\n filename)\n filename = remove_ugly_chars(filename)\n html_after_indexing = (config.appcorpuscook_diff_html_dir +\n filename + '.pdf2htmlEX.html')\n json_path = config.appcorpuscook_diff_json_dir + filename + '.json'\n txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'\n apache_path = config.apache_dir_document + filename + '.html'\n return 
self.DocPaths(html_before_indexing, html_after_indexing,\n apache_path, json_path, txt_path)\n",
"step-5": "import logging\nimport os\nimport time\nimport urllib\nfrom collections import namedtuple\nfrom statistics import mean\nfrom urllib.request import urlopen\nimport bs4\nimport regex as re\nfrom tika import parser\nfrom scipy.stats import ks_2samp\n\nimport config\nfrom TFU.trueformathtml import TrueFormatUpmarkerHTML\nfrom TFU.trueformatpdf2htmlEX import TrueFormatUpmarkerPdf2HTMLEX\nfrom helpers.str_tools import remove_ugly_chars\n\nclass PaperReader:\n \"\"\" multimedial extractor. it reads text from papers in pdfs, urls, html and other things.\n\n Formatting of text makes processing harder, text is cluttered up with remarks of the punlisher on every page,\n page and line numbers and other stuff, that must be ignored with the processing, especially, when joining the\n texts of different pages, where sentences continue.\n\n detecting text by comparing to the letter distribution of normal prose to parts of the text extracted.\n \"\"\"\n\n def __init__(self, _threshold=0.001, _length_limit=20000):\n with open(config.wordlist, 'r') as f:\n self.wordlist = [w for w in list(f.readlines()) if len(w) >= 4]\n self.tfu_pdf = TrueFormatUpmarkerPdf2HTMLEX()\n self.tfu_html = TrueFormatUpmarkerHTML()\n\n self.length_limit = _length_limit\n self.threshold = _threshold\n self.normal_data = list(\n 'used are variants of the predicate calculus. He even says, Lately '\n 'those who think they ought to be so regarded seem to be winning. '\n 'Under these circumstances, it does seem odd for McDermott to devote '\n 'much space to complaining about the logical basis of a book whose '\n 'very title proclaims it is about logical foundations. In any '\n 'case, given such a title, it wouldnt seem necessary that readers '\n 'should be warned that the foundations being explored are not '\n 'In competition with this diversity is the idea of a unified model '\n 'of inference. 
The desire for such a model is strong among those '\n 'who study declarative representations, and Genesereth and Nilsson '\n 'are no exception. As are most of their colleagues, they are drawn '\n 'to the model of inference as the derivation of conclusions that '\n 'are entailed by a set of beliefs. They wander from this idea in a '\n 'few places but not for long. It is not hard to see why: Deduction '\n 'is one of the fews kinds of inference for which we have an '\n 'interesting general theory. '.lower()\n )\n\n def just_extract_text_from_html(self, adress):\n logging.info(f\"extracting text from {adress}\")\n try:\n with urlopen(adress).read().decode('utf-8') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, parent=\"lxml\")\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n except ValueError:\n with open(adress, \"r\") as fdoc:\n soup = bs4.BeautifulSoup(fdoc, features='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n\n def parse_file_format(self, adress):\n if adress.endswith('pdf'):\n paths = self.pdfpath2htmlpaths(adress)\n\n if config.parse_pdf2htmlEX:\n os.system(f\"pdf2htmlEX \"\n f\"--optimize-text 1 \"\n f\"--fit-width {config.reader_width} \"\n f\"\\\"{adress}\\\" \\\"{paths.html_before_indexing}\\\"\")\n tfu = self.tfu_pdf\n elif adress.endswith('html'):\n tfu = self.tfu_html\n paths = self.htmlpath2htmlpaths(adress)\n logging.warning(\"trying with html...\")\n else:\n logging.error(f\"File '{adress}' could not be processed\")\n return None\n\n tfu.convert_and_index(paths.html_before_indexing, paths.html_after_indexing)\n tfu.save_doc_json(paths.json_path)\n os.system(f\"cp \\\"{paths.html_after_indexing}\\\" \\\"{paths.apache_path}\\\"\")\n self.text = \" \".join(list(tfu.indexed_words.values()))\n\n\n # needed for topic modelling\n with open(paths.txt_path, \"w\") as f:\n f.write(self.text)\n logging.debug(paths)\n self.paths = paths\n time.sleep(2)\n\n logging.info(f\"extracted text: {self.text[100:]}\")\n return 
None\n\n def load_url(self, adress):\n response = urllib.request.urlopen(adress)\n data = response.read() # a `bytes` object\n self.text = parser.from_buffer(data)\n\n def analyse(self):\n \"\"\"\n Extracts prose text from the loaded texts, that may contain line numbers somewhere, adresses, journal links etc.\n :return str: prose text\n \"\"\"\n logging.info(\"transferring text to CorpusCook...\")\n\n paragraphs = self.text.split('\\n\\n')\n print(\"mean length of splitted lines\", (mean([len(p) for p in paragraphs])))\n\n # If TIKA resolved '\\n'\n if (mean([len(p) for p in paragraphs])) > 80:\n paragraphs = [re.sub(r\"- *\\n\", '', p) for p in paragraphs]\n paragraphs = [p.replace('\\n', \" \") for p in paragraphs]\n paragraphs = [p.replace(';', \" \") for p in paragraphs]\n joiner = \" \"\n else:\n # If TIKA did not\n joiner = \" \"\n\n processed_text = joiner.join([p\n for p in paragraphs\n if\n p and\n ks_2samp(self.normal_data, list(p)).pvalue > self.threshold\n ]\n )\n\n return processed_text.strip()[:self.length_limit]\n\n DocPaths = namedtuple(\"DocPaths\", [\"html_before_indexing\",\n \"html_after_indexing\",\n \"apache_path\",\n \"json_path\",\n \"txt_path\"])\n\n def pdfpath2htmlpaths(self, adress):\n # file_extension = os.path.splitext(adress)[1] keep it, but unused\n # path = os.path.dirname(adress)\n filename = os.path.basename(adress)\n html_before_indexing = config.appcorpuscook_docs_document_dir + filename + \".html\"\n filename = remove_ugly_chars(filename)\n html_after_indexing = config.appcorpuscook_docs_document_dir + filename + \".pdf2htmlEX.html\"\n json_path = config.appcorpuscook_docs_json_dir + filename + \".json\"\n txt_path = config.appcorpuscook_docs_txt_dir + filename + \".txt\"\n apache_path = config.apache_dir_document + filename + \".html\"\n\n return self.DocPaths(\n html_before_indexing,\n html_after_indexing,\n apache_path,\n json_path,\n txt_path)\n\n def get_only_real_words(self, text, wordlist):\n return text #\" 
\".join([word for word in text.split() if word in wordlist])\n\n def htmlpath2htmlpaths(self, adress):\n filename = os.path.basename(adress)\n html_before_indexing = config.appcorpuscook_diff_document_dir + filename\n filename = remove_ugly_chars(filename)\n html_after_indexing = config.appcorpuscook_diff_html_dir + filename + \".pdf2htmlEX.html\"\n json_path = config.appcorpuscook_diff_json_dir + filename + \".json\"\n txt_path = config.appcorpuscook_docs_txt_dir + filename + \".txt\"\n apache_path = config.apache_dir_document + filename + \".html\"\n\n return self.DocPaths(\n html_before_indexing,\n html_after_indexing,\n apache_path,\n json_path,\n txt_path)\n\n",
"step-ids": [
8,
10,
11,
12,
13
]
}
|
[
8,
10,
11,
12,
13
] |
'''
Created on Dec 18, 2011
@author: ppa
'''
import unittest
from ultrafinance.pyTaLib.indicator import Sma
class testPyTaLib(unittest.TestCase):
    """Unit tests for the pyTaLib technical-analysis indicators."""

    def setUp(self):
        # No per-test fixtures required.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testSma(self):
        # Feed 1..5 into a 3-period simple moving average and compare the
        # running results. NOTE(review): before the window fills, Sma
        # presumably averages the values seen so far (1 -> 1, then (1+2)/2),
        # afterwards the last 3 values -- confirm against Sma's implementation.
        sma = Sma(period = 3)
        expectedAvgs = [1, 1.5, 2, 3, 4]
        for index, number in enumerate(range(1, 6) ):
            self.assertEqual(expectedAvgs[index], sma(number))
|
normal
|
{
"blob_id": "fcd2bd91dff3193c661d71ade8039765f8498fd4",
"index": 8317,
"step-1": "<mask token>\n\n\nclass testPyTaLib(unittest.TestCase):\n\n def setUp(self):\n pass\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass testPyTaLib(unittest.TestCase):\n\n def setUp(self):\n pass\n <mask token>\n\n def testSma(self):\n sma = Sma(period=3)\n expectedAvgs = [1, 1.5, 2, 3, 4]\n for index, number in enumerate(range(1, 6)):\n self.assertEqual(expectedAvgs[index], sma(number))\n",
"step-3": "<mask token>\n\n\nclass testPyTaLib(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testSma(self):\n sma = Sma(period=3)\n expectedAvgs = [1, 1.5, 2, 3, 4]\n for index, number in enumerate(range(1, 6)):\n self.assertEqual(expectedAvgs[index], sma(number))\n",
"step-4": "<mask token>\nimport unittest\nfrom ultrafinance.pyTaLib.indicator import Sma\n\n\nclass testPyTaLib(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testSma(self):\n sma = Sma(period=3)\n expectedAvgs = [1, 1.5, 2, 3, 4]\n for index, number in enumerate(range(1, 6)):\n self.assertEqual(expectedAvgs[index], sma(number))\n",
"step-5": "'''\nCreated on Dec 18, 2011\n\n@author: ppa\n'''\nimport unittest\nfrom ultrafinance.pyTaLib.indicator import Sma\n\nclass testPyTaLib(unittest.TestCase):\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testSma(self):\n sma = Sma(period = 3)\n expectedAvgs = [1, 1.5, 2, 3, 4]\n for index, number in enumerate(range(1, 6) ):\n self.assertEqual(expectedAvgs[index], sma(number))\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 Christiaan Frans Rademan <chris@fwiw.co.za>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
from luxon import register
from luxon import router
from luxon.helpers.api import sql_list, obj
from infinitystone.models.roles import infinitystone_role
@register.resources()
class Roles(object):
    """REST resource exposing CRUD endpoints for roles.

    Routes are registered on construction; viewing requires the
    'roles:view' tag, mutation requires 'roles:admin'.
    """

    def __init__(self):
        # Wire every role endpoint into the shared application router.
        router.add('GET', '/v1/role/{id}', self.role, tag='roles:view')
        router.add('GET', '/v1/roles', self.roles, tag='roles:view')
        router.add('POST', '/v1/role', self.create, tag='roles:admin')
        router.add(['PUT', 'PATCH'], '/v1/role/{id}', self.update,
                   tag='roles:admin')
        router.add('DELETE', '/v1/role/{id}', self.delete, tag='roles:admin')

    def role(self, req, resp, id):
        """Return a single role looked up by its primary key."""
        return obj(req, infinitystone_role, sql_id=id)

    def roles(self, req, resp):
        """Return a listing of roles, filterable by id or name."""
        return sql_list(req, 'infinitystone_role',
                        search={'id': str,
                                'name': str})

    def create(self, req, resp):
        """Create a role from the request body and persist it."""
        new_role = obj(req, infinitystone_role)
        new_role.commit()
        return new_role

    def update(self, req, resp, id):
        """Apply the request body to an existing role and persist it."""
        existing = obj(req, infinitystone_role, sql_id=id)
        existing.commit()
        return existing

    def delete(self, req, resp, id):
        # NOTE(review): obj() presumably applies DELETE semantics based on the
        # request method and commit() persists the removal -- confirm against
        # the luxon helpers; otherwise this would be a copy-paste of update().
        target = obj(req, infinitystone_role, sql_id=id)
        target.commit()
        return target
|
normal
|
{
"blob_id": "13e27c29839286988b37d2d3685f54d42fd57973",
"index": 9773,
"step-1": "<mask token>\n\n\n@register.resources()\nclass Roles(object):\n <mask token>\n\n def role(self, req, resp, id):\n return obj(req, infinitystone_role, sql_id=id)\n\n def roles(self, req, resp):\n return sql_list(req, 'infinitystone_role', search={'id': str,\n 'name': str})\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\n@register.resources()\nclass Roles(object):\n <mask token>\n\n def role(self, req, resp, id):\n return obj(req, infinitystone_role, sql_id=id)\n\n def roles(self, req, resp):\n return sql_list(req, 'infinitystone_role', search={'id': str,\n 'name': str})\n <mask token>\n\n def update(self, req, resp, id):\n role = obj(req, infinitystone_role, sql_id=id)\n role.commit()\n return role\n\n def delete(self, req, resp, id):\n role = obj(req, infinitystone_role, sql_id=id)\n role.commit()\n return role\n",
"step-3": "<mask token>\n\n\n@register.resources()\nclass Roles(object):\n\n def __init__(self):\n router.add('GET', '/v1/role/{id}', self.role, tag='roles:view')\n router.add('GET', '/v1/roles', self.roles, tag='roles:view')\n router.add('POST', '/v1/role', self.create, tag='roles:admin')\n router.add(['PUT', 'PATCH'], '/v1/role/{id}', self.update, tag=\n 'roles:admin')\n router.add('DELETE', '/v1/role/{id}', self.delete, tag='roles:admin')\n\n def role(self, req, resp, id):\n return obj(req, infinitystone_role, sql_id=id)\n\n def roles(self, req, resp):\n return sql_list(req, 'infinitystone_role', search={'id': str,\n 'name': str})\n <mask token>\n\n def update(self, req, resp, id):\n role = obj(req, infinitystone_role, sql_id=id)\n role.commit()\n return role\n\n def delete(self, req, resp, id):\n role = obj(req, infinitystone_role, sql_id=id)\n role.commit()\n return role\n",
"step-4": "from luxon import register\nfrom luxon import router\nfrom luxon.helpers.api import sql_list, obj\nfrom infinitystone.models.roles import infinitystone_role\n\n\n@register.resources()\nclass Roles(object):\n\n def __init__(self):\n router.add('GET', '/v1/role/{id}', self.role, tag='roles:view')\n router.add('GET', '/v1/roles', self.roles, tag='roles:view')\n router.add('POST', '/v1/role', self.create, tag='roles:admin')\n router.add(['PUT', 'PATCH'], '/v1/role/{id}', self.update, tag=\n 'roles:admin')\n router.add('DELETE', '/v1/role/{id}', self.delete, tag='roles:admin')\n\n def role(self, req, resp, id):\n return obj(req, infinitystone_role, sql_id=id)\n\n def roles(self, req, resp):\n return sql_list(req, 'infinitystone_role', search={'id': str,\n 'name': str})\n\n def create(self, req, resp):\n role = obj(req, infinitystone_role)\n role.commit()\n return role\n\n def update(self, req, resp, id):\n role = obj(req, infinitystone_role, sql_id=id)\n role.commit()\n return role\n\n def delete(self, req, resp, id):\n role = obj(req, infinitystone_role, sql_id=id)\n role.commit()\n return role\n",
"step-5": "# -*- coding: utf-8 -*-\n# Copyright (c) 2018-2020 Christiaan Frans Rademan <chris@fwiw.co.za>.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holders nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n# THE POSSIBILITY OF SUCH DAMAGE.\nfrom luxon import register\nfrom luxon import router\nfrom luxon.helpers.api import sql_list, obj\n\nfrom infinitystone.models.roles import infinitystone_role\n\n\n@register.resources()\nclass Roles(object):\n def __init__(self):\n router.add('GET', '/v1/role/{id}', self.role,\n tag='roles:view')\n router.add('GET', '/v1/roles', self.roles,\n tag='roles:view')\n router.add('POST', '/v1/role', self.create,\n tag='roles:admin')\n router.add(['PUT', 'PATCH'], '/v1/role/{id}', self.update,\n tag='roles:admin')\n router.add('DELETE', '/v1/role/{id}', self.delete,\n tag='roles:admin')\n\n def role(self, req, resp, id):\n return obj(req, infinitystone_role, sql_id=id)\n\n def roles(self, req, resp):\n return sql_list(req, 'infinitystone_role',\n search={'id': str,\n 'name': str})\n\n def create(self, req, resp):\n role = obj(req, infinitystone_role)\n role.commit()\n return role\n\n def update(self, req, resp, id):\n role = obj(req, infinitystone_role, sql_id=id)\n role.commit()\n return role\n\n def delete(self, req, resp, id):\n role = obj(req, infinitystone_role, sql_id=id)\n role.commit()\n return role\n",
"step-ids": [
3,
5,
6,
8,
9
]
}
|
[
3,
5,
6,
8,
9
] |
from base64 import b64encode
from configparser import ConfigParser
import functools
from flask import (
Blueprint, flash, redirect, render_template, request, session, url_for, app
)
from requests.exceptions import SSLError
import spotipy
from spotipy import oauth2
# Blueprint hosting all authentication views under /auth.
bp = Blueprint('auth', __name__, url_prefix='/auth')
# Spotify API credentials are read from spotify.cfg; values are stored quoted
# in the file, so the surrounding single quotes are stripped here.
config = ConfigParser()
config.read('spotify.cfg')
CLIENT_ID = config.get('SPOTIFY', 'CLIENT_ID').strip("'")
CLIENT_SECRET = config.get('SPOTIFY', 'CLIENT_SECRET').strip("'")
REDIRECT_URI = config.get('SPOTIFY', 'REDIRECT_URI').strip("'")
# OAuth scopes requested from the user on login.
SCOPE = 'user-read-currently-playing user-library-read playlist-read-private'
# Single module-level OAuth helper shared by all views.
SP_OAUTH = oauth2.SpotifyOAuth(CLIENT_ID, CLIENT_SECRET, REDIRECT_URI, scope=SCOPE)
@bp.route('/login')
def login():
    '''
    : Create session and login user
    : PARAMS None
    : RETURN <view>
    '''
    try:
        # Start from a clean session, then hand the user to Spotify's
        # authorization page.
        session.clear()
        return redirect(SP_OAUTH.get_authorize_url())
    except ConnectionError as e:
        flash("Connection error")
        # Bug fix: a Flask view must return a response.  Previously this
        # branch fell through and returned None, which Flask turns into a
        # 500 error; send the user back home instead.
        return redirect(url_for('home'))
@bp.route('/callback/')
def callback():
    '''
    : Redirect user after login
    : PARAMS None
    : RETURN <view>
    '''
    # Exchange the authorization code Spotify handed back for tokens.
    auth_code = request.args.get('code')
    token_info = SP_OAUTH.get_access_token(auth_code)
    if not token_info:
        flash("Cannot get access token")
        return redirect(url_for('home'))
    # Store both tokens in the session, then try to record the user's
    # display name (best effort -- an SSL failure is silently tolerated).
    session['token'] = token_info['access_token']
    session['refresh'] = token_info['refresh_token']
    client = spotipy.Spotify(auth=session['token'])
    try:
        profile = client.current_user()
        session['display_name'] = profile['display_name']
    except SSLError:
        pass
    return redirect(url_for('home'))
@bp.route('/logout')
def logout():
    '''
    : Clear session and log user out
    : PARAMS None
    : RETURN <view>
    '''
    # Dropping the session is all that is needed to log out; tokens live
    # nowhere else.
    session.clear()
    return redirect(url_for('home'))
def login_required(view):
    """Decorator: refresh the Spotify access token before each request.

    If no refresh token is in the session, or the refreshed credentials
    cannot be validated, the user is redirected to the home page instead
    of reaching the wrapped view.
    """
    @functools.wraps(view)
    def wrapped_view(**kwargs):
        # Guard clause: without a refresh token the user is not logged in.
        if 'refresh' not in session:
            return redirect(url_for('home'))
        tokens = SP_OAUTH.refresh_access_token(session['refresh'])
        session['token'] = tokens['access_token']
        session['refresh'] = tokens['refresh_token']
        client = spotipy.Spotify(auth=session['token'])
        try:
            profile = client.current_user()
            session['display_name'] = profile['display_name']
        except SSLError:
            return redirect(url_for('home'))
        return view(**kwargs)
    return wrapped_view
|
normal
|
{
"blob_id": "8f7ecbe03e9a7a1d9df8cbe4596456e21b84653b",
"index": 9114,
"step-1": "<mask token>\n\n\n@bp.route('/login')\ndef login():\n \"\"\"\n : Create session and login user\n : PARAMS None\n : RETURN <view>\n \"\"\"\n try:\n session.clear()\n return redirect(SP_OAUTH.get_authorize_url())\n except ConnectionError as e:\n flash('Connection error')\n\n\n@bp.route('/callback/')\ndef callback():\n \"\"\"\n : Redirect user after login\n : PARAMS None\n : RETURN <view>\n \"\"\"\n code = request.args.get('code')\n token = SP_OAUTH.get_access_token(code)\n if token:\n session['token'] = token['access_token']\n session['refresh'] = token['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError as e:\n return redirect(url_for('home'))\n else:\n flash('Cannot get access token')\n return redirect(url_for('home'))\n\n\n@bp.route('/logout')\ndef logout():\n \"\"\"\n : Clear session and log user out\n : PARAMS None\n : RETURN <view>\n \"\"\"\n session.clear()\n return redirect(url_for('home'))\n\n\ndef login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if 'refresh' in session:\n refresh = SP_OAUTH.refresh_access_token(session['refresh'])\n session['token'] = refresh['access_token']\n session['refresh'] = refresh['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError:\n return redirect(url_for('home'))\n return view(**kwargs)\n else:\n return redirect(url_for('home'))\n return wrapped_view\n",
"step-2": "<mask token>\nconfig.read('spotify.cfg')\n<mask token>\n\n\n@bp.route('/login')\ndef login():\n \"\"\"\n : Create session and login user\n : PARAMS None\n : RETURN <view>\n \"\"\"\n try:\n session.clear()\n return redirect(SP_OAUTH.get_authorize_url())\n except ConnectionError as e:\n flash('Connection error')\n\n\n@bp.route('/callback/')\ndef callback():\n \"\"\"\n : Redirect user after login\n : PARAMS None\n : RETURN <view>\n \"\"\"\n code = request.args.get('code')\n token = SP_OAUTH.get_access_token(code)\n if token:\n session['token'] = token['access_token']\n session['refresh'] = token['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError as e:\n return redirect(url_for('home'))\n else:\n flash('Cannot get access token')\n return redirect(url_for('home'))\n\n\n@bp.route('/logout')\ndef logout():\n \"\"\"\n : Clear session and log user out\n : PARAMS None\n : RETURN <view>\n \"\"\"\n session.clear()\n return redirect(url_for('home'))\n\n\ndef login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if 'refresh' in session:\n refresh = SP_OAUTH.refresh_access_token(session['refresh'])\n session['token'] = refresh['access_token']\n session['refresh'] = refresh['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError:\n return redirect(url_for('home'))\n return view(**kwargs)\n else:\n return redirect(url_for('home'))\n return wrapped_view\n",
"step-3": "<mask token>\nbp = Blueprint('auth', __name__, url_prefix='/auth')\nconfig = ConfigParser()\nconfig.read('spotify.cfg')\nCLIENT_ID = config.get('SPOTIFY', 'CLIENT_ID').strip(\"'\")\nCLIENT_SECRET = config.get('SPOTIFY', 'CLIENT_SECRET').strip(\"'\")\nREDIRECT_URI = config.get('SPOTIFY', 'REDIRECT_URI').strip(\"'\")\nSCOPE = 'user-read-currently-playing user-library-read playlist-read-private'\nSP_OAUTH = oauth2.SpotifyOAuth(CLIENT_ID, CLIENT_SECRET, REDIRECT_URI,\n scope=SCOPE)\n\n\n@bp.route('/login')\ndef login():\n \"\"\"\n : Create session and login user\n : PARAMS None\n : RETURN <view>\n \"\"\"\n try:\n session.clear()\n return redirect(SP_OAUTH.get_authorize_url())\n except ConnectionError as e:\n flash('Connection error')\n\n\n@bp.route('/callback/')\ndef callback():\n \"\"\"\n : Redirect user after login\n : PARAMS None\n : RETURN <view>\n \"\"\"\n code = request.args.get('code')\n token = SP_OAUTH.get_access_token(code)\n if token:\n session['token'] = token['access_token']\n session['refresh'] = token['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError as e:\n return redirect(url_for('home'))\n else:\n flash('Cannot get access token')\n return redirect(url_for('home'))\n\n\n@bp.route('/logout')\ndef logout():\n \"\"\"\n : Clear session and log user out\n : PARAMS None\n : RETURN <view>\n \"\"\"\n session.clear()\n return redirect(url_for('home'))\n\n\ndef login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if 'refresh' in session:\n refresh = SP_OAUTH.refresh_access_token(session['refresh'])\n session['token'] = refresh['access_token']\n session['refresh'] = refresh['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError:\n return redirect(url_for('home'))\n return view(**kwargs)\n else:\n return 
redirect(url_for('home'))\n return wrapped_view\n",
"step-4": "from base64 import b64encode\nfrom configparser import ConfigParser\nimport functools\nfrom flask import Blueprint, flash, redirect, render_template, request, session, url_for, app\nfrom requests.exceptions import SSLError\nimport spotipy\nfrom spotipy import oauth2\nbp = Blueprint('auth', __name__, url_prefix='/auth')\nconfig = ConfigParser()\nconfig.read('spotify.cfg')\nCLIENT_ID = config.get('SPOTIFY', 'CLIENT_ID').strip(\"'\")\nCLIENT_SECRET = config.get('SPOTIFY', 'CLIENT_SECRET').strip(\"'\")\nREDIRECT_URI = config.get('SPOTIFY', 'REDIRECT_URI').strip(\"'\")\nSCOPE = 'user-read-currently-playing user-library-read playlist-read-private'\nSP_OAUTH = oauth2.SpotifyOAuth(CLIENT_ID, CLIENT_SECRET, REDIRECT_URI,\n scope=SCOPE)\n\n\n@bp.route('/login')\ndef login():\n \"\"\"\n : Create session and login user\n : PARAMS None\n : RETURN <view>\n \"\"\"\n try:\n session.clear()\n return redirect(SP_OAUTH.get_authorize_url())\n except ConnectionError as e:\n flash('Connection error')\n\n\n@bp.route('/callback/')\ndef callback():\n \"\"\"\n : Redirect user after login\n : PARAMS None\n : RETURN <view>\n \"\"\"\n code = request.args.get('code')\n token = SP_OAUTH.get_access_token(code)\n if token:\n session['token'] = token['access_token']\n session['refresh'] = token['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError as e:\n return redirect(url_for('home'))\n else:\n flash('Cannot get access token')\n return redirect(url_for('home'))\n\n\n@bp.route('/logout')\ndef logout():\n \"\"\"\n : Clear session and log user out\n : PARAMS None\n : RETURN <view>\n \"\"\"\n session.clear()\n return redirect(url_for('home'))\n\n\ndef login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if 'refresh' in session:\n refresh = SP_OAUTH.refresh_access_token(session['refresh'])\n session['token'] = refresh['access_token']\n session['refresh'] = 
refresh['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError:\n return redirect(url_for('home'))\n return view(**kwargs)\n else:\n return redirect(url_for('home'))\n return wrapped_view\n",
"step-5": "from base64 import b64encode\nfrom configparser import ConfigParser\nimport functools\nfrom flask import (\n Blueprint, flash, redirect, render_template, request, session, url_for, app\n)\nfrom requests.exceptions import SSLError\nimport spotipy\nfrom spotipy import oauth2\n\nbp = Blueprint('auth', __name__, url_prefix='/auth')\nconfig = ConfigParser()\nconfig.read('spotify.cfg')\nCLIENT_ID = config.get('SPOTIFY', 'CLIENT_ID').strip(\"'\")\nCLIENT_SECRET = config.get('SPOTIFY', 'CLIENT_SECRET').strip(\"'\")\nREDIRECT_URI = config.get('SPOTIFY', 'REDIRECT_URI').strip(\"'\")\nSCOPE = 'user-read-currently-playing user-library-read playlist-read-private'\nSP_OAUTH = oauth2.SpotifyOAuth(CLIENT_ID, CLIENT_SECRET, REDIRECT_URI, scope=SCOPE)\n\n\n@bp.route('/login')\ndef login():\n '''\n : Create session and login user\n : PARAMS None\n : RETURN <view>\n '''\n try:\n session.clear()\n return redirect(SP_OAUTH.get_authorize_url())\n except ConnectionError as e:\n flash(\"Connection error\")\n\n\n@bp.route('/callback/')\ndef callback():\n '''\n : Redirect user after login\n : PARAMS None\n : RETURN <view>\n '''\n code = request.args.get('code')\n token = SP_OAUTH.get_access_token(code)\n if token:\n session['token'] = token['access_token']\n session['refresh'] = token['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError as e:\n # flash(\"Connection error\")\n return redirect(url_for('home'))\n else:\n flash(\"Cannot get access token\")\n return redirect(url_for('home'))\n\n@bp.route('/logout')\ndef logout():\n '''\n : Clear session and log user out\n : PARAMS None\n : RETURN <view>\n '''\n session.clear()\n return redirect(url_for('home'))\n\ndef login_required(view):\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n\n if 'refresh' in session:\n refresh = SP_OAUTH.refresh_access_token(session['refresh'])\n session['token'] = refresh['access_token']\n 
session['refresh'] = refresh['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError:\n # flash(\"Connection error - please try again.\")\n return redirect(url_for('home'))\n return view(**kwargs)\n else:\n return redirect(url_for('home'))\n\n return wrapped_view\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
websocket_urlpatterns = [path('ws/notifications', NotificationsConsumer)]
<|reserved_special_token_1|>
from django.urls import path
from .consumers import NotificationsConsumer
websocket_urlpatterns = [path('ws/notifications', NotificationsConsumer)]
<|reserved_special_token_1|>
from django.urls import path
from .consumers import NotificationsConsumer
websocket_urlpatterns = [
path('ws/notifications', NotificationsConsumer),
]
|
flexible
|
{
"blob_id": "31e5b249516f4e9d57d8fd82713966a69e0516b4",
"index": 9185,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwebsocket_urlpatterns = [path('ws/notifications', NotificationsConsumer)]\n",
"step-3": "from django.urls import path\nfrom .consumers import NotificationsConsumer\nwebsocket_urlpatterns = [path('ws/notifications', NotificationsConsumer)]\n",
"step-4": "from django.urls import path\n\nfrom .consumers import NotificationsConsumer\n\nwebsocket_urlpatterns = [\n path('ws/notifications', NotificationsConsumer),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def bits2str(bits):
ret = ''
for row in bits:
rowstr = ''
for bit in row:
rowstr += '1' if bit else '0'
ret += rowstr + '\n'
return ret
def parse_xyi(inp):
xpos = inp.find('X')
ypos = inp.find('Y')
ipos = inp.find('I')
assert xpos >= 0
assert ypos > xpos
assert ipos > ypos
return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:ipos]), int(inp[ipos + 1:]
)
def parse_xysi(inp):
xpos = inp.find('X')
ypos = inp.find('Y')
spos = inp.find('S')
ipos = inp.find('I')
assert xpos >= 0
assert ypos > xpos
assert spos > ypos
assert ipos > spos
sval = int(inp[spos + 1:ipos])
assert sval == 0
return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:spos]), int(inp[ipos + 1:]
)
def anybits(bits):
for y in bits:
for x in y:
if not x:
return True
return False
def decodemux(bits):
A = not bits[0][0]
B = not bits[0][1]
C = not bits[0][2]
D = not bits[0][3]
E = not bits[1][0]
F = not bits[1][1]
G = not bits[1][2]
H = not bits[1][3]
assert G + C + D + H == 1
assert A + B + E + F == 1 or A + B + E + F == 0 and G
if G:
assert A + B + C + D + E + F + H == 0
if G:
return 0
if C:
if A:
return 1
if B:
return 2
if E:
return 3
if F:
return 4
if D:
if A:
return 5
if B:
return 6
if E:
return 7
if F:
return 8
if H:
if A:
return 9
if B:
return 10
if E:
return 11
if F:
return 12
<|reserved_special_token_0|>
def fliph(muxbits):
return [x[::-1] for x in muxbits]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def bits2str(bits):
ret = ''
for row in bits:
rowstr = ''
for bit in row:
rowstr += '1' if bit else '0'
ret += rowstr + '\n'
return ret
def parse_xyi(inp):
xpos = inp.find('X')
ypos = inp.find('Y')
ipos = inp.find('I')
assert xpos >= 0
assert ypos > xpos
assert ipos > ypos
return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:ipos]), int(inp[ipos + 1:]
)
def parse_xysi(inp):
xpos = inp.find('X')
ypos = inp.find('Y')
spos = inp.find('S')
ipos = inp.find('I')
assert xpos >= 0
assert ypos > xpos
assert spos > ypos
assert ipos > spos
sval = int(inp[spos + 1:ipos])
assert sval == 0
return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:spos]), int(inp[ipos + 1:]
)
def anybits(bits):
for y in bits:
for x in y:
if not x:
return True
return False
def decodemux(bits):
A = not bits[0][0]
B = not bits[0][1]
C = not bits[0][2]
D = not bits[0][3]
E = not bits[1][0]
F = not bits[1][1]
G = not bits[1][2]
H = not bits[1][3]
assert G + C + D + H == 1
assert A + B + E + F == 1 or A + B + E + F == 0 and G
if G:
assert A + B + C + D + E + F + H == 0
if G:
return 0
if C:
if A:
return 1
if B:
return 2
if E:
return 3
if F:
return 4
if D:
if A:
return 5
if B:
return 6
if E:
return 7
if F:
return 8
if H:
if A:
return 9
if B:
return 10
if E:
return 11
if F:
return 12
def flipv(muxbits):
return muxbits[::-1]
def fliph(muxbits):
return [x[::-1] for x in muxbits]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open(sys.argv[1], 'r') as f:
x = json.load(f)
with open('my_wire_to_quartus_wire.json', 'r') as f:
wirenamemap = json.load(f)
print('----- There are {} muxes in the database'.format(len(x)))
print('----- There are {} routing pairs in the database'.format(sum(len(v) for
k, v in x.items())))
def bits2str(bits):
ret = ''
for row in bits:
rowstr = ''
for bit in row:
rowstr += '1' if bit else '0'
ret += rowstr + '\n'
return ret
def parse_xyi(inp):
xpos = inp.find('X')
ypos = inp.find('Y')
ipos = inp.find('I')
assert xpos >= 0
assert ypos > xpos
assert ipos > ypos
return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:ipos]), int(inp[ipos + 1:]
)
def parse_xysi(inp):
xpos = inp.find('X')
ypos = inp.find('Y')
spos = inp.find('S')
ipos = inp.find('I')
assert xpos >= 0
assert ypos > xpos
assert spos > ypos
assert ipos > spos
sval = int(inp[spos + 1:ipos])
assert sval == 0
return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:spos]), int(inp[ipos + 1:]
)
def anybits(bits):
for y in bits:
for x in y:
if not x:
return True
return False
def decodemux(bits):
A = not bits[0][0]
B = not bits[0][1]
C = not bits[0][2]
D = not bits[0][3]
E = not bits[1][0]
F = not bits[1][1]
G = not bits[1][2]
H = not bits[1][3]
assert G + C + D + H == 1
assert A + B + E + F == 1 or A + B + E + F == 0 and G
if G:
assert A + B + C + D + E + F + H == 0
if G:
return 0
if C:
if A:
return 1
if B:
return 2
if E:
return 3
if F:
return 4
if D:
if A:
return 5
if B:
return 6
if E:
return 7
if F:
return 8
if H:
if A:
return 9
if B:
return 10
if E:
return 11
if F:
return 12
def flipv(muxbits):
return muxbits[::-1]
def fliph(muxbits):
return [x[::-1] for x in muxbits]
LABELS = ['|G|C|D|H|A|B|E|F|', '|0| | | | | | | | ',
'| |0| | |0| | | | ', '| |0| | | |0| | | ',
'| |0| | | | |0| | ', '| |0| | | | | |0| ',
'| | |0| |0| | | | ', '| | |0| | |0| | | ',
'| | |0| | | |0| | ', '| | |0| | | | |0| ',
'| | | |0|0| | | | ', '| | | |0| |0| | | ',
'| | | |0| | |0| | ', '| | | |0| | | |0| ']
for dst, srcs in x.items():
srcs_decoded = [None] * 13
is_tb_io = False
for src, muxbits in srcs.items():
if dst.startswith('R:'):
_, _, I = parse_xyi(dst)
if I >= 4:
muxbits = flipv(muxbits)
elif dst.startswith('L:') or dst.startswith('L2'):
_, _, I = parse_xyi(dst)
muxbits = fliph(muxbits)
if I >= 4:
muxbits = flipv(muxbits)
elif dst.startswith('U:'):
X, _, I = parse_xyi(dst)
if X == 8:
muxbits = fliph(muxbits)
if I == 0 and X != 8:
muxbits = fliph(muxbits)
if I >= 4:
muxbits = flipv(muxbits)
elif dst.startswith('D:'):
X, _, I = parse_xyi(dst)
if X == 8:
muxbits = fliph(muxbits)
if I == 6 and X != 8:
muxbits = fliph(muxbits)
if I >= 3:
muxbits = flipv(muxbits)
elif dst.startswith('LOCAL_INTERCONNECT:'):
X, Y, I = parse_xysi(dst[19:])
if X == 1:
muxbits = fliph(muxbits)
if I > 8:
muxbits = flipv(muxbits)
elif X == 8:
if I > 8:
muxbits = flipv(muxbits)
elif Y == 0 or Y == 5:
is_tb_io = True
if Y == 0:
muxbits = flipv(muxbits)
if I < 5:
muxbits = fliph(muxbits)
else:
if I in range(0, 5) or I in range(13, 18):
muxbits = fliph(muxbits)
if I >= 13:
muxbits = flipv(muxbits)
else:
continue
muxidx = decodemux(muxbits)
if srcs_decoded[muxidx] is not None:
print(dst, src, srcs_decoded[muxidx])
assert srcs_decoded[muxidx] is None
srcs_decoded[muxidx] = src
print('~~~~~ {} ~~~~~'.format(dst))
print(LABELS[0])
if is_tb_io:
assert srcs_decoded[0] is None
for i in range(len(srcs_decoded)):
if is_tb_io and i == 0:
continue
print(LABELS[i + 1], end='')
src = srcs_decoded[i]
if src is None:
print('???')
else:
print(src, end='')
if src in wirenamemap:
print(' ({})'.format(wirenamemap[src]))
else:
print()
<|reserved_special_token_1|>
import json
import sys
# Load the mux/routing database (path given on the command line) plus the
# translation table from internal wire names to Quartus wire names.
with open(sys.argv[1], 'r') as f:
    x = json.load(f)
with open('my_wire_to_quartus_wire.json', 'r') as f:
    wirenamemap = json.load(f)
# Summary: x maps destination wire -> {source wire: mux bit pattern}.
print('----- There are {} muxes in the database'.format(len(x)))
print('----- There are {} routing pairs in the database'.format(sum(len(v) for
    k, v in x.items())))
def bits2str(bits):
    """Render a 2-D bit matrix as text: '1'/'0' per bit, one row per line.

    Each row is newline-terminated; an empty matrix yields ''.
    """
    rendered_rows = (''.join('1' if bit else '0' for bit in row)
                     for row in bits)
    return ''.join(row_text + '\n' for row_text in rendered_rows)
def parse_xyi(inp):
    """Parse a wire name of the form ...X<x>Y<y>I<i> into ints (x, y, i).

    The markers must occur in X, Y, I order; violations raise AssertionError.
    """
    x_at = inp.find('X')
    y_at = inp.find('Y')
    i_at = inp.find('I')
    assert x_at >= 0
    assert y_at > x_at
    assert i_at > y_at
    return (int(inp[x_at + 1:y_at]),
            int(inp[y_at + 1:i_at]),
            int(inp[i_at + 1:]))
def parse_xysi(inp):
    """Parse a name of the form ...X<x>Y<y>S<s>I<i> into ints (x, y, i).

    The S field must be exactly 0; markers must occur in X, Y, S, I order.
    Violations raise AssertionError.
    """
    x_at = inp.find('X')
    y_at = inp.find('Y')
    s_at = inp.find('S')
    i_at = inp.find('I')
    assert x_at >= 0
    assert y_at > x_at
    assert s_at > y_at
    assert i_at > s_at
    # Only sub-index 0 is expected in this database.
    sval = int(inp[s_at + 1:i_at])
    assert sval == 0
    return (int(inp[x_at + 1:y_at]),
            int(inp[y_at + 1:s_at]),
            int(inp[i_at + 1:]))
def anybits(bits):
    # NOTE(review): despite the name, this returns True when any bit is CLEAR
    # (False), i.e. "any zero bit present" -- behavior kept exactly as-is.
    return any(not bit for row in bits for bit in row)
def decodemux(bits):
    """Decode a 2x4 mux bit pattern into a mux input index 0-12.

    Bits are active-low: a cleared bit asserts its select line.  Line G
    alone selects index 0.  Otherwise exactly one "row" line (C, D, H) and
    exactly one "column" line (A, B, E, F) must be asserted, selecting
    indices 1-12 (C -> 1..4, D -> 5..8, H -> 9..12, column order A,B,E,F).
    Malformed patterns raise AssertionError.
    """
    A = not bits[0][0]
    B = not bits[0][1]
    C = not bits[0][2]
    D = not bits[0][3]
    E = not bits[1][0]
    F = not bits[1][1]
    G = not bits[1][2]
    H = not bits[1][3]
    # Exactly one of G/C/D/H; exactly one column unless G (index 0) is set.
    assert G + C + D + H == 1
    assert A + B + E + F == 1 or A + B + E + F == 0 and G
    if G:
        # G must be the only asserted line.
        assert A + B + C + D + E + F + H == 0
        return 0
    for base, row_selected in ((1, C), (5, D), (9, H)):
        if row_selected:
            for offset, col_selected in enumerate((A, B, E, F)):
                if col_selected:
                    return base + offset
def flipv(muxbits):
    """Return the mux pattern mirrored vertically (row order reversed)."""
    return [row for row in reversed(muxbits)]
def fliph(muxbits):
    """Mirror a bit matrix horizontally (reverse each row in place-order)."""
    return [row[::-1] for row in muxbits]
# Table header plus one row per mux index (0-12), in the order returned by
# decodemux(): each row marks with '0' which active-low bits select it.
LABELS = ['|G|C|D|H|A|B|E|F|', '|0| | | | | | | | ',
    '| |0| | |0| | | | ', '| |0| | | |0| | | ',
    '| |0| | | | |0| | ', '| |0| | | | | |0| ',
    '| | |0| |0| | | | ', '| | |0| | |0| | | ',
    '| | |0| | | |0| | ', '| | |0| | | | |0| ',
    '| | | |0|0| | | | ', '| | | |0| |0| | | ',
    '| | | |0| | |0| | ', '| | | |0| | | |0| ']
# For every destination mux, normalize each source's bit matrix to one
# canonical orientation (the stored bit layout appears mirrored depending on
# wire direction, tile position and index — the flips below undo that),
# decode which mux input it selects, and print a table per destination.
for dst, srcs in x.items():
    # One slot per decodemux() index; slot 0 is the G-only pattern.
    srcs_decoded = [None] * 13
    is_tb_io = False  # set for top/bottom-row I/O local interconnects
    for src, muxbits in srcs.items():
        if dst.startswith('R:'):
            _, _, I = parse_xyi(dst)
            if I >= 4:
                muxbits = flipv(muxbits)
        elif dst.startswith('L:') or dst.startswith('L2'):
            _, _, I = parse_xyi(dst)
            muxbits = fliph(muxbits)
            if I >= 4:
                muxbits = flipv(muxbits)
        elif dst.startswith('U:'):
            X, _, I = parse_xyi(dst)
            if X == 8:
                muxbits = fliph(muxbits)
            if I == 0 and X != 8:
                muxbits = fliph(muxbits)
            if I >= 4:
                muxbits = flipv(muxbits)
        elif dst.startswith('D:'):
            X, _, I = parse_xyi(dst)
            if X == 8:
                muxbits = fliph(muxbits)
            if I == 6 and X != 8:
                muxbits = fliph(muxbits)
            if I >= 3:
                muxbits = flipv(muxbits)
        elif dst.startswith('LOCAL_INTERCONNECT:'):
            # Strip the 'LOCAL_INTERCONNECT:' prefix before parsing.
            X, Y, I = parse_xysi(dst[19:])
            if X == 1:
                muxbits = fliph(muxbits)
                if I > 8:
                    muxbits = flipv(muxbits)
            elif X == 8:
                if I > 8:
                    muxbits = flipv(muxbits)
            elif Y == 0 or Y == 5:
                # Top/bottom rows use a different mirroring scheme.
                is_tb_io = True
                if Y == 0:
                    muxbits = flipv(muxbits)
                    if I < 5:
                        muxbits = fliph(muxbits)
                else:
                    if I in range(0, 5) or I in range(13, 18):
                        muxbits = fliph(muxbits)
                if I >= 13:
                    muxbits = flipv(muxbits)
        else:
            # Unknown destination kind: ignore this pair.
            continue
        muxidx = decodemux(muxbits)
        # Two sources must never decode to the same mux input; dump the
        # conflict before the assert fires so it can be diagnosed.
        if srcs_decoded[muxidx] is not None:
            print(dst, src, srcs_decoded[muxidx])
        assert srcs_decoded[muxidx] is None
        srcs_decoded[muxidx] = src
    print('~~~~~ {} ~~~~~'.format(dst))
    print(LABELS[0])
    if is_tb_io:
        # Top/bottom I/O muxes are expected to have no G-only entry.
        assert srcs_decoded[0] is None
    for i in range(len(srcs_decoded)):
        if is_tb_io and i == 0:
            continue
        print(LABELS[i + 1], end='')
        src = srcs_decoded[i]
        if src is None:
            print('???')
        else:
            print(src, end='')
            if src in wirenamemap:
                print(' ({})'.format(wirenamemap[src]))
            else:
                print()
<|reserved_special_token_1|>
import json
import sys
# Usage: script.py <muxdb.json>
# x maps destination wire name -> {source wire name -> 2x4 bit matrix}.
with open(sys.argv[1], 'r') as f:
    x = json.load(f)
# Pretty-print names for wires, keyed by the internal wire name.
with open('my_wire_to_quartus_wire.json', 'r') as f:
    wirenamemap = json.load(f)
print("----- There are {} muxes in the database".format(len(x)))
print("----- There are {} routing pairs in the database".format(sum((len(v) for k, v in x.items()))))
def bits2str(bits):
    """Format a 2-D bit matrix as newline-terminated rows of '1'/'0'."""
    pieces = []
    for row in bits:
        pieces.append(''.join('1' if b else '0' for b in row))
        pieces.append('\n')
    return ''.join(pieces)
def parse_xyi(inp):
    """Parse a name containing 'X<x>Y<y>I<i>' markers into (x, y, i)."""
    x_at, y_at, i_at = inp.find('X'), inp.find('Y'), inp.find('I')
    # Markers must exist and appear in X, Y, I order.
    assert x_at >= 0
    assert y_at > x_at
    assert i_at > y_at
    return int(inp[x_at + 1:y_at]), int(inp[y_at + 1:i_at]), int(inp[i_at + 1:])
def parse_xysi(inp):
    """Parse 'X<x>Y<y>S0I<i>' into (x, y, i); the S field must be 0."""
    x_at, y_at = inp.find('X'), inp.find('Y')
    s_at, i_at = inp.find('S'), inp.find('I')
    # Markers must exist and appear in X, Y, S, I order.
    assert x_at >= 0
    assert y_at > x_at
    assert s_at > y_at
    assert i_at > s_at
    assert int(inp[s_at + 1:i_at]) == 0
    return int(inp[x_at + 1:y_at]), int(inp[y_at + 1:s_at]), int(inp[i_at + 1:])
def anybits(bits):
    """Return True if any cell of the 2-D bit matrix is cleared (False)."""
    for row in bits:
        if not all(row):
            return True
    return False
def decodemux(bits):
    """Decode a 2x4 active-low mux bit pattern into a mux index 0-12.

    Row 0 carries A, B, C, D and row 1 carries E, F, G, H (cleared bit =
    asserted line).  G alone means index 0; otherwise one of C/D/H picks
    a group of four and one of A/B/E/F picks the entry within it.
    """
    flags = [not b for b in bits[0]] + [not b for b in bits[1]]
    A, B, C, D, E, F, G, H = flags
    # Exactly one group line; exactly one column line unless G is set.
    assert G + C + D + H == 1
    assert A + B + E + F == 1 or (A + B + E + F == 0 and G)
    if G:
        # G must be the only asserted line of the eight.
        assert sum(flags) == 1
        return 0
    base = 1 if C else (5 if D else 9)
    for offset, selected in enumerate((A, B, E, F)):
        if selected:
            return base + offset
def flipv(muxbits):
    """Mirror a bit matrix vertically by reversing the row order."""
    return [row for row in reversed(muxbits)]
def fliph(muxbits):
    """Mirror a bit matrix horizontally by reversing every row."""
    return [row[::-1] for row in muxbits]
# # print(x)
# uniq_r_muxes = []
# for _ in range(8):
# uniq_r_muxes.append(set())
# for X in range(2, 8):
# for Y in range(1, 5):
# for N in range(8):
# mux = "R:X{}Y{}I{}".format(X, Y, N)
# muxvals = x[mux]
# # print(muxvals)
# for muxsrc, muxbits in muxvals.items():
# uniq_r_muxes[N].add(bits2str(muxbits))
# # print(uniq_r_muxes)
# for N in range(8):
# print("~~~~~ R{} ~~~~~".format(N))
# for xx in sorted(list(uniq_r_muxes[N])):
# print(xx)
# # print(x)
# uniq_l_muxes = []
# for _ in range(8):
# uniq_l_muxes.append(set())
# # print(x)
# uniq_l2_muxes = []
# for _ in range(8):
# uniq_l2_muxes.append(set())
# for X in [8]:
# for Y in range(1, 5):
# for N in range(8):
# mux = "L2:X{}Y{}I{}".format(X, Y, N)
# muxvals = x[mux]
# # print(muxvals)
# for muxsrc, muxbits in muxvals.items():
# uniq_l2_muxes[N].add(bits2str(muxbits))
# # print(uniq_l2_muxes)
# for N in range(8):
# print("~~~~~ L2:{} ~~~~~".format(N))
# for xx in sorted(list(uniq_l2_muxes[N])):
# print(xx)
# # print(x)
# uniq_l_muxes = []
# for _ in range(8):
# uniq_l_muxes.append(set())
# for X in range(3, 9):
# for Y in range(1, 5):
# for N in range(8):
# mux = "L:X{}Y{}I{}".format(X, Y, N)
# muxvals = x[mux]
# # print(muxvals)
# for muxsrc, muxbits in muxvals.items():
# uniq_l_muxes[N].add(bits2str(muxbits))
# # print(uniq_l_muxes)
# for N in range(8):
# print("~~~~~ L{} ~~~~~".format(N))
# for xx in sorted(list(uniq_l_muxes[N])):
# print(xx)
# uniq_u_muxes = []
# for _ in range(7):
# uniq_u_muxes.append(set())
# for X in [8]:#range(2, 8):
# for Y in range(1, 5):
# for N in range(7):
# mux = "U:X{}Y{}I{}".format(X, Y, N)
# muxvals = x[mux]
# # print(muxvals)
# for muxsrc, muxbits in muxvals.items():
# uniq_u_muxes[N].add(bits2str(muxbits))
# # print(uniq_r_muxes)
# for N in range(7):
# print("~~~~~ U{} ~~~~~".format(N))
# for xx in sorted(list(uniq_u_muxes[N])):
# print(xx)
# uniq_d_muxes = []
# for _ in range(7):
# uniq_d_muxes.append(set())
# for X in [8]:#range(2, 8):
# for Y in range(1, 5):
# for N in range(7):
# mux = "D:X{}Y{}I{}".format(X, Y, N)
# muxvals = x[mux]
# # print(muxvals)
# for muxsrc, muxbits in muxvals.items():
# uniq_d_muxes[N].add(bits2str(muxbits))
# # print(uniq_r_muxes)
# for N in range(7):
# print("~~~~~ D{} ~~~~~".format(N))
# for xx in sorted(list(uniq_d_muxes[N])):
# print(xx)
# uniq_l_li_muxes = []
# for _ in range(18):
# uniq_l_li_muxes.append(set())
# for Y in range(1, 5):
# for N in range(18):
# mux = "LOCAL_INTERCONNECT:X1Y{}S0I{}".format(Y, N)
# muxvals = x[mux]
# # print(muxvals)
# for muxsrc, muxbits in muxvals.items():
# uniq_l_li_muxes[N].add(bits2str(muxbits))
# # print(uniq_r_muxes)
# for N in range(18):
# print("~~~~~ LOCAL_INTERCONNECT:X1 {} ~~~~~".format(N))
# for xx in sorted(list(uniq_l_li_muxes[N])):
# print(xx)
# uniq_li_muxes = []
# for _ in range(26):
# uniq_li_muxes.append(set())
# for X in range(2, 8):
# for Y in range(1, 5):
# for N in range(26):
# mux = "LOCAL_INTERCONNECT:X{}Y{}S0I{}".format(X, Y, N)
# muxvals = x[mux]
# # print(muxvals)
# for muxsrc, muxbits in muxvals.items():
# uniq_li_muxes[N].add(bits2str(muxbits))
# # print(uniq_r_muxes)
# for N in range(26):
# print("~~~~~ LOCAL_INTERCONNECT:X1 {} ~~~~~".format(N))
# for xx in sorted(list(uniq_li_muxes[N])):
# print(xx)
# uniq_top_li_muxes = []
# for _ in range(10):
# uniq_top_li_muxes.append(set())
# for X in range(2, 8):
# for N in range(10):
# mux = "LOCAL_INTERCONNECT:X{}Y5S0I{}".format(X, N)
# muxvals = x[mux]
# # print(muxvals)
# for muxsrc, muxbits in muxvals.items():
# uniq_top_li_muxes[N].add(bits2str(muxbits))
# # print(uniq_r_muxes)
# for N in range(10):
# print("~~~~~ LOCAL_INTERCONNECT:Y5 {} ~~~~~".format(N))
# for xx in sorted(list(uniq_top_li_muxes[N])):
# print(xx)
# Table header plus one row per mux index (0-12), in the order returned by
# decodemux(): each row marks with '0' which active-low bits select it.
LABELS = [
    "|G|C|D|H|A|B|E|F|",
    "|0| | | | | | | | ",
    "| |0| | |0| | | | ",
    "| |0| | | |0| | | ",
    "| |0| | | | |0| | ",
    "| |0| | | | | |0| ",
    "| | |0| |0| | | | ",
    "| | |0| | |0| | | ",
    "| | |0| | | |0| | ",
    "| | |0| | | | |0| ",
    "| | | |0|0| | | | ",
    "| | | |0| |0| | | ",
    "| | | |0| | |0| | ",
    "| | | |0| | | |0| ",
]
# For every destination mux, normalize each source's bit matrix to one
# canonical orientation (the stored bit layout appears mirrored depending on
# wire direction, tile position and index — the flips below undo that),
# decode which mux input it selects, and print a table per destination.
for dst, srcs in x.items():
    # One slot per decodemux() index; slot 0 is the G-only pattern.
    srcs_decoded = [None] * 13
    is_tb_io = False  # set for top/bottom-row I/O local interconnects
    for src, muxbits in srcs.items():
        if dst.startswith("R:"):
            _, _, I = parse_xyi(dst)
            if I >= 4:
                muxbits = flipv(muxbits)
        elif dst.startswith("L:") or dst.startswith("L2"):
            _, _, I = parse_xyi(dst)
            muxbits = fliph(muxbits)
            if I >= 4:
                muxbits = flipv(muxbits)
        elif dst.startswith("U:"):
            X, _, I = parse_xyi(dst)
            if X == 8:
                muxbits = fliph(muxbits)
            if I == 0 and X != 8:
                muxbits = fliph(muxbits)
            if I >= 4:
                muxbits = flipv(muxbits)
        elif dst.startswith("D:"):
            X, _, I = parse_xyi(dst)
            if X == 8:
                muxbits = fliph(muxbits)
            if I == 6 and X != 8:
                muxbits = fliph(muxbits)
            if I >= 3:
                muxbits = flipv(muxbits)
        elif dst.startswith("LOCAL_INTERCONNECT:"):
            # Strip the 'LOCAL_INTERCONNECT:' prefix before parsing.
            X, Y, I = parse_xysi(dst[19:])
            if X == 1:
                muxbits = fliph(muxbits)
                if I > 8:
                    muxbits = flipv(muxbits)
            elif X == 8:
                if I > 8:
                    muxbits = flipv(muxbits)
            else:
                if Y == 0 or Y == 5:
                    # Top/bottom rows use a different mirroring scheme.
                    is_tb_io = True
                    if Y == 0:
                        muxbits = flipv(muxbits)
                        if I < 5:
                            muxbits = fliph(muxbits)
                    else:
                        if I in range(0, 5) or I in range(13, 18):
                            muxbits = fliph(muxbits)
                    if I >= 13:
                        muxbits = flipv(muxbits)
        else:
            # Unknown destination kind: ignore this pair.
            continue
        muxidx = decodemux(muxbits)
        # Two sources must never decode to the same mux input; dump the
        # conflict before the assert fires so it can be diagnosed.
        if srcs_decoded[muxidx] is not None:
            print(dst, src, srcs_decoded[muxidx])
        assert srcs_decoded[muxidx] is None
        srcs_decoded[muxidx] = src
    print("~~~~~ {} ~~~~~".format(dst))
    print(LABELS[0])
    if is_tb_io:
        # Top/bottom I/O muxes are expected to have no G-only entry.
        assert srcs_decoded[0] is None
    for i in range(len(srcs_decoded)):
        if is_tb_io and i == 0:
            continue
        print(LABELS[i + 1], end='')
        src = srcs_decoded[i]
        if src is None:
            print("???")
        else:
            print(src, end='')
            if src in wirenamemap:
                print(" ({})".format(wirenamemap[src]))
            else:
                print()
# if dst.startswith("LOCAL_INTERCONNECT:"):
# continue
# print(dst, src)
# if dst.startswith("L:"):
# _, _, I = parse_xyi(dst)
# muxbits = fliph(muxbits)
# if I >= 4:
# muxbits = flipv(muxbits)
# if dst.startswith("R:"):
# _, _, I = parse_xyi(dst)
# if I >= 4:
# muxbits = flipv(muxbits)
# if dst.startswith("D:"):
# X, _, I = parse_xyi(dst)
# if I >= 3:
# muxbits = flipv(muxbits)
# if I == 6:
# muxbits = fliph(muxbits)
# if X == 8:
# muxbits = fliph(muxbits)
# if dst.startswith("U:"):
# X, _, I = parse_xyi(dst)
# if I >= 4:
# muxbits = flipv(muxbits)
# if I == 0:
# muxbits = fliph(muxbits)
# if X == 8:
# muxbits = fliph(muxbits)
# if dst.startswith("L2:"):
# _, _, I = parse_xyi(dst)
# if I >= 4:
# muxbits = flipv(muxbits)
# decodemux(muxbits)
|
flexible
|
{
"blob_id": "95163a28a35cc88240d9d6edc2e9b416e5493909",
"index": 6021,
"step-1": "<mask token>\n\n\ndef bits2str(bits):\n ret = ''\n for row in bits:\n rowstr = ''\n for bit in row:\n rowstr += '1' if bit else '0'\n ret += rowstr + '\\n'\n return ret\n\n\ndef parse_xyi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n ipos = inp.find('I')\n assert xpos >= 0\n assert ypos > xpos\n assert ipos > ypos\n return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:ipos]), int(inp[ipos + 1:]\n )\n\n\ndef parse_xysi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n spos = inp.find('S')\n ipos = inp.find('I')\n assert xpos >= 0\n assert ypos > xpos\n assert spos > ypos\n assert ipos > spos\n sval = int(inp[spos + 1:ipos])\n assert sval == 0\n return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:spos]), int(inp[ipos + 1:]\n )\n\n\ndef anybits(bits):\n for y in bits:\n for x in y:\n if not x:\n return True\n return False\n\n\ndef decodemux(bits):\n A = not bits[0][0]\n B = not bits[0][1]\n C = not bits[0][2]\n D = not bits[0][3]\n E = not bits[1][0]\n F = not bits[1][1]\n G = not bits[1][2]\n H = not bits[1][3]\n assert G + C + D + H == 1\n assert A + B + E + F == 1 or A + B + E + F == 0 and G\n if G:\n assert A + B + C + D + E + F + H == 0\n if G:\n return 0\n if C:\n if A:\n return 1\n if B:\n return 2\n if E:\n return 3\n if F:\n return 4\n if D:\n if A:\n return 5\n if B:\n return 6\n if E:\n return 7\n if F:\n return 8\n if H:\n if A:\n return 9\n if B:\n return 10\n if E:\n return 11\n if F:\n return 12\n\n\n<mask token>\n\n\ndef fliph(muxbits):\n return [x[::-1] for x in muxbits]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef bits2str(bits):\n ret = ''\n for row in bits:\n rowstr = ''\n for bit in row:\n rowstr += '1' if bit else '0'\n ret += rowstr + '\\n'\n return ret\n\n\ndef parse_xyi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n ipos = inp.find('I')\n assert xpos >= 0\n assert ypos > xpos\n assert ipos > ypos\n return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:ipos]), int(inp[ipos + 1:]\n )\n\n\ndef parse_xysi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n spos = inp.find('S')\n ipos = inp.find('I')\n assert xpos >= 0\n assert ypos > xpos\n assert spos > ypos\n assert ipos > spos\n sval = int(inp[spos + 1:ipos])\n assert sval == 0\n return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:spos]), int(inp[ipos + 1:]\n )\n\n\ndef anybits(bits):\n for y in bits:\n for x in y:\n if not x:\n return True\n return False\n\n\ndef decodemux(bits):\n A = not bits[0][0]\n B = not bits[0][1]\n C = not bits[0][2]\n D = not bits[0][3]\n E = not bits[1][0]\n F = not bits[1][1]\n G = not bits[1][2]\n H = not bits[1][3]\n assert G + C + D + H == 1\n assert A + B + E + F == 1 or A + B + E + F == 0 and G\n if G:\n assert A + B + C + D + E + F + H == 0\n if G:\n return 0\n if C:\n if A:\n return 1\n if B:\n return 2\n if E:\n return 3\n if F:\n return 4\n if D:\n if A:\n return 5\n if B:\n return 6\n if E:\n return 7\n if F:\n return 8\n if H:\n if A:\n return 9\n if B:\n return 10\n if E:\n return 11\n if F:\n return 12\n\n\ndef flipv(muxbits):\n return muxbits[::-1]\n\n\ndef fliph(muxbits):\n return [x[::-1] for x in muxbits]\n\n\n<mask token>\n",
"step-3": "<mask token>\nwith open(sys.argv[1], 'r') as f:\n x = json.load(f)\nwith open('my_wire_to_quartus_wire.json', 'r') as f:\n wirenamemap = json.load(f)\nprint('----- There are {} muxes in the database'.format(len(x)))\nprint('----- There are {} routing pairs in the database'.format(sum(len(v) for\n k, v in x.items())))\n\n\ndef bits2str(bits):\n ret = ''\n for row in bits:\n rowstr = ''\n for bit in row:\n rowstr += '1' if bit else '0'\n ret += rowstr + '\\n'\n return ret\n\n\ndef parse_xyi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n ipos = inp.find('I')\n assert xpos >= 0\n assert ypos > xpos\n assert ipos > ypos\n return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:ipos]), int(inp[ipos + 1:]\n )\n\n\ndef parse_xysi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n spos = inp.find('S')\n ipos = inp.find('I')\n assert xpos >= 0\n assert ypos > xpos\n assert spos > ypos\n assert ipos > spos\n sval = int(inp[spos + 1:ipos])\n assert sval == 0\n return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:spos]), int(inp[ipos + 1:]\n )\n\n\ndef anybits(bits):\n for y in bits:\n for x in y:\n if not x:\n return True\n return False\n\n\ndef decodemux(bits):\n A = not bits[0][0]\n B = not bits[0][1]\n C = not bits[0][2]\n D = not bits[0][3]\n E = not bits[1][0]\n F = not bits[1][1]\n G = not bits[1][2]\n H = not bits[1][3]\n assert G + C + D + H == 1\n assert A + B + E + F == 1 or A + B + E + F == 0 and G\n if G:\n assert A + B + C + D + E + F + H == 0\n if G:\n return 0\n if C:\n if A:\n return 1\n if B:\n return 2\n if E:\n return 3\n if F:\n return 4\n if D:\n if A:\n return 5\n if B:\n return 6\n if E:\n return 7\n if F:\n return 8\n if H:\n if A:\n return 9\n if B:\n return 10\n if E:\n return 11\n if F:\n return 12\n\n\ndef flipv(muxbits):\n return muxbits[::-1]\n\n\ndef fliph(muxbits):\n return [x[::-1] for x in muxbits]\n\n\nLABELS = ['|G|C|D|H|A|B|E|F|', '|0| | | | | | | | ',\n '| |0| | |0| | | | ', '| |0| | | |0| | | ',\n '| |0| | | | |0| | ', '| |0| | 
| | | |0| ',\n '| | |0| |0| | | | ', '| | |0| | |0| | | ',\n '| | |0| | | |0| | ', '| | |0| | | | |0| ',\n '| | | |0|0| | | | ', '| | | |0| |0| | | ',\n '| | | |0| | |0| | ', '| | | |0| | | |0| ']\nfor dst, srcs in x.items():\n srcs_decoded = [None] * 13\n is_tb_io = False\n for src, muxbits in srcs.items():\n if dst.startswith('R:'):\n _, _, I = parse_xyi(dst)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith('L:') or dst.startswith('L2'):\n _, _, I = parse_xyi(dst)\n muxbits = fliph(muxbits)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith('U:'):\n X, _, I = parse_xyi(dst)\n if X == 8:\n muxbits = fliph(muxbits)\n if I == 0 and X != 8:\n muxbits = fliph(muxbits)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith('D:'):\n X, _, I = parse_xyi(dst)\n if X == 8:\n muxbits = fliph(muxbits)\n if I == 6 and X != 8:\n muxbits = fliph(muxbits)\n if I >= 3:\n muxbits = flipv(muxbits)\n elif dst.startswith('LOCAL_INTERCONNECT:'):\n X, Y, I = parse_xysi(dst[19:])\n if X == 1:\n muxbits = fliph(muxbits)\n if I > 8:\n muxbits = flipv(muxbits)\n elif X == 8:\n if I > 8:\n muxbits = flipv(muxbits)\n elif Y == 0 or Y == 5:\n is_tb_io = True\n if Y == 0:\n muxbits = flipv(muxbits)\n if I < 5:\n muxbits = fliph(muxbits)\n else:\n if I in range(0, 5) or I in range(13, 18):\n muxbits = fliph(muxbits)\n if I >= 13:\n muxbits = flipv(muxbits)\n else:\n continue\n muxidx = decodemux(muxbits)\n if srcs_decoded[muxidx] is not None:\n print(dst, src, srcs_decoded[muxidx])\n assert srcs_decoded[muxidx] is None\n srcs_decoded[muxidx] = src\n print('~~~~~ {} ~~~~~'.format(dst))\n print(LABELS[0])\n if is_tb_io:\n assert srcs_decoded[0] is None\n for i in range(len(srcs_decoded)):\n if is_tb_io and i == 0:\n continue\n print(LABELS[i + 1], end='')\n src = srcs_decoded[i]\n if src is None:\n print('???')\n else:\n print(src, end='')\n if src in wirenamemap:\n print(' ({})'.format(wirenamemap[src]))\n else:\n print()\n",
"step-4": "import json\nimport sys\nwith open(sys.argv[1], 'r') as f:\n x = json.load(f)\nwith open('my_wire_to_quartus_wire.json', 'r') as f:\n wirenamemap = json.load(f)\nprint('----- There are {} muxes in the database'.format(len(x)))\nprint('----- There are {} routing pairs in the database'.format(sum(len(v) for\n k, v in x.items())))\n\n\ndef bits2str(bits):\n ret = ''\n for row in bits:\n rowstr = ''\n for bit in row:\n rowstr += '1' if bit else '0'\n ret += rowstr + '\\n'\n return ret\n\n\ndef parse_xyi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n ipos = inp.find('I')\n assert xpos >= 0\n assert ypos > xpos\n assert ipos > ypos\n return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:ipos]), int(inp[ipos + 1:]\n )\n\n\ndef parse_xysi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n spos = inp.find('S')\n ipos = inp.find('I')\n assert xpos >= 0\n assert ypos > xpos\n assert spos > ypos\n assert ipos > spos\n sval = int(inp[spos + 1:ipos])\n assert sval == 0\n return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:spos]), int(inp[ipos + 1:]\n )\n\n\ndef anybits(bits):\n for y in bits:\n for x in y:\n if not x:\n return True\n return False\n\n\ndef decodemux(bits):\n A = not bits[0][0]\n B = not bits[0][1]\n C = not bits[0][2]\n D = not bits[0][3]\n E = not bits[1][0]\n F = not bits[1][1]\n G = not bits[1][2]\n H = not bits[1][3]\n assert G + C + D + H == 1\n assert A + B + E + F == 1 or A + B + E + F == 0 and G\n if G:\n assert A + B + C + D + E + F + H == 0\n if G:\n return 0\n if C:\n if A:\n return 1\n if B:\n return 2\n if E:\n return 3\n if F:\n return 4\n if D:\n if A:\n return 5\n if B:\n return 6\n if E:\n return 7\n if F:\n return 8\n if H:\n if A:\n return 9\n if B:\n return 10\n if E:\n return 11\n if F:\n return 12\n\n\ndef flipv(muxbits):\n return muxbits[::-1]\n\n\ndef fliph(muxbits):\n return [x[::-1] for x in muxbits]\n\n\nLABELS = ['|G|C|D|H|A|B|E|F|', '|0| | | | | | | | ',\n '| |0| | |0| | | | ', '| |0| | | |0| | | ',\n '| |0| | | | |0| | 
', '| |0| | | | | |0| ',\n '| | |0| |0| | | | ', '| | |0| | |0| | | ',\n '| | |0| | | |0| | ', '| | |0| | | | |0| ',\n '| | | |0|0| | | | ', '| | | |0| |0| | | ',\n '| | | |0| | |0| | ', '| | | |0| | | |0| ']\nfor dst, srcs in x.items():\n srcs_decoded = [None] * 13\n is_tb_io = False\n for src, muxbits in srcs.items():\n if dst.startswith('R:'):\n _, _, I = parse_xyi(dst)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith('L:') or dst.startswith('L2'):\n _, _, I = parse_xyi(dst)\n muxbits = fliph(muxbits)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith('U:'):\n X, _, I = parse_xyi(dst)\n if X == 8:\n muxbits = fliph(muxbits)\n if I == 0 and X != 8:\n muxbits = fliph(muxbits)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith('D:'):\n X, _, I = parse_xyi(dst)\n if X == 8:\n muxbits = fliph(muxbits)\n if I == 6 and X != 8:\n muxbits = fliph(muxbits)\n if I >= 3:\n muxbits = flipv(muxbits)\n elif dst.startswith('LOCAL_INTERCONNECT:'):\n X, Y, I = parse_xysi(dst[19:])\n if X == 1:\n muxbits = fliph(muxbits)\n if I > 8:\n muxbits = flipv(muxbits)\n elif X == 8:\n if I > 8:\n muxbits = flipv(muxbits)\n elif Y == 0 or Y == 5:\n is_tb_io = True\n if Y == 0:\n muxbits = flipv(muxbits)\n if I < 5:\n muxbits = fliph(muxbits)\n else:\n if I in range(0, 5) or I in range(13, 18):\n muxbits = fliph(muxbits)\n if I >= 13:\n muxbits = flipv(muxbits)\n else:\n continue\n muxidx = decodemux(muxbits)\n if srcs_decoded[muxidx] is not None:\n print(dst, src, srcs_decoded[muxidx])\n assert srcs_decoded[muxidx] is None\n srcs_decoded[muxidx] = src\n print('~~~~~ {} ~~~~~'.format(dst))\n print(LABELS[0])\n if is_tb_io:\n assert srcs_decoded[0] is None\n for i in range(len(srcs_decoded)):\n if is_tb_io and i == 0:\n continue\n print(LABELS[i + 1], end='')\n src = srcs_decoded[i]\n if src is None:\n print('???')\n else:\n print(src, end='')\n if src in wirenamemap:\n print(' ({})'.format(wirenamemap[src]))\n else:\n print()\n",
"step-5": "import json\nimport sys\n\nwith open(sys.argv[1], 'r') as f:\n x = json.load(f)\nwith open('my_wire_to_quartus_wire.json', 'r') as f:\n wirenamemap = json.load(f)\n\nprint(\"----- There are {} muxes in the database\".format(len(x)))\nprint(\"----- There are {} routing pairs in the database\".format(sum((len(v) for k, v in x.items()))))\n\ndef bits2str(bits):\n ret = \"\"\n for row in bits:\n rowstr = \"\"\n for bit in row:\n rowstr += \"1\" if bit else \"0\"\n ret += rowstr + '\\n'\n return ret\n\ndef parse_xyi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n ipos = inp.find('I')\n\n assert xpos >= 0\n assert ypos > xpos\n assert ipos > ypos\n\n return (int(inp[xpos + 1:ypos]), int(inp[ypos + 1:ipos]), int(inp[ipos + 1:]))\n\ndef parse_xysi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n spos = inp.find('S')\n ipos = inp.find('I')\n\n assert xpos >= 0\n assert ypos > xpos\n assert spos > ypos\n assert ipos > spos\n\n sval = int(inp[spos + 1:ipos])\n assert sval == 0\n\n return (int(inp[xpos + 1:ypos]), int(inp[ypos + 1:spos]), int(inp[ipos + 1:]))\n\ndef anybits(bits):\n for y in bits:\n for x in y:\n if not x:\n return True\n return False\n\ndef decodemux(bits):\n A = not bits[0][0]\n B = not bits[0][1]\n C = not bits[0][2]\n D = not bits[0][3]\n E = not bits[1][0]\n F = not bits[1][1]\n G = not bits[1][2]\n H = not bits[1][3]\n\n assert G + C + D + H == 1\n assert A + B + E + F == 1 or (A + B + E + F == 0 and G)\n if G:\n assert A + B + C + D + E + F + H == 0\n\n if G:\n return 0\n if C:\n if A: return 1\n if B: return 2\n if E: return 3\n if F: return 4\n if D:\n if A: return 5\n if B: return 6\n if E: return 7\n if F: return 8\n if H:\n if A: return 9\n if B: return 10\n if E: return 11\n if F: return 12\n\ndef flipv(muxbits):\n return muxbits[::-1]\n\ndef fliph(muxbits):\n return [x[::-1] for x in muxbits]\n\n# # print(x)\n# uniq_r_muxes = []\n# for _ in range(8):\n# uniq_r_muxes.append(set())\n\n# for X in range(2, 8):\n# for Y in 
range(1, 5):\n# for N in range(8):\n# mux = \"R:X{}Y{}I{}\".format(X, Y, N)\n# muxvals = x[mux]\n# # print(muxvals)\n# for muxsrc, muxbits in muxvals.items():\n# uniq_r_muxes[N].add(bits2str(muxbits))\n\n# # print(uniq_r_muxes)\n# for N in range(8):\n# print(\"~~~~~ R{} ~~~~~\".format(N))\n# for xx in sorted(list(uniq_r_muxes[N])):\n# print(xx)\n\n# # print(x)\n# uniq_l_muxes = []\n# for _ in range(8):\n# uniq_l_muxes.append(set())\n\n# # print(x)\n# uniq_l2_muxes = []\n# for _ in range(8):\n# uniq_l2_muxes.append(set())\n\n# for X in [8]:\n# for Y in range(1, 5):\n# for N in range(8):\n# mux = \"L2:X{}Y{}I{}\".format(X, Y, N)\n# muxvals = x[mux]\n# # print(muxvals)\n# for muxsrc, muxbits in muxvals.items():\n# uniq_l2_muxes[N].add(bits2str(muxbits))\n\n# # print(uniq_l2_muxes)\n# for N in range(8):\n# print(\"~~~~~ L2:{} ~~~~~\".format(N))\n# for xx in sorted(list(uniq_l2_muxes[N])):\n# print(xx)\n\n# # print(x)\n# uniq_l_muxes = []\n# for _ in range(8):\n# uniq_l_muxes.append(set())\n\n# for X in range(3, 9):\n# for Y in range(1, 5):\n# for N in range(8):\n# mux = \"L:X{}Y{}I{}\".format(X, Y, N)\n# muxvals = x[mux]\n# # print(muxvals)\n# for muxsrc, muxbits in muxvals.items():\n# uniq_l_muxes[N].add(bits2str(muxbits))\n\n# # print(uniq_l_muxes)\n# for N in range(8):\n# print(\"~~~~~ L{} ~~~~~\".format(N))\n# for xx in sorted(list(uniq_l_muxes[N])):\n# print(xx)\n\n# uniq_u_muxes = []\n# for _ in range(7):\n# uniq_u_muxes.append(set())\n\n# for X in [8]:#range(2, 8):\n# for Y in range(1, 5):\n# for N in range(7):\n# mux = \"U:X{}Y{}I{}\".format(X, Y, N)\n# muxvals = x[mux]\n# # print(muxvals)\n# for muxsrc, muxbits in muxvals.items():\n# uniq_u_muxes[N].add(bits2str(muxbits))\n\n# # print(uniq_r_muxes)\n# for N in range(7):\n# print(\"~~~~~ U{} ~~~~~\".format(N))\n# for xx in sorted(list(uniq_u_muxes[N])):\n# print(xx)\n\n# uniq_d_muxes = []\n# for _ in range(7):\n# uniq_d_muxes.append(set())\n\n# for X in [8]:#range(2, 8):\n# for Y in range(1, 5):\n# for N in 
range(7):\n# mux = \"D:X{}Y{}I{}\".format(X, Y, N)\n# muxvals = x[mux]\n# # print(muxvals)\n# for muxsrc, muxbits in muxvals.items():\n# uniq_d_muxes[N].add(bits2str(muxbits))\n\n# # print(uniq_r_muxes)\n# for N in range(7):\n# print(\"~~~~~ D{} ~~~~~\".format(N))\n# for xx in sorted(list(uniq_d_muxes[N])):\n# print(xx)\n\n# uniq_l_li_muxes = []\n# for _ in range(18):\n# uniq_l_li_muxes.append(set())\n\n# for Y in range(1, 5):\n# for N in range(18):\n# mux = \"LOCAL_INTERCONNECT:X1Y{}S0I{}\".format(Y, N)\n# muxvals = x[mux]\n# # print(muxvals)\n# for muxsrc, muxbits in muxvals.items():\n# uniq_l_li_muxes[N].add(bits2str(muxbits))\n\n# # print(uniq_r_muxes)\n# for N in range(18):\n# print(\"~~~~~ LOCAL_INTERCONNECT:X1 {} ~~~~~\".format(N))\n# for xx in sorted(list(uniq_l_li_muxes[N])):\n# print(xx)\n\n# uniq_li_muxes = []\n# for _ in range(26):\n# uniq_li_muxes.append(set())\n\n# for X in range(2, 8):\n# for Y in range(1, 5):\n# for N in range(26):\n# mux = \"LOCAL_INTERCONNECT:X{}Y{}S0I{}\".format(X, Y, N)\n# muxvals = x[mux]\n# # print(muxvals)\n# for muxsrc, muxbits in muxvals.items():\n# uniq_li_muxes[N].add(bits2str(muxbits))\n\n# # print(uniq_r_muxes)\n# for N in range(26):\n# print(\"~~~~~ LOCAL_INTERCONNECT:X1 {} ~~~~~\".format(N))\n# for xx in sorted(list(uniq_li_muxes[N])):\n# print(xx)\n\n# uniq_top_li_muxes = []\n# for _ in range(10):\n# uniq_top_li_muxes.append(set())\n\n# for X in range(2, 8):\n# for N in range(10):\n# mux = \"LOCAL_INTERCONNECT:X{}Y5S0I{}\".format(X, N)\n# muxvals = x[mux]\n# # print(muxvals)\n# for muxsrc, muxbits in muxvals.items():\n# uniq_top_li_muxes[N].add(bits2str(muxbits))\n\n# # print(uniq_r_muxes)\n# for N in range(10):\n# print(\"~~~~~ LOCAL_INTERCONNECT:Y5 {} ~~~~~\".format(N))\n# for xx in sorted(list(uniq_top_li_muxes[N])):\n# print(xx)\n\nLABELS = [\n \"|G|C|D|H|A|B|E|F|\",\n \"|0| | | | | | | | \",\n \"| |0| | |0| | | | \",\n \"| |0| | | |0| | | \",\n \"| |0| | | | |0| | \",\n \"| |0| | | | | |0| \",\n \"| | |0| |0| | 
| | \",\n \"| | |0| | |0| | | \",\n \"| | |0| | | |0| | \",\n \"| | |0| | | | |0| \",\n \"| | | |0|0| | | | \",\n \"| | | |0| |0| | | \",\n \"| | | |0| | |0| | \",\n \"| | | |0| | | |0| \",\n]\n\nfor dst, srcs in x.items():\n srcs_decoded = [None] * 13\n is_tb_io = False\n for src, muxbits in srcs.items():\n if dst.startswith(\"R:\"):\n _, _, I = parse_xyi(dst)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith(\"L:\") or dst.startswith(\"L2\"):\n _, _, I = parse_xyi(dst)\n muxbits = fliph(muxbits)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith(\"U:\"):\n X, _, I = parse_xyi(dst)\n if X == 8:\n muxbits = fliph(muxbits)\n\n if I == 0 and X != 8:\n muxbits = fliph(muxbits)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith(\"D:\"):\n X, _, I = parse_xyi(dst)\n if X == 8:\n muxbits = fliph(muxbits)\n\n if I == 6 and X != 8:\n muxbits = fliph(muxbits)\n if I >= 3:\n muxbits = flipv(muxbits)\n elif dst.startswith(\"LOCAL_INTERCONNECT:\"):\n X, Y, I = parse_xysi(dst[19:])\n if X == 1:\n muxbits = fliph(muxbits)\n if I > 8:\n muxbits = flipv(muxbits)\n elif X == 8:\n if I > 8:\n muxbits = flipv(muxbits)\n else:\n if Y == 0 or Y == 5:\n is_tb_io = True\n if Y == 0:\n muxbits = flipv(muxbits)\n if I < 5:\n muxbits = fliph(muxbits)\n else:\n if I in range(0, 5) or I in range(13, 18):\n muxbits = fliph(muxbits)\n if I >= 13:\n muxbits = flipv(muxbits)\n else:\n continue\n\n muxidx = decodemux(muxbits)\n\n if srcs_decoded[muxidx] is not None:\n print(dst, src, srcs_decoded[muxidx])\n assert srcs_decoded[muxidx] is None\n srcs_decoded[muxidx] = src\n\n print(\"~~~~~ {} ~~~~~\".format(dst))\n print(LABELS[0])\n if is_tb_io:\n assert srcs_decoded[0] is None\n for i in range(len(srcs_decoded)):\n if is_tb_io and i == 0:\n continue\n print(LABELS[i + 1], end='')\n src = srcs_decoded[i]\n if src is None:\n print(\"???\")\n else:\n print(src, end='')\n if src in wirenamemap:\n print(\" ({})\".format(wirenamemap[src]))\n else:\n print()\n\n # if 
dst.startswith(\"LOCAL_INTERCONNECT:\"):\n # continue\n\n # print(dst, src)\n\n # if dst.startswith(\"L:\"):\n # _, _, I = parse_xyi(dst)\n # muxbits = fliph(muxbits)\n # if I >= 4:\n # muxbits = flipv(muxbits)\n # if dst.startswith(\"R:\"):\n # _, _, I = parse_xyi(dst)\n # if I >= 4:\n # muxbits = flipv(muxbits)\n # if dst.startswith(\"D:\"):\n # X, _, I = parse_xyi(dst)\n # if I >= 3:\n # muxbits = flipv(muxbits)\n # if I == 6:\n # muxbits = fliph(muxbits)\n # if X == 8:\n # muxbits = fliph(muxbits)\n # if dst.startswith(\"U:\"):\n # X, _, I = parse_xyi(dst)\n # if I >= 4:\n # muxbits = flipv(muxbits)\n # if I == 0:\n # muxbits = fliph(muxbits)\n # if X == 8:\n # muxbits = fliph(muxbits)\n # if dst.startswith(\"L2:\"):\n # _, _, I = parse_xyi(dst)\n # if I >= 4:\n # muxbits = flipv(muxbits)\n\n # decodemux(muxbits)\n",
"step-ids": [
6,
7,
9,
10,
11
]
}
|
[
6,
7,
9,
10,
11
] |
<|reserved_special_token_0|>
class Ball(pygame.sprite.Sprite):
def __init__(self, color, width, height):
super().__init__()
self.image = pygame.Surface([width, height])
self.image.fill(BLACK)
self.image.set_colorkey(BLACK)
pygame.draw.rect(self.image, color, [0, 0, width, height])
self.velocity = [choice([-8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4,
5, 6, 7, 8]), randint(-8, 8)]
self.rect = self.image.get_rect()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Ball(pygame.sprite.Sprite):
def __init__(self, color, width, height):
super().__init__()
self.image = pygame.Surface([width, height])
self.image.fill(BLACK)
self.image.set_colorkey(BLACK)
pygame.draw.rect(self.image, color, [0, 0, width, height])
self.velocity = [choice([-8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4,
5, 6, 7, 8]), randint(-8, 8)]
self.rect = self.image.get_rect()
def update(self):
self.rect.x += self.velocity[0]
self.rect.y += self.velocity[1]
<|reserved_special_token_0|>
def destroy(self):
self.kill()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
BLACK = 0, 0, 0
class Ball(pygame.sprite.Sprite):
    """A square 'ball' sprite that moves with a random initial velocity."""

    def __init__(self, color, width, height):
        """Create a width x height ball of the given color."""
        super().__init__()
        self.image = pygame.Surface([width, height])
        self.image.fill(BLACK)
        self.image.set_colorkey(BLACK)  # BLACK pixels render transparent
        pygame.draw.rect(self.image, color, [0, 0, width, height])
        # [dx, dy]; dx excludes 0 so the ball always moves horizontally.
        self.velocity = [choice([-8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4,
            5, 6, 7, 8]), randint(-8, 8)]
        self.rect = self.image.get_rect()

    def update(self):
        """Advance the ball by one frame along its velocity vector."""
        self.rect.x += self.velocity[0]
        self.rect.y += self.velocity[1]

    def bounce(self):
        """Reverse horizontal direction and randomize vertical speed."""
        self.velocity[0] = -self.velocity[0]
        self.velocity[1] = randint(-8, 8)

    def destroy(self):
        """Remove this sprite from all sprite groups."""
        self.kill()
<|reserved_special_token_1|>
import pygame
from random import randint, choice
BLACK = 0, 0, 0
class Ball(pygame.sprite.Sprite):
    """A square 'ball' sprite that moves with a random initial velocity."""

    def __init__(self, color, width, height):
        """Create a width x height ball of the given color."""
        super().__init__()
        self.image = pygame.Surface([width, height])
        self.image.fill(BLACK)
        self.image.set_colorkey(BLACK)  # BLACK pixels render transparent
        pygame.draw.rect(self.image, color, [0, 0, width, height])
        # [dx, dy]; dx excludes 0 so the ball always moves horizontally.
        self.velocity = [choice([-8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4,
            5, 6, 7, 8]), randint(-8, 8)]
        self.rect = self.image.get_rect()

    def update(self):
        """Advance the ball by one frame along its velocity vector."""
        self.rect.x += self.velocity[0]
        self.rect.y += self.velocity[1]

    def bounce(self):
        """Reverse horizontal direction and randomize vertical speed."""
        self.velocity[0] = -self.velocity[0]
        self.velocity[1] = randint(-8, 8)

    def destroy(self):
        """Remove this sprite from all sprite groups."""
        self.kill()
<|reserved_special_token_1|>
import pygame
from random import randint, choice
BLACK = (0,0,0)
#----------------------------------------------------------
class Ball(pygame.sprite.Sprite):
    """A rectangular ball sprite with a random initial speed and direction."""

    def __init__(self, color, width, height):
        """Initialize the sprite, draw the ball and choose a random velocity."""
        super().__init__()

        # Render the ball onto its own surface; BLACK is the transparent key.
        surface = pygame.Surface([width, height])
        surface.fill(BLACK)
        surface.set_colorkey(BLACK)
        pygame.draw.rect(surface, color, [0, 0, width, height])
        self.image = surface

        # Horizontal speed is never zero, so the ball always moves sideways;
        # the vertical component may be zero.
        horizontal = choice([-8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8])
        vertical = randint(-8, 8)
        self.velocity = [horizontal, vertical]

        # Dimensions/location rectangle of the sprite.
        self.rect = self.image.get_rect()

    def update(self):
        """Advance the ball one step along its current velocity."""
        self.rect.x += self.velocity[0]
        self.rect.y += self.velocity[1]

    def bounce(self):
        """Reverse horizontal direction and re-roll the vertical speed."""
        self.velocity[0] = -self.velocity[0]
        self.velocity[1] = randint(-8, 8)

    def destroy(self):
        """Remove the ball from all sprite groups (left/right wall hit)."""
        self.kill()
#----------------------------------------------------------
|
flexible
|
{
"blob_id": "6f216420f641c042bb2772b79c10f904ffa21938",
"index": 8733,
"step-1": "<mask token>\n\n\nclass Ball(pygame.sprite.Sprite):\n\n def __init__(self, color, width, height):\n super().__init__()\n self.image = pygame.Surface([width, height])\n self.image.fill(BLACK)\n self.image.set_colorkey(BLACK)\n pygame.draw.rect(self.image, color, [0, 0, width, height])\n self.velocity = [choice([-8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4,\n 5, 6, 7, 8]), randint(-8, 8)]\n self.rect = self.image.get_rect()\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Ball(pygame.sprite.Sprite):\n\n def __init__(self, color, width, height):\n super().__init__()\n self.image = pygame.Surface([width, height])\n self.image.fill(BLACK)\n self.image.set_colorkey(BLACK)\n pygame.draw.rect(self.image, color, [0, 0, width, height])\n self.velocity = [choice([-8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4,\n 5, 6, 7, 8]), randint(-8, 8)]\n self.rect = self.image.get_rect()\n\n def update(self):\n self.rect.x += self.velocity[0]\n self.rect.y += self.velocity[1]\n <mask token>\n\n def destroy(self):\n self.kill()\n",
"step-3": "<mask token>\nBLACK = 0, 0, 0\n\n\nclass Ball(pygame.sprite.Sprite):\n\n def __init__(self, color, width, height):\n super().__init__()\n self.image = pygame.Surface([width, height])\n self.image.fill(BLACK)\n self.image.set_colorkey(BLACK)\n pygame.draw.rect(self.image, color, [0, 0, width, height])\n self.velocity = [choice([-8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4,\n 5, 6, 7, 8]), randint(-8, 8)]\n self.rect = self.image.get_rect()\n\n def update(self):\n self.rect.x += self.velocity[0]\n self.rect.y += self.velocity[1]\n\n def bounce(self):\n self.velocity[0] = -self.velocity[0]\n self.velocity[1] = randint(-8, 8)\n\n def destroy(self):\n self.kill()\n",
"step-4": "import pygame\nfrom random import randint, choice\nBLACK = 0, 0, 0\n\n\nclass Ball(pygame.sprite.Sprite):\n\n def __init__(self, color, width, height):\n super().__init__()\n self.image = pygame.Surface([width, height])\n self.image.fill(BLACK)\n self.image.set_colorkey(BLACK)\n pygame.draw.rect(self.image, color, [0, 0, width, height])\n self.velocity = [choice([-8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4,\n 5, 6, 7, 8]), randint(-8, 8)]\n self.rect = self.image.get_rect()\n\n def update(self):\n self.rect.x += self.velocity[0]\n self.rect.y += self.velocity[1]\n\n def bounce(self):\n self.velocity[0] = -self.velocity[0]\n self.velocity[1] = randint(-8, 8)\n\n def destroy(self):\n self.kill()\n",
"step-5": "import pygame\nfrom random import randint, choice\n\nBLACK = (0,0,0)\n\n#----------------------------------------------------------\n\nclass Ball(pygame.sprite.Sprite):\n \n #------------------------------------------------------\n def __init__(self, color, width, height):\n # Initialize as a Sprite\n super().__init__()\n\n # Draw the ball\n self.image = pygame.Surface([width, height])\n self.image.fill(BLACK)\n self.image.set_colorkey(BLACK)\n\n pygame.draw.rect(self.image, color, [0, 0, width, height])\n\n # Set initial velocity (speed and direction)\n self.velocity = [choice([-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8]), \\\n randint(-8,8)]\n \n # Get the dimensions/location\n self.rect = self.image.get_rect()\n\n #------------------------------------------------------\n def update(self):\n # Update the position\n self.rect.x += self.velocity[0]\n self.rect.y += self.velocity[1]\n\n #------------------------------------------------------\n def bounce(self):\n # Bounce off the paddle with random y velocity\n self.velocity[0] = -self.velocity[0]\n self.velocity[1] = randint(-8,8)\n\n #------------------------------------------------------\n def destroy(self):\n # Destroy the object. Happens if\n # the left or right wall is hit\n self.kill()\n\n#----------------------------------------------------------\n",
"step-ids": [
2,
4,
6,
7,
8
]
}
|
[
2,
4,
6,
7,
8
] |
"""social_website URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.contrib.auth.views import (password_reset, password_reset_done, password_reset_complete,
password_reset_confirm, password_change, password_change_done)
from django.conf import settings
from django.conf.urls.static import static
from account.views import dashboard
urlpatterns = [
path('admin/', admin.site.urls),
path('account/', include('account.urls'), name='account'),
path('images/', include('images.urls', namespace='images')),
path('password_reset/', password_reset, {'template_name': 'registration/password_reset.html'}, name='password_reset'),
path('password_reset/done/', password_reset_done, name='password_reset_done'),
path('password_reset/confirm/<str:uidb64>/<str:token>/', password_reset_confirm, name='password_reset_confirm'),
path('password_reset/complete/', password_reset_complete, name='password_reset_complete'),
path('password_change/', password_change, name='password_change'),
path('password_change/done/', password_change_done, name='password_change_done'),
path('', dashboard, name='dashboard'),
path('social-auth/', include('social_django.urls', namespace='social')),
path('api/accounts/', include('account.api.urls', namespace='api-accounts')),
path('api/images/', include('images.api.urls', namespace='api-images')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
normal
|
{
"blob_id": "bf1221bc9768cff2edb67e0e5f5cea0ee2dd64e5",
"index": 7740,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n",
"step-3": "<mask token>\nurlpatterns = [path('admin/', admin.site.urls), path('account/', include(\n 'account.urls'), name='account'), path('images/', include('images.urls',\n namespace='images')), path('password_reset/', password_reset, {\n 'template_name': 'registration/password_reset.html'}, name=\n 'password_reset'), path('password_reset/done/', password_reset_done,\n name='password_reset_done'), path(\n 'password_reset/confirm/<str:uidb64>/<str:token>/',\n password_reset_confirm, name='password_reset_confirm'), path(\n 'password_reset/complete/', password_reset_complete, name=\n 'password_reset_complete'), path('password_change/', password_change,\n name='password_change'), path('password_change/done/',\n password_change_done, name='password_change_done'), path('', dashboard,\n name='dashboard'), path('social-auth/', include('social_django.urls',\n namespace='social')), path('api/accounts/', include('account.api.urls',\n namespace='api-accounts')), path('api/images/', include(\n 'images.api.urls', namespace='api-images'))]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n",
"step-4": "<mask token>\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom django.contrib.auth.views import password_reset, password_reset_done, password_reset_complete, password_reset_confirm, password_change, password_change_done\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom account.views import dashboard\nurlpatterns = [path('admin/', admin.site.urls), path('account/', include(\n 'account.urls'), name='account'), path('images/', include('images.urls',\n namespace='images')), path('password_reset/', password_reset, {\n 'template_name': 'registration/password_reset.html'}, name=\n 'password_reset'), path('password_reset/done/', password_reset_done,\n name='password_reset_done'), path(\n 'password_reset/confirm/<str:uidb64>/<str:token>/',\n password_reset_confirm, name='password_reset_confirm'), path(\n 'password_reset/complete/', password_reset_complete, name=\n 'password_reset_complete'), path('password_change/', password_change,\n name='password_change'), path('password_change/done/',\n password_change_done, name='password_change_done'), path('', dashboard,\n name='dashboard'), path('social-auth/', include('social_django.urls',\n namespace='social')), path('api/accounts/', include('account.api.urls',\n namespace='api-accounts')), path('api/images/', include(\n 'images.api.urls', namespace='api-images'))]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n",
"step-5": "\"\"\"social_website URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom django.contrib.auth.views import (password_reset, password_reset_done, password_reset_complete,\n password_reset_confirm, password_change, password_change_done)\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nfrom account.views import dashboard\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('account/', include('account.urls'), name='account'),\n path('images/', include('images.urls', namespace='images')),\n path('password_reset/', password_reset, {'template_name': 'registration/password_reset.html'}, name='password_reset'),\n path('password_reset/done/', password_reset_done, name='password_reset_done'),\n path('password_reset/confirm/<str:uidb64>/<str:token>/', password_reset_confirm, name='password_reset_confirm'),\n path('password_reset/complete/', password_reset_complete, name='password_reset_complete'),\n path('password_change/', password_change, name='password_change'),\n path('password_change/done/', password_change_done, name='password_change_done'),\n path('', dashboard, name='dashboard'),\n path('social-auth/', include('social_django.urls', namespace='social')),\n path('api/accounts/', include('account.api.urls', namespace='api-accounts')),\n path('api/images/', include('images.api.urls', 
namespace='api-images')),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class TestNetworkSimulatorService(TestCase):
@patch(
'network_simulator.service.network_topology_handler.write_network_topology_to_file'
)
def setUp(self, write_network_topology_to_file_mock):
self.device_id = 'testid'
self.device_type = 'vm'
self.tap_if_name = 'testtap'
self.device_data_dict = {'device_id': self.device_id, 'device_type':
self.device_type, 'tap_if_name': self.tap_if_name, 'xpos': 5.0,
'ypos': 3.0}
self.test_net_namespace = 'testns'
self.network_svc = NetworkSimulatorService(self.test_net_namespace)
def test_deviceStrRepresentation(self):
device = Device(self.device_data_dict)
str_rep = '{},{},{}'.format(self.device_id, self.device_type, self.
tap_if_name)
self.assertEqual(str_rep, str(device))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_deregisterInvalidDevice(self):
with self.assertRaises(UnknownDeviceException):
self.network_svc.deregister_device(self.device_id)
def create_device(self, device_id):
device = Device(self.device_data_dict)
device.device_id = device_id
device.xpos = 4.0
device.ypos = 3.0
return device
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestNetworkSimulatorService(TestCase):
@patch(
'network_simulator.service.network_topology_handler.write_network_topology_to_file'
)
def setUp(self, write_network_topology_to_file_mock):
self.device_id = 'testid'
self.device_type = 'vm'
self.tap_if_name = 'testtap'
self.device_data_dict = {'device_id': self.device_id, 'device_type':
self.device_type, 'tap_if_name': self.tap_if_name, 'xpos': 5.0,
'ypos': 3.0}
self.test_net_namespace = 'testns'
self.network_svc = NetworkSimulatorService(self.test_net_namespace)
def test_deviceStrRepresentation(self):
device = Device(self.device_data_dict)
str_rep = '{},{},{}'.format(self.device_id, self.device_type, self.
tap_if_name)
self.assertEqual(str_rep, str(device))
def test_registerDeviceTwice(self):
self.network_svc.devices[self.device_id] = ''
with self.assertRaises(DeviceAlreadyRegisteredException):
self.network_svc.register_new_device(self.device_data_dict)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_deregisterInvalidDevice(self):
with self.assertRaises(UnknownDeviceException):
self.network_svc.deregister_device(self.device_id)
def create_device(self, device_id):
device = Device(self.device_data_dict)
device.device_id = device_id
device.xpos = 4.0
device.ypos = 3.0
return device
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestNetworkSimulatorService(TestCase):
@patch(
'network_simulator.service.network_topology_handler.write_network_topology_to_file'
)
def setUp(self, write_network_topology_to_file_mock):
self.device_id = 'testid'
self.device_type = 'vm'
self.tap_if_name = 'testtap'
self.device_data_dict = {'device_id': self.device_id, 'device_type':
self.device_type, 'tap_if_name': self.tap_if_name, 'xpos': 5.0,
'ypos': 3.0}
self.test_net_namespace = 'testns'
self.network_svc = NetworkSimulatorService(self.test_net_namespace)
def test_deviceStrRepresentation(self):
device = Device(self.device_data_dict)
str_rep = '{},{},{}'.format(self.device_id, self.device_type, self.
tap_if_name)
self.assertEqual(str_rep, str(device))
def test_registerDeviceTwice(self):
self.network_svc.devices[self.device_id] = ''
with self.assertRaises(DeviceAlreadyRegisteredException):
self.network_svc.register_new_device(self.device_data_dict)
<|reserved_special_token_0|>
def test_deregisterDevice(self):
self.network_svc.devices[self.device_id] = ''
self.network_svc.deregister_device(self.device_id)
self.assertNotIn(self.device_id, self.network_svc.devices.keys())
def test_deregisterInvalidDevice(self):
with self.assertRaises(UnknownDeviceException):
self.network_svc.deregister_device(self.device_id)
def create_device(self, device_id):
device = Device(self.device_data_dict)
device.device_id = device_id
device.xpos = 4.0
device.ypos = 3.0
return device
<|reserved_special_token_1|>
from unittest import TestCase
from unittest.mock import patch, mock_open, call
from network_simulator.exceptions.device_exceptions import DeviceAlreadyRegisteredException, UnknownDeviceException
from network_simulator.service import NetworkSimulatorService
from network_simulator.service.network_simulator_service import Device
class TestNetworkSimulatorService(TestCase):
    """Unit tests for NetworkSimulatorService device (de)registration and Device."""

    @patch(
        'network_simulator.service.network_topology_handler.write_network_topology_to_file'
        )
    def setUp(self, write_network_topology_to_file_mock):
        """Create a service in a test namespace plus a sample device payload.

        write_network_topology_to_file is patched out for the duration of setUp.
        """
        self.device_id = 'testid'
        self.device_type = 'vm'
        self.tap_if_name = 'testtap'
        self.device_data_dict = {'device_id': self.device_id, 'device_type':
            self.device_type, 'tap_if_name': self.tap_if_name, 'xpos': 5.0,
            'ypos': 3.0}
        self.test_net_namespace = 'testns'
        self.network_svc = NetworkSimulatorService(self.test_net_namespace)

    def test_deviceStrRepresentation(self):
        """str(Device) is '<device_id>,<device_type>,<tap_if_name>'."""
        device = Device(self.device_data_dict)
        str_rep = '{},{},{}'.format(self.device_id, self.device_type, self.
            tap_if_name)
        self.assertEqual(str_rep, str(device))

    def test_registerDeviceTwice(self):
        """Registering an id that is already present raises."""
        self.network_svc.devices[self.device_id] = ''
        with self.assertRaises(DeviceAlreadyRegisteredException):
            self.network_svc.register_new_device(self.device_data_dict)

    def test_registerNewDevice(self):
        """A newly registered device shows up in the service's device table."""
        self.network_svc.register_new_device(self.device_data_dict)
        self.assertIn(self.device_id, self.network_svc.devices.keys())

    def test_deregisterDevice(self):
        """Deregistering drops the id from the device table."""
        self.network_svc.devices[self.device_id] = ''
        self.network_svc.deregister_device(self.device_id)
        self.assertNotIn(self.device_id, self.network_svc.devices.keys())

    def test_deregisterInvalidDevice(self):
        """Deregistering an unknown id raises."""
        with self.assertRaises(UnknownDeviceException):
            self.network_svc.deregister_device(self.device_id)

    def create_device(self, device_id):
        """Helper: build a Device from the sample payload with a custom id."""
        device = Device(self.device_data_dict)
        device.device_id = device_id
        device.xpos = 4.0
        device.ypos = 3.0
        return device
<|reserved_special_token_1|>
from unittest import TestCase
from unittest.mock import patch, mock_open, call
from network_simulator.exceptions.device_exceptions import DeviceAlreadyRegisteredException, UnknownDeviceException
from network_simulator.service import NetworkSimulatorService
from network_simulator.service.network_simulator_service import Device
class TestNetworkSimulatorService(TestCase):
    """Unit tests for NetworkSimulatorService device (de)registration and Device."""

    @patch("network_simulator.service.network_topology_handler.write_network_topology_to_file")
    def setUp(self, write_network_topology_to_file_mock):
        """Build a fresh service in a test namespace and a sample device payload.

        write_network_topology_to_file is patched out for the duration of setUp.
        """
        self.device_id = "testid"
        self.device_type = "vm"
        self.tap_if_name = "testtap"
        self.device_data_dict = {
            "device_id": self.device_id,
            "device_type": self.device_type,
            "tap_if_name": self.tap_if_name,
            "xpos": 5.0,
            "ypos": 3.0,
        }
        self.test_net_namespace = "testns"
        self.network_svc = NetworkSimulatorService(self.test_net_namespace)

    def test_deviceStrRepresentation(self):
        """str(Device) yields '<device_id>,<device_type>,<tap_if_name>'."""
        expected = f"{self.device_id},{self.device_type},{self.tap_if_name}"
        self.assertEqual(expected, str(Device(self.device_data_dict)))

    def test_registerDeviceTwice(self):
        """Registering an id that is already present raises."""
        self.network_svc.devices[self.device_id] = ""
        with self.assertRaises(DeviceAlreadyRegisteredException):
            self.network_svc.register_new_device(self.device_data_dict)

    def test_registerNewDevice(self):
        """A newly registered device shows up in the device table."""
        self.network_svc.register_new_device(self.device_data_dict)
        self.assertIn(self.device_id, self.network_svc.devices)

    def test_deregisterDevice(self):
        """Deregistering drops the id from the device table."""
        self.network_svc.devices[self.device_id] = ""
        self.network_svc.deregister_device(self.device_id)
        self.assertNotIn(self.device_id, self.network_svc.devices)

    def test_deregisterInvalidDevice(self):
        """Deregistering an unknown id raises."""
        with self.assertRaises(UnknownDeviceException):
            self.network_svc.deregister_device(self.device_id)

    # helper
    def create_device(self, device_id):
        """Return a Device built from the sample payload with a custom id."""
        dev = Device(self.device_data_dict)
        dev.device_id = device_id
        dev.xpos = 4.0
        dev.ypos = 3.0
        return dev
|
flexible
|
{
"blob_id": "8e854398084e89b0b8436d6b0a2bf8f36a9c7bd5",
"index": 187,
"step-1": "<mask token>\n\n\nclass TestNetworkSimulatorService(TestCase):\n\n @patch(\n 'network_simulator.service.network_topology_handler.write_network_topology_to_file'\n )\n def setUp(self, write_network_topology_to_file_mock):\n self.device_id = 'testid'\n self.device_type = 'vm'\n self.tap_if_name = 'testtap'\n self.device_data_dict = {'device_id': self.device_id, 'device_type':\n self.device_type, 'tap_if_name': self.tap_if_name, 'xpos': 5.0,\n 'ypos': 3.0}\n self.test_net_namespace = 'testns'\n self.network_svc = NetworkSimulatorService(self.test_net_namespace)\n\n def test_deviceStrRepresentation(self):\n device = Device(self.device_data_dict)\n str_rep = '{},{},{}'.format(self.device_id, self.device_type, self.\n tap_if_name)\n self.assertEqual(str_rep, str(device))\n <mask token>\n <mask token>\n <mask token>\n\n def test_deregisterInvalidDevice(self):\n with self.assertRaises(UnknownDeviceException):\n self.network_svc.deregister_device(self.device_id)\n\n def create_device(self, device_id):\n device = Device(self.device_data_dict)\n device.device_id = device_id\n device.xpos = 4.0\n device.ypos = 3.0\n return device\n",
"step-2": "<mask token>\n\n\nclass TestNetworkSimulatorService(TestCase):\n\n @patch(\n 'network_simulator.service.network_topology_handler.write_network_topology_to_file'\n )\n def setUp(self, write_network_topology_to_file_mock):\n self.device_id = 'testid'\n self.device_type = 'vm'\n self.tap_if_name = 'testtap'\n self.device_data_dict = {'device_id': self.device_id, 'device_type':\n self.device_type, 'tap_if_name': self.tap_if_name, 'xpos': 5.0,\n 'ypos': 3.0}\n self.test_net_namespace = 'testns'\n self.network_svc = NetworkSimulatorService(self.test_net_namespace)\n\n def test_deviceStrRepresentation(self):\n device = Device(self.device_data_dict)\n str_rep = '{},{},{}'.format(self.device_id, self.device_type, self.\n tap_if_name)\n self.assertEqual(str_rep, str(device))\n\n def test_registerDeviceTwice(self):\n self.network_svc.devices[self.device_id] = ''\n with self.assertRaises(DeviceAlreadyRegisteredException):\n self.network_svc.register_new_device(self.device_data_dict)\n <mask token>\n <mask token>\n\n def test_deregisterInvalidDevice(self):\n with self.assertRaises(UnknownDeviceException):\n self.network_svc.deregister_device(self.device_id)\n\n def create_device(self, device_id):\n device = Device(self.device_data_dict)\n device.device_id = device_id\n device.xpos = 4.0\n device.ypos = 3.0\n return device\n",
"step-3": "<mask token>\n\n\nclass TestNetworkSimulatorService(TestCase):\n\n @patch(\n 'network_simulator.service.network_topology_handler.write_network_topology_to_file'\n )\n def setUp(self, write_network_topology_to_file_mock):\n self.device_id = 'testid'\n self.device_type = 'vm'\n self.tap_if_name = 'testtap'\n self.device_data_dict = {'device_id': self.device_id, 'device_type':\n self.device_type, 'tap_if_name': self.tap_if_name, 'xpos': 5.0,\n 'ypos': 3.0}\n self.test_net_namespace = 'testns'\n self.network_svc = NetworkSimulatorService(self.test_net_namespace)\n\n def test_deviceStrRepresentation(self):\n device = Device(self.device_data_dict)\n str_rep = '{},{},{}'.format(self.device_id, self.device_type, self.\n tap_if_name)\n self.assertEqual(str_rep, str(device))\n\n def test_registerDeviceTwice(self):\n self.network_svc.devices[self.device_id] = ''\n with self.assertRaises(DeviceAlreadyRegisteredException):\n self.network_svc.register_new_device(self.device_data_dict)\n <mask token>\n\n def test_deregisterDevice(self):\n self.network_svc.devices[self.device_id] = ''\n self.network_svc.deregister_device(self.device_id)\n self.assertNotIn(self.device_id, self.network_svc.devices.keys())\n\n def test_deregisterInvalidDevice(self):\n with self.assertRaises(UnknownDeviceException):\n self.network_svc.deregister_device(self.device_id)\n\n def create_device(self, device_id):\n device = Device(self.device_data_dict)\n device.device_id = device_id\n device.xpos = 4.0\n device.ypos = 3.0\n return device\n",
"step-4": "from unittest import TestCase\nfrom unittest.mock import patch, mock_open, call\nfrom network_simulator.exceptions.device_exceptions import DeviceAlreadyRegisteredException, UnknownDeviceException\nfrom network_simulator.service import NetworkSimulatorService\nfrom network_simulator.service.network_simulator_service import Device\n\n\nclass TestNetworkSimulatorService(TestCase):\n\n @patch(\n 'network_simulator.service.network_topology_handler.write_network_topology_to_file'\n )\n def setUp(self, write_network_topology_to_file_mock):\n self.device_id = 'testid'\n self.device_type = 'vm'\n self.tap_if_name = 'testtap'\n self.device_data_dict = {'device_id': self.device_id, 'device_type':\n self.device_type, 'tap_if_name': self.tap_if_name, 'xpos': 5.0,\n 'ypos': 3.0}\n self.test_net_namespace = 'testns'\n self.network_svc = NetworkSimulatorService(self.test_net_namespace)\n\n def test_deviceStrRepresentation(self):\n device = Device(self.device_data_dict)\n str_rep = '{},{},{}'.format(self.device_id, self.device_type, self.\n tap_if_name)\n self.assertEqual(str_rep, str(device))\n\n def test_registerDeviceTwice(self):\n self.network_svc.devices[self.device_id] = ''\n with self.assertRaises(DeviceAlreadyRegisteredException):\n self.network_svc.register_new_device(self.device_data_dict)\n\n def test_registerNewDevice(self):\n self.network_svc.register_new_device(self.device_data_dict)\n self.assertIn(self.device_id, self.network_svc.devices.keys())\n\n def test_deregisterDevice(self):\n self.network_svc.devices[self.device_id] = ''\n self.network_svc.deregister_device(self.device_id)\n self.assertNotIn(self.device_id, self.network_svc.devices.keys())\n\n def test_deregisterInvalidDevice(self):\n with self.assertRaises(UnknownDeviceException):\n self.network_svc.deregister_device(self.device_id)\n\n def create_device(self, device_id):\n device = Device(self.device_data_dict)\n device.device_id = device_id\n device.xpos = 4.0\n device.ypos = 3.0\n return 
device\n",
"step-5": "from unittest import TestCase\nfrom unittest.mock import patch, mock_open, call\n\nfrom network_simulator.exceptions.device_exceptions import DeviceAlreadyRegisteredException, UnknownDeviceException\nfrom network_simulator.service import NetworkSimulatorService\nfrom network_simulator.service.network_simulator_service import Device\n\n\nclass TestNetworkSimulatorService(TestCase):\n @patch(\"network_simulator.service.network_topology_handler.write_network_topology_to_file\")\n def setUp(self, write_network_topology_to_file_mock):\n self.device_id = \"testid\"\n self.device_type = \"vm\"\n self.tap_if_name = \"testtap\"\n self.device_data_dict = {\n \"device_id\": self.device_id,\n \"device_type\": self.device_type,\n \"tap_if_name\": self.tap_if_name,\n \"xpos\": 5.0,\n \"ypos\": 3.0\n }\n self.test_net_namespace = \"testns\"\n self.network_svc = NetworkSimulatorService(self.test_net_namespace)\n\n def test_deviceStrRepresentation(self):\n device = Device(self.device_data_dict)\n str_rep = \"{},{},{}\".format(self.device_id, self.device_type, self.tap_if_name)\n self.assertEqual(str_rep, str(device))\n\n def test_registerDeviceTwice(self):\n self.network_svc.devices[self.device_id] = \"\"\n with self.assertRaises(DeviceAlreadyRegisteredException):\n self.network_svc.register_new_device(self.device_data_dict)\n\n def test_registerNewDevice(self):\n self.network_svc.register_new_device(self.device_data_dict)\n self.assertIn(self.device_id, self.network_svc.devices.keys())\n\n def test_deregisterDevice(self):\n self.network_svc.devices[self.device_id] = \"\"\n self.network_svc.deregister_device(self.device_id)\n self.assertNotIn(self.device_id, self.network_svc.devices.keys())\n\n def test_deregisterInvalidDevice(self):\n with self.assertRaises(UnknownDeviceException):\n self.network_svc.deregister_device(self.device_id)\n\n # helper\n def create_device(self, device_id):\n device = Device(self.device_data_dict)\n device.device_id = device_id\n device.xpos = 
4.0\n device.ypos = 3.0\n return device\n\n\n\n",
"step-ids": [
5,
6,
7,
9,
10
]
}
|
[
5,
6,
7,
9,
10
] |
# Production settings used on Heroku; extends the base settings module.
from .settings import *
# Heroku configuration:
# parse the database connection from the $DATABASE_URL environment variable.
import dj_database_url
DATABASES = {'default': dj_database_url.config()}
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
# (Heroku terminates TLS at the router and forwards plain HTTP).
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Optionally layer machine-local overrides from local_settings.py, if present.
# NOTE(review): the broad `except Exception` also hides real errors (e.g. a
# syntax error inside local_settings.py) -- consider narrowing to ImportError.
try:
    from .local_settings import *
except Exception as e:
    pass
# django-allauth configuration
# Username is not required at signup (e-mail based accounts).
ACCOUNT_USERNAME_REQUIRED = False
# ACCOUNT_AUTHENTICATION_METHOD = "username", "email", "username_email"
# E-mail verification method during signup: "mandatory", "optional" or "none".
# "mandatory" blocks login until the address is verified; "optional" still
# sends the verification mail; "none" sends no verification mails at all.
ACCOUNT_EMAIL_VERIFICATION = "none"
# Whether a mere GET request logs the user out (see allauth LogoutView docs).
ACCOUNT_LOGOUT_ON_GET = False
# Request the e-mail address from 3rd-party account providers,
# e.g. via OpenID AX or the Facebook "email" permission.
SOCIALACCOUNT_QUERY_EMAIL = True
# Provider-specific settings.
SOCIALACCOUNT_PROVIDERS = {
    'facebook': {
        # Use the Facebook JS SDK instead of the OAuth2 flow.
        'METHOD': 'js_sdk',
        'SCOPE': ['email', 'public_profile', 'user_friends'],
        # AUTH_PARAMS passes extra parameters along
        # to the FB.login JS SDK call.
        'AUTH_PARAMS': {'auth_type': 'reauthenticate'},
        # Fields fetched from the Graph API.
        'FIELDS': ['first_name', 'last_name', 'email', 'birthday'],
        # The JS SDK returns a short-lived token suitable for client-side use.
        'EXCHANGE_TOKEN': True,
        # Callable choosing the active locale for the request.
        # NOTE(review): 'path.to.callable' looks like a placeholder dotted
        # path -- confirm it is replaced in deployment.
        'LOCALE_FUNC': 'path.to.callable',
        'VERIFIED_EMAIL': False,
        # Facebook Graph API version.
        'VERSION': 'v2.7'
    },
    'linkedin': {
        'SCOPE': ['r_emailaddress'],
        'PROFILE_FIELDS': [
            'id',
            'first-name',
            'last-name',
            'email-address',
            'public-profile-url'
        ]
    }
}
# Where to send the user after a successful login.
LOGIN_REDIRECT_URL = "/blog/jobs"
# Default settings
BOOTSTRAP3 = {
# The URL to the jQuery JavaScript file
'jquery_url': '//code.jquery.com/jquery.min.js',
# The Bootstrap base URL
'base_url': '//maxcdn.bootstrapcdn.com/bootstrap/3.3.7/',
# The complete URL to the Bootstrap CSS file (None means derive it from base_url)
'css_url': None,
# The complete URL to the Bootstrap CSS file (None means no theme)
'theme_url': None,
# The complete URL to the Bootstrap JavaScript file (None means derive it from base_url)
'javascript_url': None,
# Put JavaScript in the HEAD section of the HTML document (only relevant if you use bootstrap3.html)
'javascript_in_head': False,
# Include jQuery with Bootstrap JavaScript (affects django-bootstrap3 template tags)
'include_jquery': False,
# Label class to use in horizontal forms
'horizontal_label_class': 'col-md-3',
# Field class to use in horizontal forms
'horizontal_field_class': 'col-md-9',
# Set HTML required attribute on required fields
'set_required': True,
# Set HTML disabled attribute on disabled fields
'set_disabled': False,
# Set placeholder attributes to label if no placeholder is provided
'set_placeholder': True,
# Class to indicate required (better to set this in your Django form)
'required_css_class': '',
# Class to indicate error (better to set this in your Django form)
'error_css_class': 'has-error',
# Class to indicate success, meaning the field has valid input (better to set this in your Django form)
'success_css_class': 'has-success',
# Renderers (only set these if you have studied the source and understand the inner workings)
'formset_renderers':{
'default': 'bootstrap3.renderers.FormsetRenderer',
},
'form_renderers': {
'default': 'bootstrap3.renderers.FormRenderer',
},
'field_renderers': {
'default': 'bootstrap3.renderers.FieldRenderer',
'inline': 'bootstrap3.renderers.InlineFieldRenderer',
},
}
# Axes Configurations
# Number of login attempts allowed before a record is created for the failed logins.
AXES_LOGIN_FAILURE_LIMIT = 3
# After the number os allowed login attempts are exceeded, should we lock this IP (and optinal user agend)?
AXES_LOCK_OUT_AT_FAILURE = True
# If True, lock out / log based on an IP address AND a user agent. This means requests from different import user
# agents but from the import same IP are treated differently.
AXES_USE_USER_AGENT = True
# Defines a period of inactivity after which old failed login attempts will be forgotten. You can set to a
# python timedelta object or an integer, if you set it to be integer it will represent a number of hours
AXES_COOLOFF_TIME = 50
# Specifies a logging mechanism for axes to use
AXES_LOCKOUT_TEMPLATE = 'axes.watch_login'
# Specifies a template to render when a user is locked out. Template receives cooloff_time and failure_limit as
# context variables
AXES_LOCKOUT_TEMPLATE = None
# Specifies a URL to redirect to on lockout. If both AXES_LOCKOUT_TEMPLATE and AXES_LOCKOUT_URL are set, the template
# will be used
AXES_LOCKOUT_URL = None
# If Truem you'll see slightly more logging for Axes
AXES_VERBOSE = True
# The name of the for field that contains your usernames
# AXES_USERNAME_FORM_FIELD = username
# If True prevents to login from IP import under particular user if attempts limit exceed, otherwise lock out based on
# IP. Default: False
AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = False
# Crispy forms will use BOOTSTRAP3 TEMPLATE PACK
CRISPY_TEMPLATE_PACK = "bootstrap3"
# Signal Admins Configurations
ADMINS = (
("Petar Pilipovic", "petar@literatillc.com"),
)
# RESTframework Permission classes configuration
REST_FRAMEWORK = {
"DEFAULT_PERMISSION_CLASSES": [
"rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly"
]
}
|
normal
|
{
"blob_id": "8bb86cae3387a0d4ce5987f3e3c458c8298174e0",
"index": 7342,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n from .local_settings import *\nexcept Exception as e:\n pass\n<mask token>\n",
"step-3": "<mask token>\nDATABASES = {'default': dj_database_url.config()}\nSECURE_PROXY_SSL_HEADER = 'HTTP_X_FORWARDED_PROTO', 'https'\ntry:\n from .local_settings import *\nexcept Exception as e:\n pass\nACCOUNT_USERNAME_REQUIRED = False\nACCOUNT_EMAIL_VERIFICATION = 'none'\nACCOUNT_LOGOUT_ON_GET = False\nSOCIALACCOUNT_QUERY_EMAIL = True\nSOCIALACCOUNT_PROVIDERS = {'facebook': {'METHOD': 'js_sdk', 'SCOPE': [\n 'email', 'public_profile', 'user_friends'], 'AUTH_PARAMS': {'auth_type':\n 'reauthenticate'}, 'FIELDS': ['first_name', 'last_name', 'email',\n 'birthday'], 'EXCHANGE_TOKEN': True, 'LOCALE_FUNC': 'path.to.callable',\n 'VERIFIED_EMAIL': False, 'VERSION': 'v2.7'}, 'linkedin': {'SCOPE': [\n 'r_emailaddress'], 'PROFILE_FIELDS': ['id', 'first-name', 'last-name',\n 'email-address', 'public-profile-url']}}\nLOGIN_REDIRECT_URL = '/blog/jobs'\nBOOTSTRAP3 = {'jquery_url': '//code.jquery.com/jquery.min.js', 'base_url':\n '//maxcdn.bootstrapcdn.com/bootstrap/3.3.7/', 'css_url': None,\n 'theme_url': None, 'javascript_url': None, 'javascript_in_head': False,\n 'include_jquery': False, 'horizontal_label_class': 'col-md-3',\n 'horizontal_field_class': 'col-md-9', 'set_required': True,\n 'set_disabled': False, 'set_placeholder': True, 'required_css_class':\n '', 'error_css_class': 'has-error', 'success_css_class': 'has-success',\n 'formset_renderers': {'default': 'bootstrap3.renderers.FormsetRenderer'\n }, 'form_renderers': {'default': 'bootstrap3.renderers.FormRenderer'},\n 'field_renderers': {'default': 'bootstrap3.renderers.FieldRenderer',\n 'inline': 'bootstrap3.renderers.InlineFieldRenderer'}}\nAXES_LOGIN_FAILURE_LIMIT = 3\nAXES_LOCK_OUT_AT_FAILURE = True\nAXES_USE_USER_AGENT = True\nAXES_COOLOFF_TIME = 50\nAXES_LOCKOUT_TEMPLATE = 'axes.watch_login'\nAXES_LOCKOUT_TEMPLATE = None\nAXES_LOCKOUT_URL = None\nAXES_VERBOSE = True\nAXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = False\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\nADMINS = ('Petar Pilipovic', 
'petar@literatillc.com'),\nREST_FRAMEWORK = {'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly']}\n",
"step-4": "from .settings import *\nimport dj_database_url\nDATABASES = {'default': dj_database_url.config()}\nSECURE_PROXY_SSL_HEADER = 'HTTP_X_FORWARDED_PROTO', 'https'\ntry:\n from .local_settings import *\nexcept Exception as e:\n pass\nACCOUNT_USERNAME_REQUIRED = False\nACCOUNT_EMAIL_VERIFICATION = 'none'\nACCOUNT_LOGOUT_ON_GET = False\nSOCIALACCOUNT_QUERY_EMAIL = True\nSOCIALACCOUNT_PROVIDERS = {'facebook': {'METHOD': 'js_sdk', 'SCOPE': [\n 'email', 'public_profile', 'user_friends'], 'AUTH_PARAMS': {'auth_type':\n 'reauthenticate'}, 'FIELDS': ['first_name', 'last_name', 'email',\n 'birthday'], 'EXCHANGE_TOKEN': True, 'LOCALE_FUNC': 'path.to.callable',\n 'VERIFIED_EMAIL': False, 'VERSION': 'v2.7'}, 'linkedin': {'SCOPE': [\n 'r_emailaddress'], 'PROFILE_FIELDS': ['id', 'first-name', 'last-name',\n 'email-address', 'public-profile-url']}}\nLOGIN_REDIRECT_URL = '/blog/jobs'\nBOOTSTRAP3 = {'jquery_url': '//code.jquery.com/jquery.min.js', 'base_url':\n '//maxcdn.bootstrapcdn.com/bootstrap/3.3.7/', 'css_url': None,\n 'theme_url': None, 'javascript_url': None, 'javascript_in_head': False,\n 'include_jquery': False, 'horizontal_label_class': 'col-md-3',\n 'horizontal_field_class': 'col-md-9', 'set_required': True,\n 'set_disabled': False, 'set_placeholder': True, 'required_css_class':\n '', 'error_css_class': 'has-error', 'success_css_class': 'has-success',\n 'formset_renderers': {'default': 'bootstrap3.renderers.FormsetRenderer'\n }, 'form_renderers': {'default': 'bootstrap3.renderers.FormRenderer'},\n 'field_renderers': {'default': 'bootstrap3.renderers.FieldRenderer',\n 'inline': 'bootstrap3.renderers.InlineFieldRenderer'}}\nAXES_LOGIN_FAILURE_LIMIT = 3\nAXES_LOCK_OUT_AT_FAILURE = True\nAXES_USE_USER_AGENT = True\nAXES_COOLOFF_TIME = 50\nAXES_LOCKOUT_TEMPLATE = 'axes.watch_login'\nAXES_LOCKOUT_TEMPLATE = None\nAXES_LOCKOUT_URL = None\nAXES_VERBOSE = True\nAXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = False\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\nADMINS = ('Petar 
Pilipovic', 'petar@literatillc.com'),\nREST_FRAMEWORK = {'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly']}\n",
"step-5": "from .settings import *\n\n\n\n# Heroku Configurations\n# Parse database configuration from $DATABASE_URL\nimport dj_database_url\n\nDATABASES = {'default': dj_database_url.config()}\n\n# Honor the 'X-Forwarded-Proto' header for request.is_secure()\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\n# loading local_settings.py\ntry:\n from .local_settings import *\nexcept Exception as e:\n pass\n\n# ALLAUTH configuration\n\n\n# Specific the login method to use\nACCOUNT_USERNAME_REQUIRED = False\n# ACCOUNT_AUTHENTICATION_METHOD = \"username\", \"email\", \"username_email\"\n\n# Determines the e-mail verification method during signup – choose one of “mandatory”, “optional”, or “none”.\n# When set to “mandatory” the user is blocked from logging in until the email address is verified.\n# Choose “optional” or “none” to allow logins with an unverified e-mail address.\n# In case of “optional”, the e-mail verification mail is still sent,\n# whereas in case of “none” no e-mail verification mails are sent.\nACCOUNT_EMAIL_VERIFICATION = \"none\"\n\n# Determines whether or not the user is automatically logged out by a mere GET request.\n# See documentation for the LogoutView for details.\nACCOUNT_LOGOUT_ON_GET = False\n\n# Request e-mail address from 3rd import party account provider?\n# E.g. 
using OpenID AX, or the Facebook “email” permission.\nSOCIALACCOUNT_QUERY_EMAIL = True\n\n# Dictionary containing provider specific settings.\nSOCIALACCOUNT_PROVIDERS = {\n 'facebook': {\n # we use facebook js_sdk instead od oauth2\n 'METHOD': 'js_sdk',\n 'SCOPE': ['email', 'public_profile', 'user_friends'],\n # using AUTH_PARAMS to pass along other parametees\n # to the FB.login JS SDK call\n 'AUTH_PARAMS': {'auth_type': 'reauthenticate'},\n # field are fetch from the import Graph API\n 'FIELDS': ['first_name', 'last_name', 'email', 'birthday'],\n # JS SDK return a short-lived token suitable for client-side use.\n 'EXCHANGE_TOKEN': True,\n # Chose the current active language of the request\n 'LOCALE_FUNC': 'path.to.callable',\n 'VERIFIED_EMAIL': False,\n # Facebook Graph API version\n 'VERSION': 'v2.7'\n },\n 'linkedin': {\n 'SCOPE': ['r_emailaddress'],\n 'PROFILE_FIELDS': [\n 'id',\n 'first-name',\n 'last-name',\n 'email-address',\n 'public-profile-url'\n ]\n }\n}\n\n# login redirect url\nLOGIN_REDIRECT_URL = \"/blog/jobs\"\n\n# Default settings\nBOOTSTRAP3 = {\n\n # The URL to the jQuery JavaScript file\n 'jquery_url': '//code.jquery.com/jquery.min.js',\n\n # The Bootstrap base URL\n 'base_url': '//maxcdn.bootstrapcdn.com/bootstrap/3.3.7/',\n\n # The complete URL to the Bootstrap CSS file (None means derive it from base_url)\n 'css_url': None,\n\n # The complete URL to the Bootstrap CSS file (None means no theme)\n 'theme_url': None,\n\n # The complete URL to the Bootstrap JavaScript file (None means derive it from base_url)\n 'javascript_url': None,\n\n # Put JavaScript in the HEAD section of the HTML document (only relevant if you use bootstrap3.html)\n 'javascript_in_head': False,\n\n # Include jQuery with Bootstrap JavaScript (affects django-bootstrap3 template tags)\n 'include_jquery': False,\n\n # Label class to use in horizontal forms\n 'horizontal_label_class': 'col-md-3',\n\n # Field class to use in horizontal forms\n 'horizontal_field_class': 
'col-md-9',\n\n # Set HTML required attribute on required fields\n 'set_required': True,\n\n # Set HTML disabled attribute on disabled fields\n 'set_disabled': False,\n\n # Set placeholder attributes to label if no placeholder is provided\n 'set_placeholder': True,\n\n # Class to indicate required (better to set this in your Django form)\n 'required_css_class': '',\n\n # Class to indicate error (better to set this in your Django form)\n 'error_css_class': 'has-error',\n\n # Class to indicate success, meaning the field has valid input (better to set this in your Django form)\n 'success_css_class': 'has-success',\n\n # Renderers (only set these if you have studied the source and understand the inner workings)\n 'formset_renderers':{\n 'default': 'bootstrap3.renderers.FormsetRenderer',\n },\n 'form_renderers': {\n 'default': 'bootstrap3.renderers.FormRenderer',\n },\n 'field_renderers': {\n 'default': 'bootstrap3.renderers.FieldRenderer',\n 'inline': 'bootstrap3.renderers.InlineFieldRenderer',\n },\n}\n\n# Axes Configurations\n# Number of login attempts allowed before a record is created for the failed logins.\nAXES_LOGIN_FAILURE_LIMIT = 3\n\n# After the number os allowed login attempts are exceeded, should we lock this IP (and optinal user agend)?\nAXES_LOCK_OUT_AT_FAILURE = True\n\n# If True, lock out / log based on an IP address AND a user agent. This means requests from different import user\n# agents but from the import same IP are treated differently.\nAXES_USE_USER_AGENT = True\n\n# Defines a period of inactivity after which old failed login attempts will be forgotten. You can set to a\n# python timedelta object or an integer, if you set it to be integer it will represent a number of hours\nAXES_COOLOFF_TIME = 50\n\n# Specifies a logging mechanism for axes to use\nAXES_LOCKOUT_TEMPLATE = 'axes.watch_login'\n\n# Specifies a template to render when a user is locked out. 
Template receives cooloff_time and failure_limit as\n# context variables\nAXES_LOCKOUT_TEMPLATE = None\n\n# Specifies a URL to redirect to on lockout. If both AXES_LOCKOUT_TEMPLATE and AXES_LOCKOUT_URL are set, the template\n# will be used\nAXES_LOCKOUT_URL = None\n\n# If Truem you'll see slightly more logging for Axes\nAXES_VERBOSE = True\n\n# The name of the for field that contains your usernames\n# AXES_USERNAME_FORM_FIELD = username\n\n# If True prevents to login from IP import under particular user if attempts limit exceed, otherwise lock out based on\n# IP. Default: False\nAXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = False\n\n# Crispy forms will use BOOTSTRAP3 TEMPLATE PACK\nCRISPY_TEMPLATE_PACK = \"bootstrap3\"\n\n# Signal Admins Configurations\nADMINS = (\n (\"Petar Pilipovic\", \"petar@literatillc.com\"),\n)\n\n# RESTframework Permission classes configuration\nREST_FRAMEWORK = {\n \"DEFAULT_PERMISSION_CLASSES\": [\n \"rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly\"\n ]\n}\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.db import models
# Create your models here.
class Todo(models.Model):
    """A single todo item with a title and a completion flag."""
    # Short, free-form description of the task (max 200 characters).
    title = models.CharField(max_length=200)
    # Completion state stored as an integer defaulting to 0.
    # NOTE(review): presumably 0 = pending / 1 = done; a BooleanField would be
    # more idiomatic - confirm before changing (it would require a migration).
    completed = models.IntegerField(default=0)
|
normal
|
{
"blob_id": "4b075d8211d7047f6f08fe6f6f55e4703bdb6f1f",
"index": 3164,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Todo(models.Model):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Todo(models.Model):\n title = models.CharField(max_length=200)\n completed = models.IntegerField(default=0)\n",
"step-4": "from django.db import models\n\n\nclass Todo(models.Model):\n title = models.CharField(max_length=200)\n completed = models.IntegerField(default=0)\n",
"step-5": "from django.db import models\n\n# Create your models here.\nclass Todo(models.Model):\n\ttitle = models.CharField(max_length=200)\n\tcompleted = models.IntegerField(default=0)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Node:
def __init__(self, value, left=None, right=None):
self.value = value
self.left = left
self.right = right
<|reserved_special_token_0|>
def tree_to_array_bfs(root):
q = Queue(maxsize=0)
q.put(root)
array = []
def bfs():
if q.empty():
return
else:
node = q.get()
array.append(node.value)
if node.left != None:
q.put(node.left)
if node.right != None:
q.put(node.right)
bfs()
return
bfs()
return array
def findClosestValueInBst(tree, target):
distance = abs(tree.value - target)
value = tree.value
def dfs(node):
nonlocal distance, value
if node is None:
return value
if node.value == target:
return target
if abs(node.value - target) < distance:
value = node.value
distance = abs(value - target)
if node.value > target:
return dfs(node.left)
elif node.value < target:
return dfs(node.right)
return dfs(tree)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Node:
def __init__(self, value, left=None, right=None):
self.value = value
self.left = left
self.right = right
def array_to_tree_dfs(array):
n = len(array)
if n > 0:
root = Node(array[0])
def dfs(node, index):
if 2 * (index + 1) - 1 < n and array[2 * (index + 1) - 1] is not None:
node.left = Node(array[2 * (index + 1) - 1])
dfs(node.left, 2 * (index + 1) - 1)
if 2 * (index + 1) < n and array[2 * (index + 1)] is not None:
node.right = Node(array[2 * (index + 1)])
dfs(node.right, 2 * (index + 1))
return
dfs(root, 0)
return root
def tree_to_array_bfs(root):
q = Queue(maxsize=0)
q.put(root)
array = []
def bfs():
if q.empty():
return
else:
node = q.get()
array.append(node.value)
if node.left != None:
q.put(node.left)
if node.right != None:
q.put(node.right)
bfs()
return
bfs()
return array
def findClosestValueInBst(tree, target):
distance = abs(tree.value - target)
value = tree.value
def dfs(node):
nonlocal distance, value
if node is None:
return value
if node.value == target:
return target
if abs(node.value - target) < distance:
value = node.value
distance = abs(value - target)
if node.value > target:
return dfs(node.left)
elif node.value < target:
return dfs(node.right)
return dfs(tree)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Node:
def __init__(self, value, left=None, right=None):
self.value = value
self.left = left
self.right = right
def array_to_tree_dfs(array):
n = len(array)
if n > 0:
root = Node(array[0])
def dfs(node, index):
if 2 * (index + 1) - 1 < n and array[2 * (index + 1) - 1] is not None:
node.left = Node(array[2 * (index + 1) - 1])
dfs(node.left, 2 * (index + 1) - 1)
if 2 * (index + 1) < n and array[2 * (index + 1)] is not None:
node.right = Node(array[2 * (index + 1)])
dfs(node.right, 2 * (index + 1))
return
dfs(root, 0)
return root
def tree_to_array_bfs(root):
q = Queue(maxsize=0)
q.put(root)
array = []
def bfs():
if q.empty():
return
else:
node = q.get()
array.append(node.value)
if node.left != None:
q.put(node.left)
if node.right != None:
q.put(node.right)
bfs()
return
bfs()
return array
def findClosestValueInBst(tree, target):
distance = abs(tree.value - target)
value = tree.value
def dfs(node):
nonlocal distance, value
if node is None:
return value
if node.value == target:
return target
if abs(node.value - target) < distance:
value = node.value
distance = abs(value - target)
if node.value > target:
return dfs(node.left)
elif node.value < target:
return dfs(node.right)
return dfs(tree)
if __name__ == '__main__':
array = [5, 3, 10, 2, 4, 8] + [None] * 6 + [9] + [None] * 2
root = array_to_tree_dfs(array)
new_array = tree_to_array_bfs(root)
print(new_array)
print(findClosestValueInBst(root, 6))
<|reserved_special_token_1|>
from queue import Queue
class Node:
def __init__(self, value, left=None, right=None):
self.value = value
self.left = left
self.right = right
def array_to_tree_dfs(array):
n = len(array)
if n > 0:
root = Node(array[0])
def dfs(node, index):
if 2 * (index + 1) - 1 < n and array[2 * (index + 1) - 1] is not None:
node.left = Node(array[2 * (index + 1) - 1])
dfs(node.left, 2 * (index + 1) - 1)
if 2 * (index + 1) < n and array[2 * (index + 1)] is not None:
node.right = Node(array[2 * (index + 1)])
dfs(node.right, 2 * (index + 1))
return
dfs(root, 0)
return root
def tree_to_array_bfs(root):
q = Queue(maxsize=0)
q.put(root)
array = []
def bfs():
if q.empty():
return
else:
node = q.get()
array.append(node.value)
if node.left != None:
q.put(node.left)
if node.right != None:
q.put(node.right)
bfs()
return
bfs()
return array
def findClosestValueInBst(tree, target):
distance = abs(tree.value - target)
value = tree.value
def dfs(node):
nonlocal distance, value
if node is None:
return value
if node.value == target:
return target
if abs(node.value - target) < distance:
value = node.value
distance = abs(value - target)
if node.value > target:
return dfs(node.left)
elif node.value < target:
return dfs(node.right)
return dfs(tree)
if __name__ == '__main__':
array = [5, 3, 10, 2, 4, 8] + [None] * 6 + [9] + [None] * 2
root = array_to_tree_dfs(array)
new_array = tree_to_array_bfs(root)
print(new_array)
print(findClosestValueInBst(root, 6))
<|reserved_special_token_1|>
from queue import Queue
class Node:
    """A binary-tree node holding a value plus optional left/right children."""

    def __init__(self, value, left=None, right=None):
        # Children default to None so a leaf can be built from a value alone.
        self.value = value
        self.left = left
        self.right = right
def array_to_tree_dfs(array):
    """Build a binary tree from its level-order (heap-style) array form.

    The children of index i live at 2*i + 1 and 2*i + 2; a None entry marks
    an absent node.  Returns the root Node, or None for an empty array.
    """
    size = len(array)
    if size == 0:
        return None
    root = Node(array[0])

    def attach_children(parent, index):
        # 2*(index+1)-1 == 2*index+1 and 2*(index+1) == 2*index+2.
        left_idx = 2 * index + 1
        right_idx = left_idx + 1
        if left_idx < size and array[left_idx] is not None:
            parent.left = Node(array[left_idx])
            attach_children(parent.left, left_idx)
        if right_idx < size and array[right_idx] is not None:
            parent.right = Node(array[right_idx])
            attach_children(parent.right, right_idx)

    attach_children(root, 0)
    return root
def tree_to_array_bfs(root):
    """Serialize a binary tree into a list of node values in level (BFS) order.

    Only present nodes are emitted - no placeholders for missing children.
    Returns an empty list when *root* is None.
    """
    # Guard: the original recursive version dereferenced node.value on a
    # None root and crashed; an empty tree now yields an empty list.
    if root is None:
        return []
    array = []
    q = Queue(maxsize=0)  # maxsize=0 means an unbounded FIFO queue
    q.put(root)
    # Iterative loop instead of a recursive helper: visits nodes in the same
    # order but cannot hit the recursion limit on large trees.
    while not q.empty():
        node = q.get()
        array.append(node.value)
        if node.left is not None:
            q.put(node.left)
        if node.right is not None:
            q.put(node.right)
    return array
def findClosestValueInBst(tree, target):
    """Return the value stored in the BST that is closest to *target*.

    An exact match returns the target itself; on a tie the value seen
    earlier on the root-to-leaf path is kept.
    """
    best_value = tree.value
    best_distance = abs(tree.value - target)
    node = tree
    # Iterative descent: the BST ordering means only one subtree can
    # contain a closer value at each step.
    while node is not None:
        if node.value == target:
            return target
        gap = abs(node.value - target)
        # Strict < keeps the first-seen value on ties, matching the
        # original recursive behaviour.
        if gap < best_distance:
            best_value = node.value
            best_distance = gap
        node = node.left if node.value > target else node.right
    return best_value
if __name__ == '__main__':
    # Level-order layout: 5 at the root, children 3/10, grandchildren 2/4/8,
    # the None padding leaves slots empty so 9 lands as the right child of 8.
    array = [5,3,10,2,4,8] + [None]*6 + [9] + [None]*2
    root = array_to_tree_dfs(array)
    # Round-trip check: BFS serialization drops the None placeholders.
    new_array = tree_to_array_bfs(root)
    print(new_array)
    # Closest BST value to 6 in this tree.
    print(findClosestValueInBst(root, 6))
|
flexible
|
{
"blob_id": "a52762fb13c04ced07a41a752578c4173d1eac42",
"index": 8350,
"step-1": "<mask token>\n\n\nclass Node:\n\n def __init__(self, value, left=None, right=None):\n self.value = value\n self.left = left\n self.right = right\n\n\n<mask token>\n\n\ndef tree_to_array_bfs(root):\n q = Queue(maxsize=0)\n q.put(root)\n array = []\n\n def bfs():\n if q.empty():\n return\n else:\n node = q.get()\n array.append(node.value)\n if node.left != None:\n q.put(node.left)\n if node.right != None:\n q.put(node.right)\n bfs()\n return\n bfs()\n return array\n\n\ndef findClosestValueInBst(tree, target):\n distance = abs(tree.value - target)\n value = tree.value\n\n def dfs(node):\n nonlocal distance, value\n if node is None:\n return value\n if node.value == target:\n return target\n if abs(node.value - target) < distance:\n value = node.value\n distance = abs(value - target)\n if node.value > target:\n return dfs(node.left)\n elif node.value < target:\n return dfs(node.right)\n return dfs(tree)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Node:\n\n def __init__(self, value, left=None, right=None):\n self.value = value\n self.left = left\n self.right = right\n\n\ndef array_to_tree_dfs(array):\n n = len(array)\n if n > 0:\n root = Node(array[0])\n\n def dfs(node, index):\n if 2 * (index + 1) - 1 < n and array[2 * (index + 1) - 1] is not None:\n node.left = Node(array[2 * (index + 1) - 1])\n dfs(node.left, 2 * (index + 1) - 1)\n if 2 * (index + 1) < n and array[2 * (index + 1)] is not None:\n node.right = Node(array[2 * (index + 1)])\n dfs(node.right, 2 * (index + 1))\n return\n dfs(root, 0)\n return root\n\n\ndef tree_to_array_bfs(root):\n q = Queue(maxsize=0)\n q.put(root)\n array = []\n\n def bfs():\n if q.empty():\n return\n else:\n node = q.get()\n array.append(node.value)\n if node.left != None:\n q.put(node.left)\n if node.right != None:\n q.put(node.right)\n bfs()\n return\n bfs()\n return array\n\n\ndef findClosestValueInBst(tree, target):\n distance = abs(tree.value - target)\n value = tree.value\n\n def dfs(node):\n nonlocal distance, value\n if node is None:\n return value\n if node.value == target:\n return target\n if abs(node.value - target) < distance:\n value = node.value\n distance = abs(value - target)\n if node.value > target:\n return dfs(node.left)\n elif node.value < target:\n return dfs(node.right)\n return dfs(tree)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Node:\n\n def __init__(self, value, left=None, right=None):\n self.value = value\n self.left = left\n self.right = right\n\n\ndef array_to_tree_dfs(array):\n n = len(array)\n if n > 0:\n root = Node(array[0])\n\n def dfs(node, index):\n if 2 * (index + 1) - 1 < n and array[2 * (index + 1) - 1] is not None:\n node.left = Node(array[2 * (index + 1) - 1])\n dfs(node.left, 2 * (index + 1) - 1)\n if 2 * (index + 1) < n and array[2 * (index + 1)] is not None:\n node.right = Node(array[2 * (index + 1)])\n dfs(node.right, 2 * (index + 1))\n return\n dfs(root, 0)\n return root\n\n\ndef tree_to_array_bfs(root):\n q = Queue(maxsize=0)\n q.put(root)\n array = []\n\n def bfs():\n if q.empty():\n return\n else:\n node = q.get()\n array.append(node.value)\n if node.left != None:\n q.put(node.left)\n if node.right != None:\n q.put(node.right)\n bfs()\n return\n bfs()\n return array\n\n\ndef findClosestValueInBst(tree, target):\n distance = abs(tree.value - target)\n value = tree.value\n\n def dfs(node):\n nonlocal distance, value\n if node is None:\n return value\n if node.value == target:\n return target\n if abs(node.value - target) < distance:\n value = node.value\n distance = abs(value - target)\n if node.value > target:\n return dfs(node.left)\n elif node.value < target:\n return dfs(node.right)\n return dfs(tree)\n\n\nif __name__ == '__main__':\n array = [5, 3, 10, 2, 4, 8] + [None] * 6 + [9] + [None] * 2\n root = array_to_tree_dfs(array)\n new_array = tree_to_array_bfs(root)\n print(new_array)\n print(findClosestValueInBst(root, 6))\n",
"step-4": "from queue import Queue\n\n\nclass Node:\n\n def __init__(self, value, left=None, right=None):\n self.value = value\n self.left = left\n self.right = right\n\n\ndef array_to_tree_dfs(array):\n n = len(array)\n if n > 0:\n root = Node(array[0])\n\n def dfs(node, index):\n if 2 * (index + 1) - 1 < n and array[2 * (index + 1) - 1] is not None:\n node.left = Node(array[2 * (index + 1) - 1])\n dfs(node.left, 2 * (index + 1) - 1)\n if 2 * (index + 1) < n and array[2 * (index + 1)] is not None:\n node.right = Node(array[2 * (index + 1)])\n dfs(node.right, 2 * (index + 1))\n return\n dfs(root, 0)\n return root\n\n\ndef tree_to_array_bfs(root):\n q = Queue(maxsize=0)\n q.put(root)\n array = []\n\n def bfs():\n if q.empty():\n return\n else:\n node = q.get()\n array.append(node.value)\n if node.left != None:\n q.put(node.left)\n if node.right != None:\n q.put(node.right)\n bfs()\n return\n bfs()\n return array\n\n\ndef findClosestValueInBst(tree, target):\n distance = abs(tree.value - target)\n value = tree.value\n\n def dfs(node):\n nonlocal distance, value\n if node is None:\n return value\n if node.value == target:\n return target\n if abs(node.value - target) < distance:\n value = node.value\n distance = abs(value - target)\n if node.value > target:\n return dfs(node.left)\n elif node.value < target:\n return dfs(node.right)\n return dfs(tree)\n\n\nif __name__ == '__main__':\n array = [5, 3, 10, 2, 4, 8] + [None] * 6 + [9] + [None] * 2\n root = array_to_tree_dfs(array)\n new_array = tree_to_array_bfs(root)\n print(new_array)\n print(findClosestValueInBst(root, 6))\n",
"step-5": "from queue import Queue\n\nclass Node():\n def __init__(self, value, left=None, right=None):\n self.value = value\n self.left = left\n self.right = right\n\n\ndef array_to_tree_dfs(array):\n n = len(array)\n if n>0:\n root = Node(array[0])\n\n def dfs(node, index):\n # if index >= n:\n # return\n # else:\n if 2*(index+1) -1 < n and array[2*(index+1) -1] is not None:\n node.left = Node(array[2*(index+1) -1])\n dfs(node.left, 2*(index+1) -1)\n if 2*(index+1) < n and array[2*(index+1)] is not None:\n node.right = Node(array[2*(index+1)])\n dfs(node.right, 2*(index+1))\n return\n dfs(root, 0)\n return root\n\n\ndef tree_to_array_bfs(root):\n q = Queue(maxsize = 0) # queue with infinity size\n q.put(root)\n array = []\n def bfs():\n if q.empty():\n return\n else:\n node = q.get()\n array.append(node.value)\n if node.left != None:\n q.put(node.left)\n if node.right != None:\n q.put(node.right)\n bfs()\n return\n bfs()\n return array\n\n \n\n\ndef findClosestValueInBst(tree, target):\n distance = abs(tree.value - target)\n value = tree.value\n \n def dfs(node):\n nonlocal distance, value\n # stop condition\n if(node is None):\n return value\n if(node.value == target):\n return target\n\n if abs(node.value - target) < distance:\n value = node.value\n distance = abs(value - target)\n\n # recursion part\n if(node.value > target):\n return dfs(node.left)\n elif(node.value < target):\n return dfs(node.right)\n\n return dfs(tree)\n\n\nif __name__ == '__main__':\n array = [5,3,10,2,4,8] + [None]*6 + [9] + [None]*2\n root = array_to_tree_dfs(array)\n new_array = tree_to_array_bfs(root) \n print(new_array)\n print(findClosestValueInBst(root, 6))\n \n\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for row in screen:
if row_count == 0:
row_count = row_count + 1
continue
row = row.split('\t')
data[row[1]] = row
print(data['55299'])
print(data['51666'])
print(data['28987'])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
screen_id = 178
request_url = cfg.BASE_URL + '/screen/' + str(screen_id)
params = {'accesskey': cfg.ACCESS_KEY, 'format': 'tab', 'header': 'yes',
'score1min': 0.9, 'score1max': 0.98}
r = requests.get(request_url, params=params)
screen = r.text.splitlines()
row_count = 0
data = {}
for row in screen:
if row_count == 0:
row_count = row_count + 1
continue
row = row.split('\t')
data[row[1]] = row
print(data['55299'])
print(data['51666'])
print(data['28987'])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import requests
from core import config as cfg
screen_id = 178
request_url = cfg.BASE_URL + '/screen/' + str(screen_id)
params = {'accesskey': cfg.ACCESS_KEY, 'format': 'tab', 'header': 'yes',
'score1min': 0.9, 'score1max': 0.98}
r = requests.get(request_url, params=params)
screen = r.text.splitlines()
row_count = 0
data = {}
for row in screen:
if row_count == 0:
row_count = row_count + 1
continue
row = row.split('\t')
data[row[1]] = row
print(data['55299'])
print(data['51666'])
print(data['28987'])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Fetch screen scores with customizable search criteria
that can be tailored to match your own requirements
in tab format
"""
import requests
from core import config as cfg
screen_id = 178
request_url = cfg.BASE_URL + "/screen/" + str(screen_id)
# These parameters can be modified to match any search criteria following
# the rules outlined in the Wiki: https://wiki.thebiogrid.org/doku.php/orcs:webservice
# In this instance, we've chosen to return results in "tab" format with a header, and
# to limit scores in the SCORE.1 column to the range of 0.9 -> 0.98
params = {
"accesskey": cfg.ACCESS_KEY,
"format": "tab",
"header": "yes",
"score1min": 0.9,
"score1max": 0.98
}
r = requests.get( request_url, params = params )
screen = r.text.splitlines( )
row_count = 0
data = {}
for row in screen :
# Skip the header, but you could have also simply turned
# it off with header: "no" as a parameter instead
if row_count == 0 :
row_count = row_count + 1
continue
# Tab files are tab delimited
row = row.split( "\t" )
# create a hash of results by gene identifier
data[row[1]] = row
# Print out data about the genes BRIX1, ASB4, and NOB1
print( data['55299'] )
print( data['51666'] )
print( data['28987'] )
"""
Output as of version 1.0.1:
['178', '55299', 'gene', 'BRIX1', 'BRIX|BXDC2|FLJ11100', '9606', 'Homo sapiens', '0.94239', '0.999965', '-', '-', '-', 'NO', 'BioGRID ORCS']
['178', '51666', 'gene', 'ASB4', 'ASB-4', '9606', 'Homo sapiens', '0.97613', '0.999965', '-', '-', '-', 'NO', 'BioGRID ORCS']
['178', '28987', 'gene', 'NOB1', 'ART-4|MST158|MSTP158|NOB1P|PSMD8BP1', '9606', 'Homo sapiens', '0.96316', '0.999965', '-', '-', '-', 'NO', 'BioGRID ORCS']
"""
|
flexible
|
{
"blob_id": "80c6dd1c76b3ac56f34e36f571e8db3927994311",
"index": 8162,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor row in screen:\n if row_count == 0:\n row_count = row_count + 1\n continue\n row = row.split('\\t')\n data[row[1]] = row\nprint(data['55299'])\nprint(data['51666'])\nprint(data['28987'])\n<mask token>\n",
"step-3": "<mask token>\nscreen_id = 178\nrequest_url = cfg.BASE_URL + '/screen/' + str(screen_id)\nparams = {'accesskey': cfg.ACCESS_KEY, 'format': 'tab', 'header': 'yes',\n 'score1min': 0.9, 'score1max': 0.98}\nr = requests.get(request_url, params=params)\nscreen = r.text.splitlines()\nrow_count = 0\ndata = {}\nfor row in screen:\n if row_count == 0:\n row_count = row_count + 1\n continue\n row = row.split('\\t')\n data[row[1]] = row\nprint(data['55299'])\nprint(data['51666'])\nprint(data['28987'])\n<mask token>\n",
"step-4": "<mask token>\nimport requests\nfrom core import config as cfg\nscreen_id = 178\nrequest_url = cfg.BASE_URL + '/screen/' + str(screen_id)\nparams = {'accesskey': cfg.ACCESS_KEY, 'format': 'tab', 'header': 'yes',\n 'score1min': 0.9, 'score1max': 0.98}\nr = requests.get(request_url, params=params)\nscreen = r.text.splitlines()\nrow_count = 0\ndata = {}\nfor row in screen:\n if row_count == 0:\n row_count = row_count + 1\n continue\n row = row.split('\\t')\n data[row[1]] = row\nprint(data['55299'])\nprint(data['51666'])\nprint(data['28987'])\n<mask token>\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nFetch screen scores with customizable search criteria\nthat can be tailored to match your own requirements\nin tab format\n\"\"\"\n\nimport requests\nfrom core import config as cfg\n\nscreen_id = 178\nrequest_url = cfg.BASE_URL + \"/screen/\" + str(screen_id)\n\n# These parameters can be modified to match any search criteria following\n# the rules outlined in the Wiki: https://wiki.thebiogrid.org/doku.php/orcs:webservice\n# In this instance, we've chosen to return results in \"tab\" format with a header, and \n# to limit scores in the SCORE.1 column to the range of 0.9 -> 0.98\nparams = {\n \"accesskey\": cfg.ACCESS_KEY,\n \"format\": \"tab\",\n \"header\": \"yes\",\n \"score1min\": 0.9,\n \"score1max\": 0.98\n}\n\nr = requests.get( request_url, params = params )\nscreen = r.text.splitlines( )\n\nrow_count = 0\ndata = {}\nfor row in screen :\n\n # Skip the header, but you could have also simply turned\n # it off with header: \"no\" as a parameter instead\n if row_count == 0 :\n row_count = row_count + 1\n continue\n\n # Tab files are tab delimited\n row = row.split( \"\\t\" )\n \n # create a hash of results by gene identifier\n data[row[1]] = row\n\n# Print out data about the genes BRIX1, ASB4, and NOB1\nprint( data['55299'] )\nprint( data['51666'] )\nprint( data['28987'] )\n\n\"\"\" \nOutput as of version 1.0.1:\n['178', '55299', 'gene', 'BRIX1', 'BRIX|BXDC2|FLJ11100', '9606', 'Homo sapiens', '0.94239', '0.999965', '-', '-', '-', 'NO', 'BioGRID ORCS']\n['178', '51666', 'gene', 'ASB4', 'ASB-4', '9606', 'Homo sapiens', '0.97613', '0.999965', '-', '-', '-', 'NO', 'BioGRID ORCS']\n['178', '28987', 'gene', 'NOB1', 'ART-4|MST158|MSTP158|NOB1P|PSMD8BP1', '9606', 'Homo sapiens', '0.96316', '0.999965', '-', '-', '-', 'NO', 'BioGRID ORCS']\n\"\"\"",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
meta.create_all(engine)
<|reserved_special_token_0|>
conn.execute(students.insert(), [{'name': 'Rajiv', 'lastname': 'Khanna'}, {
'name': 'Komal', 'lastname': 'Bhandari'}, {'name': 'Abdul', 'lastname':
'Sattar'}, {'name': 'Priya', 'lastname': 'Rajhans'}])
<|reserved_special_token_0|>
for row in result:
print(row)
<|reserved_special_token_0|>
print(row)
<|reserved_special_token_0|>
for row in result:
print(row)
<|reserved_special_token_0|>
conn.execute(stmt)
<|reserved_special_token_0|>
conn.execute(s).fetchall()
<|reserved_special_token_0|>
conn.execute(stmt)
<|reserved_special_token_0|>
meta.create_all(engine)
conn.execute(addresses.insert(), [{'st_id': 1, 'postal_add':
'Shivajinagar Pune', 'email_add': 'ravi@gmail.com'}, {'st_id': 1,
'postal_add': 'ChurchGate Mumbai', 'email_add': 'kapoor@gmail.com'}, {
'st_id': 3, 'postal_add': 'Jubilee Hills Hyderabad', 'email_add':
'komal@gmail.com'}, {'st_id': 5, 'postal_add': 'MG Road Bangaluru',
'email_add': 'as@yahoo.com'}, {'st_id': 2, 'postal_add':
'Cannought Place new Delhi', 'email_add': 'admin@khanna.com'}])
<|reserved_special_token_0|>
for res in result:
print(res)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
engine = create_engine('sqlite:///college.db', echo=True)
meta = MetaData()
students = Table('students', meta, Column('id', Integer, primary_key=True),
Column('name', String), Column('lastname', String))
meta.create_all(engine)
conn = engine.connect()
ins = students.insert().values(name='Ravi', lastname='Mahajan')
res = conn.execute(ins)
conn.execute(students.insert(), [{'name': 'Rajiv', 'lastname': 'Khanna'}, {
'name': 'Komal', 'lastname': 'Bhandari'}, {'name': 'Abdul', 'lastname':
'Sattar'}, {'name': 'Priya', 'lastname': 'Rajhans'}])
s = students.select()
result = conn.execute(s)
for row in result:
print(row)
s = students.select().where(students.c.id > 2)
result = conn.execute(s)
row = result.fetchall()
print(row)
s = select([students])
result = conn.execute(s)
for row in result:
print(row)
t = text('SELECT * from students')
result = conn.execute(t)
stmt = students.update().where(students.c.lastname == 'Khanna').values(lastname
='Bhatt')
conn.execute(stmt)
s = students.select()
conn.execute(s).fetchall()
<|reserved_special_token_0|>
stmt = update(students).where(students.c.lastname == 'Khanna').values(lastname
='Kapoor')
stmt2 = students.delete().where(students.c.lastname == 'Rajhans')
conn.execute(stmt)
addresses = Table('addresses', meta, Column('id', Integer, primary_key=True
), Column('st_id', Integer, ForeignKey('students.id')), Column(
'postal_add', String), Column('email_add', String))
meta.create_all(engine)
conn.execute(addresses.insert(), [{'st_id': 1, 'postal_add':
'Shivajinagar Pune', 'email_add': 'ravi@gmail.com'}, {'st_id': 1,
'postal_add': 'ChurchGate Mumbai', 'email_add': 'kapoor@gmail.com'}, {
'st_id': 3, 'postal_add': 'Jubilee Hills Hyderabad', 'email_add':
'komal@gmail.com'}, {'st_id': 5, 'postal_add': 'MG Road Bangaluru',
'email_add': 'as@yahoo.com'}, {'st_id': 2, 'postal_add':
'Cannought Place new Delhi', 'email_add': 'admin@khanna.com'}])
stmt = students.update().values({students.c.name: 'xyz', addresses.c.
email_add: 'abc@xyz.com'}).where(students.c.id == addresses.c.id)
<|reserved_special_token_0|>
j = students.join(addresses, students.c.id == addresses.c.st_id)
stmt = select([students]).select_from(j)
result = conn.execute(stmt)
for res in result:
print(res)
<|reserved_special_token_1|>
from sqlalchemy import create_engine
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey
from sqlalchemy.sql import select
from sqlalchemy import text
engine = create_engine('sqlite:///college.db', echo=True)
meta = MetaData()
students = Table('students', meta, Column('id', Integer, primary_key=True),
Column('name', String), Column('lastname', String))
meta.create_all(engine)
conn = engine.connect()
ins = students.insert().values(name='Ravi', lastname='Mahajan')
res = conn.execute(ins)
conn.execute(students.insert(), [{'name': 'Rajiv', 'lastname': 'Khanna'}, {
'name': 'Komal', 'lastname': 'Bhandari'}, {'name': 'Abdul', 'lastname':
'Sattar'}, {'name': 'Priya', 'lastname': 'Rajhans'}])
s = students.select()
result = conn.execute(s)
for row in result:
print(row)
s = students.select().where(students.c.id > 2)
result = conn.execute(s)
row = result.fetchall()
print(row)
s = select([students])
result = conn.execute(s)
for row in result:
print(row)
t = text('SELECT * from students')
result = conn.execute(t)
stmt = students.update().where(students.c.lastname == 'Khanna').values(lastname
='Bhatt')
conn.execute(stmt)
s = students.select()
conn.execute(s).fetchall()
from sqlalchemy.sql.expression import update
stmt = update(students).where(students.c.lastname == 'Khanna').values(lastname
='Kapoor')
stmt2 = students.delete().where(students.c.lastname == 'Rajhans')
conn.execute(stmt)
addresses = Table('addresses', meta, Column('id', Integer, primary_key=True
), Column('st_id', Integer, ForeignKey('students.id')), Column(
'postal_add', String), Column('email_add', String))
meta.create_all(engine)
conn.execute(addresses.insert(), [{'st_id': 1, 'postal_add':
'Shivajinagar Pune', 'email_add': 'ravi@gmail.com'}, {'st_id': 1,
'postal_add': 'ChurchGate Mumbai', 'email_add': 'kapoor@gmail.com'}, {
'st_id': 3, 'postal_add': 'Jubilee Hills Hyderabad', 'email_add':
'komal@gmail.com'}, {'st_id': 5, 'postal_add': 'MG Road Bangaluru',
'email_add': 'as@yahoo.com'}, {'st_id': 2, 'postal_add':
'Cannought Place new Delhi', 'email_add': 'admin@khanna.com'}])
stmt = students.update().values({students.c.name: 'xyz', addresses.c.
email_add: 'abc@xyz.com'}).where(students.c.id == addresses.c.id)
from sqlalchemy import join
from sqlalchemy.sql import select
j = students.join(addresses, students.c.id == addresses.c.st_id)
stmt = select([students]).select_from(j)
result = conn.execute(stmt)
for res in result:
print(res)
<|reserved_special_token_1|>
from sqlalchemy import create_engine
from sqlalchemy import Table,Column,Integer,String,MetaData,ForeignKey
from sqlalchemy.sql import select
from sqlalchemy import text
#Creating a database 'college.db'
engine = create_engine('sqlite:///college.db', echo=True)
meta = MetaData()
#Creating a Students table
students = Table(
'students',meta,
Column('id',Integer,primary_key=True),
Column('name',String),
Column('lastname',String)
)
meta.create_all(engine)
#Inserting values
conn = engine.connect()
ins = students.insert().values(name='Ravi',lastname='Mahajan')
res = conn.execute(ins)
# Execute many commands
conn.execute(students.insert(),[
{'name': 'Rajiv', 'lastname': 'Khanna'},
{'name': 'Komal', 'lastname': 'Bhandari'},
{'name': 'Abdul', 'lastname': 'Sattar'},
{'name': 'Priya', 'lastname': 'Rajhans'},
])
# Selecting from table Students
s = students.select()
result = conn.execute(s)
# row = result.fetchall()
for row in result:
print(row)
# Where condition
s = students.select().where(students.c.id>2)
result = conn.execute(s)
row = result.fetchall()
print(row)
s = select([students])
result = conn.execute(s)
for row in result:
print(row)
# Using text to execute query using text
t = text('SELECT * from students')
result = conn.execute(t)
# Update
stmt = students.update().where(students.c.lastname=='Khanna').values(lastname='Bhatt')
conn.execute(stmt)
s = students.select()
conn.execute(s).fetchall()
from sqlalchemy.sql.expression import update
stmt = update(students).where(students.c.lastname == 'Khanna').values(lastname = 'Kapoor')
stmt2 = students.delete().where(students.c.lastname=='Rajhans')
conn.execute(stmt)
addresses = Table(
'addresses', meta,
Column('id', Integer, primary_key = True),
Column('st_id', Integer, ForeignKey('students.id')),
Column('postal_add', String),
Column('email_add', String))
meta.create_all(engine)
conn.execute(addresses.insert(), [
{'st_id':1, 'postal_add':'Shivajinagar Pune', 'email_add':'ravi@gmail.com'},
{'st_id':1, 'postal_add':'ChurchGate Mumbai', 'email_add':'kapoor@gmail.com'},
{'st_id':3, 'postal_add':'Jubilee Hills Hyderabad', 'email_add':'komal@gmail.com'},
{'st_id':5, 'postal_add':'MG Road Bangaluru', 'email_add':'as@yahoo.com'},
{'st_id':2, 'postal_add':'Cannought Place new Delhi', 'email_add':'admin@khanna.com'},
])
# Update query for Multiple tables
stmt = students.update().values({students.c.name:'xyz',
addresses.c.email_add:'abc@xyz.com'}).where(students.c.id == addresses.c.id)
# using joins
from sqlalchemy import join
from sqlalchemy.sql import select
j = students.join(addresses,students.c.id==addresses.c.st_id)
stmt = select([students]).select_from(j)
result = conn.execute(stmt)
for res in result:
print(res)
|
flexible
|
{
"blob_id": "7ea6fefa75d36ff45dcea49919fdc632e378a73f",
"index": 9113,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmeta.create_all(engine)\n<mask token>\nconn.execute(students.insert(), [{'name': 'Rajiv', 'lastname': 'Khanna'}, {\n 'name': 'Komal', 'lastname': 'Bhandari'}, {'name': 'Abdul', 'lastname':\n 'Sattar'}, {'name': 'Priya', 'lastname': 'Rajhans'}])\n<mask token>\nfor row in result:\n print(row)\n<mask token>\nprint(row)\n<mask token>\nfor row in result:\n print(row)\n<mask token>\nconn.execute(stmt)\n<mask token>\nconn.execute(s).fetchall()\n<mask token>\nconn.execute(stmt)\n<mask token>\nmeta.create_all(engine)\nconn.execute(addresses.insert(), [{'st_id': 1, 'postal_add':\n 'Shivajinagar Pune', 'email_add': 'ravi@gmail.com'}, {'st_id': 1,\n 'postal_add': 'ChurchGate Mumbai', 'email_add': 'kapoor@gmail.com'}, {\n 'st_id': 3, 'postal_add': 'Jubilee Hills Hyderabad', 'email_add':\n 'komal@gmail.com'}, {'st_id': 5, 'postal_add': 'MG Road Bangaluru',\n 'email_add': 'as@yahoo.com'}, {'st_id': 2, 'postal_add':\n 'Cannought Place new Delhi', 'email_add': 'admin@khanna.com'}])\n<mask token>\nfor res in result:\n print(res)\n",
"step-3": "<mask token>\nengine = create_engine('sqlite:///college.db', echo=True)\nmeta = MetaData()\nstudents = Table('students', meta, Column('id', Integer, primary_key=True),\n Column('name', String), Column('lastname', String))\nmeta.create_all(engine)\nconn = engine.connect()\nins = students.insert().values(name='Ravi', lastname='Mahajan')\nres = conn.execute(ins)\nconn.execute(students.insert(), [{'name': 'Rajiv', 'lastname': 'Khanna'}, {\n 'name': 'Komal', 'lastname': 'Bhandari'}, {'name': 'Abdul', 'lastname':\n 'Sattar'}, {'name': 'Priya', 'lastname': 'Rajhans'}])\ns = students.select()\nresult = conn.execute(s)\nfor row in result:\n print(row)\ns = students.select().where(students.c.id > 2)\nresult = conn.execute(s)\nrow = result.fetchall()\nprint(row)\ns = select([students])\nresult = conn.execute(s)\nfor row in result:\n print(row)\nt = text('SELECT * from students')\nresult = conn.execute(t)\nstmt = students.update().where(students.c.lastname == 'Khanna').values(lastname\n ='Bhatt')\nconn.execute(stmt)\ns = students.select()\nconn.execute(s).fetchall()\n<mask token>\nstmt = update(students).where(students.c.lastname == 'Khanna').values(lastname\n ='Kapoor')\nstmt2 = students.delete().where(students.c.lastname == 'Rajhans')\nconn.execute(stmt)\naddresses = Table('addresses', meta, Column('id', Integer, primary_key=True\n ), Column('st_id', Integer, ForeignKey('students.id')), Column(\n 'postal_add', String), Column('email_add', String))\nmeta.create_all(engine)\nconn.execute(addresses.insert(), [{'st_id': 1, 'postal_add':\n 'Shivajinagar Pune', 'email_add': 'ravi@gmail.com'}, {'st_id': 1,\n 'postal_add': 'ChurchGate Mumbai', 'email_add': 'kapoor@gmail.com'}, {\n 'st_id': 3, 'postal_add': 'Jubilee Hills Hyderabad', 'email_add':\n 'komal@gmail.com'}, {'st_id': 5, 'postal_add': 'MG Road Bangaluru',\n 'email_add': 'as@yahoo.com'}, {'st_id': 2, 'postal_add':\n 'Cannought Place new Delhi', 'email_add': 'admin@khanna.com'}])\nstmt = 
students.update().values({students.c.name: 'xyz', addresses.c.\n email_add: 'abc@xyz.com'}).where(students.c.id == addresses.c.id)\n<mask token>\nj = students.join(addresses, students.c.id == addresses.c.st_id)\nstmt = select([students]).select_from(j)\nresult = conn.execute(stmt)\nfor res in result:\n print(res)\n",
"step-4": "from sqlalchemy import create_engine\nfrom sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey\nfrom sqlalchemy.sql import select\nfrom sqlalchemy import text\nengine = create_engine('sqlite:///college.db', echo=True)\nmeta = MetaData()\nstudents = Table('students', meta, Column('id', Integer, primary_key=True),\n Column('name', String), Column('lastname', String))\nmeta.create_all(engine)\nconn = engine.connect()\nins = students.insert().values(name='Ravi', lastname='Mahajan')\nres = conn.execute(ins)\nconn.execute(students.insert(), [{'name': 'Rajiv', 'lastname': 'Khanna'}, {\n 'name': 'Komal', 'lastname': 'Bhandari'}, {'name': 'Abdul', 'lastname':\n 'Sattar'}, {'name': 'Priya', 'lastname': 'Rajhans'}])\ns = students.select()\nresult = conn.execute(s)\nfor row in result:\n print(row)\ns = students.select().where(students.c.id > 2)\nresult = conn.execute(s)\nrow = result.fetchall()\nprint(row)\ns = select([students])\nresult = conn.execute(s)\nfor row in result:\n print(row)\nt = text('SELECT * from students')\nresult = conn.execute(t)\nstmt = students.update().where(students.c.lastname == 'Khanna').values(lastname\n ='Bhatt')\nconn.execute(stmt)\ns = students.select()\nconn.execute(s).fetchall()\nfrom sqlalchemy.sql.expression import update\nstmt = update(students).where(students.c.lastname == 'Khanna').values(lastname\n ='Kapoor')\nstmt2 = students.delete().where(students.c.lastname == 'Rajhans')\nconn.execute(stmt)\naddresses = Table('addresses', meta, Column('id', Integer, primary_key=True\n ), Column('st_id', Integer, ForeignKey('students.id')), Column(\n 'postal_add', String), Column('email_add', String))\nmeta.create_all(engine)\nconn.execute(addresses.insert(), [{'st_id': 1, 'postal_add':\n 'Shivajinagar Pune', 'email_add': 'ravi@gmail.com'}, {'st_id': 1,\n 'postal_add': 'ChurchGate Mumbai', 'email_add': 'kapoor@gmail.com'}, {\n 'st_id': 3, 'postal_add': 'Jubilee Hills Hyderabad', 'email_add':\n 'komal@gmail.com'}, {'st_id': 
5, 'postal_add': 'MG Road Bangaluru',\n 'email_add': 'as@yahoo.com'}, {'st_id': 2, 'postal_add':\n 'Cannought Place new Delhi', 'email_add': 'admin@khanna.com'}])\nstmt = students.update().values({students.c.name: 'xyz', addresses.c.\n email_add: 'abc@xyz.com'}).where(students.c.id == addresses.c.id)\nfrom sqlalchemy import join\nfrom sqlalchemy.sql import select\nj = students.join(addresses, students.c.id == addresses.c.st_id)\nstmt = select([students]).select_from(j)\nresult = conn.execute(stmt)\nfor res in result:\n print(res)\n",
"step-5": "from sqlalchemy import create_engine\r\nfrom sqlalchemy import Table,Column,Integer,String,MetaData,ForeignKey\r\nfrom sqlalchemy.sql import select\r\nfrom sqlalchemy import text\r\n\r\n#Creating a database 'college.db'\r\nengine = create_engine('sqlite:///college.db', echo=True)\r\nmeta = MetaData()\r\n\r\n#Creating a Students table\r\nstudents = Table(\r\n 'students',meta,\r\n Column('id',Integer,primary_key=True),\r\n Column('name',String),\r\n Column('lastname',String)\r\n)\r\nmeta.create_all(engine)\r\n\r\n#Inserting values\r\nconn = engine.connect()\r\nins = students.insert().values(name='Ravi',lastname='Mahajan')\r\nres = conn.execute(ins)\r\n\r\n# Execute many commands\r\nconn.execute(students.insert(),[\r\n {'name': 'Rajiv', 'lastname': 'Khanna'},\r\n {'name': 'Komal', 'lastname': 'Bhandari'},\r\n {'name': 'Abdul', 'lastname': 'Sattar'},\r\n {'name': 'Priya', 'lastname': 'Rajhans'},\r\n])\r\n\r\n# Selecting from table Students\r\ns = students.select()\r\nresult = conn.execute(s)\r\n# row = result.fetchall()\r\nfor row in result:\r\n print(row)\r\n\r\n# Where condition\r\ns = students.select().where(students.c.id>2)\r\nresult = conn.execute(s)\r\nrow = result.fetchall()\r\nprint(row)\r\n\r\ns = select([students])\r\nresult = conn.execute(s)\r\nfor row in result:\r\n print(row)\r\n\r\n# Using text to execute query using text\r\nt = text('SELECT * from students')\r\nresult = conn.execute(t)\r\n\r\n# Update\r\nstmt = students.update().where(students.c.lastname=='Khanna').values(lastname='Bhatt')\r\nconn.execute(stmt)\r\ns = students.select()\r\nconn.execute(s).fetchall()\r\n\r\nfrom sqlalchemy.sql.expression import update\r\nstmt = update(students).where(students.c.lastname == 'Khanna').values(lastname = 'Kapoor')\r\n\r\nstmt2 = students.delete().where(students.c.lastname=='Rajhans')\r\nconn.execute(stmt)\r\n\r\naddresses = Table(\r\n 'addresses', meta,\r\n Column('id', Integer, primary_key = True),\r\n Column('st_id', Integer, 
ForeignKey('students.id')),\r\n Column('postal_add', String),\r\n Column('email_add', String))\r\n\r\nmeta.create_all(engine)\r\n\r\nconn.execute(addresses.insert(), [\r\n {'st_id':1, 'postal_add':'Shivajinagar Pune', 'email_add':'ravi@gmail.com'},\r\n {'st_id':1, 'postal_add':'ChurchGate Mumbai', 'email_add':'kapoor@gmail.com'},\r\n {'st_id':3, 'postal_add':'Jubilee Hills Hyderabad', 'email_add':'komal@gmail.com'},\r\n {'st_id':5, 'postal_add':'MG Road Bangaluru', 'email_add':'as@yahoo.com'},\r\n {'st_id':2, 'postal_add':'Cannought Place new Delhi', 'email_add':'admin@khanna.com'},\r\n])\r\n\r\n# Update query for Multiple tables\r\nstmt = students.update().values({students.c.name:'xyz',\r\n addresses.c.email_add:'abc@xyz.com'}).where(students.c.id == addresses.c.id)\r\n\r\n# using joins\r\nfrom sqlalchemy import join\r\nfrom sqlalchemy.sql import select\r\nj = students.join(addresses,students.c.id==addresses.c.st_id)\r\nstmt = select([students]).select_from(j)\r\nresult = conn.execute(stmt)\r\nfor res in result:\r\n print(res)\r\n\r\n\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import requests
import time
driver = webdriver.Chrome(executable_path='/home/bc/桌面/chromedriver')
driver.get('https://www.zhaopin.com/')
time.sleep(5)
driver.find_element_by_id('KeyWord_kw2').send_keys('技术')
driver.find_element_by_class_name('doSearch').click()
time.sleep(5)
|
normal
|
{
"blob_id": "fc5a4c27a21c2bd3900a6ad0bff68c249fe29d7a",
"index": 1865,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndriver.get('https://www.zhaopin.com/')\ntime.sleep(5)\ndriver.find_element_by_id('KeyWord_kw2').send_keys('技术')\ndriver.find_element_by_class_name('doSearch').click()\ntime.sleep(5)\n",
"step-3": "<mask token>\ndriver = webdriver.Chrome(executable_path='/home/bc/桌面/chromedriver')\ndriver.get('https://www.zhaopin.com/')\ntime.sleep(5)\ndriver.find_element_by_id('KeyWord_kw2').send_keys('技术')\ndriver.find_element_by_class_name('doSearch').click()\ntime.sleep(5)\n",
"step-4": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport requests\nimport time\ndriver = webdriver.Chrome(executable_path='/home/bc/桌面/chromedriver')\ndriver.get('https://www.zhaopin.com/')\ntime.sleep(5)\ndriver.find_element_by_id('KeyWord_kw2').send_keys('技术')\ndriver.find_element_by_class_name('doSearch').click()\ntime.sleep(5)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Blockchain:
def __init__(self):
self.chain = []
self.transactions = []
self.create_block(proof=0, previous_hash='0')
self.nodes = set()
def create_block(self, proof, previous_hash):
block = {'index': len(self.chain) + 1, 'timestamp': str(datetime.
datetime.now()), 'proof': proof, 'previous_hash': previous_hash,
'transactions': self.transactions}
self.transactions = []
self.chain.append(block)
return block
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def hash(self, block):
encoded_block = json.dumps(block, sort_keys=True).encode()
return hashlib.sha256(encoded_block).hexdigest()
<|reserved_special_token_0|>
def add_transaction(self, senders, receiver, amount):
self.transactions.append({'senders': senders, 'receiver': receiver,
'amount': amount})
previous_block = self.get_previous_block()
return previous_block['index'] + 1
<|reserved_special_token_0|>
def replace_chain(self):
max_length = len(self.chain)
longest_chain = None
network = self.nodes
for node in network:
response = requests.get(f'http://{node}/get_chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.is_chain_valid(chain):
max_length = length
longest_chain = chain
if longest_chain:
self.chain = longest_chain
return True
return False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Blockchain:
def __init__(self):
self.chain = []
self.transactions = []
self.create_block(proof=0, previous_hash='0')
self.nodes = set()
def create_block(self, proof, previous_hash):
block = {'index': len(self.chain) + 1, 'timestamp': str(datetime.
datetime.now()), 'proof': proof, 'previous_hash': previous_hash,
'transactions': self.transactions}
self.transactions = []
self.chain.append(block)
return block
def get_previous_block(self):
return self.chain[-1]
def proof_of_work(self, previous_proof):
new_proof = 1
check_proof = False
while check_proof is False:
hash_operation = hashlib.sha256(str(new_proof ** 2 -
previous_proof ** 2).encode()).hexdigest()
if hash_operation[:4] == '0000':
check_proof = True
else:
new_proof += 1
return new_proof
def hash(self, block):
encoded_block = json.dumps(block, sort_keys=True).encode()
return hashlib.sha256(encoded_block).hexdigest()
def is_chain_valid(self, chain):
previous_block = chain[0]
block_index = 1
while block_index < len(chain):
block = chain[block_index]
if block['previous_hash'] != self.hash(previous_block):
return False
previous_proof = previous_block['proof']
proof = block['proof']
hash_operation = hashlib.sha256(str(proof ** 2 - previous_proof **
2).encode()).hexdigest()
if hash_operation[:4] != '0000':
return False
previous_block = block
block_index += 1
return True
def add_transaction(self, senders, receiver, amount):
self.transactions.append({'senders': senders, 'receiver': receiver,
'amount': amount})
previous_block = self.get_previous_block()
return previous_block['index'] + 1
def add_node(self, address):
parsed_url = urlparse(address)
self.nodes.add(parsed_url.netloc)
def replace_chain(self):
max_length = len(self.chain)
longest_chain = None
network = self.nodes
for node in network:
response = requests.get(f'http://{node}/get_chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.is_chain_valid(chain):
max_length = length
longest_chain = chain
if longest_chain:
self.chain = longest_chain
return True
return False
<|reserved_special_token_0|>
@app.route('/mine_block', methods=['GET'])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block['proof']
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
blockchain.add_transaction(node_address, 'Bhavjot', 1)
block = blockchain.create_block(proof, previous_hash)
response = {'message':
'Congratulations, you just mined a block! 😈😈😈😈😈🤓🤓🤓', 'index': block
['index'], 'timestamp': block['timestamp'], 'proof': block['proof'],
'previous_hash': block['previous_hash'], 'transactions': block[
'transactions']}
return jsonify(response), 200
@app.route('/get_chain', methods=['GET'])
def get_chain():
response = {'chain': blockchain.chain, 'length': len(blockchain.chain)}
return jsonify(response), 200
<|reserved_special_token_0|>
@app.route('/add_transactions', methods=['POST'])
def add_transaction():
json = request.get_json()
transaction_keys = ['sender', 'receiver', 'amount']
if not all(key in json for key in transaction_keys):
return 'Some elements of the transaction are missing', 400
index = blockchain.add_transaction(json['sender'], json['receiver'],
json['amount'])
response = {'message': f'This transaction will be added to Block {index}'}
return jsonify(response), 201
@app.route('/connect_node', methods=['POST'])
def connect_node():
json = request.get_json()
nodes = json.get('nodes')
if nodes is None:
return 'No node', 400
for node in nodes:
blockchain.add_node(node)
response = {'message':
'All the nodes are now connected. The Whalecoin 🐳🐳🐳🐳🐳🐳 Blockchain now contains the following nodes:'
, 'total_nodes': list(blockchain.nodes)}
return jsonify(response), 201
@app.route('/replace_chain', methods=['GET'])
def replace_chain():
is_chain_replaced = blockchain.replace_chain()
if is_chain_replaced:
response = {'message':
'The nodes had different chains so the chain was replaced by the longest one.'
, 'new_chain': blockchain.chain}
else:
response = {'message': 'All good. The chain is the largest one.',
'actual_chain': blockchain.chain}
return jsonify(response), 200
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Blockchain:
    """A minimal proof-of-work blockchain with pending transactions and peers."""

    def __init__(self):
        self.chain = []
        self.transactions = []
        # Genesis block: fixed proof 0 and previous hash '0'.
        self.create_block(proof=0, previous_hash='0')
        self.nodes = set()

    def create_block(self, proof, previous_hash):
        """Seal the pending transactions into a new block and return it."""
        block = {
            'index': len(self.chain) + 1,
            'timestamp': str(datetime.datetime.now()),
            'proof': proof,
            'previous_hash': previous_hash,
            'transactions': self.transactions,
        }
        # Pending transactions now live inside the block; start a fresh list.
        self.transactions = []
        self.chain.append(block)
        return block

    def get_previous_block(self):
        """Return the most recently appended block."""
        return self.chain[-1]

    def proof_of_work(self, previous_proof):
        """Search for a proof whose puzzle digest has four leading zeros."""
        candidate = 1
        while True:
            digest = hashlib.sha256(
                str(candidate ** 2 - previous_proof ** 2).encode()
            ).hexdigest()
            if digest.startswith('0000'):
                return candidate
            candidate += 1

    def hash(self, block):
        """Return the SHA-256 hex digest of the block's canonical JSON form."""
        return hashlib.sha256(
            json.dumps(block, sort_keys=True).encode()
        ).hexdigest()

    def is_chain_valid(self, chain):
        """Verify hash links and proof difficulty across the whole chain."""
        for position in range(1, len(chain)):
            current = chain[position]
            previous = chain[position - 1]
            if current['previous_hash'] != self.hash(previous):
                return False
            puzzle = hashlib.sha256(
                str(current['proof'] ** 2 - previous['proof'] ** 2).encode()
            ).hexdigest()
            if not puzzle.startswith('0000'):
                return False
        return True

    def add_transaction(self, senders, receiver, amount):
        """Queue a transaction; return the index of the block that will hold it."""
        self.transactions.append(
            {'senders': senders, 'receiver': receiver, 'amount': amount}
        )
        return self.get_previous_block()['index'] + 1

    def add_node(self, address):
        """Register a peer by the netloc part of its URL."""
        self.nodes.add(urlparse(address).netloc)

    def replace_chain(self):
        """Adopt the longest valid chain found among peer nodes (consensus)."""
        max_length = len(self.chain)
        longest_chain = None
        for node in self.nodes:
            response = requests.get(f'http://{node}/get_chain')
            if response.status_code == 200:
                payload = response.json()
                if payload['length'] > max_length and self.is_chain_valid(payload['chain']):
                    max_length = payload['length']
                    longest_chain = payload['chain']
        if longest_chain:
            self.chain = longest_chain
            return True
        return False
<|reserved_special_token_0|>
@app.route('/mine_block', methods=['GET'])
def mine_block():
    """Mine one block: solve the proof puzzle, add the miner reward, seal the block."""
    tip = blockchain.get_previous_block()
    new_proof = blockchain.proof_of_work(tip['proof'])
    # Miner-reward transaction goes in before the block is created.
    blockchain.add_transaction(node_address, 'Bhavjot', 1)
    mined = blockchain.create_block(new_proof, blockchain.hash(tip))
    return jsonify({
        'message': 'Congratulations, you just mined a block! 😈😈😈😈😈🤓🤓🤓',
        'index': mined['index'],
        'timestamp': mined['timestamp'],
        'proof': mined['proof'],
        'previous_hash': mined['previous_hash'],
        'transactions': mined['transactions'],
    }), 200
@app.route('/get_chain', methods=['GET'])
def get_chain():
    """Return the full chain and its length."""
    return jsonify({'chain': blockchain.chain,
                    'length': len(blockchain.chain)}), 200
@app.route('/', methods=['GET'])
def custom_message():
    """Landing route with a greeting message."""
    greeting = {'message': 'Congratulations you are on Whalecoin 🐳🐳🐳🐳🐳🐳'}
    return jsonify(greeting), 200
@app.route('/add_transactions', methods=['POST'])
def add_transaction():
    """Queue a transaction posted as JSON.

    Expects keys 'sender', 'receiver' and 'amount' in the request body.
    Returns 201 with the index of the block that will hold the transaction,
    or 400 when the body is missing or incomplete.
    """
    # Renamed local from `json` to `payload`: the old name shadowed the
    # imported `json` module inside this function.
    payload = request.get_json()
    required = ('sender', 'receiver', 'amount')
    # `payload` is None for a non-JSON body; guard to return 400 instead of 500.
    if payload is None or not all(key in payload for key in required):
        return 'Some elements of the transaction are missing', 400
    index = blockchain.add_transaction(payload['sender'], payload['receiver'],
        payload['amount'])
    response = {'message': f'This transaction will be added to Block {index}'}
    return jsonify(response), 201
@app.route('/connect_node', methods=['POST'])
def connect_node():
    """Register peer nodes posted as {'nodes': [url, ...]}; returns 201 or 400."""
    # Renamed local from `json` to `payload`: the old name shadowed the
    # imported `json` module inside this function.
    payload = request.get_json()
    # `payload` is None for a non-JSON body; treat that like a missing node list.
    nodes = payload.get('nodes') if payload is not None else None
    if nodes is None:
        return 'No node', 400
    for node in nodes:
        blockchain.add_node(node)
    response = {'message':
        'All the nodes are now connected. The Whalecoin 🐳🐳🐳🐳🐳🐳 Blockchain now contains the following nodes:'
        , 'total_nodes': list(blockchain.nodes)}
    return jsonify(response), 201
@app.route('/replace_chain', methods=['GET'])
def replace_chain():
    """Run consensus: adopt the network's longest valid chain if ours is shorter."""
    if blockchain.replace_chain():
        body = {'message':
            'The nodes had different chains so the chain was replaced by the longest one.',
            'new_chain': blockchain.chain}
    else:
        body = {'message': 'All good. The chain is the largest one.',
            'actual_chain': blockchain.chain}
    return jsonify(body), 200
app.run(host='0.0.0.0', port=5001)
<|reserved_special_token_1|>
import datetime
import hashlib
import json
from flask import Flask, jsonify, request
import requests
from uuid import uuid4
from urllib.parse import urlparse
class Blockchain:
    """A minimal proof-of-work blockchain with pending transactions and peers."""

    def __init__(self):
        self.chain = []
        self.transactions = []
        # Genesis block: fixed proof 0 and previous hash '0'.
        self.create_block(proof=0, previous_hash='0')
        self.nodes = set()

    def create_block(self, proof, previous_hash):
        """Seal the pending transactions into a new block and return it."""
        block = {
            'index': len(self.chain) + 1,
            'timestamp': str(datetime.datetime.now()),
            'proof': proof,
            'previous_hash': previous_hash,
            'transactions': self.transactions,
        }
        # Pending transactions now live inside the block; start a fresh list.
        self.transactions = []
        self.chain.append(block)
        return block

    def get_previous_block(self):
        """Return the most recently appended block."""
        return self.chain[-1]

    def proof_of_work(self, previous_proof):
        """Search for a proof whose puzzle digest has four leading zeros."""
        candidate = 1
        while True:
            digest = hashlib.sha256(
                str(candidate ** 2 - previous_proof ** 2).encode()
            ).hexdigest()
            if digest.startswith('0000'):
                return candidate
            candidate += 1

    def hash(self, block):
        """Return the SHA-256 hex digest of the block's canonical JSON form."""
        return hashlib.sha256(
            json.dumps(block, sort_keys=True).encode()
        ).hexdigest()

    def is_chain_valid(self, chain):
        """Verify hash links and proof difficulty across the whole chain."""
        for position in range(1, len(chain)):
            current = chain[position]
            previous = chain[position - 1]
            if current['previous_hash'] != self.hash(previous):
                return False
            puzzle = hashlib.sha256(
                str(current['proof'] ** 2 - previous['proof'] ** 2).encode()
            ).hexdigest()
            if not puzzle.startswith('0000'):
                return False
        return True

    def add_transaction(self, senders, receiver, amount):
        """Queue a transaction; return the index of the block that will hold it."""
        self.transactions.append(
            {'senders': senders, 'receiver': receiver, 'amount': amount}
        )
        return self.get_previous_block()['index'] + 1

    def add_node(self, address):
        """Register a peer by the netloc part of its URL."""
        self.nodes.add(urlparse(address).netloc)

    def replace_chain(self):
        """Adopt the longest valid chain found among peer nodes (consensus)."""
        max_length = len(self.chain)
        longest_chain = None
        for node in self.nodes:
            response = requests.get(f'http://{node}/get_chain')
            if response.status_code == 200:
                payload = response.json()
                if payload['length'] > max_length and self.is_chain_valid(payload['chain']):
                    max_length = payload['length']
                    longest_chain = payload['chain']
        if longest_chain:
            self.chain = longest_chain
            return True
        return False
# Flask application instance serving the blockchain API.
app = Flask(__name__)
# The node-local blockchain this server operates on.
blockchain = Blockchain()
# Unique identifier for this node (uuid4 with dashes stripped); used as the
# sender of miner-reward transactions.
node_address = str(uuid4()).replace('-', '')
@app.route('/mine_block', methods=['GET'])
def mine_block():
    """Mine one block: solve the proof puzzle, add the miner reward, seal the block."""
    tip = blockchain.get_previous_block()
    new_proof = blockchain.proof_of_work(tip['proof'])
    # Miner-reward transaction goes in before the block is created.
    blockchain.add_transaction(node_address, 'Bhavjot', 1)
    mined = blockchain.create_block(new_proof, blockchain.hash(tip))
    return jsonify({
        'message': 'Congratulations, you just mined a block! 😈😈😈😈😈🤓🤓🤓',
        'index': mined['index'],
        'timestamp': mined['timestamp'],
        'proof': mined['proof'],
        'previous_hash': mined['previous_hash'],
        'transactions': mined['transactions'],
    }), 200
@app.route('/get_chain', methods=['GET'])
def get_chain():
    """Return the full chain and its length."""
    return jsonify({'chain': blockchain.chain,
                    'length': len(blockchain.chain)}), 200
@app.route('/', methods=['GET'])
def custom_message():
    """Landing route with a greeting message."""
    greeting = {'message': 'Congratulations you are on Whalecoin 🐳🐳🐳🐳🐳🐳'}
    return jsonify(greeting), 200
@app.route('/add_transactions', methods=['POST'])
def add_transaction():
    """Queue a transaction posted as JSON.

    Expects keys 'sender', 'receiver' and 'amount' in the request body.
    Returns 201 with the index of the block that will hold the transaction,
    or 400 when the body is missing or incomplete.
    """
    # Renamed local from `json` to `payload`: the old name shadowed the
    # imported `json` module inside this function.
    payload = request.get_json()
    required = ('sender', 'receiver', 'amount')
    # `payload` is None for a non-JSON body; guard to return 400 instead of 500.
    if payload is None or not all(key in payload for key in required):
        return 'Some elements of the transaction are missing', 400
    index = blockchain.add_transaction(payload['sender'], payload['receiver'],
        payload['amount'])
    response = {'message': f'This transaction will be added to Block {index}'}
    return jsonify(response), 201
@app.route('/connect_node', methods=['POST'])
def connect_node():
    """Register peer nodes posted as {'nodes': [url, ...]}; returns 201 or 400."""
    # Renamed local from `json` to `payload`: the old name shadowed the
    # imported `json` module inside this function.
    payload = request.get_json()
    # `payload` is None for a non-JSON body; treat that like a missing node list.
    nodes = payload.get('nodes') if payload is not None else None
    if nodes is None:
        return 'No node', 400
    for node in nodes:
        blockchain.add_node(node)
    response = {'message':
        'All the nodes are now connected. The Whalecoin 🐳🐳🐳🐳🐳🐳 Blockchain now contains the following nodes:'
        , 'total_nodes': list(blockchain.nodes)}
    return jsonify(response), 201
@app.route('/replace_chain', methods=['GET'])
def replace_chain():
    """Run consensus: adopt the network's longest valid chain if ours is shorter."""
    if blockchain.replace_chain():
        body = {'message':
            'The nodes had different chains so the chain was replaced by the longest one.',
            'new_chain': blockchain.chain}
    else:
        body = {'message': 'All good. The chain is the largest one.',
            'actual_chain': blockchain.chain}
    return jsonify(body), 200
app.run(host='0.0.0.0', port=5001)
<|reserved_special_token_1|>
import datetime # to add timestamps on every block in blockchain
import hashlib # library that is ued to hash the block
import json # to communicate in json data
# Flask to implement webservices jsonify to see the jsop message/response
# request help us to connect all the nodes of the blockchain together froming the p2p network
from flask import Flask, jsonify, request
# it will help us to verify that all the blockchain have same blockhain or not http requests (used in replace_cahin)
import requests
from uuid import uuid4
from urllib.parse import urlparse
# Building a Blockchain
class Blockchain:
    """A minimal proof-of-work blockchain with pending transactions and peers.

    Blocks are plain dicts chained by SHA-256 hashes; difficulty is fixed at
    four leading zeros on the proof puzzle.
    """

    def __init__(self):
        self.chain = []
        # Transactions queued here are included in the next mined block.
        self.transactions = []
        # Genesis block: fixed proof 0 and previous hash '0'.
        self.create_block(proof=0, previous_hash='0')
        # A set avoids duplicate peer registrations.
        self.nodes = set()

    def create_block(self, proof, previous_hash):
        """Seal the pending transactions into a new block and return it."""
        block = {
            'index': len(self.chain) + 1,
            'timestamp': str(datetime.datetime.now()),
            'proof': proof,  # nonce-like value found by proof_of_work
            'previous_hash': previous_hash,
            'transactions': self.transactions,
        }
        # Pending transactions now live inside the block; start a fresh list
        # so they are not duplicated into later blocks.
        self.transactions = []
        self.chain.append(block)
        return block

    def get_previous_block(self):
        """Return the most recently appended block."""
        return self.chain[-1]

    def proof_of_work(self, previous_proof):
        """Search for a proof whose puzzle digest has four leading zeros."""
        candidate = 1
        while True:
            digest = hashlib.sha256(
                str(candidate ** 2 - previous_proof ** 2).encode()
            ).hexdigest()
            if digest.startswith('0000'):
                return candidate
            candidate += 1

    def hash(self, block):
        """Return the SHA-256 hex digest of the block's canonical JSON form."""
        return hashlib.sha256(
            json.dumps(block, sort_keys=True).encode()
        ).hexdigest()

    def is_chain_valid(self, chain):
        """Verify hash links and proof difficulty across the whole chain."""
        for position in range(1, len(chain)):
            current = chain[position]
            previous = chain[position - 1]
            # The stored previous_hash must match the recomputed hash.
            if current['previous_hash'] != self.hash(previous):
                return False
            # Re-check the proof puzzle; verification is cheap compared to mining.
            puzzle = hashlib.sha256(
                str(current['proof'] ** 2 - previous['proof'] ** 2).encode()
            ).hexdigest()
            if not puzzle.startswith('0000'):
                return False
        return True

    def add_transaction(self, senders, receiver, amount):
        """Queue a transaction; return the index of the block that will hold it."""
        self.transactions.append(
            {'senders': senders, 'receiver': receiver, 'amount': amount}
        )
        # Pending transactions land in the *next* block, hence index + 1.
        return self.get_previous_block()['index'] + 1

    def add_node(self, address):
        """Register a peer by the netloc part of its URL."""
        self.nodes.add(urlparse(address).netloc)

    def replace_chain(self):
        """Adopt the longest valid chain found among peer nodes (consensus)."""
        max_length = len(self.chain)
        longest_chain = None
        for node in self.nodes:
            # Each peer exposes its chain at /get_chain.
            response = requests.get(f'http://{node}/get_chain')
            if response.status_code == 200:
                payload = response.json()
                if payload['length'] > max_length and self.is_chain_valid(payload['chain']):
                    max_length = payload['length']
                    longest_chain = payload['chain']
        if longest_chain:
            self.chain = longest_chain
            return True
        return False
# part-3 ends
# Mining our Blockchain
# Flask application instance serving the blockchain API.
app = Flask(__name__)
# The node-local blockchain this server operates on.
blockchain = Blockchain()
# Unique identifier for this node (uuid4 with dashes stripped); used as the
# sender of the miner-reward transaction in /mine_block.
node_address = str(uuid4()).replace('-', '')
# part-2
@app.route('/mine_block', methods=['GET'])
def mine_block():
    """Mine one block: solve the proof puzzle, add the miner reward, seal the block."""
    tip = blockchain.get_previous_block()
    new_proof = blockchain.proof_of_work(tip['proof'])
    # Miner-reward transaction goes in before the block is created, so it is
    # included in this block's transaction list.
    blockchain.add_transaction(node_address, 'Bhavjot', 1)
    mined = blockchain.create_block(new_proof, blockchain.hash(tip))
    return jsonify({
        'message': 'Congratulations, you just mined a block! 😈😈😈😈😈🤓🤓🤓',
        'index': mined['index'],
        'timestamp': mined['timestamp'],
        'proof': mined['proof'],
        'previous_hash': mined['previous_hash'],
        'transactions': mined['transactions'],
    }), 200
# getting all blocks in chain
@app.route('/get_chain', methods=['GET'])
def get_chain():
    """Return the full chain and its length."""
    return jsonify({'chain': blockchain.chain,
                    'length': len(blockchain.chain)}), 200
# custom message
@app.route('/', methods=['GET'])
def custom_message():
    """Landing route with a greeting message."""
    greeting = {'message': 'Congratulations you are on Whalecoin 🐳🐳🐳🐳🐳🐳'}
    return jsonify(greeting), 200
# part-2 ends
# creating the transactions
@app.route('/add_transactions', methods=['POST'])
def add_transaction():
    """Queue a transaction posted as JSON.

    Expects keys 'sender', 'receiver' and 'amount' in the request body.
    Returns 201 with the index of the block that will hold the transaction,
    or 400 when the body is missing or incomplete.
    """
    # Renamed local from `json` to `payload`: the old name shadowed the
    # imported `json` module inside this function.
    payload = request.get_json()
    required = ('sender', 'receiver', 'amount')
    # `payload` is None for a non-JSON body; guard to return 400 instead of 500.
    if payload is None or not all(key in payload for key in required):
        return 'Some elements of the transaction are missing', 400
    index = blockchain.add_transaction(
        payload['sender'], payload['receiver'], payload['amount'])
    # The transaction is only recorded on-chain once the next block is mined.
    response = {'message': f'This transaction will be added to Block {index}'}
    return jsonify(response), 201
@app.route('/connect_node', methods=['POST'])
def connect_node():
    """Register peer nodes posted as {'nodes': [url, ...]}; returns 201 or 400."""
    # Renamed local from `json` to `payload`: the old name shadowed the
    # imported `json` module inside this function.
    payload = request.get_json()
    # `payload` is None for a non-JSON body; treat that like a missing node list.
    nodes = payload.get('nodes') if payload is not None else None
    if nodes is None:
        return "No node", 400
    for node in nodes:
        blockchain.add_node(node)
    response = {'message': 'All the nodes are now connected. The Whalecoin 🐳🐳🐳🐳🐳🐳 Blockchain now contains the following nodes:',
                'total_nodes': list(blockchain.nodes)}
    return jsonify(response), 201
# Replacing the chain by the longest chain if needed
# this function will present in every node of blockchain and always checked so that the node remain upadatesd with other blockchains by hitiing replace_chain URL
@app.route('/replace_chain', methods=['GET'])
def replace_chain():
    """Run consensus: adopt the network's longest valid chain if ours is shorter."""
    if blockchain.replace_chain():
        body = {'message': 'The nodes had different chains so the chain was replaced by the longest one.',
                'new_chain': blockchain.chain}
    else:
        body = {'message': 'All good. The chain is the largest one.',
                'actual_chain': blockchain.chain}
    return jsonify(body), 200
# Running the app
# Start the Flask development server on port 5001.
# NOTE(review): host='0.0.0.0' binds on all interfaces (publicly reachable) —
# confirm that exposure is intended.
app.run(host='0.0.0.0', port=5001)
|
flexible
|
{
"blob_id": "e85d3660968410b83b14ba610150c0c8cc880119",
"index": 9191,
"step-1": "<mask token>\n\n\nclass Blockchain:\n\n def __init__(self):\n self.chain = []\n self.transactions = []\n self.create_block(proof=0, previous_hash='0')\n self.nodes = set()\n\n def create_block(self, proof, previous_hash):\n block = {'index': len(self.chain) + 1, 'timestamp': str(datetime.\n datetime.now()), 'proof': proof, 'previous_hash': previous_hash,\n 'transactions': self.transactions}\n self.transactions = []\n self.chain.append(block)\n return block\n <mask token>\n <mask token>\n\n def hash(self, block):\n encoded_block = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(encoded_block).hexdigest()\n <mask token>\n\n def add_transaction(self, senders, receiver, amount):\n self.transactions.append({'senders': senders, 'receiver': receiver,\n 'amount': amount})\n previous_block = self.get_previous_block()\n return previous_block['index'] + 1\n <mask token>\n\n def replace_chain(self):\n max_length = len(self.chain)\n longest_chain = None\n network = self.nodes\n for node in network:\n response = requests.get(f'http://{node}/get_chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n if length > max_length and self.is_chain_valid(chain):\n max_length = length\n longest_chain = chain\n if longest_chain:\n self.chain = longest_chain\n return True\n return False\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Blockchain:\n\n def __init__(self):\n self.chain = []\n self.transactions = []\n self.create_block(proof=0, previous_hash='0')\n self.nodes = set()\n\n def create_block(self, proof, previous_hash):\n block = {'index': len(self.chain) + 1, 'timestamp': str(datetime.\n datetime.now()), 'proof': proof, 'previous_hash': previous_hash,\n 'transactions': self.transactions}\n self.transactions = []\n self.chain.append(block)\n return block\n\n def get_previous_block(self):\n return self.chain[-1]\n\n def proof_of_work(self, previous_proof):\n new_proof = 1\n check_proof = False\n while check_proof is False:\n hash_operation = hashlib.sha256(str(new_proof ** 2 - \n previous_proof ** 2).encode()).hexdigest()\n if hash_operation[:4] == '0000':\n check_proof = True\n else:\n new_proof += 1\n return new_proof\n\n def hash(self, block):\n encoded_block = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(encoded_block).hexdigest()\n\n def is_chain_valid(self, chain):\n previous_block = chain[0]\n block_index = 1\n while block_index < len(chain):\n block = chain[block_index]\n if block['previous_hash'] != self.hash(previous_block):\n return False\n previous_proof = previous_block['proof']\n proof = block['proof']\n hash_operation = hashlib.sha256(str(proof ** 2 - previous_proof **\n 2).encode()).hexdigest()\n if hash_operation[:4] != '0000':\n return False\n previous_block = block\n block_index += 1\n return True\n\n def add_transaction(self, senders, receiver, amount):\n self.transactions.append({'senders': senders, 'receiver': receiver,\n 'amount': amount})\n previous_block = self.get_previous_block()\n return previous_block['index'] + 1\n\n def add_node(self, address):\n parsed_url = urlparse(address)\n self.nodes.add(parsed_url.netloc)\n\n def replace_chain(self):\n max_length = len(self.chain)\n longest_chain = None\n network = self.nodes\n for node in network:\n response = requests.get(f'http://{node}/get_chain')\n if 
response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n if length > max_length and self.is_chain_valid(chain):\n max_length = length\n longest_chain = chain\n if longest_chain:\n self.chain = longest_chain\n return True\n return False\n\n\n<mask token>\n\n\n@app.route('/mine_block', methods=['GET'])\ndef mine_block():\n previous_block = blockchain.get_previous_block()\n previous_proof = previous_block['proof']\n proof = blockchain.proof_of_work(previous_proof)\n previous_hash = blockchain.hash(previous_block)\n blockchain.add_transaction(node_address, 'Bhavjot', 1)\n block = blockchain.create_block(proof, previous_hash)\n response = {'message':\n 'Congratulations, you just mined a block! 😈😈😈😈😈🤓🤓🤓', 'index': block\n ['index'], 'timestamp': block['timestamp'], 'proof': block['proof'],\n 'previous_hash': block['previous_hash'], 'transactions': block[\n 'transactions']}\n return jsonify(response), 200\n\n\n@app.route('/get_chain', methods=['GET'])\ndef get_chain():\n response = {'chain': blockchain.chain, 'length': len(blockchain.chain)}\n return jsonify(response), 200\n\n\n<mask token>\n\n\n@app.route('/add_transactions', methods=['POST'])\ndef add_transaction():\n json = request.get_json()\n transaction_keys = ['sender', 'receiver', 'amount']\n if not all(key in json for key in transaction_keys):\n return 'Some elements of the transaction are missing', 400\n index = blockchain.add_transaction(json['sender'], json['receiver'],\n json['amount'])\n response = {'message': f'This transaction will be added to Block {index}'}\n return jsonify(response), 201\n\n\n@app.route('/connect_node', methods=['POST'])\ndef connect_node():\n json = request.get_json()\n nodes = json.get('nodes')\n if nodes is None:\n return 'No node', 400\n for node in nodes:\n blockchain.add_node(node)\n response = {'message':\n 'All the nodes are now connected. 
The Whalecoin 🐳🐳🐳🐳🐳🐳 Blockchain now contains the following nodes:'\n , 'total_nodes': list(blockchain.nodes)}\n return jsonify(response), 201\n\n\n@app.route('/replace_chain', methods=['GET'])\ndef replace_chain():\n is_chain_replaced = blockchain.replace_chain()\n if is_chain_replaced:\n response = {'message':\n 'The nodes had different chains so the chain was replaced by the longest one.'\n , 'new_chain': blockchain.chain}\n else:\n response = {'message': 'All good. The chain is the largest one.',\n 'actual_chain': blockchain.chain}\n return jsonify(response), 200\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Blockchain:\n\n def __init__(self):\n self.chain = []\n self.transactions = []\n self.create_block(proof=0, previous_hash='0')\n self.nodes = set()\n\n def create_block(self, proof, previous_hash):\n block = {'index': len(self.chain) + 1, 'timestamp': str(datetime.\n datetime.now()), 'proof': proof, 'previous_hash': previous_hash,\n 'transactions': self.transactions}\n self.transactions = []\n self.chain.append(block)\n return block\n\n def get_previous_block(self):\n return self.chain[-1]\n\n def proof_of_work(self, previous_proof):\n new_proof = 1\n check_proof = False\n while check_proof is False:\n hash_operation = hashlib.sha256(str(new_proof ** 2 - \n previous_proof ** 2).encode()).hexdigest()\n if hash_operation[:4] == '0000':\n check_proof = True\n else:\n new_proof += 1\n return new_proof\n\n def hash(self, block):\n encoded_block = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(encoded_block).hexdigest()\n\n def is_chain_valid(self, chain):\n previous_block = chain[0]\n block_index = 1\n while block_index < len(chain):\n block = chain[block_index]\n if block['previous_hash'] != self.hash(previous_block):\n return False\n previous_proof = previous_block['proof']\n proof = block['proof']\n hash_operation = hashlib.sha256(str(proof ** 2 - previous_proof **\n 2).encode()).hexdigest()\n if hash_operation[:4] != '0000':\n return False\n previous_block = block\n block_index += 1\n return True\n\n def add_transaction(self, senders, receiver, amount):\n self.transactions.append({'senders': senders, 'receiver': receiver,\n 'amount': amount})\n previous_block = self.get_previous_block()\n return previous_block['index'] + 1\n\n def add_node(self, address):\n parsed_url = urlparse(address)\n self.nodes.add(parsed_url.netloc)\n\n def replace_chain(self):\n max_length = len(self.chain)\n longest_chain = None\n network = self.nodes\n for node in network:\n response = requests.get(f'http://{node}/get_chain')\n if 
response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n if length > max_length and self.is_chain_valid(chain):\n max_length = length\n longest_chain = chain\n if longest_chain:\n self.chain = longest_chain\n return True\n return False\n\n\n<mask token>\n\n\n@app.route('/mine_block', methods=['GET'])\ndef mine_block():\n previous_block = blockchain.get_previous_block()\n previous_proof = previous_block['proof']\n proof = blockchain.proof_of_work(previous_proof)\n previous_hash = blockchain.hash(previous_block)\n blockchain.add_transaction(node_address, 'Bhavjot', 1)\n block = blockchain.create_block(proof, previous_hash)\n response = {'message':\n 'Congratulations, you just mined a block! 😈😈😈😈😈🤓🤓🤓', 'index': block\n ['index'], 'timestamp': block['timestamp'], 'proof': block['proof'],\n 'previous_hash': block['previous_hash'], 'transactions': block[\n 'transactions']}\n return jsonify(response), 200\n\n\n@app.route('/get_chain', methods=['GET'])\ndef get_chain():\n response = {'chain': blockchain.chain, 'length': len(blockchain.chain)}\n return jsonify(response), 200\n\n\n@app.route('/', methods=['GET'])\ndef custom_message():\n response = {'message': 'Congratulations you are on Whalecoin 🐳🐳🐳🐳🐳🐳'}\n return jsonify(response), 200\n\n\n@app.route('/add_transactions', methods=['POST'])\ndef add_transaction():\n json = request.get_json()\n transaction_keys = ['sender', 'receiver', 'amount']\n if not all(key in json for key in transaction_keys):\n return 'Some elements of the transaction are missing', 400\n index = blockchain.add_transaction(json['sender'], json['receiver'],\n json['amount'])\n response = {'message': f'This transaction will be added to Block {index}'}\n return jsonify(response), 201\n\n\n@app.route('/connect_node', methods=['POST'])\ndef connect_node():\n json = request.get_json()\n nodes = json.get('nodes')\n if nodes is None:\n return 'No node', 400\n for node in nodes:\n blockchain.add_node(node)\n 
response = {'message':\n 'All the nodes are now connected. The Whalecoin 🐳🐳🐳🐳🐳🐳 Blockchain now contains the following nodes:'\n , 'total_nodes': list(blockchain.nodes)}\n return jsonify(response), 201\n\n\n@app.route('/replace_chain', methods=['GET'])\ndef replace_chain():\n is_chain_replaced = blockchain.replace_chain()\n if is_chain_replaced:\n response = {'message':\n 'The nodes had different chains so the chain was replaced by the longest one.'\n , 'new_chain': blockchain.chain}\n else:\n response = {'message': 'All good. The chain is the largest one.',\n 'actual_chain': blockchain.chain}\n return jsonify(response), 200\n\n\napp.run(host='0.0.0.0', port=5001)\n",
"step-4": "import datetime\nimport hashlib\nimport json\nfrom flask import Flask, jsonify, request\nimport requests\nfrom uuid import uuid4\nfrom urllib.parse import urlparse\n\n\nclass Blockchain:\n\n def __init__(self):\n self.chain = []\n self.transactions = []\n self.create_block(proof=0, previous_hash='0')\n self.nodes = set()\n\n def create_block(self, proof, previous_hash):\n block = {'index': len(self.chain) + 1, 'timestamp': str(datetime.\n datetime.now()), 'proof': proof, 'previous_hash': previous_hash,\n 'transactions': self.transactions}\n self.transactions = []\n self.chain.append(block)\n return block\n\n def get_previous_block(self):\n return self.chain[-1]\n\n def proof_of_work(self, previous_proof):\n new_proof = 1\n check_proof = False\n while check_proof is False:\n hash_operation = hashlib.sha256(str(new_proof ** 2 - \n previous_proof ** 2).encode()).hexdigest()\n if hash_operation[:4] == '0000':\n check_proof = True\n else:\n new_proof += 1\n return new_proof\n\n def hash(self, block):\n encoded_block = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(encoded_block).hexdigest()\n\n def is_chain_valid(self, chain):\n previous_block = chain[0]\n block_index = 1\n while block_index < len(chain):\n block = chain[block_index]\n if block['previous_hash'] != self.hash(previous_block):\n return False\n previous_proof = previous_block['proof']\n proof = block['proof']\n hash_operation = hashlib.sha256(str(proof ** 2 - previous_proof **\n 2).encode()).hexdigest()\n if hash_operation[:4] != '0000':\n return False\n previous_block = block\n block_index += 1\n return True\n\n def add_transaction(self, senders, receiver, amount):\n self.transactions.append({'senders': senders, 'receiver': receiver,\n 'amount': amount})\n previous_block = self.get_previous_block()\n return previous_block['index'] + 1\n\n def add_node(self, address):\n parsed_url = urlparse(address)\n self.nodes.add(parsed_url.netloc)\n\n def replace_chain(self):\n max_length 
= len(self.chain)\n longest_chain = None\n network = self.nodes\n for node in network:\n response = requests.get(f'http://{node}/get_chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n if length > max_length and self.is_chain_valid(chain):\n max_length = length\n longest_chain = chain\n if longest_chain:\n self.chain = longest_chain\n return True\n return False\n\n\napp = Flask(__name__)\nblockchain = Blockchain()\nnode_address = str(uuid4()).replace('-', '')\n\n\n@app.route('/mine_block', methods=['GET'])\ndef mine_block():\n previous_block = blockchain.get_previous_block()\n previous_proof = previous_block['proof']\n proof = blockchain.proof_of_work(previous_proof)\n previous_hash = blockchain.hash(previous_block)\n blockchain.add_transaction(node_address, 'Bhavjot', 1)\n block = blockchain.create_block(proof, previous_hash)\n response = {'message':\n 'Congratulations, you just mined a block! 😈😈😈😈😈🤓🤓🤓', 'index': block\n ['index'], 'timestamp': block['timestamp'], 'proof': block['proof'],\n 'previous_hash': block['previous_hash'], 'transactions': block[\n 'transactions']}\n return jsonify(response), 200\n\n\n@app.route('/get_chain', methods=['GET'])\ndef get_chain():\n response = {'chain': blockchain.chain, 'length': len(blockchain.chain)}\n return jsonify(response), 200\n\n\n@app.route('/', methods=['GET'])\ndef custom_message():\n response = {'message': 'Congratulations you are on Whalecoin 🐳🐳🐳🐳🐳🐳'}\n return jsonify(response), 200\n\n\n@app.route('/add_transactions', methods=['POST'])\ndef add_transaction():\n json = request.get_json()\n transaction_keys = ['sender', 'receiver', 'amount']\n if not all(key in json for key in transaction_keys):\n return 'Some elements of the transaction are missing', 400\n index = blockchain.add_transaction(json['sender'], json['receiver'],\n json['amount'])\n response = {'message': f'This transaction will be added to Block {index}'}\n return jsonify(response), 
201\n\n\n@app.route('/connect_node', methods=['POST'])\ndef connect_node():\n json = request.get_json()\n nodes = json.get('nodes')\n if nodes is None:\n return 'No node', 400\n for node in nodes:\n blockchain.add_node(node)\n response = {'message':\n 'All the nodes are now connected. The Whalecoin 🐳🐳🐳🐳🐳🐳 Blockchain now contains the following nodes:'\n , 'total_nodes': list(blockchain.nodes)}\n return jsonify(response), 201\n\n\n@app.route('/replace_chain', methods=['GET'])\ndef replace_chain():\n is_chain_replaced = blockchain.replace_chain()\n if is_chain_replaced:\n response = {'message':\n 'The nodes had different chains so the chain was replaced by the longest one.'\n , 'new_chain': blockchain.chain}\n else:\n response = {'message': 'All good. The chain is the largest one.',\n 'actual_chain': blockchain.chain}\n return jsonify(response), 200\n\n\napp.run(host='0.0.0.0', port=5001)\n",
"step-5": "import datetime # to add timestamps on every block in blockchain\nimport hashlib # library that is ued to hash the block\nimport json # to communicate in json data\n# Flask to implement webservices jsonify to see the jsop message/response\n# request help us to connect all the nodes of the blockchain together froming the p2p network\nfrom flask import Flask, jsonify, request\n# it will help us to verify that all the blockchain have same blockhain or not http requests (used in replace_cahin)\nimport requests\nfrom uuid import uuid4\nfrom urllib.parse import urlparse\n\n# Building a Blockchain\n\n\nclass Blockchain:\n\n def __init__(self):\n self.chain = [] # our main block chain\n # now we will create the list of transation which will record the all transactions\n self.transactions = []\n # create_block used to create the block in blockchain so it is executed only when the block is mined(meaning it has winnnig proof_of_work=proof) proof=0 and previous_hash='0' for the genesis block\n self.create_block(proof=0, previous_hash='0')\n # nodes will contains the unique identifier of the address of all nodes in p2p network\n self.nodes = set() # we have taken set() instead of list because we know that address are randomly generated by uuid4 to avoid duplicacy in it\n # part1\n\n def create_block(self, proof, previous_hash):\n block = { # dictionary of python data structure\n 'index': len(self.chain)+1,\n 'timestamp': str(datetime.datetime.now()),\n 'proof': proof, # works like a nounce of block stops when we reach at or below the target\n 'previous_hash': previous_hash,\n 'transactions': self.transactions}\n self.transactions = [] # this need to be done bcoz we cant have duplicates lists of transactions in the further blocks so empty the transation that had been added in the block\n self.chain.append(block)\n return block\n\n def get_previous_block(self):\n return self.chain[-1]\n\n def proof_of_work(self, previous_proof):\n new_proof = 1\n check_proof = False\n 
while check_proof is False:\n hash_operation = hashlib.sha256(\n str(new_proof**2-previous_proof**2).encode()).hexdigest()\n if hash_operation[:4] == '0000':\n check_proof = True\n else:\n new_proof += 1\n return new_proof # it is just a no. corresponding to the game solved by person is having a hash with trailing 4 zeroe's\n\n # hash of a block is created after generating block thats we have only use previous_hash because its already created\n def hash(self, block):\n encoded_block = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(encoded_block).hexdigest()\n\n def is_chain_valid(self, chain):\n # reference of first block stored genesis block\n previous_block = chain[0]\n block_index = 1 # required for iteration\n while block_index < len(chain):\n block = chain[block_index] # cuurent block\n # checking weather the refernce stored in property previus_hash is currently matched or not with the hash of previous block using hash function\n if block['previous_hash'] != self.hash(previous_block):\n return False\n previous_proof = previous_block['proof']\n proof = block['proof']\n # verfying the proof of block with the data proof and previous proof it is easy then creating the proof\n hash_operation = hashlib.sha256(\n str(proof**2 - previous_proof**2).encode()).hexdigest()\n # the more is zero's the more is harder to mine the block\n if hash_operation[:4] != '0000':\n return False\n previous_block = block\n block_index += 1\n return True\n\n # functions used to get add the transactions to the lists\n def add_transaction(self, senders, receiver, amount):\n self.transactions.append({\n 'senders': senders,\n 'receiver': receiver,\n 'amount': amount\n })\n previous_block = self.get_previous_block()\n # +1 beacause before mining the transaction are added so new_block index will be +1 then previous\n return previous_block['index']+1\n # part-1 ends\n\n # part-3--> dealing with decentarlized application and transactions\n\n # this function allow us to add 
different nodes to chain\n\n def add_node(self, address): # generating the decentarlized application\n # we need to parse the url before adding it\n parsed_url = urlparse(address)\n # .netloc gives us the unique identifier of the node address removing the unrequired part from it\n self.nodes.add(parsed_url.netloc)\n\n # this function help us to solve the problem of consensus protocols (competing chain)\n\n def replace_chain(self):\n # this variable help us to find the length of longest chain among different network\n max_length = len(self.chain)\n longest_chain = None\n network = self.nodes # this variable will hold the address of all the nodes in network\n for node in network:\n # we know the nodes array will hold only the netlock value in nodes so we are going to use taht and make a request to that node check its length\n # using the requests library we make a requests to that node address ([f'http://{node}/get_chain'] --> [f'http://127.0.0.5000/get_chain')]\n response = requests.get(f'http://{node}/get_chain')\n if response.status_code == 200: # this ids the vode chaeck something is received in request\n length = response.json()['length']\n chain = response.json()['chain']\n if length > max_length and self.is_chain_valid(chain):\n max_length = length\n longest_chain = chain\n # this will happen in every node of network\n if longest_chain:\n # if this chain is shorter than otherit will be updated\n self.chain = longest_chain\n return True\n # if this chain is only longest in network than return false and no update\n return False\n # part-3 ends\n# Mining our Blockchain\n\n\napp = Flask(__name__)\n\n# Creating a Blockchain\n# creating the instance of blockchain\nblockchain = Blockchain()\n\n# Mining the blockchain\n# create an random and unique address for the node on port 5000\n# this is the address used by to send the whale coin when the miner mines the wahle coin\nnode_address = str(uuid4()).replace('-', '')\n\n# part-2\n\n\n@app.route('/mine_block', 
methods=['GET'])\ndef mine_block():\n previous_block = blockchain.get_previous_block()\n previous_proof = previous_block['proof']\n proof = blockchain.proof_of_work(previous_proof)\n previous_hash = blockchain.hash(previous_block)\n # miners price\n # usually the reciever public address is created when user generate the wallet and mining pool send the coin after mining the block to miner address present in the bat file which is edited after downloading the software\n blockchain.add_transaction(node_address, 'Bhavjot', 1)\n # when created blockchain is called all the transactions performed will be inserted inside the current created block and when appended in transactions it will be again change to [] empty to avoid the duplicacy\n block = blockchain.create_block(proof, previous_hash)\n response = {'message': 'Congratulations, you just mined a block! 😈😈😈😈😈🤓🤓🤓', # response is a json data\n 'index': block['index'],\n 'timestamp': block['timestamp'],\n 'proof': block['proof'],\n 'previous_hash': block['previous_hash'],\n 'transactions': block['transactions']}\n return jsonify(response), 200\n\n# getting all blocks in chain\n\n\n@app.route('/get_chain', methods=['GET'])\ndef get_chain():\n response = {\n 'chain': blockchain.chain,\n 'length': len(blockchain.chain)\n }\n return jsonify(response), 200\n\n# custom message\n\n\n@app.route('/', methods=['GET'])\ndef custom_message():\n response = {\n 'message': 'Congratulations you are on Whalecoin 🐳🐳🐳🐳🐳🐳'\n }\n return jsonify(response), 200\n\n# part-2 ends\n# creating the transactions\n\n\n@app.route('/add_transactions', methods=['POST'])\ndef add_transaction():\n # this will help us to extract te post request made in postman like req.params.name in express\n json = request.get_json()\n # this will hep us to check that all the parameters are present or not for adding the transactions\n transaction_keys = ['sender', 'receiver', 'amount']\n if not all(key in json for key in transaction_keys):\n return 'Some elements of the 
transaction are missing', 400\n index = blockchain.add_transaction(\n json['sender'], json['receiver'], json['amount'])\n # when the block is mined all the transations in lists is added to block\n response = {'message': f'This transaction will be added to Block {index}'}\n return jsonify(response), 201\n\n\n@app.route('/connect_node', methods=['POST'])\ndef connect_node():\n json = request.get_json() # we will get request message send from postman\n # {'nodes':['http://127.0.0.1:5000','http://127.0.0.1:5001','http://127.0.0.1:5003',...]} when adding nodes using add_nodes 127.0.0.1:5001 it will be extracted using netloc\n nodes = json.get('nodes')\n if nodes is None:\n return \"No node\", 400\n for node in nodes:\n blockchain.add_node(node) # add our nodes to network\n response = {'message': 'All the nodes are now connected. The Whalecoin 🐳🐳🐳🐳🐳🐳 Blockchain now contains the following nodes:',\n 'total_nodes': list(blockchain.nodes)}\n return jsonify(response), 201\n\n\n# Replacing the chain by the longest chain if needed\n# this function will present in every node of blockchain and always checked so that the node remain upadatesd with other blockchains by hitiing replace_chain URL\n@ app.route('/replace_chain', methods=['GET'])\ndef replace_chain():\n # using the above defined function in class\n is_chain_replaced = blockchain.replace_chain()\n if is_chain_replaced: # means the current blockchain was the shortest one and it is replaced\n response = {'message': 'The nodes had different chains so the chain was replaced by the longest one.',\n 'new_chain': blockchain.chain}\n else: # means the current blockchain was not the shortest one and it is not replaced\n response = {'message': 'All good. The chain is the largest one.',\n 'actual_chain': blockchain.chain}\n return jsonify(response), 200\n\n\n# Running the app\n# host= '0.0.0.0' specifies that it is available publicily\napp.run(host='0.0.0.0', port=5001)\n",
"step-ids": [
6,
15,
17,
19,
20
]
}
|
[
6,
15,
17,
19,
20
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_api(env):
assert set(env.parameters.keys()) == {'knowledge_structure',
'action_space', 'learning_item_base'}
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_api(env):
assert set(env.parameters.keys()) == {'knowledge_structure',
'action_space', 'learning_item_base'}
@pytest.mark.parametrize('n_step', [True, False])
def test_env(env, tmp_path, n_step):
from EduSim.Envs.KSS import kss_train_eval, KSSAgent
agent = KSSAgent(env.action_space)
kss_train_eval(agent, env, max_steps=20, max_episode_num=10, level=
'summary')
<|reserved_special_token_1|>
import pytest
def test_api(env):
assert set(env.parameters.keys()) == {'knowledge_structure',
'action_space', 'learning_item_base'}
@pytest.mark.parametrize('n_step', [True, False])
def test_env(env, tmp_path, n_step):
from EduSim.Envs.KSS import kss_train_eval, KSSAgent
agent = KSSAgent(env.action_space)
kss_train_eval(agent, env, max_steps=20, max_episode_num=10, level=
'summary')
<|reserved_special_token_1|>
# coding: utf-8
# 2019/11/27 @ tongshiwei
import pytest
def test_api(env):
assert set(env.parameters.keys()) == {"knowledge_structure", "action_space", "learning_item_base"}
@pytest.mark.parametrize("n_step", [True, False])
def test_env(env, tmp_path, n_step):
    """Smoke-test one bounded train/eval loop of a KSS agent on the env fixture.

    NOTE(review): ``n_step`` and ``tmp_path`` are accepted but unused in the
    body — presumably kept for fixture/parametrize symmetry; confirm.
    """
    # Imported lazily so test collection does not require EduSim at import time.
    from EduSim.Envs.KSS import kss_train_eval, KSSAgent
    agent = KSSAgent(env.action_space)
    # Bounded run: limited by max_steps=20 and max_episode_num=10,
    # reporting at "summary" level only.
    kss_train_eval(
        agent,
        env,
        max_steps=20,
        max_episode_num=10,
        level="summary",
    )
|
flexible
|
{
"blob_id": "b1ae3abb6decf4d70bc2372e70cf4f5b868e805d",
"index": 8756,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_api(env):\n assert set(env.parameters.keys()) == {'knowledge_structure',\n 'action_space', 'learning_item_base'}\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_api(env):\n assert set(env.parameters.keys()) == {'knowledge_structure',\n 'action_space', 'learning_item_base'}\n\n\n@pytest.mark.parametrize('n_step', [True, False])\ndef test_env(env, tmp_path, n_step):\n from EduSim.Envs.KSS import kss_train_eval, KSSAgent\n agent = KSSAgent(env.action_space)\n kss_train_eval(agent, env, max_steps=20, max_episode_num=10, level=\n 'summary')\n",
"step-4": "import pytest\n\n\ndef test_api(env):\n assert set(env.parameters.keys()) == {'knowledge_structure',\n 'action_space', 'learning_item_base'}\n\n\n@pytest.mark.parametrize('n_step', [True, False])\ndef test_env(env, tmp_path, n_step):\n from EduSim.Envs.KSS import kss_train_eval, KSSAgent\n agent = KSSAgent(env.action_space)\n kss_train_eval(agent, env, max_steps=20, max_episode_num=10, level=\n 'summary')\n",
"step-5": "# coding: utf-8\n# 2019/11/27 @ tongshiwei\n\nimport pytest\n\n\ndef test_api(env):\n assert set(env.parameters.keys()) == {\"knowledge_structure\", \"action_space\", \"learning_item_base\"}\n\n\n@pytest.mark.parametrize(\"n_step\", [True, False])\ndef test_env(env, tmp_path, n_step):\n from EduSim.Envs.KSS import kss_train_eval, KSSAgent\n agent = KSSAgent(env.action_space)\n\n kss_train_eval(\n agent,\n env,\n max_steps=20,\n max_episode_num=10,\n level=\"summary\",\n )\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class State(Enum):
ok = True
error = False
<|reserved_special_token_0|>
def close_db_connection():
try:
connection.close()
except Exception:
print('Error closing connection')
def create_new_category(category):
state = State.ok
try:
cursor = get_db_connection()
query = ('CREATE TABLE {0} (word varchar(15) primary key, weight real)'
.format(category))
cursor.execute(query)
except Exception:
state = State.error
print('Error with creating new category')
finally:
close_db_connection()
return state
def get_category_data(category):
state = State.ok
data = list()
try:
cursor = get_db_connection()
query = 'SELECT * from {0} ORDER BY weight DESC'.format(category)
for row in cursor.execute(query):
data.append(row)
except Exception:
state = State.error
print('Error with getting data from {0} category'.format(category))
finally:
close_db_connection()
return state.value, data
<|reserved_special_token_0|>
def get_file_names_in_category(category):
state = State.ok
names = list()
try:
cursor = get_db_connection()
query = "SELECT * FROM result WHERE category = '{0}'".format(category)
for row in cursor.execute(query):
names.append(row)
except Exception:
state = State.error
print('Error with getting category file names')
finally:
close_db_connection()
return state.value, names
def get_file_names():
state = State.ok
names = list()
try:
cursor = get_db_connection()
query = 'SELECT * FROM result'
for row in cursor.execute(query):
names.append(row)
except Exception:
state = State.error
print('Error with getting category file names')
finally:
close_db_connection()
return state.value, names
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class State(Enum):
ok = True
error = False
def get_db_connection():
try:
global connection
connection = sqlite3.connect(DB_NAME)
cursor = connection.cursor()
except Exception:
print('Error connection db {0}'.format(DB_NAME))
connection.close()
return
return cursor
def close_db_connection():
try:
connection.close()
except Exception:
print('Error closing connection')
def create_new_category(category):
state = State.ok
try:
cursor = get_db_connection()
query = ('CREATE TABLE {0} (word varchar(15) primary key, weight real)'
.format(category))
cursor.execute(query)
except Exception:
state = State.error
print('Error with creating new category')
finally:
close_db_connection()
return state
def get_category_data(category):
state = State.ok
data = list()
try:
cursor = get_db_connection()
query = 'SELECT * from {0} ORDER BY weight DESC'.format(category)
for row in cursor.execute(query):
data.append(row)
except Exception:
state = State.error
print('Error with getting data from {0} category'.format(category))
finally:
close_db_connection()
return state.value, data
<|reserved_special_token_0|>
def get_file_names_in_category(category):
state = State.ok
names = list()
try:
cursor = get_db_connection()
query = "SELECT * FROM result WHERE category = '{0}'".format(category)
for row in cursor.execute(query):
names.append(row)
except Exception:
state = State.error
print('Error with getting category file names')
finally:
close_db_connection()
return state.value, names
def get_file_names():
state = State.ok
names = list()
try:
cursor = get_db_connection()
query = 'SELECT * FROM result'
for row in cursor.execute(query):
names.append(row)
except Exception:
state = State.error
print('Error with getting category file names')
finally:
close_db_connection()
return state.value, names
<|reserved_special_token_1|>
<|reserved_special_token_0|>
DB_NAME = 'categories.db'
class State(Enum):
ok = True
error = False
def get_db_connection():
try:
global connection
connection = sqlite3.connect(DB_NAME)
cursor = connection.cursor()
except Exception:
print('Error connection db {0}'.format(DB_NAME))
connection.close()
return
return cursor
def close_db_connection():
try:
connection.close()
except Exception:
print('Error closing connection')
def create_new_category(category):
state = State.ok
try:
cursor = get_db_connection()
query = ('CREATE TABLE {0} (word varchar(15) primary key, weight real)'
.format(category))
cursor.execute(query)
except Exception:
state = State.error
print('Error with creating new category')
finally:
close_db_connection()
return state
def get_category_data(category):
state = State.ok
data = list()
try:
cursor = get_db_connection()
query = 'SELECT * from {0} ORDER BY weight DESC'.format(category)
for row in cursor.execute(query):
data.append(row)
except Exception:
state = State.error
print('Error with getting data from {0} category'.format(category))
finally:
close_db_connection()
return state.value, data
def set_category_data(category, data):
state = State.ok
try:
cursor = get_db_connection()
for key, value in data:
query = (
'INSERT OR REPLACE INTO {0} (word, weight) VALUES({1},{2})'
.format(category, key, value))
cursor.execute(query)
connection.commit()
except Exception:
state = State.error
print('Error with setting data to database in {0} category'.format(
category))
finally:
close_db_connection()
return state.value
def get_file_names_in_category(category):
state = State.ok
names = list()
try:
cursor = get_db_connection()
query = "SELECT * FROM result WHERE category = '{0}'".format(category)
for row in cursor.execute(query):
names.append(row)
except Exception:
state = State.error
print('Error with getting category file names')
finally:
close_db_connection()
return state.value, names
def get_file_names():
state = State.ok
names = list()
try:
cursor = get_db_connection()
query = 'SELECT * FROM result'
for row in cursor.execute(query):
names.append(row)
except Exception:
state = State.error
print('Error with getting category file names')
finally:
close_db_connection()
return state.value, names
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import sqlite3
from enum import Enum
DB_NAME = 'categories.db'
class State(Enum):
ok = True
error = False
def get_db_connection():
try:
global connection
connection = sqlite3.connect(DB_NAME)
cursor = connection.cursor()
except Exception:
print('Error connection db {0}'.format(DB_NAME))
connection.close()
return
return cursor
def close_db_connection():
try:
connection.close()
except Exception:
print('Error closing connection')
def create_new_category(category):
state = State.ok
try:
cursor = get_db_connection()
query = ('CREATE TABLE {0} (word varchar(15) primary key, weight real)'
.format(category))
cursor.execute(query)
except Exception:
state = State.error
print('Error with creating new category')
finally:
close_db_connection()
return state
def get_category_data(category):
state = State.ok
data = list()
try:
cursor = get_db_connection()
query = 'SELECT * from {0} ORDER BY weight DESC'.format(category)
for row in cursor.execute(query):
data.append(row)
except Exception:
state = State.error
print('Error with getting data from {0} category'.format(category))
finally:
close_db_connection()
return state.value, data
def set_category_data(category, data):
state = State.ok
try:
cursor = get_db_connection()
for key, value in data:
query = (
'INSERT OR REPLACE INTO {0} (word, weight) VALUES({1},{2})'
.format(category, key, value))
cursor.execute(query)
connection.commit()
except Exception:
state = State.error
print('Error with setting data to database in {0} category'.format(
category))
finally:
close_db_connection()
return state.value
def get_file_names_in_category(category):
state = State.ok
names = list()
try:
cursor = get_db_connection()
query = "SELECT * FROM result WHERE category = '{0}'".format(category)
for row in cursor.execute(query):
names.append(row)
except Exception:
state = State.error
print('Error with getting category file names')
finally:
close_db_connection()
return state.value, names
def get_file_names():
state = State.ok
names = list()
try:
cursor = get_db_connection()
query = 'SELECT * FROM result'
for row in cursor.execute(query):
names.append(row)
except Exception:
state = State.error
print('Error with getting category file names')
finally:
close_db_connection()
return state.value, names
<|reserved_special_token_1|>
'''
Module for interaction with database
'''
import sqlite3
from enum import Enum
DB_NAME = 'categories.db'
class State(Enum):
    """Outcome of a DB operation.

    Most helpers in this module return ``state.value`` (a plain bool);
    ``create_new_category`` returns the enum member itself.
    """
    ok = True
    error = False
def get_db_connection():
    """Open a connection to DB_NAME and return a cursor, or None on failure.

    Side effect: keeps the open connection in the module-level global
    ``connection`` so close_db_connection() can close it afterwards.
    """
    global connection
    try:
        connection = sqlite3.connect(DB_NAME)
        cursor = connection.cursor()
    except Exception:
        print("Error connection db {0}".format(DB_NAME))
        # Bug fix: on the very first call a failed connect() left the
        # global unassigned, so the old unconditional connection.close()
        # raised NameError instead of returning None. Close best-effort.
        try:
            connection.close()
        except Exception:
            pass
        return
    return cursor
def close_db_connection():
    """Best-effort close of the module-level connection; never raises.

    Relies on the global ``connection`` set by get_db_connection().
    """
    try:
        connection.close()
    except Exception:
        print("Error closing connection")
def create_new_category(category):
    """Create a new (word, weight) table named *category*.

    Returns State.ok on success, State.error on failure.
    NOTE(review): unlike the other helpers this returns the State enum
    itself rather than ``state.value``; both enum members are truthy, so
    callers must compare against State.ok explicitly — confirm call sites.
    """
    state = State.ok
    try:
        # Table names cannot be bound as SQL parameters, so reject anything
        # that is not a plain identifier to avoid SQL injection via the
        # format() call below.
        if not category.isidentifier():
            raise ValueError("invalid category name")
        cursor = get_db_connection()
        query = "CREATE TABLE {0} (word varchar(15) primary key, weight real)".format(category)
        cursor.execute(query)
    except Exception:
        state = State.error
        print("Error with creating new category")
    finally:
        close_db_connection()
    return state
def get_category_data(category):
    """Fetch all (word, weight) rows of *category*, heaviest first.

    Returns a (ok_flag, rows) pair, where ok_flag is a plain bool.
    """
    outcome = State.ok
    rows = []
    try:
        cur = get_db_connection()
        sql = "SELECT * from {0} ORDER BY weight DESC".format(category)
        rows.extend(cur.execute(sql))
    except Exception:
        outcome = State.error
        print("Error with getting data from {0} category".format(category))
    finally:
        close_db_connection()
    return outcome.value, rows
def set_category_data(category, data):
    """Insert or replace (word, weight) pairs into the *category* table.

    data: iterable of (word, weight) pairs.
    Returns True on success, False on failure.
    """
    state = State.ok
    try:
        cursor = get_db_connection()
        # Bug fix: the old "VALUES({1},{2})".format(...) interpolated the
        # word directly into the SQL text, which produced invalid SQL for
        # any unquoted string and was injectable. Bind the values instead.
        query = 'INSERT OR REPLACE INTO {0} (word, weight) VALUES(?, ?)'.format(category)
        for key, value in data:
            cursor.execute(query, (key, value))
        connection.commit()
    except Exception:
        state = State.error
        print("Error with setting data to database in {0} category".format(category))
    finally:
        close_db_connection()
    return state.value
def get_file_names_in_category(category):
    """Return (ok_flag, rows) for all `result` entries of *category*."""
    outcome = State.ok
    rows = []
    try:
        cur = get_db_connection()
        sql = "SELECT * FROM result WHERE category = '{0}'".format(category)
        rows = list(cur.execute(sql))
    except Exception:
        outcome = State.error
        print("Error with getting category file names")
    finally:
        close_db_connection()
    return outcome.value, rows
def get_file_names():
    """Return (ok_flag, rows) for every entry in the `result` table."""
    outcome = State.ok
    rows = []
    try:
        cur = get_db_connection()
        rows = list(cur.execute("SELECT * FROM result"))
    except Exception:
        outcome = State.error
        print("Error with getting category file names")
    finally:
        close_db_connection()
    return outcome.value, rows
|
flexible
|
{
"blob_id": "9b3c2604b428295eda16030b45cf739e714f3d00",
"index": 1614,
"step-1": "<mask token>\n\n\nclass State(Enum):\n ok = True\n error = False\n\n\n<mask token>\n\n\ndef close_db_connection():\n try:\n connection.close()\n except Exception:\n print('Error closing connection')\n\n\ndef create_new_category(category):\n state = State.ok\n try:\n cursor = get_db_connection()\n query = ('CREATE TABLE {0} (word varchar(15) primary key, weight real)'\n .format(category))\n cursor.execute(query)\n except Exception:\n state = State.error\n print('Error with creating new category')\n finally:\n close_db_connection()\n return state\n\n\ndef get_category_data(category):\n state = State.ok\n data = list()\n try:\n cursor = get_db_connection()\n query = 'SELECT * from {0} ORDER BY weight DESC'.format(category)\n for row in cursor.execute(query):\n data.append(row)\n except Exception:\n state = State.error\n print('Error with getting data from {0} category'.format(category))\n finally:\n close_db_connection()\n return state.value, data\n\n\n<mask token>\n\n\ndef get_file_names_in_category(category):\n state = State.ok\n names = list()\n try:\n cursor = get_db_connection()\n query = \"SELECT * FROM result WHERE category = '{0}'\".format(category)\n for row in cursor.execute(query):\n names.append(row)\n except Exception:\n state = State.error\n print('Error with getting category file names')\n finally:\n close_db_connection()\n return state.value, names\n\n\ndef get_file_names():\n state = State.ok\n names = list()\n try:\n cursor = get_db_connection()\n query = 'SELECT * FROM result'\n for row in cursor.execute(query):\n names.append(row)\n except Exception:\n state = State.error\n print('Error with getting category file names')\n finally:\n close_db_connection()\n return state.value, names\n",
"step-2": "<mask token>\n\n\nclass State(Enum):\n ok = True\n error = False\n\n\ndef get_db_connection():\n try:\n global connection\n connection = sqlite3.connect(DB_NAME)\n cursor = connection.cursor()\n except Exception:\n print('Error connection db {0}'.format(DB_NAME))\n connection.close()\n return\n return cursor\n\n\ndef close_db_connection():\n try:\n connection.close()\n except Exception:\n print('Error closing connection')\n\n\ndef create_new_category(category):\n state = State.ok\n try:\n cursor = get_db_connection()\n query = ('CREATE TABLE {0} (word varchar(15) primary key, weight real)'\n .format(category))\n cursor.execute(query)\n except Exception:\n state = State.error\n print('Error with creating new category')\n finally:\n close_db_connection()\n return state\n\n\ndef get_category_data(category):\n state = State.ok\n data = list()\n try:\n cursor = get_db_connection()\n query = 'SELECT * from {0} ORDER BY weight DESC'.format(category)\n for row in cursor.execute(query):\n data.append(row)\n except Exception:\n state = State.error\n print('Error with getting data from {0} category'.format(category))\n finally:\n close_db_connection()\n return state.value, data\n\n\n<mask token>\n\n\ndef get_file_names_in_category(category):\n state = State.ok\n names = list()\n try:\n cursor = get_db_connection()\n query = \"SELECT * FROM result WHERE category = '{0}'\".format(category)\n for row in cursor.execute(query):\n names.append(row)\n except Exception:\n state = State.error\n print('Error with getting category file names')\n finally:\n close_db_connection()\n return state.value, names\n\n\ndef get_file_names():\n state = State.ok\n names = list()\n try:\n cursor = get_db_connection()\n query = 'SELECT * FROM result'\n for row in cursor.execute(query):\n names.append(row)\n except Exception:\n state = State.error\n print('Error with getting category file names')\n finally:\n close_db_connection()\n return state.value, names\n",
"step-3": "<mask token>\nDB_NAME = 'categories.db'\n\n\nclass State(Enum):\n ok = True\n error = False\n\n\ndef get_db_connection():\n try:\n global connection\n connection = sqlite3.connect(DB_NAME)\n cursor = connection.cursor()\n except Exception:\n print('Error connection db {0}'.format(DB_NAME))\n connection.close()\n return\n return cursor\n\n\ndef close_db_connection():\n try:\n connection.close()\n except Exception:\n print('Error closing connection')\n\n\ndef create_new_category(category):\n state = State.ok\n try:\n cursor = get_db_connection()\n query = ('CREATE TABLE {0} (word varchar(15) primary key, weight real)'\n .format(category))\n cursor.execute(query)\n except Exception:\n state = State.error\n print('Error with creating new category')\n finally:\n close_db_connection()\n return state\n\n\ndef get_category_data(category):\n state = State.ok\n data = list()\n try:\n cursor = get_db_connection()\n query = 'SELECT * from {0} ORDER BY weight DESC'.format(category)\n for row in cursor.execute(query):\n data.append(row)\n except Exception:\n state = State.error\n print('Error with getting data from {0} category'.format(category))\n finally:\n close_db_connection()\n return state.value, data\n\n\ndef set_category_data(category, data):\n state = State.ok\n try:\n cursor = get_db_connection()\n for key, value in data:\n query = (\n 'INSERT OR REPLACE INTO {0} (word, weight) VALUES({1},{2})'\n .format(category, key, value))\n cursor.execute(query)\n connection.commit()\n except Exception:\n state = State.error\n print('Error with setting data to database in {0} category'.format(\n category))\n finally:\n close_db_connection()\n return state.value\n\n\ndef get_file_names_in_category(category):\n state = State.ok\n names = list()\n try:\n cursor = get_db_connection()\n query = \"SELECT * FROM result WHERE category = '{0}'\".format(category)\n for row in cursor.execute(query):\n names.append(row)\n except Exception:\n state = State.error\n print('Error with 
getting category file names')\n finally:\n close_db_connection()\n return state.value, names\n\n\ndef get_file_names():\n state = State.ok\n names = list()\n try:\n cursor = get_db_connection()\n query = 'SELECT * FROM result'\n for row in cursor.execute(query):\n names.append(row)\n except Exception:\n state = State.error\n print('Error with getting category file names')\n finally:\n close_db_connection()\n return state.value, names\n",
"step-4": "<mask token>\nimport sqlite3\nfrom enum import Enum\nDB_NAME = 'categories.db'\n\n\nclass State(Enum):\n ok = True\n error = False\n\n\ndef get_db_connection():\n try:\n global connection\n connection = sqlite3.connect(DB_NAME)\n cursor = connection.cursor()\n except Exception:\n print('Error connection db {0}'.format(DB_NAME))\n connection.close()\n return\n return cursor\n\n\ndef close_db_connection():\n try:\n connection.close()\n except Exception:\n print('Error closing connection')\n\n\ndef create_new_category(category):\n state = State.ok\n try:\n cursor = get_db_connection()\n query = ('CREATE TABLE {0} (word varchar(15) primary key, weight real)'\n .format(category))\n cursor.execute(query)\n except Exception:\n state = State.error\n print('Error with creating new category')\n finally:\n close_db_connection()\n return state\n\n\ndef get_category_data(category):\n state = State.ok\n data = list()\n try:\n cursor = get_db_connection()\n query = 'SELECT * from {0} ORDER BY weight DESC'.format(category)\n for row in cursor.execute(query):\n data.append(row)\n except Exception:\n state = State.error\n print('Error with getting data from {0} category'.format(category))\n finally:\n close_db_connection()\n return state.value, data\n\n\ndef set_category_data(category, data):\n state = State.ok\n try:\n cursor = get_db_connection()\n for key, value in data:\n query = (\n 'INSERT OR REPLACE INTO {0} (word, weight) VALUES({1},{2})'\n .format(category, key, value))\n cursor.execute(query)\n connection.commit()\n except Exception:\n state = State.error\n print('Error with setting data to database in {0} category'.format(\n category))\n finally:\n close_db_connection()\n return state.value\n\n\ndef get_file_names_in_category(category):\n state = State.ok\n names = list()\n try:\n cursor = get_db_connection()\n query = \"SELECT * FROM result WHERE category = '{0}'\".format(category)\n for row in cursor.execute(query):\n names.append(row)\n except Exception:\n 
state = State.error\n print('Error with getting category file names')\n finally:\n close_db_connection()\n return state.value, names\n\n\ndef get_file_names():\n state = State.ok\n names = list()\n try:\n cursor = get_db_connection()\n query = 'SELECT * FROM result'\n for row in cursor.execute(query):\n names.append(row)\n except Exception:\n state = State.error\n print('Error with getting category file names')\n finally:\n close_db_connection()\n return state.value, names\n",
"step-5": "'''\n Module for interaction with database\n'''\n\nimport sqlite3\nfrom enum import Enum\n\nDB_NAME = 'categories.db'\n\n\nclass State(Enum):\n ok = True\n error = False\n\n\ndef get_db_connection():\n try:\n global connection\n connection = sqlite3.connect(DB_NAME)\n cursor = connection.cursor()\n except Exception:\n print(\"Error connection db {0}\".format(DB_NAME))\n connection.close()\n return\n\n return cursor\n\n\ndef close_db_connection():\n try:\n connection.close()\n except Exception:\n print(\"Error closing connection\")\n\n\ndef create_new_category(category):\n state = State.ok\n\n try:\n cursor = get_db_connection()\n query = \"CREATE TABLE {0} (word varchar(15) primary key, weight real)\".format(category)\n cursor.execute(query)\n except Exception:\n state = State.error\n print(\"Error with creating new category\")\n finally:\n close_db_connection()\n\n return state\n\n\ndef get_category_data(category):\n state = State.ok\n data = list()\n\n try:\n cursor = get_db_connection()\n query = \"SELECT * from {0} ORDER BY weight DESC\".format(category)\n for row in cursor.execute(query):\n data.append(row)\n except Exception:\n state = State.error\n print(\"Error with getting data from {0} category\".format(category))\n finally:\n close_db_connection()\n\n return state.value, data\n\n\ndef set_category_data(category, data):\n state = State.ok\n try:\n cursor = get_db_connection()\n for key, value in data:\n query = 'INSERT OR REPLACE INTO {0} (word, weight) VALUES({1},{2})'.format(category, key, value)\n cursor.execute(query)\n\n connection.commit()\n except Exception:\n state = State.error\n print(\"Error with setting data to database in {0} category\".format(category))\n finally:\n close_db_connection()\n\n return state.value\n\ndef get_file_names_in_category(category):\n state = State.ok\n names = list()\n try:\n cursor = get_db_connection()\n query = \"SELECT * FROM result WHERE category = '{0}'\".format(category)\n for row in 
cursor.execute(query):\n names.append(row)\n except Exception:\n state = State.error\n print(\"Error with getting category file names\")\n finally:\n close_db_connection()\n\n return state.value, names\n\ndef get_file_names():\n state = State.ok\n names = list()\n try:\n cursor = get_db_connection()\n query = \"SELECT * FROM result\"\n for row in cursor.execute(query):\n names.append(row)\n except Exception:\n state = State.error\n print(\"Error with getting category file names\")\n finally:\n close_db_connection()\n\n return state.value, names",
"step-ids": [
7,
8,
10,
11,
12
]
}
|
[
7,
8,
10,
11,
12
] |
import pandas as pd
# Helper functions for the column values to be added to the DataFrame
# 1. cv_diff_value: day-over-day change of the closing value
def cv_diff_value(prevalue, postvalue):
    """Return the absolute change from *prevalue* to *postvalue*."""
    change = postvalue - prevalue
    return change
# 2. cv_diff_rate: day-over-day percent change of the closing value
def cv_diff_rate(prevalue, postvalue):
    """Return the percent change from *prevalue* to *postvalue*."""
    diff = postvalue - prevalue
    return diff / prevalue * 100
# 3. cv_maN_value: N-day moving average of the closing value
def cv_maN_value(cv, N):
    """Return the N-day rolling mean of *cv* for 3 <= N <= 5.

    For any other N an error-message string is returned instead.
    (A min_periods option could also be used here so that partial
    windows still produce an average.)
    """
    if not 3 <= N <= 5:
        return "N의 값을 다시 입력해주세요"
    return cv.rolling(window=N).mean()
# 4. cv_maN_rate: day-over-day percent change of the N-day moving average
def cv_maN_rate(cv, N):
    """Return the day-over-day percent changes of the series *cv*, rounded
    to two decimal places, for 3 <= N <= 5; otherwise an error string.

    The result has the same length as *cv*: a leading 0 pads the first day
    (no previous value to compare with). Days whose previous value is 0
    contribute a rate of 0 instead of dividing by zero.

    Fix over the original: the original appended into module-level lists
    (cv_list / cv_ma_rate / cv_ma_rate_round) that the caller had to
    pre-initialize, so a standalone call raised NameError and repeated
    calls accumulated stale state. These are now plain locals; the return
    value for the script's call pattern is unchanged.
    """
    str_replay = "N의 값을 다시 입력해주세요"
    if not 3 <= N <= 5:
        return str_replay
    # Flatten the Series to a plain list (the index may not start at 0).
    start = cv.index[0]
    values = [cv[i] for i in range(start, len(cv) + start)]
    # Leading 0 keeps the result the same length as the input series.
    rates = [0]
    for prev, cur in zip(values, values[1:]):
        rates.append((cur - prev) / prev * 100 if prev != 0 else 0)
    # Express the rates to two decimal places.
    return [round(r, 2) for r in rates]
# 5. ud_Nd: (a) 1 for N consecutive rising days, (b) -1 for N consecutive
#    falling days, (c) 0 otherwise
def ud_Nd(cvdv, N):
    """Flag each day with 1/-1/0 depending on whether the closing value
    rose or fell over N consecutive days ending on that day.

    Days that cannot be judged (the first N-2 and the last one) are 0.
    The returned list has the same length as *cvdv*.
    """
    # Flatten the Series to a plain list (the index may not start at 0).
    start = cvdv.index[0]
    values = [cvdv[i] for i in range(start, len(cvdv) + start)]
    # The first N-2 days cannot be judged yet -> 0.
    flags = [0] * (N - 2)
    # Slide a window of N values and compare its N-1 adjacent pairs.
    for i in range(len(values) - N + 1):
        ups = sum(1 for j in range(N - 1) if values[i + j] < values[i + j + 1])
        downs = sum(1 for j in range(N - 1) if values[i + j] > values[i + j + 1])
        if ups == N - 1:
            flags.append(1)
        elif downs == N - 1:
            flags.append(-1)
        else:
            flags.append(0)
    # The final day cannot be judged either -> 0.
    flags.append(0)
    return flags
# Read the CSV file, convert it to a DataFrame, and drop NaN columns.
csv_file_read = open('stock_history.csv', 'r', encoding='euc-kr')
stock_data = pd.read_csv(csv_file_read)
df = pd.DataFrame(stock_data)
stock_DataFrame = df.dropna(axis=1)
# Main loop: repeat until a stock/N combination meets the exit condition.
while True:
    # Initial values -- these scratch lists are reset on every iteration.
    cv_amount = [0]  # daily change of the closing value
    cv_rate = [0]  # daily % change of the closing value
    cv_ma_rate = [0]  # daily % change of the N-day moving average
    un_Nd_plus = un_Nd_minus = 0  # counters for the ">= 20 occurrences" check
    result3 = []  # N-day moving average of the closing value
    result4 = []  # daily % change of the N-day moving average
    cv_list = []  # closing values flattened to a plain list
    cv_ma_rate_round = []  # moving-average % changes rounded to 2 decimals
    unNd_list = []  # N-day up/down flags flattened to a plain list
    # Ask the user for a stock name and the window size N.
    stock_name = input("종목을 입력해주세요 : ")
    Number = int(input("N의 값을 입력해주세요 : "))
    one_stock = stock_DataFrame.loc[stock_DataFrame["stockname"] == stock_name]
    print(one_stock)
    close_value = one_stock["close_value"]  # closing values only
    one_stock_copy = one_stock.copy()  # copy so columns can be added
    # Daily change of the closing value.
    try:
        for i in range(close_value.index[0], (len(close_value)+close_value.index[0])-1, 1):
            result = cv_diff_value(close_value[i], close_value[i+1])
            cv_amount.append(result)
    except IndexError:
        print("존재하지 않는 항목")
        continue
    one_stock_copy["cv_diff_value"] = cv_amount  # add the column to the DataFrame
    # print(one_stock_copy)
    # Daily % change of the closing value, rounded to 2 decimal places.
    for i in range(close_value.index[0], (len(close_value)+close_value.index[0])-1, 1):
        result2 = round(cv_diff_rate(close_value[i], close_value[i+1]), 2)
        cv_rate.append(result2)
    one_stock_copy["cv_diff_rate"] = cv_rate  # add the column to the DataFrame
    # print(one_stock_copy)
    # N-day moving average of the closing value.
    res3 = cv_maN_value(close_value, Number)
    if isinstance(res3, str):
        print(res3)
        continue
    else:
        result3 = res3.fillna(0)  # replace NaN with 0
        one_stock_copy["cv_maN_value"] = result3
    # print(one_stock_copy)
    # Daily % change of the N-day moving average.
    ma_value = one_stock_copy["cv_maN_value"]  # fetch the moving-average column
    result4 = cv_maN_rate(ma_value, Number)
    if isinstance(result4, str):
        print(result4)
        continue
    else:
        one_stock_copy["cv_maN_rate"] = result4
    # print(one_stock_copy)
    # Flag N-day consecutive rises / falls / neither.
    result5 = ud_Nd(close_value, Number)
    one_stock_copy["ud_Nd"] = result5
    # ud_Nd must hit 1 and -1 at least 20 times each; otherwise adjust
    # N (3-5) or pick a different stock.
    un_Nd_value = one_stock_copy["ud_Nd"]  # fetch the up/down flag column
    # Flatten the Series to a plain list.
    for i in range(un_Nd_value.index[0], (len(un_Nd_value)+un_Nd_value.index[0]), 1):
        unNd_list.append(un_Nd_value[i])
    # Count how often each flag occurs.
    for i in range(len(unNd_list)):
        if unNd_list[i] == 1:
            un_Nd_plus += 1
        if unNd_list[i] == -1:
            un_Nd_minus += 1
    print(un_Nd_plus)
    print(un_Nd_minus)
    # Leave the loop if both counts reached 20; otherwise try again.
    if un_Nd_plus >= 20 and un_Nd_minus >= 20:
        break
    else:
        print("un_Nd의 1 or -1 발생횟수가 둘 다 20을 넘지 않았습니다")
        continue
# Once the condition is met, save the enriched data to a CSV file.
one_stock_copy.to_csv('stock_history_added.csv', encoding='ms949', index=False)
print("Data가 성공적으로 추가됐습니다")
csv_file_read.close()
|
normal
|
{
"blob_id": "a967b97f090a71f28e33c5ca54cb64db3967aea3",
"index": 7002,
"step-1": "<mask token>\n\n\ndef cv_diff_value(prevalue, postvalue):\n return postvalue - prevalue\n\n\ndef cv_diff_rate(prevalue, postvalue):\n return (postvalue - prevalue) / prevalue * 100\n\n\ndef cv_maN_value(cv, N):\n str_replay = 'N의 값을 다시 입력해주세요'\n if 3 <= N <= 5:\n return cv.rolling(window=N).mean()\n else:\n return str_replay\n\n\ndef cv_maN_rate(cv, N):\n str_replay = 'N의 값을 다시 입력해주세요'\n if 3 <= N <= 5:\n for i in range(cv.index[0], len(cv) + cv.index[0], 1):\n cv_list.append(cv[i])\n for i in range(len(cv_list) - 1):\n if cv_list[i] != 0:\n cv_ma_rate.append((cv_list[i + 1] - cv_list[i]) / cv_list[i\n ] * 100)\n else:\n cv_ma_rate.append(0)\n for i in range(len(cv_ma_rate)):\n cv_ma_rate_round.append(round(cv_ma_rate[i], 2))\n return cv_ma_rate_round\n else:\n return str_replay\n\n\ndef ud_Nd(cvdv, N):\n cvdv_list = []\n un_Nd_list = []\n for i in range(cvdv.index[0], len(cvdv) + cvdv.index[0], 1):\n cvdv_list.append(cvdv[i])\n for i in range(N - 2):\n un_Nd_list.append(0)\n for i in range(len(cvdv_list) - N + 1):\n increase_count = decrease_count = nothing_count = 0\n for j in range(N - 1):\n if cvdv_list[i + j] < cvdv_list[i + j + 1]:\n increase_count += 1\n elif cvdv_list[i + j] > cvdv_list[i + j + 1]:\n decrease_count += 1\n else:\n nothing_count += 1\n if increase_count == N - 1:\n un_Nd_list.append(1)\n elif decrease_count == N - 1:\n un_Nd_list.append(-1)\n else:\n un_Nd_list.append(0)\n un_Nd_list.append(0)\n return un_Nd_list\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef cv_diff_value(prevalue, postvalue):\n return postvalue - prevalue\n\n\ndef cv_diff_rate(prevalue, postvalue):\n return (postvalue - prevalue) / prevalue * 100\n\n\ndef cv_maN_value(cv, N):\n str_replay = 'N의 값을 다시 입력해주세요'\n if 3 <= N <= 5:\n return cv.rolling(window=N).mean()\n else:\n return str_replay\n\n\ndef cv_maN_rate(cv, N):\n str_replay = 'N의 값을 다시 입력해주세요'\n if 3 <= N <= 5:\n for i in range(cv.index[0], len(cv) + cv.index[0], 1):\n cv_list.append(cv[i])\n for i in range(len(cv_list) - 1):\n if cv_list[i] != 0:\n cv_ma_rate.append((cv_list[i + 1] - cv_list[i]) / cv_list[i\n ] * 100)\n else:\n cv_ma_rate.append(0)\n for i in range(len(cv_ma_rate)):\n cv_ma_rate_round.append(round(cv_ma_rate[i], 2))\n return cv_ma_rate_round\n else:\n return str_replay\n\n\ndef ud_Nd(cvdv, N):\n cvdv_list = []\n un_Nd_list = []\n for i in range(cvdv.index[0], len(cvdv) + cvdv.index[0], 1):\n cvdv_list.append(cvdv[i])\n for i in range(N - 2):\n un_Nd_list.append(0)\n for i in range(len(cvdv_list) - N + 1):\n increase_count = decrease_count = nothing_count = 0\n for j in range(N - 1):\n if cvdv_list[i + j] < cvdv_list[i + j + 1]:\n increase_count += 1\n elif cvdv_list[i + j] > cvdv_list[i + j + 1]:\n decrease_count += 1\n else:\n nothing_count += 1\n if increase_count == N - 1:\n un_Nd_list.append(1)\n elif decrease_count == N - 1:\n un_Nd_list.append(-1)\n else:\n un_Nd_list.append(0)\n un_Nd_list.append(0)\n return un_Nd_list\n\n\n<mask token>\nwhile True:\n cv_amount = [0]\n cv_rate = [0]\n cv_ma_rate = [0]\n un_Nd_plus = un_Nd_minus = 0\n result3 = []\n result4 = []\n cv_list = []\n cv_ma_rate_round = []\n unNd_list = []\n stock_name = input('종목을 입력해주세요 : ')\n Number = int(input('N의 값을 입력해주세요 : '))\n one_stock = stock_DataFrame.loc[stock_DataFrame['stockname'] == stock_name]\n print(one_stock)\n close_value = one_stock['close_value']\n one_stock_copy = one_stock.copy()\n try:\n for i in range(close_value.index[0], len(close_value) + 
close_value\n .index[0] - 1, 1):\n result = cv_diff_value(close_value[i], close_value[i + 1])\n cv_amount.append(result)\n except IndexError:\n print('존재하지 않는 항목')\n continue\n one_stock_copy['cv_diff_value'] = cv_amount\n for i in range(close_value.index[0], len(close_value) + close_value.\n index[0] - 1, 1):\n result2 = round(cv_diff_rate(close_value[i], close_value[i + 1]), 2)\n cv_rate.append(result2)\n one_stock_copy['cv_diff_rate'] = cv_rate\n res3 = cv_maN_value(close_value, Number)\n if isinstance(res3, str):\n print(res3)\n continue\n else:\n result3 = res3.fillna(0)\n one_stock_copy['cv_maN_value'] = result3\n ma_value = one_stock_copy['cv_maN_value']\n result4 = cv_maN_rate(ma_value, Number)\n if isinstance(result4, str):\n print(result4)\n continue\n else:\n one_stock_copy['cv_maN_rate'] = result4\n result5 = ud_Nd(close_value, Number)\n one_stock_copy['ud_Nd'] = result5\n un_Nd_value = one_stock_copy['ud_Nd']\n for i in range(un_Nd_value.index[0], len(un_Nd_value) + un_Nd_value.\n index[0], 1):\n unNd_list.append(un_Nd_value[i])\n for i in range(len(unNd_list)):\n if unNd_list[i] == 1:\n un_Nd_plus += 1\n if unNd_list[i] == -1:\n un_Nd_minus += 1\n print(un_Nd_plus)\n print(un_Nd_minus)\n if un_Nd_plus >= 20 and un_Nd_minus >= 20:\n break\n else:\n print('un_Nd의 1 or -1 발생횟수가 둘 다 20을 넘지 않았습니다')\n continue\none_stock_copy.to_csv('stock_history_added.csv', encoding='ms949', index=False)\nprint('Data가 성공적으로 추가됐습니다')\ncsv_file_read.close()\n",
"step-3": "<mask token>\n\n\ndef cv_diff_value(prevalue, postvalue):\n return postvalue - prevalue\n\n\ndef cv_diff_rate(prevalue, postvalue):\n return (postvalue - prevalue) / prevalue * 100\n\n\ndef cv_maN_value(cv, N):\n str_replay = 'N의 값을 다시 입력해주세요'\n if 3 <= N <= 5:\n return cv.rolling(window=N).mean()\n else:\n return str_replay\n\n\ndef cv_maN_rate(cv, N):\n str_replay = 'N의 값을 다시 입력해주세요'\n if 3 <= N <= 5:\n for i in range(cv.index[0], len(cv) + cv.index[0], 1):\n cv_list.append(cv[i])\n for i in range(len(cv_list) - 1):\n if cv_list[i] != 0:\n cv_ma_rate.append((cv_list[i + 1] - cv_list[i]) / cv_list[i\n ] * 100)\n else:\n cv_ma_rate.append(0)\n for i in range(len(cv_ma_rate)):\n cv_ma_rate_round.append(round(cv_ma_rate[i], 2))\n return cv_ma_rate_round\n else:\n return str_replay\n\n\ndef ud_Nd(cvdv, N):\n cvdv_list = []\n un_Nd_list = []\n for i in range(cvdv.index[0], len(cvdv) + cvdv.index[0], 1):\n cvdv_list.append(cvdv[i])\n for i in range(N - 2):\n un_Nd_list.append(0)\n for i in range(len(cvdv_list) - N + 1):\n increase_count = decrease_count = nothing_count = 0\n for j in range(N - 1):\n if cvdv_list[i + j] < cvdv_list[i + j + 1]:\n increase_count += 1\n elif cvdv_list[i + j] > cvdv_list[i + j + 1]:\n decrease_count += 1\n else:\n nothing_count += 1\n if increase_count == N - 1:\n un_Nd_list.append(1)\n elif decrease_count == N - 1:\n un_Nd_list.append(-1)\n else:\n un_Nd_list.append(0)\n un_Nd_list.append(0)\n return un_Nd_list\n\n\ncsv_file_read = open('stock_history.csv', 'r', encoding='euc-kr')\nstock_data = pd.read_csv(csv_file_read)\ndf = pd.DataFrame(stock_data)\nstock_DataFrame = df.dropna(axis=1)\nwhile True:\n cv_amount = [0]\n cv_rate = [0]\n cv_ma_rate = [0]\n un_Nd_plus = un_Nd_minus = 0\n result3 = []\n result4 = []\n cv_list = []\n cv_ma_rate_round = []\n unNd_list = []\n stock_name = input('종목을 입력해주세요 : ')\n Number = int(input('N의 값을 입력해주세요 : '))\n one_stock = stock_DataFrame.loc[stock_DataFrame['stockname'] == stock_name]\n 
print(one_stock)\n close_value = one_stock['close_value']\n one_stock_copy = one_stock.copy()\n try:\n for i in range(close_value.index[0], len(close_value) + close_value\n .index[0] - 1, 1):\n result = cv_diff_value(close_value[i], close_value[i + 1])\n cv_amount.append(result)\n except IndexError:\n print('존재하지 않는 항목')\n continue\n one_stock_copy['cv_diff_value'] = cv_amount\n for i in range(close_value.index[0], len(close_value) + close_value.\n index[0] - 1, 1):\n result2 = round(cv_diff_rate(close_value[i], close_value[i + 1]), 2)\n cv_rate.append(result2)\n one_stock_copy['cv_diff_rate'] = cv_rate\n res3 = cv_maN_value(close_value, Number)\n if isinstance(res3, str):\n print(res3)\n continue\n else:\n result3 = res3.fillna(0)\n one_stock_copy['cv_maN_value'] = result3\n ma_value = one_stock_copy['cv_maN_value']\n result4 = cv_maN_rate(ma_value, Number)\n if isinstance(result4, str):\n print(result4)\n continue\n else:\n one_stock_copy['cv_maN_rate'] = result4\n result5 = ud_Nd(close_value, Number)\n one_stock_copy['ud_Nd'] = result5\n un_Nd_value = one_stock_copy['ud_Nd']\n for i in range(un_Nd_value.index[0], len(un_Nd_value) + un_Nd_value.\n index[0], 1):\n unNd_list.append(un_Nd_value[i])\n for i in range(len(unNd_list)):\n if unNd_list[i] == 1:\n un_Nd_plus += 1\n if unNd_list[i] == -1:\n un_Nd_minus += 1\n print(un_Nd_plus)\n print(un_Nd_minus)\n if un_Nd_plus >= 20 and un_Nd_minus >= 20:\n break\n else:\n print('un_Nd의 1 or -1 발생횟수가 둘 다 20을 넘지 않았습니다')\n continue\none_stock_copy.to_csv('stock_history_added.csv', encoding='ms949', index=False)\nprint('Data가 성공적으로 추가됐습니다')\ncsv_file_read.close()\n",
"step-4": "import pandas as pd\n\n\ndef cv_diff_value(prevalue, postvalue):\n return postvalue - prevalue\n\n\ndef cv_diff_rate(prevalue, postvalue):\n return (postvalue - prevalue) / prevalue * 100\n\n\ndef cv_maN_value(cv, N):\n str_replay = 'N의 값을 다시 입력해주세요'\n if 3 <= N <= 5:\n return cv.rolling(window=N).mean()\n else:\n return str_replay\n\n\ndef cv_maN_rate(cv, N):\n str_replay = 'N의 값을 다시 입력해주세요'\n if 3 <= N <= 5:\n for i in range(cv.index[0], len(cv) + cv.index[0], 1):\n cv_list.append(cv[i])\n for i in range(len(cv_list) - 1):\n if cv_list[i] != 0:\n cv_ma_rate.append((cv_list[i + 1] - cv_list[i]) / cv_list[i\n ] * 100)\n else:\n cv_ma_rate.append(0)\n for i in range(len(cv_ma_rate)):\n cv_ma_rate_round.append(round(cv_ma_rate[i], 2))\n return cv_ma_rate_round\n else:\n return str_replay\n\n\ndef ud_Nd(cvdv, N):\n cvdv_list = []\n un_Nd_list = []\n for i in range(cvdv.index[0], len(cvdv) + cvdv.index[0], 1):\n cvdv_list.append(cvdv[i])\n for i in range(N - 2):\n un_Nd_list.append(0)\n for i in range(len(cvdv_list) - N + 1):\n increase_count = decrease_count = nothing_count = 0\n for j in range(N - 1):\n if cvdv_list[i + j] < cvdv_list[i + j + 1]:\n increase_count += 1\n elif cvdv_list[i + j] > cvdv_list[i + j + 1]:\n decrease_count += 1\n else:\n nothing_count += 1\n if increase_count == N - 1:\n un_Nd_list.append(1)\n elif decrease_count == N - 1:\n un_Nd_list.append(-1)\n else:\n un_Nd_list.append(0)\n un_Nd_list.append(0)\n return un_Nd_list\n\n\ncsv_file_read = open('stock_history.csv', 'r', encoding='euc-kr')\nstock_data = pd.read_csv(csv_file_read)\ndf = pd.DataFrame(stock_data)\nstock_DataFrame = df.dropna(axis=1)\nwhile True:\n cv_amount = [0]\n cv_rate = [0]\n cv_ma_rate = [0]\n un_Nd_plus = un_Nd_minus = 0\n result3 = []\n result4 = []\n cv_list = []\n cv_ma_rate_round = []\n unNd_list = []\n stock_name = input('종목을 입력해주세요 : ')\n Number = int(input('N의 값을 입력해주세요 : '))\n one_stock = stock_DataFrame.loc[stock_DataFrame['stockname'] == stock_name]\n 
print(one_stock)\n close_value = one_stock['close_value']\n one_stock_copy = one_stock.copy()\n try:\n for i in range(close_value.index[0], len(close_value) + close_value\n .index[0] - 1, 1):\n result = cv_diff_value(close_value[i], close_value[i + 1])\n cv_amount.append(result)\n except IndexError:\n print('존재하지 않는 항목')\n continue\n one_stock_copy['cv_diff_value'] = cv_amount\n for i in range(close_value.index[0], len(close_value) + close_value.\n index[0] - 1, 1):\n result2 = round(cv_diff_rate(close_value[i], close_value[i + 1]), 2)\n cv_rate.append(result2)\n one_stock_copy['cv_diff_rate'] = cv_rate\n res3 = cv_maN_value(close_value, Number)\n if isinstance(res3, str):\n print(res3)\n continue\n else:\n result3 = res3.fillna(0)\n one_stock_copy['cv_maN_value'] = result3\n ma_value = one_stock_copy['cv_maN_value']\n result4 = cv_maN_rate(ma_value, Number)\n if isinstance(result4, str):\n print(result4)\n continue\n else:\n one_stock_copy['cv_maN_rate'] = result4\n result5 = ud_Nd(close_value, Number)\n one_stock_copy['ud_Nd'] = result5\n un_Nd_value = one_stock_copy['ud_Nd']\n for i in range(un_Nd_value.index[0], len(un_Nd_value) + un_Nd_value.\n index[0], 1):\n unNd_list.append(un_Nd_value[i])\n for i in range(len(unNd_list)):\n if unNd_list[i] == 1:\n un_Nd_plus += 1\n if unNd_list[i] == -1:\n un_Nd_minus += 1\n print(un_Nd_plus)\n print(un_Nd_minus)\n if un_Nd_plus >= 20 and un_Nd_minus >= 20:\n break\n else:\n print('un_Nd의 1 or -1 발생횟수가 둘 다 20을 넘지 않았습니다')\n continue\none_stock_copy.to_csv('stock_history_added.csv', encoding='ms949', index=False)\nprint('Data가 성공적으로 추가됐습니다')\ncsv_file_read.close()\n",
"step-5": "import pandas as pd\n\n# 칼럼값으로 추가 - 함수 작성\n# 1. cv_diff_value : 종가 일간 변화량\ndef cv_diff_value(prevalue, postvalue):\n return postvalue - prevalue\n\n\n# 2. cv_diff_rate : 종가 일간 변화율\ndef cv_diff_rate(prevalue, postvalue):\n return (postvalue - prevalue) / prevalue * 100\n\n\n# 3. cv_maN_value : 종가의 N일 이동평균\ndef cv_maN_value(cv, N):\n # min_period 옵션을 이용하여 할 수도 있음 // 데이터가 최소 x 개라도 존재하면 이동평균을 구함\n str_replay = \"N의 값을 다시 입력해주세요\"\n if 3 <= N <= 5:\n return cv.rolling(window=N).mean()\n else:\n return str_replay\n\n\n# 4. cv_maN_rate : 종가의 N일 이동평균의 일간 변화율\ndef cv_maN_rate(cv, N):\n str_replay = \"N의 값을 다시 입력해주세요\"\n if 3 <= N <= 5:\n # DataFrame 을 list 로 변환\n for i in range(cv.index[0], (len(cv)+cv.index[0]), 1):\n cv_list.append(cv[i])\n # 종가의 N일 이동평균의 일간 변화율을 list 에 담기\n for i in range(len(cv_list)-1):\n if cv_list[i] != 0:\n cv_ma_rate.append((cv_list[i+1] - cv_list[i]) / cv_list[i] * 100)\n else:\n cv_ma_rate.append(0)\n # 종가의 N일 이동평균의 일간 변화율을 소수점 2째자리 까지 표현\n for i in range(len(cv_ma_rate)):\n cv_ma_rate_round.append(round(cv_ma_rate[i], 2))\n return cv_ma_rate_round\n else:\n return str_replay\n\n\n# 5. 
ud_Nd : (a) N일 연속 증가 1, (b) N일 연속 하락 -1, (c) 그렇지 않은 날 0\ndef ud_Nd(cvdv, N):\n cvdv_list = [] # list\n un_Nd_list = [] # list\n # print(cvdv) # 종가\n # print(len(cvdv)) # 길이 : 230\n # DataFrame 을 list 로 변환\n for i in range(cvdv.index[0], (len(cvdv)+cvdv.index[0]), 1):\n cvdv_list.append(cvdv[i])\n # 알 수 없는 정보는 '0'으로 두겠다\n for i in range(N-2):\n un_Nd_list.append(0)\n # 상승, 하락, 그렇지 않은 날 계산\n for i in range(len(cvdv_list)-N+1): # 0 ~ 225\n increase_count = decrease_count = nothing_count = 0\n for j in range(N-1): # 0 ~ 3\n if cvdv_list[i + j] < cvdv_list[i + j + 1]: # 종가가 상승한 날\n increase_count += 1\n elif cvdv_list[i + j] > cvdv_list[i + j + 1]: # 종가가 하락한 날\n decrease_count += 1\n else: # 종가가 상승도 하락도 아닌날\n nothing_count += 1\n # N일 연속 종가가 상승, 하락, 그렇지 않은 날 판단하고 (N-1)날에 삽입\n if increase_count == (N - 1):\n un_Nd_list.append(1)\n elif decrease_count == (N - 1):\n un_Nd_list.append(-1)\n else:\n un_Nd_list.append(0)\n un_Nd_list.append(0) # 마지막날은 판단할 수 없어서 '0' 으로 삽입\n return un_Nd_list\n\n\n# csv 파일 읽어오기 // DataFrame 으로 변경 // NaN값 제거\ncsv_file_read = open('stock_history.csv', 'r', encoding='euc-kr')\nstock_data = pd.read_csv(csv_file_read)\ndf = pd.DataFrame(stock_data)\nstock_DataFrame = df.dropna(axis=1)\n\n# 반복 시작\nwhile True:\n # 초기값\n cv_amount = [0] # 종가 일간 변화량을 저장할 list\n cv_rate = [0] # 종가 일간 변화율을 저장할 list\n cv_ma_rate = [0] # 종가의 N일 이동평균의 일간 변화율을 저장할 list\n un_Nd_plus = un_Nd_minus = 0 # 20회이상 판단할 count 변수\n result3 = [] # 종가의 N일 이동평균을 저장할 list\n result4 = [] # 종가 N일 이동평균의 일간 변화율\n cv_list = [] # 종가의 N일 이동평균의 일간 변화율을 저장할 list\n cv_ma_rate_round = [] # 종가의 N일 이동평균의 일간 변화율을 소수점 2자리로 저장할 list\n unNd_list = [] # 종가의 N일 증감을 저장할 list\n\n # 종목을 선택하고 N의 값을 입력받는다\n stock_name = input(\"종목을 입력해주세요 : \")\n Number = int(input(\"N의 값을 입력해주세요 : \"))\n one_stock = stock_DataFrame.loc[stock_DataFrame[\"stockname\"] == stock_name]\n print(one_stock)\n\n close_value = one_stock[\"close_value\"] # 종가만 가져오기\n one_stock_copy = one_stock.copy() # DataFrame 에 열을 추가하기 위해 복사\n\n # 종가 일간 
변화량\n try:\n for i in range(close_value.index[0], (len(close_value)+close_value.index[0])-1, 1):\n result = cv_diff_value(close_value[i], close_value[i+1])\n cv_amount.append(result)\n except IndexError:\n print(\"존재하지 않는 항목\")\n continue\n one_stock_copy[\"cv_diff_value\"] = cv_amount # DataFrame 에 데이터 추가\n # print(one_stock_copy)\n\n # 종가 일간 변화율 // 종가 일간 변화량과 마찬가지 // 소수점 2자리 표현\n for i in range(close_value.index[0], (len(close_value)+close_value.index[0])-1, 1):\n result2 = round(cv_diff_rate(close_value[i], close_value[i+1]), 2)\n cv_rate.append(result2)\n one_stock_copy[\"cv_diff_rate\"] = cv_rate # DataFrame 에 데이터 추가\n # print(one_stock_copy)\n\n # 종가 N일 이동평균\n res3 = cv_maN_value(close_value, Number)\n if isinstance(res3, str):\n print(res3)\n continue\n else:\n result3 = res3.fillna(0) # NaN값을 0으로 치환\n one_stock_copy[\"cv_maN_value\"] = result3\n # print(one_stock_copy)\n\n # 종가 N일 이동평균의 일간 변화율\n ma_value = one_stock_copy[\"cv_maN_value\"] # 종가 N일 이동평균 가져오기\n result4 = cv_maN_rate(ma_value, Number)\n if isinstance(result4, str):\n print(result4)\n continue\n else:\n one_stock_copy[\"cv_maN_rate\"] = result4\n # print(one_stock_copy)\n\n # N일 연속 상승, 하락, 그렇지 않은 날 파악\n result5 = ud_Nd(close_value, Number)\n one_stock_copy[\"ud_Nd\"] = result5\n\n # un_Nd = 1, -1이 20회 이상 발생하도록 N을 3 ~ 5로 조정, 종목을 변경\n un_Nd_value = one_stock_copy[\"ud_Nd\"] # N일 연속되는 증감 column 가져오기\n # DataFrame 을 list 로 변환\n for i in range(un_Nd_value.index[0], (len(un_Nd_value)+un_Nd_value.index[0]), 1):\n unNd_list.append(un_Nd_value[i])\n # 20회 이상 발생하는지 판단\n for i in range(len(unNd_list)):\n if unNd_list[i] == 1:\n un_Nd_plus += 1\n if unNd_list[i] == -1:\n un_Nd_minus += 1\n\n print(un_Nd_plus)\n print(un_Nd_minus)\n\n# 발생했다면 반복문을 종료하고 발생하지 않았다면 N을 조정하거나 종목을 변경한다\n if un_Nd_plus >= 20 and un_Nd_minus >= 20:\n break\n else:\n print(\"un_Nd의 1 or -1 발생횟수가 둘 다 20을 넘지 않았습니다\")\n continue\n\n# 반복문이 끝나고 20회이상 발생하는 조건을 만족하면 csv 파일(stock_history_added.csv)로 
저장\none_stock_copy.to_csv('stock_history_added.csv', encoding='ms949', index=False)\nprint(\"Data가 성공적으로 추가됐습니다\")\ncsv_file_read.close()\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
# coding: utf-8
"""
MailSlurp API
MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://docs.mailslurp.com/) - [Examples](https://github.com/mailslurp/examples) repository # noqa: E501
The version of the OpenAPI document: 6.5.2
Contact: contact@mailslurp.dev
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from mailslurp_client.api_client import ApiClient
from mailslurp_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class FormControllerApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def submit_form(self, **kwargs): # noqa: E501
"""Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501
This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"test@example.com\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/forms?_to=test@example.com\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.submit_form(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str to: The email address that submitted form should be sent to.
:param str subject: Optional subject of the email that will be sent.
:param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.
:param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.
:param str success_message: Optional success message to display if no _redirectTo present.
:param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.
:param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.submit_form_with_http_info(**kwargs) # noqa: E501
def submit_form_with_http_info(self, **kwargs): # noqa: E501
"""Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501
This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"test@example.com\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/forms?_to=test@example.com\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.submit_form_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str to: The email address that submitted form should be sent to.
:param str subject: Optional subject of the email that will be sent.
:param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.
:param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.
:param str success_message: Optional success message to display if no _redirectTo present.
:param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.
:param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(str, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'to',
'subject',
'redirect_to',
'email_address',
'success_message',
'spam_check',
'other_parameters'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method submit_form" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'to' in local_var_params and local_var_params['to'] is not None: # noqa: E501
query_params.append(('_to', local_var_params['to'])) # noqa: E501
if 'subject' in local_var_params and local_var_params['subject'] is not None: # noqa: E501
query_params.append(('_subject', local_var_params['subject'])) # noqa: E501
if 'redirect_to' in local_var_params and local_var_params['redirect_to'] is not None: # noqa: E501
query_params.append(('_redirectTo', local_var_params['redirect_to'])) # noqa: E501
if 'email_address' in local_var_params and local_var_params['email_address'] is not None: # noqa: E501
query_params.append(('_emailAddress', local_var_params['email_address'])) # noqa: E501
if 'success_message' in local_var_params and local_var_params['success_message'] is not None: # noqa: E501
query_params.append(('_successMessage', local_var_params['success_message'])) # noqa: E501
if 'spam_check' in local_var_params and local_var_params['spam_check'] is not None: # noqa: E501
query_params.append(('_spamCheck', local_var_params['spam_check'])) # noqa: E501
if 'other_parameters' in local_var_params and local_var_params['other_parameters'] is not None: # noqa: E501
query_params.append(('otherParameters', local_var_params['other_parameters'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['API_KEY'] # noqa: E501
return self.api_client.call_api(
'/forms', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
|
normal
|
{
"blob_id": "a4ccf373695b7df60039bc8f6440a6ad43d265c1",
"index": 3750,
"step-1": "<mask token>\n\n\nclass FormControllerApi(object):\n <mask token>\n <mask token>\n <mask token>\n\n def submit_form_with_http_info(self, **kwargs):\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"test@example.com\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/forms?_to=test@example.com\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. 
It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(str, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n local_var_params = locals()\n all_params = ['to', 'subject', 'redirect_to', 'email_address',\n 'success_message', 'spam_check', 'other_parameters']\n all_params.extend(['async_req', '_return_http_data_only',\n '_preload_content', '_request_timeout'])\n for key, val in six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise ApiTypeError(\n \"Got an unexpected keyword argument '%s' to method submit_form\"\n % key)\n local_var_params[key] = val\n del local_var_params['kwargs']\n collection_formats = {}\n path_params = {}\n query_params = []\n if 'to' in local_var_params and local_var_params['to'] is not None:\n query_params.append(('_to', local_var_params['to']))\n if 'subject' in local_var_params and local_var_params['subject'\n ] is not None:\n query_params.append(('_subject', local_var_params['subject']))\n if 'redirect_to' in local_var_params and local_var_params['redirect_to'\n ] is not None:\n query_params.append(('_redirectTo', local_var_params[\n 'redirect_to']))\n if 'email_address' in local_var_params and local_var_params[\n 'email_address'] is not None:\n query_params.append(('_emailAddress', local_var_params[\n 'email_address']))\n if 'success_message' in local_var_params and local_var_params[\n 'success_message'] is not None:\n query_params.append(('_successMessage', local_var_params[\n 'success_message']))\n if 'spam_check' in local_var_params and local_var_params['spam_check'\n ] is not None:\n query_params.append(('_spamCheck', local_var_params['spam_check']))\n if 'other_parameters' in local_var_params and local_var_params[\n 'other_parameters'] is not None:\n query_params.append(('otherParameters', local_var_params[\n 'other_parameters']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = 
None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n auth_settings = ['API_KEY']\n return self.api_client.call_api('/forms', 'POST', path_params,\n query_params, header_params, body=body_params, post_params=\n form_params, files=local_var_files, response_type='str',\n auth_settings=auth_settings, async_req=local_var_params.get(\n 'async_req'), _return_http_data_only=local_var_params.get(\n '_return_http_data_only'), _preload_content=local_var_params.\n get('_preload_content', True), _request_timeout=\n local_var_params.get('_request_timeout'), collection_formats=\n collection_formats)\n",
"step-2": "<mask token>\n\n\nclass FormControllerApi(object):\n <mask token>\n <mask token>\n\n def submit_form(self, **kwargs):\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"test@example.com\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/forms?_to=test@example.com\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: str\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n return self.submit_form_with_http_info(**kwargs)\n\n def submit_form_with_http_info(self, **kwargs):\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. 
#### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"test@example.com\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/forms?_to=test@example.com\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. 
It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(str, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n local_var_params = locals()\n all_params = ['to', 'subject', 'redirect_to', 'email_address',\n 'success_message', 'spam_check', 'other_parameters']\n all_params.extend(['async_req', '_return_http_data_only',\n '_preload_content', '_request_timeout'])\n for key, val in six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise ApiTypeError(\n \"Got an unexpected keyword argument '%s' to method submit_form\"\n % key)\n local_var_params[key] = val\n del local_var_params['kwargs']\n collection_formats = {}\n path_params = {}\n query_params = []\n if 'to' in local_var_params and local_var_params['to'] is not None:\n query_params.append(('_to', local_var_params['to']))\n if 'subject' in local_var_params and local_var_params['subject'\n ] is not None:\n query_params.append(('_subject', local_var_params['subject']))\n if 'redirect_to' in local_var_params and local_var_params['redirect_to'\n ] is not None:\n query_params.append(('_redirectTo', local_var_params[\n 'redirect_to']))\n if 'email_address' in local_var_params and local_var_params[\n 'email_address'] is not None:\n query_params.append(('_emailAddress', local_var_params[\n 'email_address']))\n if 'success_message' in local_var_params and local_var_params[\n 'success_message'] is not None:\n query_params.append(('_successMessage', local_var_params[\n 'success_message']))\n if 'spam_check' in local_var_params and local_var_params['spam_check'\n ] is not None:\n query_params.append(('_spamCheck', local_var_params['spam_check']))\n if 'other_parameters' in local_var_params and local_var_params[\n 'other_parameters'] is not None:\n query_params.append(('otherParameters', local_var_params[\n 'other_parameters']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = 
None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n auth_settings = ['API_KEY']\n return self.api_client.call_api('/forms', 'POST', path_params,\n query_params, header_params, body=body_params, post_params=\n form_params, files=local_var_files, response_type='str',\n auth_settings=auth_settings, async_req=local_var_params.get(\n 'async_req'), _return_http_data_only=local_var_params.get(\n '_return_http_data_only'), _preload_content=local_var_params.\n get('_preload_content', True), _request_timeout=\n local_var_params.get('_request_timeout'), collection_formats=\n collection_formats)\n",
"step-3": "<mask token>\n\n\nclass FormControllerApi(object):\n <mask token>\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def submit_form(self, **kwargs):\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"test@example.com\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/forms?_to=test@example.com\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. 
# noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. 
It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: str\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n return self.submit_form_with_http_info(**kwargs)\n\n def submit_form_with_http_info(self, **kwargs):\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"test@example.com\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/forms?_to=test@example.com\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . 
You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. 
It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(str, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n local_var_params = locals()\n all_params = ['to', 'subject', 'redirect_to', 'email_address',\n 'success_message', 'spam_check', 'other_parameters']\n all_params.extend(['async_req', '_return_http_data_only',\n '_preload_content', '_request_timeout'])\n for key, val in six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise ApiTypeError(\n \"Got an unexpected keyword argument '%s' to method submit_form\"\n % key)\n local_var_params[key] = val\n del local_var_params['kwargs']\n collection_formats = {}\n path_params = {}\n query_params = []\n if 'to' in local_var_params and local_var_params['to'] is not None:\n query_params.append(('_to', local_var_params['to']))\n if 'subject' in local_var_params and local_var_params['subject'\n ] is not None:\n query_params.append(('_subject', local_var_params['subject']))\n if 'redirect_to' in local_var_params and local_var_params['redirect_to'\n ] is not None:\n query_params.append(('_redirectTo', local_var_params[\n 'redirect_to']))\n if 'email_address' in local_var_params and local_var_params[\n 'email_address'] is not None:\n query_params.append(('_emailAddress', local_var_params[\n 'email_address']))\n if 'success_message' in local_var_params and local_var_params[\n 'success_message'] is not None:\n query_params.append(('_successMessage', local_var_params[\n 'success_message']))\n if 'spam_check' in local_var_params and local_var_params['spam_check'\n ] is not None:\n query_params.append(('_spamCheck', local_var_params['spam_check']))\n if 'other_parameters' in local_var_params and local_var_params[\n 'other_parameters'] is not None:\n query_params.append(('otherParameters', local_var_params[\n 'other_parameters']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = 
None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n auth_settings = ['API_KEY']\n return self.api_client.call_api('/forms', 'POST', path_params,\n query_params, header_params, body=body_params, post_params=\n form_params, files=local_var_files, response_type='str',\n auth_settings=auth_settings, async_req=local_var_params.get(\n 'async_req'), _return_http_data_only=local_var_params.get(\n '_return_http_data_only'), _preload_content=local_var_params.\n get('_preload_content', True), _request_timeout=\n local_var_params.get('_request_timeout'), collection_formats=\n collection_formats)\n",
"step-4": "<mask token>\n\n\nclass FormControllerApi(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def submit_form(self, **kwargs):\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"test@example.com\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/forms?_to=test@example.com\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . 
You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. 
It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: str\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n return self.submit_form_with_http_info(**kwargs)\n\n def submit_form_with_http_info(self, **kwargs):\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"test@example.com\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/forms?_to=test@example.com\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . 
You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. 
It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(str, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n local_var_params = locals()\n all_params = ['to', 'subject', 'redirect_to', 'email_address',\n 'success_message', 'spam_check', 'other_parameters']\n all_params.extend(['async_req', '_return_http_data_only',\n '_preload_content', '_request_timeout'])\n for key, val in six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise ApiTypeError(\n \"Got an unexpected keyword argument '%s' to method submit_form\"\n % key)\n local_var_params[key] = val\n del local_var_params['kwargs']\n collection_formats = {}\n path_params = {}\n query_params = []\n if 'to' in local_var_params and local_var_params['to'] is not None:\n query_params.append(('_to', local_var_params['to']))\n if 'subject' in local_var_params and local_var_params['subject'\n ] is not None:\n query_params.append(('_subject', local_var_params['subject']))\n if 'redirect_to' in local_var_params and local_var_params['redirect_to'\n ] is not None:\n query_params.append(('_redirectTo', local_var_params[\n 'redirect_to']))\n if 'email_address' in local_var_params and local_var_params[\n 'email_address'] is not None:\n query_params.append(('_emailAddress', local_var_params[\n 'email_address']))\n if 'success_message' in local_var_params and local_var_params[\n 'success_message'] is not None:\n query_params.append(('_successMessage', local_var_params[\n 'success_message']))\n if 'spam_check' in local_var_params and local_var_params['spam_check'\n ] is not None:\n query_params.append(('_spamCheck', local_var_params['spam_check']))\n if 'other_parameters' in local_var_params and local_var_params[\n 'other_parameters'] is not None:\n query_params.append(('otherParameters', local_var_params[\n 'other_parameters']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = 
None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n auth_settings = ['API_KEY']\n return self.api_client.call_api('/forms', 'POST', path_params,\n query_params, header_params, body=body_params, post_params=\n form_params, files=local_var_files, response_type='str',\n auth_settings=auth_settings, async_req=local_var_params.get(\n 'async_req'), _return_http_data_only=local_var_params.get(\n '_return_http_data_only'), _preload_content=local_var_params.\n get('_preload_content', True), _request_timeout=\n local_var_params.get('_request_timeout'), collection_formats=\n collection_formats)\n",
"step-5": "# coding: utf-8\n\n\"\"\"\n MailSlurp API\n\n MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://docs.mailslurp.com/) - [Examples](https://github.com/mailslurp/examples) repository # noqa: E501\n\n The version of the OpenAPI document: 6.5.2\n Contact: contact@mailslurp.dev\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nfrom __future__ import absolute_import\n\nimport re # noqa: F401\n\n# python 2 and python 3 compatibility library\nimport six\n\nfrom mailslurp_client.api_client import ApiClient\nfrom mailslurp_client.exceptions import ( # noqa: F401\n ApiTypeError,\n ApiValueError\n)\n\n\nclass FormControllerApi(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def submit_form(self, **kwargs): # noqa: E501\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. 
These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\\\"https://python.api.mailslurp.com/forms\\\" method=\\\"post\\\" > <input name=\\\"_to\\\" type=\\\"hidden\\\" value=\\\"test@example.com\\\"/> <textarea name=\\\"feedback\\\"></textarea> <button type=\\\"submit\\\">Submit</button> </form> ``` #### URL Example ```html <form action=\\\"https://python.api.mailslurp.com/forms?_to=test@example.com\\\" method=\\\"post\\\" > <textarea name=\\\"feedback\\\"></textarea> <button type=\\\"submit\\\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\\\"multipart/form-data\\\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. 
Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: str\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n return self.submit_form_with_http_info(**kwargs) # noqa: E501\n\n def submit_form_with_http_info(self, **kwargs): # noqa: E501\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. 
These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\\\"https://python.api.mailslurp.com/forms\\\" method=\\\"post\\\" > <input name=\\\"_to\\\" type=\\\"hidden\\\" value=\\\"test@example.com\\\"/> <textarea name=\\\"feedback\\\"></textarea> <button type=\\\"submit\\\">Submit</button> </form> ``` #### URL Example ```html <form action=\\\"https://python.api.mailslurp.com/forms?_to=test@example.com\\\" method=\\\"post\\\" > <textarea name=\\\"feedback\\\"></textarea> <button type=\\\"submit\\\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\\\"multipart/form-data\\\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. 
Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. 
It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(str, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n local_var_params = locals()\n\n all_params = [\n 'to',\n 'subject',\n 'redirect_to',\n 'email_address',\n 'success_message',\n 'spam_check',\n 'other_parameters'\n ]\n all_params.extend(\n [\n 'async_req',\n '_return_http_data_only',\n '_preload_content',\n '_request_timeout'\n ]\n )\n\n for key, val in six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise ApiTypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method submit_form\" % key\n )\n local_var_params[key] = val\n del local_var_params['kwargs']\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n if 'to' in local_var_params and local_var_params['to'] is not None: # noqa: E501\n query_params.append(('_to', local_var_params['to'])) # noqa: E501\n if 'subject' in local_var_params and local_var_params['subject'] is not None: # noqa: E501\n query_params.append(('_subject', local_var_params['subject'])) # noqa: E501\n if 'redirect_to' in local_var_params and local_var_params['redirect_to'] is not None: # noqa: E501\n query_params.append(('_redirectTo', local_var_params['redirect_to'])) # noqa: E501\n if 'email_address' in local_var_params and local_var_params['email_address'] is not None: # noqa: E501\n query_params.append(('_emailAddress', local_var_params['email_address'])) # noqa: E501\n if 'success_message' in local_var_params and local_var_params['success_message'] is not None: # noqa: E501\n query_params.append(('_successMessage', local_var_params['success_message'])) # noqa: E501\n if 'spam_check' in local_var_params and local_var_params['spam_check'] is not None: # noqa: E501\n query_params.append(('_spamCheck', local_var_params['spam_check'])) # noqa: E501\n if 'other_parameters' in local_var_params and 
local_var_params['other_parameters'] is not None: # noqa: E501\n query_params.append(('otherParameters', local_var_params['other_parameters'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['*/*']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['API_KEY'] # noqa: E501\n\n return self.api_client.call_api(\n '/forms', 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='str', # noqa: E501\n auth_settings=auth_settings,\n async_req=local_var_params.get('async_req'),\n _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501\n _preload_content=local_var_params.get('_preload_content', True),\n _request_timeout=local_var_params.get('_request_timeout'),\n collection_formats=collection_formats)\n",
"step-ids": [
2,
3,
4,
5,
7
]
}
|
[
2,
3,
4,
5,
7
] |
# Download the helper library from https://www.twilio.com/docs/python/install
import os

from twilio.rest import Client

# SECURITY: credentials were hard-coded in this script. They are now read from
# the environment first; the original literals remain only as fallbacks for
# backward compatibility and should be rotated and removed.
account_sid = os.environ.get('TWILIO_ACCOUNT_SID', 'AC76d9b17b2c23170b7019924f709f366b')
auth_token = os.environ.get('TWILIO_AUTH_TOKEN', '8fba7a54c6e3dc3754043b3865fa9d82')
client = Client(account_sid, auth_token)

# Sample user records — presumably mirrors documents from a user store
# (note both entries share the same "_id"; TODO confirm that is intentional).
user_sample = [
    {
        "_id": "5e804c501c9d440000986adc",
        "name": "Lizzie Siegle",
        "milesRan": 20,
        "milesGoal": 30
    },
    {
        "_id": "5e804c501c9d440000986adc",
        "name": "Jeff Lawson",
        "milesRan": 5,
        "milesGoal": 20
    }
]

# Text the second user when the first user's mileage reaches or exceeds the
# second user's goal; otherwise just report that nothing was sent.
if user_sample[0].get("milesRan") >= user_sample[1].get("milesGoal"):
    message = client.messages.create(
        body='Oh, no! ' + user_sample[0].get("name") + ' surpassed your running goal this week. Get moving to keep the lead!',
        from_='+13107364584',
        to='+19162673363'
    )
    print(message.sid)
else:
    print("Nothing sent!")
|
normal
|
{
"blob_id": "67eb9985fc0ae9a00ce84a2460b69b00df1c9096",
"index": 3310,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif user_sample[0].get('milesRan') >= user_sample[1].get('milesGoal'):\n message = client.messages.create(body='Oh, no! ' + user_sample[0].get(\n 'name') +\n ' surpassed your running goal this week. Get moving to keep the lead!',\n from_='+13107364584', to='+19162673363')\n print(message.sid)\nelse:\n print('Nothing sent!')\n",
"step-3": "<mask token>\naccount_sid = 'AC76d9b17b2c23170b7019924f709f366b'\nauth_token = '8fba7a54c6e3dc3754043b3865fa9d82'\nclient = Client(account_sid, auth_token)\nuser_sample = [{'_id': '5e804c501c9d440000986adc', 'name': 'Lizzie Siegle',\n 'milesRan': 20, 'milesGoal': 30}, {'_id': '5e804c501c9d440000986adc',\n 'name': 'Jeff Lawson', 'milesRan': 5, 'milesGoal': 20}]\nif user_sample[0].get('milesRan') >= user_sample[1].get('milesGoal'):\n message = client.messages.create(body='Oh, no! ' + user_sample[0].get(\n 'name') +\n ' surpassed your running goal this week. Get moving to keep the lead!',\n from_='+13107364584', to='+19162673363')\n print(message.sid)\nelse:\n print('Nothing sent!')\n",
"step-4": "from twilio.rest import Client\naccount_sid = 'AC76d9b17b2c23170b7019924f709f366b'\nauth_token = '8fba7a54c6e3dc3754043b3865fa9d82'\nclient = Client(account_sid, auth_token)\nuser_sample = [{'_id': '5e804c501c9d440000986adc', 'name': 'Lizzie Siegle',\n 'milesRan': 20, 'milesGoal': 30}, {'_id': '5e804c501c9d440000986adc',\n 'name': 'Jeff Lawson', 'milesRan': 5, 'milesGoal': 20}]\nif user_sample[0].get('milesRan') >= user_sample[1].get('milesGoal'):\n message = client.messages.create(body='Oh, no! ' + user_sample[0].get(\n 'name') +\n ' surpassed your running goal this week. Get moving to keep the lead!',\n from_='+13107364584', to='+19162673363')\n print(message.sid)\nelse:\n print('Nothing sent!')\n",
"step-5": "# Download the helper library from https://www.twilio.com/docs/python/install\nfrom twilio.rest import Client\n\naccount_sid = 'AC76d9b17b2c23170b7019924f709f366b'\nauth_token = '8fba7a54c6e3dc3754043b3865fa9d82'\nclient = Client(account_sid, auth_token)\n\nuser_sample = [\n {\n \"_id\": \"5e804c501c9d440000986adc\",\n \"name\": \"Lizzie Siegle\",\n \"milesRan\": 20,\n \"milesGoal\": 30\n },\n {\n \"_id\": \"5e804c501c9d440000986adc\",\n \"name\": \"Jeff Lawson\",\n \"milesRan\": 5,\n \"milesGoal\": 20\n }\n]\n\nif (user_sample[0].get(\"milesRan\") >= user_sample[1].get(\"milesGoal\")):\n message = client.messages \\\n .create(\n body='Oh, no! ' + user_sample[0].get(\"name\") + ' surpassed your running goal this week. Get moving to keep the lead!',\n from_='+13107364584',\n to='+19162673363'\n )\n print(message.sid)\nelse:\n print(\"Nothing sent!\");\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import logging

from django.contrib import auth
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.shortcuts import render, redirect, get_object_or_404
from django.template.defaultfilters import slugify
from django.utils import timezone

from courses.models import offerings

from .forms import addMainContent
from .models import Blog, UseCase, Comment
# Create your views here.
def home(request):
    """Render the landing page with up to three published blogs, published
    use cases, and active course offerings."""
    published_blogs = Blog.objects.filter(status=1)
    active_courses = offerings.objects.filter(course_status=1)
    published_usecases = UseCase.objects.filter(usecase_status=1)
    context = {
        'blog': published_blogs[:3],
        'usecase': published_usecases[:3],
        'courses': active_courses[:3],
    }
    return render(request, 'dlblog/home.html', context)
def blogs(request):
    """List every published blog post (status == 1)."""
    published = Blog.objects.filter(status=1)
    return render(request, 'dlblog/blogs.html', {'blog': published})
# NOTE(review): dead code — an earlier login view kept alive as a module-level
# string literal (evaluated and discarded at import time, never executed).
# Candidate for deletion; retained unchanged below.
'''
def login(request):
    if request.method == 'POST':
        user = auth.authenticate(username=request.POST['username'],password=request.POST['password'])
        # if user is present
        if user is not None:
            # do the login
            auth.login(request,user)
            # if user is present and enters valid credentials
            return render(request,'dlonboarding/userhome.html',{'user':user})
        else:
            return render(request,'dlblog/signin.html',{'error':"Please enter valid Credentials!"})
    else:
        return render(request, 'dlblog/signin.html',{'error':"Please enter valid Credentials!"})
'''
@login_required
def newblog(request):
    """Display the blank 'write a new blog' page; requires authentication."""
    template_name = 'dlblog/newblog.html'
    return render(request, template_name)
# NOTE(review): dead code — a previous manual-field version of create() kept
# as a module-level string literal (evaluated and discarded at import time).
# The live create() below uses the addMainContent form instead. Candidate for
# deletion; retained unchanged below.
'''
@login_required
def create(request):
    if request.method == 'POST':
        if request.FILES['blog_main_image'] and request.POST['title'] and request.POST['summary'] and request.POST['content']:
            blog = Blog()
            # Fields 1 Blog Image.
            blog.blog_main_image = request.FILES['blog_main_image']
            # Fields 2 Blog Title.
            blog.title = request.POST['title']
            # Fields 3 Blog Summary.
            blog.summary = request.POST['summary']
            # Fields 4 Blog Slug.
            blog.slug = slugify(request.POST['title'])
            # Fields 5 Blog Author.
            blog.author = request.user
            # Fields 6 Blog Content
            blog.content = request.POST['content']
            blog.save()
            return redirect('/dlblog/' + blog.slug)
        else:
            return render(request, 'dlblog/newblog.html', {'error': 'Please enter all details'})
    else:
        return render(request, 'dlblog/newblog.html')
'''
def add_comment(request):
    """Handle the main-content form posted from the blog home page.

    GET renders an unbound form; a valid POST saves the content with the
    current user as author, derives the slug from the submitted title, and
    redirects to the new page. An invalid POST re-renders with the bound form.

    NOTE(review): despite the name, this saves an addMainContent instance and
    renders blog_home.html — presumably intentional; verify against the URLconf.
    """
    if request.method != "POST":
        form = addMainContent()
    else:
        form = addMainContent(request.POST, request.FILES)
        if form.is_valid():
            content = form.save(commit=False)
            content.author = request.user
            content.slug = slugify(request.POST['title'])
            content.save()
            return redirect('/dlblog/' + content.slug)
    return render(request, 'dlblog/blog_home.html', {'form': form})
@login_required
def create(request):
    """Create a new blog post from the submitted form (login required).

    GET renders an unbound form; a valid POST saves the post with the current
    user as author and a slug derived from the title, then redirects to it.
    An invalid POST re-renders the page with the bound form.
    """
    if request.method != "POST":
        form = addMainContent()
    else:
        form = addMainContent(request.POST, request.FILES)
        if form.is_valid():
            post = form.save(commit=False)
            post.author = request.user
            post.slug = slugify(request.POST['title'])
            post.save()
            return redirect('/dlblog/' + post.slug)
    return render(request, 'dlblog/newblog.html', {'form': form})
@login_required
def edit_blog(request, slug):
    """Edit an existing blog post identified by *slug* (login required).

    GET renders the form pre-filled from the post; a valid POST saves the
    changes and redirects to the post's page. An invalid POST, or a save
    failure, re-renders the edit page with the bound form.

    Raises Http404 (via get_object_or_404) when no Blog has the given slug.
    """
    blog = get_object_or_404(Blog, slug=slug)
    if request.method == "POST":
        form = addMainContent(request.POST, request.FILES, instance=blog)
        try:
            if form.is_valid():
                instance = form.save(commit=False)
                instance.save()
                return redirect('/dlblog/' + instance.slug)
        except Exception:
            # Fix: the original printed the error to stdout, losing the
            # traceback; log it instead so save failures appear in server logs.
            logging.getLogger(__name__).exception(
                "Failed to save edited blog %r", slug)
    else:
        form = addMainContent(instance=blog)
    slug = blog.slug
    return render(request, 'dlblog/editblog.html', {'form': form, 'slug': slug})
def blog_home(request, slug):
    """Render the detail page for the blog entry matching *slug* (404 if absent)."""
    entry = get_object_or_404(Blog, slug=slug)
    return render(request, 'dlblog/blog_home.html', {'blog': entry})
|
normal
|
{
"blob_id": "70fcf25cd7d70972e8042dc882f6ecb12d36461a",
"index": 3353,
"step-1": "<mask token>\n\n\ndef blogs(request):\n only_pub_blog = Blog.objects.filter(status=1)\n return render(request, 'dlblog/blogs.html', {'blog': only_pub_blog})\n\n\n<mask token>\n\n\n@login_required\ndef newblog(request):\n return render(request, 'dlblog/newblog.html')\n\n\n<mask token>\n\n\n@login_required\ndef create(request):\n if request.method == 'POST':\n form = addMainContent(request.POST, request.FILES)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.author = request.user\n instance.slug = slugify(request.POST['title'])\n instance.save()\n return redirect('/dlblog/' + instance.slug)\n else:\n form = addMainContent()\n return render(request, 'dlblog/newblog.html', {'form': form})\n\n\n@login_required\ndef edit_blog(request, slug):\n blog = get_object_or_404(Blog, slug=slug)\n if request.method == 'POST':\n form = addMainContent(request.POST, request.FILES, instance=blog)\n try:\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n return redirect('/dlblog/' + instance.slug)\n except Exception as e:\n print('Error :', e)\n else:\n form = addMainContent(instance=blog)\n slug = blog.slug\n return render(request, 'dlblog/editblog.html', {'form': form, 'slug': slug}\n )\n\n\ndef blog_home(request, slug):\n blog = get_object_or_404(Blog, slug=slug)\n return render(request, 'dlblog/blog_home.html', {'blog': blog})\n",
"step-2": "<mask token>\n\n\ndef home(request):\n only_pub_blog = Blog.objects.filter(status=1)\n only_active_course = offerings.objects.filter(course_status=1)\n ony_pub_ucase = UseCase.objects.filter(usecase_status=1)\n return render(request, 'dlblog/home.html', {'blog': only_pub_blog[0:3],\n 'usecase': ony_pub_ucase[0:3], 'courses': only_active_course[0:3]})\n\n\ndef blogs(request):\n only_pub_blog = Blog.objects.filter(status=1)\n return render(request, 'dlblog/blogs.html', {'blog': only_pub_blog})\n\n\n<mask token>\n\n\n@login_required\ndef newblog(request):\n return render(request, 'dlblog/newblog.html')\n\n\n<mask token>\n\n\n@login_required\ndef create(request):\n if request.method == 'POST':\n form = addMainContent(request.POST, request.FILES)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.author = request.user\n instance.slug = slugify(request.POST['title'])\n instance.save()\n return redirect('/dlblog/' + instance.slug)\n else:\n form = addMainContent()\n return render(request, 'dlblog/newblog.html', {'form': form})\n\n\n@login_required\ndef edit_blog(request, slug):\n blog = get_object_or_404(Blog, slug=slug)\n if request.method == 'POST':\n form = addMainContent(request.POST, request.FILES, instance=blog)\n try:\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n return redirect('/dlblog/' + instance.slug)\n except Exception as e:\n print('Error :', e)\n else:\n form = addMainContent(instance=blog)\n slug = blog.slug\n return render(request, 'dlblog/editblog.html', {'form': form, 'slug': slug}\n )\n\n\ndef blog_home(request, slug):\n blog = get_object_or_404(Blog, slug=slug)\n return render(request, 'dlblog/blog_home.html', {'blog': blog})\n",
"step-3": "<mask token>\n\n\ndef home(request):\n only_pub_blog = Blog.objects.filter(status=1)\n only_active_course = offerings.objects.filter(course_status=1)\n ony_pub_ucase = UseCase.objects.filter(usecase_status=1)\n return render(request, 'dlblog/home.html', {'blog': only_pub_blog[0:3],\n 'usecase': ony_pub_ucase[0:3], 'courses': only_active_course[0:3]})\n\n\ndef blogs(request):\n only_pub_blog = Blog.objects.filter(status=1)\n return render(request, 'dlblog/blogs.html', {'blog': only_pub_blog})\n\n\n<mask token>\n\n\n@login_required\ndef newblog(request):\n return render(request, 'dlblog/newblog.html')\n\n\n<mask token>\n\n\ndef add_comment(request):\n if request.method == 'POST':\n form = addMainContent(request.POST, request.FILES)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.author = request.user\n instance.slug = slugify(request.POST['title'])\n instance.save()\n return redirect('/dlblog/' + instance.slug)\n else:\n form = addMainContent()\n return render(request, 'dlblog/blog_home.html', {'form': form})\n\n\n@login_required\ndef create(request):\n if request.method == 'POST':\n form = addMainContent(request.POST, request.FILES)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.author = request.user\n instance.slug = slugify(request.POST['title'])\n instance.save()\n return redirect('/dlblog/' + instance.slug)\n else:\n form = addMainContent()\n return render(request, 'dlblog/newblog.html', {'form': form})\n\n\n@login_required\ndef edit_blog(request, slug):\n blog = get_object_or_404(Blog, slug=slug)\n if request.method == 'POST':\n form = addMainContent(request.POST, request.FILES, instance=blog)\n try:\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n return redirect('/dlblog/' + instance.slug)\n except Exception as e:\n print('Error :', e)\n else:\n form = addMainContent(instance=blog)\n slug = blog.slug\n return render(request, 'dlblog/editblog.html', {'form': form, 'slug': 
slug}\n )\n\n\ndef blog_home(request, slug):\n blog = get_object_or_404(Blog, slug=slug)\n return render(request, 'dlblog/blog_home.html', {'blog': blog})\n",
"step-4": "from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import Blog, UseCase, Comment\nfrom courses.models import offerings\nfrom django.contrib.auth.models import User\nfrom django.contrib import auth\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils import timezone\nfrom django.template.defaultfilters import slugify\nfrom .forms import addMainContent\n\n\ndef home(request):\n only_pub_blog = Blog.objects.filter(status=1)\n only_active_course = offerings.objects.filter(course_status=1)\n ony_pub_ucase = UseCase.objects.filter(usecase_status=1)\n return render(request, 'dlblog/home.html', {'blog': only_pub_blog[0:3],\n 'usecase': ony_pub_ucase[0:3], 'courses': only_active_course[0:3]})\n\n\ndef blogs(request):\n only_pub_blog = Blog.objects.filter(status=1)\n return render(request, 'dlblog/blogs.html', {'blog': only_pub_blog})\n\n\n<mask token>\n\n\n@login_required\ndef newblog(request):\n return render(request, 'dlblog/newblog.html')\n\n\n<mask token>\n\n\ndef add_comment(request):\n if request.method == 'POST':\n form = addMainContent(request.POST, request.FILES)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.author = request.user\n instance.slug = slugify(request.POST['title'])\n instance.save()\n return redirect('/dlblog/' + instance.slug)\n else:\n form = addMainContent()\n return render(request, 'dlblog/blog_home.html', {'form': form})\n\n\n@login_required\ndef create(request):\n if request.method == 'POST':\n form = addMainContent(request.POST, request.FILES)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.author = request.user\n instance.slug = slugify(request.POST['title'])\n instance.save()\n return redirect('/dlblog/' + instance.slug)\n else:\n form = addMainContent()\n return render(request, 'dlblog/newblog.html', {'form': form})\n\n\n@login_required\ndef edit_blog(request, slug):\n blog = get_object_or_404(Blog, slug=slug)\n if request.method == 
'POST':\n form = addMainContent(request.POST, request.FILES, instance=blog)\n try:\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n return redirect('/dlblog/' + instance.slug)\n except Exception as e:\n print('Error :', e)\n else:\n form = addMainContent(instance=blog)\n slug = blog.slug\n return render(request, 'dlblog/editblog.html', {'form': form, 'slug': slug}\n )\n\n\ndef blog_home(request, slug):\n blog = get_object_or_404(Blog, slug=slug)\n return render(request, 'dlblog/blog_home.html', {'blog': blog})\n",
"step-5": "from django.shortcuts import render,redirect,get_object_or_404\nfrom .models import Blog,UseCase,Comment\nfrom courses.models import offerings\nfrom django.contrib.auth.models import User\nfrom django.contrib import auth\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils import timezone\nfrom django.template.defaultfilters import slugify\nfrom .forms import addMainContent\n\n\n# Create your views here.\n\ndef home(request):\n only_pub_blog = Blog.objects.filter(status=1)\n only_active_course = offerings.objects.filter(course_status=1)\n ony_pub_ucase = UseCase.objects.filter(usecase_status=1)\n return render(request,'dlblog/home.html',{'blog':only_pub_blog[0:3],'usecase':ony_pub_ucase[0:3],'courses':only_active_course[0:3]})\n\ndef blogs(request):\n only_pub_blog = Blog.objects.filter(status=1)\n return render(request,'dlblog/blogs.html',{'blog':only_pub_blog})\n\n'''\ndef login(request):\n\n if request.method == 'POST':\n user = auth.authenticate(username=request.POST['username'],password=request.POST['password'])\n\n\n # if user is present\n if user is not None:\n # do the login\n auth.login(request,user)\n # if user is present and enters valid credentials\n return render(request,'dlonboarding/userhome.html',{'user':user})\n else:\n return render(request,'dlblog/signin.html',{'error':\"Please enter valid Credentials!\"})\n else:\n return render(request, 'dlblog/signin.html',{'error':\"Please enter valid Credentials!\"})\n\n'''\n\n@login_required\ndef newblog(request):\n return render(request,'dlblog/newblog.html')\n\n'''\n\n\n@login_required\ndef create(request):\n if request.method == 'POST':\n if request.FILES['blog_main_image'] and request.POST['title'] and request.POST['summary'] and request.POST['content']:\n\n blog = Blog()\n # Fields 1 Blog Image.\n blog.blog_main_image = request.FILES['blog_main_image']\n # Fields 2 Blog Title.\n blog.title = request.POST['title']\n # Fields 3 Blog Summary.\n blog.summary = 
request.POST['summary']\n # Fields 4 Blog Slug.\n blog.slug = slugify(request.POST['title'])\n # Fields 5 Blog Author.\n blog.author = request.user\n # Fields 6 Blog Content\n blog.content = request.POST['content']\n blog.save()\n return redirect('/dlblog/' + blog.slug)\n\n else:\n return render(request, 'dlblog/newblog.html', {'error': 'Please enter all details'})\n else:\n return render(request, 'dlblog/newblog.html')\n\n\n'''\n\ndef add_comment(request):\n if request.method == \"POST\":\n form = addMainContent(request.POST,request.FILES)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.author = request.user\n instance.slug = slugify(request.POST['title'])\n instance.save()\n return redirect('/dlblog/'+instance.slug)\n else:\n form = addMainContent()\n return render(request, 'dlblog/blog_home.html',{'form':form})\n\n@login_required\ndef create(request):\n if request.method == \"POST\":\n form = addMainContent(request.POST,request.FILES)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.author = request.user\n instance.slug = slugify(request.POST['title'])\n instance.save()\n return redirect('/dlblog/'+instance.slug)\n else:\n form = addMainContent()\n return render(request, 'dlblog/newblog.html',{'form':form})\n\n\n@login_required\ndef edit_blog(request,slug):\n blog = get_object_or_404(Blog, slug=slug)\n if request.method == \"POST\":\n form = addMainContent(request.POST ,request.FILES,instance=blog)\n try:\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n return redirect('/dlblog/'+instance.slug)\n except Exception as e:\n print(\"Error :\", e)\n else:\n form = addMainContent(instance=blog)\n slug = blog.slug\n\n return render(request, 'dlblog/editblog.html', {'form': form,'slug':slug})\n\n\ndef blog_home(request,slug):\n blog = get_object_or_404(Blog, slug=slug)\n\n return render(request,'dlblog/blog_home.html',{'blog': blog})\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
class Normalizer:
<|reserved_special_token_0|>
def imageFromArg(self, image):
if isinstance(image, (str, unicode)):
return cv2.imread(image, 0)
else:
return image
def videoReaderFromArg(self, video):
if isinstance(video, (str, unicode)):
vc = cv2.VideoCapture(video)
else:
vc = video
return vc
def normalize(self, background, in_video, out_video):
vc = self.videoReaderFromArg(in_video)
frames = int(vc.get(cv2.CAP_PROP_FRAME_COUNT))
fps = float(vc.get(cv2.CAP_PROP_FPS))
if fps == float('inf'):
fps = 300
width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = int(vc.get(cv2.CAP_PROP_FOURCC))
vw = cv2.VideoWriter(out_video, fourcc, fps, (width, height), False)
self.normalizeVideo(background, vc, vw)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def transformRange(self, value, oldmin, oldmax, newmin, newmax):
return (value - oldmin) * (newmax - newmin) / (oldmax - oldmin
) + newmin
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Normalizer:
def __init__(self):
pass
def imageFromArg(self, image):
if isinstance(image, (str, unicode)):
return cv2.imread(image, 0)
else:
return image
def videoReaderFromArg(self, video):
if isinstance(video, (str, unicode)):
vc = cv2.VideoCapture(video)
else:
vc = video
return vc
def normalize(self, background, in_video, out_video):
vc = self.videoReaderFromArg(in_video)
frames = int(vc.get(cv2.CAP_PROP_FRAME_COUNT))
fps = float(vc.get(cv2.CAP_PROP_FPS))
if fps == float('inf'):
fps = 300
width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = int(vc.get(cv2.CAP_PROP_FOURCC))
vw = cv2.VideoWriter(out_video, fourcc, fps, (width, height), False)
self.normalizeVideo(background, vc, vw)
<|reserved_special_token_0|>
def normalizeFrame(self, background, frame):
if callable(background):
bg = background(frame)
else:
bg = self.imageFromArg(background)
a = frame.astype('float')
a = self.transformRange(a, 0, 255, 1, 255)
b = bg.astype('float')
b = self.transformRange(b, 0, 255, 1, 255)
c = a / ((b + 1) / 256)
d = c * (c < 255) + 255 * np.ones(np.shape(c)) * (c > 255)
return d.astype('uint8')
def transformRange(self, value, oldmin, oldmax, newmin, newmax):
return (value - oldmin) * (newmax - newmin) / (oldmax - oldmin
) + newmin
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Normalizer:
def __init__(self):
pass
def imageFromArg(self, image):
if isinstance(image, (str, unicode)):
return cv2.imread(image, 0)
else:
return image
def videoReaderFromArg(self, video):
if isinstance(video, (str, unicode)):
vc = cv2.VideoCapture(video)
else:
vc = video
return vc
def normalize(self, background, in_video, out_video):
vc = self.videoReaderFromArg(in_video)
frames = int(vc.get(cv2.CAP_PROP_FRAME_COUNT))
fps = float(vc.get(cv2.CAP_PROP_FPS))
if fps == float('inf'):
fps = 300
width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = int(vc.get(cv2.CAP_PROP_FOURCC))
vw = cv2.VideoWriter(out_video, fourcc, fps, (width, height), False)
self.normalizeVideo(background, vc, vw)
def normalizeVideo(self, background, video_reader, video_writer):
f = 1
while True:
ret, frame = video_reader.read()
if not ret:
break
else:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
f += 1
normal_frame = self.normalizeFrame(background, frame)
video_writer.write(normal_frame)
def normalizeFrame(self, background, frame):
if callable(background):
bg = background(frame)
else:
bg = self.imageFromArg(background)
a = frame.astype('float')
a = self.transformRange(a, 0, 255, 1, 255)
b = bg.astype('float')
b = self.transformRange(b, 0, 255, 1, 255)
c = a / ((b + 1) / 256)
d = c * (c < 255) + 255 * np.ones(np.shape(c)) * (c > 255)
return d.astype('uint8')
def transformRange(self, value, oldmin, oldmax, newmin, newmax):
return (value - oldmin) * (newmax - newmin) / (oldmax - oldmin
) + newmin
def build_parser():
parser = ArgumentParser()
parser.add_argument('input_video', help='video to process')
parser.add_argument('background', help='background image')
parser.add_argument('output_video', help='file to save normalized video to'
)
return parser
def main():
parser = build_parser()
opts = parser.parse_args()
if not os.path.isfile(opts.input_video):
parser.error('Video file %s does not exist.' % opts.input_video)
if not os.path.isfile(opts.background):
parser.error('Image file %s does not exist.' % opts.background)
norm = Normalizer()
norm.normalize(opts.background, opts.input_video, opts.output_video)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Normalizer:
def __init__(self):
pass
def imageFromArg(self, image):
if isinstance(image, (str, unicode)):
return cv2.imread(image, 0)
else:
return image
def videoReaderFromArg(self, video):
if isinstance(video, (str, unicode)):
vc = cv2.VideoCapture(video)
else:
vc = video
return vc
def normalize(self, background, in_video, out_video):
vc = self.videoReaderFromArg(in_video)
frames = int(vc.get(cv2.CAP_PROP_FRAME_COUNT))
fps = float(vc.get(cv2.CAP_PROP_FPS))
if fps == float('inf'):
fps = 300
width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = int(vc.get(cv2.CAP_PROP_FOURCC))
vw = cv2.VideoWriter(out_video, fourcc, fps, (width, height), False)
self.normalizeVideo(background, vc, vw)
def normalizeVideo(self, background, video_reader, video_writer):
f = 1
while True:
ret, frame = video_reader.read()
if not ret:
break
else:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
f += 1
normal_frame = self.normalizeFrame(background, frame)
video_writer.write(normal_frame)
def normalizeFrame(self, background, frame):
if callable(background):
bg = background(frame)
else:
bg = self.imageFromArg(background)
a = frame.astype('float')
a = self.transformRange(a, 0, 255, 1, 255)
b = bg.astype('float')
b = self.transformRange(b, 0, 255, 1, 255)
c = a / ((b + 1) / 256)
d = c * (c < 255) + 255 * np.ones(np.shape(c)) * (c > 255)
return d.astype('uint8')
def transformRange(self, value, oldmin, oldmax, newmin, newmax):
return (value - oldmin) * (newmax - newmin) / (oldmax - oldmin
) + newmin
def build_parser():
parser = ArgumentParser()
parser.add_argument('input_video', help='video to process')
parser.add_argument('background', help='background image')
parser.add_argument('output_video', help='file to save normalized video to'
)
return parser
def main():
parser = build_parser()
opts = parser.parse_args()
if not os.path.isfile(opts.input_video):
parser.error('Video file %s does not exist.' % opts.input_video)
if not os.path.isfile(opts.background):
parser.error('Image file %s does not exist.' % opts.background)
norm = Normalizer()
norm.normalize(opts.background, opts.input_video, opts.output_video)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#! /usr/bin/env python
"""
Normalizes a vidoe by dividing against it's background.
See: BackgroundExtractor.py to get the background of a video.
USING:
As a command line utility:
$ Normalizer.py input_video input_image output_video
As a module:
from Normalizer import Normalizer
norm = Normalizer("input_video.avi", input_image, "output_video.avi")
norm.normalize()
Author: Martin Humphreys
"""
from argparse import ArgumentParser
import numpy as np
import os
import cv2
class Normalizer:
    """Normalizes a video by dividing each frame against a background image.

    The background may be given as a file path, an image array, or a
    callable ``frame -> background`` evaluated per frame.
    """

    # Python 2 had both ``str`` and ``unicode``; Python 3 has only ``str``.
    # The original ``isinstance(x, (str, unicode))`` raised NameError on
    # Python 3, so resolve the tuple of string types once at class creation.
    try:
        _STRING_TYPES = (str, unicode)  # noqa: F821 -- Python 2
    except NameError:
        _STRING_TYPES = (str,)  # Python 3

    def __init__(self):
        pass

    def imageFromArg(self, image):
        """Return *image* unchanged, or load it as grayscale if it is a path."""
        if isinstance(image, self._STRING_TYPES):
            return cv2.imread(image, 0)
        else:
            return image

    def videoReaderFromArg(self, video):
        """Return *video* unchanged, or open it with VideoCapture if it is a path."""
        if isinstance(video, self._STRING_TYPES):
            vc = cv2.VideoCapture(video)
        else:
            vc = video
        return vc

    def normalize(self, background, in_video, out_video):
        """Normalize every frame of *in_video* and write the result to *out_video*.

        The writer reuses the input's fps, size and fourcc; a reported fps
        of infinity (seen with some containers) is replaced by 300.
        """
        vc = self.videoReaderFromArg(in_video)
        fps = float(vc.get(cv2.CAP_PROP_FPS))
        if fps == float('inf'):
            fps = 300
        width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fourcc = int(vc.get(cv2.CAP_PROP_FOURCC))
        # False => grayscale output, matching the BGR->gray conversion below.
        vw = cv2.VideoWriter(out_video, fourcc, fps, (width, height), False)
        self.normalizeVideo(background, vc, vw)

    def normalizeVideo(self, background, video_reader, video_writer):
        """Read frames until exhaustion, normalizing and writing each one."""
        while True:
            ret, frame = video_reader.read()
            if not ret:
                break
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            video_writer.write(self.normalizeFrame(background, frame))

    def normalizeFrame(self, background, frame):
        """Return *frame* divided by its background, rescaled to uint8.

        Both images are first mapped from [0, 255] into [1, 255] so the
        division never hits zero.
        """
        if callable(background):
            bg = background(frame)
        else:
            bg = self.imageFromArg(background)
        a = self.transformRange(frame.astype('float'), 0, 255, 1, 255)
        b = self.transformRange(bg.astype('float'), 0, 255, 1, 255)
        c = a / ((b + 1) / 256)
        # Clamp into the uint8 range.  (The original expression
        # c*(c < 255) + 255*(c > 255) mapped c == 255 exactly to 0.)
        d = np.clip(c, 0, 255)
        return d.astype('uint8')

    def transformRange(self, value, oldmin, oldmax, newmin, newmax):
        """Linearly map *value* from [oldmin, oldmax] onto [newmin, newmax]."""
        return (((value - oldmin) * (newmax - newmin)) / (oldmax - oldmin)) + newmin
def build_parser():
    """Construct the command-line argument parser for this script."""
    ap = ArgumentParser()
    for arg_name, description in (
        ('input_video', 'video to process'),
        ('background', 'background image'),
        ('output_video', 'file to save normalized video to'),
    ):
        ap.add_argument(arg_name, help=description)
    return ap
def main():
    """Command-line entry point: validate the input paths, then normalize."""
    parser = build_parser()
    opts = parser.parse_args()
    # Fail fast (via parser.error, which exits) if either input is missing.
    checks = (
        (opts.input_video, "Video file %s does not exist."),
        (opts.background, "Image file %s does not exist."),
    )
    for path, message in checks:
        if not os.path.isfile(path):
            parser.error(message % path)
    Normalizer().normalize(opts.background, opts.input_video, opts.output_video)


if __name__ == '__main__':
    main()
|
flexible
|
{
"blob_id": "141e0f20ce912ecf21940f78e9f40cb86b91dc2b",
"index": 6121,
"step-1": "<mask token>\n\n\nclass Normalizer:\n <mask token>\n\n def imageFromArg(self, image):\n if isinstance(image, (str, unicode)):\n return cv2.imread(image, 0)\n else:\n return image\n\n def videoReaderFromArg(self, video):\n if isinstance(video, (str, unicode)):\n vc = cv2.VideoCapture(video)\n else:\n vc = video\n return vc\n\n def normalize(self, background, in_video, out_video):\n vc = self.videoReaderFromArg(in_video)\n frames = int(vc.get(cv2.CAP_PROP_FRAME_COUNT))\n fps = float(vc.get(cv2.CAP_PROP_FPS))\n if fps == float('inf'):\n fps = 300\n width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fourcc = int(vc.get(cv2.CAP_PROP_FOURCC))\n vw = cv2.VideoWriter(out_video, fourcc, fps, (width, height), False)\n self.normalizeVideo(background, vc, vw)\n <mask token>\n <mask token>\n\n def transformRange(self, value, oldmin, oldmax, newmin, newmax):\n return (value - oldmin) * (newmax - newmin) / (oldmax - oldmin\n ) + newmin\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Normalizer:\n\n def __init__(self):\n pass\n\n def imageFromArg(self, image):\n if isinstance(image, (str, unicode)):\n return cv2.imread(image, 0)\n else:\n return image\n\n def videoReaderFromArg(self, video):\n if isinstance(video, (str, unicode)):\n vc = cv2.VideoCapture(video)\n else:\n vc = video\n return vc\n\n def normalize(self, background, in_video, out_video):\n vc = self.videoReaderFromArg(in_video)\n frames = int(vc.get(cv2.CAP_PROP_FRAME_COUNT))\n fps = float(vc.get(cv2.CAP_PROP_FPS))\n if fps == float('inf'):\n fps = 300\n width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fourcc = int(vc.get(cv2.CAP_PROP_FOURCC))\n vw = cv2.VideoWriter(out_video, fourcc, fps, (width, height), False)\n self.normalizeVideo(background, vc, vw)\n <mask token>\n\n def normalizeFrame(self, background, frame):\n if callable(background):\n bg = background(frame)\n else:\n bg = self.imageFromArg(background)\n a = frame.astype('float')\n a = self.transformRange(a, 0, 255, 1, 255)\n b = bg.astype('float')\n b = self.transformRange(b, 0, 255, 1, 255)\n c = a / ((b + 1) / 256)\n d = c * (c < 255) + 255 * np.ones(np.shape(c)) * (c > 255)\n return d.astype('uint8')\n\n def transformRange(self, value, oldmin, oldmax, newmin, newmax):\n return (value - oldmin) * (newmax - newmin) / (oldmax - oldmin\n ) + newmin\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Normalizer:\n\n def __init__(self):\n pass\n\n def imageFromArg(self, image):\n if isinstance(image, (str, unicode)):\n return cv2.imread(image, 0)\n else:\n return image\n\n def videoReaderFromArg(self, video):\n if isinstance(video, (str, unicode)):\n vc = cv2.VideoCapture(video)\n else:\n vc = video\n return vc\n\n def normalize(self, background, in_video, out_video):\n vc = self.videoReaderFromArg(in_video)\n frames = int(vc.get(cv2.CAP_PROP_FRAME_COUNT))\n fps = float(vc.get(cv2.CAP_PROP_FPS))\n if fps == float('inf'):\n fps = 300\n width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fourcc = int(vc.get(cv2.CAP_PROP_FOURCC))\n vw = cv2.VideoWriter(out_video, fourcc, fps, (width, height), False)\n self.normalizeVideo(background, vc, vw)\n\n def normalizeVideo(self, background, video_reader, video_writer):\n f = 1\n while True:\n ret, frame = video_reader.read()\n if not ret:\n break\n else:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n f += 1\n normal_frame = self.normalizeFrame(background, frame)\n video_writer.write(normal_frame)\n\n def normalizeFrame(self, background, frame):\n if callable(background):\n bg = background(frame)\n else:\n bg = self.imageFromArg(background)\n a = frame.astype('float')\n a = self.transformRange(a, 0, 255, 1, 255)\n b = bg.astype('float')\n b = self.transformRange(b, 0, 255, 1, 255)\n c = a / ((b + 1) / 256)\n d = c * (c < 255) + 255 * np.ones(np.shape(c)) * (c > 255)\n return d.astype('uint8')\n\n def transformRange(self, value, oldmin, oldmax, newmin, newmax):\n return (value - oldmin) * (newmax - newmin) / (oldmax - oldmin\n ) + newmin\n\n\ndef build_parser():\n parser = ArgumentParser()\n parser.add_argument('input_video', help='video to process')\n parser.add_argument('background', help='background image')\n parser.add_argument('output_video', help='file to save normalized video to'\n )\n return parser\n\n\ndef main():\n parser = 
build_parser()\n opts = parser.parse_args()\n if not os.path.isfile(opts.input_video):\n parser.error('Video file %s does not exist.' % opts.input_video)\n if not os.path.isfile(opts.background):\n parser.error('Image file %s does not exist.' % opts.background)\n norm = Normalizer()\n norm.normalize(opts.background, opts.input_video, opts.output_video)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Normalizer:\n\n def __init__(self):\n pass\n\n def imageFromArg(self, image):\n if isinstance(image, (str, unicode)):\n return cv2.imread(image, 0)\n else:\n return image\n\n def videoReaderFromArg(self, video):\n if isinstance(video, (str, unicode)):\n vc = cv2.VideoCapture(video)\n else:\n vc = video\n return vc\n\n def normalize(self, background, in_video, out_video):\n vc = self.videoReaderFromArg(in_video)\n frames = int(vc.get(cv2.CAP_PROP_FRAME_COUNT))\n fps = float(vc.get(cv2.CAP_PROP_FPS))\n if fps == float('inf'):\n fps = 300\n width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fourcc = int(vc.get(cv2.CAP_PROP_FOURCC))\n vw = cv2.VideoWriter(out_video, fourcc, fps, (width, height), False)\n self.normalizeVideo(background, vc, vw)\n\n def normalizeVideo(self, background, video_reader, video_writer):\n f = 1\n while True:\n ret, frame = video_reader.read()\n if not ret:\n break\n else:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n f += 1\n normal_frame = self.normalizeFrame(background, frame)\n video_writer.write(normal_frame)\n\n def normalizeFrame(self, background, frame):\n if callable(background):\n bg = background(frame)\n else:\n bg = self.imageFromArg(background)\n a = frame.astype('float')\n a = self.transformRange(a, 0, 255, 1, 255)\n b = bg.astype('float')\n b = self.transformRange(b, 0, 255, 1, 255)\n c = a / ((b + 1) / 256)\n d = c * (c < 255) + 255 * np.ones(np.shape(c)) * (c > 255)\n return d.astype('uint8')\n\n def transformRange(self, value, oldmin, oldmax, newmin, newmax):\n return (value - oldmin) * (newmax - newmin) / (oldmax - oldmin\n ) + newmin\n\n\ndef build_parser():\n parser = ArgumentParser()\n parser.add_argument('input_video', help='video to process')\n parser.add_argument('background', help='background image')\n parser.add_argument('output_video', help='file to save normalized video to'\n )\n return parser\n\n\ndef main():\n parser = 
build_parser()\n opts = parser.parse_args()\n if not os.path.isfile(opts.input_video):\n parser.error('Video file %s does not exist.' % opts.input_video)\n if not os.path.isfile(opts.background):\n parser.error('Image file %s does not exist.' % opts.background)\n norm = Normalizer()\n norm.normalize(opts.background, opts.input_video, opts.output_video)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#! /usr/bin/env python\n\"\"\"\nNormalizes a vidoe by dividing against it's background.\nSee: BackgroundExtractor.py to get the background of a video.\n\nUSING:\n\n As a command line utility:\n \n $ Normalizer.py input_video input_image output_video\n \n As a module:\n \n from Normalizer import Normalizer\n norm = Normalizer(\"input_video.avi\", input_image, \"output_video.avi\")\n norm.normalize()\n \n\nAuthor: Martin Humphreys\n\"\"\"\n\nfrom argparse import ArgumentParser\nimport numpy as np\nimport os\nimport cv2\n\n\n\nclass Normalizer:\n\n def __init__(self):\n pass\n \n def imageFromArg(self, image):\n if isinstance(image, (str, unicode)):\n return cv2.imread(image, 0)\n else:\n return image\n \n def videoReaderFromArg(self, video):\n if isinstance(video, (str, unicode)):\n vc = cv2.VideoCapture(video)\n else:\n vc = video\n return vc\n \n def normalize(self, background, in_video, out_video):\n vc = self.videoReaderFromArg(in_video)\n frames = int(vc.get(cv2.CAP_PROP_FRAME_COUNT))\n fps = float(vc.get(cv2.CAP_PROP_FPS)) \n if fps == float('inf'):\n fps = 300\n width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fourcc = int(vc.get(cv2.CAP_PROP_FOURCC))\n vw = cv2.VideoWriter(out_video, fourcc, fps, (width, height), False)\n \n self.normalizeVideo(background, vc, vw)\n \n \n def normalizeVideo(self, background, video_reader, video_writer):\n f = 1\n while(True):\n ret, frame = video_reader.read()\n \n if not ret:\n break;\n else:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n f += 1\n normal_frame = self.normalizeFrame(background, frame)\n video_writer.write(normal_frame)\n \n def normalizeFrame(self, background, frame):\n \n \n \n if callable(background):\n bg = background(frame)\n else:\n bg = self.imageFromArg(background)\n \n \n a = frame.astype('float')\n a = self.transformRange(a, 0, 255, 1, 255)\n \n \n b = bg.astype('float')\n b = self.transformRange(b, 0, 255, 1, 255)\n \n \n c = 
a/((b+1)/256)\n d = c*(c < 255)+255*np.ones(np.shape(c))*(c > 255)\n \n return d.astype('uint8') \n \n def transformRange(self, value, oldmin, oldmax, newmin, newmax):\n return (((value - oldmin) * (newmax - newmin)) / (oldmax - oldmin)) + newmin\n \ndef build_parser():\n parser = ArgumentParser()\n parser.add_argument('input_video', help='video to process')\n parser.add_argument('background', help='background image')\n parser.add_argument('output_video', help='file to save normalized video to')\n return parser\n\n\ndef main():\n parser = build_parser()\n opts = parser.parse_args()\n if not os.path.isfile(opts.input_video):\n parser.error(\"Video file %s does not exist.\" % opts.input_video)\n if not os.path.isfile(opts.background):\n parser.error(\"Image file %s does not exist.\" % opts.background)\n norm = Normalizer()\n norm.normalize(opts.background, opts.input_video, opts.output_video)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
5,
7,
10,
11,
13
]
}
|
[
5,
7,
10,
11,
13
] |
<|reserved_special_token_0|>
def RectBivariateSplineZero(y1, x1, map1, kx=1, ky=1):
    """Fit a RectBivariateSpline to map1 sampled on the (y1, x1) grid.

    NOTE(review): the first statement returns immediately, so everything
    below it is unreachable.  The dead code appears to pad the grid by one
    linearly-extrapolated sample on each edge with zero map values before
    fitting -- confirm whether the early return is a deliberate switch
    back to the plain spline.
    """
    return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)
    # --- unreachable below: zero-padded variant kept for reference ---
    y2 = numpy.zeros(numpy.size(y1) + 2)
    y2[1:-1] = y1
    y2[0] = 2 * y2[1] - y2[2]
    y2[-1] = 2 * y2[-2] - y2[-3]
    x2 = numpy.zeros(numpy.size(x1) + 2)
    x2[1:-1] = x1
    x2[0] = 2 * x2[1] - x2[2]
    x2[-1] = 2 * x2[-2] - x2[-3]
    map2 = numpy.zeros((numpy.size(y1) + 2, numpy.size(x1) + 2))
    map2[1:-1, 1:-1] = map1
    return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)
class EmptyClass:
    """Bare namespace container; callers attach attributes dynamically."""
    pass
class SpectralEnergyDistribution:
    """Source spectrum; Nlambda(lambda_) gives the relative photon density.

    type: SED family tag; only 'BB' (blackbody) is implemented.
    info: for 'BB', [temperature_K, amplitude]; deep-copied so the
          caller's list can be mutated safely afterwards.
    """

    def __init__(self, type, info):
        self.type = type
        self.info = copy.deepcopy(info)

    def Nlambda(self, lambda_):
        """Photon spectral density at wavelength lambda_ (presumably
        microns -- confirm against callers).

        Raises ValueError for an unknown SED type instead of calling
        exit(), so the failure is catchable and testable in library use.
        """
        if self.type != 'BB':
            raise ValueError('Invalid SED type: {!r}'.format(self.type))
        T = self.info[0]
        x = 14387.769 / lambda_ / T
        # Planck-law shape written as exp(-x)/(1-exp(-x)) so large x
        # cannot overflow numpy.exp.
        return (2 / lambda_ ** 4 * 299792458000000.0 * 1000000000000.0 *
            numpy.exp(-x) / (1.0 - numpy.exp(-x)) * self.info[1])
class Filter:
    """Bandpass filter; Tlambda(lambda_) returns the transmission.

    type 'STH'   : soft top-hat with 2% tanh edges; info = [lmin, lmax].
    type 'interp': tabulated curve; info[:, 0] = wavelength, info[:, 1] = T.
    """

    def __init__(self, type, info):
        self.type = type
        self.info = copy.deepcopy(info)  # defensive copy of caller's table

    def Tlambda(self, lambda_):
        """Transmission at wavelength lambda_ (same units as the table).

        Raises ValueError for an unknown filter type instead of calling
        exit(), so the failure is catchable and testable.
        """
        if self.type == 'STH':
            lmin = self.info[0]
            dlmin = lmin * 0.02  # soft-edge width: 2% of the cut-on
            lmax = self.info[1]
            dlmax = lmax * 0.02
            return (numpy.tanh((lambda_ - lmin) / dlmin) - numpy.tanh((
                lambda_ - lmax) / dlmax)) / 2.0
        elif self.type == 'interp':
            return numpy.interp(lambda_, self.info[:, 0], self.info[:, 1])
        raise ValueError('Invalid filter type: {!r}'.format(self.type))
<|reserved_special_token_0|>
def onescut(n):
    """Normalized trapezoid-rule weights: n+1 samples with the two
    endpoints halved, summing to exactly one."""
    weights = numpy.full(n + 1, 1.0)
    weights[0] = 0.5
    weights[-1] = 0.5
    return weights / n
def gq_weights(sed, filter, nOrder, wlrange):
lmin = wlrange[0]
lmax = wlrange[1]
npts = wlrange[2]
x = numpy.linspace(lmin, lmax, npts)
c = numpy.zeros(npts)
for i in range(npts):
c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])
o = numpy.ones(npts)
I = numpy.zeros(2 * nOrder)
lctr = numpy.mean(x)
for k in range(2 * nOrder):
I[k] = numpy.sum(o * (x - lctr) ** k * c)
coef = numpy.zeros(nOrder + 1)
coef[0] = 1.0
A = numpy.zeros((nOrder, nOrder))
for k in range(nOrder):
for j in range(nOrder):
A[k, j] = I[j + k]
coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]
p = numpy.poly1d(coef)
xroot = numpy.sort(numpy.real(p.r))
wroot = numpy.zeros_like(xroot)
pprime = numpy.polyder(p)
for i in range(nOrder):
px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i + 1:])), r=True
)
wroot[i] = numpy.sum(px.c[::-1] * I[:nOrder]) / pprime(xroot[i])
xroot = xroot + lctr
return xroot, wroot
<|reserved_special_token_0|>
def postage_stamp(sed, filt, scanum, pos, offsets, addInfo):
    """Bin the oversampled PSF into a psSize x psSize detector stamp.

    addInfo flags (optional, detected via hasattr):
      F             : flux scale applied to the oversampled stamp (required).
      FastMode      : halve the oversampled grid size N.
      vtpe          : add a vertical trailed-pixel term from the row below.
      many          : build 25 stamps on a 5x5 grid of one-sample offsets,
                      with addInfo.force_ov as the oversampling factor.
      bfe           : apply a brighter-fatter charge-shift correction
                      (bfe_a / bfe_aplus set the coefficients); cannot be
                      combined with 'many'.
      bfe_overwrite : compute shifts from addInfo.stamp_in and return only
                      the correction term.
    """
    N = N_STD
    ov = OV_STD
    if hasattr(addInfo, 'many'):
        ov = addInfo.force_ov
    if hasattr(addInfo, 'FastMode'):
        if addInfo.FastMode:
            N = N // 2
    bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo
        ) * addInfo.F
    # plain binning: sum each ov x ov block of the oversampled image
    out = numpy.zeros((psSize, psSize))
    for i in range(psSize):
        x = N // 2 + (i - psSize // 2) * ov
        for j in range(psSize):
            y = N // 2 + (j - psSize // 2) * ov
            out[j, i] += numpy.sum(bigStamp[y:y + ov, x:x + ov])
            if hasattr(addInfo, 'vtpe'):
                out[j, i] += addInfo.vtpe * numpy.sum(bigStamp[y + ov:y + 2 *
                    ov, x:x + ov])
    if hasattr(addInfo, 'many'):
        # 25 stamps shifted by (dx, dy) in {-2..2} oversampled samples
        out = numpy.zeros((25, psSize, psSize))
        for i in range(psSize):
            x = N // 2 + (i - psSize // 2) * ov
            for j in range(psSize):
                y = N // 2 + (j - psSize // 2) * ov
                for k in range(25):
                    dy = k % 5 - 2
                    dx = k // 5 - 2
                    out[k, j, i] += numpy.sum(bigStamp[y + dy:y + dy + ov,
                        x + dx:x + dx + ov])
    if hasattr(addInfo, 'bfe'):
        if hasattr(addInfo, 'many'):
            print('Error -- cannot do both bfe and many in postage_stamp')
            exit()
        dout = numpy.zeros_like(out)
        # horizontal pass: move charge across vertical pixel boundaries
        ah = 0
        if hasattr(addInfo, 'bfe_a'):
            ah += addInfo.bfe_a
        if hasattr(addInfo, 'bfe_aplus'):
            ah += addInfo.bfe_aplus
        for i in range(psSize - 1):
            x = N // 2 + (i - psSize // 2) * ov
            for j in range(psSize):
                y = N // 2 + (j - psSize // 2) * ov
                shift = ov * ah * (out[j, i + 1] - out[j, i]) / 2.0
                if hasattr(addInfo, 'bfe_overwrite'):
                    shift = ov * ah * (addInfo.stamp_in[j, i + 1] - addInfo
                        .stamp_in[j, i]) / 2.0
                # mean flux straddling the boundary column
                mflux = numpy.sum(bigStamp[y:y + ov, x + ov - 1:x + ov + 1]
                    ) / 2.0
                dout[j, i] += shift * mflux
                dout[j, i + 1] -= shift * mflux
        # vertical pass: note bfe_aplus enters with opposite sign
        av = 0
        if hasattr(addInfo, 'bfe_a'):
            av += addInfo.bfe_a
        if hasattr(addInfo, 'bfe_aplus'):
            av -= addInfo.bfe_aplus
        for i in range(psSize):
            x = N // 2 + (i - psSize // 2) * ov
            for j in range(psSize - 1):
                y = N // 2 + (j - psSize // 2) * ov
                shift = ov * av * (out[j + 1, i] - out[j, i]) / 2.0
                if hasattr(addInfo, 'bfe_overwrite'):
                    shift = ov * av * (addInfo.stamp_in[j + 1, i] - addInfo
                        .stamp_in[j, i]) / 2.0
                mflux = numpy.sum(bigStamp[y + ov - 1:y + ov + 1, x:x + ov]
                    ) / 2.0
                dout[j, i] += shift * mflux
                dout[j + 1, i] -= shift * mflux
        out += dout
        if hasattr(addInfo, 'bfe_overwrite'):
            out = dout
    return out
def psfmoments(sed, filt, scanum, pos, offsets, addInfo):
    """Adaptive Gaussian-weighted moments of the rendered PSF.

    Renders the oversampled PSF with forced zero centroid and unit flux,
    box-car convolves it to the pixel response, then runs a fixed 256
    damped fixed-point iterations of an adaptive-moments fit.  Returns
    [amplitude, xbar/ov, ybar/ov, (Mxx+Myy)/ov^2,
     (Mxx-Myy)/(Mxx+Myy), 2*Mxy/(Mxx+Myy)].
    """
    N = N_STD
    ov = OV_STD
    if hasattr(addInfo, 'many'):
        ov = addInfo.force_ov
    if hasattr(addInfo, 'FastMode'):
        if addInfo.FastMode:
            N = N // 2
    # render with neutral centering/normalization regardless of caller flags
    addInfoX = copy.deepcopy(addInfo)
    addInfoX.ctr = numpy.zeros(2)
    addInfoX.F = 1.0
    bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)
    # pixel response: average over an ov x ov box
    bigStamp = convolve(bigStamp, numpy.ones((ov, ov)), mode='full', method
        ='direct') / ov ** 2
    Np = N + ov - 1
    # mom = [amp, xbar, ybar, Mxx, Mxy, Myy]; start round with sigma = 2*ov
    mom = numpy.asarray([1, 0, 0, 4 * ov ** 2, 0, 4 * ov ** 2]).astype(numpy
        .float64)
    newmom = numpy.zeros_like(mom)
    con = 0.5  # damping factor for the fixed-point update
    xx1 = numpy.tile(numpy.linspace(-(Np - 1) / 2.0, (Np - 1) / 2.0, Np), (
        Np, 1))
    yy1 = numpy.copy(xx1.T)
    # NOTE: fixed iteration count, no convergence test; 'iter' shadows the
    # builtin but is unused inside the loop.
    for iter in range(256):
        det = mom[3] * mom[5] - mom[4] ** 2
        xx = xx1 - mom[1]
        yy = yy1 - mom[2]
        G = numpy.exp((-mom[5] * xx ** 2 + 2 * mom[4] * xx * yy - mom[3] *
            yy ** 2) / 2.0 / det) * bigStamp
        newmom[0] = numpy.sum(G)
        newmom[1] = numpy.sum(G * xx)
        newmom[2] = numpy.sum(G * yy)
        newmom[3] = numpy.sum(G * xx ** 2)
        newmom[4] = numpy.sum(G * xx * yy)
        newmom[5] = numpy.sum(G * yy ** 2)
        mom[0] = 2 * newmom[0]
        err = newmom[1:] / newmom[0]
        err[-3:] -= mom[-3:] / 2.0
        mom[1:] += err * con
    return numpy.array([mom[0], mom[1] / ov, mom[2] / ov, (mom[3] + mom[5]) /
        ov ** 2, (mom[3] - mom[5]) / (mom[3] + mom[5]), 2 * mom[4] / (mom[3
        ] + mom[5])])
def chi2_postage_stamp(obs, theory, var):
    """Poisson deviance (twice the log-likelihood ratio) between an
    observed stamp and a model stamp, each offset by the variance map
    var; the observed side is floored at 1e-24 to keep the log finite."""
    observed = numpy.maximum(obs + var, 1e-24)
    model = theory + var
    per_pixel = model - observed - observed * numpy.log(model / observed)
    return 2 * numpy.sum(per_pixel)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def RectBivariateSplineZero(y1, x1, map1, kx=1, ky=1):
    """Fit a RectBivariateSpline to map1 sampled on the (y1, x1) grid.

    NOTE(review): the first statement returns immediately, so everything
    below it is unreachable.  The dead code appears to pad the grid by one
    linearly-extrapolated sample on each edge with zero map values before
    fitting -- confirm whether the early return is a deliberate switch
    back to the plain spline.
    """
    return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)
    # --- unreachable below: zero-padded variant kept for reference ---
    y2 = numpy.zeros(numpy.size(y1) + 2)
    y2[1:-1] = y1
    y2[0] = 2 * y2[1] - y2[2]
    y2[-1] = 2 * y2[-2] - y2[-3]
    x2 = numpy.zeros(numpy.size(x1) + 2)
    x2[1:-1] = x1
    x2[0] = 2 * x2[1] - x2[2]
    x2[-1] = 2 * x2[-2] - x2[-3]
    map2 = numpy.zeros((numpy.size(y1) + 2, numpy.size(x1) + 2))
    map2[1:-1, 1:-1] = map1
    return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)
class EmptyClass:
    """Bare namespace container; callers attach attributes dynamically."""
    pass
class SpectralEnergyDistribution:
    """Source spectrum; Nlambda(lambda_) gives the relative photon
    density.  Only the 'BB' (blackbody) type is implemented."""

    def __init__(self, type, info):
        self.type = type
        self.info = copy.deepcopy(info)

    def Nlambda(self, lambda_):
        """Photon spectral density at wavelength lambda_."""
        # Guard clause: anything but a blackbody is a hard error.
        if self.type != 'BB':
            print('ERROR: Invalid SED type')
            exit()
        T = self.info[0]
        x = 14387.769 / lambda_ / T
        boltz = numpy.exp(-x) / (1.0 - numpy.exp(-x))
        prefac = 2 / lambda_ ** 4 * 299792458000000.0 * 1000000000000.0
        return prefac * boltz * self.info[1]
class Filter:
    """Bandpass filter.  'STH' is a soft-edged top-hat over
    [info[0], info[1]]; 'interp' linearly interpolates the tabulated
    curve info[:, 0] -> info[:, 1]."""

    def __init__(self, type, info):
        self.type = type
        self.info = copy.deepcopy(info)

    def Tlambda(self, lambda_):
        """Transmission at wavelength lambda_."""
        if self.type == 'interp':
            return numpy.interp(lambda_, self.info[:, 0], self.info[:, 1])
        if self.type == 'STH':
            lo = self.info[0]
            hi = self.info[1]
            # tanh edges whose widths are 2% of each cutoff wavelength
            rise = numpy.tanh((lambda_ - lo) / (lo * 0.02))
            fall = numpy.tanh((lambda_ - hi) / (hi * 0.02))
            return (rise - fall) / 2.0
        print('ERROR: Invalid filter type')
        exit()
<|reserved_special_token_0|>
def onescut(n):
    """Return n+1 trapezoid-rule weights that sum to one: interior
    samples get 1/n, the two endpoints get half that."""
    out = numpy.ones(n + 1) / n
    out[0] *= 0.5
    out[-1] *= 0.5
    return out
def gq_weights(sed, filter, nOrder, wlrange):
    """Gauss-quadrature nodes/weights for the SED x filter weight function.

    Builds moments I_k of c(lambda) = sed.Nlambda * filter.Tlambda sampled
    on npts points, solves for the degree-nOrder orthogonal polynomial,
    and returns its roots as nodes plus matching quadrature weights.

    wlrange: [lmin, lmax, npts].
    Returns (xroot, wroot), each of length nOrder.
    """
    lmin = wlrange[0]
    lmax = wlrange[1]
    npts = wlrange[2]
    x = numpy.linspace(lmin, lmax, npts)
    c = numpy.zeros(npts)
    for i in range(npts):
        c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])
    o = numpy.ones(npts)
    I = numpy.zeros(2 * nOrder)
    # moments are taken about the interval center to reduce conditioning
    lctr = numpy.mean(x)
    for k in range(2 * nOrder):
        I[k] = numpy.sum(o * (x - lctr) ** k * c)
    # Hankel system A * (low-order coefs) = -I[nOrder:] gives the monic
    # orthogonal polynomial; coef is highest-degree-first for poly1d.
    coef = numpy.zeros(nOrder + 1)
    coef[0] = 1.0
    A = numpy.zeros((nOrder, nOrder))
    for k in range(nOrder):
        for j in range(nOrder):
            A[k, j] = I[j + k]
    coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]
    p = numpy.poly1d(coef)
    xroot = numpy.sort(numpy.real(p.r))  # quadrature nodes (still centered)
    wroot = numpy.zeros_like(xroot)
    pprime = numpy.polyder(p)
    for i in range(nOrder):
        # Lagrange-style weight: polynomial through the other roots,
        # contracted against the low-order moments, normalized by p'(x_i).
        px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i + 1:])), r=True
            )
        wroot[i] = numpy.sum(px.c[::-1] * I[:nOrder]) / pprime(xroot[i])
    xroot = xroot + lctr  # shift nodes back to absolute wavelength
    return xroot, wroot
<|reserved_special_token_0|>
def oversamp_psf(sed, filt, ovsamp, Nstep, scanum, pos, offsets, addInfo):
    """Render an Nstep x Nstep PSF oversampled by ovsamp for one SCA.

    Bilinearly interpolates the four reference Zernike vectors for chip
    scanum to the field position pos, applies optional wavefront/jitter
    offsets (offsets.par), selects the bandpass for filt (one of
    'K','F','H','W','J','Y','Z','R'; anything else prints an error and
    exits), renders the polychromatic PSF, and finally applies a Gaussian
    jitter kernel in Fourier space.
    """
    parOn = False
    if hasattr(offsets, 'par'):
        parOn = True
    # four reference Zernike rows for this chip, bilinearly weighted
    ZR = ZernRef.data[4 * (scanum - 1):4 * scanum, :]
    wt_L = 0.5 - pos[0] / sca.size
    wt_R = 0.5 + pos[0] / sca.size
    wt_B = 0.5 - pos[1] / sca.size
    wt_T = 0.5 + pos[1] / sca.size
    psi = wt_T * wt_L * ZR[0, :] + wt_B * wt_L * ZR[1, :] + wt_B * wt_R * ZR[
        2, :] + wt_T * wt_R * ZR[3, :]
    xf = sca.x[scanum - 1] + pos[0]
    yf = sca.y[scanum - 1] + pos[1]
    if parOn:
        # additive wavefront offsets plus a focus gradient across the field
        psi[3] += offsets.par[offset_index.foc]
        psi[4] += offsets.par[offset_index.astig2]
        psi[5] += offsets.par[offset_index.astig1]
        psi[6] += offsets.par[offset_index.coma2]
        psi[7] += offsets.par[offset_index.coma1]
        psi[3] += (offsets.par[offset_index.focg1] * xf + offsets.par[
            offset_index.focg2] * yf) / sca.scale
    # pupil sampling scale per micron of wavelength (0.11 arcsec pixels)
    scale_1um = ovsamp / (0.11 * numpy.pi / 648000) / maskfiles.D
    if filt == 'K':
        filter = Filter('STH', [1.95, 2.3])
    elif filt == 'F':
        filter = Filter('interp', FilterData[:, (0, 7)])
    elif filt == 'H':
        filter = Filter('interp', FilterData[:, (0, 6)])
    elif filt == 'W':
        filter = Filter('interp', FilterData[:, (0, 5)])
    elif filt == 'J':
        filter = Filter('interp', FilterData[:, (0, 4)])
    elif filt == 'Y':
        filter = Filter('interp', FilterData[:, (0, 3)])
    elif filt == 'Z':
        filter = Filter('interp', FilterData[:, (0, 2)])
    elif filt == 'R':
        filter = Filter('interp', FilterData[:, (0, 1)])
    else:
        print('Error: unknown filter')
        exit()
    # transmission-weighted mean scale over 0.4-2.5 (presumably microns)
    la = numpy.linspace(0.4, 2.5, 2101)
    fla = numpy.zeros(2101)
    for i in range(2101):
        fla[i] = filter.Tlambda(la[i])
    scale = scale_1um * numpy.sum(la * fla) / numpy.sum(fla)
    # pick the coarsest stored pupil mask that still resolves Nstep
    mask = EmptyClass()
    mask.N = 1
    imk = 0
    while (imk < maskfiles.n_lores - 1 and Nstep / scale < maskfiles.N_in /
        2 ** (imk + 1)):
        imk += 1
    if filt == 'F' or filt == 'K':
        mask.spline = maskfiles.i_full[scanum - 1 + maskfiles.nSCA * imk]
    else:
        mask.spline = maskfiles.i_rim[scanum - 1 + maskfiles.nSCA * imk]
    if hasattr(addInfo, 'ctr'):
        # fold the requested centroid into psi[1:3] (presumably the Noll
        # tip/tilt terms -- confirm against zernike_map_noll)
        d = 0.5 * (1 - 1 / ovsamp)
        psi[1:3] -= (addInfo.ctr + d) * ovsamp / scale_1um / 4.0
    output = poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo)
    # Gaussian jitter covariance, optionally perturbed via offsets.par
    Cxx = Cyy = 0.09
    Cxy = 0.0
    if parOn:
        Cxx = 0.09 + offsets.par[offset_index.jxx]
        Cxy = offsets.par[offset_index.jxy]
        Cyy = 0.09 + offsets.par[offset_index.jyy]
    # apply jitter as a Gaussian MTF in Fourier space
    output_fft = numpy.fft.fft2(output)
    kx = numpy.zeros((Nstep, Nstep))
    ky = numpy.zeros((Nstep, Nstep))
    # negative i indexes from the array end, building |k| in FFT order
    for i in range(-Nstep // 2, Nstep // 2):
        kx[:, i] = abs(i)
        ky[i, :] = abs(i)
    kx *= 2.0 * numpy.pi * ovsamp / Nstep
    ky *= 2.0 * numpy.pi * ovsamp / Nstep
    output_fft = output_fft * numpy.exp(-Cxx * kx ** 2 / 2.0 - Cyy * ky **
        2 / 2.0 - Cxy * kx * ky)
    output = numpy.real(numpy.fft.ifft2(output_fft))
    return output
<|reserved_special_token_0|>
def postage_stamp(sed, filt, scanum, pos, offsets, addInfo):
    """Bin the oversampled PSF into a psSize x psSize detector stamp.

    addInfo flags (optional, detected via hasattr):
      F             : flux scale applied to the oversampled stamp (required).
      FastMode      : halve the oversampled grid size N.
      vtpe          : add a vertical trailed-pixel term from the row below.
      many          : build 25 stamps on a 5x5 grid of one-sample offsets,
                      with addInfo.force_ov as the oversampling factor.
      bfe           : apply a brighter-fatter charge-shift correction
                      (bfe_a / bfe_aplus set the coefficients); cannot be
                      combined with 'many'.
      bfe_overwrite : compute shifts from addInfo.stamp_in and return only
                      the correction term.
    """
    N = N_STD
    ov = OV_STD
    if hasattr(addInfo, 'many'):
        ov = addInfo.force_ov
    if hasattr(addInfo, 'FastMode'):
        if addInfo.FastMode:
            N = N // 2
    bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo
        ) * addInfo.F
    # plain binning: sum each ov x ov block of the oversampled image
    out = numpy.zeros((psSize, psSize))
    for i in range(psSize):
        x = N // 2 + (i - psSize // 2) * ov
        for j in range(psSize):
            y = N // 2 + (j - psSize // 2) * ov
            out[j, i] += numpy.sum(bigStamp[y:y + ov, x:x + ov])
            if hasattr(addInfo, 'vtpe'):
                out[j, i] += addInfo.vtpe * numpy.sum(bigStamp[y + ov:y + 2 *
                    ov, x:x + ov])
    if hasattr(addInfo, 'many'):
        # 25 stamps shifted by (dx, dy) in {-2..2} oversampled samples
        out = numpy.zeros((25, psSize, psSize))
        for i in range(psSize):
            x = N // 2 + (i - psSize // 2) * ov
            for j in range(psSize):
                y = N // 2 + (j - psSize // 2) * ov
                for k in range(25):
                    dy = k % 5 - 2
                    dx = k // 5 - 2
                    out[k, j, i] += numpy.sum(bigStamp[y + dy:y + dy + ov,
                        x + dx:x + dx + ov])
    if hasattr(addInfo, 'bfe'):
        if hasattr(addInfo, 'many'):
            print('Error -- cannot do both bfe and many in postage_stamp')
            exit()
        dout = numpy.zeros_like(out)
        # horizontal pass: move charge across vertical pixel boundaries
        ah = 0
        if hasattr(addInfo, 'bfe_a'):
            ah += addInfo.bfe_a
        if hasattr(addInfo, 'bfe_aplus'):
            ah += addInfo.bfe_aplus
        for i in range(psSize - 1):
            x = N // 2 + (i - psSize // 2) * ov
            for j in range(psSize):
                y = N // 2 + (j - psSize // 2) * ov
                shift = ov * ah * (out[j, i + 1] - out[j, i]) / 2.0
                if hasattr(addInfo, 'bfe_overwrite'):
                    shift = ov * ah * (addInfo.stamp_in[j, i + 1] - addInfo
                        .stamp_in[j, i]) / 2.0
                # mean flux straddling the boundary column
                mflux = numpy.sum(bigStamp[y:y + ov, x + ov - 1:x + ov + 1]
                    ) / 2.0
                dout[j, i] += shift * mflux
                dout[j, i + 1] -= shift * mflux
        # vertical pass: note bfe_aplus enters with opposite sign
        av = 0
        if hasattr(addInfo, 'bfe_a'):
            av += addInfo.bfe_a
        if hasattr(addInfo, 'bfe_aplus'):
            av -= addInfo.bfe_aplus
        for i in range(psSize):
            x = N // 2 + (i - psSize // 2) * ov
            for j in range(psSize - 1):
                y = N // 2 + (j - psSize // 2) * ov
                shift = ov * av * (out[j + 1, i] - out[j, i]) / 2.0
                if hasattr(addInfo, 'bfe_overwrite'):
                    shift = ov * av * (addInfo.stamp_in[j + 1, i] - addInfo
                        .stamp_in[j, i]) / 2.0
                mflux = numpy.sum(bigStamp[y + ov - 1:y + ov + 1, x:x + ov]
                    ) / 2.0
                dout[j, i] += shift * mflux
                dout[j + 1, i] -= shift * mflux
        out += dout
        if hasattr(addInfo, 'bfe_overwrite'):
            out = dout
    return out
def psfmoments(sed, filt, scanum, pos, offsets, addInfo):
    """Adaptive Gaussian-weighted moments of the rendered PSF.

    Renders the oversampled PSF with forced zero centroid and unit flux,
    box-car convolves it to the pixel response, then runs a fixed 256
    damped fixed-point iterations of an adaptive-moments fit.  Returns
    [amplitude, xbar/ov, ybar/ov, (Mxx+Myy)/ov^2,
     (Mxx-Myy)/(Mxx+Myy), 2*Mxy/(Mxx+Myy)].
    """
    N = N_STD
    ov = OV_STD
    if hasattr(addInfo, 'many'):
        ov = addInfo.force_ov
    if hasattr(addInfo, 'FastMode'):
        if addInfo.FastMode:
            N = N // 2
    # render with neutral centering/normalization regardless of caller flags
    addInfoX = copy.deepcopy(addInfo)
    addInfoX.ctr = numpy.zeros(2)
    addInfoX.F = 1.0
    bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)
    # pixel response: average over an ov x ov box
    bigStamp = convolve(bigStamp, numpy.ones((ov, ov)), mode='full', method
        ='direct') / ov ** 2
    Np = N + ov - 1
    # mom = [amp, xbar, ybar, Mxx, Mxy, Myy]; start round with sigma = 2*ov
    mom = numpy.asarray([1, 0, 0, 4 * ov ** 2, 0, 4 * ov ** 2]).astype(numpy
        .float64)
    newmom = numpy.zeros_like(mom)
    con = 0.5  # damping factor for the fixed-point update
    xx1 = numpy.tile(numpy.linspace(-(Np - 1) / 2.0, (Np - 1) / 2.0, Np), (
        Np, 1))
    yy1 = numpy.copy(xx1.T)
    # NOTE: fixed iteration count, no convergence test; 'iter' shadows the
    # builtin but is unused inside the loop.
    for iter in range(256):
        det = mom[3] * mom[5] - mom[4] ** 2
        xx = xx1 - mom[1]
        yy = yy1 - mom[2]
        G = numpy.exp((-mom[5] * xx ** 2 + 2 * mom[4] * xx * yy - mom[3] *
            yy ** 2) / 2.0 / det) * bigStamp
        newmom[0] = numpy.sum(G)
        newmom[1] = numpy.sum(G * xx)
        newmom[2] = numpy.sum(G * yy)
        newmom[3] = numpy.sum(G * xx ** 2)
        newmom[4] = numpy.sum(G * xx * yy)
        newmom[5] = numpy.sum(G * yy ** 2)
        mom[0] = 2 * newmom[0]
        err = newmom[1:] / newmom[0]
        err[-3:] -= mom[-3:] / 2.0
        mom[1:] += err * con
    return numpy.array([mom[0], mom[1] / ov, mom[2] / ov, (mom[3] + mom[5]) /
        ov ** 2, (mom[3] - mom[5]) / (mom[3] + mom[5]), 2 * mom[4] / (mom[3
        ] + mom[5])])
def chi2_postage_stamp(obs, theory, var):
    """Poisson deviance (twice the log-likelihood ratio) between obs and
    theory, each offset by the variance map var; obs+var is floored at
    1e-24 to keep the logarithm finite."""
    obs2 = numpy.maximum(obs + var, 1e-24)
    return numpy.sum(theory + var - obs2 - obs2 * numpy.log((theory + var) /
        obs2)) * 2
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def RectBivariateSplineZero(y1, x1, map1, kx=1, ky=1):
    """Fit a RectBivariateSpline to map1 sampled on the (y1, x1) grid.

    NOTE(review): the first statement returns immediately, so everything
    below it is unreachable.  The dead code appears to pad the grid by one
    linearly-extrapolated sample on each edge with zero map values before
    fitting -- confirm whether the early return is a deliberate switch
    back to the plain spline.
    """
    return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)
    # --- unreachable below: zero-padded variant kept for reference ---
    y2 = numpy.zeros(numpy.size(y1) + 2)
    y2[1:-1] = y1
    y2[0] = 2 * y2[1] - y2[2]
    y2[-1] = 2 * y2[-2] - y2[-3]
    x2 = numpy.zeros(numpy.size(x1) + 2)
    x2[1:-1] = x1
    x2[0] = 2 * x2[1] - x2[2]
    x2[-1] = 2 * x2[-2] - x2[-3]
    map2 = numpy.zeros((numpy.size(y1) + 2, numpy.size(x1) + 2))
    map2[1:-1, 1:-1] = map1
    return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)
class EmptyClass:
    """Bare namespace container; callers attach attributes dynamically."""
    pass
class SpectralEnergyDistribution:
    """Source spectrum; Nlambda returns the relative photon density."""
    def __init__(self, type, info):
        # type: SED family tag (only 'BB' = blackbody is implemented).
        # info: for 'BB', [temperature_K, amplitude]; deep-copied so the
        # caller's list can be reused safely.
        self.type = type
        self.info = copy.deepcopy(info)
    def Nlambda(self, lambda_):
        """Photon spectral density at wavelength lambda_ (presumably
        microns -- confirm against callers)."""
        if self.type == 'BB':
            T = self.info[0]
            x = 14387.769 / lambda_ / T
            # Planck-law shape written as exp(-x)/(1-exp(-x)) so large x
            # cannot overflow numpy.exp.
            return (2 / lambda_ ** 4 * 299792458000000.0 * 1000000000000.0 *
                numpy.exp(-x) / (1.0 - numpy.exp(-x)) * self.info[1])
        else:
            print('ERROR: Invalid SED type')
            exit()
class Filter:
    """Bandpass filter; Tlambda gives the transmission at a wavelength.
    type 'STH'   : soft (tanh-edged) top-hat between info[0] and info[1].
    type 'interp': tabulated curve, info[:, 0]=wavelength, info[:, 1]=T.
    """
    def __init__(self, type, info):
        self.type = type
        self.info = copy.deepcopy(info)  # defensive copy of caller's table
    def Tlambda(self, lambda_):
        """Transmission at wavelength lambda_ (same units as the table)."""
        if self.type == 'STH':
            lmin = self.info[0]
            dlmin = lmin * 0.02  # soft-edge width: 2% of the cut-on
            lmax = self.info[1]
            dlmax = lmax * 0.02
            return (numpy.tanh((lambda_ - lmin) / dlmin) - numpy.tanh((
                lambda_ - lmax) / dlmax)) / 2.0
        elif self.type == 'interp':
            return numpy.interp(lambda_, self.info[:, 0], self.info[:, 1])
        else:
            print('ERROR: Invalid filter type')
            exit()
<|reserved_special_token_0|>
def test_zernike():
    """Diagnostic dump: print each of the 36 Noll Zernike basis maps
    sampled on a 5x5 grid (scale N/(N-1) is passed to zernike_map_noll;
    presumably this puts the outer samples on the unit circle --
    confirm against that function)."""
    for k in range(36):
        psi = numpy.zeros(36)
        psi[k] = 1
        N = 5
        M = zernike_map_noll(psi, N, N / (N - 1))
        print(' *** Zernike {:2d} ***'.format(k + 1))
        for j in range(N):
            out = ''
            for i in range(N):
                out = out + ' {:10.5f}'.format(M[j, i])
            print(out)
        print('')
<|reserved_special_token_0|>
def onescut(n):
    """Trapezoid-rule weights: n+1 values summing to 1 (endpoints halved)."""
    array = numpy.ones(n + 1)
    array[0] = array[-1] = 0.5
    return array / n
def gq_weights(sed, filter, nOrder, wlrange):
    """Gauss-quadrature nodes/weights for the SED x filter weight function.

    Builds moments I_k of c(lambda) = sed.Nlambda * filter.Tlambda sampled
    on npts points, solves for the degree-nOrder orthogonal polynomial,
    and returns its roots as nodes plus matching quadrature weights.

    wlrange: [lmin, lmax, npts].
    Returns (xroot, wroot), each of length nOrder.
    """
    lmin = wlrange[0]
    lmax = wlrange[1]
    npts = wlrange[2]
    x = numpy.linspace(lmin, lmax, npts)
    c = numpy.zeros(npts)
    for i in range(npts):
        c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])
    o = numpy.ones(npts)
    I = numpy.zeros(2 * nOrder)
    # moments are taken about the interval center to reduce conditioning
    lctr = numpy.mean(x)
    for k in range(2 * nOrder):
        I[k] = numpy.sum(o * (x - lctr) ** k * c)
    # Hankel system A * (low-order coefs) = -I[nOrder:] gives the monic
    # orthogonal polynomial; coef is highest-degree-first for poly1d.
    coef = numpy.zeros(nOrder + 1)
    coef[0] = 1.0
    A = numpy.zeros((nOrder, nOrder))
    for k in range(nOrder):
        for j in range(nOrder):
            A[k, j] = I[j + k]
    coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]
    p = numpy.poly1d(coef)
    xroot = numpy.sort(numpy.real(p.r))  # quadrature nodes (still centered)
    wroot = numpy.zeros_like(xroot)
    pprime = numpy.polyder(p)
    for i in range(nOrder):
        # Lagrange-style weight: polynomial through the other roots,
        # contracted against the low-order moments, normalized by p'(x_i).
        px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i + 1:])), r=True
            )
        wroot[i] = numpy.sum(px.c[::-1] * I[:nOrder]) / pprime(xroot[i])
    xroot = xroot + lctr  # shift nodes back to absolute wavelength
    return xroot, wroot
<|reserved_special_token_0|>
def oversamp_psf(sed, filt, ovsamp, Nstep, scanum, pos, offsets, addInfo):
    """Render an Nstep x Nstep PSF oversampled by ovsamp for one SCA.

    Bilinearly interpolates the four reference Zernike vectors for chip
    scanum to the field position pos, applies optional wavefront/jitter
    offsets (offsets.par), selects the bandpass for filt (one of
    'K','F','H','W','J','Y','Z','R'; anything else prints an error and
    exits), renders the polychromatic PSF, and finally applies a Gaussian
    jitter kernel in Fourier space.
    """
    parOn = False
    if hasattr(offsets, 'par'):
        parOn = True
    # four reference Zernike rows for this chip, bilinearly weighted
    ZR = ZernRef.data[4 * (scanum - 1):4 * scanum, :]
    wt_L = 0.5 - pos[0] / sca.size
    wt_R = 0.5 + pos[0] / sca.size
    wt_B = 0.5 - pos[1] / sca.size
    wt_T = 0.5 + pos[1] / sca.size
    psi = wt_T * wt_L * ZR[0, :] + wt_B * wt_L * ZR[1, :] + wt_B * wt_R * ZR[
        2, :] + wt_T * wt_R * ZR[3, :]
    xf = sca.x[scanum - 1] + pos[0]
    yf = sca.y[scanum - 1] + pos[1]
    if parOn:
        # additive wavefront offsets plus a focus gradient across the field
        psi[3] += offsets.par[offset_index.foc]
        psi[4] += offsets.par[offset_index.astig2]
        psi[5] += offsets.par[offset_index.astig1]
        psi[6] += offsets.par[offset_index.coma2]
        psi[7] += offsets.par[offset_index.coma1]
        psi[3] += (offsets.par[offset_index.focg1] * xf + offsets.par[
            offset_index.focg2] * yf) / sca.scale
    # pupil sampling scale per micron of wavelength (0.11 arcsec pixels)
    scale_1um = ovsamp / (0.11 * numpy.pi / 648000) / maskfiles.D
    if filt == 'K':
        filter = Filter('STH', [1.95, 2.3])
    elif filt == 'F':
        filter = Filter('interp', FilterData[:, (0, 7)])
    elif filt == 'H':
        filter = Filter('interp', FilterData[:, (0, 6)])
    elif filt == 'W':
        filter = Filter('interp', FilterData[:, (0, 5)])
    elif filt == 'J':
        filter = Filter('interp', FilterData[:, (0, 4)])
    elif filt == 'Y':
        filter = Filter('interp', FilterData[:, (0, 3)])
    elif filt == 'Z':
        filter = Filter('interp', FilterData[:, (0, 2)])
    elif filt == 'R':
        filter = Filter('interp', FilterData[:, (0, 1)])
    else:
        print('Error: unknown filter')
        exit()
    # transmission-weighted mean scale over 0.4-2.5 (presumably microns)
    la = numpy.linspace(0.4, 2.5, 2101)
    fla = numpy.zeros(2101)
    for i in range(2101):
        fla[i] = filter.Tlambda(la[i])
    scale = scale_1um * numpy.sum(la * fla) / numpy.sum(fla)
    # pick the coarsest stored pupil mask that still resolves Nstep
    mask = EmptyClass()
    mask.N = 1
    imk = 0
    while (imk < maskfiles.n_lores - 1 and Nstep / scale < maskfiles.N_in /
        2 ** (imk + 1)):
        imk += 1
    if filt == 'F' or filt == 'K':
        mask.spline = maskfiles.i_full[scanum - 1 + maskfiles.nSCA * imk]
    else:
        mask.spline = maskfiles.i_rim[scanum - 1 + maskfiles.nSCA * imk]
    if hasattr(addInfo, 'ctr'):
        # fold the requested centroid into psi[1:3] (presumably the Noll
        # tip/tilt terms -- confirm against zernike_map_noll)
        d = 0.5 * (1 - 1 / ovsamp)
        psi[1:3] -= (addInfo.ctr + d) * ovsamp / scale_1um / 4.0
    output = poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo)
    # Gaussian jitter covariance, optionally perturbed via offsets.par
    Cxx = Cyy = 0.09
    Cxy = 0.0
    if parOn:
        Cxx = 0.09 + offsets.par[offset_index.jxx]
        Cxy = offsets.par[offset_index.jxy]
        Cyy = 0.09 + offsets.par[offset_index.jyy]
    # apply jitter as a Gaussian MTF in Fourier space
    output_fft = numpy.fft.fft2(output)
    kx = numpy.zeros((Nstep, Nstep))
    ky = numpy.zeros((Nstep, Nstep))
    # negative i indexes from the array end, building |k| in FFT order
    for i in range(-Nstep // 2, Nstep // 2):
        kx[:, i] = abs(i)
        ky[i, :] = abs(i)
    kx *= 2.0 * numpy.pi * ovsamp / Nstep
    ky *= 2.0 * numpy.pi * ovsamp / Nstep
    output_fft = output_fft * numpy.exp(-Cxx * kx ** 2 / 2.0 - Cyy * ky **
        2 / 2.0 - Cxy * kx * ky)
    output = numpy.real(numpy.fft.ifft2(output_fft))
    return output
<|reserved_special_token_0|>
def postage_stamp(sed, filt, scanum, pos, offsets, addInfo):
    """Bin the oversampled PSF into a psSize x psSize detector stamp.

    addInfo flags (optional, detected via hasattr):
      F             : flux scale applied to the oversampled stamp (required).
      FastMode      : halve the oversampled grid size N.
      vtpe          : add a vertical trailed-pixel term from the row below.
      many          : build 25 stamps on a 5x5 grid of one-sample offsets,
                      with addInfo.force_ov as the oversampling factor.
      bfe           : apply a brighter-fatter charge-shift correction
                      (bfe_a / bfe_aplus set the coefficients); cannot be
                      combined with 'many'.
      bfe_overwrite : compute shifts from addInfo.stamp_in and return only
                      the correction term.
    """
    N = N_STD
    ov = OV_STD
    if hasattr(addInfo, 'many'):
        ov = addInfo.force_ov
    if hasattr(addInfo, 'FastMode'):
        if addInfo.FastMode:
            N = N // 2
    bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo
        ) * addInfo.F
    # plain binning: sum each ov x ov block of the oversampled image
    out = numpy.zeros((psSize, psSize))
    for i in range(psSize):
        x = N // 2 + (i - psSize // 2) * ov
        for j in range(psSize):
            y = N // 2 + (j - psSize // 2) * ov
            out[j, i] += numpy.sum(bigStamp[y:y + ov, x:x + ov])
            if hasattr(addInfo, 'vtpe'):
                out[j, i] += addInfo.vtpe * numpy.sum(bigStamp[y + ov:y + 2 *
                    ov, x:x + ov])
    if hasattr(addInfo, 'many'):
        # 25 stamps shifted by (dx, dy) in {-2..2} oversampled samples
        out = numpy.zeros((25, psSize, psSize))
        for i in range(psSize):
            x = N // 2 + (i - psSize // 2) * ov
            for j in range(psSize):
                y = N // 2 + (j - psSize // 2) * ov
                for k in range(25):
                    dy = k % 5 - 2
                    dx = k // 5 - 2
                    out[k, j, i] += numpy.sum(bigStamp[y + dy:y + dy + ov,
                        x + dx:x + dx + ov])
    if hasattr(addInfo, 'bfe'):
        if hasattr(addInfo, 'many'):
            print('Error -- cannot do both bfe and many in postage_stamp')
            exit()
        dout = numpy.zeros_like(out)
        # horizontal pass: move charge across vertical pixel boundaries
        ah = 0
        if hasattr(addInfo, 'bfe_a'):
            ah += addInfo.bfe_a
        if hasattr(addInfo, 'bfe_aplus'):
            ah += addInfo.bfe_aplus
        for i in range(psSize - 1):
            x = N // 2 + (i - psSize // 2) * ov
            for j in range(psSize):
                y = N // 2 + (j - psSize // 2) * ov
                shift = ov * ah * (out[j, i + 1] - out[j, i]) / 2.0
                if hasattr(addInfo, 'bfe_overwrite'):
                    shift = ov * ah * (addInfo.stamp_in[j, i + 1] - addInfo
                        .stamp_in[j, i]) / 2.0
                # mean flux straddling the boundary column
                mflux = numpy.sum(bigStamp[y:y + ov, x + ov - 1:x + ov + 1]
                    ) / 2.0
                dout[j, i] += shift * mflux
                dout[j, i + 1] -= shift * mflux
        # vertical pass: note bfe_aplus enters with opposite sign
        av = 0
        if hasattr(addInfo, 'bfe_a'):
            av += addInfo.bfe_a
        if hasattr(addInfo, 'bfe_aplus'):
            av -= addInfo.bfe_aplus
        for i in range(psSize):
            x = N // 2 + (i - psSize // 2) * ov
            for j in range(psSize - 1):
                y = N // 2 + (j - psSize // 2) * ov
                shift = ov * av * (out[j + 1, i] - out[j, i]) / 2.0
                if hasattr(addInfo, 'bfe_overwrite'):
                    shift = ov * av * (addInfo.stamp_in[j + 1, i] - addInfo
                        .stamp_in[j, i]) / 2.0
                mflux = numpy.sum(bigStamp[y + ov - 1:y + ov + 1, x:x + ov]
                    ) / 2.0
                dout[j, i] += shift * mflux
                dout[j + 1, i] -= shift * mflux
        out += dout
        if hasattr(addInfo, 'bfe_overwrite'):
            out = dout
    return out
def psfmoments(sed, filt, scanum, pos, offsets, addInfo):
    """Adaptive Gaussian-weighted moments of the rendered PSF.

    Renders the oversampled PSF with forced zero centroid and unit flux,
    box-car convolves it to the pixel response, then runs a fixed 256
    damped fixed-point iterations of an adaptive-moments fit.  Returns
    [amplitude, xbar/ov, ybar/ov, (Mxx+Myy)/ov^2,
     (Mxx-Myy)/(Mxx+Myy), 2*Mxy/(Mxx+Myy)].
    """
    N = N_STD
    ov = OV_STD
    if hasattr(addInfo, 'many'):
        ov = addInfo.force_ov
    if hasattr(addInfo, 'FastMode'):
        if addInfo.FastMode:
            N = N // 2
    # render with neutral centering/normalization regardless of caller flags
    addInfoX = copy.deepcopy(addInfo)
    addInfoX.ctr = numpy.zeros(2)
    addInfoX.F = 1.0
    bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)
    # pixel response: average over an ov x ov box
    bigStamp = convolve(bigStamp, numpy.ones((ov, ov)), mode='full', method
        ='direct') / ov ** 2
    Np = N + ov - 1
    # mom = [amp, xbar, ybar, Mxx, Mxy, Myy]; start round with sigma = 2*ov
    mom = numpy.asarray([1, 0, 0, 4 * ov ** 2, 0, 4 * ov ** 2]).astype(numpy
        .float64)
    newmom = numpy.zeros_like(mom)
    con = 0.5  # damping factor for the fixed-point update
    xx1 = numpy.tile(numpy.linspace(-(Np - 1) / 2.0, (Np - 1) / 2.0, Np), (
        Np, 1))
    yy1 = numpy.copy(xx1.T)
    # NOTE: fixed iteration count, no convergence test; 'iter' shadows the
    # builtin but is unused inside the loop.
    for iter in range(256):
        det = mom[3] * mom[5] - mom[4] ** 2
        xx = xx1 - mom[1]
        yy = yy1 - mom[2]
        G = numpy.exp((-mom[5] * xx ** 2 + 2 * mom[4] * xx * yy - mom[3] *
            yy ** 2) / 2.0 / det) * bigStamp
        newmom[0] = numpy.sum(G)
        newmom[1] = numpy.sum(G * xx)
        newmom[2] = numpy.sum(G * yy)
        newmom[3] = numpy.sum(G * xx ** 2)
        newmom[4] = numpy.sum(G * xx * yy)
        newmom[5] = numpy.sum(G * yy ** 2)
        mom[0] = 2 * newmom[0]
        err = newmom[1:] / newmom[0]
        err[-3:] -= mom[-3:] / 2.0
        mom[1:] += err * con
    return numpy.array([mom[0], mom[1] / ov, mom[2] / ov, (mom[3] + mom[5]) /
        ov ** 2, (mom[3] - mom[5]) / (mom[3] + mom[5]), 2 * mom[4] / (mom[3
        ] + mom[5])])
def chi2_postage_stamp(obs, theory, var):
    """Poisson deviance (twice the log-likelihood ratio) between obs and
    theory, each offset by the variance map var; obs+var is floored at
    1e-24 to keep the logarithm finite."""
    obs2 = numpy.maximum(obs + var, 1e-24)
    return numpy.sum(theory + var - obs2 - obs2 * numpy.log((theory + var) /
        obs2)) * 2
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def RectBivariateSplineZero(y1, x1, map1, kx=1, ky=1):
    """Fit a RectBivariateSpline to map1 sampled on the (y1, x1) grid.

    NOTE(review): the first statement returns immediately, so everything
    below it is unreachable.  The dead code appears to pad the grid by one
    linearly-extrapolated sample on each edge with zero map values before
    fitting -- confirm whether the early return is a deliberate switch
    back to the plain spline.
    """
    return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)
    # --- unreachable below: zero-padded variant kept for reference ---
    y2 = numpy.zeros(numpy.size(y1) + 2)
    y2[1:-1] = y1
    y2[0] = 2 * y2[1] - y2[2]
    y2[-1] = 2 * y2[-2] - y2[-3]
    x2 = numpy.zeros(numpy.size(x1) + 2)
    x2[1:-1] = x1
    x2[0] = 2 * x2[1] - x2[2]
    x2[-1] = 2 * x2[-2] - x2[-3]
    map2 = numpy.zeros((numpy.size(y1) + 2, numpy.size(x1) + 2))
    map2[1:-1, 1:-1] = map1
    return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)
class EmptyClass:
    """Bare namespace container; callers attach attributes dynamically."""
    pass
class SpectralEnergyDistribution:
    """Source spectrum; Nlambda returns the relative photon density."""
    def __init__(self, type, info):
        # type: SED family tag (only 'BB' = blackbody is implemented).
        # info: for 'BB', [temperature_K, amplitude]; deep-copied so the
        # caller's list can be reused safely.
        self.type = type
        self.info = copy.deepcopy(info)
    def Nlambda(self, lambda_):
        """Photon spectral density at wavelength lambda_ (presumably
        microns -- confirm against callers)."""
        if self.type == 'BB':
            T = self.info[0]
            x = 14387.769 / lambda_ / T
            # Planck-law shape written as exp(-x)/(1-exp(-x)) so large x
            # cannot overflow numpy.exp.
            return (2 / lambda_ ** 4 * 299792458000000.0 * 1000000000000.0 *
                numpy.exp(-x) / (1.0 - numpy.exp(-x)) * self.info[1])
        else:
            print('ERROR: Invalid SED type')
            exit()
class Filter:
    """Bandpass filter; Tlambda gives the transmission at a wavelength.
    type 'STH'   : soft (tanh-edged) top-hat between info[0] and info[1].
    type 'interp': tabulated curve, info[:, 0]=wavelength, info[:, 1]=T.
    """
    def __init__(self, type, info):
        self.type = type
        self.info = copy.deepcopy(info)  # defensive copy of caller's table
    def Tlambda(self, lambda_):
        """Transmission at wavelength lambda_ (same units as the table)."""
        if self.type == 'STH':
            lmin = self.info[0]
            dlmin = lmin * 0.02  # soft-edge width: 2% of the cut-on
            lmax = self.info[1]
            dlmax = lmax * 0.02
            return (numpy.tanh((lambda_ - lmin) / dlmin) - numpy.tanh((
                lambda_ - lmax) / dlmax)) / 2.0
        elif self.type == 'interp':
            return numpy.interp(lambda_, self.info[:, 0], self.info[:, 1])
        else:
            print('ERROR: Invalid filter type')
            exit()
<|reserved_special_token_0|>
def make_mask_annulus(obs, Nstep, scale):
    """Binary pupil mask on an Nstep x Nstep grid: 1.0 where the scaled
    radius satisfies obs <= rho < 1 (an annulus with fractional central
    obscuration obs), 0.0 elsewhere."""
    coords = numpy.linspace(-1 + 1 / Nstep, 1 - 1 / Nstep, Nstep)
    xx, yy = numpy.meshgrid(coords, coords)
    rho = scale * numpy.hypot(xx, yy)
    inside = (rho >= obs) & (rho < 1)
    return inside.astype(numpy.float64)
def test_zernike():
    """Diagnostic dump: print each of the 36 Noll Zernike basis maps
    sampled on a 5x5 grid (scale N/(N-1) is passed to zernike_map_noll;
    presumably this puts the outer samples on the unit circle --
    confirm against that function)."""
    for k in range(36):
        psi = numpy.zeros(36)
        psi[k] = 1
        N = 5
        M = zernike_map_noll(psi, N, N / (N - 1))
        print(' *** Zernike {:2d} ***'.format(k + 1))
        for j in range(N):
            out = ''
            for i in range(N):
                out = out + ' {:10.5f}'.format(M[j, i])
            print(out)
        print('')
def mono_psf(psi, mask, scale, Nstep):
    """Monochromatic PSF: squared modulus of the FFT of the pupil field.

    psi   : Zernike coefficient vector (passed to zernike_map_noll).
    mask  : pupil description.  With attribute N it supplies a sampled
            amplitude (mask.spline, or mask.array fitted on the fly);
            otherwise a 32%-obscured annulus is used.
    scale : pupil-plane sampling scale at this wavelength.
    Returns an Nstep x Nstep image, quadrant-swapped (fftshift) and
    normalized to unit sum.
    """
    if hasattr(mask, 'N'):
        if hasattr(mask, 'spline'):
            interp_spline = mask.spline
        else:
            # fit a bilinear spline over the raw 2048^2 mask image
            N_in = 2048
            x_in = numpy.linspace(-1 + 1 / N_in, 1 - 1 / N_in, N_in)
            y_in = numpy.copy(x_in)
            interp_spline = RectBivariateSplineZero(y_in, x_in, mask.array,
                kx=1, ky=1)
        x2 = numpy.linspace(-1 + 1 / Nstep, 1 - 1 / Nstep, Nstep) * scale
        y2 = numpy.copy(x2)
        # sampled amplitude, clipped to the unit-circle support
        amplitude = interp_spline(y2, x2).astype(numpy.complex128
            ) * make_mask_annulus(0, Nstep, scale)
    else:
        amplitude = make_mask_annulus(0.32, Nstep, scale).astype(numpy.
            complex128)
    # wavefront phase from the Zernike expansion
    amplitude *= numpy.exp(2.0j * numpy.pi * zernike_map_noll(psi, Nstep,
        scale))
    amplitude = numpy.fft.ifft2(amplitude)
    power = numpy.abs(amplitude) ** 2
    # manual fftshift: swap the four quadrants to center the PSF
    newpower = numpy.zeros_like(power)
    newpower[Nstep // 2:Nstep, Nstep // 2:Nstep] = power[0:Nstep // 2, 0:
        Nstep // 2]
    newpower[Nstep // 2:Nstep, 0:Nstep // 2] = power[0:Nstep // 2, Nstep //
        2:Nstep]
    newpower[0:Nstep // 2, Nstep // 2:Nstep] = power[Nstep // 2:Nstep, 0:
        Nstep // 2]
    newpower[0:Nstep // 2, 0:Nstep // 2] = power[Nstep // 2:Nstep, Nstep //
        2:Nstep]
    return newpower / numpy.sum(newpower)
def onescut(n):
    """Trapezoid-rule weights: n+1 values summing to 1 (endpoints halved)."""
    array = numpy.ones(n + 1)
    array[0] = array[-1] = 0.5
    return array / n
def gq_weights(sed, filter, nOrder, wlrange):
    """Gauss-quadrature nodes/weights for the SED x filter weight function.

    Builds moments I_k of c(lambda) = sed.Nlambda * filter.Tlambda sampled
    on npts points, solves for the degree-nOrder orthogonal polynomial,
    and returns its roots as nodes plus matching quadrature weights.

    wlrange: [lmin, lmax, npts].
    Returns (xroot, wroot), each of length nOrder.
    """
    lmin = wlrange[0]
    lmax = wlrange[1]
    npts = wlrange[2]
    x = numpy.linspace(lmin, lmax, npts)
    c = numpy.zeros(npts)
    for i in range(npts):
        c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])
    o = numpy.ones(npts)
    I = numpy.zeros(2 * nOrder)
    # moments are taken about the interval center to reduce conditioning
    lctr = numpy.mean(x)
    for k in range(2 * nOrder):
        I[k] = numpy.sum(o * (x - lctr) ** k * c)
    # Hankel system A * (low-order coefs) = -I[nOrder:] gives the monic
    # orthogonal polynomial; coef is highest-degree-first for poly1d.
    coef = numpy.zeros(nOrder + 1)
    coef[0] = 1.0
    A = numpy.zeros((nOrder, nOrder))
    for k in range(nOrder):
        for j in range(nOrder):
            A[k, j] = I[j + k]
    coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]
    p = numpy.poly1d(coef)
    xroot = numpy.sort(numpy.real(p.r))  # quadrature nodes (still centered)
    wroot = numpy.zeros_like(xroot)
    pprime = numpy.polyder(p)
    for i in range(nOrder):
        # Lagrange-style weight: polynomial through the other roots,
        # contracted against the low-order moments, normalized by p'(x_i).
        px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i + 1:])), r=True
            )
        wroot[i] = numpy.sum(px.c[::-1] * I[:nOrder]) / pprime(xroot[i])
    xroot = xroot + lctr  # shift nodes back to absolute wavelength
    return xroot, wroot
def poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo):
    """Polychromatic PSF: weighted sum of mono_psf over the bandpass.

    Scans a hard-coded 0.4-2.5 grid of 420 steps to find where the filter
    transmits (> 1e-4), builds trapezoid wavelength weights (coarse
    outside the band, fine inside) -- or, when addInfo.FastMode is set,
    10-point Gauss quadrature from gq_weights -- then accumulates
    flux-weighted monochromatic PSFs and normalizes by the total weight.

    NOTE(review): if the filter never transmits above 1e-4 on the scan
    grid, wlmin/wlmax are never assigned and the concatenate below raises
    NameError.
    """
    hard_lmin = 0.4
    hard_lmax = 2.5
    hard_Nl = 420
    ilmin = hard_Nl - 1
    ilmax = 0
    # locate the first/last scan index where the filter transmits
    for il in range(1, hard_Nl):
        wl = hard_lmin + il / hard_Nl * (hard_lmax - hard_lmin)
        if filter.Tlambda(wl) > 0.0001:
            if il < ilmin:
                ilmin = il
                wlmin = wl
            if il > ilmax:
                ilmax = il
                wlmax = wl
    # coarse sampling (1/6 density) outside the band, full inside
    na = ilmin // 6 + 1
    nb = (hard_Nl - ilmax) // 6 + 1
    wl = numpy.concatenate((numpy.linspace(hard_lmin, wlmin, na + 1), numpy
        .linspace(wlmin, wlmax, ilmax - ilmin + 1), numpy.linspace(wlmax,
        hard_lmax, nb + 1)))
    dwl = numpy.concatenate(((wlmin - hard_lmin) * onescut(na), (wlmax -
        wlmin) * onescut(ilmax - ilmin), (hard_lmax - wlmax) * onescut(nb)))
    if hasattr(addInfo, 'FastMode'):
        if addInfo.FastMode:
            # Gauss quadrature: dwl already folds in the SED*filter weight
            wl, dwl = gq_weights(sed, filter, 10, [wlmin, wlmax, ilmax -
                ilmin + 1])
    sumc = 0.0
    output = numpy.zeros((Nstep, Nstep))
    for i in range(numpy.size(wl)):
        c = sed.Nlambda(wl[i]) * filter.Tlambda(wl[i]) * dwl[i]
        if hasattr(addInfo, 'FastMode'):
            if addInfo.FastMode:
                c = dwl[i]
        # Zernike coefficients scale as 1/lambda in wave units
        this_psi = numpy.copy(psi) / wl[i]
        sumc += c
        output += c * mono_psf(this_psi, mask, scale_1um * wl[i], Nstep)
    output /= sumc
    return output
def oversamp_psf(sed, filt, ovsamp, Nstep, scanum, pos, offsets, addInfo):
    """Render an Nstep x Nstep PSF oversampled by ovsamp for one SCA.

    Bilinearly interpolates the four reference Zernike vectors for chip
    scanum to the field position pos, applies optional wavefront/jitter
    offsets (offsets.par), selects the bandpass for filt (one of
    'K','F','H','W','J','Y','Z','R'; anything else prints an error and
    exits), renders the polychromatic PSF, and finally applies a Gaussian
    jitter kernel in Fourier space.
    """
    parOn = False
    if hasattr(offsets, 'par'):
        parOn = True
    # four reference Zernike rows for this chip, bilinearly weighted
    ZR = ZernRef.data[4 * (scanum - 1):4 * scanum, :]
    wt_L = 0.5 - pos[0] / sca.size
    wt_R = 0.5 + pos[0] / sca.size
    wt_B = 0.5 - pos[1] / sca.size
    wt_T = 0.5 + pos[1] / sca.size
    psi = wt_T * wt_L * ZR[0, :] + wt_B * wt_L * ZR[1, :] + wt_B * wt_R * ZR[
        2, :] + wt_T * wt_R * ZR[3, :]
    xf = sca.x[scanum - 1] + pos[0]
    yf = sca.y[scanum - 1] + pos[1]
    if parOn:
        # additive wavefront offsets plus a focus gradient across the field
        psi[3] += offsets.par[offset_index.foc]
        psi[4] += offsets.par[offset_index.astig2]
        psi[5] += offsets.par[offset_index.astig1]
        psi[6] += offsets.par[offset_index.coma2]
        psi[7] += offsets.par[offset_index.coma1]
        psi[3] += (offsets.par[offset_index.focg1] * xf + offsets.par[
            offset_index.focg2] * yf) / sca.scale
    # pupil sampling scale per micron of wavelength (0.11 arcsec pixels)
    scale_1um = ovsamp / (0.11 * numpy.pi / 648000) / maskfiles.D
    if filt == 'K':
        filter = Filter('STH', [1.95, 2.3])
    elif filt == 'F':
        filter = Filter('interp', FilterData[:, (0, 7)])
    elif filt == 'H':
        filter = Filter('interp', FilterData[:, (0, 6)])
    elif filt == 'W':
        filter = Filter('interp', FilterData[:, (0, 5)])
    elif filt == 'J':
        filter = Filter('interp', FilterData[:, (0, 4)])
    elif filt == 'Y':
        filter = Filter('interp', FilterData[:, (0, 3)])
    elif filt == 'Z':
        filter = Filter('interp', FilterData[:, (0, 2)])
    elif filt == 'R':
        filter = Filter('interp', FilterData[:, (0, 1)])
    else:
        print('Error: unknown filter')
        exit()
    # transmission-weighted mean scale over 0.4-2.5 (presumably microns)
    la = numpy.linspace(0.4, 2.5, 2101)
    fla = numpy.zeros(2101)
    for i in range(2101):
        fla[i] = filter.Tlambda(la[i])
    scale = scale_1um * numpy.sum(la * fla) / numpy.sum(fla)
    # pick the coarsest stored pupil mask that still resolves Nstep
    mask = EmptyClass()
    mask.N = 1
    imk = 0
    while (imk < maskfiles.n_lores - 1 and Nstep / scale < maskfiles.N_in /
        2 ** (imk + 1)):
        imk += 1
    if filt == 'F' or filt == 'K':
        mask.spline = maskfiles.i_full[scanum - 1 + maskfiles.nSCA * imk]
    else:
        mask.spline = maskfiles.i_rim[scanum - 1 + maskfiles.nSCA * imk]
    if hasattr(addInfo, 'ctr'):
        # fold the requested centroid into psi[1:3] (presumably the Noll
        # tip/tilt terms -- confirm against zernike_map_noll)
        d = 0.5 * (1 - 1 / ovsamp)
        psi[1:3] -= (addInfo.ctr + d) * ovsamp / scale_1um / 4.0
    output = poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo)
    # Gaussian jitter covariance, optionally perturbed via offsets.par
    Cxx = Cyy = 0.09
    Cxy = 0.0
    if parOn:
        Cxx = 0.09 + offsets.par[offset_index.jxx]
        Cxy = offsets.par[offset_index.jxy]
        Cyy = 0.09 + offsets.par[offset_index.jyy]
    # apply jitter as a Gaussian MTF in Fourier space
    output_fft = numpy.fft.fft2(output)
    kx = numpy.zeros((Nstep, Nstep))
    ky = numpy.zeros((Nstep, Nstep))
    # negative i indexes from the array end, building |k| in FFT order
    for i in range(-Nstep // 2, Nstep // 2):
        kx[:, i] = abs(i)
        ky[i, :] = abs(i)
    kx *= 2.0 * numpy.pi * ovsamp / Nstep
    ky *= 2.0 * numpy.pi * ovsamp / Nstep
    output_fft = output_fft * numpy.exp(-Cxx * kx ** 2 / 2.0 - Cyy * ky **
        2 / 2.0 - Cxy * kx * ky)
    output = numpy.real(numpy.fft.ifft2(output_fft))
    return output
<|reserved_special_token_0|>
def postage_stamp(sed, filt, scanum, pos, offsets, addInfo):
    """Bin an oversampled PSF into a psSize x psSize postage stamp of counts.

    Optional addInfo attributes: F = total counts; many/force_ov = return a
    (25, psSize, psSize) cube over a 5x5 grid of sub-pixel shifts; FastMode =
    half-size internal grid; vtpe = vertical trailing-pixel coefficient;
    bfe (+ bfe_a / bfe_aplus / bfe_overwrite + stamp_in) = brighter-fatter
    corrections.
    """
    N = N_STD
    ov = OV_STD
    if hasattr(addInfo, 'many'):
        ov = addInfo.force_ov
    if hasattr(addInfo, 'FastMode'):
        if addInfo.FastMode:
            N = N // 2
    # oversampled PSF scaled to the requested total flux F
    bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo
        ) * addInfo.F
    out = numpy.zeros((psSize, psSize))
    for i in range(psSize):
        x = N // 2 + (i - psSize // 2) * ov
        for j in range(psSize):
            y = N // 2 + (j - psSize // 2) * ov
            # sum the ov x ov block of sub-pixels belonging to pixel (j, i)
            out[j, i] += numpy.sum(bigStamp[y:y + ov, x:x + ov])
            if hasattr(addInfo, 'vtpe'):
                # trailing-pixel effect: add a fraction of the row above
                out[j, i] += addInfo.vtpe * numpy.sum(bigStamp[y + ov:y + 2 *
                    ov, x:x + ov])
    if hasattr(addInfo, 'many'):
        # 25 stamps over a 5x5 grid of +/-2 sub-pixel (dx, dy) shifts
        out = numpy.zeros((25, psSize, psSize))
        for i in range(psSize):
            x = N // 2 + (i - psSize // 2) * ov
            for j in range(psSize):
                y = N // 2 + (j - psSize // 2) * ov
                for k in range(25):
                    dy = k % 5 - 2
                    dx = k // 5 - 2
                    out[k, j, i] += numpy.sum(bigStamp[y + dy:y + dy + ov,
                        x + dx:x + dx + ov])
    if hasattr(addInfo, 'bfe'):
        # brighter-fatter effect: shift charge across pixel boundaries in
        # proportion to the flux difference of neighboring pixels
        if hasattr(addInfo, 'many'):
            print('Error -- cannot do both bfe and many in postage_stamp')
            exit()
        dout = numpy.zeros_like(out)
        # horizontal boundary coefficient
        ah = 0
        if hasattr(addInfo, 'bfe_a'):
            ah += addInfo.bfe_a
        if hasattr(addInfo, 'bfe_aplus'):
            ah += addInfo.bfe_aplus
        for i in range(psSize - 1):
            x = N // 2 + (i - psSize // 2) * ov
            for j in range(psSize):
                y = N // 2 + (j - psSize // 2) * ov
                # boundary shift, averaged over the exposure (hence /2)
                shift = ov * ah * (out[j, i + 1] - out[j, i]) / 2.0
                if hasattr(addInfo, 'bfe_overwrite'):
                    # drive the BFE from the supplied input stamp instead
                    shift = ov * ah * (addInfo.stamp_in[j, i + 1] - addInfo
                        .stamp_in[j, i]) / 2.0
                # mean flux on the two sub-pixel columns at the boundary
                mflux = numpy.sum(bigStamp[y:y + ov, x + ov - 1:x + ov + 1]
                    ) / 2.0
                dout[j, i] += shift * mflux
                dout[j, i + 1] -= shift * mflux
        # vertical boundary coefficient
        av = 0
        if hasattr(addInfo, 'bfe_a'):
            av += addInfo.bfe_a
        if hasattr(addInfo, 'bfe_aplus'):
            av -= addInfo.bfe_aplus
        for i in range(psSize):
            x = N // 2 + (i - psSize // 2) * ov
            for j in range(psSize - 1):
                y = N // 2 + (j - psSize // 2) * ov
                shift = ov * av * (out[j + 1, i] - out[j, i]) / 2.0
                if hasattr(addInfo, 'bfe_overwrite'):
                    shift = ov * av * (addInfo.stamp_in[j + 1, i] - addInfo
                        .stamp_in[j, i]) / 2.0
                mflux = numpy.sum(bigStamp[y + ov - 1:y + ov + 1, x:x + ov]
                    ) / 2.0
                dout[j, i] += shift * mflux
                dout[j + 1, i] -= shift * mflux
        out += dout
        if hasattr(addInfo, 'bfe_overwrite'):
            out = dout
    return out
def psfmoments(sed, filt, scanum, pos, offsets, addInfo):
    """Adaptive Gaussian-weighted moments of the PSF.

    Same inputs as postage_stamp; returns
    [A, xc, yc, T, e1, e2] = amplitude, centroid (native pixels),
    trace size (Cxx+Cyy)/ov^2, and the two ellipticity components.
    """
    N = N_STD
    ov = OV_STD
    if hasattr(addInfo, 'many'):
        ov = addInfo.force_ov
    if hasattr(addInfo, 'FastMode'):
        if addInfo.FastMode:
            N = N // 2
    # measure on a centered, unit-flux copy of the request
    addInfoX = copy.deepcopy(addInfo)
    addInfoX.ctr = numpy.zeros(2)
    addInfoX.F = 1.0
    bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)
    # pixelization: convolve with the ov x ov tophat
    bigStamp = convolve(bigStamp, numpy.ones((ov, ov)), mode='full', method
        ='direct') / ov ** 2
    Np = N + ov - 1
    # moment vector layout: [A, x, y, Cxx, Cxy, Cyy]
    mom = numpy.asarray([1, 0, 0, 4 * ov ** 2, 0, 4 * ov ** 2]).astype(numpy
        .float64)
    newmom = numpy.zeros_like(mom)
    con = 0.5  # damping factor for the update step
    xx1 = numpy.tile(numpy.linspace(-(Np - 1) / 2.0, (Np - 1) / 2.0, Np), (
        Np, 1))
    yy1 = numpy.copy(xx1.T)
    # fixed-point iteration toward the self-consistent adaptive moments
    for iter in range(256):
        det = mom[3] * mom[5] - mom[4] ** 2
        xx = xx1 - mom[1]
        yy = yy1 - mom[2]
        # elliptical Gaussian weight times the image
        G = numpy.exp((-mom[5] * xx ** 2 + 2 * mom[4] * xx * yy - mom[3] *
            yy ** 2) / 2.0 / det) * bigStamp
        newmom[0] = numpy.sum(G)
        newmom[1] = numpy.sum(G * xx)
        newmom[2] = numpy.sum(G * yy)
        newmom[3] = numpy.sum(G * xx ** 2)
        newmom[4] = numpy.sum(G * xx * yy)
        newmom[5] = numpy.sum(G * yy ** 2)
        mom[0] = 2 * newmom[0]
        # damped update of centroid and covariance
        err = newmom[1:] / newmom[0]
        err[-3:] -= mom[-3:] / 2.0
        mom[1:] += err * con
    return numpy.array([mom[0], mom[1] / ov, mom[2] / ov, (mom[3] + mom[5]) /
        ov ** 2, (mom[3] - mom[5]) / (mom[3] + mom[5]), 2 * mom[4] / (mom[3
        ] + mom[5])])
def chi2_postage_stamp(obs, theory, var):
    """Poisson deviance chi^2 between an observed and a model postage stamp.

    obs / theory are count arrays; var is the read-noise variance added to
    both sides. Returns 2x the summed log-likelihood-ratio terms.
    """
    # clamp the variance-augmented observation away from zero so the
    # logarithm is always defined
    observed = numpy.maximum(obs + var, 1e-24)
    model = theory + var
    terms = model - observed - observed * numpy.log(model / observed)
    return 2 * numpy.sum(terms)
<|reserved_special_token_1|>
import numpy
import numpy.fft
import numpy.linalg
import copy
from astropy.io import fits
from scipy.interpolate import RectBivariateSpline
from scipy.signal import convolve
import offset_index
# some basic definitions
psSize = 9 # psSize x psSize postage stamps of stars
# zero padded RectBivariateSpline, if on
def RectBivariateSplineZero(y1,x1,map1,kx=1,ky=1):
    """RectBivariateSpline wrapper with an optional zero-padded border.

    The immediate return below disables the padding path (see the "if on"
    comment above): everything after it is intentionally dead code, kept so
    the zero-padded variant can be re-enabled by deleting the first return.
    """
    return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)
    # --- disabled zero-padding variant: extend the coordinate grids by one
    # linearly-extrapolated point on each side and surround the map with a
    # border of zeros, so the spline decays to zero off the input grid ---
    y2 = numpy.zeros(numpy.size(y1)+2)
    y2[1:-1] = y1
    y2[0] = 2*y2[1]-y2[2]
    y2[-1] = 2*y2[-2]-y2[-3]
    x2 = numpy.zeros(numpy.size(x1)+2)
    x2[1:-1] = x1
    x2[0] = 2*x2[1]-x2[2]
    x2[-1] = 2*x2[-2]-x2[-3]
    map2 = numpy.zeros((numpy.size(y1)+2, numpy.size(x1)+2))
    map2[1:-1,1:-1] = map1
    return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)
class EmptyClass():
    """Bare attribute container; fields are attached dynamically by callers."""
    pass
# spectral energy distribution class
class SpectralEnergyDistribution():
    """Source spectral energy distribution.

    Only the 'BB' (blackbody) type is implemented, with
    info = [T (K), solid angle].
    """

    def __init__(self, type, info):
        self.type = type
        self.info = copy.deepcopy(info)

    def Nlambda(self, lambda_):
        """Photon flux density (photons/m^2/s/um) at wavelength lambda_ (um)."""
        if self.type != 'BB':
            print('ERROR: Invalid SED type')
            exit()
        temperature = self.info[0]
        solid_angle = self.info[1]
        x = 14387.769 / lambda_ / temperature  # hc/(k T lambda)
        occupation = numpy.exp(-x) / (1. - numpy.exp(-x))
        # the 1e12 converts um^2 -> m^2
        return 2 / lambda_**4 * 2.99792458e14 * 1e12 * occupation * solid_angle
# filter class
class Filter():
    """Filter transmission curve.

    type 'STH': smoothed tophat, info = [lmin, lmax] in um.
    type 'interp': tabulated curve, info[:,0] = wavelength, info[:,1] = throughput.
    """

    def __init__(self, type, info):
        self.type = type
        self.info = copy.deepcopy(info)

    def Tlambda(self, lambda_):
        """Transmission at wavelength lambda_ (um)."""
        if self.type == 'STH':
            # tanh edges, softened by 2% of each cut-on/cut-off wavelength
            lo = self.info[0]
            hi = self.info[1]
            rise = numpy.tanh((lambda_ - lo) / (lo * .02))
            fall = numpy.tanh((lambda_ - hi) / (hi * .02))
            return (rise - fall) / 2.
        if self.type == 'interp':
            return numpy.interp(lambda_, self.info[:, 0], self.info[:, 1])
        print('ERROR: Invalid filter type')
        exit()
# load mask files
maskfiles = EmptyClass()
maskfiles.D = 2292981.05344 # um
maskfiles.rim = []
maskfiles.full = []
maskfiles.i_rim = []
maskfiles.i_full = []
maskfiles.nSCA = 18
for k in range(18):
inFile = fits.open('pupils/SCA{:d}_rim_mask.fits'.format(k+1))
maskfiles.rim += [numpy.copy(inFile[0].data[::-1,:])]
inFile.close()
inFile = fits.open('pupils/SCA{:d}_full_mask.fits'.format(k+1))
maskfiles.full += [numpy.copy(inFile[0].data[::-1,:])]
inFile.close()
# normalize
maskfiles.rim[k] /= numpy.amax(maskfiles.rim[k])
maskfiles.full[k] /= numpy.amax(maskfiles.full[k])
N_in = maskfiles.N_in = 2048
x_in = numpy.linspace(-1+1/N_in,1-1/N_in,N_in)
y_in = numpy.copy(x_in)
interp_spline = RectBivariateSplineZero(y_in, x_in, maskfiles.rim[k], kx=1, ky=1)
maskfiles.i_rim += [interp_spline]
interp_spline = RectBivariateSplineZero(y_in, x_in, maskfiles.full[k], kx=1, ky=1)
maskfiles.i_full += [interp_spline]
# lower resolution masks
maskfiles.n_lores = 7
for ku in range(1,maskfiles.n_lores):
N2 = N_in//2**ku
x_in = numpy.linspace(-1+1/N2,1-1/N2,N2)
y_in = numpy.copy(x_in)
interp_spline = RectBivariateSplineZero(y_in, x_in, numpy.mean(maskfiles.rim[k].reshape(N2,2**ku,N2,2**ku), axis=(1,3)), kx=1, ky=1)
maskfiles.i_rim += [interp_spline]
interp_spline = RectBivariateSplineZero(y_in, x_in, numpy.mean(maskfiles.full[k].reshape(N2,2**ku,N2,2**ku), axis=(1,3)), kx=1, ky=1)
maskfiles.i_full += [interp_spline]
# SCA locations
sca = EmptyClass()
sca.size = 40.88 # mm
# focal-plane (x, y) centers of the 18 SCAs, in mm
sca.x = numpy.asarray([-22.14, -22.29, -22.44, -66.42, -66.92, -67.42, -110.70, -111.48, -112.64,
    22.14, 22.29, 22.44, 66.42, 66.92, 67.42, 110.70, 111.48, 112.64])
sca.y = numpy.asarray([12.15, -37.03, -82.06, 20.90, -28.28, -73.06, 42.20, 13.46, -51.06,
    12.15, -37.03, -82.06, 20.90, -28.28, -73.06, 42.20, 13.46, -51.06])
sca.scale = 133.08
# reference Zernikes: 4 rows per SCA (corner values, used for bilinear
# interpolation in oversamp_psf); keep the last 22 columns, rescaled by 1.38
ZernRef = EmptyClass()
ZernRef.data = numpy.loadtxt('pupils/zernike_ref.txt')[:,-22:] * 1.38
# filter data: column 0 = wavelength, columns 1+ = throughput curves,
# normalized here by pi/4 * (D in m)^2 (the 1e6 converts D from um to m)
FilterData = numpy.loadtxt('pupils/filter.dat')
FilterData[:,1:] /= numpy.pi/4.*(maskfiles.D/1e6)**2
# makes map of Zernikes of a given amplitude
# amp[0:Namp] = Z1 ... ZNamp
# on a spacing Ngrid (x, y = -(1-1/Ngrid) .. +(1-1/Ngrid) multiplied by scale)
#
def zernike_map_noll(amp, Ngrid, scale):
    """Map of a Noll-ordered Zernike expansion on an Ngrid x Ngrid grid.

    amp[0:Namp] are the amplitudes of Z1..ZNamp; the grid spans
    x, y = -(1-1/Ngrid) .. +(1-1/Ngrid), multiplied by scale.
    Returns the summed wavefront map, shape (Ngrid, Ngrid).
    """
    # numpy.math was deprecated in NumPy 1.25 and removed in NumPy 2.0;
    # use the stdlib factorial directly
    from math import factorial
    xx = numpy.tile(numpy.linspace(-1+1/Ngrid,1-1/Ngrid,Ngrid), (Ngrid,1))
    yy = numpy.copy(xx.T)
    rho = numpy.sqrt(xx**2+yy**2)*scale
    phi = numpy.arctan2(yy,xx)
    output = numpy.zeros((Ngrid,Ngrid))
    # smallest radial order nmax whose triangle covers all namp amplitudes
    nmax = 0
    namp = numpy.size(amp)
    while namp>(nmax+1)*(nmax+2)//2: nmax+=1
    # precompute rho powers and cos/sin(m*phi) maps
    rpows = numpy.ones((nmax+1,Ngrid,Ngrid))
    trigphi = numpy.ones((2*nmax+1,Ngrid,Ngrid))
    for i in range(1,nmax+1): rpows[i,:,:] = rho**i
    for i in range(0,nmax+1): trigphi[i,:,:] = numpy.cos(i*phi)
    for i in range(1,nmax+1): trigphi[-i,:,:] = numpy.sin(i*phi)
    # loop over Zernikes (radial order n, azimuthal order m)
    for n in range(nmax+1):
        for m in range(-n,n+1,2):
            # radial polynomial R_n^m
            Z = numpy.zeros((Ngrid,Ngrid))
            for k in range((n-abs(m))//2+1):
                coef = (-1)**k * factorial(n-k)/factorial(k) \
                    /factorial((n-m)//2-k)/factorial((n+m)//2-k)
                Z += coef * rpows[n-2*k,:,:]
            # trigphi[m] is cos(m*phi) for m>=0 and sin(|m|*phi) for m<0
            Z *= trigphi[m,:,:]
            # Noll index j for (n, m)
            j = n*(n+1)//2 + abs(m)
            if (-1)**j*(m+.5)<0 or m==0: j += 1
            # Noll normalization
            factor = numpy.sqrt(n+1)
            if m!=0: factor *= numpy.sqrt(2)
            if j<=namp: output += factor * amp[j-1] * Z
    return(output)
# make annular mask of given obstruction (fraction) and scale
def make_mask_annulus(obs, Nstep, scale):
    """Annular pupil mask on an Nstep x Nstep grid.

    Cells with obs <= rho < 1 (rho in units of the pupil radius, grid
    coordinates multiplied by scale) are 1; all others are 0.
    """
    axis = numpy.linspace(-1 + 1/Nstep, 1 - 1/Nstep, Nstep)
    xx = numpy.tile(axis, (Nstep, 1))
    yy = numpy.copy(xx.T)
    rho = scale * numpy.sqrt(xx**2 + yy**2)
    inside = numpy.logical_and(rho >= obs, rho < 1)
    return numpy.where(inside, numpy.ones((Nstep, Nstep)),
                       numpy.zeros((Nstep, Nstep)))
def test_zernike():
    """Debug helper: print the first 36 Zernike maps on a 5x5 grid."""
    for k in range(36):
        # unit amplitude on Zernike k+1, all others zero
        psi = numpy.zeros(36)
        psi[k] = 1
        N=5
        M = zernike_map_noll(psi, N, N/(N-1))
        print(' *** Zernike {:2d} ***'.format(k+1))
        for j in range(N):
            out = ''
            for i in range(N):
                out = out + ' {:10.5f}'.format(M[j,i])
            print(out)
        print('')
# psi is a vector of Zernikes, in wavelengths
# mask information: (currently none)
# scale = sampling (points per lambda/D)
# Nstep = # grid points
# output normalized to sum to 1
def mono_psf(psi, mask, scale, Nstep):
    """Monochromatic PSF from a Zernike wavefront and a pupil mask.

    psi: Zernike vector in wavelengths; scale: sampling (points per
    lambda/D); Nstep: grid points per axis. Output sums to 1.
    """
    if hasattr(mask, 'N'):
        # pupil amplitude from a supplied spline, or build one from mask.array
        if hasattr(mask, 'spline'):
            interp_spline = mask.spline
        else:
            N_in = 2048
            x_in = numpy.linspace(-1+1/N_in,1-1/N_in,N_in)
            y_in = numpy.copy(x_in)
            interp_spline = RectBivariateSplineZero(y_in, x_in, mask.array, kx=1, ky=1)
        x2 = numpy.linspace(-1+1/Nstep,1-1/Nstep,Nstep)*scale
        y2 = numpy.copy(x2)
        # clip the interpolated mask to the unit pupil circle
        amplitude = interp_spline(y2,x2).astype(numpy.complex128) * make_mask_annulus(0, Nstep, scale)
    else:
        # fall back to an annulus with a 32% central obstruction
        amplitude = make_mask_annulus(.32, Nstep, scale).astype(numpy.complex128)
    # apply the wavefront phase and transform pupil -> image plane
    amplitude *= numpy.exp(2j * numpy.pi * zernike_map_noll(psi, Nstep, scale))
    amplitude = numpy.fft.ifft2(amplitude)
    power = numpy.abs(amplitude)**2
    # shift to center (swap the four FFT quadrants)
    newpower = numpy.zeros_like(power)
    newpower[Nstep//2:Nstep,Nstep//2:Nstep] = power[0:Nstep//2,0:Nstep//2]
    newpower[Nstep//2:Nstep,0:Nstep//2] = power[0:Nstep//2,Nstep//2:Nstep]
    newpower[0:Nstep//2,Nstep//2:Nstep] = power[Nstep//2:Nstep,0:Nstep//2]
    newpower[0:Nstep//2,0:Nstep//2] = power[Nstep//2:Nstep,Nstep//2:Nstep]
    return(newpower/numpy.sum(newpower))
# helper function
def onescut(n):
    """Trapezoid-rule weights for n intervals.

    Returns n+1 weights summing to 1, with half-weight endpoints.
    """
    weights = numpy.ones(n + 1)
    weights[0] = weights[-1] = 0.5
    return weights / n
# Gaussian quadrature weights across a filter
# sed = spectral energy distribution
# filter = filter information (incl. bandpass)
# nOrder = order of polynomial (number of nodes)
# wlrange = [lmin,lmax,npts] in um
#
# returns wavelengths, weights
def gq_weights(sed, filter, nOrder, wlrange):
    """Gaussian-quadrature nodes and weights for integrating over a bandpass.

    sed: spectral energy distribution; filter: filter (incl. bandpass);
    nOrder: number of quadrature nodes; wlrange: [lmin, lmax, npts] in um.
    Returns (wavelengths, weights) arrays of length nOrder.
    """
    # unpack info
    lmin = wlrange[0]; lmax = wlrange[1]; npts = wlrange[2]
    # build integrals I_k = int x^k S(x) F(x) dx
    x = numpy.linspace(lmin,lmax,npts)
    c = numpy.zeros((npts))
    for i in range(npts):
        c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])
    o = numpy.ones((npts))
    I = numpy.zeros((2*nOrder))
    # center wavelengths at lctr for numerical conditioning
    lctr = numpy.mean(x)
    for k in range(2*nOrder):
        I[k] = numpy.sum(o*(x-lctr)**k*c)
    # orthogonal polynomial p_n
    # require sum_{j=0}^n coef_{n-j} I_{j+k} = 0 or
    # sum_{j=0}^{n-1} coef_{n-j} I_{j+k} = -I_{n+k} for k = 0 .. n-1
    coef = numpy.zeros((nOrder+1))
    coef[0] = 1.
    A = numpy.zeros((nOrder,nOrder))
    for k in range(nOrder):
        for j in range(nOrder):
            A[k,j] = I[j+k]
    coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]
    p = numpy.poly1d(coef)
    # quadrature nodes are the (real parts of the) roots of p
    xroot = numpy.sort(numpy.real(p.r))
    wroot = numpy.zeros_like(xroot)
    pprime = numpy.polyder(p)
    for i in range(nOrder):
        # interpolatory weight from the polynomial omitting root i
        px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i+1:])), r=True)
        wroot[i] = numpy.sum(px.c[::-1]*I[:nOrder]) / pprime(xroot[i])
    # undo the centering
    xroot = xroot + lctr
    return xroot,wroot
# psi is a vector of Zernikes, in microns
# mask information: (currently none)
# sed = spectral energy distribution
# scale = sampling (points per lambda/D @ 1 um)
# Nstep = # grid points
# filter = filter information (incl. bandpass)
# addInfo = class for general additional information
# output normalized to sum to 1
def poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo):
    """Polychromatic PSF: bandpass-weighted sum of monochromatic PSFs.

    psi: Zernike vector in microns; sed: spectral energy distribution;
    scale_1um: sampling (points per lambda/D at 1 um); Nstep: grid points;
    filter: filter (incl. bandpass); addInfo: extra options (FastMode).
    Output is normalized to sum to 1.
    """
    # integration steps
    hard_lmin = 0.4
    hard_lmax = 2.5
    hard_Nl = 420
    # find the wavelength range where the filter transmits (> 1e-4)
    ilmin = hard_Nl-1; ilmax = 0
    for il in range(1,hard_Nl):
        wl = hard_lmin + il/hard_Nl*(hard_lmax-hard_lmin)
        if filter.Tlambda(wl)>1e-4:
            if il<ilmin:
                ilmin=il
                wlmin=wl
            if il>ilmax:
                ilmax=il
                wlmax=wl
    # NOTE(review): wlmin/wlmax stay unbound if the filter never exceeds
    # 1e-4 on the hard grid -- confirm callers always pass a live filter.
    # trapezoid nodes: sparse outside [wlmin, wlmax], dense inside
    na = ilmin//6 + 1
    nb = (hard_Nl-ilmax)//6 + 1
    wl = numpy.concatenate((numpy.linspace(hard_lmin,wlmin,na+1), numpy.linspace(wlmin,wlmax,ilmax-ilmin+1), numpy.linspace(wlmax,hard_lmax,nb+1)))
    dwl = numpy.concatenate(((wlmin-hard_lmin)*onescut(na), (wlmax-wlmin)*onescut(ilmax-ilmin), (hard_lmax-wlmax)*onescut(nb)))
    #print(wl,dwl,numpy.size(wl),numpy.size(dwl))
    # reduced coverage: replace the dense grid with Gaussian quadrature
    if hasattr(addInfo,'FastMode'):
        if addInfo.FastMode:
            wl, dwl = gq_weights(sed, filter, 10, [wlmin,wlmax,ilmax-ilmin+1])
    # make output PSF
    sumc = 0.
    output = numpy.zeros((Nstep,Nstep))
    for i in range(numpy.size(wl)):
        # weight: photon flux x transmission x wavelength step
        c = sed.Nlambda(wl[i]) * filter.Tlambda(wl[i]) * dwl[i]
        if hasattr(addInfo,'FastMode'):
            # in FastMode the SED/filter are already folded into dwl
            if addInfo.FastMode: c = dwl[i]
        this_psi = numpy.copy(psi)/wl[i] # convert from um -> wavelengths of wavefront
        sumc += c
        output += c * mono_psf(this_psi, mask, scale_1um*wl[i], Nstep)
        #print('{:6.4f} {:11.5E}'.format(wl[i],filter.Tlambda(wl[i])))
    output /= sumc
    return(output)
# make oversampled PSF at given SCA, position
#
# sed = source SED
# filt = filter (letter: RZYJHFK)
# ovsamp = oversampling factor
# Nstep = number of samples in each axis
# scanum = SCA number (1..18)
# pos = (x,y) position on SCA in mm (0,0)=center
# offsets = adjustment parameters
# .par -> offset parameters
# addInfo = additional information class:
# .ctr -> centroid (dx,dy)
def oversamp_psf(sed, filt, ovsamp, Nstep, scanum, pos, offsets, addInfo):
    """Oversampled PSF at a given SCA and focal-plane position.

    sed: source SED; filt: filter letter (RZYJHFKW); ovsamp: oversampling
    factor; Nstep: samples per axis; scanum: SCA number (1..18); pos: (x,y)
    on the SCA in mm ((0,0) = center); offsets.par: adjustment parameters;
    addInfo.ctr: centroid offset (dx,dy). Returns an (Nstep, Nstep) array.
    """
    # get information
    parOn = False
    if hasattr(offsets, 'par'): parOn = True
    # get Zernikes in microns: bilinear interpolation of the 4 reference
    # rows (corner values) stored for this SCA
    ZR = ZernRef.data[4*(scanum-1):4*scanum,:]
    wt_L = .5 - pos[0]/sca.size
    wt_R = .5 + pos[0]/sca.size
    wt_B = .5 - pos[1]/sca.size
    wt_T = .5 + pos[1]/sca.size
    psi = wt_T*wt_L*ZR[0,:] + wt_B*wt_L*ZR[1,:] + wt_B*wt_R*ZR[2,:] + wt_T*wt_R*ZR[3,:]
    xf = sca.x[scanum-1] + pos[0]
    yf = sca.y[scanum-1] + pos[1]
    # Zernike offsets: fitted focus/astigmatism/coma plus a focus gradient
    if parOn:
        psi[3] += offsets.par[offset_index.foc ]
        psi[4] += offsets.par[offset_index.astig2]
        psi[5] += offsets.par[offset_index.astig1]
        psi[6] += offsets.par[offset_index.coma2]
        psi[7] += offsets.par[offset_index.coma1]
        psi[3] += (offsets.par[offset_index.focg1]*xf + offsets.par[offset_index.focg2]*yf)/sca.scale
    # sampling at 1 um; .11*pi/648000 converts 0.11 arcsec to radians
    scale_1um = ovsamp / (.11*numpy.pi/648000) / maskfiles.D
    #print(scale_1um)
    # filter curves
    if filt=='K':
        filter = Filter('STH', [1.95,2.30])
    elif filt=='F':
        filter = Filter('interp', FilterData[:,(0,7)])
    elif filt=='H':
        filter = Filter('interp', FilterData[:,(0,6)])
    elif filt=='W':
        filter = Filter('interp', FilterData[:,(0,5)])
    elif filt=='J':
        filter = Filter('interp', FilterData[:,(0,4)])
    elif filt=='Y':
        filter = Filter('interp', FilterData[:,(0,3)])
    elif filt=='Z':
        filter = Filter('interp', FilterData[:,(0,2)])
    elif filt=='R':
        filter = Filter('interp', FilterData[:,(0,1)])
    else:
        print('Error: unknown filter')
        exit()
    # bandpass-averaged sampling scale
    la = numpy.linspace(.4, 2.5, 2101)
    fla = numpy.zeros(2101)
    for i in range(2101): fla[i] = filter.Tlambda(la[i])
    scale = scale_1um*numpy.sum(la*fla)/numpy.sum(fla)
    # get the mask, choosing the coarsest resolution level that still
    # resolves the requested sampling
    mask = EmptyClass(); mask.N=1
    imk = 0
    while imk<maskfiles.n_lores-1 and Nstep/scale<maskfiles.N_in/2**(imk+1): imk+=1
    #print(' *** ', Nstep, scale, scale/scale_1um, imk)
    if filt=='F' or filt=='K':
        mask.spline = maskfiles.i_full[scanum-1 + maskfiles.nSCA*imk]
    else:
        mask.spline = maskfiles.i_rim[scanum-1 + maskfiles.nSCA*imk]
    # x & y offsets, applied through the tilt Zernikes
    if hasattr(addInfo, 'ctr'):
        d = .5*(1-1/ovsamp)
        psi[1:3] -= (addInfo.ctr+d) * ovsamp / scale_1um / 4.
    output = poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo)
    # smooth: Gaussian blur with covariance (Cxx, Cxy, Cyy), applied in
    # Fourier space
    Cxx = Cyy = .09; Cxy = 0.
    if parOn:
        Cxx = .09 + offsets.par[offset_index.jxx ]
        Cxy = offsets.par[offset_index.jxy ]
        Cyy = .09 + offsets.par[offset_index.jyy ]
    output_fft = numpy.fft.fft2(output)
    kx = numpy.zeros((Nstep,Nstep))
    ky = numpy.zeros((Nstep,Nstep))
    for i in range(-Nstep//2, Nstep//2):
        kx[:,i] = abs(i)
        ky[i,:] = abs(i)
    kx *= 2.*numpy.pi*ovsamp/Nstep
    ky *= 2.*numpy.pi*ovsamp/Nstep
    output_fft = output_fft * numpy.exp(-Cxx*kx**2/2. - Cyy*ky**2/2. - Cxy*kx*ky)
    output = numpy.real(numpy.fft.ifft2(output_fft))
    return(output)
# parameters for next couple of functions
N_STD = 1024 # must be a multiple of 4
OV_STD = 8
# make oversampled PSF at given SCA, position
#
# sed = source SED
# filt = filter (letter: RZYJHFK)
# scanum = SCA number (1..18)
# pos = (x,y) position on SCA in mm (0,0)=center
# offsets = adjustment parameters (placeholder)
# addInfo = additional information class:
# .F -> total counts (in e)
# .ctr -> centroid (dx,dy)
# .many -> @ 5x5 grid of offsets
#
# .bfe = add bfe (can include .bfe_a, .bfe_aplus)
#
# .bfe_overwrite => special mode to compute BFE with time dependent PSF
# .stamp_in = input stamp (so compute BFE from stamp_in *acting on* this PSF)
def postage_stamp(sed, filt, scanum, pos, offsets, addInfo):
    """Bin the oversampled PSF into a psSize x psSize postage stamp in counts.

    addInfo.F sets the total counts; optional attributes (many/force_ov,
    FastMode, vtpe, bfe and friends) enable the effects listed in the
    comment block above.
    """
    N = N_STD # must be even
    ov = OV_STD
    if hasattr(addInfo,'many'):
        ov = addInfo.force_ov
    if hasattr(addInfo,'FastMode'):
        if addInfo.FastMode:
            N = N//2
    bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo) * addInfo.F
    out = numpy.zeros((psSize, psSize))
    for i in range(psSize):
        x = N//2+(i-psSize//2)*ov
        for j in range(psSize):
            y = N//2+(j-psSize//2)*ov
            # sum the ov x ov sub-pixel block belonging to pixel (j, i)
            out[j,i] += numpy.sum(bigStamp[y:y+ov,x:x+ov])
            if hasattr(addInfo, 'vtpe'):
                # vertical trailing-pixel effect: add a fraction of the row above
                out[j,i] += addInfo.vtpe * numpy.sum(bigStamp[y+ov:y+2*ov,x:x+ov])
    if hasattr(addInfo,'many'):
        # 25 stamps over a 5x5 grid of +/-2 sub-pixel (dx, dy) shifts
        out = numpy.zeros((25, psSize, psSize))
        for i in range(psSize):
            x = N//2+(i-psSize//2)*ov
            for j in range(psSize):
                y = N//2+(j-psSize//2)*ov
                for k in range(25):
                    dy = k%5 - 2; dx = k//5 - 2
                    out[k,j,i] += numpy.sum(bigStamp[y+dy:y+dy+ov,x+dx:x+dx+ov])
    # BFE?
    if hasattr(addInfo, 'bfe'):
        if hasattr(addInfo,'many'):
            print('Error -- cannot do both bfe and many in postage_stamp')
            exit()
        dout = numpy.zeros_like(out)
        # horizontal BFE: move charge across vertical pixel boundaries in
        # proportion to the neighboring-pixel flux difference
        ah = 0
        if hasattr(addInfo, 'bfe_a'): ah += addInfo.bfe_a
        if hasattr(addInfo, 'bfe_aplus'): ah += addInfo.bfe_aplus
        for i in range(psSize-1):
            x = N//2+(i-psSize//2)*ov
            for j in range(psSize):
                y = N//2+(j-psSize//2)*ov
                shift = ov * ah * (out[j,i+1]-out[j,i]) / 2. # in sub-pixels, average over exposure
                if hasattr(addInfo, 'bfe_overwrite'): shift = ov * ah * (addInfo.stamp_in[j,i+1]-addInfo.stamp_in[j,i]) / 2.
                # mean flux in the two sub-pixel columns at the boundary
                mflux = numpy.sum(bigStamp[y:y+ov,x+ov-1:x+ov+1])/2.
                dout[j,i] += shift*mflux
                dout[j,i+1] -= shift*mflux
        # vertical BFE
        av = 0
        if hasattr(addInfo, 'bfe_a'): av += addInfo.bfe_a
        if hasattr(addInfo, 'bfe_aplus'): av -= addInfo.bfe_aplus
        for i in range(psSize):
            x = N//2+(i-psSize//2)*ov
            for j in range(psSize-1):
                y = N//2+(j-psSize//2)*ov
                shift = ov * av * (out[j+1,i]-out[j,i]) / 2. # in sub-pixels, average over exposure
                if hasattr(addInfo, 'bfe_overwrite'): shift = ov * av * (addInfo.stamp_in[j+1,i]-addInfo.stamp_in[j,i]) / 2.
                mflux = numpy.sum(bigStamp[y+ov-1:y+ov+1,x:x+ov])/2.
                dout[j,i] += shift*mflux
                dout[j+1,i] -= shift*mflux
        out+=dout
        if hasattr(addInfo, 'bfe_overwrite'): out=dout
    return(out)
#
# same input format but returns moments of the PSF
# A, xc, yc, T, e1, e2
def psfmoments(sed, filt, scanum, pos, offsets, addInfo):
    """Adaptive Gaussian-weighted moments of the PSF.

    Same inputs as postage_stamp; returns [A, xc, yc, T, e1, e2] with
    centroid and size in native-pixel units.
    """
    N = N_STD # must be even
    ov = OV_STD
    if hasattr(addInfo,'many'):
        ov = addInfo.force_ov
    if hasattr(addInfo,'FastMode'):
        if addInfo.FastMode:
            N = N//2
    # measure on a centered, unit-flux copy of the request
    addInfoX = copy.deepcopy(addInfo); addInfoX.ctr = numpy.zeros((2)); addInfoX.F = 1.
    bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)
    # pixelization: convolve with the ov x ov tophat
    bigStamp = convolve(bigStamp, numpy.ones((ov,ov)), mode='full', method='direct')/ov**2
    Np = N+ov-1
    # moment format: A,x,y,Cxx,Cxy,Cyy
    mom = numpy.asarray([1,0,0,4*ov**2,0,4*ov**2]).astype(numpy.float64)
    newmom = numpy.zeros_like(mom)
    con = .5 # convergence factor
    xx1 = numpy.tile(numpy.linspace(-(Np-1)/2., (Np-1)/2., Np), (Np,1))
    yy1 = numpy.copy(xx1.T)
    # fixed-point iteration toward the self-consistent adaptive moments
    for iter in range(256):
        det = mom[3]*mom[5]-mom[4]**2
        xx = xx1-mom[1]
        yy = yy1-mom[2]
        # elliptical Gaussian weight times the image
        G = numpy.exp((-mom[5]*xx**2 + 2*mom[4]*xx*yy - mom[3]*yy**2)/2./det) * bigStamp
        newmom[0] = numpy.sum(G)
        newmom[1] = numpy.sum(G*xx)
        newmom[2] = numpy.sum(G*yy)
        newmom[3] = numpy.sum(G*xx**2)
        newmom[4] = numpy.sum(G*xx*yy)
        newmom[5] = numpy.sum(G*yy**2)
        mom[0] = 2*newmom[0]
        # damped update of centroid and covariance
        err = newmom[1:]/newmom[0]; err[-3:] -= mom[-3:]/2.
        mom[1:] += err*con
    return(numpy.array([mom[0], mom[1]/ov, mom[2]/ov, (mom[3]+mom[5])/ov**2, (mom[3]-mom[5])/(mom[3]+mom[5]), 2*mom[4]/(mom[3]+mom[5])]))
# returns chi^2
# var = read noise variance
def chi2_postage_stamp(obs, theory, var):
    """Poissonian deviance (chi^2-like statistic) between two postage stamps.

    obs    : observed postage stamp
    theory : model postage stamp
    var    : read noise variance, added to both as a background level

    Returns 2 * sum[m - d - d*log(m/d)] with m = theory+var and
    d = max(obs+var, 1e-24); the floor keeps the logarithm defined
    for non-positive observed values.
    """
    model = theory + var
    data = numpy.maximum(obs + var, 1e-24)
    return 2 * numpy.sum(model - data - data * numpy.log(model / data))
|
flexible
|
{
"blob_id": "2ab6488276c74da8c3d9097d298fc53d1caf74b1",
"index": 6243,
"step-1": "<mask token>\n\n\ndef RectBivariateSplineZero(y1, x1, map1, kx=1, ky=1):\n return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)\n y2 = numpy.zeros(numpy.size(y1) + 2)\n y2[1:-1] = y1\n y2[0] = 2 * y2[1] - y2[2]\n y2[-1] = 2 * y2[-2] - y2[-3]\n x2 = numpy.zeros(numpy.size(x1) + 2)\n x2[1:-1] = x1\n x2[0] = 2 * x2[1] - x2[2]\n x2[-1] = 2 * x2[-2] - x2[-3]\n map2 = numpy.zeros((numpy.size(y1) + 2, numpy.size(x1) + 2))\n map2[1:-1, 1:-1] = map1\n return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)\n\n\nclass EmptyClass:\n pass\n\n\nclass SpectralEnergyDistribution:\n\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n def Nlambda(self, lambda_):\n if self.type == 'BB':\n T = self.info[0]\n x = 14387.769 / lambda_ / T\n return (2 / lambda_ ** 4 * 299792458000000.0 * 1000000000000.0 *\n numpy.exp(-x) / (1.0 - numpy.exp(-x)) * self.info[1])\n else:\n print('ERROR: Invalid SED type')\n exit()\n\n\nclass Filter:\n\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n def Tlambda(self, lambda_):\n if self.type == 'STH':\n lmin = self.info[0]\n dlmin = lmin * 0.02\n lmax = self.info[1]\n dlmax = lmax * 0.02\n return (numpy.tanh((lambda_ - lmin) / dlmin) - numpy.tanh((\n lambda_ - lmax) / dlmax)) / 2.0\n elif self.type == 'interp':\n return numpy.interp(lambda_, self.info[:, 0], self.info[:, 1])\n else:\n print('ERROR: Invalid filter type')\n exit()\n\n\n<mask token>\n\n\ndef onescut(n):\n array = numpy.ones(n + 1)\n array[0] = array[-1] = 0.5\n return array / n\n\n\ndef gq_weights(sed, filter, nOrder, wlrange):\n lmin = wlrange[0]\n lmax = wlrange[1]\n npts = wlrange[2]\n x = numpy.linspace(lmin, lmax, npts)\n c = numpy.zeros(npts)\n for i in range(npts):\n c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])\n o = numpy.ones(npts)\n I = numpy.zeros(2 * nOrder)\n lctr = numpy.mean(x)\n for k in range(2 * nOrder):\n I[k] = numpy.sum(o * (x - lctr) ** k * c)\n coef = numpy.zeros(nOrder + 1)\n 
coef[0] = 1.0\n A = numpy.zeros((nOrder, nOrder))\n for k in range(nOrder):\n for j in range(nOrder):\n A[k, j] = I[j + k]\n coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]\n p = numpy.poly1d(coef)\n xroot = numpy.sort(numpy.real(p.r))\n wroot = numpy.zeros_like(xroot)\n pprime = numpy.polyder(p)\n for i in range(nOrder):\n px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i + 1:])), r=True\n )\n wroot[i] = numpy.sum(px.c[::-1] * I[:nOrder]) / pprime(xroot[i])\n xroot = xroot + lctr\n return xroot, wroot\n\n\n<mask token>\n\n\ndef postage_stamp(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD\n ov = OV_STD\n if hasattr(addInfo, 'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n N = N // 2\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo\n ) * addInfo.F\n out = numpy.zeros((psSize, psSize))\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n out[j, i] += numpy.sum(bigStamp[y:y + ov, x:x + ov])\n if hasattr(addInfo, 'vtpe'):\n out[j, i] += addInfo.vtpe * numpy.sum(bigStamp[y + ov:y + 2 *\n ov, x:x + ov])\n if hasattr(addInfo, 'many'):\n out = numpy.zeros((25, psSize, psSize))\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n for k in range(25):\n dy = k % 5 - 2\n dx = k // 5 - 2\n out[k, j, i] += numpy.sum(bigStamp[y + dy:y + dy + ov, \n x + dx:x + dx + ov])\n if hasattr(addInfo, 'bfe'):\n if hasattr(addInfo, 'many'):\n print('Error -- cannot do both bfe and many in postage_stamp')\n exit()\n dout = numpy.zeros_like(out)\n ah = 0\n if hasattr(addInfo, 'bfe_a'):\n ah += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'):\n ah += addInfo.bfe_aplus\n for i in range(psSize - 1):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n shift = ov * ah * (out[j, i + 1] - out[j, i]) / 2.0\n if 
hasattr(addInfo, 'bfe_overwrite'):\n shift = ov * ah * (addInfo.stamp_in[j, i + 1] - addInfo\n .stamp_in[j, i]) / 2.0\n mflux = numpy.sum(bigStamp[y:y + ov, x + ov - 1:x + ov + 1]\n ) / 2.0\n dout[j, i] += shift * mflux\n dout[j, i + 1] -= shift * mflux\n av = 0\n if hasattr(addInfo, 'bfe_a'):\n av += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'):\n av -= addInfo.bfe_aplus\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize - 1):\n y = N // 2 + (j - psSize // 2) * ov\n shift = ov * av * (out[j + 1, i] - out[j, i]) / 2.0\n if hasattr(addInfo, 'bfe_overwrite'):\n shift = ov * av * (addInfo.stamp_in[j + 1, i] - addInfo\n .stamp_in[j, i]) / 2.0\n mflux = numpy.sum(bigStamp[y + ov - 1:y + ov + 1, x:x + ov]\n ) / 2.0\n dout[j, i] += shift * mflux\n dout[j + 1, i] -= shift * mflux\n out += dout\n if hasattr(addInfo, 'bfe_overwrite'):\n out = dout\n return out\n\n\ndef psfmoments(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD\n ov = OV_STD\n if hasattr(addInfo, 'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n N = N // 2\n addInfoX = copy.deepcopy(addInfo)\n addInfoX.ctr = numpy.zeros(2)\n addInfoX.F = 1.0\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)\n bigStamp = convolve(bigStamp, numpy.ones((ov, ov)), mode='full', method\n ='direct') / ov ** 2\n Np = N + ov - 1\n mom = numpy.asarray([1, 0, 0, 4 * ov ** 2, 0, 4 * ov ** 2]).astype(numpy\n .float64)\n newmom = numpy.zeros_like(mom)\n con = 0.5\n xx1 = numpy.tile(numpy.linspace(-(Np - 1) / 2.0, (Np - 1) / 2.0, Np), (\n Np, 1))\n yy1 = numpy.copy(xx1.T)\n for iter in range(256):\n det = mom[3] * mom[5] - mom[4] ** 2\n xx = xx1 - mom[1]\n yy = yy1 - mom[2]\n G = numpy.exp((-mom[5] * xx ** 2 + 2 * mom[4] * xx * yy - mom[3] * \n yy ** 2) / 2.0 / det) * bigStamp\n newmom[0] = numpy.sum(G)\n newmom[1] = numpy.sum(G * xx)\n newmom[2] = numpy.sum(G * yy)\n newmom[3] = numpy.sum(G * xx ** 2)\n newmom[4] = 
numpy.sum(G * xx * yy)\n newmom[5] = numpy.sum(G * yy ** 2)\n mom[0] = 2 * newmom[0]\n err = newmom[1:] / newmom[0]\n err[-3:] -= mom[-3:] / 2.0\n mom[1:] += err * con\n return numpy.array([mom[0], mom[1] / ov, mom[2] / ov, (mom[3] + mom[5]) /\n ov ** 2, (mom[3] - mom[5]) / (mom[3] + mom[5]), 2 * mom[4] / (mom[3\n ] + mom[5])])\n\n\ndef chi2_postage_stamp(obs, theory, var):\n obs2 = numpy.maximum(obs + var, 1e-24)\n return numpy.sum(theory + var - obs2 - obs2 * numpy.log((theory + var) /\n obs2)) * 2\n",
"step-2": "<mask token>\n\n\ndef RectBivariateSplineZero(y1, x1, map1, kx=1, ky=1):\n return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)\n y2 = numpy.zeros(numpy.size(y1) + 2)\n y2[1:-1] = y1\n y2[0] = 2 * y2[1] - y2[2]\n y2[-1] = 2 * y2[-2] - y2[-3]\n x2 = numpy.zeros(numpy.size(x1) + 2)\n x2[1:-1] = x1\n x2[0] = 2 * x2[1] - x2[2]\n x2[-1] = 2 * x2[-2] - x2[-3]\n map2 = numpy.zeros((numpy.size(y1) + 2, numpy.size(x1) + 2))\n map2[1:-1, 1:-1] = map1\n return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)\n\n\nclass EmptyClass:\n pass\n\n\nclass SpectralEnergyDistribution:\n\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n def Nlambda(self, lambda_):\n if self.type == 'BB':\n T = self.info[0]\n x = 14387.769 / lambda_ / T\n return (2 / lambda_ ** 4 * 299792458000000.0 * 1000000000000.0 *\n numpy.exp(-x) / (1.0 - numpy.exp(-x)) * self.info[1])\n else:\n print('ERROR: Invalid SED type')\n exit()\n\n\nclass Filter:\n\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n def Tlambda(self, lambda_):\n if self.type == 'STH':\n lmin = self.info[0]\n dlmin = lmin * 0.02\n lmax = self.info[1]\n dlmax = lmax * 0.02\n return (numpy.tanh((lambda_ - lmin) / dlmin) - numpy.tanh((\n lambda_ - lmax) / dlmax)) / 2.0\n elif self.type == 'interp':\n return numpy.interp(lambda_, self.info[:, 0], self.info[:, 1])\n else:\n print('ERROR: Invalid filter type')\n exit()\n\n\n<mask token>\n\n\ndef onescut(n):\n array = numpy.ones(n + 1)\n array[0] = array[-1] = 0.5\n return array / n\n\n\ndef gq_weights(sed, filter, nOrder, wlrange):\n lmin = wlrange[0]\n lmax = wlrange[1]\n npts = wlrange[2]\n x = numpy.linspace(lmin, lmax, npts)\n c = numpy.zeros(npts)\n for i in range(npts):\n c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])\n o = numpy.ones(npts)\n I = numpy.zeros(2 * nOrder)\n lctr = numpy.mean(x)\n for k in range(2 * nOrder):\n I[k] = numpy.sum(o * (x - lctr) ** k * c)\n coef = numpy.zeros(nOrder + 1)\n 
coef[0] = 1.0\n A = numpy.zeros((nOrder, nOrder))\n for k in range(nOrder):\n for j in range(nOrder):\n A[k, j] = I[j + k]\n coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]\n p = numpy.poly1d(coef)\n xroot = numpy.sort(numpy.real(p.r))\n wroot = numpy.zeros_like(xroot)\n pprime = numpy.polyder(p)\n for i in range(nOrder):\n px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i + 1:])), r=True\n )\n wroot[i] = numpy.sum(px.c[::-1] * I[:nOrder]) / pprime(xroot[i])\n xroot = xroot + lctr\n return xroot, wroot\n\n\n<mask token>\n\n\ndef oversamp_psf(sed, filt, ovsamp, Nstep, scanum, pos, offsets, addInfo):\n parOn = False\n if hasattr(offsets, 'par'):\n parOn = True\n ZR = ZernRef.data[4 * (scanum - 1):4 * scanum, :]\n wt_L = 0.5 - pos[0] / sca.size\n wt_R = 0.5 + pos[0] / sca.size\n wt_B = 0.5 - pos[1] / sca.size\n wt_T = 0.5 + pos[1] / sca.size\n psi = wt_T * wt_L * ZR[0, :] + wt_B * wt_L * ZR[1, :] + wt_B * wt_R * ZR[\n 2, :] + wt_T * wt_R * ZR[3, :]\n xf = sca.x[scanum - 1] + pos[0]\n yf = sca.y[scanum - 1] + pos[1]\n if parOn:\n psi[3] += offsets.par[offset_index.foc]\n psi[4] += offsets.par[offset_index.astig2]\n psi[5] += offsets.par[offset_index.astig1]\n psi[6] += offsets.par[offset_index.coma2]\n psi[7] += offsets.par[offset_index.coma1]\n psi[3] += (offsets.par[offset_index.focg1] * xf + offsets.par[\n offset_index.focg2] * yf) / sca.scale\n scale_1um = ovsamp / (0.11 * numpy.pi / 648000) / maskfiles.D\n if filt == 'K':\n filter = Filter('STH', [1.95, 2.3])\n elif filt == 'F':\n filter = Filter('interp', FilterData[:, (0, 7)])\n elif filt == 'H':\n filter = Filter('interp', FilterData[:, (0, 6)])\n elif filt == 'W':\n filter = Filter('interp', FilterData[:, (0, 5)])\n elif filt == 'J':\n filter = Filter('interp', FilterData[:, (0, 4)])\n elif filt == 'Y':\n filter = Filter('interp', FilterData[:, (0, 3)])\n elif filt == 'Z':\n filter = Filter('interp', FilterData[:, (0, 2)])\n elif filt == 'R':\n filter = Filter('interp', FilterData[:, (0, 1)])\n 
else:\n print('Error: unknown filter')\n exit()\n la = numpy.linspace(0.4, 2.5, 2101)\n fla = numpy.zeros(2101)\n for i in range(2101):\n fla[i] = filter.Tlambda(la[i])\n scale = scale_1um * numpy.sum(la * fla) / numpy.sum(fla)\n mask = EmptyClass()\n mask.N = 1\n imk = 0\n while (imk < maskfiles.n_lores - 1 and Nstep / scale < maskfiles.N_in /\n 2 ** (imk + 1)):\n imk += 1\n if filt == 'F' or filt == 'K':\n mask.spline = maskfiles.i_full[scanum - 1 + maskfiles.nSCA * imk]\n else:\n mask.spline = maskfiles.i_rim[scanum - 1 + maskfiles.nSCA * imk]\n if hasattr(addInfo, 'ctr'):\n d = 0.5 * (1 - 1 / ovsamp)\n psi[1:3] -= (addInfo.ctr + d) * ovsamp / scale_1um / 4.0\n output = poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo)\n Cxx = Cyy = 0.09\n Cxy = 0.0\n if parOn:\n Cxx = 0.09 + offsets.par[offset_index.jxx]\n Cxy = offsets.par[offset_index.jxy]\n Cyy = 0.09 + offsets.par[offset_index.jyy]\n output_fft = numpy.fft.fft2(output)\n kx = numpy.zeros((Nstep, Nstep))\n ky = numpy.zeros((Nstep, Nstep))\n for i in range(-Nstep // 2, Nstep // 2):\n kx[:, i] = abs(i)\n ky[i, :] = abs(i)\n kx *= 2.0 * numpy.pi * ovsamp / Nstep\n ky *= 2.0 * numpy.pi * ovsamp / Nstep\n output_fft = output_fft * numpy.exp(-Cxx * kx ** 2 / 2.0 - Cyy * ky ** \n 2 / 2.0 - Cxy * kx * ky)\n output = numpy.real(numpy.fft.ifft2(output_fft))\n return output\n\n\n<mask token>\n\n\ndef postage_stamp(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD\n ov = OV_STD\n if hasattr(addInfo, 'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n N = N // 2\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo\n ) * addInfo.F\n out = numpy.zeros((psSize, psSize))\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n out[j, i] += numpy.sum(bigStamp[y:y + ov, x:x + ov])\n if hasattr(addInfo, 'vtpe'):\n out[j, i] += addInfo.vtpe * numpy.sum(bigStamp[y + ov:y + 2 *\n ov, 
x:x + ov])\n if hasattr(addInfo, 'many'):\n out = numpy.zeros((25, psSize, psSize))\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n for k in range(25):\n dy = k % 5 - 2\n dx = k // 5 - 2\n out[k, j, i] += numpy.sum(bigStamp[y + dy:y + dy + ov, \n x + dx:x + dx + ov])\n if hasattr(addInfo, 'bfe'):\n if hasattr(addInfo, 'many'):\n print('Error -- cannot do both bfe and many in postage_stamp')\n exit()\n dout = numpy.zeros_like(out)\n ah = 0\n if hasattr(addInfo, 'bfe_a'):\n ah += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'):\n ah += addInfo.bfe_aplus\n for i in range(psSize - 1):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n shift = ov * ah * (out[j, i + 1] - out[j, i]) / 2.0\n if hasattr(addInfo, 'bfe_overwrite'):\n shift = ov * ah * (addInfo.stamp_in[j, i + 1] - addInfo\n .stamp_in[j, i]) / 2.0\n mflux = numpy.sum(bigStamp[y:y + ov, x + ov - 1:x + ov + 1]\n ) / 2.0\n dout[j, i] += shift * mflux\n dout[j, i + 1] -= shift * mflux\n av = 0\n if hasattr(addInfo, 'bfe_a'):\n av += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'):\n av -= addInfo.bfe_aplus\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize - 1):\n y = N // 2 + (j - psSize // 2) * ov\n shift = ov * av * (out[j + 1, i] - out[j, i]) / 2.0\n if hasattr(addInfo, 'bfe_overwrite'):\n shift = ov * av * (addInfo.stamp_in[j + 1, i] - addInfo\n .stamp_in[j, i]) / 2.0\n mflux = numpy.sum(bigStamp[y + ov - 1:y + ov + 1, x:x + ov]\n ) / 2.0\n dout[j, i] += shift * mflux\n dout[j + 1, i] -= shift * mflux\n out += dout\n if hasattr(addInfo, 'bfe_overwrite'):\n out = dout\n return out\n\n\ndef psfmoments(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD\n ov = OV_STD\n if hasattr(addInfo, 'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n N = N // 2\n addInfoX = copy.deepcopy(addInfo)\n 
addInfoX.ctr = numpy.zeros(2)\n addInfoX.F = 1.0\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)\n bigStamp = convolve(bigStamp, numpy.ones((ov, ov)), mode='full', method\n ='direct') / ov ** 2\n Np = N + ov - 1\n mom = numpy.asarray([1, 0, 0, 4 * ov ** 2, 0, 4 * ov ** 2]).astype(numpy\n .float64)\n newmom = numpy.zeros_like(mom)\n con = 0.5\n xx1 = numpy.tile(numpy.linspace(-(Np - 1) / 2.0, (Np - 1) / 2.0, Np), (\n Np, 1))\n yy1 = numpy.copy(xx1.T)\n for iter in range(256):\n det = mom[3] * mom[5] - mom[4] ** 2\n xx = xx1 - mom[1]\n yy = yy1 - mom[2]\n G = numpy.exp((-mom[5] * xx ** 2 + 2 * mom[4] * xx * yy - mom[3] * \n yy ** 2) / 2.0 / det) * bigStamp\n newmom[0] = numpy.sum(G)\n newmom[1] = numpy.sum(G * xx)\n newmom[2] = numpy.sum(G * yy)\n newmom[3] = numpy.sum(G * xx ** 2)\n newmom[4] = numpy.sum(G * xx * yy)\n newmom[5] = numpy.sum(G * yy ** 2)\n mom[0] = 2 * newmom[0]\n err = newmom[1:] / newmom[0]\n err[-3:] -= mom[-3:] / 2.0\n mom[1:] += err * con\n return numpy.array([mom[0], mom[1] / ov, mom[2] / ov, (mom[3] + mom[5]) /\n ov ** 2, (mom[3] - mom[5]) / (mom[3] + mom[5]), 2 * mom[4] / (mom[3\n ] + mom[5])])\n\n\ndef chi2_postage_stamp(obs, theory, var):\n obs2 = numpy.maximum(obs + var, 1e-24)\n return numpy.sum(theory + var - obs2 - obs2 * numpy.log((theory + var) /\n obs2)) * 2\n",
"step-3": "<mask token>\n\n\ndef RectBivariateSplineZero(y1, x1, map1, kx=1, ky=1):\n return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)\n y2 = numpy.zeros(numpy.size(y1) + 2)\n y2[1:-1] = y1\n y2[0] = 2 * y2[1] - y2[2]\n y2[-1] = 2 * y2[-2] - y2[-3]\n x2 = numpy.zeros(numpy.size(x1) + 2)\n x2[1:-1] = x1\n x2[0] = 2 * x2[1] - x2[2]\n x2[-1] = 2 * x2[-2] - x2[-3]\n map2 = numpy.zeros((numpy.size(y1) + 2, numpy.size(x1) + 2))\n map2[1:-1, 1:-1] = map1\n return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)\n\n\nclass EmptyClass:\n pass\n\n\nclass SpectralEnergyDistribution:\n\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n def Nlambda(self, lambda_):\n if self.type == 'BB':\n T = self.info[0]\n x = 14387.769 / lambda_ / T\n return (2 / lambda_ ** 4 * 299792458000000.0 * 1000000000000.0 *\n numpy.exp(-x) / (1.0 - numpy.exp(-x)) * self.info[1])\n else:\n print('ERROR: Invalid SED type')\n exit()\n\n\nclass Filter:\n\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n def Tlambda(self, lambda_):\n if self.type == 'STH':\n lmin = self.info[0]\n dlmin = lmin * 0.02\n lmax = self.info[1]\n dlmax = lmax * 0.02\n return (numpy.tanh((lambda_ - lmin) / dlmin) - numpy.tanh((\n lambda_ - lmax) / dlmax)) / 2.0\n elif self.type == 'interp':\n return numpy.interp(lambda_, self.info[:, 0], self.info[:, 1])\n else:\n print('ERROR: Invalid filter type')\n exit()\n\n\n<mask token>\n\n\ndef test_zernike():\n for k in range(36):\n psi = numpy.zeros(36)\n psi[k] = 1\n N = 5\n M = zernike_map_noll(psi, N, N / (N - 1))\n print(' *** Zernike {:2d} ***'.format(k + 1))\n for j in range(N):\n out = ''\n for i in range(N):\n out = out + ' {:10.5f}'.format(M[j, i])\n print(out)\n print('')\n\n\n<mask token>\n\n\ndef onescut(n):\n array = numpy.ones(n + 1)\n array[0] = array[-1] = 0.5\n return array / n\n\n\ndef gq_weights(sed, filter, nOrder, wlrange):\n lmin = wlrange[0]\n lmax = wlrange[1]\n npts = 
wlrange[2]\n x = numpy.linspace(lmin, lmax, npts)\n c = numpy.zeros(npts)\n for i in range(npts):\n c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])\n o = numpy.ones(npts)\n I = numpy.zeros(2 * nOrder)\n lctr = numpy.mean(x)\n for k in range(2 * nOrder):\n I[k] = numpy.sum(o * (x - lctr) ** k * c)\n coef = numpy.zeros(nOrder + 1)\n coef[0] = 1.0\n A = numpy.zeros((nOrder, nOrder))\n for k in range(nOrder):\n for j in range(nOrder):\n A[k, j] = I[j + k]\n coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]\n p = numpy.poly1d(coef)\n xroot = numpy.sort(numpy.real(p.r))\n wroot = numpy.zeros_like(xroot)\n pprime = numpy.polyder(p)\n for i in range(nOrder):\n px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i + 1:])), r=True\n )\n wroot[i] = numpy.sum(px.c[::-1] * I[:nOrder]) / pprime(xroot[i])\n xroot = xroot + lctr\n return xroot, wroot\n\n\n<mask token>\n\n\ndef oversamp_psf(sed, filt, ovsamp, Nstep, scanum, pos, offsets, addInfo):\n parOn = False\n if hasattr(offsets, 'par'):\n parOn = True\n ZR = ZernRef.data[4 * (scanum - 1):4 * scanum, :]\n wt_L = 0.5 - pos[0] / sca.size\n wt_R = 0.5 + pos[0] / sca.size\n wt_B = 0.5 - pos[1] / sca.size\n wt_T = 0.5 + pos[1] / sca.size\n psi = wt_T * wt_L * ZR[0, :] + wt_B * wt_L * ZR[1, :] + wt_B * wt_R * ZR[\n 2, :] + wt_T * wt_R * ZR[3, :]\n xf = sca.x[scanum - 1] + pos[0]\n yf = sca.y[scanum - 1] + pos[1]\n if parOn:\n psi[3] += offsets.par[offset_index.foc]\n psi[4] += offsets.par[offset_index.astig2]\n psi[5] += offsets.par[offset_index.astig1]\n psi[6] += offsets.par[offset_index.coma2]\n psi[7] += offsets.par[offset_index.coma1]\n psi[3] += (offsets.par[offset_index.focg1] * xf + offsets.par[\n offset_index.focg2] * yf) / sca.scale\n scale_1um = ovsamp / (0.11 * numpy.pi / 648000) / maskfiles.D\n if filt == 'K':\n filter = Filter('STH', [1.95, 2.3])\n elif filt == 'F':\n filter = Filter('interp', FilterData[:, (0, 7)])\n elif filt == 'H':\n filter = Filter('interp', FilterData[:, (0, 6)])\n elif filt == 'W':\n filter 
= Filter('interp', FilterData[:, (0, 5)])\n elif filt == 'J':\n filter = Filter('interp', FilterData[:, (0, 4)])\n elif filt == 'Y':\n filter = Filter('interp', FilterData[:, (0, 3)])\n elif filt == 'Z':\n filter = Filter('interp', FilterData[:, (0, 2)])\n elif filt == 'R':\n filter = Filter('interp', FilterData[:, (0, 1)])\n else:\n print('Error: unknown filter')\n exit()\n la = numpy.linspace(0.4, 2.5, 2101)\n fla = numpy.zeros(2101)\n for i in range(2101):\n fla[i] = filter.Tlambda(la[i])\n scale = scale_1um * numpy.sum(la * fla) / numpy.sum(fla)\n mask = EmptyClass()\n mask.N = 1\n imk = 0\n while (imk < maskfiles.n_lores - 1 and Nstep / scale < maskfiles.N_in /\n 2 ** (imk + 1)):\n imk += 1\n if filt == 'F' or filt == 'K':\n mask.spline = maskfiles.i_full[scanum - 1 + maskfiles.nSCA * imk]\n else:\n mask.spline = maskfiles.i_rim[scanum - 1 + maskfiles.nSCA * imk]\n if hasattr(addInfo, 'ctr'):\n d = 0.5 * (1 - 1 / ovsamp)\n psi[1:3] -= (addInfo.ctr + d) * ovsamp / scale_1um / 4.0\n output = poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo)\n Cxx = Cyy = 0.09\n Cxy = 0.0\n if parOn:\n Cxx = 0.09 + offsets.par[offset_index.jxx]\n Cxy = offsets.par[offset_index.jxy]\n Cyy = 0.09 + offsets.par[offset_index.jyy]\n output_fft = numpy.fft.fft2(output)\n kx = numpy.zeros((Nstep, Nstep))\n ky = numpy.zeros((Nstep, Nstep))\n for i in range(-Nstep // 2, Nstep // 2):\n kx[:, i] = abs(i)\n ky[i, :] = abs(i)\n kx *= 2.0 * numpy.pi * ovsamp / Nstep\n ky *= 2.0 * numpy.pi * ovsamp / Nstep\n output_fft = output_fft * numpy.exp(-Cxx * kx ** 2 / 2.0 - Cyy * ky ** \n 2 / 2.0 - Cxy * kx * ky)\n output = numpy.real(numpy.fft.ifft2(output_fft))\n return output\n\n\n<mask token>\n\n\ndef postage_stamp(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD\n ov = OV_STD\n if hasattr(addInfo, 'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n N = N // 2\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo\n ) * 
addInfo.F\n out = numpy.zeros((psSize, psSize))\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n out[j, i] += numpy.sum(bigStamp[y:y + ov, x:x + ov])\n if hasattr(addInfo, 'vtpe'):\n out[j, i] += addInfo.vtpe * numpy.sum(bigStamp[y + ov:y + 2 *\n ov, x:x + ov])\n if hasattr(addInfo, 'many'):\n out = numpy.zeros((25, psSize, psSize))\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n for k in range(25):\n dy = k % 5 - 2\n dx = k // 5 - 2\n out[k, j, i] += numpy.sum(bigStamp[y + dy:y + dy + ov, \n x + dx:x + dx + ov])\n if hasattr(addInfo, 'bfe'):\n if hasattr(addInfo, 'many'):\n print('Error -- cannot do both bfe and many in postage_stamp')\n exit()\n dout = numpy.zeros_like(out)\n ah = 0\n if hasattr(addInfo, 'bfe_a'):\n ah += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'):\n ah += addInfo.bfe_aplus\n for i in range(psSize - 1):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n shift = ov * ah * (out[j, i + 1] - out[j, i]) / 2.0\n if hasattr(addInfo, 'bfe_overwrite'):\n shift = ov * ah * (addInfo.stamp_in[j, i + 1] - addInfo\n .stamp_in[j, i]) / 2.0\n mflux = numpy.sum(bigStamp[y:y + ov, x + ov - 1:x + ov + 1]\n ) / 2.0\n dout[j, i] += shift * mflux\n dout[j, i + 1] -= shift * mflux\n av = 0\n if hasattr(addInfo, 'bfe_a'):\n av += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'):\n av -= addInfo.bfe_aplus\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize - 1):\n y = N // 2 + (j - psSize // 2) * ov\n shift = ov * av * (out[j + 1, i] - out[j, i]) / 2.0\n if hasattr(addInfo, 'bfe_overwrite'):\n shift = ov * av * (addInfo.stamp_in[j + 1, i] - addInfo\n .stamp_in[j, i]) / 2.0\n mflux = numpy.sum(bigStamp[y + ov - 1:y + ov + 1, x:x + ov]\n ) / 2.0\n dout[j, i] += shift * mflux\n dout[j + 1, i] -= shift * mflux\n out += 
dout\n if hasattr(addInfo, 'bfe_overwrite'):\n out = dout\n return out\n\n\ndef psfmoments(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD\n ov = OV_STD\n if hasattr(addInfo, 'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n N = N // 2\n addInfoX = copy.deepcopy(addInfo)\n addInfoX.ctr = numpy.zeros(2)\n addInfoX.F = 1.0\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)\n bigStamp = convolve(bigStamp, numpy.ones((ov, ov)), mode='full', method\n ='direct') / ov ** 2\n Np = N + ov - 1\n mom = numpy.asarray([1, 0, 0, 4 * ov ** 2, 0, 4 * ov ** 2]).astype(numpy\n .float64)\n newmom = numpy.zeros_like(mom)\n con = 0.5\n xx1 = numpy.tile(numpy.linspace(-(Np - 1) / 2.0, (Np - 1) / 2.0, Np), (\n Np, 1))\n yy1 = numpy.copy(xx1.T)\n for iter in range(256):\n det = mom[3] * mom[5] - mom[4] ** 2\n xx = xx1 - mom[1]\n yy = yy1 - mom[2]\n G = numpy.exp((-mom[5] * xx ** 2 + 2 * mom[4] * xx * yy - mom[3] * \n yy ** 2) / 2.0 / det) * bigStamp\n newmom[0] = numpy.sum(G)\n newmom[1] = numpy.sum(G * xx)\n newmom[2] = numpy.sum(G * yy)\n newmom[3] = numpy.sum(G * xx ** 2)\n newmom[4] = numpy.sum(G * xx * yy)\n newmom[5] = numpy.sum(G * yy ** 2)\n mom[0] = 2 * newmom[0]\n err = newmom[1:] / newmom[0]\n err[-3:] -= mom[-3:] / 2.0\n mom[1:] += err * con\n return numpy.array([mom[0], mom[1] / ov, mom[2] / ov, (mom[3] + mom[5]) /\n ov ** 2, (mom[3] - mom[5]) / (mom[3] + mom[5]), 2 * mom[4] / (mom[3\n ] + mom[5])])\n\n\ndef chi2_postage_stamp(obs, theory, var):\n obs2 = numpy.maximum(obs + var, 1e-24)\n return numpy.sum(theory + var - obs2 - obs2 * numpy.log((theory + var) /\n obs2)) * 2\n",
"step-4": "<mask token>\n\n\ndef RectBivariateSplineZero(y1, x1, map1, kx=1, ky=1):\n return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)\n y2 = numpy.zeros(numpy.size(y1) + 2)\n y2[1:-1] = y1\n y2[0] = 2 * y2[1] - y2[2]\n y2[-1] = 2 * y2[-2] - y2[-3]\n x2 = numpy.zeros(numpy.size(x1) + 2)\n x2[1:-1] = x1\n x2[0] = 2 * x2[1] - x2[2]\n x2[-1] = 2 * x2[-2] - x2[-3]\n map2 = numpy.zeros((numpy.size(y1) + 2, numpy.size(x1) + 2))\n map2[1:-1, 1:-1] = map1\n return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)\n\n\nclass EmptyClass:\n pass\n\n\nclass SpectralEnergyDistribution:\n\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n def Nlambda(self, lambda_):\n if self.type == 'BB':\n T = self.info[0]\n x = 14387.769 / lambda_ / T\n return (2 / lambda_ ** 4 * 299792458000000.0 * 1000000000000.0 *\n numpy.exp(-x) / (1.0 - numpy.exp(-x)) * self.info[1])\n else:\n print('ERROR: Invalid SED type')\n exit()\n\n\nclass Filter:\n\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n def Tlambda(self, lambda_):\n if self.type == 'STH':\n lmin = self.info[0]\n dlmin = lmin * 0.02\n lmax = self.info[1]\n dlmax = lmax * 0.02\n return (numpy.tanh((lambda_ - lmin) / dlmin) - numpy.tanh((\n lambda_ - lmax) / dlmax)) / 2.0\n elif self.type == 'interp':\n return numpy.interp(lambda_, self.info[:, 0], self.info[:, 1])\n else:\n print('ERROR: Invalid filter type')\n exit()\n\n\n<mask token>\n\n\ndef make_mask_annulus(obs, Nstep, scale):\n xx = numpy.tile(numpy.linspace(-1 + 1 / Nstep, 1 - 1 / Nstep, Nstep), (\n Nstep, 1))\n yy = numpy.copy(xx.T)\n rho = numpy.sqrt(xx ** 2 + yy ** 2) * scale\n return numpy.where(numpy.logical_and(rho >= obs, rho < 1), numpy.ones((\n Nstep, Nstep)), numpy.zeros((Nstep, Nstep)))\n\n\ndef test_zernike():\n for k in range(36):\n psi = numpy.zeros(36)\n psi[k] = 1\n N = 5\n M = zernike_map_noll(psi, N, N / (N - 1))\n print(' *** Zernike {:2d} ***'.format(k + 1))\n for j in 
range(N):\n out = ''\n for i in range(N):\n out = out + ' {:10.5f}'.format(M[j, i])\n print(out)\n print('')\n\n\ndef mono_psf(psi, mask, scale, Nstep):\n if hasattr(mask, 'N'):\n if hasattr(mask, 'spline'):\n interp_spline = mask.spline\n else:\n N_in = 2048\n x_in = numpy.linspace(-1 + 1 / N_in, 1 - 1 / N_in, N_in)\n y_in = numpy.copy(x_in)\n interp_spline = RectBivariateSplineZero(y_in, x_in, mask.array,\n kx=1, ky=1)\n x2 = numpy.linspace(-1 + 1 / Nstep, 1 - 1 / Nstep, Nstep) * scale\n y2 = numpy.copy(x2)\n amplitude = interp_spline(y2, x2).astype(numpy.complex128\n ) * make_mask_annulus(0, Nstep, scale)\n else:\n amplitude = make_mask_annulus(0.32, Nstep, scale).astype(numpy.\n complex128)\n amplitude *= numpy.exp(2.0j * numpy.pi * zernike_map_noll(psi, Nstep,\n scale))\n amplitude = numpy.fft.ifft2(amplitude)\n power = numpy.abs(amplitude) ** 2\n newpower = numpy.zeros_like(power)\n newpower[Nstep // 2:Nstep, Nstep // 2:Nstep] = power[0:Nstep // 2, 0:\n Nstep // 2]\n newpower[Nstep // 2:Nstep, 0:Nstep // 2] = power[0:Nstep // 2, Nstep //\n 2:Nstep]\n newpower[0:Nstep // 2, Nstep // 2:Nstep] = power[Nstep // 2:Nstep, 0:\n Nstep // 2]\n newpower[0:Nstep // 2, 0:Nstep // 2] = power[Nstep // 2:Nstep, Nstep //\n 2:Nstep]\n return newpower / numpy.sum(newpower)\n\n\ndef onescut(n):\n array = numpy.ones(n + 1)\n array[0] = array[-1] = 0.5\n return array / n\n\n\ndef gq_weights(sed, filter, nOrder, wlrange):\n lmin = wlrange[0]\n lmax = wlrange[1]\n npts = wlrange[2]\n x = numpy.linspace(lmin, lmax, npts)\n c = numpy.zeros(npts)\n for i in range(npts):\n c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])\n o = numpy.ones(npts)\n I = numpy.zeros(2 * nOrder)\n lctr = numpy.mean(x)\n for k in range(2 * nOrder):\n I[k] = numpy.sum(o * (x - lctr) ** k * c)\n coef = numpy.zeros(nOrder + 1)\n coef[0] = 1.0\n A = numpy.zeros((nOrder, nOrder))\n for k in range(nOrder):\n for j in range(nOrder):\n A[k, j] = I[j + k]\n coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]\n p = 
numpy.poly1d(coef)\n xroot = numpy.sort(numpy.real(p.r))\n wroot = numpy.zeros_like(xroot)\n pprime = numpy.polyder(p)\n for i in range(nOrder):\n px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i + 1:])), r=True\n )\n wroot[i] = numpy.sum(px.c[::-1] * I[:nOrder]) / pprime(xroot[i])\n xroot = xroot + lctr\n return xroot, wroot\n\n\ndef poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo):\n hard_lmin = 0.4\n hard_lmax = 2.5\n hard_Nl = 420\n ilmin = hard_Nl - 1\n ilmax = 0\n for il in range(1, hard_Nl):\n wl = hard_lmin + il / hard_Nl * (hard_lmax - hard_lmin)\n if filter.Tlambda(wl) > 0.0001:\n if il < ilmin:\n ilmin = il\n wlmin = wl\n if il > ilmax:\n ilmax = il\n wlmax = wl\n na = ilmin // 6 + 1\n nb = (hard_Nl - ilmax) // 6 + 1\n wl = numpy.concatenate((numpy.linspace(hard_lmin, wlmin, na + 1), numpy\n .linspace(wlmin, wlmax, ilmax - ilmin + 1), numpy.linspace(wlmax,\n hard_lmax, nb + 1)))\n dwl = numpy.concatenate(((wlmin - hard_lmin) * onescut(na), (wlmax -\n wlmin) * onescut(ilmax - ilmin), (hard_lmax - wlmax) * onescut(nb)))\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n wl, dwl = gq_weights(sed, filter, 10, [wlmin, wlmax, ilmax -\n ilmin + 1])\n sumc = 0.0\n output = numpy.zeros((Nstep, Nstep))\n for i in range(numpy.size(wl)):\n c = sed.Nlambda(wl[i]) * filter.Tlambda(wl[i]) * dwl[i]\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n c = dwl[i]\n this_psi = numpy.copy(psi) / wl[i]\n sumc += c\n output += c * mono_psf(this_psi, mask, scale_1um * wl[i], Nstep)\n output /= sumc\n return output\n\n\ndef oversamp_psf(sed, filt, ovsamp, Nstep, scanum, pos, offsets, addInfo):\n parOn = False\n if hasattr(offsets, 'par'):\n parOn = True\n ZR = ZernRef.data[4 * (scanum - 1):4 * scanum, :]\n wt_L = 0.5 - pos[0] / sca.size\n wt_R = 0.5 + pos[0] / sca.size\n wt_B = 0.5 - pos[1] / sca.size\n wt_T = 0.5 + pos[1] / sca.size\n psi = wt_T * wt_L * ZR[0, :] + wt_B * wt_L * ZR[1, :] + wt_B * wt_R * ZR[\n 2, :] + wt_T * wt_R * ZR[3, :]\n 
xf = sca.x[scanum - 1] + pos[0]\n yf = sca.y[scanum - 1] + pos[1]\n if parOn:\n psi[3] += offsets.par[offset_index.foc]\n psi[4] += offsets.par[offset_index.astig2]\n psi[5] += offsets.par[offset_index.astig1]\n psi[6] += offsets.par[offset_index.coma2]\n psi[7] += offsets.par[offset_index.coma1]\n psi[3] += (offsets.par[offset_index.focg1] * xf + offsets.par[\n offset_index.focg2] * yf) / sca.scale\n scale_1um = ovsamp / (0.11 * numpy.pi / 648000) / maskfiles.D\n if filt == 'K':\n filter = Filter('STH', [1.95, 2.3])\n elif filt == 'F':\n filter = Filter('interp', FilterData[:, (0, 7)])\n elif filt == 'H':\n filter = Filter('interp', FilterData[:, (0, 6)])\n elif filt == 'W':\n filter = Filter('interp', FilterData[:, (0, 5)])\n elif filt == 'J':\n filter = Filter('interp', FilterData[:, (0, 4)])\n elif filt == 'Y':\n filter = Filter('interp', FilterData[:, (0, 3)])\n elif filt == 'Z':\n filter = Filter('interp', FilterData[:, (0, 2)])\n elif filt == 'R':\n filter = Filter('interp', FilterData[:, (0, 1)])\n else:\n print('Error: unknown filter')\n exit()\n la = numpy.linspace(0.4, 2.5, 2101)\n fla = numpy.zeros(2101)\n for i in range(2101):\n fla[i] = filter.Tlambda(la[i])\n scale = scale_1um * numpy.sum(la * fla) / numpy.sum(fla)\n mask = EmptyClass()\n mask.N = 1\n imk = 0\n while (imk < maskfiles.n_lores - 1 and Nstep / scale < maskfiles.N_in /\n 2 ** (imk + 1)):\n imk += 1\n if filt == 'F' or filt == 'K':\n mask.spline = maskfiles.i_full[scanum - 1 + maskfiles.nSCA * imk]\n else:\n mask.spline = maskfiles.i_rim[scanum - 1 + maskfiles.nSCA * imk]\n if hasattr(addInfo, 'ctr'):\n d = 0.5 * (1 - 1 / ovsamp)\n psi[1:3] -= (addInfo.ctr + d) * ovsamp / scale_1um / 4.0\n output = poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo)\n Cxx = Cyy = 0.09\n Cxy = 0.0\n if parOn:\n Cxx = 0.09 + offsets.par[offset_index.jxx]\n Cxy = offsets.par[offset_index.jxy]\n Cyy = 0.09 + offsets.par[offset_index.jyy]\n output_fft = numpy.fft.fft2(output)\n kx = numpy.zeros((Nstep, 
Nstep))\n ky = numpy.zeros((Nstep, Nstep))\n for i in range(-Nstep // 2, Nstep // 2):\n kx[:, i] = abs(i)\n ky[i, :] = abs(i)\n kx *= 2.0 * numpy.pi * ovsamp / Nstep\n ky *= 2.0 * numpy.pi * ovsamp / Nstep\n output_fft = output_fft * numpy.exp(-Cxx * kx ** 2 / 2.0 - Cyy * ky ** \n 2 / 2.0 - Cxy * kx * ky)\n output = numpy.real(numpy.fft.ifft2(output_fft))\n return output\n\n\n<mask token>\n\n\ndef postage_stamp(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD\n ov = OV_STD\n if hasattr(addInfo, 'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n N = N // 2\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo\n ) * addInfo.F\n out = numpy.zeros((psSize, psSize))\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n out[j, i] += numpy.sum(bigStamp[y:y + ov, x:x + ov])\n if hasattr(addInfo, 'vtpe'):\n out[j, i] += addInfo.vtpe * numpy.sum(bigStamp[y + ov:y + 2 *\n ov, x:x + ov])\n if hasattr(addInfo, 'many'):\n out = numpy.zeros((25, psSize, psSize))\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n for k in range(25):\n dy = k % 5 - 2\n dx = k // 5 - 2\n out[k, j, i] += numpy.sum(bigStamp[y + dy:y + dy + ov, \n x + dx:x + dx + ov])\n if hasattr(addInfo, 'bfe'):\n if hasattr(addInfo, 'many'):\n print('Error -- cannot do both bfe and many in postage_stamp')\n exit()\n dout = numpy.zeros_like(out)\n ah = 0\n if hasattr(addInfo, 'bfe_a'):\n ah += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'):\n ah += addInfo.bfe_aplus\n for i in range(psSize - 1):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n shift = ov * ah * (out[j, i + 1] - out[j, i]) / 2.0\n if hasattr(addInfo, 'bfe_overwrite'):\n shift = ov * ah * (addInfo.stamp_in[j, i + 1] - addInfo\n .stamp_in[j, i]) / 2.0\n mflux = 
numpy.sum(bigStamp[y:y + ov, x + ov - 1:x + ov + 1]\n ) / 2.0\n dout[j, i] += shift * mflux\n dout[j, i + 1] -= shift * mflux\n av = 0\n if hasattr(addInfo, 'bfe_a'):\n av += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'):\n av -= addInfo.bfe_aplus\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize - 1):\n y = N // 2 + (j - psSize // 2) * ov\n shift = ov * av * (out[j + 1, i] - out[j, i]) / 2.0\n if hasattr(addInfo, 'bfe_overwrite'):\n shift = ov * av * (addInfo.stamp_in[j + 1, i] - addInfo\n .stamp_in[j, i]) / 2.0\n mflux = numpy.sum(bigStamp[y + ov - 1:y + ov + 1, x:x + ov]\n ) / 2.0\n dout[j, i] += shift * mflux\n dout[j + 1, i] -= shift * mflux\n out += dout\n if hasattr(addInfo, 'bfe_overwrite'):\n out = dout\n return out\n\n\ndef psfmoments(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD\n ov = OV_STD\n if hasattr(addInfo, 'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n N = N // 2\n addInfoX = copy.deepcopy(addInfo)\n addInfoX.ctr = numpy.zeros(2)\n addInfoX.F = 1.0\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)\n bigStamp = convolve(bigStamp, numpy.ones((ov, ov)), mode='full', method\n ='direct') / ov ** 2\n Np = N + ov - 1\n mom = numpy.asarray([1, 0, 0, 4 * ov ** 2, 0, 4 * ov ** 2]).astype(numpy\n .float64)\n newmom = numpy.zeros_like(mom)\n con = 0.5\n xx1 = numpy.tile(numpy.linspace(-(Np - 1) / 2.0, (Np - 1) / 2.0, Np), (\n Np, 1))\n yy1 = numpy.copy(xx1.T)\n for iter in range(256):\n det = mom[3] * mom[5] - mom[4] ** 2\n xx = xx1 - mom[1]\n yy = yy1 - mom[2]\n G = numpy.exp((-mom[5] * xx ** 2 + 2 * mom[4] * xx * yy - mom[3] * \n yy ** 2) / 2.0 / det) * bigStamp\n newmom[0] = numpy.sum(G)\n newmom[1] = numpy.sum(G * xx)\n newmom[2] = numpy.sum(G * yy)\n newmom[3] = numpy.sum(G * xx ** 2)\n newmom[4] = numpy.sum(G * xx * yy)\n newmom[5] = numpy.sum(G * yy ** 2)\n mom[0] = 2 * newmom[0]\n err = newmom[1:] / newmom[0]\n err[-3:] -= 
mom[-3:] / 2.0\n mom[1:] += err * con\n return numpy.array([mom[0], mom[1] / ov, mom[2] / ov, (mom[3] + mom[5]) /\n ov ** 2, (mom[3] - mom[5]) / (mom[3] + mom[5]), 2 * mom[4] / (mom[3\n ] + mom[5])])\n\n\ndef chi2_postage_stamp(obs, theory, var):\n obs2 = numpy.maximum(obs + var, 1e-24)\n return numpy.sum(theory + var - obs2 - obs2 * numpy.log((theory + var) /\n obs2)) * 2\n",
"step-5": "import numpy\nimport numpy.fft\nimport numpy.linalg\nimport copy\nfrom astropy.io import fits\nfrom scipy.interpolate import RectBivariateSpline\nfrom scipy.signal import convolve\nimport offset_index\n\n# some basic definitions\npsSize = 9 # psSize x psSize postage stamps of stars\n\n# zero padded RectBivariateSpline, if on\ndef RectBivariateSplineZero(y1,x1,map1,kx=1,ky=1):\n return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)\n y2 = numpy.zeros(numpy.size(y1)+2)\n y2[1:-1] = y1\n y2[0] = 2*y2[1]-y2[2]\n y2[-1] = 2*y2[-2]-y2[-3]\n x2 = numpy.zeros(numpy.size(x1)+2)\n x2[1:-1] = x1\n x2[0] = 2*x2[1]-x2[2]\n x2[-1] = 2*x2[-2]-x2[-3]\n map2 = numpy.zeros((numpy.size(y1)+2, numpy.size(x1)+2))\n map2[1:-1,1:-1] = map1\n return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)\n\nclass EmptyClass():\n pass\n\n# spectral energy distribution class\nclass SpectralEnergyDistribution():\n\n # make an SED -- several options for type\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n # get Nlambda (photons/m^2/s/um) at lambda_ (um)\n def Nlambda(self, lambda_):\n\n # blackbody, info = [T (K), solidangle]\n if self.type=='BB':\n T = self.info[0]\n x = 14387.769/lambda_/T # hc/(kTlambda)\n return(2/lambda_**4*2.99792458e14*1e12*numpy.exp(-x)/(1.-numpy.exp(-x))*self.info[1])\n # the 1e12 is the conversion from um^2 -> m^2\n else:\n print('ERROR: Invalid SED type')\n exit()\n\n# filter class\nclass Filter():\n\n # make a filter -- several options for type\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n # get transmission\n def Tlambda(self, lambda_):\n\n # smoothed tophat\n if self.type=='STH':\n lmin = self.info[0]; dlmin = lmin*.02\n lmax = self.info[1]; dlmax = lmax*.02\n return((numpy.tanh((lambda_-lmin)/dlmin)-numpy.tanh((lambda_-lmax)/dlmax))/2.)\n # interpolated file\n # info shape (N,2) -- info[:,0] = wavelength, info[:,1] = throughput\n elif self.type=='interp':\n 
return(numpy.interp(lambda_, self.info[:,0], self.info[:,1]))\n else:\n print('ERROR: Invalid filter type')\n exit()\n\n# load mask files\nmaskfiles = EmptyClass()\nmaskfiles.D = 2292981.05344 # um\nmaskfiles.rim = []\nmaskfiles.full = []\nmaskfiles.i_rim = []\nmaskfiles.i_full = []\nmaskfiles.nSCA = 18\nfor k in range(18):\n inFile = fits.open('pupils/SCA{:d}_rim_mask.fits'.format(k+1))\n maskfiles.rim += [numpy.copy(inFile[0].data[::-1,:])]\n inFile.close()\n inFile = fits.open('pupils/SCA{:d}_full_mask.fits'.format(k+1))\n maskfiles.full += [numpy.copy(inFile[0].data[::-1,:])]\n inFile.close()\n\n # normalize\n maskfiles.rim[k] /= numpy.amax(maskfiles.rim[k])\n maskfiles.full[k] /= numpy.amax(maskfiles.full[k])\n\n N_in = maskfiles.N_in = 2048\n x_in = numpy.linspace(-1+1/N_in,1-1/N_in,N_in)\n y_in = numpy.copy(x_in)\n interp_spline = RectBivariateSplineZero(y_in, x_in, maskfiles.rim[k], kx=1, ky=1)\n maskfiles.i_rim += [interp_spline]\n interp_spline = RectBivariateSplineZero(y_in, x_in, maskfiles.full[k], kx=1, ky=1)\n maskfiles.i_full += [interp_spline]\n\n # lower resolution masks\n maskfiles.n_lores = 7\n for ku in range(1,maskfiles.n_lores):\n N2 = N_in//2**ku\n x_in = numpy.linspace(-1+1/N2,1-1/N2,N2)\n y_in = numpy.copy(x_in)\n interp_spline = RectBivariateSplineZero(y_in, x_in, numpy.mean(maskfiles.rim[k].reshape(N2,2**ku,N2,2**ku), axis=(1,3)), kx=1, ky=1)\n maskfiles.i_rim += [interp_spline]\n interp_spline = RectBivariateSplineZero(y_in, x_in, numpy.mean(maskfiles.full[k].reshape(N2,2**ku,N2,2**ku), axis=(1,3)), kx=1, ky=1)\n maskfiles.i_full += [interp_spline]\n\n# SCA locations\nsca = EmptyClass()\nsca.size = 40.88 # mm\nsca.x = numpy.asarray([-22.14, -22.29, -22.44, -66.42, -66.92, -67.42, -110.70, -111.48, -112.64,\n 22.14, 22.29, 22.44, 66.42, 66.92, 67.42, 110.70, 111.48, 112.64])\nsca.y = numpy.asarray([12.15, -37.03, -82.06, 20.90, -28.28, -73.06, 42.20, 13.46, -51.06,\n 12.15, -37.03, -82.06, 20.90, -28.28, -73.06, 42.20, 13.46, 
-51.06])\nsca.scale = 133.08\n\n# reference Zernikes\nZernRef = EmptyClass()\nZernRef.data = numpy.loadtxt('pupils/zernike_ref.txt')[:,-22:] * 1.38\n\n# filter data\nFilterData = numpy.loadtxt('pupils/filter.dat')\nFilterData[:,1:] /= numpy.pi/4.*(maskfiles.D/1e6)**2\n\n# makes map of Zernikes of a given amplitude\n# amp[0:Namp] = Z1 ... ZNamp\n# on a spacing Ngrid (x, y = -(1-1/Ngrid) .. +(1-1/Ngrid) multiplied by scale)\n#\ndef zernike_map_noll(amp, Ngrid, scale):\n xx = numpy.tile(numpy.linspace(-1+1/Ngrid,1-1/Ngrid,Ngrid), (Ngrid,1))\n yy = numpy.copy(xx.T)\n rho = numpy.sqrt(xx**2+yy**2)*scale\n phi = numpy.arctan2(yy,xx)\n output = numpy.zeros((Ngrid,Ngrid))\n nmax = 0\n namp = numpy.size(amp)\n while namp>(nmax+1)*(nmax+2)//2: nmax+=1\n rpows = numpy.ones((nmax+1,Ngrid,Ngrid))\n trigphi = numpy.ones((2*nmax+1,Ngrid,Ngrid))\n for i in range(1,nmax+1): rpows[i,:,:] = rho**i\n for i in range(0,nmax+1): trigphi[i,:,:] = numpy.cos(i*phi)\n for i in range(1,nmax+1): trigphi[-i,:,:] = numpy.sin(i*phi)\n # loop over Zernikes\n for n in range(nmax+1):\n for m in range(-n,n+1,2):\n Z = numpy.zeros((Ngrid,Ngrid))\n for k in range((n-abs(m))//2+1):\n coef = (-1)**k * numpy.math.factorial(n-k)/numpy.math.factorial(k) \\\n /numpy.math.factorial((n-m)//2-k)/numpy.math.factorial((n+m)//2-k) \n Z += coef * rpows[n-2*k,:,:]\n #if m>=0:\n # Z *= numpy.cos(m*phi)\n #else:\n # Z *= numpy.sin(-m*phi)\n Z *= trigphi[m,:,:]\n j = n*(n+1)//2 + abs(m)\n if (-1)**j*(m+.5)<0 or m==0: j += 1\n #print(n,m,j)\n factor = numpy.sqrt(n+1)\n if m!=0: factor *= numpy.sqrt(2)\n if j<=namp: output += factor * amp[j-1] * Z\n return(output)\n\n# make annular mask of given obstruction (fraction) and scale\ndef make_mask_annulus(obs, Nstep, scale):\n xx = numpy.tile(numpy.linspace(-1+1/Nstep,1-1/Nstep,Nstep), (Nstep,1))\n yy = numpy.copy(xx.T)\n rho = numpy.sqrt(xx**2+yy**2)*scale\n return(numpy.where(numpy.logical_and(rho>=obs,rho<1),numpy.ones((Nstep,Nstep)),numpy.zeros((Nstep,Nstep))))\n\ndef 
test_zernike():\n for k in range(36):\n psi = numpy.zeros(36)\n psi[k] = 1\n N=5\n M = zernike_map_noll(psi, N, N/(N-1))\n print(' *** Zernike {:2d} ***'.format(k+1))\n for j in range(N):\n out = ''\n for i in range(N):\n out = out + ' {:10.5f}'.format(M[j,i])\n print(out)\n print('')\n\n# psi is a vector of Zernikes, in wavelengths\n# mask information: (currently none)\n# scale = sampling (points per lambda/D)\n# Nstep = # grid points\n# output normalized to sum to 1\ndef mono_psf(psi, mask, scale, Nstep):\n if hasattr(mask, 'N'):\n if hasattr(mask, 'spline'):\n interp_spline = mask.spline\n else:\n N_in = 2048\n x_in = numpy.linspace(-1+1/N_in,1-1/N_in,N_in)\n y_in = numpy.copy(x_in)\n interp_spline = RectBivariateSplineZero(y_in, x_in, mask.array, kx=1, ky=1)\n x2 = numpy.linspace(-1+1/Nstep,1-1/Nstep,Nstep)*scale\n y2 = numpy.copy(x2)\n amplitude = interp_spline(y2,x2).astype(numpy.complex128) * make_mask_annulus(0, Nstep, scale)\n else:\n amplitude = make_mask_annulus(.32, Nstep, scale).astype(numpy.complex128)\n amplitude *= numpy.exp(2j * numpy.pi * zernike_map_noll(psi, Nstep, scale))\n amplitude = numpy.fft.ifft2(amplitude)\n power = numpy.abs(amplitude)**2\n # shift to center\n newpower = numpy.zeros_like(power)\n newpower[Nstep//2:Nstep,Nstep//2:Nstep] = power[0:Nstep//2,0:Nstep//2]\n newpower[Nstep//2:Nstep,0:Nstep//2] = power[0:Nstep//2,Nstep//2:Nstep]\n newpower[0:Nstep//2,Nstep//2:Nstep] = power[Nstep//2:Nstep,0:Nstep//2]\n newpower[0:Nstep//2,0:Nstep//2] = power[Nstep//2:Nstep,Nstep//2:Nstep]\n return(newpower/numpy.sum(newpower))\n\n# helper function\ndef onescut(n):\n array = numpy.ones((n+1))\n array[0] = array[-1] = .5\n return(array/n)\n\n# Gaussian quadrature weights across a filter\n# sed = spectral energy distribution\n# filter = filter information (incl. 
bandpass)\n# nOrder = order of polynomial (number of nodes)\n# wlrange = [lmin,lmax,npts] in um\n#\n# returns wavelengths, weights\ndef gq_weights(sed, filter, nOrder, wlrange):\n # unpack info\n lmin = wlrange[0]; lmax = wlrange[1]; npts = wlrange[2]\n\n # build integrals I_k = int x^k S(x) F(x) dx\n x = numpy.linspace(lmin,lmax,npts)\n c = numpy.zeros((npts))\n for i in range(npts):\n c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])\n o = numpy.ones((npts))\n I = numpy.zeros((2*nOrder))\n lctr = numpy.mean(x)\n for k in range(2*nOrder):\n I[k] = numpy.sum(o*(x-lctr)**k*c)\n # orthogonal polynomial p_n\n # require sum_{j=0}^n coef_{n-j} I_{j+k} = 0 or\n # sum_{j=0}^{n-1} coef_{n-j} I_{j+k} = -I_{n+k} for k = 0 .. n-1\n coef = numpy.zeros((nOrder+1))\n coef[0] = 1.\n A = numpy.zeros((nOrder,nOrder))\n for k in range(nOrder):\n for j in range(nOrder):\n A[k,j] = I[j+k]\n coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]\n p = numpy.poly1d(coef)\n xroot = numpy.sort(numpy.real(p.r))\n wroot = numpy.zeros_like(xroot)\n pprime = numpy.polyder(p)\n for i in range(nOrder):\n px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i+1:])), r=True)\n wroot[i] = numpy.sum(px.c[::-1]*I[:nOrder]) / pprime(xroot[i])\n xroot = xroot + lctr\n return xroot,wroot\n\n# psi is a vector of Zernikes, in microns\n# mask information: (currently none)\n# sed = spectral energy distribution\n# scale = sampling (points per lambda/D @ 1 um)\n# Nstep = # grid points\n# filter = filter information (incl. 
bandpass)\n# addInfo = class for general additional information\n# output normalized to sum to 1\ndef poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo):\n\n # integration steps\n hard_lmin = 0.4\n hard_lmax = 2.5\n hard_Nl = 420\n\n ilmin = hard_Nl-1; ilmax = 0\n for il in range(1,hard_Nl):\n wl = hard_lmin + il/hard_Nl*(hard_lmax-hard_lmin)\n if filter.Tlambda(wl)>1e-4:\n if il<ilmin:\n ilmin=il\n wlmin=wl\n if il>ilmax:\n ilmax=il\n wlmax=wl\n na = ilmin//6 + 1\n nb = (hard_Nl-ilmax)//6 + 1\n wl = numpy.concatenate((numpy.linspace(hard_lmin,wlmin,na+1), numpy.linspace(wlmin,wlmax,ilmax-ilmin+1), numpy.linspace(wlmax,hard_lmax,nb+1)))\n dwl = numpy.concatenate(((wlmin-hard_lmin)*onescut(na), (wlmax-wlmin)*onescut(ilmax-ilmin), (hard_lmax-wlmax)*onescut(nb)))\n #print(wl,dwl,numpy.size(wl),numpy.size(dwl))\n\n # reduced coverage\n if hasattr(addInfo,'FastMode'):\n if addInfo.FastMode:\n wl, dwl = gq_weights(sed, filter, 10, [wlmin,wlmax,ilmax-ilmin+1])\n\n # make output PSF\n sumc = 0.\n output = numpy.zeros((Nstep,Nstep))\n for i in range(numpy.size(wl)):\n c = sed.Nlambda(wl[i]) * filter.Tlambda(wl[i]) * dwl[i]\n if hasattr(addInfo,'FastMode'):\n if addInfo.FastMode: c = dwl[i]\n this_psi = numpy.copy(psi)/wl[i] # convert from um -> wavelengths of wavefront\n sumc += c\n output += c * mono_psf(this_psi, mask, scale_1um*wl[i], Nstep)\n #print('{:6.4f} {:11.5E}'.format(wl[i],filter.Tlambda(wl[i])))\n output /= sumc\n\n return(output)\n\n# make oversampled PSF at given SCA, position\n#\n# sed = source SED\n# filt = filter (letter: RZYJHFK)\n# ovsamp = oversampling factor\n# Nstep = number of samples in each axis\n# scanum = SCA number (1..18)\n# pos = (x,y) position on SCA in mm (0,0)=center\n# offsets = adjustment parameters\n# .par -> offset parameters\n# addInfo = additional information class:\n# .ctr -> centroid (dx,dy) \ndef oversamp_psf(sed, filt, ovsamp, Nstep, scanum, pos, offsets, addInfo):\n\n # get information\n parOn = False\n if 
hasattr(offsets, 'par'): parOn = True\n\n # get Zernikes in microns\n ZR = ZernRef.data[4*(scanum-1):4*scanum,:]\n wt_L = .5 - pos[0]/sca.size\n wt_R = .5 + pos[0]/sca.size\n wt_B = .5 - pos[1]/sca.size\n wt_T = .5 + pos[1]/sca.size\n psi = wt_T*wt_L*ZR[0,:] + wt_B*wt_L*ZR[1,:] + wt_B*wt_R*ZR[2,:] + wt_T*wt_R*ZR[3,:]\n\n xf = sca.x[scanum-1] + pos[0]\n yf = sca.y[scanum-1] + pos[1]\n\n # Zernike offsets\n if parOn:\n psi[3] += offsets.par[offset_index.foc ]\n psi[4] += offsets.par[offset_index.astig2]\n psi[5] += offsets.par[offset_index.astig1]\n\n psi[6] += offsets.par[offset_index.coma2]\n psi[7] += offsets.par[offset_index.coma1]\n\n psi[3] += (offsets.par[offset_index.focg1]*xf + offsets.par[offset_index.focg2]*yf)/sca.scale\n\n scale_1um = ovsamp / (.11*numpy.pi/648000) / maskfiles.D\n #print(scale_1um)\n\n # filter curves\n if filt=='K':\n filter = Filter('STH', [1.95,2.30])\n elif filt=='F':\n filter = Filter('interp', FilterData[:,(0,7)])\n elif filt=='H':\n filter = Filter('interp', FilterData[:,(0,6)])\n elif filt=='W':\n filter = Filter('interp', FilterData[:,(0,5)])\n elif filt=='J':\n filter = Filter('interp', FilterData[:,(0,4)])\n elif filt=='Y':\n filter = Filter('interp', FilterData[:,(0,3)])\n elif filt=='Z':\n filter = Filter('interp', FilterData[:,(0,2)])\n elif filt=='R':\n filter = Filter('interp', FilterData[:,(0,1)])\n else:\n print('Error: unknown filter')\n exit()\n\n la = numpy.linspace(.4, 2.5, 2101)\n fla = numpy.zeros(2101)\n for i in range(2101): fla[i] = filter.Tlambda(la[i])\n scale = scale_1um*numpy.sum(la*fla)/numpy.sum(fla)\n\n # get the mask\n mask = EmptyClass(); mask.N=1\n imk = 0\n while imk<maskfiles.n_lores-1 and Nstep/scale<maskfiles.N_in/2**(imk+1): imk+=1\n #print(' *** ', Nstep, scale, scale/scale_1um, imk)\n if filt=='F' or filt=='K':\n mask.spline = maskfiles.i_full[scanum-1 + maskfiles.nSCA*imk]\n else:\n mask.spline = maskfiles.i_rim[scanum-1 + maskfiles.nSCA*imk]\n\n # x & y offsets\n if hasattr(addInfo, 'ctr'):\n 
d = .5*(1-1/ovsamp)\n psi[1:3] -= (addInfo.ctr+d) * ovsamp / scale_1um / 4.\n\n output = poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo)\n\n # smooth\n Cxx = Cyy = .09; Cxy = 0.\n if parOn:\n Cxx = .09 + offsets.par[offset_index.jxx ]\n Cxy = offsets.par[offset_index.jxy ]\n Cyy = .09 + offsets.par[offset_index.jyy ]\n\n output_fft = numpy.fft.fft2(output)\n kx = numpy.zeros((Nstep,Nstep))\n ky = numpy.zeros((Nstep,Nstep))\n for i in range(-Nstep//2, Nstep//2):\n kx[:,i] = abs(i)\n ky[i,:] = abs(i)\n kx *= 2.*numpy.pi*ovsamp/Nstep\n ky *= 2.*numpy.pi*ovsamp/Nstep\n output_fft = output_fft * numpy.exp(-Cxx*kx**2/2. - Cyy*ky**2/2. - Cxy*kx*ky)\n output = numpy.real(numpy.fft.ifft2(output_fft))\n\n return(output)\n\n# parameters for next couple of functions\nN_STD = 1024 # must be a multiple of 4\nOV_STD = 8\n\n# make oversampled PSF at given SCA, position\n#\n# sed = source SED\n# filt = filter (letter: RZYJHFK)\n# scanum = SCA number (1..18)\n# pos = (x,y) position on SCA in mm (0,0)=center\n# offsets = adjustment parameters (placeholder)\n# addInfo = additional information class:\n# .F -> total counts (in e)\n# .ctr -> centroid (dx,dy)\n# .many -> @ 5x5 grid of offsets\n#\n# .bfe = add bfe (can include .bfe_a, .bfe_aplus)\n#\n# .bfe_overwrite => special mode to compute BFE with time dependent PSF\n# .stamp_in = input stamp (so compute BFE from stamp_in *acting on* this PSF)\ndef postage_stamp(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD # must be even\n ov = OV_STD\n if hasattr(addInfo,'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo,'FastMode'):\n if addInfo.FastMode:\n N = N//2\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo) * addInfo.F\n out = numpy.zeros((psSize, psSize))\n for i in range(psSize):\n x = N//2+(i-psSize//2)*ov\n for j in range(psSize):\n y = N//2+(j-psSize//2)*ov\n out[j,i] += numpy.sum(bigStamp[y:y+ov,x:x+ov])\n if hasattr(addInfo, 'vtpe'):\n out[j,i] += addInfo.vtpe * 
numpy.sum(bigStamp[y+ov:y+2*ov,x:x+ov])\n if hasattr(addInfo,'many'):\n out = numpy.zeros((25, psSize, psSize))\n for i in range(psSize):\n x = N//2+(i-psSize//2)*ov\n for j in range(psSize):\n y = N//2+(j-psSize//2)*ov\n for k in range(25):\n dy = k%5 - 2; dx = k//5 - 2\n out[k,j,i] += numpy.sum(bigStamp[y+dy:y+dy+ov,x+dx:x+dx+ov])\n\n # BFE?\n if hasattr(addInfo, 'bfe'):\n if hasattr(addInfo,'many'):\n print('Error -- cannot do both bfe and many in postage_stamp')\n exit()\n dout = numpy.zeros_like(out)\n # horizontal BFE\n ah = 0\n if hasattr(addInfo, 'bfe_a'): ah += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'): ah += addInfo.bfe_aplus\n for i in range(psSize-1):\n x = N//2+(i-psSize//2)*ov\n for j in range(psSize):\n y = N//2+(j-psSize//2)*ov\n shift = ov * ah * (out[j,i+1]-out[j,i]) / 2. # in sub-pixels, average over exposure\n if hasattr(addInfo, 'bfe_overwrite'): shift = ov * ah * (addInfo.stamp_in[j,i+1]-addInfo.stamp_in[j,i]) / 2.\n mflux = numpy.sum(bigStamp[y:y+ov,x+ov-1:x+ov+1])/2.\n dout[j,i] += shift*mflux\n dout[j,i+1] -= shift*mflux\n # vertical BFE\n av = 0\n if hasattr(addInfo, 'bfe_a'): av += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'): av -= addInfo.bfe_aplus\n for i in range(psSize):\n x = N//2+(i-psSize//2)*ov\n for j in range(psSize-1):\n y = N//2+(j-psSize//2)*ov\n shift = ov * av * (out[j+1,i]-out[j,i]) / 2. 
# in sub-pixels, average over exposure\n if hasattr(addInfo, 'bfe_overwrite'): shift = ov * av * (addInfo.stamp_in[j+1,i]-addInfo.stamp_in[j,i]) / 2.\n mflux = numpy.sum(bigStamp[y+ov-1:y+ov+1,x:x+ov])/2.\n dout[j,i] += shift*mflux\n dout[j+1,i] -= shift*mflux\n out+=dout\n\n if hasattr(addInfo, 'bfe_overwrite'): out=dout\n\n return(out)\n\n#\n# same input format but returns moments of the PSF\n# A, xc, yc, T, e1, e2\ndef psfmoments(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD # must be even\n ov = OV_STD\n if hasattr(addInfo,'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo,'FastMode'):\n if addInfo.FastMode:\n N = N//2\n addInfoX = copy.deepcopy(addInfo); addInfoX.ctr = numpy.zeros((2)); addInfoX.F = 1.\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)\n bigStamp = convolve(bigStamp, numpy.ones((ov,ov)), mode='full', method='direct')/ov**2\n Np = N+ov-1\n # moment format: A,x,y,Cxx,Cxy,Cyy\n mom = numpy.asarray([1,0,0,4*ov**2,0,4*ov**2]).astype(numpy.float64)\n newmom = numpy.zeros_like(mom)\n con = .5 # convergence factor\n xx1 = numpy.tile(numpy.linspace(-(Np-1)/2., (Np-1)/2., Np), (Np,1))\n yy1 = numpy.copy(xx1.T)\n for iter in range(256):\n det = mom[3]*mom[5]-mom[4]**2\n xx = xx1-mom[1]\n yy = yy1-mom[2]\n G = numpy.exp((-mom[5]*xx**2 + 2*mom[4]*xx*yy - mom[3]*yy**2)/2./det) * bigStamp\n newmom[0] = numpy.sum(G)\n newmom[1] = numpy.sum(G*xx)\n newmom[2] = numpy.sum(G*yy)\n newmom[3] = numpy.sum(G*xx**2)\n newmom[4] = numpy.sum(G*xx*yy)\n newmom[5] = numpy.sum(G*yy**2)\n mom[0] = 2*newmom[0]\n err = newmom[1:]/newmom[0]; err[-3:] -= mom[-3:]/2.\n mom[1:] += err*con\n return(numpy.array([mom[0], mom[1]/ov, mom[2]/ov, (mom[3]+mom[5])/ov**2, (mom[3]-mom[5])/(mom[3]+mom[5]), 2*mom[4]/(mom[3]+mom[5])]))\n\n# returns chi^2\n# var = read noise variance\ndef chi2_postage_stamp(obs, theory, var):\n obs2 = numpy.maximum(obs+var, 1e-24)\n return(numpy.sum(theory+var-obs2-obs2*numpy.log((theory+var)/obs2))*2)\n",
"step-ids": [
13,
14,
15,
18,
23
]
}
|
[
13,
14,
15,
18,
23
] |
from test.demo_test_case import DemoTestCase
class UserTest(DemoTestCase):
    """Integration tests for authentication and the user-info endpoint."""

    def test_access_secure_area(self):
        """An unauthenticated request to the secure area is rejected."""
        response = self.get('/api/user')
        self.assertEqual(401, response.status_code)

    def test_login_bad_password(self):
        """Logging in with a wrong password yields 401."""
        response = self.post('/api/connect', {'user': 'admin', 'password':
            'badpassword'})
        self.assertEqual(401, response.status_code)

    def test_login_good_password(self):
        """Logging in with valid credentials yields 200."""
        response = self.post('/api/connect', {'user': 'admin', 'password': 'admin'})
        self.assertEqual(200, response.status_code)

    def test_get_info(self):
        """After login, /api/user returns the logged-in user's login name."""
        self.login('admin', 'admin')
        response = self.get('/api/user')
        payload = self.rep_to_dict(response.text)
        self.assertEqual(200, response.status_code)
        self.assertEqual('admin', payload['login'])
|
normal
|
{
"blob_id": "0a1d102075cebee13e25f3eb703811d1e22f53c2",
"index": 1957,
"step-1": "<mask token>\n\n\nclass UserTest(DemoTestCase):\n <mask token>\n\n def test_login_bad_password(self):\n r = self.post('/api/connect', {'user': 'admin', 'password':\n 'badpassword'})\n self.assertEqual(401, r.status_code)\n\n def test_login_good_password(self):\n r = self.post('/api/connect', {'user': 'admin', 'password': 'admin'})\n self.assertEqual(200, r.status_code)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass UserTest(DemoTestCase):\n\n def test_access_secure_area(self):\n r = self.get('/api/user')\n self.assertEqual(401, r.status_code)\n\n def test_login_bad_password(self):\n r = self.post('/api/connect', {'user': 'admin', 'password':\n 'badpassword'})\n self.assertEqual(401, r.status_code)\n\n def test_login_good_password(self):\n r = self.post('/api/connect', {'user': 'admin', 'password': 'admin'})\n self.assertEqual(200, r.status_code)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass UserTest(DemoTestCase):\n\n def test_access_secure_area(self):\n r = self.get('/api/user')\n self.assertEqual(401, r.status_code)\n\n def test_login_bad_password(self):\n r = self.post('/api/connect', {'user': 'admin', 'password':\n 'badpassword'})\n self.assertEqual(401, r.status_code)\n\n def test_login_good_password(self):\n r = self.post('/api/connect', {'user': 'admin', 'password': 'admin'})\n self.assertEqual(200, r.status_code)\n\n def test_get_info(self):\n self.login('admin', 'admin')\n r = self.get('/api/user')\n rep = self.rep_to_dict(r.text)\n self.assertEqual(200, r.status_code)\n self.assertEqual('admin', rep['login'])\n",
"step-4": "from test.demo_test_case import DemoTestCase\n\n\nclass UserTest(DemoTestCase):\n\n def test_access_secure_area(self):\n r = self.get('/api/user')\n self.assertEqual(401, r.status_code)\n\n def test_login_bad_password(self):\n r = self.post('/api/connect', {'user': 'admin', 'password':\n 'badpassword'})\n self.assertEqual(401, r.status_code)\n\n def test_login_good_password(self):\n r = self.post('/api/connect', {'user': 'admin', 'password': 'admin'})\n self.assertEqual(200, r.status_code)\n\n def test_get_info(self):\n self.login('admin', 'admin')\n r = self.get('/api/user')\n rep = self.rep_to_dict(r.text)\n self.assertEqual(200, r.status_code)\n self.assertEqual('admin', rep['login'])\n",
"step-5": null,
"step-ids": [
3,
4,
5,
6
]
}
|
[
3,
4,
5,
6
] |
from typing import List
class Solution:

    def destCity(self, paths: List[List[str]]) ->str:
        """Return the destination city of a line of paths.

        Each element of ``paths`` is ``[cityA, cityB]`` meaning there is a
        direct path from cityA to cityB.  The paths form a single line
        without loops, so the destination is the unique city that never
        appears as a departure city.

        Args:
            paths: non-empty list of [from, to] city pairs.

        Returns:
            The name of the city with no outgoing path.
        """
        # A city is the destination iff it is never departed from.
        departures = {frm for frm, _ in paths}
        return next(to for _, to in paths if to not in departures)
|
normal
|
{
"blob_id": "03cc3bf37ea8d971550a89107161005901d842de",
"index": 2514,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def destCity(self, paths: List[List[str]]) ->str:\n departCity = set()\n destCity = []\n for i in paths:\n if i[1] not in departCity:\n destCity.append(i[1])\n if i[0] in destCity:\n destCity.remove(i[0])\n departCity.add(i[0])\n return destCity[0]\n",
"step-4": "from typing import List\n\n\nclass Solution:\n\n def destCity(self, paths: List[List[str]]) ->str:\n departCity = set()\n destCity = []\n for i in paths:\n if i[1] not in departCity:\n destCity.append(i[1])\n if i[0] in destCity:\n destCity.remove(i[0])\n departCity.add(i[0])\n return destCity[0]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class ZDT2:
    """ZDT2 bi-objective benchmark (non-convex Pareto front).

    f1(x) = x1
    g(x)  = 1 + 9 * sum(x2..xn) / (n - 1)
    f2(x) = g * (1 - (f1 / g)^2)

    Decision variables lie in [0, 1]^n; both objectives are minimized.
    """

    def __init__(self):
        self.dimension = 30  # number of decision variables n
        self.objFuncNum = 2  # number of objectives
        self.isMin = True  # minimization problem
        self.min = np.zeros(self.dimension)
        self.max = np.zeros(self.dimension) + 1
        self.span = self.min, self.max

    def Func(self, X):
        """Evaluate the decision vector X (length = dimension).

        Returns a numpy array [f1, f2].
        """
        Y = np.zeros(2)
        Y[0] = X[0]
        # Bug fix: g sums x2..xn; the old slice X[1:-1] dropped the last variable.
        g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:])
        Y[1] = g * (1 - (Y[0] / g) ** 2)
        return Y
class ZDT3:
    """ZDT3 bi-objective benchmark (disconnected Pareto front).

    f1(x) = x1
    g(x)  = 1 + 9 * sum(x2..xn) / (n - 1)
    f2(x) = g * (1 - sqrt(f1/g) - (f1/g) * sin(10*pi*f1))

    Decision variables lie in [0, 1]^n; both objectives are minimized.
    NOTE(review): this instance uses n = 10 (the classical ZDT3 uses 30) —
    kept as configured here.
    """

    def __init__(self):
        self.dimension = 10  # number of decision variables n
        self.objFuncNum = 2  # number of objectives
        self.isMin = True  # minimization problem
        self.min = np.zeros(self.dimension)
        self.max = np.zeros(self.dimension) + 1
        self.span = self.min, self.max

    def Func(self, X):
        """Evaluate the decision vector X (length = dimension).

        Returns a numpy array [f1, f2].
        """
        Y = np.zeros(2)
        Y[0] = X[0]
        # Bug fix: g sums x2..xn; the old slice X[1:-1] dropped the last variable.
        g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:])
        Y[1] = g * (1 - np.sqrt(Y[0] / g) - Y[0] / g * np.sin(10 * np.pi *
            Y[0]))
        return Y
class ZDT4:
    """ZDT4 bi-objective benchmark (highly multimodal).

    f1(x) = x1
    g(x)  = 1 + 10*(n-1) + sum_{i=2..n} (xi^2 - 10*cos(4*pi*xi))
    f2(x) = g * (1 - sqrt(f1 / g))

    x1 lies in [0, 1]; x2..xn lie in [-5, 5]; both objectives are minimized.
    """

    def __init__(self):
        self.dimension = 10  # number of decision variables n
        self.objFuncNum = 2  # number of objectives
        self.isMin = True  # minimization problem
        self.min = np.zeros(self.dimension) - 5
        self.min[0] = 0  # x1 is bounded to [0, 1], unlike x2..xn
        self.max = np.zeros(self.dimension) + 5
        self.max[0] = 1
        self.span = self.min, self.max

    def Func(self, X):
        """Evaluate the decision vector X (length = dimension).

        Returns a numpy array [f1, f2].
        """
        Y = np.zeros(2)
        Y[0] = X[0]
        # Bug fix: g sums over x2..xn; the old slice X[1:-1] dropped the last variable.
        g = 1 + 10 * (self.dimension - 1) + np.sum(np.power(X[1:], 2) -
            10 * np.cos(4 * np.pi * X[1:]))
        Y[1] = g * (1 - np.sqrt(Y[0] / g))
        return Y
class ZDT6:
    """ZDT6 bi-objective benchmark (non-uniform density, non-convex front).

    f1(x) = 1 - exp(-4*x1) * sin(6*pi*x1)^6
    g(x)  = 1 + 9 * (sum(x2..xn) / (n - 1))^0.25
    f2(x) = g * (1 - (f1 / g)^2)

    Decision variables lie in [0, 1]^n; both objectives are minimized.
    """

    def __init__(self):
        self.dimension = 10  # number of decision variables n
        self.objFuncNum = 2  # number of objectives
        self.isMin = True  # minimization problem
        self.min = np.zeros(self.dimension)
        self.max = np.zeros(self.dimension) + 1
        self.span = self.min, self.max

    def Func(self, X):
        """Evaluate the decision vector X (length = dimension).

        Returns a numpy array [f1, f2].
        """
        Y = np.zeros(2)
        Y[0] = 1 - np.exp(-4 * X[0]) * np.sin(6 * np.pi * X[0]) ** 6
        # Bug fix: g sums x2..xn; the old slice X[1:-1] dropped the last variable.
        # ** binds tighter than *, so this is 1 + 9 * (sum/(n-1))^0.25 as required.
        g = 1 + 9 * np.sum(X[1:] / (self.dimension - 1)) ** 0.25
        Y[1] = g * (1 - (Y[0] / g) ** 2)
        return Y
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ZDT1:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class ZDT2:
def __init__(self):
self.dimension = 30
self.objFuncNum = 2
self.isMin = True
self.min = np.zeros(self.dimension)
self.max = np.zeros(self.dimension) + 1
self.span = self.min, self.max
def Func(self, X):
Y = np.zeros(2)
Y[0] = X[0]
g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])
Y[1] = g * (1 - (Y[0] / g) ** 2)
return Y
class ZDT3:
def __init__(self):
self.dimension = 10
self.objFuncNum = 2
self.isMin = True
self.min = np.zeros(self.dimension)
self.max = np.zeros(self.dimension) + 1
self.span = self.min, self.max
def Func(self, X):
Y = np.zeros(2)
Y[0] = X[0]
g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])
Y[1] = g * (1 - np.sqrt(Y[0] / g) - Y[0] / g * np.sin(10 * np.pi *
Y[0]))
return Y
class ZDT4:
def __init__(self):
self.dimension = 10
self.objFuncNum = 2
self.isMin = True
self.min = np.zeros(self.dimension) - 5
self.min[0] = 0
self.max = np.zeros(self.dimension) + 5
self.max[0] = 1
self.span = self.min, self.max
def Func(self, X):
Y = np.zeros(2)
Y[0] = X[0]
g = 1 + 10 * (self.dimension - 1) + np.sum(np.power(X[1:-1], 2) -
10 * np.cos(4 * np.pi * X[1:-1]))
Y[1] = g * (1 - np.sqrt(Y[0] / g))
return Y
class ZDT6:
def __init__(self):
self.dimension = 10
self.objFuncNum = 2
self.isMin = True
self.min = np.zeros(self.dimension)
self.max = np.zeros(self.dimension) + 1
self.span = self.min, self.max
def Func(self, X):
Y = np.zeros(2)
Y[0] = 1 - np.exp(-4 * X[0]) * np.sin(6 * np.pi * X[0]) ** 6
g = 1 + 9 * np.sum(X[1:-1] / (self.dimension - 1)) ** 0.25
Y[1] = g * (1 - (Y[0] / g) ** 2)
return Y
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ZDT1:
    """ZDT1 bi-objective benchmark (convex Pareto front).

    f1(x) = x1
    g(x)  = 1 + 9 * sum(x2..xn) / (n - 1)
    f2(x) = g * (1 - sqrt(f1 / g))

    Decision variables lie in [0, 1]^n; both objectives are minimized.
    """

    def __init__(self):
        self.dimension = 30  # number of decision variables n
        self.objFuncNum = 2  # number of objectives
        self.isMin = True  # minimization problem
        self.min = np.zeros(self.dimension)
        self.max = np.zeros(self.dimension) + 1
        self.span = self.min, self.max

    def Func(self, X):
        """Evaluate the decision vector X (length = dimension).

        Returns a numpy array [f1, f2].
        """
        Y = np.zeros(2)
        Y[0] = X[0]
        # Bug fix: g sums x2..xn; the old slice X[1:-1] dropped the last variable.
        g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:])
        Y[1] = g * (1 - (Y[0] / g) ** 0.5)
        return Y
class ZDT2:
def __init__(self):
self.dimension = 30
self.objFuncNum = 2
self.isMin = True
self.min = np.zeros(self.dimension)
self.max = np.zeros(self.dimension) + 1
self.span = self.min, self.max
def Func(self, X):
Y = np.zeros(2)
Y[0] = X[0]
g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])
Y[1] = g * (1 - (Y[0] / g) ** 2)
return Y
class ZDT3:
def __init__(self):
self.dimension = 10
self.objFuncNum = 2
self.isMin = True
self.min = np.zeros(self.dimension)
self.max = np.zeros(self.dimension) + 1
self.span = self.min, self.max
def Func(self, X):
Y = np.zeros(2)
Y[0] = X[0]
g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])
Y[1] = g * (1 - np.sqrt(Y[0] / g) - Y[0] / g * np.sin(10 * np.pi *
Y[0]))
return Y
class ZDT4:
def __init__(self):
self.dimension = 10
self.objFuncNum = 2
self.isMin = True
self.min = np.zeros(self.dimension) - 5
self.min[0] = 0
self.max = np.zeros(self.dimension) + 5
self.max[0] = 1
self.span = self.min, self.max
def Func(self, X):
Y = np.zeros(2)
Y[0] = X[0]
g = 1 + 10 * (self.dimension - 1) + np.sum(np.power(X[1:-1], 2) -
10 * np.cos(4 * np.pi * X[1:-1]))
Y[1] = g * (1 - np.sqrt(Y[0] / g))
return Y
class ZDT6:
    """ZDT6 benchmark problem (Zitzler-Deb-Thiele suite).

    Two-objective minimization over x in [0, 1]^10 with a non-uniformly
    distributed, non-convex Pareto-optimal front.
    """

    def __init__(self):
        self.dimension = 10   # number of decision variables
        self.objFuncNum = 2   # number of objectives
        self.isMin = True     # this is a minimization problem
        self.min = np.zeros(self.dimension)       # lower bounds
        self.max = np.zeros(self.dimension) + 1   # upper bounds
        self.span = (self.min, self.max)

    def Func(self, X):
        """Evaluate the objectives for decision vector X.

        X: array-like of length `dimension`, each component in [0, 1].
        Returns a length-2 ndarray [f1, f2].
        """
        Y = np.zeros(2)
        Y[0] = 1 - np.exp(-4 * X[0]) * np.sin(6 * np.pi * X[0]) ** 6
        # Bug fix: g must aggregate x_2..x_n, i.e. X[1:]; the previous
        # X[1:-1] silently dropped the last decision variable.
        g = 1 + 9 * np.sum(X[1:] / (self.dimension - 1)) ** 0.25
        Y[1] = g * (1 - (Y[0] / g) ** 2)
        return Y
# Smoke test: evaluate ZDT1 at the all-ones point when run as a script.
if __name__ == '__main__':
    problem = ZDT1()
    print(problem.Func(np.ones(problem.dimension)))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
class ZDT1:
    """ZDT1 benchmark problem (Zitzler-Deb-Thiele suite).

    Two-objective minimization over x in [0, 1]^30 with a convex
    Pareto-optimal front.
    """

    def __init__(self):
        self.dimension = 30   # number of decision variables
        self.objFuncNum = 2   # number of objectives
        self.isMin = True     # this is a minimization problem
        self.min = np.zeros(self.dimension)       # lower bounds
        self.max = np.zeros(self.dimension) + 1   # upper bounds
        self.span = (self.min, self.max)

    def Func(self, X):
        """Evaluate the objectives for decision vector X.

        X: array-like of length `dimension`, each component in [0, 1].
        Returns a length-2 ndarray [f1, f2].
        """
        Y = np.zeros(2)
        Y[0] = X[0]
        # Bug fix: g must sum over x_2..x_n, i.e. X[1:]; the previous
        # X[1:-1] silently dropped the last decision variable.
        g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:])
        Y[1] = g * (1 - (Y[0] / g) ** 0.5)
        return Y
class ZDT2:
    """ZDT2 benchmark problem (Zitzler-Deb-Thiele suite).

    Two-objective minimization over x in [0, 1]^30 with a non-convex
    Pareto-optimal front.
    """

    def __init__(self):
        self.dimension = 30   # number of decision variables
        self.objFuncNum = 2   # number of objectives
        self.isMin = True     # this is a minimization problem
        self.min = np.zeros(self.dimension)       # lower bounds
        self.max = np.zeros(self.dimension) + 1   # upper bounds
        self.span = (self.min, self.max)

    def Func(self, X):
        """Evaluate the objectives for decision vector X.

        X: array-like of length `dimension`, each component in [0, 1].
        Returns a length-2 ndarray [f1, f2].
        """
        Y = np.zeros(2)
        Y[0] = X[0]
        # Bug fix: g must sum over x_2..x_n, i.e. X[1:]; the previous
        # X[1:-1] silently dropped the last decision variable.
        g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:])
        Y[1] = g * (1 - (Y[0] / g) ** 2)
        return Y
class ZDT3:
    """ZDT3 benchmark problem (Zitzler-Deb-Thiele suite).

    Two-objective minimization with a disconnected Pareto-optimal front.
    NOTE(review): the canonical ZDT3 uses 30 variables; this
    implementation keeps the original's 10 -- confirm intent.
    """

    def __init__(self):
        self.dimension = 10   # number of decision variables
        self.objFuncNum = 2   # number of objectives
        self.isMin = True     # this is a minimization problem
        self.min = np.zeros(self.dimension)       # lower bounds
        self.max = np.zeros(self.dimension) + 1   # upper bounds
        self.span = (self.min, self.max)

    def Func(self, X):
        """Evaluate the objectives for decision vector X.

        X: array-like of length `dimension`, each component in [0, 1].
        Returns a length-2 ndarray [f1, f2].
        """
        Y = np.zeros(2)
        Y[0] = X[0]
        # Bug fix: g must sum over x_2..x_n, i.e. X[1:]; the previous
        # X[1:-1] silently dropped the last decision variable.
        g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:])
        Y[1] = g * (1 - np.sqrt(Y[0] / g) - Y[0] / g * np.sin(10 * np.pi * Y[0]))
        return Y
class ZDT4:
    """ZDT4 benchmark problem (Zitzler-Deb-Thiele suite).

    Two-objective, highly multimodal minimization: x1 in [0, 1], the
    remaining nine variables in [-5, 5].
    """

    def __init__(self):
        self.dimension = 10   # number of decision variables
        self.objFuncNum = 2   # number of objectives
        self.isMin = True     # this is a minimization problem
        # x1 lies in [0, 1]; all other variables lie in [-5, 5].
        self.min = np.zeros(self.dimension) - 5
        self.min[0] = 0
        self.max = np.zeros(self.dimension) + 5
        self.max[0] = 1
        self.span = (self.min, self.max)

    def Func(self, X):
        """Evaluate the objectives for decision vector X.

        X: array-like of length `dimension` within the box bounds.
        Returns a length-2 ndarray [f1, f2].
        """
        Y = np.zeros(2)
        Y[0] = X[0]
        # Bug fix: the Rastrigin-style g term must sum over x_2..x_n,
        # i.e. X[1:]; the previous X[1:-1] dropped the last variable.
        g = 1 + 10 * (self.dimension - 1) + np.sum(np.power(X[1:], 2) - 10 * np.cos(4 * np.pi * X[1:]))
        Y[1] = g * (1 - np.sqrt(Y[0] / g))
        return Y
class ZDT6:
    """ZDT6 benchmark problem (Zitzler-Deb-Thiele suite).

    Two-objective minimization over x in [0, 1]^10 with a non-uniformly
    distributed, non-convex Pareto-optimal front.
    """

    def __init__(self):
        self.dimension = 10   # number of decision variables
        self.objFuncNum = 2   # number of objectives
        self.isMin = True     # this is a minimization problem
        self.min = np.zeros(self.dimension)       # lower bounds
        self.max = np.zeros(self.dimension) + 1   # upper bounds
        self.span = (self.min, self.max)

    def Func(self, X):
        """Evaluate the objectives for decision vector X.

        X: array-like of length `dimension`, each component in [0, 1].
        Returns a length-2 ndarray [f1, f2].
        """
        Y = np.zeros(2)
        Y[0] = 1 - np.exp(-4 * X[0]) * np.sin(6 * np.pi * X[0]) ** 6
        # Bug fix: g must aggregate x_2..x_n, i.e. X[1:]; the previous
        # X[1:-1] silently dropped the last decision variable.
        g = 1 + 9 * np.sum(X[1:] / (self.dimension - 1)) ** 0.25
        Y[1] = g * (1 - (Y[0] / g) ** 2)
        return Y
# Smoke test: evaluate ZDT1 at the all-ones point when run as a script.
if __name__ == '__main__':
    problem = ZDT1()
    print(problem.Func(np.ones(problem.dimension)))
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: Swking
@File : ZDT.py
@Date : 2018/12/28
@Desc :
"""
import numpy as np
class ZDT1:
    """ZDT1 benchmark problem (Zitzler-Deb-Thiele suite).

    Two-objective minimization over x in [0, 1]^30 with a convex
    Pareto-optimal front.
    """

    def __init__(self):
        self.dimension = 30   # number of decision variables
        self.objFuncNum = 2   # number of objectives
        self.isMin = True     # this is a minimization problem
        self.min = np.zeros(self.dimension)       # lower bounds
        self.max = np.zeros(self.dimension) + 1   # upper bounds
        self.span = (self.min, self.max)

    def Func(self, X):
        """Evaluate the objectives for decision vector X.

        X: array-like of length `dimension`, each component in [0, 1].
        Returns a length-2 ndarray [f1, f2].
        """
        Y = np.zeros(2)
        Y[0] = X[0]
        # Bug fix: g must sum over x_2..x_n, i.e. X[1:]; the previous
        # X[1:-1] silently dropped the last decision variable.
        g = 1 + (9 / (self.dimension - 1)) * np.sum(X[1:])
        Y[1] = g * (1 - ((Y[0] / g) ** 0.5))
        return Y
class ZDT2:
    """ZDT2 benchmark problem (Zitzler-Deb-Thiele suite).

    Two-objective minimization over x in [0, 1]^30 with a non-convex
    Pareto-optimal front.
    """

    def __init__(self):
        self.dimension = 30   # number of decision variables
        self.objFuncNum = 2   # number of objectives
        self.isMin = True     # this is a minimization problem
        self.min = np.zeros(self.dimension)       # lower bounds
        self.max = np.zeros(self.dimension) + 1   # upper bounds
        self.span = (self.min, self.max)

    def Func(self, X):
        """Evaluate the objectives for decision vector X.

        X: array-like of length `dimension`, each component in [0, 1].
        Returns a length-2 ndarray [f1, f2].
        """
        Y = np.zeros(2)
        Y[0] = X[0]
        # Bug fix: g must sum over x_2..x_n, i.e. X[1:]; the previous
        # X[1:-1] silently dropped the last decision variable.
        g = 1 + (9 / (self.dimension - 1)) * np.sum(X[1:])
        Y[1] = g * (1 - (Y[0] / g) ** 2)
        return Y
class ZDT3:
    """ZDT3 benchmark problem (Zitzler-Deb-Thiele suite).

    Two-objective minimization with a disconnected Pareto-optimal front.
    NOTE(review): the canonical ZDT3 uses 30 variables; this
    implementation keeps the original's 10 -- confirm intent.
    """

    def __init__(self):
        self.dimension = 10   # number of decision variables
        self.objFuncNum = 2   # number of objectives
        self.isMin = True     # this is a minimization problem
        self.min = np.zeros(self.dimension)       # lower bounds
        self.max = np.zeros(self.dimension) + 1   # upper bounds
        self.span = (self.min, self.max)

    def Func(self, X):
        """Evaluate the objectives for decision vector X.

        X: array-like of length `dimension`, each component in [0, 1].
        Returns a length-2 ndarray [f1, f2].
        """
        Y = np.zeros(2)
        Y[0] = X[0]
        # Bug fix: g must sum over x_2..x_n, i.e. X[1:]; the previous
        # X[1:-1] silently dropped the last decision variable.
        g = 1 + (9 / (self.dimension - 1)) * np.sum(X[1:])
        Y[1] = g * (1 - (np.sqrt(Y[0] / g)) - (Y[0] / g) * np.sin(10 * np.pi * Y[0]))
        return Y
class ZDT4:
    """ZDT4 benchmark problem (Zitzler-Deb-Thiele suite).

    Two-objective, highly multimodal minimization: x1 in [0, 1], the
    remaining nine variables in [-5, 5].
    """

    def __init__(self):
        self.dimension = 10   # number of decision variables
        self.objFuncNum = 2   # number of objectives
        self.isMin = True     # this is a minimization problem
        # x1 lies in [0, 1]; all other variables lie in [-5, 5].
        self.min = np.zeros(self.dimension) - 5
        self.min[0] = 0
        self.max = np.zeros(self.dimension) + 5
        self.max[0] = 1
        self.span = (self.min, self.max)

    def Func(self, X):
        """Evaluate the objectives for decision vector X.

        X: array-like of length `dimension` within the box bounds.
        Returns a length-2 ndarray [f1, f2].
        """
        Y = np.zeros(2)
        Y[0] = X[0]
        # Bug fix: the Rastrigin-style g term must sum over x_2..x_n,
        # i.e. X[1:]; the previous X[1:-1] dropped the last variable.
        g = 1 + 10 * (self.dimension - 1) + np.sum(np.power(X[1:], 2) - 10 * np.cos(4 * np.pi * X[1:]))
        Y[1] = g * (1 - (np.sqrt(Y[0] / g)))
        return Y
class ZDT6:
    """ZDT6 benchmark problem (Zitzler-Deb-Thiele suite).

    Two-objective minimization over x in [0, 1]^10 with a non-uniformly
    distributed, non-convex Pareto-optimal front.
    """

    def __init__(self):
        self.dimension = 10   # number of decision variables
        self.objFuncNum = 2   # number of objectives
        self.isMin = True     # this is a minimization problem
        self.min = np.zeros(self.dimension)       # lower bounds
        self.max = np.zeros(self.dimension) + 1   # upper bounds
        self.span = (self.min, self.max)

    def Func(self, X):
        """Evaluate the objectives for decision vector X.

        X: array-like of length `dimension`, each component in [0, 1].
        Returns a length-2 ndarray [f1, f2].
        """
        Y = np.zeros(2)
        Y[0] = 1 - np.exp(-4 * X[0]) * (np.sin(6 * np.pi * X[0]) ** 6)
        # Bug fix: g must aggregate x_2..x_n, i.e. X[1:]; the previous
        # X[1:-1] silently dropped the last decision variable.
        g = 1 + 9 * (np.sum(X[1:] / (self.dimension - 1)) ** 0.25)
        Y[1] = g * (1 - (Y[0] / g) ** 2)
        return Y
# Smoke test: evaluate ZDT1 at the all-ones point when run as a script.
if __name__ == '__main__':
    problem = ZDT1()
    print(problem.Func(np.ones(problem.dimension)))
|
flexible
|
{
"blob_id": "8ca16947054b681a5f43d8b8029191d031d3a218",
"index": 8352,
"step-1": "<mask token>\n\n\nclass ZDT2:\n\n def __init__(self):\n self.dimension = 30\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - (Y[0] / g) ** 2)\n return Y\n\n\nclass ZDT3:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - np.sqrt(Y[0] / g) - Y[0] / g * np.sin(10 * np.pi *\n Y[0]))\n return Y\n\n\nclass ZDT4:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension) - 5\n self.min[0] = 0\n self.max = np.zeros(self.dimension) + 5\n self.max[0] = 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 10 * (self.dimension - 1) + np.sum(np.power(X[1:-1], 2) - \n 10 * np.cos(4 * np.pi * X[1:-1]))\n Y[1] = g * (1 - np.sqrt(Y[0] / g))\n return Y\n\n\nclass ZDT6:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = 1 - np.exp(-4 * X[0]) * np.sin(6 * np.pi * X[0]) ** 6\n g = 1 + 9 * np.sum(X[1:-1] / (self.dimension - 1)) ** 0.25\n Y[1] = g * (1 - (Y[0] / g) ** 2)\n return Y\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ZDT1:\n <mask token>\n <mask token>\n\n\nclass ZDT2:\n\n def __init__(self):\n self.dimension = 30\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - (Y[0] / g) ** 2)\n return Y\n\n\nclass ZDT3:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - np.sqrt(Y[0] / g) - Y[0] / g * np.sin(10 * np.pi *\n Y[0]))\n return Y\n\n\nclass ZDT4:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension) - 5\n self.min[0] = 0\n self.max = np.zeros(self.dimension) + 5\n self.max[0] = 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 10 * (self.dimension - 1) + np.sum(np.power(X[1:-1], 2) - \n 10 * np.cos(4 * np.pi * X[1:-1]))\n Y[1] = g * (1 - np.sqrt(Y[0] / g))\n return Y\n\n\nclass ZDT6:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = 1 - np.exp(-4 * X[0]) * np.sin(6 * np.pi * X[0]) ** 6\n g = 1 + 9 * np.sum(X[1:-1] / (self.dimension - 1)) ** 0.25\n Y[1] = g * (1 - (Y[0] / g) ** 2)\n return Y\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ZDT1:\n\n def __init__(self):\n self.dimension = 30\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - (Y[0] / g) ** 0.5)\n return Y\n\n\nclass ZDT2:\n\n def __init__(self):\n self.dimension = 30\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - (Y[0] / g) ** 2)\n return Y\n\n\nclass ZDT3:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - np.sqrt(Y[0] / g) - Y[0] / g * np.sin(10 * np.pi *\n Y[0]))\n return Y\n\n\nclass ZDT4:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension) - 5\n self.min[0] = 0\n self.max = np.zeros(self.dimension) + 5\n self.max[0] = 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 10 * (self.dimension - 1) + np.sum(np.power(X[1:-1], 2) - \n 10 * np.cos(4 * np.pi * X[1:-1]))\n Y[1] = g * (1 - np.sqrt(Y[0] / g))\n return Y\n\n\nclass ZDT6:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = 1 - np.exp(-4 * X[0]) * np.sin(6 * np.pi * X[0]) ** 6\n g = 1 + 9 * 
np.sum(X[1:-1] / (self.dimension - 1)) ** 0.25\n Y[1] = g * (1 - (Y[0] / g) ** 2)\n return Y\n\n\nif __name__ == '__main__':\n zdt = ZDT1()\n print(zdt.Func(np.ones(zdt.dimension)))\n",
"step-4": "<mask token>\nimport numpy as np\n\n\nclass ZDT1:\n\n def __init__(self):\n self.dimension = 30\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - (Y[0] / g) ** 0.5)\n return Y\n\n\nclass ZDT2:\n\n def __init__(self):\n self.dimension = 30\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - (Y[0] / g) ** 2)\n return Y\n\n\nclass ZDT3:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - np.sqrt(Y[0] / g) - Y[0] / g * np.sin(10 * np.pi *\n Y[0]))\n return Y\n\n\nclass ZDT4:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension) - 5\n self.min[0] = 0\n self.max = np.zeros(self.dimension) + 5\n self.max[0] = 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 10 * (self.dimension - 1) + np.sum(np.power(X[1:-1], 2) - \n 10 * np.cos(4 * np.pi * X[1:-1]))\n Y[1] = g * (1 - np.sqrt(Y[0] / g))\n return Y\n\n\nclass ZDT6:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = 1 - np.exp(-4 * X[0]) * np.sin(6 * np.pi * X[0]) ** 6\n g = 
1 + 9 * np.sum(X[1:-1] / (self.dimension - 1)) ** 0.25\n Y[1] = g * (1 - (Y[0] / g) ** 2)\n return Y\n\n\nif __name__ == '__main__':\n zdt = ZDT1()\n print(zdt.Func(np.ones(zdt.dimension)))\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@Author: Swking\n@File : ZDT.py\n@Date : 2018/12/28\n@Desc : \n\"\"\"\nimport numpy as np\nclass ZDT1:\n\tdef __init__(self):\n\t\tself.dimension = 30\n\t\tself.objFuncNum = 2\n\t\tself.isMin = True\n\t\tself.min = np.zeros(self.dimension)\n\t\tself.max = np.zeros(self.dimension) + 1\n\t\tself.span = (self.min, self.max)\n\n\tdef Func(self, X):\n\t\tY = np.zeros(2)\n\t\tY[0] = X[0]\n\t\tg = 1 + (9 / (self.dimension - 1)) * np.sum(X[1:-1])\n\t\tY[1] = g * (1 - ((Y[0] / g)**0.5))\n\t\treturn Y\n\nclass ZDT2:\n\tdef __init__(self):\n\t\tself.dimension = 30\n\t\tself.objFuncNum = 2\n\t\tself.isMin = True\n\t\tself.min = np.zeros(self.dimension)\n\t\tself.max = np.zeros(self.dimension) + 1\n\t\tself.span = (self.min, self.max)\n\n\tdef Func(self, X):\n\t\tY = np.zeros(2)\n\t\tY[0] = X[0]\n\t\tg = 1 + (9 / (self.dimension - 1)) * np.sum(X[1:-1])\n\t\tY[1] = g * (1 - (Y[0] / g) ** 2)\n\t\treturn Y\n\n\nclass ZDT3:\n\tdef __init__(self):\n\t\tself.dimension = 10\n\t\tself.objFuncNum = 2\n\t\tself.isMin = True\n\t\tself.min = np.zeros(self.dimension)\n\t\tself.max = np.zeros(self.dimension) + 1\n\t\tself.span = (self.min, self.max)\n\n\tdef Func(self, X):\n\t\tY = np.zeros(2)\n\t\tY[0] = X[0]\n\t\tg = 1 + (9 / (self.dimension - 1)) * np.sum(X[1:-1])\n\t\tY[1] = g * (1 - (np.sqrt(Y[0] / g)) - (Y[0] / g) * np.sin(10 * np.pi * Y[0]))\n\t\treturn Y\n\n\nclass ZDT4:\n\tdef __init__(self):\n\t\tself.dimension = 10\n\t\tself.objFuncNum = 2\n\t\tself.isMin = True\n\t\tself.min = np.zeros(self.dimension) - 5\n\t\tself.min[0] = 0\n\t\tself.max = np.zeros(self.dimension) + 5\n\t\tself.max[0] = 1\n\t\tself.span = (self.min, self.max)\n\n\tdef Func(self, X):\n\t\tY = np.zeros(2)\n\t\tY[0] = X[0]\n\t\tg = 1 + 10 * (self.dimension - 1) + np.sum(np.power(X[1:-1], 2) - 10 * np.cos(4 * np.pi * X[1:-1]))\n\t\tY[1] = g * (1 - (np.sqrt(Y[0] / g)))\n\t\treturn Y\n\n\nclass ZDT6:\n\tdef __init__(self):\n\t\tself.dimension = 
10\n\t\tself.objFuncNum = 2\n\t\tself.isMin = True\n\t\tself.min = np.zeros(self.dimension)\n\t\tself.max = np.zeros(self.dimension) + 1\n\t\tself.span = (self.min, self.max)\n\n\tdef Func(self, X):\n\t\tY = np.zeros(2)\n\t\tY[0] = 1 - np.exp(-4 * X[0]) * (np.sin(6 * np.pi * X[0]) ** 6)\n\t\tg = 1 + 9 * (np.sum(X[1:-1] / (self.dimension - 1)) ** 0.25)\n\t\tY[1] = g * (1 - (Y[0] / g) ** 2)\n\t\treturn Y\n\nif __name__ == '__main__':\n\tzdt = ZDT1()\n\tprint(zdt.Func(np.ones(zdt.dimension)))",
"step-ids": [
12,
13,
16,
17,
18
]
}
|
[
12,
13,
16,
17,
18
] |
# -*- coding: utf-8 -*-
import os
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
class Config:
    """Base application configuration shared by all environments."""
    SECRET_KEY = os.environ.get('SECRET_KEY')
    # GitHub OAuth2 settings
    GITHUB_OAUTH2 = {
        # Values obtained from the GitHub OAuth application settings page
        'client_id': '',
        'client_secret': '',
        'callback_url': '',
        'scope': 'user',
        'auth_url': 'http://github.com/login/oauth/authorize?client_id={client_id}&scope={scope}&state={csrf}'
                    '&redirect_uri={redirect_uri}',
    }
    # Access-token lifetime, in days
    ACCESS_TOKEN_EXP = 30
    # Name of the authentication cookie
    AUTH_COOKIE_NAME = 'token'
    # Elasticsearch endpoint; deployed with docker, so the service name is
    # used as the host (normally this would be an IP address)
    ELASTICSEARCH_URL = "elasticsearch:9200"
    # Scrapyd (spider deployment service) settings
    SCRAPYD_URL = "http://127.0.0.1:6800"
    SCRAPY_PROJECT_NAME = "spider_tophub"
    # APScheduler settings for the crawler scheduler
    JOBSTORES={'default': SQLAlchemyJobStore(url='mysql+pymysql://your_user:your_user_password@mysql:3306/your_databases')}
    JOB_DEFAULTS={
        'coalesce': True,
        'max_instances': 1
    }
class DevelopmentConfig(Config):
    """Development environment configuration."""
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or 'mysql+pymysql://your_user:your_user_password@mysql:3306/your_databases'
    # Disable the Flask-SQLAlchemy event system (avoids its overhead)
    SQLALCHEMY_TRACK_MODIFICATIONS = False
# Named configuration registry; 'default' is the fallback choice.
config = dict(
    development=DevelopmentConfig,
    default=DevelopmentConfig,
)
|
normal
|
{
"blob_id": "af7a124c873dda02ba2a78e85965aa243d791863",
"index": 3432,
"step-1": "# -*- coding: utf-8 -*-\nimport os\nfrom apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore\n\n\nclass Config:\n SECRET_KEY = os.environ.get('SECRET_KEY')\n # github\n GITHUB_OAUTH2 = {\n #github上获取\n 'client_id': '',\n 'client_secret': '',\n 'callback_url': '',\n 'scope': 'user',\n 'auth_url': 'http://github.com/login/oauth/authorize?client_id={client_id}&scope={scope}&state={csrf}'\n '&redirect_uri={redirect_uri}',\n }\n # access_token过期时间设置,单位天\n ACCESS_TOKEN_EXP = 30\n\n # cookie 名称\n AUTH_COOKIE_NAME = 'token'\n\n # elastisearch配置,docker配置,所以host直接使用名称,正常情况为ip\n ELASTICSEARCH_URL = \"elasticsearch:9200\"\n\n # scrapyd配置\n SCRAPYD_URL = \"http://127.0.0.1:6800\"\n SCRAPY_PROJECT_NAME = \"spider_tophub\"\n\n # 爬虫scheduler配置\n JOBSTORES={'default': SQLAlchemyJobStore(url='mysql+pymysql://your_user:your_user_password@mysql:3306/your_databases')}\n JOB_DEFAULTS={\n 'coalesce': True,\n 'max_instances': 1\n }\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or 'mysql+pymysql://your_user:your_user_password@mysql:3306/your_databases'\n # 关闭flask_sqlalchemy事件系统\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n\n\n\n\n\nconfig = {\n 'development': DevelopmentConfig,\n 'default': DevelopmentConfig\n}",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python
# coding: utf-8
# # Lesson 2 Demo 3: Creating Fact and Dimension Tables with Star Schema
#
# <img src="images/postgresSQLlogo.png" width="250" height="250">
# ### Walk through the basics of modeling data using Fact and Dimension tables. In this demo, we will:<br>
# <ol><li>Create both Fact and Dimension tables<li>Show how this is a basic element of the Star Schema.
# ### Import the library
# Note: An error might popup after this command has executed. If it does, read it carefully before ignoring.
# In[ ]:
import psycopg2
# ### Create a connection to the database
# In[ ]:
# Connect to the local "studentdb" database. On failure only the error is
# printed, so later statements would fail with a NameError on `conn`.
try:
    conn = psycopg2.connect("host=127.0.0.1 dbname=studentdb user=student password=student")
except psycopg2.Error as e:
    print("Error: Could not make connection to the Postgres database")
    print(e)
# ### Next use that connection to get a cursor that we will use to execute queries.
# In[ ]:
# Obtain a cursor for executing queries.
try:
    cur = conn.cursor()
except psycopg2.Error as e:
    # Fix: corrected "curser" -> "cursor" in the error message.
    print("Error: Could not get cursor to the Database")
    print(e)

# Autocommit: every statement is committed immediately, so conn.commit()
# never needs to be called after each command.
conn.set_session(autocommit=True)
# ### Let's imagine we work at an online Music Store. There will be many tables in our database but let's just focus on 4 tables around customer purchases.
#
# `Table Name: customer_transactions
# column: Customer Id
# column: Store Id
# column: Spent`
#
# `Table Name: Customer
# column: Customer Id
# column: Name
# column: Rewards`
#
# `Table Name: store
# column: Store Id
# column: State`
#
# `Table Name: items_purchased
# column: customer id
# column: Item Name`
#
# <img src="images/starSchema.png" width="750" height="750">
#
# #### From this representation we can already start to see the makings of a "STAR". We have one fact table (the center of the star) and 3 dimension tables that are coming from it.
# ### Let's create the Fact Table and insert the data into the table
# In[ ]:
# Fact table: one row per purchase (customer id, store id, amount spent).
try:
    cur.execute("CREATE TABLE IF NOT EXISTS customer_transactions (customer_id int, store_id int, spent numeric);")
except psycopg2.Error as e:
    print("Error: Issue creating table")
    print (e)
# Seed the fact table with two sample transactions.
try:
    cur.execute("INSERT INTO customer_transactions (customer_id, store_id, spent) VALUES (%s, %s, %s)", (1, 1, 20.50))
except psycopg2.Error as e:
    print("Error: Inserting Rows")
    print (e)
try:
    cur.execute("INSERT INTO customer_transactions (customer_id, store_id, spent) VALUES (%s, %s, %s)", (2, 1, 35.21))
except psycopg2.Error as e:
    print("Error: Inserting Rows")
    print (e)
# ### Let's create our Dimension Tables and insert data into those tables.
# In[ ]:
# Dimension table: items_purchased maps a customer to the items they bought.
try:
    cur.execute("CREATE TABLE IF NOT EXISTS items_purchased (customer_id int, item_number int, item_name varchar);")
except psycopg2.Error as e:
    print("Error: Issue creating table")
    print (e)
try:
    cur.execute("INSERT INTO items_purchased (customer_id, item_number, item_name) VALUES (%s, %s, %s)", (1, 1, "Rubber Soul"))
except psycopg2.Error as e:
    print("Error: Inserting Rows")
    print (e)
try:
    cur.execute("INSERT INTO items_purchased (customer_id, item_number, item_name) VALUES (%s, %s, %s)", (2, 3, "Let It Be"))
except psycopg2.Error as e:
    print("Error: Inserting Rows")
    print (e)
# Dimension table: store holds the state each store is located in.
try:
    cur.execute("CREATE TABLE IF NOT EXISTS store (store_id int, state varchar);")
except psycopg2.Error as e:
    print("Error: Issue creating table")
    print (e)
try:
    cur.execute("INSERT INTO store (store_id, state) VALUES (%s, %s)", (1, "CA"))
except psycopg2.Error as e:
    print("Error: Inserting Rows")
    print (e)
try:
    cur.execute("INSERT INTO store (store_id, state) VALUES (%s, %s)", (2, "WA"))
except psycopg2.Error as e:
    print("Error: Inserting Rows")
    print (e)
# Dimension table: customer holds the name and rewards flag per customer.
try:
    cur.execute("CREATE TABLE IF NOT EXISTS customer (customer_id int, name varchar, rewards boolean);")
except psycopg2.Error as e:
    print("Error: Issue creating table")
    print (e)
try:
    cur.execute("INSERT INTO customer (customer_id, name, rewards) VALUES (%s, %s, %s)", (1, "Amanda", True))
except psycopg2.Error as e:
    print("Error: Inserting Rows")
    print (e)
try:
    cur.execute("INSERT INTO customer (customer_id, name, rewards) VALUES (%s, %s, %s)", (2, "Toby", False))
except psycopg2.Error as e:
    print("Error: Inserting Rows")
    print (e)
# **We can do a variety of queries on this data easily because of utilizing the fact/dimension and Star Schema**
#
# * _Query 1_: Find all the customers that spent more than 30 dollars, who are they, what did they buy and if they are a rewards member
#
# * _Query 2_: How much did Store 1 sell?
# _Query 1:_ Find all the customers that spent more than 30 dollars, who are they, what did they buy and if they are a rewards member
# In[ ]:
# Query 1: customers who spent more than $30 -- their name, what they
# bought, and whether they are rewards members.
try:
    cur.execute("SELECT name, item_name, rewards FROM ((customer_transactions JOIN customer ON customer.customer_id=customer_transactions.customer_id) JOIN items_purchased ON customer_transactions.customer_id=items_purchased.customer_id) WHERE spent > 30 ;")
except psycopg2.Error as e:
    print("Error: select *")
    print(e)
else:
    # Iterate the cursor directly instead of a manual fetchone() loop; the
    # else-branch also avoids fetching from a query that failed above.
    for row in cur:
        print(row)
# _Query 2:_ How much did Store 1 sell?
# In[ ]:
# Query 2: total amount sold per store.
try:
    cur.execute("SELECT store_id, SUM(spent) FROM customer_transactions GROUP BY store_id;")
except psycopg2.Error as e:
    print("Error: select *")
    print(e)
else:
    # Iterate the cursor directly instead of a manual fetchone() loop; the
    # else-branch also avoids fetching from a query that failed above.
    for row in cur:
        print(row)
# ### Summary: What you can see here is from this elegant schema we were able to get "facts/metrics" from our fact table (how much each store sold), and also information about our customers that will allow us to do more indepth analytics to get answers to business questions by utilizing our fact and dimension tables.
# ### For the sake of the demo, I will drop the tables.
# In[ ]:
# Drop the demo tables so the database is left clean.
try:
    cur.execute("DROP table customer_transactions")
except psycopg2.Error as e:
    print("Error: Dropping table")
    print (e)
try:
    cur.execute("DROP table items_purchased")
except psycopg2.Error as e:
    print("Error: Dropping table")
    print (e)
try:
    cur.execute("DROP table customer")
except psycopg2.Error as e:
    print("Error: Dropping table")
    print (e)
try:
    cur.execute("DROP table store")
except psycopg2.Error as e:
    print("Error: Dropping table")
    print (e)
# ### And finally close your cursor and connection.
# In[ ]:
# Release the cursor and the database connection.
cur.close()
conn.close()
# In[ ]:
|
normal
|
{
"blob_id": "70964ac617847dd4bf4a60a142afc94d0f284a24",
"index": 7621,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n conn = psycopg2.connect(\n 'host=127.0.0.1 dbname=studentdb user=student password=student')\nexcept psycopg2.Error as e:\n print('Error: Could not make connection to the Postgres database')\n print(e)\ntry:\n cur = conn.cursor()\nexcept psycopg2.Error as e:\n print('Error: Could not get curser to the Database')\n print(e)\nconn.set_session(autocommit=True)\ntry:\n cur.execute(\n 'CREATE TABLE IF NOT EXISTS customer_transactions (customer_id int, store_id int, spent numeric);'\n )\nexcept psycopg2.Error as e:\n print('Error: Issue creating table')\n print(e)\ntry:\n cur.execute(\n 'INSERT INTO customer_transactions (customer_id, store_id, spent) VALUES (%s, %s, %s)'\n , (1, 1, 20.5))\nexcept psycopg2.Error as e:\n print('Error: Inserting Rows')\n print(e)\ntry:\n cur.execute(\n 'INSERT INTO customer_transactions (customer_id, store_id, spent) VALUES (%s, %s, %s)'\n , (2, 1, 35.21))\nexcept psycopg2.Error as e:\n print('Error: Inserting Rows')\n print(e)\ntry:\n cur.execute(\n 'CREATE TABLE IF NOT EXISTS items_purchased (customer_id int, item_number int, item_name varchar);'\n )\nexcept psycopg2.Error as e:\n print('Error: Issue creating table')\n print(e)\ntry:\n cur.execute(\n 'INSERT INTO items_purchased (customer_id, item_number, item_name) VALUES (%s, %s, %s)'\n , (1, 1, 'Rubber Soul'))\nexcept psycopg2.Error as e:\n print('Error: Inserting Rows')\n print(e)\ntry:\n cur.execute(\n 'INSERT INTO items_purchased (customer_id, item_number, item_name) VALUES (%s, %s, %s)'\n , (2, 3, 'Let It Be'))\nexcept psycopg2.Error as e:\n print('Error: Inserting Rows')\n print(e)\ntry:\n cur.execute(\n 'CREATE TABLE IF NOT EXISTS store (store_id int, state varchar);')\nexcept psycopg2.Error as e:\n print('Error: Issue creating table')\n print(e)\ntry:\n cur.execute(\n 'INSERT INTO store (store_id, state) VALUES (%s, %s)',\n (1, 'CA'))\nexcept psycopg2.Error as e:\n print('Error: Inserting Rows')\n print(e)\ntry:\n cur.execute(\n 'INSERT INTO store 
(store_id, state) VALUES (%s, %s)',\n (2, 'WA'))\nexcept psycopg2.Error as e:\n print('Error: Inserting Rows')\n print(e)\ntry:\n cur.execute(\n 'CREATE TABLE IF NOT EXISTS customer (customer_id int, name varchar, rewards boolean);'\n )\nexcept psycopg2.Error as e:\n print('Error: Issue creating table')\n print(e)\ntry:\n cur.execute(\n 'INSERT INTO customer (customer_id, name, rewards) VALUES (%s, %s, %s)'\n , (1, 'Amanda', True))\nexcept psycopg2.Error as e:\n print('Error: Inserting Rows')\n print(e)\ntry:\n cur.execute(\n 'INSERT INTO customer (customer_id, name, rewards) VALUES (%s, %s, %s)'\n , (2, 'Toby', False))\nexcept psycopg2.Error as e:\n print('Error: Inserting Rows')\n print(e)\ntry:\n cur.execute(\n 'SELECT name, item_name, rewards FROM ((customer_transactions JOIN customer ON customer.customer_id=customer_transactions.customer_id) JOIN items_purchased ON customer_transactions.customer_id=items_purchased.customer_id) WHERE spent > 30 ;'\n )\nexcept psycopg2.Error as e:\n print('Error: select *')\n print(e)\n<mask token>\nwhile row:\n print(row)\n row = cur.fetchone()\ntry:\n cur.execute(\n 'SELECT store_id, SUM(spent) FROM customer_transactions GROUP BY store_id;'\n )\nexcept psycopg2.Error as e:\n print('Error: select *')\n print(e)\n<mask token>\nwhile row:\n print(row)\n row = cur.fetchone()\ntry:\n cur.execute('DROP table customer_transactions')\nexcept psycopg2.Error as e:\n print('Error: Dropping table')\n print(e)\ntry:\n cur.execute('DROP table items_purchased')\nexcept psycopg2.Error as e:\n print('Error: Dropping table')\n print(e)\ntry:\n cur.execute('DROP table customer')\nexcept psycopg2.Error as e:\n print('Error: Dropping table')\n print(e)\ntry:\n cur.execute('DROP table store')\nexcept psycopg2.Error as e:\n print('Error: Dropping table')\n print(e)\ncur.close()\nconn.close()\n",
"step-3": "<mask token>\ntry:\n conn = psycopg2.connect(\n 'host=127.0.0.1 dbname=studentdb user=student password=student')\nexcept psycopg2.Error as e:\n print('Error: Could not make connection to the Postgres database')\n print(e)\ntry:\n cur = conn.cursor()\nexcept psycopg2.Error as e:\n print('Error: Could not get curser to the Database')\n print(e)\nconn.set_session(autocommit=True)\ntry:\n cur.execute(\n 'CREATE TABLE IF NOT EXISTS customer_transactions (customer_id int, store_id int, spent numeric);'\n )\nexcept psycopg2.Error as e:\n print('Error: Issue creating table')\n print(e)\ntry:\n cur.execute(\n 'INSERT INTO customer_transactions (customer_id, store_id, spent) VALUES (%s, %s, %s)'\n , (1, 1, 20.5))\nexcept psycopg2.Error as e:\n print('Error: Inserting Rows')\n print(e)\ntry:\n cur.execute(\n 'INSERT INTO customer_transactions (customer_id, store_id, spent) VALUES (%s, %s, %s)'\n , (2, 1, 35.21))\nexcept psycopg2.Error as e:\n print('Error: Inserting Rows')\n print(e)\ntry:\n cur.execute(\n 'CREATE TABLE IF NOT EXISTS items_purchased (customer_id int, item_number int, item_name varchar);'\n )\nexcept psycopg2.Error as e:\n print('Error: Issue creating table')\n print(e)\ntry:\n cur.execute(\n 'INSERT INTO items_purchased (customer_id, item_number, item_name) VALUES (%s, %s, %s)'\n , (1, 1, 'Rubber Soul'))\nexcept psycopg2.Error as e:\n print('Error: Inserting Rows')\n print(e)\ntry:\n cur.execute(\n 'INSERT INTO items_purchased (customer_id, item_number, item_name) VALUES (%s, %s, %s)'\n , (2, 3, 'Let It Be'))\nexcept psycopg2.Error as e:\n print('Error: Inserting Rows')\n print(e)\ntry:\n cur.execute(\n 'CREATE TABLE IF NOT EXISTS store (store_id int, state varchar);')\nexcept psycopg2.Error as e:\n print('Error: Issue creating table')\n print(e)\ntry:\n cur.execute(\n 'INSERT INTO store (store_id, state) VALUES (%s, %s)',\n (1, 'CA'))\nexcept psycopg2.Error as e:\n print('Error: Inserting Rows')\n print(e)\ntry:\n cur.execute(\n 'INSERT INTO store 
(store_id, state) VALUES (%s, %s)',\n (2, 'WA'))\nexcept psycopg2.Error as e:\n print('Error: Inserting Rows')\n print(e)\ntry:\n cur.execute(\n 'CREATE TABLE IF NOT EXISTS customer (customer_id int, name varchar, rewards boolean);'\n )\nexcept psycopg2.Error as e:\n print('Error: Issue creating table')\n print(e)\ntry:\n cur.execute(\n 'INSERT INTO customer (customer_id, name, rewards) VALUES (%s, %s, %s)'\n , (1, 'Amanda', True))\nexcept psycopg2.Error as e:\n print('Error: Inserting Rows')\n print(e)\ntry:\n cur.execute(\n 'INSERT INTO customer (customer_id, name, rewards) VALUES (%s, %s, %s)'\n , (2, 'Toby', False))\nexcept psycopg2.Error as e:\n print('Error: Inserting Rows')\n print(e)\ntry:\n cur.execute(\n 'SELECT name, item_name, rewards FROM ((customer_transactions JOIN customer ON customer.customer_id=customer_transactions.customer_id) JOIN items_purchased ON customer_transactions.customer_id=items_purchased.customer_id) WHERE spent > 30 ;'\n )\nexcept psycopg2.Error as e:\n print('Error: select *')\n print(e)\nrow = cur.fetchone()\nwhile row:\n print(row)\n row = cur.fetchone()\ntry:\n cur.execute(\n 'SELECT store_id, SUM(spent) FROM customer_transactions GROUP BY store_id;'\n )\nexcept psycopg2.Error as e:\n print('Error: select *')\n print(e)\nrow = cur.fetchone()\nwhile row:\n print(row)\n row = cur.fetchone()\ntry:\n cur.execute('DROP table customer_transactions')\nexcept psycopg2.Error as e:\n print('Error: Dropping table')\n print(e)\ntry:\n cur.execute('DROP table items_purchased')\nexcept psycopg2.Error as e:\n print('Error: Dropping table')\n print(e)\ntry:\n cur.execute('DROP table customer')\nexcept psycopg2.Error as e:\n print('Error: Dropping table')\n print(e)\ntry:\n cur.execute('DROP table store')\nexcept psycopg2.Error as e:\n print('Error: Dropping table')\n print(e)\ncur.close()\nconn.close()\n",
"step-4": "import psycopg2\ntry:\n conn = psycopg2.connect(\n 'host=127.0.0.1 dbname=studentdb user=student password=student')\nexcept psycopg2.Error as e:\n print('Error: Could not make connection to the Postgres database')\n print(e)\ntry:\n cur = conn.cursor()\nexcept psycopg2.Error as e:\n print('Error: Could not get curser to the Database')\n print(e)\nconn.set_session(autocommit=True)\ntry:\n cur.execute(\n 'CREATE TABLE IF NOT EXISTS customer_transactions (customer_id int, store_id int, spent numeric);'\n )\nexcept psycopg2.Error as e:\n print('Error: Issue creating table')\n print(e)\ntry:\n cur.execute(\n 'INSERT INTO customer_transactions (customer_id, store_id, spent) VALUES (%s, %s, %s)'\n , (1, 1, 20.5))\nexcept psycopg2.Error as e:\n print('Error: Inserting Rows')\n print(e)\ntry:\n cur.execute(\n 'INSERT INTO customer_transactions (customer_id, store_id, spent) VALUES (%s, %s, %s)'\n , (2, 1, 35.21))\nexcept psycopg2.Error as e:\n print('Error: Inserting Rows')\n print(e)\ntry:\n cur.execute(\n 'CREATE TABLE IF NOT EXISTS items_purchased (customer_id int, item_number int, item_name varchar);'\n )\nexcept psycopg2.Error as e:\n print('Error: Issue creating table')\n print(e)\ntry:\n cur.execute(\n 'INSERT INTO items_purchased (customer_id, item_number, item_name) VALUES (%s, %s, %s)'\n , (1, 1, 'Rubber Soul'))\nexcept psycopg2.Error as e:\n print('Error: Inserting Rows')\n print(e)\ntry:\n cur.execute(\n 'INSERT INTO items_purchased (customer_id, item_number, item_name) VALUES (%s, %s, %s)'\n , (2, 3, 'Let It Be'))\nexcept psycopg2.Error as e:\n print('Error: Inserting Rows')\n print(e)\ntry:\n cur.execute(\n 'CREATE TABLE IF NOT EXISTS store (store_id int, state varchar);')\nexcept psycopg2.Error as e:\n print('Error: Issue creating table')\n print(e)\ntry:\n cur.execute(\n 'INSERT INTO store (store_id, state) VALUES (%s, %s)',\n (1, 'CA'))\nexcept psycopg2.Error as e:\n print('Error: Inserting Rows')\n print(e)\ntry:\n cur.execute(\n 'INSERT INTO 
store (store_id, state) VALUES (%s, %s)',\n (2, 'WA'))\nexcept psycopg2.Error as e:\n print('Error: Inserting Rows')\n print(e)\ntry:\n cur.execute(\n 'CREATE TABLE IF NOT EXISTS customer (customer_id int, name varchar, rewards boolean);'\n )\nexcept psycopg2.Error as e:\n print('Error: Issue creating table')\n print(e)\ntry:\n cur.execute(\n 'INSERT INTO customer (customer_id, name, rewards) VALUES (%s, %s, %s)'\n , (1, 'Amanda', True))\nexcept psycopg2.Error as e:\n print('Error: Inserting Rows')\n print(e)\ntry:\n cur.execute(\n 'INSERT INTO customer (customer_id, name, rewards) VALUES (%s, %s, %s)'\n , (2, 'Toby', False))\nexcept psycopg2.Error as e:\n print('Error: Inserting Rows')\n print(e)\ntry:\n cur.execute(\n 'SELECT name, item_name, rewards FROM ((customer_transactions JOIN customer ON customer.customer_id=customer_transactions.customer_id) JOIN items_purchased ON customer_transactions.customer_id=items_purchased.customer_id) WHERE spent > 30 ;'\n )\nexcept psycopg2.Error as e:\n print('Error: select *')\n print(e)\nrow = cur.fetchone()\nwhile row:\n print(row)\n row = cur.fetchone()\ntry:\n cur.execute(\n 'SELECT store_id, SUM(spent) FROM customer_transactions GROUP BY store_id;'\n )\nexcept psycopg2.Error as e:\n print('Error: select *')\n print(e)\nrow = cur.fetchone()\nwhile row:\n print(row)\n row = cur.fetchone()\ntry:\n cur.execute('DROP table customer_transactions')\nexcept psycopg2.Error as e:\n print('Error: Dropping table')\n print(e)\ntry:\n cur.execute('DROP table items_purchased')\nexcept psycopg2.Error as e:\n print('Error: Dropping table')\n print(e)\ntry:\n cur.execute('DROP table customer')\nexcept psycopg2.Error as e:\n print('Error: Dropping table')\n print(e)\ntry:\n cur.execute('DROP table store')\nexcept psycopg2.Error as e:\n print('Error: Dropping table')\n print(e)\ncur.close()\nconn.close()\n",
"step-5": "#!/usr/bin/env python\n# coding: utf-8\n\n# # Lesson 2 Demo 3: Creating Fact and Dimension Tables with Star Schema\n# \n# <img src=\"images/postgresSQLlogo.png\" width=\"250\" height=\"250\">\n\n# ### Walk through the basics of modeling data using Fact and Dimension tables. In this demo, we will:<br>\n# <ol><li>Create both Fact and Dimension tables<li>Show how this is a basic element of the Star Schema.\n\n# ### Import the library \n# Note: An error might popup after this command has executed. If it does, read it carefully before ignoring. \n\n# In[ ]:\n\n\nimport psycopg2\n\n\n# ### Create a connection to the database\n\n# In[ ]:\n\n\ntry: \n conn = psycopg2.connect(\"host=127.0.0.1 dbname=studentdb user=student password=student\")\nexcept psycopg2.Error as e: \n print(\"Error: Could not make connection to the Postgres database\")\n print(e)\n\n\n# ### Next use that connection to get a cursor that we will use to execute queries.\n\n# In[ ]:\n\n\ntry: \n cur = conn.cursor()\nexcept psycopg2.Error as e: \n print(\"Error: Could not get curser to the Database\")\n print(e)\n\n\n# ### For this demo we will use automatic commit so that each action is commited without having to call conn.commit() after each command. The ability to rollback and commit transactions are a feature of Relational Databases. \n\n# In[ ]:\n\n\nconn.set_session(autocommit=True)\n\n\n# ### Let's imagine we work at an online Music Store. There will be many tables in our database but let's just focus on 4 tables around customer purchases. 
\n# \n# `Table Name: customer_transactions\n# column: Customer Id\n# column: Store Id\n# column: Spent`\n# \n# `Table Name: Customer\n# column: Customer Id\n# column: Name\n# column: Rewards`\n# \n# `Table Name: store\n# column: Store Id\n# column: State`\n# \n# `Table Name: items_purchased\n# column: customer id\n# column: Item Name`\n# \n# <img src=\"images/starSchema.png\" width=\"750\" height=\"750\">\n# \n# #### From this representation we can already start to see the makings of a \"STAR\". We have one fact table (the center of the star) and 3 dimension tables that are coming from it.\n\n# ### Let's create the Fact Table and insert the data into the table\n\n# In[ ]:\n\n\ntry: \n cur.execute(\"CREATE TABLE IF NOT EXISTS customer_transactions (customer_id int, store_id int, spent numeric);\")\nexcept psycopg2.Error as e: \n print(\"Error: Issue creating table\")\n print (e)\n \n#Insert into all tables \ntry: \n cur.execute(\"INSERT INTO customer_transactions (customer_id, store_id, spent) VALUES (%s, %s, %s)\", (1, 1, 20.50))\nexcept psycopg2.Error as e: \n print(\"Error: Inserting Rows\")\n print (e)\ntry: \n cur.execute(\"INSERT INTO customer_transactions (customer_id, store_id, spent) VALUES (%s, %s, %s)\", (2, 1, 35.21))\nexcept psycopg2.Error as e: \n print(\"Error: Inserting Rows\")\n print (e)\n\n\n# ### Let's create our Dimension Tables and insert data into those tables.\n\n# In[ ]:\n\n\ntry: \n cur.execute(\"CREATE TABLE IF NOT EXISTS items_purchased (customer_id int, item_number int, item_name varchar);\")\nexcept psycopg2.Error as e: \n print(\"Error: Issue creating table\")\n print (e)\n \ntry: \n cur.execute(\"INSERT INTO items_purchased (customer_id, item_number, item_name) VALUES (%s, %s, %s)\", (1, 1, \"Rubber Soul\"))\nexcept psycopg2.Error as e: \n print(\"Error: Inserting Rows\")\n print (e)\n \ntry: \n cur.execute(\"INSERT INTO items_purchased (customer_id, item_number, item_name) VALUES (%s, %s, %s)\", (2, 3, \"Let It Be\"))\nexcept 
psycopg2.Error as e: \n print(\"Error: Inserting Rows\")\n print (e)\n \ntry: \n cur.execute(\"CREATE TABLE IF NOT EXISTS store (store_id int, state varchar);\")\nexcept psycopg2.Error as e: \n print(\"Error: Issue creating table\")\n print (e)\n \ntry: \n cur.execute(\"INSERT INTO store (store_id, state) VALUES (%s, %s)\", (1, \"CA\"))\nexcept psycopg2.Error as e: \n print(\"Error: Inserting Rows\")\n print (e)\ntry: \n cur.execute(\"INSERT INTO store (store_id, state) VALUES (%s, %s)\", (2, \"WA\"))\nexcept psycopg2.Error as e: \n print(\"Error: Inserting Rows\")\n print (e)\n \ntry: \n cur.execute(\"CREATE TABLE IF NOT EXISTS customer (customer_id int, name varchar, rewards boolean);\")\nexcept psycopg2.Error as e: \n print(\"Error: Issue creating table\")\n print (e)\n \ntry: \n cur.execute(\"INSERT INTO customer (customer_id, name, rewards) VALUES (%s, %s, %s)\", (1, \"Amanda\", True))\nexcept psycopg2.Error as e: \n print(\"Error: Inserting Rows\")\n print (e)\n\ntry: \n cur.execute(\"INSERT INTO customer (customer_id, name, rewards) VALUES (%s, %s, %s)\", (2, \"Toby\", False))\nexcept psycopg2.Error as e: \n print(\"Error: Inserting Rows\")\n print (e)\n\n\n# **We can do a variety of queries on this data easily because of utilizing the fact/dimension and Star Schema**\n# \n# * _Query 1_: Find all the customers that spent more than 30 dollars, who are they, what did they buy and if they are a rewards member\n# \n# * _Query 2_: How much did Store 1 sell?\n\n# _Query 1:_ Find all the customers that spent more than 30 dollars, who are they, what did they buy and if they are a rewards member\n\n# In[ ]:\n\n\ntry: \n cur.execute(\"SELECT name, item_name, rewards FROM ((customer_transactions JOIN customer ON customer.customer_id=customer_transactions.customer_id) JOIN items_purchased ON customer_transactions.customer_id=items_purchased.customer_id) WHERE spent > 30 ;\")\n \n \nexcept psycopg2.Error as e: \n print(\"Error: select *\")\n print (e)\n\nrow = 
cur.fetchone()\nwhile row:\n print(row)\n row = cur.fetchone()\n\n\n# _Query 2:_ How much did Store 1 sell?\n\n# In[ ]:\n\n\ntry: \n cur.execute(\"SELECT store_id, SUM(spent) FROM customer_transactions GROUP BY store_id;\")\n \n \nexcept psycopg2.Error as e: \n print(\"Error: select *\")\n print (e)\n\nrow = cur.fetchone()\nwhile row:\n print(row)\n row = cur.fetchone()\n\n\n# ### Summary: What you can see here is from this elegant schema we were able to get \"facts/metrics\" from our fact table (how much each store sold), and also information about our customers that will allow us to do more indepth analytics to get answers to business questions by utilizing our fact and dimension tables. \n\n# ### For the sake of the demo, I will drop the table. \n\n# In[ ]:\n\n\ntry: \n cur.execute(\"DROP table customer_transactions\")\nexcept psycopg2.Error as e: \n print(\"Error: Dropping table\")\n print (e)\ntry: \n cur.execute(\"DROP table items_purchased\")\nexcept psycopg2.Error as e: \n print(\"Error: Dropping table\")\n print (e)\ntry: \n cur.execute(\"DROP table customer\")\nexcept psycopg2.Error as e: \n print(\"Error: Dropping table\")\n print (e)\ntry: \n cur.execute(\"DROP table store\")\nexcept psycopg2.Error as e: \n print(\"Error: Dropping table\")\n print (e)\n\n\n# ### And finally close your cursor and connection. \n\n# In[ ]:\n\n\ncur.close()\nconn.close()\n\n\n# In[ ]:\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
warnings.filterwarnings('ignore')
<|reserved_special_token_0|>
sns.set(style='white', color_codes=True)
<|reserved_special_token_0|>
sns.boxplot(x='Species', y='PetalLengthCm', data=iris)
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
warnings.filterwarnings('ignore')
<|reserved_special_token_0|>
sns.set(style='white', color_codes=True)
iris = pd.read_csv('finalOutputV1.csv')
sns.boxplot(x='Species', y='PetalLengthCm', data=iris)
plt.show()
<|reserved_special_token_1|>
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style='white', color_codes=True)
iris = pd.read_csv('finalOutputV1.csv')
sns.boxplot(x='Species', y='PetalLengthCm', data=iris)
plt.show()
<|reserved_special_token_1|>
# First, we'll import pandas, a data processing and CSV file I/O library
import pandas as pd
# We'll also import seaborn, a Python graphing library
import warnings # current version of seaborn generates a bunch of warnings that we'll ignore
warnings.filterwarnings("ignore")
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white", color_codes=True)
# Next, we'll load the Iris flower dataset, which is in the "../input/" directory
iris = pd.read_csv("finalOutputV1.csv") # the iris dataset is now a Pandas DataFrame
# We can look at an individual feature in Seaborn through a boxplot
sns.boxplot(x="Species", y="PetalLengthCm", data=iris)
plt.show()
|
flexible
|
{
"blob_id": "0125abab0312d8f007e76ee710348efc9daae31e",
"index": 4989,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwarnings.filterwarnings('ignore')\n<mask token>\nsns.set(style='white', color_codes=True)\n<mask token>\nsns.boxplot(x='Species', y='PetalLengthCm', data=iris)\nplt.show()\n",
"step-3": "<mask token>\nwarnings.filterwarnings('ignore')\n<mask token>\nsns.set(style='white', color_codes=True)\niris = pd.read_csv('finalOutputV1.csv')\nsns.boxplot(x='Species', y='PetalLengthCm', data=iris)\nplt.show()\n",
"step-4": "import pandas as pd\nimport warnings\nwarnings.filterwarnings('ignore')\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nsns.set(style='white', color_codes=True)\niris = pd.read_csv('finalOutputV1.csv')\nsns.boxplot(x='Species', y='PetalLengthCm', data=iris)\nplt.show()\n",
"step-5": "# First, we'll import pandas, a data processing and CSV file I/O library\nimport pandas as pd\n\n# We'll also import seaborn, a Python graphing library\nimport warnings # current version of seaborn generates a bunch of warnings that we'll ignore\n\nwarnings.filterwarnings(\"ignore\")\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nsns.set(style=\"white\", color_codes=True)\n\n# Next, we'll load the Iris flower dataset, which is in the \"../input/\" directory\niris = pd.read_csv(\"finalOutputV1.csv\") # the iris dataset is now a Pandas DataFrame\n# We can look at an individual feature in Seaborn through a boxplot\nsns.boxplot(x=\"Species\", y=\"PetalLengthCm\", data=iris)\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getTests():
tests = []
suite = testTemplate.testSuite('Sample Test Cases')
testcase = testTemplate.testInstance('3\n1 1 1\n1 1 1\n1 1 1', '6',
'Sample #1')
suite.add(testcase)
testcase = testTemplate.testInstance(
"""11
1 0 0 1 0 0 0 0 0 1 1
1 1 1 1 1 0 1 0 1 0 0
1 0 0 1 0 0 1 1 0 1 0
1 0 1 1 1 0 1 1 0 1 1
0 1 1 1 0 1 0 0 1 1 1
1 1 1 0 0 1 0 0 0 0 0
0 0 0 0 1 0 1 0 0 0 1
1 0 1 1 0 0 0 0 0 0 1
0 0 1 0 1 1 0 0 0 1 1
1 1 1 0 0 0 1 0 1 0 1
1 0 0 0 1 1 1 1 0 0 0"""
, '7588', 'Sample #2')
suite.add(testcase)
testcase = testTemplate.testInstance(
"""11
0 1 1 1 0 1 0 0 0 1 0
0 0 1 1 1 1 1 1 1 1 1
1 1 0 1 0 0 0 0 0 1 0
0 1 0 1 0 1 0 1 0 1 1
1 0 0 1 0 0 0 0 1 0 1
0 0 1 0 1 1 0 0 0 0 1
1 0 1 0 1 1 1 0 1 1 0
1 0 1 1 0 1 1 0 0 1 0
0 0 1 1 0 1 1 1 1 1 1
0 1 0 0 0 0 0 0 0 1 1
0 1 1 0 0 0 0 0 1 0 1 """
, '7426', 'Sample #3')
suite.add(testcase)
tests.append(suite)
return tests
<|reserved_special_token_1|>
import testTemplate
def getTests():
tests = []
suite = testTemplate.testSuite('Sample Test Cases')
testcase = testTemplate.testInstance('3\n1 1 1\n1 1 1\n1 1 1', '6',
'Sample #1')
suite.add(testcase)
testcase = testTemplate.testInstance(
"""11
1 0 0 1 0 0 0 0 0 1 1
1 1 1 1 1 0 1 0 1 0 0
1 0 0 1 0 0 1 1 0 1 0
1 0 1 1 1 0 1 1 0 1 1
0 1 1 1 0 1 0 0 1 1 1
1 1 1 0 0 1 0 0 0 0 0
0 0 0 0 1 0 1 0 0 0 1
1 0 1 1 0 0 0 0 0 0 1
0 0 1 0 1 1 0 0 0 1 1
1 1 1 0 0 0 1 0 1 0 1
1 0 0 0 1 1 1 1 0 0 0"""
, '7588', 'Sample #2')
suite.add(testcase)
testcase = testTemplate.testInstance(
"""11
0 1 1 1 0 1 0 0 0 1 0
0 0 1 1 1 1 1 1 1 1 1
1 1 0 1 0 0 0 0 0 1 0
0 1 0 1 0 1 0 1 0 1 1
1 0 0 1 0 0 0 0 1 0 1
0 0 1 0 1 1 0 0 0 0 1
1 0 1 0 1 1 1 0 1 1 0
1 0 1 1 0 1 1 0 0 1 0
0 0 1 1 0 1 1 1 1 1 1
0 1 0 0 0 0 0 0 0 1 1
0 1 1 0 0 0 0 0 1 0 1 """
, '7426', 'Sample #3')
suite.add(testcase)
tests.append(suite)
return tests
<|reserved_special_token_1|>
import testTemplate
def getTests():
tests = []
suite=testTemplate.testSuite("Sample Test Cases")
testcase = testTemplate.testInstance("3\n1 1 1\n1 1 1\n1 1 1" , "6" , "Sample #1")
suite.add(testcase)
testcase = testTemplate.testInstance("11\n1 0 0 1 0 0 0 0 0 1 1 \n1 1 1 1 1 0 1 0 1 0 0 \n1 0 0 1 0 0 1 1 0 1 0 \n1 0 1 1 1 0 1 1 0 1 1 \n0 1 1 1 0 1 0 0 1 1 1 \n1 1 1 0 0 1 0 0 0 0 0 \n0 0 0 0 1 0 1 0 0 0 1 \n1 0 1 1 0 0 0 0 0 0 1 \n0 0 1 0 1 1 0 0 0 1 1 \n1 1 1 0 0 0 1 0 1 0 1 \n1 0 0 0 1 1 1 1 0 0 0" , "7588" , "Sample #2")
suite.add(testcase)
testcase = testTemplate.testInstance("11\n0 1 1 1 0 1 0 0 0 1 0 \n0 0 1 1 1 1 1 1 1 1 1 \n1 1 0 1 0 0 0 0 0 1 0 \n0 1 0 1 0 1 0 1 0 1 1 \n1 0 0 1 0 0 0 0 1 0 1 \n0 0 1 0 1 1 0 0 0 0 1 \n1 0 1 0 1 1 1 0 1 1 0 \n1 0 1 1 0 1 1 0 0 1 0 \n0 0 1 1 0 1 1 1 1 1 1 \n0 1 0 0 0 0 0 0 0 1 1 \n0 1 1 0 0 0 0 0 1 0 1 " , "7426" , "Sample #3")
suite.add(testcase)
tests.append(suite)
return tests
|
flexible
|
{
"blob_id": "de4c31ad474b7ce75631214aceafbe4d7334f14b",
"index": 6956,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef getTests():\n tests = []\n suite = testTemplate.testSuite('Sample Test Cases')\n testcase = testTemplate.testInstance('3\\n1 1 1\\n1 1 1\\n1 1 1', '6',\n 'Sample #1')\n suite.add(testcase)\n testcase = testTemplate.testInstance(\n \"\"\"11\n1 0 0 1 0 0 0 0 0 1 1 \n1 1 1 1 1 0 1 0 1 0 0 \n1 0 0 1 0 0 1 1 0 1 0 \n1 0 1 1 1 0 1 1 0 1 1 \n0 1 1 1 0 1 0 0 1 1 1 \n1 1 1 0 0 1 0 0 0 0 0 \n0 0 0 0 1 0 1 0 0 0 1 \n1 0 1 1 0 0 0 0 0 0 1 \n0 0 1 0 1 1 0 0 0 1 1 \n1 1 1 0 0 0 1 0 1 0 1 \n1 0 0 0 1 1 1 1 0 0 0\"\"\"\n , '7588', 'Sample #2')\n suite.add(testcase)\n testcase = testTemplate.testInstance(\n \"\"\"11\n0 1 1 1 0 1 0 0 0 1 0 \n0 0 1 1 1 1 1 1 1 1 1 \n1 1 0 1 0 0 0 0 0 1 0 \n0 1 0 1 0 1 0 1 0 1 1 \n1 0 0 1 0 0 0 0 1 0 1 \n0 0 1 0 1 1 0 0 0 0 1 \n1 0 1 0 1 1 1 0 1 1 0 \n1 0 1 1 0 1 1 0 0 1 0 \n0 0 1 1 0 1 1 1 1 1 1 \n0 1 0 0 0 0 0 0 0 1 1 \n0 1 1 0 0 0 0 0 1 0 1 \"\"\"\n , '7426', 'Sample #3')\n suite.add(testcase)\n tests.append(suite)\n return tests\n",
"step-3": "import testTemplate\n\n\ndef getTests():\n tests = []\n suite = testTemplate.testSuite('Sample Test Cases')\n testcase = testTemplate.testInstance('3\\n1 1 1\\n1 1 1\\n1 1 1', '6',\n 'Sample #1')\n suite.add(testcase)\n testcase = testTemplate.testInstance(\n \"\"\"11\n1 0 0 1 0 0 0 0 0 1 1 \n1 1 1 1 1 0 1 0 1 0 0 \n1 0 0 1 0 0 1 1 0 1 0 \n1 0 1 1 1 0 1 1 0 1 1 \n0 1 1 1 0 1 0 0 1 1 1 \n1 1 1 0 0 1 0 0 0 0 0 \n0 0 0 0 1 0 1 0 0 0 1 \n1 0 1 1 0 0 0 0 0 0 1 \n0 0 1 0 1 1 0 0 0 1 1 \n1 1 1 0 0 0 1 0 1 0 1 \n1 0 0 0 1 1 1 1 0 0 0\"\"\"\n , '7588', 'Sample #2')\n suite.add(testcase)\n testcase = testTemplate.testInstance(\n \"\"\"11\n0 1 1 1 0 1 0 0 0 1 0 \n0 0 1 1 1 1 1 1 1 1 1 \n1 1 0 1 0 0 0 0 0 1 0 \n0 1 0 1 0 1 0 1 0 1 1 \n1 0 0 1 0 0 0 0 1 0 1 \n0 0 1 0 1 1 0 0 0 0 1 \n1 0 1 0 1 1 1 0 1 1 0 \n1 0 1 1 0 1 1 0 0 1 0 \n0 0 1 1 0 1 1 1 1 1 1 \n0 1 0 0 0 0 0 0 0 1 1 \n0 1 1 0 0 0 0 0 1 0 1 \"\"\"\n , '7426', 'Sample #3')\n suite.add(testcase)\n tests.append(suite)\n return tests\n",
"step-4": "import testTemplate \ndef getTests():\n\ttests = []\n\t\n\tsuite=testTemplate.testSuite(\"Sample Test Cases\")\n\ttestcase = testTemplate.testInstance(\"3\\n1 1 1\\n1 1 1\\n1 1 1\" , \"6\" , \"Sample #1\")\n\tsuite.add(testcase)\n\ttestcase = testTemplate.testInstance(\"11\\n1 0 0 1 0 0 0 0 0 1 1 \\n1 1 1 1 1 0 1 0 1 0 0 \\n1 0 0 1 0 0 1 1 0 1 0 \\n1 0 1 1 1 0 1 1 0 1 1 \\n0 1 1 1 0 1 0 0 1 1 1 \\n1 1 1 0 0 1 0 0 0 0 0 \\n0 0 0 0 1 0 1 0 0 0 1 \\n1 0 1 1 0 0 0 0 0 0 1 \\n0 0 1 0 1 1 0 0 0 1 1 \\n1 1 1 0 0 0 1 0 1 0 1 \\n1 0 0 0 1 1 1 1 0 0 0\" , \"7588\" , \"Sample #2\")\n\tsuite.add(testcase)\n\ttestcase = testTemplate.testInstance(\"11\\n0 1 1 1 0 1 0 0 0 1 0 \\n0 0 1 1 1 1 1 1 1 1 1 \\n1 1 0 1 0 0 0 0 0 1 0 \\n0 1 0 1 0 1 0 1 0 1 1 \\n1 0 0 1 0 0 0 0 1 0 1 \\n0 0 1 0 1 1 0 0 0 0 1 \\n1 0 1 0 1 1 1 0 1 1 0 \\n1 0 1 1 0 1 1 0 0 1 0 \\n0 0 1 1 0 1 1 1 1 1 1 \\n0 1 0 0 0 0 0 0 0 1 1 \\n0 1 1 0 0 0 0 0 1 0 1 \" , \"7426\" , \"Sample #3\")\n\tsuite.add(testcase)\n\ttests.append(suite)\n\t\n\treturn tests\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class SIL(SVM):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def fit(self, bags, y):
"""
@param bags : a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
@param y : an array-like object of length n containing -1/+1 labels
"""
self._bags = [np.asmatrix(bag) for bag in bags]
y = np.asmatrix(y).reshape((-1, 1))
svm_X = np.vstack(self._bags)
svm_y = np.vstack([(float(cls) * np.matrix(np.ones((len(bag), 1)))) for
bag, cls in zip(self._bags, y)])
super(SIL, self).fit(svm_X, svm_y)
<|reserved_special_token_0|>
def predict(self, bags, instancePrediction=None):
"""
@param bags : a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
@param instancePrediction : flag to indicate if instance predictions
should be given as output.
@return : an array of length n containing real-valued label predictions
(threshold at zero to produce binary predictions)
"""
if instancePrediction is None:
instancePrediction = False
bags = [np.asmatrix(bag) for bag in bags]
inst_preds = super(SIL, self).predict(np.vstack(bags))
if instancePrediction:
return _inst_to_bag_preds(inst_preds, bags), inst_preds
else:
return _inst_to_bag_preds(inst_preds, bags)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SIL(SVM):
<|reserved_special_token_0|>
def __init__(self, C=1.0, scale_C=True, verbose=True, sv_cutoff=1e-07,
**kwargs):
"""
@param kernel : the desired kernel function; can be linear, quadratic,
polynomial, or rbf [default: linear]
@param C : the loss/regularization tradeoff constant [default: 1.0]
@param scale_C : if False [default], scale C by the number of examples
@param p : polynomial degree when a 'polynomial' kernel is used
[default: 3]
@param gamma : RBF scale parameter when an 'rbf' kernel is used
[default: 1.0]
@param verbose : print optimization status messages [default: True]
@param sv_cutoff : the numerical cutoff for an example to be considered
a support vector [default: 1e-7]
"""
self._bags = None
self._bag_predictions = None
self.scale_C = scale_C
self.verbose = verbose
self.sv_cutoff = sv_cutoff
self.C = C
self._X = None
self._y = None
self._objective = None
self._alphas = None
self._sv = None
self._sv_alphas = None
self._sv_X = None
self._sv_y = None
self._b = None
self._predictions = None
super(SIL, self).__init__(**kwargs)
def fit(self, bags, y):
"""
@param bags : a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
@param y : an array-like object of length n containing -1/+1 labels
"""
self._bags = [np.asmatrix(bag) for bag in bags]
y = np.asmatrix(y).reshape((-1, 1))
svm_X = np.vstack(self._bags)
svm_y = np.vstack([(float(cls) * np.matrix(np.ones((len(bag), 1)))) for
bag, cls in zip(self._bags, y)])
super(SIL, self).fit(svm_X, svm_y)
<|reserved_special_token_0|>
def predict(self, bags, instancePrediction=None):
"""
@param bags : a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
@param instancePrediction : flag to indicate if instance predictions
should be given as output.
@return : an array of length n containing real-valued label predictions
(threshold at zero to produce binary predictions)
"""
if instancePrediction is None:
instancePrediction = False
bags = [np.asmatrix(bag) for bag in bags]
inst_preds = super(SIL, self).predict(np.vstack(bags))
if instancePrediction:
return _inst_to_bag_preds(inst_preds, bags), inst_preds
else:
return _inst_to_bag_preds(inst_preds, bags)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SIL(SVM):
"""
Single-Instance Learning applied to MI data
"""
def __init__(self, C=1.0, scale_C=True, verbose=True, sv_cutoff=1e-07,
**kwargs):
"""
@param kernel : the desired kernel function; can be linear, quadratic,
polynomial, or rbf [default: linear]
@param C : the loss/regularization tradeoff constant [default: 1.0]
@param scale_C : if False [default], scale C by the number of examples
@param p : polynomial degree when a 'polynomial' kernel is used
[default: 3]
@param gamma : RBF scale parameter when an 'rbf' kernel is used
[default: 1.0]
@param verbose : print optimization status messages [default: True]
@param sv_cutoff : the numerical cutoff for an example to be considered
a support vector [default: 1e-7]
"""
self._bags = None
self._bag_predictions = None
self.scale_C = scale_C
self.verbose = verbose
self.sv_cutoff = sv_cutoff
self.C = C
self._X = None
self._y = None
self._objective = None
self._alphas = None
self._sv = None
self._sv_alphas = None
self._sv_X = None
self._sv_y = None
self._b = None
self._predictions = None
super(SIL, self).__init__(**kwargs)
def fit(self, bags, y):
"""
@param bags : a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
@param y : an array-like object of length n containing -1/+1 labels
"""
self._bags = [np.asmatrix(bag) for bag in bags]
y = np.asmatrix(y).reshape((-1, 1))
svm_X = np.vstack(self._bags)
svm_y = np.vstack([(float(cls) * np.matrix(np.ones((len(bag), 1)))) for
bag, cls in zip(self._bags, y)])
super(SIL, self).fit(svm_X, svm_y)
def _compute_separator(self, K):
super(SIL, self)._compute_separator(K)
self._bag_predictions = _inst_to_bag_preds(self._predictions, self.
_bags)
def predict(self, bags, instancePrediction=None):
"""
@param bags : a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
@param instancePrediction : flag to indicate if instance predictions
should be given as output.
@return : an array of length n containing real-valued label predictions
(threshold at zero to produce binary predictions)
"""
if instancePrediction is None:
instancePrediction = False
bags = [np.asmatrix(bag) for bag in bags]
inst_preds = super(SIL, self).predict(np.vstack(bags))
if instancePrediction:
return _inst_to_bag_preds(inst_preds, bags), inst_preds
else:
return _inst_to_bag_preds(inst_preds, bags)
def get_params(self, deep=True):
"""
return params
"""
args, _, _, _ = inspect.getargspec(super(SIL, self).__init__)
args.pop(0)
return {key: getattr(self, key, None) for key in args}
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SIL(SVM):
"""
Single-Instance Learning applied to MI data
"""
def __init__(self, C=1.0, scale_C=True, verbose=True, sv_cutoff=1e-07,
**kwargs):
"""
@param kernel : the desired kernel function; can be linear, quadratic,
polynomial, or rbf [default: linear]
@param C : the loss/regularization tradeoff constant [default: 1.0]
@param scale_C : if False [default], scale C by the number of examples
@param p : polynomial degree when a 'polynomial' kernel is used
[default: 3]
@param gamma : RBF scale parameter when an 'rbf' kernel is used
[default: 1.0]
@param verbose : print optimization status messages [default: True]
@param sv_cutoff : the numerical cutoff for an example to be considered
a support vector [default: 1e-7]
"""
self._bags = None
self._bag_predictions = None
self.scale_C = scale_C
self.verbose = verbose
self.sv_cutoff = sv_cutoff
self.C = C
self._X = None
self._y = None
self._objective = None
self._alphas = None
self._sv = None
self._sv_alphas = None
self._sv_X = None
self._sv_y = None
self._b = None
self._predictions = None
super(SIL, self).__init__(**kwargs)
def fit(self, bags, y):
"""
@param bags : a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
@param y : an array-like object of length n containing -1/+1 labels
"""
self._bags = [np.asmatrix(bag) for bag in bags]
y = np.asmatrix(y).reshape((-1, 1))
svm_X = np.vstack(self._bags)
svm_y = np.vstack([(float(cls) * np.matrix(np.ones((len(bag), 1)))) for
bag, cls in zip(self._bags, y)])
super(SIL, self).fit(svm_X, svm_y)
def _compute_separator(self, K):
super(SIL, self)._compute_separator(K)
self._bag_predictions = _inst_to_bag_preds(self._predictions, self.
_bags)
def predict(self, bags, instancePrediction=None):
"""
@param bags : a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
@param instancePrediction : flag to indicate if instance predictions
should be given as output.
@return : an array of length n containing real-valued label predictions
(threshold at zero to produce binary predictions)
"""
if instancePrediction is None:
instancePrediction = False
bags = [np.asmatrix(bag) for bag in bags]
inst_preds = super(SIL, self).predict(np.vstack(bags))
if instancePrediction:
return _inst_to_bag_preds(inst_preds, bags), inst_preds
else:
return _inst_to_bag_preds(inst_preds, bags)
def get_params(self, deep=True):
"""
return params
"""
args, _, _, _ = inspect.getargspec(super(SIL, self).__init__)
args.pop(0)
return {key: getattr(self, key, None) for key in args}
def _inst_to_bag_preds(inst_preds, bags):
return np.array([np.max(inst_preds[slice(*bidx)]) for bidx in slices(
map(len, bags))])
<|reserved_special_token_1|>
"""
Implements Single Instance Learning SVM
From https://github.com/garydoranjr/misvm/blob/master/misvm/sil.py
Modified by Nicolas
"""
from __future__ import print_function, division
import numpy as np
import inspect
from sklearn.svm import LinearSVC as SVM
from milsvm.util import slices
class SIL(SVM):
"""
Single-Instance Learning applied to MI data
"""
def __init__(self,C=1.0, scale_C=True,
verbose=True, sv_cutoff=1e-7, **kwargs):
"""
@param kernel : the desired kernel function; can be linear, quadratic,
polynomial, or rbf [default: linear]
@param C : the loss/regularization tradeoff constant [default: 1.0]
@param scale_C : if False [default], scale C by the number of examples
@param p : polynomial degree when a 'polynomial' kernel is used
[default: 3]
@param gamma : RBF scale parameter when an 'rbf' kernel is used
[default: 1.0]
@param verbose : print optimization status messages [default: True]
@param sv_cutoff : the numerical cutoff for an example to be considered
a support vector [default: 1e-7]
"""
self._bags = None
self._bag_predictions = None
self.scale_C = scale_C
self.verbose = verbose
self.sv_cutoff = sv_cutoff
self.C = C
self._X = None
self._y = None
self._objective = None
self._alphas = None
self._sv = None
self._sv_alphas = None
self._sv_X = None
self._sv_y = None
self._b = None
self._predictions = None
super(SIL, self).__init__(**kwargs)
def fit(self, bags, y):
"""
@param bags : a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
@param y : an array-like object of length n containing -1/+1 labels
"""
self._bags = [np.asmatrix(bag) for bag in bags]
y = np.asmatrix(y).reshape((-1, 1))
svm_X = np.vstack(self._bags)
svm_y = np.vstack([float(cls) * np.matrix(np.ones((len(bag), 1)))
for bag, cls in zip(self._bags, y)])
super(SIL, self).fit(svm_X, svm_y)
def _compute_separator(self, K):
super(SIL, self)._compute_separator(K)
self._bag_predictions = _inst_to_bag_preds(self._predictions, self._bags)
def predict(self, bags, instancePrediction = None):
"""
@param bags : a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
@param instancePrediction : flag to indicate if instance predictions
should be given as output.
@return : an array of length n containing real-valued label predictions
(threshold at zero to produce binary predictions)
"""
if instancePrediction is None:
instancePrediction = False
bags = [np.asmatrix(bag) for bag in bags]
inst_preds = super(SIL, self).predict(np.vstack(bags))
if instancePrediction:
return _inst_to_bag_preds(inst_preds, bags), inst_preds
else:
return _inst_to_bag_preds(inst_preds, bags)
def get_params(self, deep=True):
"""
return params
"""
args, _, _, _ = inspect.getargspec(super(SIL, self).__init__)
args.pop(0)
return {key: getattr(self, key, None) for key in args}
def _inst_to_bag_preds(inst_preds, bags):
return np.array([np.max(inst_preds[slice(*bidx)])
for bidx in slices(map(len, bags))])
|
flexible
|
{
"blob_id": "f125269d5b52da41734ce94683139c44f0c4a66a",
"index": 3402,
"step-1": "<mask token>\n\n\nclass SIL(SVM):\n <mask token>\n <mask token>\n\n def fit(self, bags, y):\n \"\"\"\n @param bags : a sequence of n bags; each bag is an m-by-k array-like\n object containing m instances with k features\n @param y : an array-like object of length n containing -1/+1 labels\n \"\"\"\n self._bags = [np.asmatrix(bag) for bag in bags]\n y = np.asmatrix(y).reshape((-1, 1))\n svm_X = np.vstack(self._bags)\n svm_y = np.vstack([(float(cls) * np.matrix(np.ones((len(bag), 1)))) for\n bag, cls in zip(self._bags, y)])\n super(SIL, self).fit(svm_X, svm_y)\n <mask token>\n\n def predict(self, bags, instancePrediction=None):\n \"\"\"\n @param bags : a sequence of n bags; each bag is an m-by-k array-like\n object containing m instances with k features\n @param instancePrediction : flag to indicate if instance predictions \n should be given as output.\n @return : an array of length n containing real-valued label predictions\n (threshold at zero to produce binary predictions)\n \"\"\"\n if instancePrediction is None:\n instancePrediction = False\n bags = [np.asmatrix(bag) for bag in bags]\n inst_preds = super(SIL, self).predict(np.vstack(bags))\n if instancePrediction:\n return _inst_to_bag_preds(inst_preds, bags), inst_preds\n else:\n return _inst_to_bag_preds(inst_preds, bags)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SIL(SVM):\n <mask token>\n\n def __init__(self, C=1.0, scale_C=True, verbose=True, sv_cutoff=1e-07,\n **kwargs):\n \"\"\"\n @param kernel : the desired kernel function; can be linear, quadratic,\n polynomial, or rbf [default: linear]\n @param C : the loss/regularization tradeoff constant [default: 1.0]\n @param scale_C : if False [default], scale C by the number of examples\n @param p : polynomial degree when a 'polynomial' kernel is used\n [default: 3]\n @param gamma : RBF scale parameter when an 'rbf' kernel is used\n [default: 1.0]\n @param verbose : print optimization status messages [default: True]\n @param sv_cutoff : the numerical cutoff for an example to be considered\n a support vector [default: 1e-7]\n \"\"\"\n self._bags = None\n self._bag_predictions = None\n self.scale_C = scale_C\n self.verbose = verbose\n self.sv_cutoff = sv_cutoff\n self.C = C\n self._X = None\n self._y = None\n self._objective = None\n self._alphas = None\n self._sv = None\n self._sv_alphas = None\n self._sv_X = None\n self._sv_y = None\n self._b = None\n self._predictions = None\n super(SIL, self).__init__(**kwargs)\n\n def fit(self, bags, y):\n \"\"\"\n @param bags : a sequence of n bags; each bag is an m-by-k array-like\n object containing m instances with k features\n @param y : an array-like object of length n containing -1/+1 labels\n \"\"\"\n self._bags = [np.asmatrix(bag) for bag in bags]\n y = np.asmatrix(y).reshape((-1, 1))\n svm_X = np.vstack(self._bags)\n svm_y = np.vstack([(float(cls) * np.matrix(np.ones((len(bag), 1)))) for\n bag, cls in zip(self._bags, y)])\n super(SIL, self).fit(svm_X, svm_y)\n <mask token>\n\n def predict(self, bags, instancePrediction=None):\n \"\"\"\n @param bags : a sequence of n bags; each bag is an m-by-k array-like\n object containing m instances with k features\n @param instancePrediction : flag to indicate if instance predictions \n should be given as output.\n @return : an array of length n containing 
real-valued label predictions\n (threshold at zero to produce binary predictions)\n \"\"\"\n if instancePrediction is None:\n instancePrediction = False\n bags = [np.asmatrix(bag) for bag in bags]\n inst_preds = super(SIL, self).predict(np.vstack(bags))\n if instancePrediction:\n return _inst_to_bag_preds(inst_preds, bags), inst_preds\n else:\n return _inst_to_bag_preds(inst_preds, bags)\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SIL(SVM):\n \"\"\"\n Single-Instance Learning applied to MI data\n \"\"\"\n\n def __init__(self, C=1.0, scale_C=True, verbose=True, sv_cutoff=1e-07,\n **kwargs):\n \"\"\"\n @param kernel : the desired kernel function; can be linear, quadratic,\n polynomial, or rbf [default: linear]\n @param C : the loss/regularization tradeoff constant [default: 1.0]\n @param scale_C : if False [default], scale C by the number of examples\n @param p : polynomial degree when a 'polynomial' kernel is used\n [default: 3]\n @param gamma : RBF scale parameter when an 'rbf' kernel is used\n [default: 1.0]\n @param verbose : print optimization status messages [default: True]\n @param sv_cutoff : the numerical cutoff for an example to be considered\n a support vector [default: 1e-7]\n \"\"\"\n self._bags = None\n self._bag_predictions = None\n self.scale_C = scale_C\n self.verbose = verbose\n self.sv_cutoff = sv_cutoff\n self.C = C\n self._X = None\n self._y = None\n self._objective = None\n self._alphas = None\n self._sv = None\n self._sv_alphas = None\n self._sv_X = None\n self._sv_y = None\n self._b = None\n self._predictions = None\n super(SIL, self).__init__(**kwargs)\n\n def fit(self, bags, y):\n \"\"\"\n @param bags : a sequence of n bags; each bag is an m-by-k array-like\n object containing m instances with k features\n @param y : an array-like object of length n containing -1/+1 labels\n \"\"\"\n self._bags = [np.asmatrix(bag) for bag in bags]\n y = np.asmatrix(y).reshape((-1, 1))\n svm_X = np.vstack(self._bags)\n svm_y = np.vstack([(float(cls) * np.matrix(np.ones((len(bag), 1)))) for\n bag, cls in zip(self._bags, y)])\n super(SIL, self).fit(svm_X, svm_y)\n\n def _compute_separator(self, K):\n super(SIL, self)._compute_separator(K)\n self._bag_predictions = _inst_to_bag_preds(self._predictions, self.\n _bags)\n\n def predict(self, bags, instancePrediction=None):\n \"\"\"\n @param bags : a sequence of n bags; each bag is an m-by-k array-like\n 
object containing m instances with k features\n @param instancePrediction : flag to indicate if instance predictions \n should be given as output.\n @return : an array of length n containing real-valued label predictions\n (threshold at zero to produce binary predictions)\n \"\"\"\n if instancePrediction is None:\n instancePrediction = False\n bags = [np.asmatrix(bag) for bag in bags]\n inst_preds = super(SIL, self).predict(np.vstack(bags))\n if instancePrediction:\n return _inst_to_bag_preds(inst_preds, bags), inst_preds\n else:\n return _inst_to_bag_preds(inst_preds, bags)\n\n def get_params(self, deep=True):\n \"\"\"\n return params\n \"\"\"\n args, _, _, _ = inspect.getargspec(super(SIL, self).__init__)\n args.pop(0)\n return {key: getattr(self, key, None) for key in args}\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass SIL(SVM):\n \"\"\"\n Single-Instance Learning applied to MI data\n \"\"\"\n\n def __init__(self, C=1.0, scale_C=True, verbose=True, sv_cutoff=1e-07,\n **kwargs):\n \"\"\"\n @param kernel : the desired kernel function; can be linear, quadratic,\n polynomial, or rbf [default: linear]\n @param C : the loss/regularization tradeoff constant [default: 1.0]\n @param scale_C : if False [default], scale C by the number of examples\n @param p : polynomial degree when a 'polynomial' kernel is used\n [default: 3]\n @param gamma : RBF scale parameter when an 'rbf' kernel is used\n [default: 1.0]\n @param verbose : print optimization status messages [default: True]\n @param sv_cutoff : the numerical cutoff for an example to be considered\n a support vector [default: 1e-7]\n \"\"\"\n self._bags = None\n self._bag_predictions = None\n self.scale_C = scale_C\n self.verbose = verbose\n self.sv_cutoff = sv_cutoff\n self.C = C\n self._X = None\n self._y = None\n self._objective = None\n self._alphas = None\n self._sv = None\n self._sv_alphas = None\n self._sv_X = None\n self._sv_y = None\n self._b = None\n self._predictions = None\n super(SIL, self).__init__(**kwargs)\n\n def fit(self, bags, y):\n \"\"\"\n @param bags : a sequence of n bags; each bag is an m-by-k array-like\n object containing m instances with k features\n @param y : an array-like object of length n containing -1/+1 labels\n \"\"\"\n self._bags = [np.asmatrix(bag) for bag in bags]\n y = np.asmatrix(y).reshape((-1, 1))\n svm_X = np.vstack(self._bags)\n svm_y = np.vstack([(float(cls) * np.matrix(np.ones((len(bag), 1)))) for\n bag, cls in zip(self._bags, y)])\n super(SIL, self).fit(svm_X, svm_y)\n\n def _compute_separator(self, K):\n super(SIL, self)._compute_separator(K)\n self._bag_predictions = _inst_to_bag_preds(self._predictions, self.\n _bags)\n\n def predict(self, bags, instancePrediction=None):\n \"\"\"\n @param bags : a sequence of n bags; each bag is an m-by-k array-like\n 
object containing m instances with k features\n @param instancePrediction : flag to indicate if instance predictions \n should be given as output.\n @return : an array of length n containing real-valued label predictions\n (threshold at zero to produce binary predictions)\n \"\"\"\n if instancePrediction is None:\n instancePrediction = False\n bags = [np.asmatrix(bag) for bag in bags]\n inst_preds = super(SIL, self).predict(np.vstack(bags))\n if instancePrediction:\n return _inst_to_bag_preds(inst_preds, bags), inst_preds\n else:\n return _inst_to_bag_preds(inst_preds, bags)\n\n def get_params(self, deep=True):\n \"\"\"\n return params\n \"\"\"\n args, _, _, _ = inspect.getargspec(super(SIL, self).__init__)\n args.pop(0)\n return {key: getattr(self, key, None) for key in args}\n\n\ndef _inst_to_bag_preds(inst_preds, bags):\n return np.array([np.max(inst_preds[slice(*bidx)]) for bidx in slices(\n map(len, bags))])\n",
"step-5": "\"\"\"\nImplements Single Instance Learning SVM\nFrom https://github.com/garydoranjr/misvm/blob/master/misvm/sil.py\nModified by Nicolas\n\"\"\"\nfrom __future__ import print_function, division\nimport numpy as np\nimport inspect\nfrom sklearn.svm import LinearSVC as SVM\nfrom milsvm.util import slices\n\n\nclass SIL(SVM):\n \"\"\"\n Single-Instance Learning applied to MI data\n \"\"\"\n\n def __init__(self,C=1.0, scale_C=True,\n verbose=True, sv_cutoff=1e-7, **kwargs):\n \"\"\"\n @param kernel : the desired kernel function; can be linear, quadratic,\n polynomial, or rbf [default: linear]\n @param C : the loss/regularization tradeoff constant [default: 1.0]\n @param scale_C : if False [default], scale C by the number of examples\n @param p : polynomial degree when a 'polynomial' kernel is used\n [default: 3]\n @param gamma : RBF scale parameter when an 'rbf' kernel is used\n [default: 1.0]\n @param verbose : print optimization status messages [default: True]\n @param sv_cutoff : the numerical cutoff for an example to be considered\n a support vector [default: 1e-7]\n \"\"\"\n \n self._bags = None\n self._bag_predictions = None\n self.scale_C = scale_C\n self.verbose = verbose\n self.sv_cutoff = sv_cutoff\n self.C = C\n\n self._X = None\n self._y = None\n self._objective = None\n self._alphas = None\n self._sv = None\n self._sv_alphas = None\n self._sv_X = None\n self._sv_y = None\n self._b = None\n self._predictions = None\n super(SIL, self).__init__(**kwargs)\n\n def fit(self, bags, y):\n \"\"\"\n @param bags : a sequence of n bags; each bag is an m-by-k array-like\n object containing m instances with k features\n @param y : an array-like object of length n containing -1/+1 labels\n \"\"\"\n self._bags = [np.asmatrix(bag) for bag in bags]\n y = np.asmatrix(y).reshape((-1, 1))\n svm_X = np.vstack(self._bags)\n svm_y = np.vstack([float(cls) * np.matrix(np.ones((len(bag), 1)))\n for bag, cls in zip(self._bags, y)])\n super(SIL, self).fit(svm_X, svm_y)\n\n 
def _compute_separator(self, K):\n super(SIL, self)._compute_separator(K)\n self._bag_predictions = _inst_to_bag_preds(self._predictions, self._bags)\n\n def predict(self, bags, instancePrediction = None):\n \"\"\"\n @param bags : a sequence of n bags; each bag is an m-by-k array-like\n object containing m instances with k features\n @param instancePrediction : flag to indicate if instance predictions \n should be given as output.\n @return : an array of length n containing real-valued label predictions\n (threshold at zero to produce binary predictions)\n \"\"\"\n if instancePrediction is None:\n instancePrediction = False\n \n bags = [np.asmatrix(bag) for bag in bags]\n inst_preds = super(SIL, self).predict(np.vstack(bags))\n\n if instancePrediction: \n return _inst_to_bag_preds(inst_preds, bags), inst_preds\n else:\n return _inst_to_bag_preds(inst_preds, bags)\n\n def get_params(self, deep=True):\n \"\"\"\n return params\n \"\"\"\n args, _, _, _ = inspect.getargspec(super(SIL, self).__init__)\n args.pop(0)\n return {key: getattr(self, key, None) for key in args}\n\n\ndef _inst_to_bag_preds(inst_preds, bags):\n return np.array([np.max(inst_preds[slice(*bidx)])\n for bidx in slices(map(len, bags))])\n",
"step-ids": [
3,
4,
7,
8,
10
]
}
|
[
3,
4,
7,
8,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for jpg_file in jpg_files:
basename = os.path.basename(jpg_file)
if int(basename[:-4]) % 10 == 0:
cnt += 1
dirname = os.path.dirname(jpg_file)
dirs = dirname.split('/')
new_fname = dirs[-2] + '_' + basename[:-4] + '.bmp'
dst_bmp_path = dst_bmp_dir + new_fname
print(dst_bmp_path)
shutil.copyfile(jpg_file, dst_bmp_path[:-4] + '.jpg')
if cnt > 3:
break
<|reserved_special_token_1|>
<|reserved_special_token_0|>
src_jpg_dir = 'D:/Develop/data/VOCdevkit/VOC2007/JPEGImages/'
dst_bmp_dir = 'D:/Temp/'
jpg_files = glob.glob(src_jpg_dir + '*.jpg')
cnt = 0
for jpg_file in jpg_files:
basename = os.path.basename(jpg_file)
if int(basename[:-4]) % 10 == 0:
cnt += 1
dirname = os.path.dirname(jpg_file)
dirs = dirname.split('/')
new_fname = dirs[-2] + '_' + basename[:-4] + '.bmp'
dst_bmp_path = dst_bmp_dir + new_fname
print(dst_bmp_path)
shutil.copyfile(jpg_file, dst_bmp_path[:-4] + '.jpg')
if cnt > 3:
break
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import os
from PIL import Image
import glob
import shutil
src_jpg_dir = 'D:/Develop/data/VOCdevkit/VOC2007/JPEGImages/'
dst_bmp_dir = 'D:/Temp/'
jpg_files = glob.glob(src_jpg_dir + '*.jpg')
cnt = 0
for jpg_file in jpg_files:
basename = os.path.basename(jpg_file)
if int(basename[:-4]) % 10 == 0:
cnt += 1
dirname = os.path.dirname(jpg_file)
dirs = dirname.split('/')
new_fname = dirs[-2] + '_' + basename[:-4] + '.bmp'
dst_bmp_path = dst_bmp_dir + new_fname
print(dst_bmp_path)
shutil.copyfile(jpg_file, dst_bmp_path[:-4] + '.jpg')
if cnt > 3:
break
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 20 07:48:47 2018
@author: hfuji
"""
import os
from PIL import Image
import glob
import shutil
src_jpg_dir = 'D:/Develop/data/VOCdevkit/VOC2007/JPEGImages/'
dst_bmp_dir = 'D:/Temp/'
jpg_files = glob.glob(src_jpg_dir + '*.jpg')
cnt = 0
for jpg_file in jpg_files:
basename = os.path.basename(jpg_file)
if int(basename[:-4]) % 10 == 0:
cnt += 1
dirname = os.path.dirname(jpg_file)
dirs = dirname.split('/')
new_fname = dirs[-2] + '_' + basename[:-4] + '.bmp'
dst_bmp_path = dst_bmp_dir + new_fname
print(dst_bmp_path)
# pil_img = Image.open(jpg_file)
# pil_img.save(dst_bmp_path, "bmp")
shutil.copyfile(jpg_file, dst_bmp_path[:-4] + '.jpg')
if cnt > 3:
break
|
flexible
|
{
"blob_id": "a57059927a7bd3311c1d104bfc80877912c7d995",
"index": 125,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor jpg_file in jpg_files:\n basename = os.path.basename(jpg_file)\n if int(basename[:-4]) % 10 == 0:\n cnt += 1\n dirname = os.path.dirname(jpg_file)\n dirs = dirname.split('/')\n new_fname = dirs[-2] + '_' + basename[:-4] + '.bmp'\n dst_bmp_path = dst_bmp_dir + new_fname\n print(dst_bmp_path)\n shutil.copyfile(jpg_file, dst_bmp_path[:-4] + '.jpg')\n if cnt > 3:\n break\n",
"step-3": "<mask token>\nsrc_jpg_dir = 'D:/Develop/data/VOCdevkit/VOC2007/JPEGImages/'\ndst_bmp_dir = 'D:/Temp/'\njpg_files = glob.glob(src_jpg_dir + '*.jpg')\ncnt = 0\nfor jpg_file in jpg_files:\n basename = os.path.basename(jpg_file)\n if int(basename[:-4]) % 10 == 0:\n cnt += 1\n dirname = os.path.dirname(jpg_file)\n dirs = dirname.split('/')\n new_fname = dirs[-2] + '_' + basename[:-4] + '.bmp'\n dst_bmp_path = dst_bmp_dir + new_fname\n print(dst_bmp_path)\n shutil.copyfile(jpg_file, dst_bmp_path[:-4] + '.jpg')\n if cnt > 3:\n break\n",
"step-4": "<mask token>\nimport os\nfrom PIL import Image\nimport glob\nimport shutil\nsrc_jpg_dir = 'D:/Develop/data/VOCdevkit/VOC2007/JPEGImages/'\ndst_bmp_dir = 'D:/Temp/'\njpg_files = glob.glob(src_jpg_dir + '*.jpg')\ncnt = 0\nfor jpg_file in jpg_files:\n basename = os.path.basename(jpg_file)\n if int(basename[:-4]) % 10 == 0:\n cnt += 1\n dirname = os.path.dirname(jpg_file)\n dirs = dirname.split('/')\n new_fname = dirs[-2] + '_' + basename[:-4] + '.bmp'\n dst_bmp_path = dst_bmp_dir + new_fname\n print(dst_bmp_path)\n shutil.copyfile(jpg_file, dst_bmp_path[:-4] + '.jpg')\n if cnt > 3:\n break\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 20 07:48:47 2018\r\n\r\n@author: hfuji\r\n\"\"\"\r\n\r\nimport os\r\nfrom PIL import Image\r\nimport glob\r\nimport shutil\r\n\r\nsrc_jpg_dir = 'D:/Develop/data/VOCdevkit/VOC2007/JPEGImages/'\r\ndst_bmp_dir = 'D:/Temp/'\r\n\r\njpg_files = glob.glob(src_jpg_dir + '*.jpg')\r\n\r\ncnt = 0\r\nfor jpg_file in jpg_files:\r\n basename = os.path.basename(jpg_file)\r\n if int(basename[:-4]) % 10 == 0:\r\n cnt += 1\r\n dirname = os.path.dirname(jpg_file)\r\n dirs = dirname.split('/')\r\n new_fname = dirs[-2] + '_' + basename[:-4] + '.bmp'\r\n dst_bmp_path = dst_bmp_dir + new_fname\r\n print(dst_bmp_path)\r\n# pil_img = Image.open(jpg_file)\r\n# pil_img.save(dst_bmp_path, \"bmp\")\r\n shutil.copyfile(jpg_file, dst_bmp_path[:-4] + '.jpg')\r\n if cnt > 3:\r\n break",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
30. Convertir P libras inglesas a D dólares y C centavos. Usar el tipo de cambio $2.80 = 1 libra
p=2.80
x=int(input("Desea convertir sus libras a dolar(1) o a centavos(2)"))
if x == 1:
d=float(input("¿Cuantas libras desea convertir a dólar?\n"))
conversion = (d/p)
if x == 2:
c=float(input("¿Cuantas libras desea convertir a centavos?\n"))
conversion = c/100
print("El resultado es:")
print(float(conversion))
|
normal
|
{
"blob_id": "ebc2acbcbab787b07c97b0a4ea8fbaeb9d8e30aa",
"index": 9770,
"step-1": "30. Convertir P libras inglesas a D dólares y C centavos. Usar el tipo de cambio $2.80 = 1 libra\r\np=2.80\r\n\r\nx=int(input(\"Desea convertir sus libras a dolar(1) o a centavos(2)\"))\r\n\r\nif x == 1:\r\n d=float(input(\"¿Cuantas libras desea convertir a dólar?\\n\"))\r\n conversion = (d/p)\r\nif x == 2:\r\n c=float(input(\"¿Cuantas libras desea convertir a centavos?\\n\"))\r\n conversion = c/100\r\nprint(\"El resultado es:\")\r\nprint(float(conversion))\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def get_program(filename):
program = []
mask = None
with open(filename, 'r') as f:
for line in f:
line = line[:-1]
if 'mask' in line:
if mask is not None:
program.append(mask)
mask = Mask(line)
elif 'mem' in line:
mask.add_mem(line)
program.append(mask)
return program
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_program(filename):
program = []
mask = None
with open(filename, 'r') as f:
for line in f:
line = line[:-1]
if 'mask' in line:
if mask is not None:
program.append(mask)
mask = Mask(line)
elif 'mem' in line:
mask.add_mem(line)
program.append(mask)
return program
def run_program_v1(program):
addresses = defaultdict(int)
for mask in program:
for mem in mask.mems:
address = mem[0]
bits = mem[1]
masked = mask.apply_v1(bits)
addresses[address] = masked
return addresses
def run_program_v2(program):
addresses = defaultdict(int)
for mask in program:
for mem in mask.mems:
subaddresses = mask.apply_v2(mem)
addresses.update(subaddresses)
return addresses
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_program(filename):
program = []
mask = None
with open(filename, 'r') as f:
for line in f:
line = line[:-1]
if 'mask' in line:
if mask is not None:
program.append(mask)
mask = Mask(line)
elif 'mem' in line:
mask.add_mem(line)
program.append(mask)
return program
def run_program_v1(program):
addresses = defaultdict(int)
for mask in program:
for mem in mask.mems:
address = mem[0]
bits = mem[1]
masked = mask.apply_v1(bits)
addresses[address] = masked
return addresses
def run_program_v2(program):
addresses = defaultdict(int)
for mask in program:
for mem in mask.mems:
subaddresses = mask.apply_v2(mem)
addresses.update(subaddresses)
return addresses
if __name__ == '__main__':
program = get_program('input.txt')
addresses_v1 = run_program_v1(program)
part_one = sum([bits_to_decimal(bits) for bits in addresses_v1.values()])
print(f'Part One Answer: {part_one}')
addresses_v2 = run_program_v2(program)
part_two = sum([bits_to_decimal(bits) for bits in addresses_v2.values()])
print(f'Part Two Answer: {part_two}')
<|reserved_special_token_1|>
from collections import defaultdict
from mask import Mask
from utils import bits_to_decimal
def get_program(filename):
program = []
mask = None
with open(filename, 'r') as f:
for line in f:
line = line[:-1]
if 'mask' in line:
if mask is not None:
program.append(mask)
mask = Mask(line)
elif 'mem' in line:
mask.add_mem(line)
program.append(mask)
return program
def run_program_v1(program):
addresses = defaultdict(int)
for mask in program:
for mem in mask.mems:
address = mem[0]
bits = mem[1]
masked = mask.apply_v1(bits)
addresses[address] = masked
return addresses
def run_program_v2(program):
addresses = defaultdict(int)
for mask in program:
for mem in mask.mems:
subaddresses = mask.apply_v2(mem)
addresses.update(subaddresses)
return addresses
if __name__ == '__main__':
program = get_program('input.txt')
addresses_v1 = run_program_v1(program)
part_one = sum([bits_to_decimal(bits) for bits in addresses_v1.values()])
print(f'Part One Answer: {part_one}')
addresses_v2 = run_program_v2(program)
part_two = sum([bits_to_decimal(bits) for bits in addresses_v2.values()])
print(f'Part Two Answer: {part_two}')
<|reserved_special_token_1|>
from collections import defaultdict
from mask import Mask
from utils import bits_to_decimal
def get_program(filename):
program = []
mask = None
with open(filename, 'r') as f:
for line in f:
line = line[:-1]
if 'mask' in line:
if mask is not None:
program.append(mask)
mask = Mask(line)
elif 'mem' in line:
mask.add_mem(line)
program.append(mask)
return program
def run_program_v1(program):
addresses = defaultdict(int)
for mask in program:
for mem in mask.mems:
address = mem[0]
bits = mem[1]
masked = mask.apply_v1(bits)
addresses[address] = masked
return addresses
def run_program_v2(program):
addresses = defaultdict(int)
for mask in program:
for mem in mask.mems:
subaddresses = mask.apply_v2(mem)
addresses.update(subaddresses)
return addresses
if __name__ == "__main__":
program = get_program('input.txt')
addresses_v1 = run_program_v1(program)
part_one = sum([bits_to_decimal(bits) for bits in addresses_v1.values()])
print(f'Part One Answer: {part_one}')
addresses_v2 = run_program_v2(program)
part_two = sum([bits_to_decimal(bits) for bits in addresses_v2.values()])
print(f'Part Two Answer: {part_two}')
|
flexible
|
{
"blob_id": "56e8cdec854b3b7a2f925e70d7d59a73b76f9952",
"index": 9340,
"step-1": "<mask token>\n\n\ndef get_program(filename):\n program = []\n mask = None\n with open(filename, 'r') as f:\n for line in f:\n line = line[:-1]\n if 'mask' in line:\n if mask is not None:\n program.append(mask)\n mask = Mask(line)\n elif 'mem' in line:\n mask.add_mem(line)\n program.append(mask)\n return program\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_program(filename):\n program = []\n mask = None\n with open(filename, 'r') as f:\n for line in f:\n line = line[:-1]\n if 'mask' in line:\n if mask is not None:\n program.append(mask)\n mask = Mask(line)\n elif 'mem' in line:\n mask.add_mem(line)\n program.append(mask)\n return program\n\n\ndef run_program_v1(program):\n addresses = defaultdict(int)\n for mask in program:\n for mem in mask.mems:\n address = mem[0]\n bits = mem[1]\n masked = mask.apply_v1(bits)\n addresses[address] = masked\n return addresses\n\n\ndef run_program_v2(program):\n addresses = defaultdict(int)\n for mask in program:\n for mem in mask.mems:\n subaddresses = mask.apply_v2(mem)\n addresses.update(subaddresses)\n return addresses\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_program(filename):\n program = []\n mask = None\n with open(filename, 'r') as f:\n for line in f:\n line = line[:-1]\n if 'mask' in line:\n if mask is not None:\n program.append(mask)\n mask = Mask(line)\n elif 'mem' in line:\n mask.add_mem(line)\n program.append(mask)\n return program\n\n\ndef run_program_v1(program):\n addresses = defaultdict(int)\n for mask in program:\n for mem in mask.mems:\n address = mem[0]\n bits = mem[1]\n masked = mask.apply_v1(bits)\n addresses[address] = masked\n return addresses\n\n\ndef run_program_v2(program):\n addresses = defaultdict(int)\n for mask in program:\n for mem in mask.mems:\n subaddresses = mask.apply_v2(mem)\n addresses.update(subaddresses)\n return addresses\n\n\nif __name__ == '__main__':\n program = get_program('input.txt')\n addresses_v1 = run_program_v1(program)\n part_one = sum([bits_to_decimal(bits) for bits in addresses_v1.values()])\n print(f'Part One Answer: {part_one}')\n addresses_v2 = run_program_v2(program)\n part_two = sum([bits_to_decimal(bits) for bits in addresses_v2.values()])\n print(f'Part Two Answer: {part_two}')\n",
"step-4": "from collections import defaultdict\nfrom mask import Mask\nfrom utils import bits_to_decimal\n\n\ndef get_program(filename):\n program = []\n mask = None\n with open(filename, 'r') as f:\n for line in f:\n line = line[:-1]\n if 'mask' in line:\n if mask is not None:\n program.append(mask)\n mask = Mask(line)\n elif 'mem' in line:\n mask.add_mem(line)\n program.append(mask)\n return program\n\n\ndef run_program_v1(program):\n addresses = defaultdict(int)\n for mask in program:\n for mem in mask.mems:\n address = mem[0]\n bits = mem[1]\n masked = mask.apply_v1(bits)\n addresses[address] = masked\n return addresses\n\n\ndef run_program_v2(program):\n addresses = defaultdict(int)\n for mask in program:\n for mem in mask.mems:\n subaddresses = mask.apply_v2(mem)\n addresses.update(subaddresses)\n return addresses\n\n\nif __name__ == '__main__':\n program = get_program('input.txt')\n addresses_v1 = run_program_v1(program)\n part_one = sum([bits_to_decimal(bits) for bits in addresses_v1.values()])\n print(f'Part One Answer: {part_one}')\n addresses_v2 = run_program_v2(program)\n part_two = sum([bits_to_decimal(bits) for bits in addresses_v2.values()])\n print(f'Part Two Answer: {part_two}')\n",
"step-5": "from collections import defaultdict\n\nfrom mask import Mask\nfrom utils import bits_to_decimal\n\n\ndef get_program(filename):\n program = []\n mask = None\n with open(filename, 'r') as f:\n for line in f:\n line = line[:-1]\n if 'mask' in line:\n if mask is not None:\n program.append(mask)\n mask = Mask(line)\n elif 'mem' in line:\n mask.add_mem(line)\n program.append(mask)\n return program\n\n\ndef run_program_v1(program):\n addresses = defaultdict(int)\n\n for mask in program:\n for mem in mask.mems:\n address = mem[0]\n bits = mem[1]\n masked = mask.apply_v1(bits)\n addresses[address] = masked\n return addresses\n\n\ndef run_program_v2(program):\n addresses = defaultdict(int)\n for mask in program:\n for mem in mask.mems:\n subaddresses = mask.apply_v2(mem)\n addresses.update(subaddresses)\n return addresses\n\n\nif __name__ == \"__main__\":\n program = get_program('input.txt')\n addresses_v1 = run_program_v1(program)\n part_one = sum([bits_to_decimal(bits) for bits in addresses_v1.values()])\n print(f'Part One Answer: {part_one}')\n addresses_v2 = run_program_v2(program)\n part_two = sum([bits_to_decimal(bits) for bits in addresses_v2.values()])\n print(f'Part Two Answer: {part_two}')\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import json
import pika
import urllib.request
def validate_urls():
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='urlValidationQueue')
channel.basic_consume(validate_url,
queue='urlValidationQueue',
no_ack=True)
channel.start_consuming()
def validate_url(ch, method, properties, body):
message = json.loads(body)
valid = True
print(f'Got new URL to check: {message["url"]}.')
try:
urllib.request.urlopen('https://github.com/' + message["url"])
except urllib.error.HTTPError as e:
if e.code != 200:
valid = False
print(f'Checking done. Link accessible: {valid}.')
request = urllib.request.Request('http://localhost:5002/post/' + str(message["id"]) + '/update',
json.dumps({'link_accessible': valid}).encode('utf8'), method='POST',
headers={'content-type': 'application/json'})
urllib.request.urlopen(request)
print(f'Post status updated.')
if __name__ == '__main__':
print("Validator worker started. Waiting for tasks to do...")
validate_urls()
|
normal
|
{
"blob_id": "4a09096abf073294afcf21b1eff9350329d4db33",
"index": 5252,
"step-1": "<mask token>\n\n\ndef validate_url(ch, method, properties, body):\n message = json.loads(body)\n valid = True\n print(f\"Got new URL to check: {message['url']}.\")\n try:\n urllib.request.urlopen('https://github.com/' + message['url'])\n except urllib.error.HTTPError as e:\n if e.code != 200:\n valid = False\n print(f'Checking done. Link accessible: {valid}.')\n request = urllib.request.Request('http://localhost:5002/post/' + str(\n message['id']) + '/update', json.dumps({'link_accessible': valid}).\n encode('utf8'), method='POST', headers={'content-type':\n 'application/json'})\n urllib.request.urlopen(request)\n print(f'Post status updated.')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef validate_urls():\n connection = pika.BlockingConnection(pika.ConnectionParameters('localhost')\n )\n channel = connection.channel()\n channel.queue_declare(queue='urlValidationQueue')\n channel.basic_consume(validate_url, queue='urlValidationQueue', no_ack=True\n )\n channel.start_consuming()\n\n\ndef validate_url(ch, method, properties, body):\n message = json.loads(body)\n valid = True\n print(f\"Got new URL to check: {message['url']}.\")\n try:\n urllib.request.urlopen('https://github.com/' + message['url'])\n except urllib.error.HTTPError as e:\n if e.code != 200:\n valid = False\n print(f'Checking done. Link accessible: {valid}.')\n request = urllib.request.Request('http://localhost:5002/post/' + str(\n message['id']) + '/update', json.dumps({'link_accessible': valid}).\n encode('utf8'), method='POST', headers={'content-type':\n 'application/json'})\n urllib.request.urlopen(request)\n print(f'Post status updated.')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef validate_urls():\n connection = pika.BlockingConnection(pika.ConnectionParameters('localhost')\n )\n channel = connection.channel()\n channel.queue_declare(queue='urlValidationQueue')\n channel.basic_consume(validate_url, queue='urlValidationQueue', no_ack=True\n )\n channel.start_consuming()\n\n\ndef validate_url(ch, method, properties, body):\n message = json.loads(body)\n valid = True\n print(f\"Got new URL to check: {message['url']}.\")\n try:\n urllib.request.urlopen('https://github.com/' + message['url'])\n except urllib.error.HTTPError as e:\n if e.code != 200:\n valid = False\n print(f'Checking done. Link accessible: {valid}.')\n request = urllib.request.Request('http://localhost:5002/post/' + str(\n message['id']) + '/update', json.dumps({'link_accessible': valid}).\n encode('utf8'), method='POST', headers={'content-type':\n 'application/json'})\n urllib.request.urlopen(request)\n print(f'Post status updated.')\n\n\nif __name__ == '__main__':\n print('Validator worker started. Waiting for tasks to do...')\n validate_urls()\n",
"step-4": "import json\nimport pika\nimport urllib.request\n\n\ndef validate_urls():\n connection = pika.BlockingConnection(pika.ConnectionParameters('localhost')\n )\n channel = connection.channel()\n channel.queue_declare(queue='urlValidationQueue')\n channel.basic_consume(validate_url, queue='urlValidationQueue', no_ack=True\n )\n channel.start_consuming()\n\n\ndef validate_url(ch, method, properties, body):\n message = json.loads(body)\n valid = True\n print(f\"Got new URL to check: {message['url']}.\")\n try:\n urllib.request.urlopen('https://github.com/' + message['url'])\n except urllib.error.HTTPError as e:\n if e.code != 200:\n valid = False\n print(f'Checking done. Link accessible: {valid}.')\n request = urllib.request.Request('http://localhost:5002/post/' + str(\n message['id']) + '/update', json.dumps({'link_accessible': valid}).\n encode('utf8'), method='POST', headers={'content-type':\n 'application/json'})\n urllib.request.urlopen(request)\n print(f'Post status updated.')\n\n\nif __name__ == '__main__':\n print('Validator worker started. Waiting for tasks to do...')\n validate_urls()\n",
"step-5": "import json\nimport pika\nimport urllib.request\n\n\ndef validate_urls():\n connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\n channel = connection.channel()\n channel.queue_declare(queue='urlValidationQueue')\n channel.basic_consume(validate_url,\n queue='urlValidationQueue',\n no_ack=True)\n channel.start_consuming()\n\n\ndef validate_url(ch, method, properties, body):\n message = json.loads(body)\n valid = True\n print(f'Got new URL to check: {message[\"url\"]}.')\n\n try:\n urllib.request.urlopen('https://github.com/' + message[\"url\"])\n except urllib.error.HTTPError as e:\n if e.code != 200:\n valid = False\n\n print(f'Checking done. Link accessible: {valid}.')\n request = urllib.request.Request('http://localhost:5002/post/' + str(message[\"id\"]) + '/update',\n json.dumps({'link_accessible': valid}).encode('utf8'), method='POST',\n headers={'content-type': 'application/json'})\n\n urllib.request.urlopen(request)\n print(f'Post status updated.')\n\n\nif __name__ == '__main__':\n print(\"Validator worker started. Waiting for tasks to do...\")\n validate_urls()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def fcn_model_fn(features, labels, mode):
    """FCN-32s model_fn for tf.estimator.Estimator.

    Builds a VGG-16 convolutional backbone (under the 'vgg16_pretrained'
    scope so it can be warm-started from a VGG checkpoint), re-casts the
    fully-connected head as 1x1 convolutions, and upsamples 32x with one
    transposed convolution to a per-pixel foreground heatmap.

    Args:
        features: batch of input images, NHWC.
        labels: per-pixel ground truth; pixels > 0 are treated as foreground.
        mode: one of tf.estimator.ModeKeys (TRAIN / EVAL / PREDICT).

    Returns:
        tf.estimator.EstimatorSpec for the requested mode.
    """
    L2 = tf.contrib.layers.l2_regularizer(scale=0.1)
    # Layers are trainable (and dropout is active) only in TRAIN mode.
    trainable = mode == tf.estimator.ModeKeys.TRAIN
    seed = 2019

    def _conv(x, filters, ksize, conv_name, drop_name):
        # One conv + dropout pair. Layer names are preserved verbatim so
        # warm-started checkpoint variables keep resolving.
        x = tf.layers.conv2d(x, filters, ksize, activation='relu',
                             padding='same', name=conv_name,
                             kernel_regularizer=L2, trainable=trainable)
        return tf.layers.dropout(x, rate=0.4, seed=seed,
                                 training=trainable, name=drop_name)

    # VGG-16 backbone: five conv blocks, each followed by 2x2 max pooling.
    vgg_blocks = [
        (64, ('conv1_1', 'conv1_2')),
        (128, ('conv2_1', 'conv2-2')),  # 'conv2-2' kept as-is (checkpoint name)
        (256, ('conv3_1', 'conv3_2', 'conv3_3')),
        (512, ('conv4_1', 'conv4_2', 'conv4_3')),
        (512, ('conv5_1', 'conv5_2', 'conv5_3')),
    ]
    with tf.name_scope('vgg16_pretrained'):
        x = features
        for block_idx, (filters, conv_names) in enumerate(vgg_blocks, start=1):
            for sub_idx, conv_name in enumerate(conv_names, start=1):
                x = _conv(x, filters, (3, 3), conv_name,
                          'dp%d_%d' % (block_idx, sub_idx))
            x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2),
                                        name='pool%d' % block_idx)
    with tf.name_scope('deconv_layers'):
        # VGG's fully-connected layers re-cast as convolutions.
        x = _conv(x, 4096, (7, 7), 'conv6_1', 'dp6_1')
        x = _conv(x, 4096, (1, 1), 'conv6_2', 'dp6_2')
        x = _conv(x, 1, (1, 1), 'conv6_3', 'dp6_3')
        # Single 32x learned upsampling back to input resolution.
        heatmap = tf.layers.conv2d_transpose(
            x, 1, (64, 64), strides=(32, 32), activation='linear',
            padding='same', name='deconv6_1', kernel_regularizer=L2,
            trainable=trainable)
        logit = tf.nn.sigmoid(heatmap, name='logit')
    # Binary mask prediction: probability > 0.5 -> foreground.
    pred = tf.squeeze(tf.to_int32(logit > 0.5), axis=3)
    predictions = {'classes': pred, 'probabilities': logit}
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Loss: per-pixel sigmoid cross-entropy against the binarized mask.
    # (A dead `if False:` variant that masked out negative labels was removed.)
    heatmap = tf.squeeze(heatmap, axis=3)
    label_f = tf.to_int32(labels > 0)
    tf.assert_equal(tf.shape(label_f), tf.shape(heatmap))
    tf.assert_non_negative(label_f)
    loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=label_f,
                                           logits=heatmap)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.MomentumOptimizer(learning_rate=0.001,
                                               momentum=0.99)
        train_op = optimizer.minimize(loss=loss,
                                      global_step=tf.train.get_global_step())
        tf.summary.scalar('train_loss', loss)
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                          train_op=train_op)
    # EVAL: mean intersection-over-union between prediction and mask.
    iou = tf.metrics.mean_iou(label_f, predictions['classes'], num_classes=2,
                              name='mean_iou')
    eval_metric_ops = {'IoU': iou}
    tf.summary.scalar('mean_iou', iou[0])
    if mode == tf.estimator.ModeKeys.EVAL:
        tf.summary.scalar('eval_loss', loss)
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                          eval_metric_ops=eval_metric_ops)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Show INFO-level logs (training progress, checkpoint saves) on stdout.
tf.logging.set_verbosity(tf.logging.INFO)
<|reserved_special_token_0|>
def fcn_model_fn(features, labels, mode):
    """FCN-32s model_fn for tf.estimator.Estimator.

    Builds a VGG-16 convolutional backbone (under the 'vgg16_pretrained'
    scope so it can be warm-started from a VGG checkpoint), re-casts the
    fully-connected head as 1x1 convolutions, and upsamples 32x with one
    transposed convolution to a per-pixel foreground heatmap.

    Args:
        features: batch of input images, NHWC.
        labels: per-pixel ground truth; pixels > 0 are treated as foreground.
        mode: one of tf.estimator.ModeKeys (TRAIN / EVAL / PREDICT).

    Returns:
        tf.estimator.EstimatorSpec for the requested mode.
    """
    L2 = tf.contrib.layers.l2_regularizer(scale=0.1)
    # Layers are trainable (and dropout is active) only in TRAIN mode.
    trainable = mode == tf.estimator.ModeKeys.TRAIN
    seed = 2019

    def _conv(x, filters, ksize, conv_name, drop_name):
        # One conv + dropout pair. Layer names are preserved verbatim so
        # warm-started checkpoint variables keep resolving.
        x = tf.layers.conv2d(x, filters, ksize, activation='relu',
                             padding='same', name=conv_name,
                             kernel_regularizer=L2, trainable=trainable)
        return tf.layers.dropout(x, rate=0.4, seed=seed,
                                 training=trainable, name=drop_name)

    # VGG-16 backbone: five conv blocks, each followed by 2x2 max pooling.
    vgg_blocks = [
        (64, ('conv1_1', 'conv1_2')),
        (128, ('conv2_1', 'conv2-2')),  # 'conv2-2' kept as-is (checkpoint name)
        (256, ('conv3_1', 'conv3_2', 'conv3_3')),
        (512, ('conv4_1', 'conv4_2', 'conv4_3')),
        (512, ('conv5_1', 'conv5_2', 'conv5_3')),
    ]
    with tf.name_scope('vgg16_pretrained'):
        x = features
        for block_idx, (filters, conv_names) in enumerate(vgg_blocks, start=1):
            for sub_idx, conv_name in enumerate(conv_names, start=1):
                x = _conv(x, filters, (3, 3), conv_name,
                          'dp%d_%d' % (block_idx, sub_idx))
            x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2),
                                        name='pool%d' % block_idx)
    with tf.name_scope('deconv_layers'):
        # VGG's fully-connected layers re-cast as convolutions.
        x = _conv(x, 4096, (7, 7), 'conv6_1', 'dp6_1')
        x = _conv(x, 4096, (1, 1), 'conv6_2', 'dp6_2')
        x = _conv(x, 1, (1, 1), 'conv6_3', 'dp6_3')
        # Single 32x learned upsampling back to input resolution.
        heatmap = tf.layers.conv2d_transpose(
            x, 1, (64, 64), strides=(32, 32), activation='linear',
            padding='same', name='deconv6_1', kernel_regularizer=L2,
            trainable=trainable)
        logit = tf.nn.sigmoid(heatmap, name='logit')
    # Binary mask prediction: probability > 0.5 -> foreground.
    pred = tf.squeeze(tf.to_int32(logit > 0.5), axis=3)
    predictions = {'classes': pred, 'probabilities': logit}
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Loss: per-pixel sigmoid cross-entropy against the binarized mask.
    # (A dead `if False:` variant that masked out negative labels was removed.)
    heatmap = tf.squeeze(heatmap, axis=3)
    label_f = tf.to_int32(labels > 0)
    tf.assert_equal(tf.shape(label_f), tf.shape(heatmap))
    tf.assert_non_negative(label_f)
    loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=label_f,
                                           logits=heatmap)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.MomentumOptimizer(learning_rate=0.001,
                                               momentum=0.99)
        train_op = optimizer.minimize(loss=loss,
                                      global_step=tf.train.get_global_step())
        tf.summary.scalar('train_loss', loss)
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                          train_op=train_op)
    # EVAL: mean intersection-over-union between prediction and mask.
    iou = tf.metrics.mean_iou(label_f, predictions['classes'], num_classes=2,
                              name='mean_iou')
    eval_metric_ops = {'IoU': iou}
    tf.summary.scalar('mean_iou', iou[0])
    if mode == tf.estimator.ModeKeys.EVAL:
        tf.summary.scalar('eval_loss', loss)
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                          eval_metric_ops=eval_metric_ops)
if __name__ == '__main__':
    # Paths and data.
    root_dir = '/home/pohsuanh/Documents/Computer_Vision/HW6'
    train_data, eval_data, test_data, gt = data_load.load()
    # Run-mode switches.
    TRAIN = False
    PREDICT = True
    DRAW_SAMPLE = False
    if DRAW_SAMPLE:  # visually sanity-check one random test image + mask
        pic = np.random.randint(len(test_data['x']))
        image_sample = test_data['x'][pic]
        label_sample = test_data['y'][pic]
        plt.figure(figsize=(20, 40))
        plt.title('data')
        plt.imshow(image_sample)
        plt.figure(figsize=(20, 40))
        plt.title('gt')
        plt.imshow(label_sample)
    # Warm-start the VGG-16 backbone from the pretrained checkpoint.
    pretrained_weights = tf.estimator.WarmStartSettings(
        ckpt_to_initialize_from=os.path.join(root_dir, 'pretrained_weights',
                                             'vgg_16.ckpt'),
        vars_to_warm_start=tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES, scope='vgg16_pretrained'))
    fcn_segmentor = tf.estimator.Estimator(
        model_fn=fcn_model_fn,
        model_dir=os.path.join(root_dir, 'ckpts'),
        warm_start_from=pretrained_weights)
    if TRAIN:
        # Alternate 200 training steps with a full evaluation pass.
        for epoch in range(100):
            train_input_fn = tf.estimator.inputs.numpy_input_fn(
                x=train_data['x'], y=train_data['y'], batch_size=1,
                num_epochs=None, shuffle=True)
            fcn_segmentor.train(input_fn=train_input_fn, steps=200)
            eval_input_fn = tf.estimator.inputs.numpy_input_fn(
                x=eval_data['x'], y=eval_data['y'], num_epochs=1,
                batch_size=10, shuffle=False)
            eval_results = fcn_segmentor.evaluate(input_fn=eval_input_fn)
            print('eval_loss :', eval_results)
    if PREDICT:
        # Predict every test image and save photo/prediction/gt triptychs.
        pred_input_fn = tf.estimator.inputs.numpy_input_fn(
            x=test_data['x'], y=test_data['y'], batch_size=1, num_epochs=1,
            shuffle=False)
        pred = list(fcn_segmentor.predict(input_fn=pred_input_fn))
        pred = [p['classes'] for p in pred]
        fig = plt.figure(1, figsize=(32, 16))
        for i, p in enumerate(pred):
            fig.add_subplot(3, 1, 1)
            plt.title('camera photo')
            plt.imshow(test_data['x'][i])
            fig.add_subplot(3, 1, 2)
            plt.title('prediction')
            plt.imshow(p)
            fig.add_subplot(3, 1, 3)
            plt.title('ground truth')
            plt.imshow(gt['test'][i])
            filename = 'pred_{}.png'.format(i)
            plt.savefig(os.path.join(root_dir, 'predictions', filename))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Show INFO-level logs (training progress, checkpoint saves) on stdout.
tf.logging.set_verbosity(tf.logging.INFO)
# Timestamped log directory so each run gets its own TensorBoard trace.
now = datetime.utcnow().strftime('%Y%m%d%H%M%S')
root_logdir = 'logs'
logdir = '{}/run-{}/'.format(root_logdir, now)
def fcn_model_fn(features, labels, mode):
    """FCN-32s model_fn for tf.estimator.Estimator.

    Builds a VGG-16 convolutional backbone (under the 'vgg16_pretrained'
    scope so it can be warm-started from a VGG checkpoint), re-casts the
    fully-connected head as 1x1 convolutions, and upsamples 32x with one
    transposed convolution to a per-pixel foreground heatmap.

    Args:
        features: batch of input images, NHWC.
        labels: per-pixel ground truth; pixels > 0 are treated as foreground.
        mode: one of tf.estimator.ModeKeys (TRAIN / EVAL / PREDICT).

    Returns:
        tf.estimator.EstimatorSpec for the requested mode.
    """
    L2 = tf.contrib.layers.l2_regularizer(scale=0.1)
    # Layers are trainable (and dropout is active) only in TRAIN mode.
    trainable = mode == tf.estimator.ModeKeys.TRAIN
    seed = 2019

    def _conv(x, filters, ksize, conv_name, drop_name):
        # One conv + dropout pair. Layer names are preserved verbatim so
        # warm-started checkpoint variables keep resolving.
        x = tf.layers.conv2d(x, filters, ksize, activation='relu',
                             padding='same', name=conv_name,
                             kernel_regularizer=L2, trainable=trainable)
        return tf.layers.dropout(x, rate=0.4, seed=seed,
                                 training=trainable, name=drop_name)

    # VGG-16 backbone: five conv blocks, each followed by 2x2 max pooling.
    vgg_blocks = [
        (64, ('conv1_1', 'conv1_2')),
        (128, ('conv2_1', 'conv2-2')),  # 'conv2-2' kept as-is (checkpoint name)
        (256, ('conv3_1', 'conv3_2', 'conv3_3')),
        (512, ('conv4_1', 'conv4_2', 'conv4_3')),
        (512, ('conv5_1', 'conv5_2', 'conv5_3')),
    ]
    with tf.name_scope('vgg16_pretrained'):
        x = features
        for block_idx, (filters, conv_names) in enumerate(vgg_blocks, start=1):
            for sub_idx, conv_name in enumerate(conv_names, start=1):
                x = _conv(x, filters, (3, 3), conv_name,
                          'dp%d_%d' % (block_idx, sub_idx))
            x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2),
                                        name='pool%d' % block_idx)
    with tf.name_scope('deconv_layers'):
        # VGG's fully-connected layers re-cast as convolutions.
        x = _conv(x, 4096, (7, 7), 'conv6_1', 'dp6_1')
        x = _conv(x, 4096, (1, 1), 'conv6_2', 'dp6_2')
        x = _conv(x, 1, (1, 1), 'conv6_3', 'dp6_3')
        # Single 32x learned upsampling back to input resolution.
        heatmap = tf.layers.conv2d_transpose(
            x, 1, (64, 64), strides=(32, 32), activation='linear',
            padding='same', name='deconv6_1', kernel_regularizer=L2,
            trainable=trainable)
        logit = tf.nn.sigmoid(heatmap, name='logit')
    # Binary mask prediction: probability > 0.5 -> foreground.
    pred = tf.squeeze(tf.to_int32(logit > 0.5), axis=3)
    predictions = {'classes': pred, 'probabilities': logit}
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Loss: per-pixel sigmoid cross-entropy against the binarized mask.
    # (A dead `if False:` variant that masked out negative labels was removed.)
    heatmap = tf.squeeze(heatmap, axis=3)
    label_f = tf.to_int32(labels > 0)
    tf.assert_equal(tf.shape(label_f), tf.shape(heatmap))
    tf.assert_non_negative(label_f)
    loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=label_f,
                                           logits=heatmap)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.MomentumOptimizer(learning_rate=0.001,
                                               momentum=0.99)
        train_op = optimizer.minimize(loss=loss,
                                      global_step=tf.train.get_global_step())
        tf.summary.scalar('train_loss', loss)
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                          train_op=train_op)
    # EVAL: mean intersection-over-union between prediction and mask.
    iou = tf.metrics.mean_iou(label_f, predictions['classes'], num_classes=2,
                              name='mean_iou')
    eval_metric_ops = {'IoU': iou}
    tf.summary.scalar('mean_iou', iou[0])
    if mode == tf.estimator.ModeKeys.EVAL:
        tf.summary.scalar('eval_loss', loss)
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                          eval_metric_ops=eval_metric_ops)
if __name__ == '__main__':
    # Paths and data.
    root_dir = '/home/pohsuanh/Documents/Computer_Vision/HW6'
    train_data, eval_data, test_data, gt = data_load.load()
    # Run-mode switches.
    TRAIN = False
    PREDICT = True
    DRAW_SAMPLE = False
    if DRAW_SAMPLE:  # visually sanity-check one random test image + mask
        pic = np.random.randint(len(test_data['x']))
        image_sample = test_data['x'][pic]
        label_sample = test_data['y'][pic]
        plt.figure(figsize=(20, 40))
        plt.title('data')
        plt.imshow(image_sample)
        plt.figure(figsize=(20, 40))
        plt.title('gt')
        plt.imshow(label_sample)
    # Warm-start the VGG-16 backbone from the pretrained checkpoint.
    pretrained_weights = tf.estimator.WarmStartSettings(
        ckpt_to_initialize_from=os.path.join(root_dir, 'pretrained_weights',
                                             'vgg_16.ckpt'),
        vars_to_warm_start=tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES, scope='vgg16_pretrained'))
    fcn_segmentor = tf.estimator.Estimator(
        model_fn=fcn_model_fn,
        model_dir=os.path.join(root_dir, 'ckpts'),
        warm_start_from=pretrained_weights)
    if TRAIN:
        # Alternate 200 training steps with a full evaluation pass.
        for epoch in range(100):
            train_input_fn = tf.estimator.inputs.numpy_input_fn(
                x=train_data['x'], y=train_data['y'], batch_size=1,
                num_epochs=None, shuffle=True)
            fcn_segmentor.train(input_fn=train_input_fn, steps=200)
            eval_input_fn = tf.estimator.inputs.numpy_input_fn(
                x=eval_data['x'], y=eval_data['y'], num_epochs=1,
                batch_size=10, shuffle=False)
            eval_results = fcn_segmentor.evaluate(input_fn=eval_input_fn)
            print('eval_loss :', eval_results)
    if PREDICT:
        # Predict every test image and save photo/prediction/gt triptychs.
        pred_input_fn = tf.estimator.inputs.numpy_input_fn(
            x=test_data['x'], y=test_data['y'], batch_size=1, num_epochs=1,
            shuffle=False)
        pred = list(fcn_segmentor.predict(input_fn=pred_input_fn))
        pred = [p['classes'] for p in pred]
        fig = plt.figure(1, figsize=(32, 16))
        for i, p in enumerate(pred):
            fig.add_subplot(3, 1, 1)
            plt.title('camera photo')
            plt.imshow(test_data['x'][i])
            fig.add_subplot(3, 1, 2)
            plt.title('prediction')
            plt.imshow(p)
            fig.add_subplot(3, 1, 3)
            plt.title('ground truth')
            plt.imshow(gt['test'][i])
            filename = 'pred_{}.png'.format(i)
            plt.savefig(os.path.join(root_dir, 'predictions', filename))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import os
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import data_load
from datetime import datetime
# Show INFO-level logs (training progress, checkpoint saves) on stdout.
tf.logging.set_verbosity(tf.logging.INFO)
# Timestamped log directory so each run gets its own TensorBoard trace.
now = datetime.utcnow().strftime('%Y%m%d%H%M%S')
root_logdir = 'logs'
logdir = '{}/run-{}/'.format(root_logdir, now)
def fcn_model_fn(features, labels, mode):
    """FCN-32s model_fn for tf.estimator.Estimator.

    Builds a VGG-16 convolutional backbone (under the 'vgg16_pretrained'
    scope so it can be warm-started from a VGG checkpoint), re-casts the
    fully-connected head as 1x1 convolutions, and upsamples 32x with one
    transposed convolution to a per-pixel foreground heatmap.

    Args:
        features: batch of input images, NHWC.
        labels: per-pixel ground truth; pixels > 0 are treated as foreground.
        mode: one of tf.estimator.ModeKeys (TRAIN / EVAL / PREDICT).

    Returns:
        tf.estimator.EstimatorSpec for the requested mode.
    """
    L2 = tf.contrib.layers.l2_regularizer(scale=0.1)
    # Layers are trainable (and dropout is active) only in TRAIN mode.
    trainable = mode == tf.estimator.ModeKeys.TRAIN
    seed = 2019

    def _conv(x, filters, ksize, conv_name, drop_name):
        # One conv + dropout pair. Layer names are preserved verbatim so
        # warm-started checkpoint variables keep resolving.
        x = tf.layers.conv2d(x, filters, ksize, activation='relu',
                             padding='same', name=conv_name,
                             kernel_regularizer=L2, trainable=trainable)
        return tf.layers.dropout(x, rate=0.4, seed=seed,
                                 training=trainable, name=drop_name)

    # VGG-16 backbone: five conv blocks, each followed by 2x2 max pooling.
    vgg_blocks = [
        (64, ('conv1_1', 'conv1_2')),
        (128, ('conv2_1', 'conv2-2')),  # 'conv2-2' kept as-is (checkpoint name)
        (256, ('conv3_1', 'conv3_2', 'conv3_3')),
        (512, ('conv4_1', 'conv4_2', 'conv4_3')),
        (512, ('conv5_1', 'conv5_2', 'conv5_3')),
    ]
    with tf.name_scope('vgg16_pretrained'):
        x = features
        for block_idx, (filters, conv_names) in enumerate(vgg_blocks, start=1):
            for sub_idx, conv_name in enumerate(conv_names, start=1):
                x = _conv(x, filters, (3, 3), conv_name,
                          'dp%d_%d' % (block_idx, sub_idx))
            x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2),
                                        name='pool%d' % block_idx)
    with tf.name_scope('deconv_layers'):
        # VGG's fully-connected layers re-cast as convolutions.
        x = _conv(x, 4096, (7, 7), 'conv6_1', 'dp6_1')
        x = _conv(x, 4096, (1, 1), 'conv6_2', 'dp6_2')
        x = _conv(x, 1, (1, 1), 'conv6_3', 'dp6_3')
        # Single 32x learned upsampling back to input resolution.
        heatmap = tf.layers.conv2d_transpose(
            x, 1, (64, 64), strides=(32, 32), activation='linear',
            padding='same', name='deconv6_1', kernel_regularizer=L2,
            trainable=trainable)
        logit = tf.nn.sigmoid(heatmap, name='logit')
    # Binary mask prediction: probability > 0.5 -> foreground.
    pred = tf.squeeze(tf.to_int32(logit > 0.5), axis=3)
    predictions = {'classes': pred, 'probabilities': logit}
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Loss: per-pixel sigmoid cross-entropy against the binarized mask.
    # (A dead `if False:` variant that masked out negative labels was removed.)
    heatmap = tf.squeeze(heatmap, axis=3)
    label_f = tf.to_int32(labels > 0)
    tf.assert_equal(tf.shape(label_f), tf.shape(heatmap))
    tf.assert_non_negative(label_f)
    loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=label_f,
                                           logits=heatmap)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.MomentumOptimizer(learning_rate=0.001,
                                               momentum=0.99)
        train_op = optimizer.minimize(loss=loss,
                                      global_step=tf.train.get_global_step())
        tf.summary.scalar('train_loss', loss)
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                          train_op=train_op)
    # EVAL: mean intersection-over-union between prediction and mask.
    iou = tf.metrics.mean_iou(label_f, predictions['classes'], num_classes=2,
                              name='mean_iou')
    eval_metric_ops = {'IoU': iou}
    tf.summary.scalar('mean_iou', iou[0])
    if mode == tf.estimator.ModeKeys.EVAL:
        tf.summary.scalar('eval_loss', loss)
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                          eval_metric_ops=eval_metric_ops)
if __name__ == '__main__':
    # Paths and data.
    root_dir = '/home/pohsuanh/Documents/Computer_Vision/HW6'
    train_data, eval_data, test_data, gt = data_load.load()
    # Run-mode switches.
    TRAIN = False
    PREDICT = True
    DRAW_SAMPLE = False
    if DRAW_SAMPLE:  # visually sanity-check one random test image + mask
        pic = np.random.randint(len(test_data['x']))
        image_sample = test_data['x'][pic]
        label_sample = test_data['y'][pic]
        plt.figure(figsize=(20, 40))
        plt.title('data')
        plt.imshow(image_sample)
        plt.figure(figsize=(20, 40))
        plt.title('gt')
        plt.imshow(label_sample)
    # Warm-start the VGG-16 backbone from the pretrained checkpoint.
    pretrained_weights = tf.estimator.WarmStartSettings(
        ckpt_to_initialize_from=os.path.join(root_dir, 'pretrained_weights',
                                             'vgg_16.ckpt'),
        vars_to_warm_start=tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES, scope='vgg16_pretrained'))
    fcn_segmentor = tf.estimator.Estimator(
        model_fn=fcn_model_fn,
        model_dir=os.path.join(root_dir, 'ckpts'),
        warm_start_from=pretrained_weights)
    if TRAIN:
        # Alternate 200 training steps with a full evaluation pass.
        for epoch in range(100):
            train_input_fn = tf.estimator.inputs.numpy_input_fn(
                x=train_data['x'], y=train_data['y'], batch_size=1,
                num_epochs=None, shuffle=True)
            fcn_segmentor.train(input_fn=train_input_fn, steps=200)
            eval_input_fn = tf.estimator.inputs.numpy_input_fn(
                x=eval_data['x'], y=eval_data['y'], num_epochs=1,
                batch_size=10, shuffle=False)
            eval_results = fcn_segmentor.evaluate(input_fn=eval_input_fn)
            print('eval_loss :', eval_results)
    if PREDICT:
        # Predict every test image and save photo/prediction/gt triptychs.
        pred_input_fn = tf.estimator.inputs.numpy_input_fn(
            x=test_data['x'], y=test_data['y'], batch_size=1, num_epochs=1,
            shuffle=False)
        pred = list(fcn_segmentor.predict(input_fn=pred_input_fn))
        pred = [p['classes'] for p in pred]
        fig = plt.figure(1, figsize=(32, 16))
        for i, p in enumerate(pred):
            fig.add_subplot(3, 1, 1)
            plt.title('camera photo')
            plt.imshow(test_data['x'][i])
            fig.add_subplot(3, 1, 2)
            plt.title('prediction')
            plt.imshow(p)
            fig.add_subplot(3, 1, 3)
            plt.title('ground truth')
            plt.imshow(gt['test'][i])
            filename = 'pred_{}.png'.format(i)
            plt.savefig(os.path.join(root_dir, 'predictions', filename))
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 26 23:42:11 2018
@author: pohsuanh
Fully Covolutional Network FCN-32s.
FCN-32s network is based on VGG-16
"""
import os
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import data_load
from datetime import datetime
# Show INFO-level logs (training progress, checkpoint saves) on stdout.
tf.logging.set_verbosity(tf.logging.INFO)
# assign each run to a separate log file, so the tensorboard can function properly.
now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
root_logdir = "logs"
logdir = "{}/run-{}/".format(root_logdir,now)
def _conv_dropout(x, filters, kernel_size, conv_name, drop_name,
                  regularizer, seed, trainable):
    """One conv2d (ReLU, 'same' padding) + dropout(rate=0.4) pair.

    This is the repeated unit of the VGG-16 stack in ``fcn_model_fn``.
    Dropout is only active while ``trainable`` is True (i.e. TRAIN mode).
    """
    x = tf.layers.conv2d(x, filters, kernel_size,
                         activation='relu',
                         padding='same',
                         name=conv_name,
                         kernel_regularizer=regularizer,
                         trainable=trainable)
    return tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
                             name=drop_name)


def fcn_model_fn(features, labels, mode):
    """tf.estimator model_fn for an FCN-32s road segmentor (VGG-16 backbone).

    Builds a VGG-16 convolutional stack under name scope ``vgg16_pretrained``
    (so its variables can be warm-started from a VGG checkpoint), followed by
    fully-convolutional layers and one 32x transposed convolution that
    upsamples a single-channel heatmap back to input resolution.

    Args:
        features: batch of input images.
        labels: integer ground-truth masks; pixels > 0 are treated as the
            positive (road) class.  May be None in PREDICT mode (not read
            before the PREDICT early return).
        mode: a ``tf.estimator.ModeKeys`` value.

    Returns:
        A ``tf.estimator.EstimatorSpec`` for the requested mode.
    """
    L2 = tf.contrib.layers.l2_regularizer(scale=0.1)
    # Backbone weights are only updated (and dropout only active) in TRAIN.
    trainable = mode == tf.estimator.ModeKeys.TRAIN
    seed = 2019
    with tf.name_scope("vgg16_pretrained"):
        # Block 1
        x = _conv_dropout(features, 64, (3, 3), 'conv1_1', 'dp1_1', L2, seed, trainable)
        x = _conv_dropout(x, 64, (3, 3), 'conv1_2', 'dp1_2', L2, seed, trainable)
        x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool1')
        # Block 2
        x = _conv_dropout(x, 128, (3, 3), 'conv2_1', 'dp2_1', L2, seed, trainable)
        # NOTE(review): the hyphen in 'conv2-2' is kept verbatim so variable
        # names keep matching any previously written checkpoints.
        x = _conv_dropout(x, 128, (3, 3), 'conv2-2', 'dp2_2', L2, seed, trainable)
        x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool2')
        # Block 3
        x = _conv_dropout(x, 256, (3, 3), 'conv3_1', 'dp3_1', L2, seed, trainable)
        x = _conv_dropout(x, 256, (3, 3), 'conv3_2', 'dp3_2', L2, seed, trainable)
        x = _conv_dropout(x, 256, (3, 3), 'conv3_3', 'dp3_3', L2, seed, trainable)
        x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool3')
        # Block 4
        x = _conv_dropout(x, 512, (3, 3), 'conv4_1', 'dp4_1', L2, seed, trainable)
        x = _conv_dropout(x, 512, (3, 3), 'conv4_2', 'dp4_2', L2, seed, trainable)
        x = _conv_dropout(x, 512, (3, 3), 'conv4_3', 'dp4_3', L2, seed, trainable)
        x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool4')
        # Block 5
        x = _conv_dropout(x, 512, (3, 3), 'conv5_1', 'dp5_1', L2, seed, trainable)
        x = _conv_dropout(x, 512, (3, 3), 'conv5_2', 'dp5_2', L2, seed, trainable)
        x = _conv_dropout(x, 512, (3, 3), 'conv5_3', 'dp5_3', L2, seed, trainable)
        x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool5')
    with tf.name_scope("deconv_layers"):
        # Block 6: fully-convolutional head.
        x = _conv_dropout(x, 4096, (7, 7), 'conv6_1', 'dp6_1', L2, seed, trainable)
        x = _conv_dropout(x, 4096, (1, 1), 'conv6_2', 'dp6_2', L2, seed, trainable)
        x = _conv_dropout(x, 1, (1, 1), 'conv6_3', 'dp6_3', L2, seed, trainable)
        # Two classes [1: road, 0: non-road] -> one output channel,
        # upsampled 32x in a single transposed convolution (FCN-32s).
        heatmap = tf.layers.conv2d_transpose(x, 1, (64, 64), strides=(32, 32),
                                             activation='linear',
                                             padding='same',
                                             name='deconv6_1',
                                             kernel_regularizer=L2,
                                             trainable=trainable)
        logit = tf.nn.sigmoid(heatmap, name='logit')
        pred = tf.to_int32(logit > 0.5)  # threshold per-pixel probability
        pred = tf.squeeze(pred, axis=3)  # drop the channel dimension
    # Pixel-wise classification outputs.
    predictions = {
        # Generate predictions (for PREDICT and EVAL mode).
        "classes": pred,
        # Per-pixel sigmoid probability, used for PREDICT and logging.
        "probabilities": logit
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Pixel-wise sigmoid cross-entropy against the binarised labels.
    # NOTE(review): Void pixels (labelled -1) are folded into the negative
    # class by the > 0 binarisation rather than excluded from the loss;
    # tf.assert_non_negative only checks the already-binarised tensor.
    heatmap = tf.squeeze(heatmap, axis=3)
    label_f = tf.to_int32(labels > 0)
    tf.assert_equal(tf.shape(label_f), tf.shape(heatmap))
    tf.assert_non_negative(label_f)
    loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=label_f,
                                           logits=heatmap)
    # Configure the training op (for TRAIN mode).
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.MomentumOptimizer(learning_rate=0.001,
                                               momentum=0.99)
        train_op = optimizer.minimize(loss=loss,
                                      global_step=tf.train.get_global_step())
        tf.summary.scalar('train_loss', loss)
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                          train_op=train_op)
    # EVAL: report mean intersection-over-union of the binary masks.
    iou = tf.metrics.mean_iou(label_f, predictions['classes'],
                              num_classes=2, name='mean_iou')
    eval_metric_ops = {"IoU": iou}
    tf.summary.scalar('mean_iou', iou[0])
    if mode == tf.estimator.ModeKeys.EVAL:
        tf.summary.scalar('eval_loss', loss)
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                          eval_metric_ops=eval_metric_ops)
#%%
if __name__ == "__main__":
    root_dir = '/home/pohsuanh/Documents/Computer_Vision/HW6'
    # Load training and eval data.
    train_data, eval_data, test_data, gt = data_load.load()
    # Flags selecting which phases to run.
    TRAIN = False
    PREDICT = True
    DRAW_SAMPLE = False
    if DRAW_SAMPLE:
        # Visual sanity check: show one random test image and its mask.
        pic = np.random.randint(len(test_data['x']))
        image_sample = test_data['x'][pic]
        label_sample = test_data['y'][pic]
        plt.figure(figsize=(20, 40))
        plt.title('data')
        plt.imshow(image_sample)
        plt.figure(figsize=(20, 40))
        plt.title('gt')
        plt.imshow(label_sample)
    # Create the Estimator, warm-starting the VGG-16 backbone weights.
    pretrained_weights = tf.estimator.WarmStartSettings(
        ckpt_to_initialize_from=os.path.join(root_dir, 'pretrained_weights', 'vgg_16.ckpt'),
        vars_to_warm_start=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                             scope='vgg16_pretrained'))
    fcn_segmentor = tf.estimator.Estimator(
        model_fn=fcn_model_fn,
        model_dir=os.path.join(root_dir, 'ckpts'),
        warm_start_from=pretrained_weights)
    if TRAIN:
        for epoch in range(100):
            # Train for 200 steps ...
            train_input_fn = tf.estimator.inputs.numpy_input_fn(
                x=train_data['x'],
                y=train_data['y'],
                batch_size=1,
                num_epochs=None,  # iterate indefinitely; `steps` bounds the run
                shuffle=True)
            fcn_segmentor.train(input_fn=train_input_fn, steps=200)
            # ... then evaluate on the held-out split and print the metrics.
            eval_input_fn = tf.estimator.inputs.numpy_input_fn(
                x=eval_data['x'],
                y=eval_data['y'],
                num_epochs=1,
                batch_size=10,
                shuffle=False)
            eval_results = fcn_segmentor.evaluate(input_fn=eval_input_fn)
            print('eval_loss :', eval_results)
    #%% We withhold the prediction from the test set until all the hyperparameters are finetuned.
    if PREDICT:
        pred_input_fn = tf.estimator.inputs.numpy_input_fn(
            x=test_data['x'],
            y=test_data['y'],
            batch_size=1,
            num_epochs=1,
            shuffle=False)
        # Estimator.predict returns a generator; extract the class masks.
        pred = [p['classes'] for p in fcn_segmentor.predict(input_fn=pred_input_fn)]
        fig = plt.figure(1, figsize=(32, 16))
        for i, p in enumerate(pred):
            # Stack photo / prediction / ground truth vertically and save.
            fig.add_subplot(3, 1, 1)
            plt.title('camera photo')
            plt.imshow(test_data['x'][i])
            fig.add_subplot(3, 1, 2)
            plt.title('prediction')
            plt.imshow(p)
            fig.add_subplot(3, 1, 3)
            plt.title('ground truth')
            plt.imshow(gt['test'][i])
            filename = 'pred_{}.png'.format(i)
            plt.savefig(os.path.join(root_dir, 'predictions', filename))
|
flexible
|
{
"blob_id": "df6fa0409500f97e5afde8f97796d6ed0cc4d746",
"index": 1330,
"step-1": "<mask token>\n\n\ndef fcn_model_fn(features, labels, mode):\n L2 = tf.contrib.layers.l2_regularizer(scale=0.1)\n trainable = False\n if mode == tf.estimator.ModeKeys.TRAIN:\n trainable = True\n seed = 2019\n with tf.name_scope('vgg16_pretrained'):\n x = tf.layers.conv2d(features, 64, (3, 3), activation='relu',\n padding='same', name='conv1_1', kernel_regularizer=L2,\n trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp1_1')\n x = tf.layers.conv2d(x, 64, (3, 3), activation='relu', padding=\n 'same', name='conv1_2', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp1_2')\n x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool1')\n x = tf.layers.conv2d(x, 128, (3, 3), activation='relu', padding=\n 'same', name='conv2_1', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp2_1')\n x = tf.layers.conv2d(x, 128, (3, 3), activation='relu', padding=\n 'same', name='conv2-2', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp2_2')\n x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool2')\n x = tf.layers.conv2d(x, 256, (3, 3), activation='relu', padding=\n 'same', name='conv3_1', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp3_1')\n x = tf.layers.conv2d(x, 256, (3, 3), activation='relu', padding=\n 'same', name='conv3_2', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp3_2')\n x = tf.layers.conv2d(x, 256, (3, 3), activation='relu', padding=\n 'same', name='conv3_3', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp3_3')\n x = tf.layers.max_pooling2d(x, (2, 2), 
strides=(2, 2), name='pool3')\n x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=\n 'same', name='conv4_1', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp4_1')\n x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=\n 'same', name='conv4_2', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp4_2')\n x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=\n 'same', name='conv4_3', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp4_3')\n x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool4')\n x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=\n 'same', name='conv5_1', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp5_1')\n x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=\n 'same', name='conv5_2', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp5_2')\n x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=\n 'same', name='conv5_3', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp5_3')\n x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool5')\n with tf.name_scope('deconv_layers'):\n x = tf.layers.conv2d(x, 4096, (7, 7), activation='relu', padding=\n 'same', name='conv6_1', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp6_1')\n x = tf.layers.conv2d(x, 4096, (1, 1), activation='relu', padding=\n 'same', name='conv6_2', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp6_2')\n x = 
tf.layers.conv2d(x, 1, (1, 1), activation='relu', padding=\n 'same', name='conv6_3', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp6_3')\n heatmap = tf.layers.conv2d_transpose(x, 1, (64, 64), strides=(32, \n 32), activation='linear', padding='same', name='deconv6_1',\n kernel_regularizer=L2, trainable=trainable)\n logit = tf.nn.sigmoid(heatmap, name='logit')\n pred = tf.to_int32(logit > 0.5)\n pred = tf.squeeze(pred, axis=3)\n predictions = {'classes': pred, 'probabilities': logit}\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n if False:\n logit_f = tf.reshape(heatmap, (-1, 1, 1, 1))\n logit_f = tf.squeeze(logit_f, axis=[2, 3])\n label_f = tf.reshape(labels, (-1, 1))\n keep = tf.where(tf.greater_equal(labels, 0))\n logit_f = tf.gather(logit_f, keep)\n label_f = tf.gather(label_f, keep)\n tf.assert_equal(tf.shape(label_f)[0], tf.shape(logit_f)[0])\n tf.assert_non_negative(label_f)\n loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=label_f,\n logits=logit_f)\n heatmap = tf.squeeze(heatmap, axis=3)\n label_f = tf.to_int32(labels > 0)\n tf.assert_equal(tf.shape(label_f), tf.shape(heatmap))\n tf.assert_non_negative(label_f)\n loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=label_f,\n logits=heatmap)\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = tf.train.MomentumOptimizer(learning_rate=0.001,\n momentum=0.99)\n train_op = optimizer.minimize(loss=loss, global_step=tf.train.\n get_global_step())\n tf.summary.scalar('train_loss', loss)\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=\n train_op)\n iou = tf.metrics.mean_iou(label_f, predictions['classes'], num_classes=\n 2, name='mean_iou')\n eval_metric_ops = {'IoU': iou}\n tensors_to_log_prob = {'probabilities': 'deconv_layers/logit'}\n tensors_to_log_iou = {'mean_iou': iou}\n tf.summary.scalar('mean_iou', iou[0])\n logging_hook = 
tf.train.LoggingTensorHook(tensors=tensors_to_log_iou,\n every_n_iter=200)\n if mode == tf.estimator.ModeKeys.EVAL:\n tf.summary.scalar('eval_loss', loss)\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss,\n eval_metric_ops=eval_metric_ops)\n\n\n<mask token>\n",
"step-2": "<mask token>\ntf.logging.set_verbosity(tf.logging.INFO)\n<mask token>\n\n\ndef fcn_model_fn(features, labels, mode):\n L2 = tf.contrib.layers.l2_regularizer(scale=0.1)\n trainable = False\n if mode == tf.estimator.ModeKeys.TRAIN:\n trainable = True\n seed = 2019\n with tf.name_scope('vgg16_pretrained'):\n x = tf.layers.conv2d(features, 64, (3, 3), activation='relu',\n padding='same', name='conv1_1', kernel_regularizer=L2,\n trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp1_1')\n x = tf.layers.conv2d(x, 64, (3, 3), activation='relu', padding=\n 'same', name='conv1_2', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp1_2')\n x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool1')\n x = tf.layers.conv2d(x, 128, (3, 3), activation='relu', padding=\n 'same', name='conv2_1', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp2_1')\n x = tf.layers.conv2d(x, 128, (3, 3), activation='relu', padding=\n 'same', name='conv2-2', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp2_2')\n x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool2')\n x = tf.layers.conv2d(x, 256, (3, 3), activation='relu', padding=\n 'same', name='conv3_1', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp3_1')\n x = tf.layers.conv2d(x, 256, (3, 3), activation='relu', padding=\n 'same', name='conv3_2', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp3_2')\n x = tf.layers.conv2d(x, 256, (3, 3), activation='relu', padding=\n 'same', name='conv3_3', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n 
name='dp3_3')\n x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool3')\n x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=\n 'same', name='conv4_1', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp4_1')\n x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=\n 'same', name='conv4_2', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp4_2')\n x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=\n 'same', name='conv4_3', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp4_3')\n x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool4')\n x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=\n 'same', name='conv5_1', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp5_1')\n x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=\n 'same', name='conv5_2', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp5_2')\n x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=\n 'same', name='conv5_3', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp5_3')\n x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool5')\n with tf.name_scope('deconv_layers'):\n x = tf.layers.conv2d(x, 4096, (7, 7), activation='relu', padding=\n 'same', name='conv6_1', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp6_1')\n x = tf.layers.conv2d(x, 4096, (1, 1), activation='relu', padding=\n 'same', name='conv6_2', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, 
training=trainable,\n name='dp6_2')\n x = tf.layers.conv2d(x, 1, (1, 1), activation='relu', padding=\n 'same', name='conv6_3', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp6_3')\n heatmap = tf.layers.conv2d_transpose(x, 1, (64, 64), strides=(32, \n 32), activation='linear', padding='same', name='deconv6_1',\n kernel_regularizer=L2, trainable=trainable)\n logit = tf.nn.sigmoid(heatmap, name='logit')\n pred = tf.to_int32(logit > 0.5)\n pred = tf.squeeze(pred, axis=3)\n predictions = {'classes': pred, 'probabilities': logit}\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n if False:\n logit_f = tf.reshape(heatmap, (-1, 1, 1, 1))\n logit_f = tf.squeeze(logit_f, axis=[2, 3])\n label_f = tf.reshape(labels, (-1, 1))\n keep = tf.where(tf.greater_equal(labels, 0))\n logit_f = tf.gather(logit_f, keep)\n label_f = tf.gather(label_f, keep)\n tf.assert_equal(tf.shape(label_f)[0], tf.shape(logit_f)[0])\n tf.assert_non_negative(label_f)\n loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=label_f,\n logits=logit_f)\n heatmap = tf.squeeze(heatmap, axis=3)\n label_f = tf.to_int32(labels > 0)\n tf.assert_equal(tf.shape(label_f), tf.shape(heatmap))\n tf.assert_non_negative(label_f)\n loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=label_f,\n logits=heatmap)\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = tf.train.MomentumOptimizer(learning_rate=0.001,\n momentum=0.99)\n train_op = optimizer.minimize(loss=loss, global_step=tf.train.\n get_global_step())\n tf.summary.scalar('train_loss', loss)\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=\n train_op)\n iou = tf.metrics.mean_iou(label_f, predictions['classes'], num_classes=\n 2, name='mean_iou')\n eval_metric_ops = {'IoU': iou}\n tensors_to_log_prob = {'probabilities': 'deconv_layers/logit'}\n tensors_to_log_iou = {'mean_iou': iou}\n 
tf.summary.scalar('mean_iou', iou[0])\n logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log_iou,\n every_n_iter=200)\n if mode == tf.estimator.ModeKeys.EVAL:\n tf.summary.scalar('eval_loss', loss)\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss,\n eval_metric_ops=eval_metric_ops)\n\n\nif __name__ == '__main__':\n root_dir = '/home/pohsuanh/Documents/Computer_Vision/HW6'\n train_data, eval_data, test_data, gt = data_load.load()\n TRAIN = False\n PREDICT = True\n DRAW_SAMPLE = False\n if DRAW_SAMPLE == True:\n pic = np.random.randint(len(test_data['x']))\n image_sample = test_data['x'][pic]\n label_sample = test_data['y'][pic]\n plt.figure(figsize=(20, 40))\n plt.title('data')\n plt.imshow(image_sample)\n plt.figure(figsize=(20, 40))\n plt.title('gt')\n plt.imshow(label_sample)\n pretrained_weights = tf.estimator.WarmStartSettings(ckpt_to_initialize_from\n =os.path.join(root_dir, 'pretrained_weights', 'vgg_16.ckpt'),\n vars_to_warm_start=tf.get_collection(tf.GraphKeys.\n TRAINABLE_VARIABLES, scope='vgg16_pretrained'))\n fcn_segmentor = tf.estimator.Estimator(model_fn=fcn_model_fn, model_dir\n =os.path.join(root_dir, 'ckpts'), warm_start_from=pretrained_weights)\n if TRAIN == True:\n for epoch in range(100):\n train_input_fn = tf.estimator.inputs.numpy_input_fn(x=\n train_data['x'], y=train_data['y'], batch_size=1,\n num_epochs=None, shuffle=True)\n fcn_segmentor.train(input_fn=train_input_fn, steps=200)\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(x=eval_data[\n 'x'], y=eval_data['y'], num_epochs=1, batch_size=10,\n shuffle=False)\n eval_results = fcn_segmentor.evaluate(input_fn=eval_input_fn)\n print('eval_loss :', eval_results)\n if PREDICT == True:\n pred_input_fn = tf.estimator.inputs.numpy_input_fn(x=test_data['x'],\n y=test_data['y'], batch_size=1, num_epochs=1, shuffle=False)\n pred = list(fcn_segmentor.predict(input_fn=pred_input_fn))\n pred = [p['classes'] for p in pred]\n fig = plt.figure(1, figsize=(32, 16))\n for i, p in 
enumerate(pred):\n fig.add_subplot(3, 1, 1)\n plt.title('camera photo')\n plt.imshow(test_data['x'][i])\n fig.add_subplot(3, 1, 2)\n plt.title('prediction')\n plt.imshow(p)\n fig.add_subplot(3, 1, 3)\n plt.title('ground truth')\n plt.imshow(gt['test'][i])\n filename = 'pred_{}.png'.format(i)\n plt.savefig(os.path.join(root_dir, 'predictions', filename))\n",
"step-3": "<mask token>\ntf.logging.set_verbosity(tf.logging.INFO)\nnow = datetime.utcnow().strftime('%Y%m%d%H%M%S')\nroot_logdir = 'logs'\nlogdir = '{}/run-{}/'.format(root_logdir, now)\n\n\ndef fcn_model_fn(features, labels, mode):\n L2 = tf.contrib.layers.l2_regularizer(scale=0.1)\n trainable = False\n if mode == tf.estimator.ModeKeys.TRAIN:\n trainable = True\n seed = 2019\n with tf.name_scope('vgg16_pretrained'):\n x = tf.layers.conv2d(features, 64, (3, 3), activation='relu',\n padding='same', name='conv1_1', kernel_regularizer=L2,\n trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp1_1')\n x = tf.layers.conv2d(x, 64, (3, 3), activation='relu', padding=\n 'same', name='conv1_2', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp1_2')\n x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool1')\n x = tf.layers.conv2d(x, 128, (3, 3), activation='relu', padding=\n 'same', name='conv2_1', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp2_1')\n x = tf.layers.conv2d(x, 128, (3, 3), activation='relu', padding=\n 'same', name='conv2-2', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp2_2')\n x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool2')\n x = tf.layers.conv2d(x, 256, (3, 3), activation='relu', padding=\n 'same', name='conv3_1', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp3_1')\n x = tf.layers.conv2d(x, 256, (3, 3), activation='relu', padding=\n 'same', name='conv3_2', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp3_2')\n x = tf.layers.conv2d(x, 256, (3, 3), activation='relu', padding=\n 'same', name='conv3_3', 
kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp3_3')\n x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool3')\n x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=\n 'same', name='conv4_1', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp4_1')\n x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=\n 'same', name='conv4_2', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp4_2')\n x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=\n 'same', name='conv4_3', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp4_3')\n x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool4')\n x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=\n 'same', name='conv5_1', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp5_1')\n x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=\n 'same', name='conv5_2', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp5_2')\n x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=\n 'same', name='conv5_3', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp5_3')\n x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool5')\n with tf.name_scope('deconv_layers'):\n x = tf.layers.conv2d(x, 4096, (7, 7), activation='relu', padding=\n 'same', name='conv6_1', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp6_1')\n x = tf.layers.conv2d(x, 4096, (1, 1), activation='relu', padding=\n 
'same', name='conv6_2', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp6_2')\n x = tf.layers.conv2d(x, 1, (1, 1), activation='relu', padding=\n 'same', name='conv6_3', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp6_3')\n heatmap = tf.layers.conv2d_transpose(x, 1, (64, 64), strides=(32, \n 32), activation='linear', padding='same', name='deconv6_1',\n kernel_regularizer=L2, trainable=trainable)\n logit = tf.nn.sigmoid(heatmap, name='logit')\n pred = tf.to_int32(logit > 0.5)\n pred = tf.squeeze(pred, axis=3)\n predictions = {'classes': pred, 'probabilities': logit}\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n if False:\n logit_f = tf.reshape(heatmap, (-1, 1, 1, 1))\n logit_f = tf.squeeze(logit_f, axis=[2, 3])\n label_f = tf.reshape(labels, (-1, 1))\n keep = tf.where(tf.greater_equal(labels, 0))\n logit_f = tf.gather(logit_f, keep)\n label_f = tf.gather(label_f, keep)\n tf.assert_equal(tf.shape(label_f)[0], tf.shape(logit_f)[0])\n tf.assert_non_negative(label_f)\n loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=label_f,\n logits=logit_f)\n heatmap = tf.squeeze(heatmap, axis=3)\n label_f = tf.to_int32(labels > 0)\n tf.assert_equal(tf.shape(label_f), tf.shape(heatmap))\n tf.assert_non_negative(label_f)\n loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=label_f,\n logits=heatmap)\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = tf.train.MomentumOptimizer(learning_rate=0.001,\n momentum=0.99)\n train_op = optimizer.minimize(loss=loss, global_step=tf.train.\n get_global_step())\n tf.summary.scalar('train_loss', loss)\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=\n train_op)\n iou = tf.metrics.mean_iou(label_f, predictions['classes'], num_classes=\n 2, name='mean_iou')\n eval_metric_ops = {'IoU': iou}\n 
tensors_to_log_prob = {'probabilities': 'deconv_layers/logit'}\n tensors_to_log_iou = {'mean_iou': iou}\n tf.summary.scalar('mean_iou', iou[0])\n logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log_iou,\n every_n_iter=200)\n if mode == tf.estimator.ModeKeys.EVAL:\n tf.summary.scalar('eval_loss', loss)\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss,\n eval_metric_ops=eval_metric_ops)\n\n\nif __name__ == '__main__':\n root_dir = '/home/pohsuanh/Documents/Computer_Vision/HW6'\n train_data, eval_data, test_data, gt = data_load.load()\n TRAIN = False\n PREDICT = True\n DRAW_SAMPLE = False\n if DRAW_SAMPLE == True:\n pic = np.random.randint(len(test_data['x']))\n image_sample = test_data['x'][pic]\n label_sample = test_data['y'][pic]\n plt.figure(figsize=(20, 40))\n plt.title('data')\n plt.imshow(image_sample)\n plt.figure(figsize=(20, 40))\n plt.title('gt')\n plt.imshow(label_sample)\n pretrained_weights = tf.estimator.WarmStartSettings(ckpt_to_initialize_from\n =os.path.join(root_dir, 'pretrained_weights', 'vgg_16.ckpt'),\n vars_to_warm_start=tf.get_collection(tf.GraphKeys.\n TRAINABLE_VARIABLES, scope='vgg16_pretrained'))\n fcn_segmentor = tf.estimator.Estimator(model_fn=fcn_model_fn, model_dir\n =os.path.join(root_dir, 'ckpts'), warm_start_from=pretrained_weights)\n if TRAIN == True:\n for epoch in range(100):\n train_input_fn = tf.estimator.inputs.numpy_input_fn(x=\n train_data['x'], y=train_data['y'], batch_size=1,\n num_epochs=None, shuffle=True)\n fcn_segmentor.train(input_fn=train_input_fn, steps=200)\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(x=eval_data[\n 'x'], y=eval_data['y'], num_epochs=1, batch_size=10,\n shuffle=False)\n eval_results = fcn_segmentor.evaluate(input_fn=eval_input_fn)\n print('eval_loss :', eval_results)\n if PREDICT == True:\n pred_input_fn = tf.estimator.inputs.numpy_input_fn(x=test_data['x'],\n y=test_data['y'], batch_size=1, num_epochs=1, shuffle=False)\n pred = 
list(fcn_segmentor.predict(input_fn=pred_input_fn))\n pred = [p['classes'] for p in pred]\n fig = plt.figure(1, figsize=(32, 16))\n for i, p in enumerate(pred):\n fig.add_subplot(3, 1, 1)\n plt.title('camera photo')\n plt.imshow(test_data['x'][i])\n fig.add_subplot(3, 1, 2)\n plt.title('prediction')\n plt.imshow(p)\n fig.add_subplot(3, 1, 3)\n plt.title('ground truth')\n plt.imshow(gt['test'][i])\n filename = 'pred_{}.png'.format(i)\n plt.savefig(os.path.join(root_dir, 'predictions', filename))\n",
"step-4": "<mask token>\nimport os\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport data_load\nfrom datetime import datetime\ntf.logging.set_verbosity(tf.logging.INFO)\nnow = datetime.utcnow().strftime('%Y%m%d%H%M%S')\nroot_logdir = 'logs'\nlogdir = '{}/run-{}/'.format(root_logdir, now)\n\n\ndef fcn_model_fn(features, labels, mode):\n L2 = tf.contrib.layers.l2_regularizer(scale=0.1)\n trainable = False\n if mode == tf.estimator.ModeKeys.TRAIN:\n trainable = True\n seed = 2019\n with tf.name_scope('vgg16_pretrained'):\n x = tf.layers.conv2d(features, 64, (3, 3), activation='relu',\n padding='same', name='conv1_1', kernel_regularizer=L2,\n trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp1_1')\n x = tf.layers.conv2d(x, 64, (3, 3), activation='relu', padding=\n 'same', name='conv1_2', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp1_2')\n x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool1')\n x = tf.layers.conv2d(x, 128, (3, 3), activation='relu', padding=\n 'same', name='conv2_1', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp2_1')\n x = tf.layers.conv2d(x, 128, (3, 3), activation='relu', padding=\n 'same', name='conv2-2', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp2_2')\n x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool2')\n x = tf.layers.conv2d(x, 256, (3, 3), activation='relu', padding=\n 'same', name='conv3_1', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp3_1')\n x = tf.layers.conv2d(x, 256, (3, 3), activation='relu', padding=\n 'same', name='conv3_2', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, 
training=trainable,\n name='dp3_2')\n x = tf.layers.conv2d(x, 256, (3, 3), activation='relu', padding=\n 'same', name='conv3_3', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp3_3')\n x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool3')\n x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=\n 'same', name='conv4_1', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp4_1')\n x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=\n 'same', name='conv4_2', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp4_2')\n x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=\n 'same', name='conv4_3', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp4_3')\n x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool4')\n x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=\n 'same', name='conv5_1', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp5_1')\n x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=\n 'same', name='conv5_2', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp5_2')\n x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=\n 'same', name='conv5_3', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp5_3')\n x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool5')\n with tf.name_scope('deconv_layers'):\n x = tf.layers.conv2d(x, 4096, (7, 7), activation='relu', padding=\n 'same', name='conv6_1', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, 
rate=0.4, seed=seed, training=trainable,\n name='dp6_1')\n x = tf.layers.conv2d(x, 4096, (1, 1), activation='relu', padding=\n 'same', name='conv6_2', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp6_2')\n x = tf.layers.conv2d(x, 1, (1, 1), activation='relu', padding=\n 'same', name='conv6_3', kernel_regularizer=L2, trainable=trainable)\n x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,\n name='dp6_3')\n heatmap = tf.layers.conv2d_transpose(x, 1, (64, 64), strides=(32, \n 32), activation='linear', padding='same', name='deconv6_1',\n kernel_regularizer=L2, trainable=trainable)\n logit = tf.nn.sigmoid(heatmap, name='logit')\n pred = tf.to_int32(logit > 0.5)\n pred = tf.squeeze(pred, axis=3)\n predictions = {'classes': pred, 'probabilities': logit}\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n if False:\n logit_f = tf.reshape(heatmap, (-1, 1, 1, 1))\n logit_f = tf.squeeze(logit_f, axis=[2, 3])\n label_f = tf.reshape(labels, (-1, 1))\n keep = tf.where(tf.greater_equal(labels, 0))\n logit_f = tf.gather(logit_f, keep)\n label_f = tf.gather(label_f, keep)\n tf.assert_equal(tf.shape(label_f)[0], tf.shape(logit_f)[0])\n tf.assert_non_negative(label_f)\n loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=label_f,\n logits=logit_f)\n heatmap = tf.squeeze(heatmap, axis=3)\n label_f = tf.to_int32(labels > 0)\n tf.assert_equal(tf.shape(label_f), tf.shape(heatmap))\n tf.assert_non_negative(label_f)\n loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=label_f,\n logits=heatmap)\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = tf.train.MomentumOptimizer(learning_rate=0.001,\n momentum=0.99)\n train_op = optimizer.minimize(loss=loss, global_step=tf.train.\n get_global_step())\n tf.summary.scalar('train_loss', loss)\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=\n train_op)\n iou = 
tf.metrics.mean_iou(label_f, predictions['classes'], num_classes=\n 2, name='mean_iou')\n eval_metric_ops = {'IoU': iou}\n tensors_to_log_prob = {'probabilities': 'deconv_layers/logit'}\n tensors_to_log_iou = {'mean_iou': iou}\n tf.summary.scalar('mean_iou', iou[0])\n logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log_iou,\n every_n_iter=200)\n if mode == tf.estimator.ModeKeys.EVAL:\n tf.summary.scalar('eval_loss', loss)\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss,\n eval_metric_ops=eval_metric_ops)\n\n\nif __name__ == '__main__':\n root_dir = '/home/pohsuanh/Documents/Computer_Vision/HW6'\n train_data, eval_data, test_data, gt = data_load.load()\n TRAIN = False\n PREDICT = True\n DRAW_SAMPLE = False\n if DRAW_SAMPLE == True:\n pic = np.random.randint(len(test_data['x']))\n image_sample = test_data['x'][pic]\n label_sample = test_data['y'][pic]\n plt.figure(figsize=(20, 40))\n plt.title('data')\n plt.imshow(image_sample)\n plt.figure(figsize=(20, 40))\n plt.title('gt')\n plt.imshow(label_sample)\n pretrained_weights = tf.estimator.WarmStartSettings(ckpt_to_initialize_from\n =os.path.join(root_dir, 'pretrained_weights', 'vgg_16.ckpt'),\n vars_to_warm_start=tf.get_collection(tf.GraphKeys.\n TRAINABLE_VARIABLES, scope='vgg16_pretrained'))\n fcn_segmentor = tf.estimator.Estimator(model_fn=fcn_model_fn, model_dir\n =os.path.join(root_dir, 'ckpts'), warm_start_from=pretrained_weights)\n if TRAIN == True:\n for epoch in range(100):\n train_input_fn = tf.estimator.inputs.numpy_input_fn(x=\n train_data['x'], y=train_data['y'], batch_size=1,\n num_epochs=None, shuffle=True)\n fcn_segmentor.train(input_fn=train_input_fn, steps=200)\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(x=eval_data[\n 'x'], y=eval_data['y'], num_epochs=1, batch_size=10,\n shuffle=False)\n eval_results = fcn_segmentor.evaluate(input_fn=eval_input_fn)\n print('eval_loss :', eval_results)\n if PREDICT == True:\n pred_input_fn = 
tf.estimator.inputs.numpy_input_fn(x=test_data['x'],\n y=test_data['y'], batch_size=1, num_epochs=1, shuffle=False)\n pred = list(fcn_segmentor.predict(input_fn=pred_input_fn))\n pred = [p['classes'] for p in pred]\n fig = plt.figure(1, figsize=(32, 16))\n for i, p in enumerate(pred):\n fig.add_subplot(3, 1, 1)\n plt.title('camera photo')\n plt.imshow(test_data['x'][i])\n fig.add_subplot(3, 1, 2)\n plt.title('prediction')\n plt.imshow(p)\n fig.add_subplot(3, 1, 3)\n plt.title('ground truth')\n plt.imshow(gt['test'][i])\n filename = 'pred_{}.png'.format(i)\n plt.savefig(os.path.join(root_dir, 'predictions', filename))\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 26 23:42:11 2018\n\n@author: pohsuanh\n\n\nFully Covolutional Network FCN-32s. \n\nFCN-32s network is based on VGG-16\n\n\"\"\"\n\nimport os\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport data_load\nfrom datetime import datetime\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\n\n# assign each run to a separate log file, so the tensorboard can function properly. \nnow = datetime.utcnow().strftime(\"%Y%m%d%H%M%S\")\n\nroot_logdir = \"logs\"\n\nlogdir = \"{}/run-{}/\".format(root_logdir,now)\n\ndef fcn_model_fn(features, labels, mode):\n \n L2 = tf.contrib.layers.l2_regularizer(scale=0.1)\n \n trainable = False\n \n if mode == tf.estimator.ModeKeys.TRAIN :\n \n trainable = True\n \n seed = 2019\n \n with tf.name_scope(\"vgg16_pretrained\"):\n \n x = tf.layers.conv2d(features, 64, (3, 3),\n activation='relu',\n padding='same',\n name='conv1_1',\n kernel_regularizer= L2,\n trainable = trainable)\n \n x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp1_1')\n \n x = tf.layers.conv2d(x, 64, (3, 3),\n activation='relu',\n padding='same',\n name='conv1_2',\n kernel_regularizer= L2,\n trainable = trainable)\n \n x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp1_2')\n \n x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool1')\n \n # Block 2\n x = tf.layers.conv2d(x, 128, (3, 3),\n activation='relu',\n padding='same',\n name='conv2_1',\n kernel_regularizer= L2,\n trainable = trainable)\n \n x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp2_1')\n \n \n x = tf.layers.conv2d(x, 128, (3, 3),\n activation='relu',\n padding='same',\n name='conv2-2',\n kernel_regularizer= L2,\n trainable = trainable)\n \n x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp2_2')\n \n \n x = tf.layers.max_pooling2d(x,(2, 2), strides=(2, 
2), name='pool2')\n \n # Block 3\n x = tf.layers.conv2d (x, 256, (3, 3),\n activation='relu',\n padding='same',\n name='conv3_1',\n kernel_regularizer= L2,\n trainable = trainable)\n \n x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp3_1')\n \n x = tf.layers.conv2d (x, 256, (3, 3),\n activation='relu',\n padding='same',\n name='conv3_2',\n kernel_regularizer= L2,\n trainable = trainable)\n \n x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp3_2')\n \n \n x = tf.layers.conv2d (x, 256, (3, 3),\n activation='relu',\n padding='same',\n name='conv3_3',\n kernel_regularizer= L2,\n trainable = trainable)\n \n x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp3_3')\n \n \n x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool3')\n \n # Block 4\n x = tf.layers.conv2d (x, 512, (3, 3),\n activation='relu',\n padding='same',\n name='conv4_1',\n kernel_regularizer= L2,\n trainable = trainable)\n \n x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp4_1')\n \n x = tf.layers.conv2d (x, 512, (3, 3),\n activation='relu',\n padding='same',\n name='conv4_2',\n kernel_regularizer= L2,\n trainable = trainable)\n \n x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp4_2')\n \n x = tf.layers.conv2d (x, 512, (3, 3),\n activation='relu',\n padding='same',\n name='conv4_3',\n kernel_regularizer= L2,\n trainable = trainable)\n \n x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp4_3')\n \n x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool4')\n \n # Block 5\n x = tf.layers.conv2d (x, 512, (3, 3),\n activation='relu',\n padding='same',\n name='conv5_1',\n kernel_regularizer= L2,\n trainable = trainable)\n \n x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp5_1')\n \n x = tf.layers.conv2d (x, 512, (3, 3),\n activation='relu',\n padding='same',\n 
name='conv5_2',\n kernel_regularizer= L2,\n trainable = trainable)\n \n x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp5_2')\n \n x = tf.layers.conv2d (x, 512, (3, 3),\n activation='relu',\n padding='same',\n name='conv5_3',\n kernel_regularizer= L2,\n trainable = trainable)\n \n x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp5_3')\n \n x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool5')\n \n with tf.name_scope(\"deconv_layers\"):\n # Block 6\n \n x = tf.layers.conv2d(x, 4096, (7,7), \n activation='relu',\n padding='same',\n name='conv6_1',\n kernel_regularizer= L2,\n trainable = trainable)\n \n x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp6_1')\n \n x = tf.layers.conv2d(x, 4096, (1,1),\n activation='relu',\n padding='same',\n name='conv6_2',\n kernel_regularizer= L2,\n trainable = trainable)\n \n x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp6_2')\n \n x = tf.layers.conv2d(x, 1, (1,1),\n activation='relu',\n padding='same',\n name='conv6_3',\n kernel_regularizer= L2,\n trainable = trainable) \n \n x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp6_3')\n \n # There are two classes [1: road, 0: non-road]\n heatmap = tf.layers.conv2d_transpose(x, 1, (64,64), strides=(32,32),\n activation='linear',\n padding='same',\n name='deconv6_1',\n kernel_regularizer= L2,\n trainable = trainable)\n \n logit = tf.nn.sigmoid(heatmap, name = 'logit')\n \n pred = tf.to_int32(logit > 0.5)\n \n pred = tf.squeeze(pred, axis = 3)\n\n# print(heatmap.shape)\n \n # Do pixel-wise classification :\n\n predictions = {\n \n # Generate predictions (for PREDICT and EVAL mode)\n \n \"classes\": pred, # tf.argmax(logit, axis =3 )\n \n # Add `softmax_tensor` to the graph. 
It is used for PREDICT and by the logging_hook`.\n \n \"probabilities\": logit #tf.nn.softmax(logit, name=\"softmax_tensor\")\n\n }\n \n\n if mode == tf.estimator.ModeKeys.PREDICT:\n \n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n \n # Calculate Loss (for both TRAIN and EVAL modes)\n # Homework requires tf.nn.sigmoid_cross_entropy_with_logits()\n if False : \n # ignore where label is -1 , which corresponds to Void.\n \n logit_f = tf.reshape(heatmap, (-1,1,1,1)) # flatten the output\n \n logit_f = tf.squeeze(logit_f, axis = [2,3])\n \n label_f = tf.reshape(labels,(-1,1))\n \n keep = tf.where(tf.greater_equal(labels, 0) )\n \n logit_f = tf.gather(logit_f, keep)\n \n label_f = tf.gather(label_f, keep)\n \n tf.assert_equal(tf.shape(label_f)[0], tf.shape(logit_f)[0])\n \n tf.assert_non_negative(label_f) # Void is labelled -1, which should be excluded from the loss func\n \n \n # sigmoid_cross_entorpy implements tf.nn.sparse_signoid_cross_entropy_with_logit, \n # it will convert output to logit in the op\n loss = tf.losses.sigmoid_cross_entropy(multi_class_labels = label_f, logits=logit_f)\n \n heatmap = tf.squeeze(heatmap, axis =3)\n\n label_f = tf.to_int32(labels > 0)\n\n tf.assert_equal(tf.shape(label_f), tf.shape(heatmap))\n\n tf.assert_non_negative(label_f)\n\n loss = tf.losses.sigmoid_cross_entropy( multi_class_labels = label_f ,logits = heatmap) \n # Configure the trainable Op (for TRAIN mode)\n \n if mode == tf.estimator.ModeKeys.TRAIN:\n \n optimizer = tf.train.MomentumOptimizer(learning_rate=0.001, momentum = 0.99)\n \n train_op = optimizer.minimize(loss=loss, global_step = tf.train.get_global_step())\n \n tf.summary.scalar('train_loss', loss)\n \n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n \n # Add evaluation metrics (for EVAL mode)\n \n # Set up logging for metrics\n \n iou = tf.metrics.mean_iou(label_f,predictions['classes'], num_classes = 2 , name = 'mean_iou')\n \n eval_metric_ops = {\"IoU\": 
iou}\n\n tensors_to_log_prob = {\"probabilities\": \"deconv_layers/logit\"}\n \n tensors_to_log_iou = {\"mean_iou\": iou}\n \n tf.summary.scalar('mean_iou', iou[0])\n\n logging_hook = tf.train.LoggingTensorHook(\n tensors=tensors_to_log_iou, every_n_iter=200)\n \n if mode == tf.estimator.ModeKeys.EVAL :\n \n tf.summary.scalar('eval_loss', loss)\n\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops = eval_metric_ops)\n\n \n#%%\nif __name__ == \"__main__\":\n \n root_dir = '/home/pohsuanh/Documents/Computer_Vision/HW6'\n\n # Load training and eval data\n \n train_data, eval_data, test_data, gt = data_load.load()\n \n # Flags\n \n TRAIN = False\n\n PREDICT = True \n\n DRAW_SAMPLE = False\n \n # Construct model\n if DRAW_SAMPLE == True :\n\n# pic = np.random.randint((test_data['x']).shape[0])\n pic = np.random.randint(len(test_data['x']))\n \n image_sample = test_data['x'][pic]\n \n label_sample = test_data['y'][pic]\n \n# image_sample = tf.Session().run(image_sample)\n# \n# label_sample = tf.Session().run(label_sample)\n plt.figure(figsize=(20,40))\n plt.title('data')\n plt.imshow(image_sample)\n \n plt.figure(figsize =(20,40))\n plt.title('gt')\n plt.imshow(label_sample)\n \n # Create the Estimator\n \n pretrained_weights = tf.estimator.WarmStartSettings(\n ckpt_to_initialize_from=os.path.join(root_dir,'pretrained_weights','vgg_16.ckpt'),\n vars_to_warm_start= tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope = 'vgg16_pretrained'))\n \n fcn_segmentor = tf.estimator.Estimator(\n \n model_fn=fcn_model_fn, model_dir=os.path.join(root_dir, 'ckpts'), warm_start_from= pretrained_weights) \n \n if TRAIN == True :\n \n for epoch in range(100):\n \n # Train the model\n \n train_input_fn = tf.estimator.inputs.numpy_input_fn(\n x=train_data['x'],\n y=train_data['y'],\n batch_size=1,\n num_epochs=None, # number of epochs to iterate over data. 
If None will run forever.\n shuffle=True)\n \n fcn_segmentor.train(\n input_fn=train_input_fn,\n steps=200\n )\n \n # Evaluate the model and print results\n \n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x=eval_data['x'],\n y=eval_data['y'],\n num_epochs=1,\n batch_size=10,\n shuffle=False)\n \n eval_results = fcn_segmentor.evaluate(input_fn=eval_input_fn)\n \n print('eval_loss :', eval_results)\n \n \n \n#%% We withhold the predction from test set untill all the hyperparameters are finetuned.\n \n if PREDICT == True :\n \n pred_input_fn = tf.estimator.inputs.numpy_input_fn(\n x=test_data['x'],\n y=test_data['y'],\n batch_size =1,\n num_epochs=1,\n shuffle=False)\n \n # predict method returns a generator\n \n pred = list( fcn_segmentor.predict(input_fn = pred_input_fn))\n \n pred = [p['classes'] for p in pred]\n \n fig = plt.figure(1, figsize=(32,16))\n \n for i, p in enumerate(pred) : \n \n fig.add_subplot(3,1,1)\n \n plt.title('camera photo')\n \n plt.imshow(test_data['x'][i])\n \n fig.add_subplot(3,1,2)\n \n plt.title('prediction')\n \n plt.imshow(p)\n \n fig.add_subplot(3,1,3)\n \n plt.title('ground truth')\n \n plt.imshow(gt['test'][i])\n \n filename = 'pred_{}.png'.format(i)\n \n plt.savefig(os.path.join(root_dir,'predictions',filename))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Background: The Fibonacci numbers are defined by F(n) = F(n-1) + F(n-2).
# There are different conventions on whether 0 is a Fibonacci number,
# and whether counting starts at n=0 or at n=1. Here, we will assume that
# 0 is not a Fibonacci number, and that counting starts at n=0,
# so F(0)=F(1)=1, and F(2)=2. With this in mind, write the function
# nthfibonaccinumber(n) that takes a non-negative int n and returns the nth Fibonacci number.
def fun_nthfibonaccinumber(n):
    """Return the nth Fibonacci number with F(0) = F(1) = 1, F(2) = 2.

    n must be a non-negative int.
    """
    # (prev, curr) hold (F(k-1), F(k)); each step advances them to
    # (F(k), F(k+1)).  Avoids shadowing the builtin `sum` and the
    # redundant special-case branch for n == 0.
    prev, curr = 1, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
|
normal
|
{
"blob_id": "40744a8530df28f0bd8648900beb8a66e2d44cd0",
"index": 7730,
"step-1": "<mask token>\n",
"step-2": "def fun_nthfibonaccinumber(n):\n n1 = 1\n n2 = 1\n if n == 0:\n return n2\n else:\n for i in range(0, n - 1):\n sum = n1 + n2\n n1 = n2\n n2 = sum\n return n2\n",
"step-3": "# Background: The Fibonacci numbers are defined by F(n) = F(n-1) + F(n-2). \n# There are different conventions on whether 0 is a Fibonacci number, \n# and whether counting starts at n=0 or at n=1. Here, we will assume that \n# 0 is not a Fibonacci number, and that counting starts at n=0, \n# so F(0)=F(1)=1, and F(2)=2. With this in mind, write the function \n# nthfibonaccinumber(n) that takes a non-negative int n and returns the nth Fibonacci number.\n\n\n\ndef fun_nthfibonaccinumber(n):\n n1 = 1\n n2 = 1\n if n == 0:\n return n2\n else:\n for i in range(0,n-1):\n sum = n1 + n2\n n1 = n2\n n2 = sum\n return n2",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class ModelExtractor(object):
    """Context manager that extracts a model zip archive into a
    temporary directory and removes that directory on exit."""
    def __init__(self, modelzip):
        # modelzip: an open zipfile.ZipFile instance to extract.
        self.modelzip = modelzip
    def __enter__(self):
        # Validate the archive before touching the filesystem.
        if not self.__is_model_good():
            raise ValueError('Invalid model zip file')
        obj = self.__get_obj_filename()
        if obj is None:
            raise ValueError('No obj file present in model zip')
        # Fresh temp directory; removed again in __exit__.
        self.path = mkdtemp()
        try:
            self.modelzip.extractall(self.path)
        except:
            # NOTE(review): bare except also catches KeyboardInterrupt /
            # SystemExit, and the temp dir leaks on failure because
            # __exit__ does not run when __enter__ raises — confirm intent.
            raise ValueError('Error while extracting zip file')
        # 'obj' is archive-relative, so join it onto the extraction dir.
        return {'path': self.path, 'obj': join(self.path, obj)}
    def __exit__(self, type, value, tb):
        # Best-effort cleanup of the extraction directory.
        rmtree(self.path, ignore_errors=True)
<|reserved_special_token_0|>
    def __get_obj_filename(self):
        # Return the archive-relative name of the first .obj entry,
        # or None when the archive contains no .obj file.
        for info in self.modelzip.infolist():
            if info.filename.endswith('.obj'):
                return info.filename
        return None
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ModelExtractor(object):
    """Safely extract a model zip archive into a temporary directory.

    Intended for use as a context manager: __enter__ validates the
    archive (no path traversal, bounded uncompressed size, contains an
    .obj file) and extracts it; __exit__ removes the temp directory.
    """

    def __init__(self, modelzip):
        # modelzip: an open zipfile.ZipFile instance.
        self.modelzip = modelzip

    def __enter__(self):
        """Validate and extract the archive.

        Returns:
            dict with 'path' (temporary extraction directory) and
            'obj' (full path of the first .obj file in the archive).

        Raises:
            ValueError: invalid archive, missing .obj file, or an
            error during extraction.
        """
        if not self.__is_model_good():
            raise ValueError('Invalid model zip file')
        obj = self.__get_obj_filename()
        if obj is None:
            raise ValueError('No obj file present in model zip')
        self.path = mkdtemp()
        try:
            self.modelzip.extractall(self.path)
        except Exception as exc:
            # __exit__ never runs when __enter__ raises, so remove the
            # temp dir here to avoid leaking it.  Catch Exception (not a
            # bare except) so KeyboardInterrupt/SystemExit propagate.
            rmtree(self.path, ignore_errors=True)
            raise ValueError('Error while extracting zip file') from exc
        return {'path': self.path, 'obj': join(self.path, obj)}

    def __exit__(self, type, value, tb):
        # Best-effort cleanup of the extraction directory.
        rmtree(self.path, ignore_errors=True)

    def __is_model_good(self):
        # Reject entries that could escape the extraction directory and
        # cap the total uncompressed size (zip-bomb defense).
        total_size_uncompressed = 0
        for path in self.modelzip.namelist():
            if '..' in path or path.startswith('/'):
                return False
            info = self.modelzip.getinfo(path)
            total_size_uncompressed += info.file_size
        return total_size_uncompressed < MAX_UNCOMPRESSED_SIZE

    def __get_obj_filename(self):
        # First .obj entry name (archive-relative), or None if absent.
        for info in self.modelzip.infolist():
            if info.filename.endswith('.obj'):
                return info.filename
        return None
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Cap on the archive's total uncompressed size (~100 MB); guards
# against zip bombs.
MAX_UNCOMPRESSED_SIZE = 100000000.0


class ModelExtractor(object):
    """Safely extract a model zip archive into a temporary directory.

    Intended for use as a context manager: __enter__ validates the
    archive (no path traversal, bounded uncompressed size, contains an
    .obj file) and extracts it; __exit__ removes the temp directory.
    """

    def __init__(self, modelzip):
        # modelzip: an open zipfile.ZipFile instance.
        self.modelzip = modelzip

    def __enter__(self):
        """Validate and extract the archive.

        Returns:
            dict with 'path' (temporary extraction directory) and
            'obj' (full path of the first .obj file in the archive).

        Raises:
            ValueError: invalid archive, missing .obj file, or an
            error during extraction.
        """
        if not self.__is_model_good():
            raise ValueError('Invalid model zip file')
        obj = self.__get_obj_filename()
        if obj is None:
            raise ValueError('No obj file present in model zip')
        self.path = mkdtemp()
        try:
            self.modelzip.extractall(self.path)
        except Exception as exc:
            # __exit__ never runs when __enter__ raises, so remove the
            # temp dir here to avoid leaking it.  Catch Exception (not a
            # bare except) so KeyboardInterrupt/SystemExit propagate.
            rmtree(self.path, ignore_errors=True)
            raise ValueError('Error while extracting zip file') from exc
        return {'path': self.path, 'obj': join(self.path, obj)}

    def __exit__(self, type, value, tb):
        # Best-effort cleanup of the extraction directory.
        rmtree(self.path, ignore_errors=True)

    def __is_model_good(self):
        # Reject entries that could escape the extraction directory and
        # cap the total uncompressed size (zip-bomb defense).
        total_size_uncompressed = 0
        for path in self.modelzip.namelist():
            if '..' in path or path.startswith('/'):
                return False
            info = self.modelzip.getinfo(path)
            total_size_uncompressed += info.file_size
        return total_size_uncompressed < MAX_UNCOMPRESSED_SIZE

    def __get_obj_filename(self):
        # First .obj entry name (archive-relative), or None if absent.
        for info in self.modelzip.infolist():
            if info.filename.endswith('.obj'):
                return info.filename
        return None
<|reserved_special_token_1|>
from tempfile import mkdtemp
from shutil import rmtree
from os.path import join
import os

# Cap on the archive's total uncompressed size (~100 MB); guards
# against zip bombs.
MAX_UNCOMPRESSED_SIZE = 100000000.0


class ModelExtractor(object):
    """Safely extract a model zip archive into a temporary directory.

    Intended for use as a context manager: __enter__ validates the
    archive (no path traversal, bounded uncompressed size, contains an
    .obj file) and extracts it; __exit__ removes the temp directory.
    """

    def __init__(self, modelzip):
        # modelzip: an open zipfile.ZipFile instance.
        self.modelzip = modelzip

    def __enter__(self):
        """Validate and extract the archive.

        Returns:
            dict with 'path' (temporary extraction directory) and
            'obj' (full path of the first .obj file in the archive).

        Raises:
            ValueError: invalid archive, missing .obj file, or an
            error during extraction.
        """
        if not self.__is_model_good():
            raise ValueError('Invalid model zip file')
        obj = self.__get_obj_filename()
        if obj is None:
            raise ValueError('No obj file present in model zip')
        self.path = mkdtemp()
        try:
            self.modelzip.extractall(self.path)
        except Exception as exc:
            # __exit__ never runs when __enter__ raises, so remove the
            # temp dir here to avoid leaking it.  Catch Exception (not a
            # bare except) so KeyboardInterrupt/SystemExit propagate.
            rmtree(self.path, ignore_errors=True)
            raise ValueError('Error while extracting zip file') from exc
        return {'path': self.path, 'obj': join(self.path, obj)}

    def __exit__(self, type, value, tb):
        # Best-effort cleanup of the extraction directory.
        rmtree(self.path, ignore_errors=True)

    def __is_model_good(self):
        # Reject entries that could escape the extraction directory and
        # cap the total uncompressed size (zip-bomb defense).
        total_size_uncompressed = 0
        for path in self.modelzip.namelist():
            if '..' in path or path.startswith('/'):
                return False
            info = self.modelzip.getinfo(path)
            total_size_uncompressed += info.file_size
        return total_size_uncompressed < MAX_UNCOMPRESSED_SIZE

    def __get_obj_filename(self):
        # First .obj entry name (archive-relative), or None if absent.
        for info in self.modelzip.infolist():
            if info.filename.endswith('.obj'):
                return info.filename
        return None
<|reserved_special_token_1|>
from tempfile import mkdtemp
from shutil import rmtree
from os.path import join
import os

# Cap on the archive's total uncompressed size (100 MB); guards
# against zip bombs.
MAX_UNCOMPRESSED_SIZE = 100e6  # 100MB


# Extracts a zipfile into a directory safely
class ModelExtractor(object):
    """Safely extract a model zip archive into a temporary directory.

    Intended for use as a context manager: __enter__ validates the
    archive (no path traversal, bounded uncompressed size, contains an
    .obj file) and extracts it; __exit__ removes the temp directory.
    """

    def __init__(self, modelzip):
        # modelzip: an open zipfile.ZipFile instance.
        self.modelzip = modelzip

    def __enter__(self):
        """Validate and extract the archive.

        Returns:
            dict with 'path' (temporary extraction directory) and
            'obj' (full path of the first .obj file in the archive).

        Raises:
            ValueError: invalid archive, missing .obj file, or an
            error during extraction.
        """
        if not self.__is_model_good():
            raise ValueError('Invalid model zip file')
        obj = self.__get_obj_filename()
        if obj is None:
            raise ValueError('No obj file present in model zip')
        self.path = mkdtemp()
        try:
            self.modelzip.extractall(self.path)
        except Exception as exc:
            # __exit__ never runs when __enter__ raises, so remove the
            # temp dir here to avoid leaking it.  Catch Exception (not a
            # bare except) so KeyboardInterrupt/SystemExit propagate.
            rmtree(self.path, ignore_errors=True)
            raise ValueError('Error while extracting zip file') from exc
        return {
            'path': self.path,
            'obj': join(self.path, obj)
        }

    def __exit__(self, type, value, tb):
        # Best-effort cleanup of the extraction directory.
        rmtree(self.path, ignore_errors=True)

    def __is_model_good(self):
        # Reject entries that could escape the extraction directory and
        # cap the total uncompressed size (zip-bomb defense).
        total_size_uncompressed = 0
        for path in self.modelzip.namelist():
            if '..' in path or path.startswith('/'):
                return False
            info = self.modelzip.getinfo(path)
            uncompressed_size = info.file_size
            total_size_uncompressed += uncompressed_size
        return total_size_uncompressed < MAX_UNCOMPRESSED_SIZE

    def __get_obj_filename(self):
        # First .obj entry name (archive-relative), or None if absent.
        for info in self.modelzip.infolist():
            if info.filename.endswith('.obj'):
                return info.filename
        return None
|
flexible
|
{
"blob_id": "04670041dab49f8c2d4a0415030356e7ea92925f",
"index": 902,
"step-1": "<mask token>\n\n\nclass ModelExtractor(object):\n\n def __init__(self, modelzip):\n self.modelzip = modelzip\n\n def __enter__(self):\n if not self.__is_model_good():\n raise ValueError('Invalid model zip file')\n obj = self.__get_obj_filename()\n if obj is None:\n raise ValueError('No obj file present in model zip')\n self.path = mkdtemp()\n try:\n self.modelzip.extractall(self.path)\n except:\n raise ValueError('Error while extracting zip file')\n return {'path': self.path, 'obj': join(self.path, obj)}\n\n def __exit__(self, type, value, tb):\n rmtree(self.path, ignore_errors=True)\n <mask token>\n\n def __get_obj_filename(self):\n for info in self.modelzip.infolist():\n if info.filename.endswith('.obj'):\n return info.filename\n return None\n",
"step-2": "<mask token>\n\n\nclass ModelExtractor(object):\n\n def __init__(self, modelzip):\n self.modelzip = modelzip\n\n def __enter__(self):\n if not self.__is_model_good():\n raise ValueError('Invalid model zip file')\n obj = self.__get_obj_filename()\n if obj is None:\n raise ValueError('No obj file present in model zip')\n self.path = mkdtemp()\n try:\n self.modelzip.extractall(self.path)\n except:\n raise ValueError('Error while extracting zip file')\n return {'path': self.path, 'obj': join(self.path, obj)}\n\n def __exit__(self, type, value, tb):\n rmtree(self.path, ignore_errors=True)\n\n def __is_model_good(self):\n total_size_uncompressed = 0\n for path in self.modelzip.namelist():\n if '..' in path or path.startswith('/'):\n return False\n info = self.modelzip.getinfo(path)\n uncompressed_size = info.file_size\n total_size_uncompressed += uncompressed_size\n return total_size_uncompressed < MAX_UNCOMPRESSED_SIZE\n\n def __get_obj_filename(self):\n for info in self.modelzip.infolist():\n if info.filename.endswith('.obj'):\n return info.filename\n return None\n",
"step-3": "<mask token>\nMAX_UNCOMPRESSED_SIZE = 100000000.0\n\n\nclass ModelExtractor(object):\n\n def __init__(self, modelzip):\n self.modelzip = modelzip\n\n def __enter__(self):\n if not self.__is_model_good():\n raise ValueError('Invalid model zip file')\n obj = self.__get_obj_filename()\n if obj is None:\n raise ValueError('No obj file present in model zip')\n self.path = mkdtemp()\n try:\n self.modelzip.extractall(self.path)\n except:\n raise ValueError('Error while extracting zip file')\n return {'path': self.path, 'obj': join(self.path, obj)}\n\n def __exit__(self, type, value, tb):\n rmtree(self.path, ignore_errors=True)\n\n def __is_model_good(self):\n total_size_uncompressed = 0\n for path in self.modelzip.namelist():\n if '..' in path or path.startswith('/'):\n return False\n info = self.modelzip.getinfo(path)\n uncompressed_size = info.file_size\n total_size_uncompressed += uncompressed_size\n return total_size_uncompressed < MAX_UNCOMPRESSED_SIZE\n\n def __get_obj_filename(self):\n for info in self.modelzip.infolist():\n if info.filename.endswith('.obj'):\n return info.filename\n return None\n",
"step-4": "from tempfile import mkdtemp\nfrom shutil import rmtree\nfrom os.path import join\nimport os\nMAX_UNCOMPRESSED_SIZE = 100000000.0\n\n\nclass ModelExtractor(object):\n\n def __init__(self, modelzip):\n self.modelzip = modelzip\n\n def __enter__(self):\n if not self.__is_model_good():\n raise ValueError('Invalid model zip file')\n obj = self.__get_obj_filename()\n if obj is None:\n raise ValueError('No obj file present in model zip')\n self.path = mkdtemp()\n try:\n self.modelzip.extractall(self.path)\n except:\n raise ValueError('Error while extracting zip file')\n return {'path': self.path, 'obj': join(self.path, obj)}\n\n def __exit__(self, type, value, tb):\n rmtree(self.path, ignore_errors=True)\n\n def __is_model_good(self):\n total_size_uncompressed = 0\n for path in self.modelzip.namelist():\n if '..' in path or path.startswith('/'):\n return False\n info = self.modelzip.getinfo(path)\n uncompressed_size = info.file_size\n total_size_uncompressed += uncompressed_size\n return total_size_uncompressed < MAX_UNCOMPRESSED_SIZE\n\n def __get_obj_filename(self):\n for info in self.modelzip.infolist():\n if info.filename.endswith('.obj'):\n return info.filename\n return None\n",
"step-5": "from tempfile import mkdtemp\nfrom shutil import rmtree\nfrom os.path import join\nimport os\n\nMAX_UNCOMPRESSED_SIZE = 100e6 # 100MB\n\n# Extracts a zipfile into a directory safely\nclass ModelExtractor(object):\n def __init__(self, modelzip):\n self.modelzip = modelzip \n\n def __enter__(self):\n if not self.__is_model_good():\n raise ValueError('Invalid model zip file')\n\n obj = self.__get_obj_filename()\n if obj is None:\n raise ValueError('No obj file present in model zip')\n\n self.path = mkdtemp()\n\n try:\n self.modelzip.extractall(self.path)\n except:\n raise ValueError('Error while extracting zip file')\n\n return {\n 'path': self.path,\n 'obj': join(self.path, obj)\n }\n\n def __exit__(self, type, value, tb):\n rmtree(self.path, ignore_errors=True)\n\n def __is_model_good(self):\n total_size_uncompressed = 0\n\n for path in self.modelzip.namelist():\n if '..' in path or path.startswith('/'):\n return False\n\n info = self.modelzip.getinfo(path)\n\n uncompressed_size = info.file_size\n total_size_uncompressed += uncompressed_size\n\n return total_size_uncompressed < MAX_UNCOMPRESSED_SIZE\n\n def __get_obj_filename(self):\n for info in self.modelzip.infolist():\n if info.filename.endswith('.obj'):\n return info.filename\n\n return None\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
    # Build word-frequency data from the training set and write it out in
    # the word-cloud input format (call_R=True presumably triggers an R
    # rendering step -- confirm against save_il_to_word_cloud_file).
    data = load_data('train.json')
    words = text_to_words(get_all_text(data), as_set=False)
    cnt = Counter(words)
    save_il_to_word_cloud_file('cloudofw.txt', cnt, len(words), call_R=True)
<|reserved_special_token_1|>
from text_to_word_cloud import *
from collections import Counter
from preprocess import *
if __name__ == '__main__':
    # Count word frequencies over all training text, then emit the
    # word-cloud input file.
    training_data = load_data('train.json')
    all_words = text_to_words(get_all_text(training_data), as_set=False)
    frequencies = Counter(all_words)
    save_il_to_word_cloud_file('cloudofw.txt', frequencies, len(all_words),
                               call_R=True)
<|reserved_special_token_1|>
from text_to_word_cloud import *
from collections import Counter
from preprocess import *
if __name__ == '__main__':
    # Derive word counts from the training corpus and export them in the
    # word-cloud file format.
    corpus = load_data('train.json')
    tokens = text_to_words(get_all_text(corpus), as_set=False)
    counts = Counter(tokens)
    save_il_to_word_cloud_file('cloudofw.txt', counts, len(tokens), call_R=True)
|
flexible
|
{
"blob_id": "b3bba1119bfaf0c1e684e8835259ec6fa8c42cf7",
"index": 1838,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n data = load_data('train.json')\n words = text_to_words(get_all_text(data), as_set=False)\n cnt = Counter(words)\n save_il_to_word_cloud_file('cloudofw.txt', cnt, len(words), call_R=True)\n",
"step-3": "from text_to_word_cloud import *\nfrom collections import Counter\nfrom preprocess import *\nif __name__ == '__main__':\n data = load_data('train.json')\n words = text_to_words(get_all_text(data), as_set=False)\n cnt = Counter(words)\n save_il_to_word_cloud_file('cloudofw.txt', cnt, len(words), call_R=True)\n",
"step-4": "from text_to_word_cloud import *\r\nfrom collections import Counter\r\nfrom preprocess import *\r\n\r\n\r\nif __name__ == '__main__':\r\n data = load_data('train.json')\r\n words = text_to_words(get_all_text(data), as_set=False)\r\n cnt = Counter(words)\r\n save_il_to_word_cloud_file(\"cloudofw.txt\",cnt,len(words),call_R=True)\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.urls import path
from django.conf.urls.i18n import urlpatterns
from . import views
# URL routes for this app.
# NOTE(review): the `urlpatterns` imported from django.conf.urls.i18n above
# is immediately shadowed by the assignment below, so that import appears
# unused -- confirm before removing it.
urlpatterns = [
    path('signup/', views.signup, name='signup'),
    path('home', views.home, name='home'),
    path('collab/', views.collab, name='collab'),
]
|
normal
|
{
"blob_id": "351963bee76ecaa9fa5c8d659f6d7c6ca9b22531",
"index": 2182,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('signup/', views.signup, name='signup'), path('home',\n views.home, name='home'), path('collab/', views.collab, name='collab')]\n",
"step-3": "from django.urls import path\nfrom django.conf.urls.i18n import urlpatterns\nfrom . import views\nurlpatterns = [path('signup/', views.signup, name='signup'), path('home',\n views.home, name='home'), path('collab/', views.collab, name='collab')]\n",
"step-4": "from django.urls import path\nfrom django.conf.urls.i18n import urlpatterns\n\nfrom . import views\n\nurlpatterns = [\n path('signup/', views.signup, name='signup'),\n path('home', views.home, name='home'),\n path('collab/', views.collab, name='collab'),\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from robot.libraries.BuiltIn import BuiltIn
from RoboGalaxyLibrary.utilitylib import logging as logger
import re
def block_no_keyword_warn():
    """No-op stub; presumably present so Robot Framework does not warn about
    a resource file containing no keywords -- confirm against the suite."""
    pass
class Compare_hpMCTP(object):
    """Section-by-section comparison of hpMCTP tool output.

    The output is split on newlines; each section is first compared as a
    plain string, and only mismatching sections fall back to an
    order-insensitive character comparison, so reordered-but-equivalent
    sections still pass.
    """

    def __init__(self):
        # Handle to the FusionLibrary instance already loaded by Robot Framework.
        self.fusionlib = BuiltIn().get_library_instance('FusionLibrary')

    def do(self, expect, actual, verbose=False):
        """Compare *actual* hpMCTP output against *expect*.

        The tool banner (everything before '<ISCSI-Boot-Cats>') is stripped
        from *actual* before comparison. Raises AssertionError when the
        section counts differ or a section's content does not match;
        str.index raises ValueError if the marker is absent from *actual*.
        Set verbose=True for per-section logging.
        """

        def smart_compare(exp, act):
            # Order-insensitive, character-level comparison of one section.
            # Returns 1 when neither side has characters absent from the
            # other, else 0.
            exp = re.sub(r'^\s*', '', exp)  # leading whitespace only (no MULTILINE)
            act = re.sub(r'^\s*', '', act)
            if verbose:
                logger._log_to_console_and_log_file(
                    'expected after removing leading white space: %s' % exp)
                logger._log_to_console_and_log_file(
                    'actual after removing leading white space: %s' % act)
            # BUG FIX: the original tested "e is not ''" -- an identity
            # comparison against a literal (a SyntaxWarning on modern
            # CPython, implementation-dependent); value equality intended.
            missing = [e for e in exp if e not in act and e != '']
            extra = [a for a in act if a not in exp]
            rc = 1  # assume match until a discrepancy is found
            if extra:
                logger._log_to_console_and_log_file('extra item found: %s' % extra)
                rc = 0
            else:
                logger._log_to_console_and_log_file('No Extra found.')
            if missing:
                logger._log_to_console_and_log_file('missing item: %s' % missing)
                rc = 0
            else:
                logger._log_to_console_and_log_file('No Missing found.')
            return rc

        # Normalize line endings, then drop the banner that precedes the
        # first real section (version/copyright header lines).
        actual = re.sub(r'\n\r', '\n', actual)
        headerEnd = actual.index('<ISCSI-Boot-Cats>')
        actual = '\n' + actual[headerEnd:]
        if verbose:
            logger._log_to_console_and_log_file('Actual now: %s' % actual)
            logger._log_to_console_and_log_file('Expect now: %s' % expect)

        # Fast path: exact string equality needs no per-section work.
        if expect == actual:
            return logger._log_to_console_and_log_file(
                'expect == actual. String equal, no further compare needed.')
        else:
            logger._log_to_console_and_log_file(
                'expect != actual, will do smart compare')

        eList = expect.split('\n')
        aList = actual.split('\n')
        logger._log_to_console_and_log_file(
            'Split on: %s into %s sections' % ('\n', len(eList) - 1))
        if len(aList) != len(eList):
            errMsg = ('aList and eList counts diff. Problem with split. '
                      'a: %s, e: %s' % (len(aList) - 1, len(eList) - 1))
            logger._log_to_console_and_log_file(errMsg)
            raise AssertionError(errMsg)

        # Index 0 is the empty string produced by the leading '\n'; start at 1.
        # NOTE(review): xrange keeps this Python-2 compatible, as the original
        # was; switch to range() if the suite moves to Python 3.
        for i in xrange(1, len(eList)):
            if eList[i] == aList[i]:
                logger._log_to_console_and_log_file('Sections %s are equal.' % i)
                if verbose:
                    logger._log_to_console_and_log_file('expect: %s' % eList[i])
                    logger._log_to_console_and_log_file('actual: %s' % aList[i])
            else:
                logger._log_to_console_and_log_file(
                    'Section %s requires a smart compare.' % i)
                if verbose:
                    logger._log_to_console_and_log_file('expect: %s' % eList[i])
                    logger._log_to_console_and_log_file('actual: %s' % aList[i])
                if not smart_compare(eList[i], aList[i]):
                    errMsg = ("Expected: '%s' does not match '%s'"
                              % (eList[i], aList[i]))
                    logger._log_to_console_and_log_file(errMsg)
                    raise AssertionError(errMsg)
|
normal
|
{
"blob_id": "17ba6aaa9009c258136b184ca6a8660cec1cfe40",
"index": 3752,
"step-1": "<mask token>\n\n\nclass Compare_hpMCTP(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Compare_hpMCTP(object):\n <mask token>\n\n def do(self, expect, actual, verbose=False):\n\n def smart_compare(exp, act):\n exp = re.sub('^\\\\s*', '', exp)\n act = re.sub('^\\\\s*', '', act)\n if verbose:\n logger._log_to_console_and_log_file(\n 'expected after removing leading white space: %s' % exp)\n logger._log_to_console_and_log_file(\n 'actual after removing leading white space: %s' % act)\n missing = [e for e in exp if e not in act and e is not '']\n extra = [a for a in act if a not in exp]\n rc = 1\n if extra:\n logger._log_to_console_and_log_file('extra item found: %s' %\n extra)\n rc = 0\n else:\n logger._log_to_console_and_log_file('No Extra found.')\n if missing:\n logger._log_to_console_and_log_file('missing item: %s' %\n missing)\n rc = 0\n else:\n logger._log_to_console_and_log_file('No Missing found.')\n return rc\n actual = re.sub('\\\\n\\\\r', '\\n', actual)\n headerEnd = actual.index('<ISCSI-Boot-Cats>')\n actual = '\\n' + actual[headerEnd:]\n if verbose:\n logger._log_to_console_and_log_file('Actual now: %s' % actual)\n logger._log_to_console_and_log_file('Expect now: %s' % expect)\n if expect == actual:\n return logger._log_to_console_and_log_file(\n 'expect == actual. String equal, no further compare needed.')\n else:\n logger._log_to_console_and_log_file(\n 'expect != actual, will do smart compare')\n eList = expect.split('\\n')\n aList = actual.split('\\n')\n logger._log_to_console_and_log_file('Split on: %s into %s sections' %\n ('\\n', len(eList) - 1))\n if len(aList) != len(eList):\n errMsg = (\n 'aList and eList counts diff. Problem with split. a: %s, e: %s'\n % (len(aList) - 1, len(eList) - 1))\n logger._log_to_console_and_log_file(errMsg)\n raise AssertionError(errMsg)\n for i in xrange(1, len(eList)):\n if eList[i] == aList[i]:\n logger._log_to_console_and_log_file(\n 'Sections %s are equal.' 
% i)\n if verbose:\n logger._log_to_console_and_log_file('expect: %s' % eList[i]\n )\n logger._log_to_console_and_log_file('actual: %s' % aList[i]\n )\n else:\n logger._log_to_console_and_log_file(\n 'Section %s requires a smart compare.' % i)\n if verbose:\n logger._log_to_console_and_log_file('expect: %s' % eList[i]\n )\n logger._log_to_console_and_log_file('actual: %s' % aList[i]\n )\n if not smart_compare(eList[i], aList[i]):\n errMsg = \"Expected: '%s' does not match '%s'\" % (eList[\n i], aList[i])\n logger._log_to_console_and_log_file(errMsg)\n raise AssertionError(errMsg)\n",
"step-3": "<mask token>\n\n\ndef block_no_keyword_warn():\n pass\n\n\nclass Compare_hpMCTP(object):\n\n def __init__(self):\n self.fusionlib = BuiltIn().get_library_instance('FusionLibrary')\n\n def do(self, expect, actual, verbose=False):\n\n def smart_compare(exp, act):\n exp = re.sub('^\\\\s*', '', exp)\n act = re.sub('^\\\\s*', '', act)\n if verbose:\n logger._log_to_console_and_log_file(\n 'expected after removing leading white space: %s' % exp)\n logger._log_to_console_and_log_file(\n 'actual after removing leading white space: %s' % act)\n missing = [e for e in exp if e not in act and e is not '']\n extra = [a for a in act if a not in exp]\n rc = 1\n if extra:\n logger._log_to_console_and_log_file('extra item found: %s' %\n extra)\n rc = 0\n else:\n logger._log_to_console_and_log_file('No Extra found.')\n if missing:\n logger._log_to_console_and_log_file('missing item: %s' %\n missing)\n rc = 0\n else:\n logger._log_to_console_and_log_file('No Missing found.')\n return rc\n actual = re.sub('\\\\n\\\\r', '\\n', actual)\n headerEnd = actual.index('<ISCSI-Boot-Cats>')\n actual = '\\n' + actual[headerEnd:]\n if verbose:\n logger._log_to_console_and_log_file('Actual now: %s' % actual)\n logger._log_to_console_and_log_file('Expect now: %s' % expect)\n if expect == actual:\n return logger._log_to_console_and_log_file(\n 'expect == actual. String equal, no further compare needed.')\n else:\n logger._log_to_console_and_log_file(\n 'expect != actual, will do smart compare')\n eList = expect.split('\\n')\n aList = actual.split('\\n')\n logger._log_to_console_and_log_file('Split on: %s into %s sections' %\n ('\\n', len(eList) - 1))\n if len(aList) != len(eList):\n errMsg = (\n 'aList and eList counts diff. Problem with split. 
a: %s, e: %s'\n % (len(aList) - 1, len(eList) - 1))\n logger._log_to_console_and_log_file(errMsg)\n raise AssertionError(errMsg)\n for i in xrange(1, len(eList)):\n if eList[i] == aList[i]:\n logger._log_to_console_and_log_file(\n 'Sections %s are equal.' % i)\n if verbose:\n logger._log_to_console_and_log_file('expect: %s' % eList[i]\n )\n logger._log_to_console_and_log_file('actual: %s' % aList[i]\n )\n else:\n logger._log_to_console_and_log_file(\n 'Section %s requires a smart compare.' % i)\n if verbose:\n logger._log_to_console_and_log_file('expect: %s' % eList[i]\n )\n logger._log_to_console_and_log_file('actual: %s' % aList[i]\n )\n if not smart_compare(eList[i], aList[i]):\n errMsg = \"Expected: '%s' does not match '%s'\" % (eList[\n i], aList[i])\n logger._log_to_console_and_log_file(errMsg)\n raise AssertionError(errMsg)\n",
"step-4": "from robot.libraries.BuiltIn import BuiltIn\nfrom RoboGalaxyLibrary.utilitylib import logging as logger\nimport re\n\n\ndef block_no_keyword_warn():\n pass\n\n\nclass Compare_hpMCTP(object):\n\n def __init__(self):\n self.fusionlib = BuiltIn().get_library_instance('FusionLibrary')\n\n def do(self, expect, actual, verbose=False):\n\n def smart_compare(exp, act):\n exp = re.sub('^\\\\s*', '', exp)\n act = re.sub('^\\\\s*', '', act)\n if verbose:\n logger._log_to_console_and_log_file(\n 'expected after removing leading white space: %s' % exp)\n logger._log_to_console_and_log_file(\n 'actual after removing leading white space: %s' % act)\n missing = [e for e in exp if e not in act and e is not '']\n extra = [a for a in act if a not in exp]\n rc = 1\n if extra:\n logger._log_to_console_and_log_file('extra item found: %s' %\n extra)\n rc = 0\n else:\n logger._log_to_console_and_log_file('No Extra found.')\n if missing:\n logger._log_to_console_and_log_file('missing item: %s' %\n missing)\n rc = 0\n else:\n logger._log_to_console_and_log_file('No Missing found.')\n return rc\n actual = re.sub('\\\\n\\\\r', '\\n', actual)\n headerEnd = actual.index('<ISCSI-Boot-Cats>')\n actual = '\\n' + actual[headerEnd:]\n if verbose:\n logger._log_to_console_and_log_file('Actual now: %s' % actual)\n logger._log_to_console_and_log_file('Expect now: %s' % expect)\n if expect == actual:\n return logger._log_to_console_and_log_file(\n 'expect == actual. String equal, no further compare needed.')\n else:\n logger._log_to_console_and_log_file(\n 'expect != actual, will do smart compare')\n eList = expect.split('\\n')\n aList = actual.split('\\n')\n logger._log_to_console_and_log_file('Split on: %s into %s sections' %\n ('\\n', len(eList) - 1))\n if len(aList) != len(eList):\n errMsg = (\n 'aList and eList counts diff. Problem with split. 
a: %s, e: %s'\n % (len(aList) - 1, len(eList) - 1))\n logger._log_to_console_and_log_file(errMsg)\n raise AssertionError(errMsg)\n for i in xrange(1, len(eList)):\n if eList[i] == aList[i]:\n logger._log_to_console_and_log_file(\n 'Sections %s are equal.' % i)\n if verbose:\n logger._log_to_console_and_log_file('expect: %s' % eList[i]\n )\n logger._log_to_console_and_log_file('actual: %s' % aList[i]\n )\n else:\n logger._log_to_console_and_log_file(\n 'Section %s requires a smart compare.' % i)\n if verbose:\n logger._log_to_console_and_log_file('expect: %s' % eList[i]\n )\n logger._log_to_console_and_log_file('actual: %s' % aList[i]\n )\n if not smart_compare(eList[i], aList[i]):\n errMsg = \"Expected: '%s' does not match '%s'\" % (eList[\n i], aList[i])\n logger._log_to_console_and_log_file(errMsg)\n raise AssertionError(errMsg)\n",
"step-5": "from robot.libraries.BuiltIn import BuiltIn\nfrom RoboGalaxyLibrary.utilitylib import logging as logger\nimport re\n\n\ndef block_no_keyword_warn():\n pass\n\n\nclass Compare_hpMCTP(object):\n\n def __init__(self):\n self.fusionlib = BuiltIn().get_library_instance('FusionLibrary')\n\n def do(self, expect, actual, verbose=False):\n\n def smart_compare(exp, act):\n\n # Remove leading whitespaces\n exp = (re.sub(r'^\\s*', '', exp))\n act = (re.sub(r'^\\s*', '', act))\n\n if verbose:\n logger._log_to_console_and_log_file(\"expected after removing leading white space: %s\" % exp)\n logger._log_to_console_and_log_file(\"actual after removing leading white space: %s\" % act)\n\n missing = [e for e in exp if (e not in act) and (e is not '')]\n extra = [a for a in act if (a not in exp)]\n\n rc = 1 # True (good, until proven otherwise)\n if extra:\n logger._log_to_console_and_log_file(\"extra item found: %s\" % extra)\n rc = 0\n else:\n logger._log_to_console_and_log_file(\"No Extra found.\")\n\n if missing:\n logger._log_to_console_and_log_file(\"missing item: %s\" % missing)\n rc = 0\n else:\n logger._log_to_console_and_log_file(\"No Missing found.\")\n\n return rc\n\n# Need to delete some items.\n actual = re.sub(r'\\n\\r', '\\n', actual)\n\n# get rid of the stuff from actual up to the first header. 
Extra info not compared.\n# for example, the first three lines below.\n# hpMCTP 2.3.0-4\n# Copyright (c) 2015-2016 Hewlett-Packard - All Rights Reserved\n# -------------------------------------------------------------\n# <ISCSI-Boot-Cats>\n headerEnd = actual.index('<ISCSI-Boot-Cats>')\n actual = '\\n' + actual[headerEnd:]\n\n if verbose:\n logger._log_to_console_and_log_file(\"Actual now: %s\" % actual)\n logger._log_to_console_and_log_file(\"Expect now: %s\" % expect)\n\n# Start comparing the expected vs the actual\n # if as a string they match, then no need to do a smart compare\n if expect == actual:\n return logger._log_to_console_and_log_file(\"expect == actual. String equal, no further compare needed.\")\n\n else:\n logger._log_to_console_and_log_file(\"expect != actual, will do smart compare\")\n\n # split into single lines.\n eList = expect.split('\\n')\n aList = actual.split('\\n')\n logger._log_to_console_and_log_file(\"Split on: %s into %s sections\" % ('\\n', len(eList) - 1))\n if len(aList) != len(eList):\n errMsg = \"aList and eList counts diff. Problem with split. a: %s, e: %s\" % (len(aList) - 1, len(eList) - 1)\n logger._log_to_console_and_log_file(errMsg)\n raise AssertionError(errMsg)\n\n for i in xrange(1, len(eList)):\n if eList[i] == aList[i]:\n logger._log_to_console_and_log_file(\"Sections %s are equal.\" % i)\n if verbose:\n logger._log_to_console_and_log_file(\"expect: %s\" % eList[i])\n logger._log_to_console_and_log_file(\"actual: %s\" % aList[i])\n else:\n logger._log_to_console_and_log_file(\"Section %s requires a smart compare.\" % i)\n if verbose:\n logger._log_to_console_and_log_file(\"expect: %s\" % eList[i])\n logger._log_to_console_and_log_file(\"actual: %s\" % aList[i])\n\n if not smart_compare(eList[i], aList[i]):\n errMsg = \"Expected: '%s' does not match '%s'\" % (eList[i], aList[i])\n logger._log_to_console_and_log_file(errMsg)\n raise AssertionError(errMsg)\n",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
<|reserved_special_token_0|>
class Compare_hpMCTP(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Compare_hpMCTP(object):
<|reserved_special_token_0|>
def do(self, expect, actual, verbose=False):
def smart_compare(exp, act):
exp = re.sub('^\\s*', '', exp)
act = re.sub('^\\s*', '', act)
if verbose:
logger._log_to_console_and_log_file(
'expected after removing leading white space: %s' % exp)
logger._log_to_console_and_log_file(
'actual after removing leading white space: %s' % act)
missing = [e for e in exp if e not in act and e is not '']
extra = [a for a in act if a not in exp]
rc = 1
if extra:
logger._log_to_console_and_log_file('extra item found: %s' %
extra)
rc = 0
else:
logger._log_to_console_and_log_file('No Extra found.')
if missing:
logger._log_to_console_and_log_file('missing item: %s' %
missing)
rc = 0
else:
logger._log_to_console_and_log_file('No Missing found.')
return rc
actual = re.sub('\\n\\r', '\n', actual)
headerEnd = actual.index('<ISCSI-Boot-Cats>')
actual = '\n' + actual[headerEnd:]
if verbose:
logger._log_to_console_and_log_file('Actual now: %s' % actual)
logger._log_to_console_and_log_file('Expect now: %s' % expect)
if expect == actual:
return logger._log_to_console_and_log_file(
'expect == actual. String equal, no further compare needed.')
else:
logger._log_to_console_and_log_file(
'expect != actual, will do smart compare')
eList = expect.split('\n')
aList = actual.split('\n')
logger._log_to_console_and_log_file('Split on: %s into %s sections' %
('\n', len(eList) - 1))
if len(aList) != len(eList):
errMsg = (
'aList and eList counts diff. Problem with split. a: %s, e: %s'
% (len(aList) - 1, len(eList) - 1))
logger._log_to_console_and_log_file(errMsg)
raise AssertionError(errMsg)
for i in xrange(1, len(eList)):
if eList[i] == aList[i]:
logger._log_to_console_and_log_file(
'Sections %s are equal.' % i)
if verbose:
logger._log_to_console_and_log_file('expect: %s' % eList[i]
)
logger._log_to_console_and_log_file('actual: %s' % aList[i]
)
else:
logger._log_to_console_and_log_file(
'Section %s requires a smart compare.' % i)
if verbose:
logger._log_to_console_and_log_file('expect: %s' % eList[i]
)
logger._log_to_console_and_log_file('actual: %s' % aList[i]
)
if not smart_compare(eList[i], aList[i]):
errMsg = "Expected: '%s' does not match '%s'" % (eList[
i], aList[i])
logger._log_to_console_and_log_file(errMsg)
raise AssertionError(errMsg)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def block_no_keyword_warn():
    """No-op stub; presumably present so Robot Framework does not warn about
    a resource file containing no keywords -- confirm against the suite."""
    pass
class Compare_hpMCTP(object):
    """Section-by-section comparison of hpMCTP tool output.

    The output is split on newlines; each section is first compared as a
    plain string, and only mismatching sections fall back to an
    order-insensitive character comparison, so reordered-but-equivalent
    sections still pass.
    """

    def __init__(self):
        # Handle to the FusionLibrary instance already loaded by Robot Framework.
        self.fusionlib = BuiltIn().get_library_instance('FusionLibrary')

    def do(self, expect, actual, verbose=False):
        """Compare *actual* hpMCTP output against *expect*.

        The tool banner (everything before '<ISCSI-Boot-Cats>') is stripped
        from *actual* before comparison. Raises AssertionError when the
        section counts differ or a section's content does not match;
        str.index raises ValueError if the marker is absent from *actual*.
        Set verbose=True for per-section logging.
        """

        def smart_compare(exp, act):
            # Order-insensitive, character-level comparison of one section.
            # Returns 1 when neither side has characters absent from the
            # other, else 0.
            exp = re.sub(r'^\s*', '', exp)  # leading whitespace only
            act = re.sub(r'^\s*', '', act)
            if verbose:
                logger._log_to_console_and_log_file(
                    'expected after removing leading white space: %s' % exp)
                logger._log_to_console_and_log_file(
                    'actual after removing leading white space: %s' % act)
            # BUG FIX: the original tested "e is not ''" -- an identity
            # comparison against a literal; value equality intended.
            missing = [e for e in exp if e not in act and e != '']
            extra = [a for a in act if a not in exp]
            rc = 1  # assume match until a discrepancy is found
            if extra:
                logger._log_to_console_and_log_file('extra item found: %s' % extra)
                rc = 0
            else:
                logger._log_to_console_and_log_file('No Extra found.')
            if missing:
                logger._log_to_console_and_log_file('missing item: %s' % missing)
                rc = 0
            else:
                logger._log_to_console_and_log_file('No Missing found.')
            return rc

        # Normalize line endings, then drop the banner before the first section.
        actual = re.sub(r'\n\r', '\n', actual)
        headerEnd = actual.index('<ISCSI-Boot-Cats>')
        actual = '\n' + actual[headerEnd:]
        if verbose:
            logger._log_to_console_and_log_file('Actual now: %s' % actual)
            logger._log_to_console_and_log_file('Expect now: %s' % expect)

        # Fast path: exact string equality needs no per-section work.
        if expect == actual:
            return logger._log_to_console_and_log_file(
                'expect == actual. String equal, no further compare needed.')
        else:
            logger._log_to_console_and_log_file(
                'expect != actual, will do smart compare')

        eList = expect.split('\n')
        aList = actual.split('\n')
        logger._log_to_console_and_log_file(
            'Split on: %s into %s sections' % ('\n', len(eList) - 1))
        if len(aList) != len(eList):
            errMsg = ('aList and eList counts diff. Problem with split. '
                      'a: %s, e: %s' % (len(aList) - 1, len(eList) - 1))
            logger._log_to_console_and_log_file(errMsg)
            raise AssertionError(errMsg)

        # Index 0 is the empty string produced by the leading '\n'; start at 1.
        # NOTE(review): xrange keeps this Python-2 compatible, as the original was.
        for i in xrange(1, len(eList)):
            if eList[i] == aList[i]:
                logger._log_to_console_and_log_file('Sections %s are equal.' % i)
                if verbose:
                    logger._log_to_console_and_log_file('expect: %s' % eList[i])
                    logger._log_to_console_and_log_file('actual: %s' % aList[i])
            else:
                logger._log_to_console_and_log_file(
                    'Section %s requires a smart compare.' % i)
                if verbose:
                    logger._log_to_console_and_log_file('expect: %s' % eList[i])
                    logger._log_to_console_and_log_file('actual: %s' % aList[i])
                if not smart_compare(eList[i], aList[i]):
                    errMsg = ("Expected: '%s' does not match '%s'"
                              % (eList[i], aList[i]))
                    logger._log_to_console_and_log_file(errMsg)
                    raise AssertionError(errMsg)
<|reserved_special_token_1|>
from robot.libraries.BuiltIn import BuiltIn
from RoboGalaxyLibrary.utilitylib import logging as logger
import re
def block_no_keyword_warn():
    """No-op stub; presumably present so Robot Framework does not warn about
    a resource file containing no keywords -- confirm against the suite."""
    pass
class Compare_hpMCTP(object):
    """Section-by-section comparison of hpMCTP tool output.

    The output is split on newlines; each section is first compared as a
    plain string, and only mismatching sections fall back to an
    order-insensitive character comparison, so reordered-but-equivalent
    sections still pass.
    """

    def __init__(self):
        # Handle to the FusionLibrary instance already loaded by Robot Framework.
        self.fusionlib = BuiltIn().get_library_instance('FusionLibrary')

    def do(self, expect, actual, verbose=False):
        """Compare *actual* hpMCTP output against *expect*.

        The tool banner (everything before '<ISCSI-Boot-Cats>') is stripped
        from *actual* before comparison. Raises AssertionError when the
        section counts differ or a section's content does not match;
        str.index raises ValueError if the marker is absent from *actual*.
        Set verbose=True for per-section logging.
        """

        def smart_compare(exp, act):
            # Order-insensitive, character-level comparison of one section.
            # Returns 1 when neither side has characters absent from the
            # other, else 0.
            exp = re.sub(r'^\s*', '', exp)  # leading whitespace only
            act = re.sub(r'^\s*', '', act)
            if verbose:
                logger._log_to_console_and_log_file(
                    'expected after removing leading white space: %s' % exp)
                logger._log_to_console_and_log_file(
                    'actual after removing leading white space: %s' % act)
            # BUG FIX: the original tested "e is not ''" -- an identity
            # comparison against a literal; value equality intended.
            missing = [e for e in exp if e not in act and e != '']
            extra = [a for a in act if a not in exp]
            rc = 1  # assume match until a discrepancy is found
            if extra:
                logger._log_to_console_and_log_file('extra item found: %s' % extra)
                rc = 0
            else:
                logger._log_to_console_and_log_file('No Extra found.')
            if missing:
                logger._log_to_console_and_log_file('missing item: %s' % missing)
                rc = 0
            else:
                logger._log_to_console_and_log_file('No Missing found.')
            return rc

        # Normalize line endings, then drop the banner before the first section.
        actual = re.sub(r'\n\r', '\n', actual)
        headerEnd = actual.index('<ISCSI-Boot-Cats>')
        actual = '\n' + actual[headerEnd:]
        if verbose:
            logger._log_to_console_and_log_file('Actual now: %s' % actual)
            logger._log_to_console_and_log_file('Expect now: %s' % expect)

        # Fast path: exact string equality needs no per-section work.
        if expect == actual:
            return logger._log_to_console_and_log_file(
                'expect == actual. String equal, no further compare needed.')
        else:
            logger._log_to_console_and_log_file(
                'expect != actual, will do smart compare')

        eList = expect.split('\n')
        aList = actual.split('\n')
        logger._log_to_console_and_log_file(
            'Split on: %s into %s sections' % ('\n', len(eList) - 1))
        if len(aList) != len(eList):
            errMsg = ('aList and eList counts diff. Problem with split. '
                      'a: %s, e: %s' % (len(aList) - 1, len(eList) - 1))
            logger._log_to_console_and_log_file(errMsg)
            raise AssertionError(errMsg)

        # Index 0 is the empty string produced by the leading '\n'; start at 1.
        # NOTE(review): xrange keeps this Python-2 compatible, as the original was.
        for i in xrange(1, len(eList)):
            if eList[i] == aList[i]:
                logger._log_to_console_and_log_file('Sections %s are equal.' % i)
                if verbose:
                    logger._log_to_console_and_log_file('expect: %s' % eList[i])
                    logger._log_to_console_and_log_file('actual: %s' % aList[i])
            else:
                logger._log_to_console_and_log_file(
                    'Section %s requires a smart compare.' % i)
                if verbose:
                    logger._log_to_console_and_log_file('expect: %s' % eList[i])
                    logger._log_to_console_and_log_file('actual: %s' % aList[i])
                if not smart_compare(eList[i], aList[i]):
                    errMsg = ("Expected: '%s' does not match '%s'"
                              % (eList[i], aList[i]))
                    logger._log_to_console_and_log_file(errMsg)
                    raise AssertionError(errMsg)
<|reserved_special_token_1|>
from robot.libraries.BuiltIn import BuiltIn
from RoboGalaxyLibrary.utilitylib import logging as logger
import re
def block_no_keyword_warn():
    """No-op stub; presumably present so Robot Framework does not warn about
    a resource file containing no keywords -- confirm against the suite."""
    pass
class Compare_hpMCTP(object):
    """Robot Framework helper that compares expected vs actual hpMCTP
    command output section by section, raising AssertionError on mismatch."""

    def __init__(self):
        # Handle to the FusionLibrary instance registered with Robot.
        self.fusionlib = BuiltIn().get_library_instance('FusionLibrary')

    def do(self, expect, actual, verbose=False):
        """Compare *expect* against *actual*.

        The banner preceding '<ISCSI-Boot-Cats>' in *actual* is discarded
        before comparison.  If the strings are not byte-identical they are
        split into newline-delimited sections; each differing section gets
        a character-membership "smart compare".

        :param expect: expected output text
        :param actual: actual command output text
        :param verbose: log intermediate strings when True
        :raises AssertionError: on section-count or content mismatch
        """

        def smart_compare(exp, act):
            # Character-level comparison of one section: reports characters
            # present in one string but not the other.  Returns 1 (match)
            # or 0 (mismatch).
            # Remove leading whitespaces
            exp = (re.sub(r'^\s*', '', exp))
            act = (re.sub(r'^\s*', '', act))

            if verbose:
                logger._log_to_console_and_log_file("expected after removing leading white space: %s" % exp)
                logger._log_to_console_and_log_file("actual after removing leading white space: %s" % act)

            # Fix: the original used "e is not ''" -- an identity test on a
            # literal (a SyntaxWarning on modern Python); equality is meant.
            missing = [e for e in exp if (e not in act) and (e != '')]
            extra = [a for a in act if (a not in exp)]

            rc = 1  # True (good, until proven otherwise)
            if extra:
                logger._log_to_console_and_log_file("extra item found: %s" % extra)
                rc = 0
            else:
                logger._log_to_console_and_log_file("No Extra found.")

            if missing:
                logger._log_to_console_and_log_file("missing item: %s" % missing)
                rc = 0
            else:
                logger._log_to_console_and_log_file("No Missing found.")

            return rc

        # Normalize line endings before splitting.
        actual = re.sub(r'\n\r', '\n', actual)

        # Drop everything before the first header; the banner (version,
        # copyright, separator lines) is extra info and is not compared.
        headerEnd = actual.index('<ISCSI-Boot-Cats>')
        actual = '\n' + actual[headerEnd:]

        if verbose:
            logger._log_to_console_and_log_file("Actual now: %s" % actual)
            logger._log_to_console_and_log_file("Expect now: %s" % expect)

        # Fast path: exact string equality needs no per-section work.
        if expect == actual:
            return logger._log_to_console_and_log_file("expect == actual. String equal, no further compare needed.")
        else:
            logger._log_to_console_and_log_file("expect != actual, will do smart compare")

        # Split into single lines / sections.
        eList = expect.split('\n')
        aList = actual.split('\n')
        logger._log_to_console_and_log_file("Split on: %s into %s sections" % ('\n', len(eList) - 1))
        if len(aList) != len(eList):
            errMsg = "aList and eList counts diff. Problem with split. a: %s, e: %s" % (len(aList) - 1, len(eList) - 1)
            logger._log_to_console_and_log_file(errMsg)
            raise AssertionError(errMsg)

        # Fix: xrange is Python-2-only; range iterates identically here and
        # works on both Python 2 and 3.
        for i in range(1, len(eList)):
            if eList[i] == aList[i]:
                logger._log_to_console_and_log_file("Sections %s are equal." % i)
                if verbose:
                    logger._log_to_console_and_log_file("expect: %s" % eList[i])
                    logger._log_to_console_and_log_file("actual: %s" % aList[i])
            else:
                logger._log_to_console_and_log_file("Section %s requires a smart compare." % i)
                if verbose:
                    logger._log_to_console_and_log_file("expect: %s" % eList[i])
                    logger._log_to_console_and_log_file("actual: %s" % aList[i])

                if not smart_compare(eList[i], aList[i]):
                    errMsg = "Expected: '%s' does not match '%s'" % (eList[i], aList[i])
                    logger._log_to_console_and_log_file(errMsg)
                    raise AssertionError(errMsg)
|
flexible
|
{
"blob_id": "17ba6aaa9009c258136b184ca6a8660cec1cfe40",
"index": 3752,
"step-1": "<mask token>\n\n\nclass Compare_hpMCTP(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Compare_hpMCTP(object):\n <mask token>\n\n def do(self, expect, actual, verbose=False):\n\n def smart_compare(exp, act):\n exp = re.sub('^\\\\s*', '', exp)\n act = re.sub('^\\\\s*', '', act)\n if verbose:\n logger._log_to_console_and_log_file(\n 'expected after removing leading white space: %s' % exp)\n logger._log_to_console_and_log_file(\n 'actual after removing leading white space: %s' % act)\n missing = [e for e in exp if e not in act and e is not '']\n extra = [a for a in act if a not in exp]\n rc = 1\n if extra:\n logger._log_to_console_and_log_file('extra item found: %s' %\n extra)\n rc = 0\n else:\n logger._log_to_console_and_log_file('No Extra found.')\n if missing:\n logger._log_to_console_and_log_file('missing item: %s' %\n missing)\n rc = 0\n else:\n logger._log_to_console_and_log_file('No Missing found.')\n return rc\n actual = re.sub('\\\\n\\\\r', '\\n', actual)\n headerEnd = actual.index('<ISCSI-Boot-Cats>')\n actual = '\\n' + actual[headerEnd:]\n if verbose:\n logger._log_to_console_and_log_file('Actual now: %s' % actual)\n logger._log_to_console_and_log_file('Expect now: %s' % expect)\n if expect == actual:\n return logger._log_to_console_and_log_file(\n 'expect == actual. String equal, no further compare needed.')\n else:\n logger._log_to_console_and_log_file(\n 'expect != actual, will do smart compare')\n eList = expect.split('\\n')\n aList = actual.split('\\n')\n logger._log_to_console_and_log_file('Split on: %s into %s sections' %\n ('\\n', len(eList) - 1))\n if len(aList) != len(eList):\n errMsg = (\n 'aList and eList counts diff. Problem with split. a: %s, e: %s'\n % (len(aList) - 1, len(eList) - 1))\n logger._log_to_console_and_log_file(errMsg)\n raise AssertionError(errMsg)\n for i in xrange(1, len(eList)):\n if eList[i] == aList[i]:\n logger._log_to_console_and_log_file(\n 'Sections %s are equal.' 
% i)\n if verbose:\n logger._log_to_console_and_log_file('expect: %s' % eList[i]\n )\n logger._log_to_console_and_log_file('actual: %s' % aList[i]\n )\n else:\n logger._log_to_console_and_log_file(\n 'Section %s requires a smart compare.' % i)\n if verbose:\n logger._log_to_console_and_log_file('expect: %s' % eList[i]\n )\n logger._log_to_console_and_log_file('actual: %s' % aList[i]\n )\n if not smart_compare(eList[i], aList[i]):\n errMsg = \"Expected: '%s' does not match '%s'\" % (eList[\n i], aList[i])\n logger._log_to_console_and_log_file(errMsg)\n raise AssertionError(errMsg)\n",
"step-3": "<mask token>\n\n\ndef block_no_keyword_warn():\n pass\n\n\nclass Compare_hpMCTP(object):\n\n def __init__(self):\n self.fusionlib = BuiltIn().get_library_instance('FusionLibrary')\n\n def do(self, expect, actual, verbose=False):\n\n def smart_compare(exp, act):\n exp = re.sub('^\\\\s*', '', exp)\n act = re.sub('^\\\\s*', '', act)\n if verbose:\n logger._log_to_console_and_log_file(\n 'expected after removing leading white space: %s' % exp)\n logger._log_to_console_and_log_file(\n 'actual after removing leading white space: %s' % act)\n missing = [e for e in exp if e not in act and e is not '']\n extra = [a for a in act if a not in exp]\n rc = 1\n if extra:\n logger._log_to_console_and_log_file('extra item found: %s' %\n extra)\n rc = 0\n else:\n logger._log_to_console_and_log_file('No Extra found.')\n if missing:\n logger._log_to_console_and_log_file('missing item: %s' %\n missing)\n rc = 0\n else:\n logger._log_to_console_and_log_file('No Missing found.')\n return rc\n actual = re.sub('\\\\n\\\\r', '\\n', actual)\n headerEnd = actual.index('<ISCSI-Boot-Cats>')\n actual = '\\n' + actual[headerEnd:]\n if verbose:\n logger._log_to_console_and_log_file('Actual now: %s' % actual)\n logger._log_to_console_and_log_file('Expect now: %s' % expect)\n if expect == actual:\n return logger._log_to_console_and_log_file(\n 'expect == actual. String equal, no further compare needed.')\n else:\n logger._log_to_console_and_log_file(\n 'expect != actual, will do smart compare')\n eList = expect.split('\\n')\n aList = actual.split('\\n')\n logger._log_to_console_and_log_file('Split on: %s into %s sections' %\n ('\\n', len(eList) - 1))\n if len(aList) != len(eList):\n errMsg = (\n 'aList and eList counts diff. Problem with split. 
a: %s, e: %s'\n % (len(aList) - 1, len(eList) - 1))\n logger._log_to_console_and_log_file(errMsg)\n raise AssertionError(errMsg)\n for i in xrange(1, len(eList)):\n if eList[i] == aList[i]:\n logger._log_to_console_and_log_file(\n 'Sections %s are equal.' % i)\n if verbose:\n logger._log_to_console_and_log_file('expect: %s' % eList[i]\n )\n logger._log_to_console_and_log_file('actual: %s' % aList[i]\n )\n else:\n logger._log_to_console_and_log_file(\n 'Section %s requires a smart compare.' % i)\n if verbose:\n logger._log_to_console_and_log_file('expect: %s' % eList[i]\n )\n logger._log_to_console_and_log_file('actual: %s' % aList[i]\n )\n if not smart_compare(eList[i], aList[i]):\n errMsg = \"Expected: '%s' does not match '%s'\" % (eList[\n i], aList[i])\n logger._log_to_console_and_log_file(errMsg)\n raise AssertionError(errMsg)\n",
"step-4": "from robot.libraries.BuiltIn import BuiltIn\nfrom RoboGalaxyLibrary.utilitylib import logging as logger\nimport re\n\n\ndef block_no_keyword_warn():\n pass\n\n\nclass Compare_hpMCTP(object):\n\n def __init__(self):\n self.fusionlib = BuiltIn().get_library_instance('FusionLibrary')\n\n def do(self, expect, actual, verbose=False):\n\n def smart_compare(exp, act):\n exp = re.sub('^\\\\s*', '', exp)\n act = re.sub('^\\\\s*', '', act)\n if verbose:\n logger._log_to_console_and_log_file(\n 'expected after removing leading white space: %s' % exp)\n logger._log_to_console_and_log_file(\n 'actual after removing leading white space: %s' % act)\n missing = [e for e in exp if e not in act and e is not '']\n extra = [a for a in act if a not in exp]\n rc = 1\n if extra:\n logger._log_to_console_and_log_file('extra item found: %s' %\n extra)\n rc = 0\n else:\n logger._log_to_console_and_log_file('No Extra found.')\n if missing:\n logger._log_to_console_and_log_file('missing item: %s' %\n missing)\n rc = 0\n else:\n logger._log_to_console_and_log_file('No Missing found.')\n return rc\n actual = re.sub('\\\\n\\\\r', '\\n', actual)\n headerEnd = actual.index('<ISCSI-Boot-Cats>')\n actual = '\\n' + actual[headerEnd:]\n if verbose:\n logger._log_to_console_and_log_file('Actual now: %s' % actual)\n logger._log_to_console_and_log_file('Expect now: %s' % expect)\n if expect == actual:\n return logger._log_to_console_and_log_file(\n 'expect == actual. String equal, no further compare needed.')\n else:\n logger._log_to_console_and_log_file(\n 'expect != actual, will do smart compare')\n eList = expect.split('\\n')\n aList = actual.split('\\n')\n logger._log_to_console_and_log_file('Split on: %s into %s sections' %\n ('\\n', len(eList) - 1))\n if len(aList) != len(eList):\n errMsg = (\n 'aList and eList counts diff. Problem with split. 
a: %s, e: %s'\n % (len(aList) - 1, len(eList) - 1))\n logger._log_to_console_and_log_file(errMsg)\n raise AssertionError(errMsg)\n for i in xrange(1, len(eList)):\n if eList[i] == aList[i]:\n logger._log_to_console_and_log_file(\n 'Sections %s are equal.' % i)\n if verbose:\n logger._log_to_console_and_log_file('expect: %s' % eList[i]\n )\n logger._log_to_console_and_log_file('actual: %s' % aList[i]\n )\n else:\n logger._log_to_console_and_log_file(\n 'Section %s requires a smart compare.' % i)\n if verbose:\n logger._log_to_console_and_log_file('expect: %s' % eList[i]\n )\n logger._log_to_console_and_log_file('actual: %s' % aList[i]\n )\n if not smart_compare(eList[i], aList[i]):\n errMsg = \"Expected: '%s' does not match '%s'\" % (eList[\n i], aList[i])\n logger._log_to_console_and_log_file(errMsg)\n raise AssertionError(errMsg)\n",
"step-5": "from robot.libraries.BuiltIn import BuiltIn\nfrom RoboGalaxyLibrary.utilitylib import logging as logger\nimport re\n\n\ndef block_no_keyword_warn():\n pass\n\n\nclass Compare_hpMCTP(object):\n\n def __init__(self):\n self.fusionlib = BuiltIn().get_library_instance('FusionLibrary')\n\n def do(self, expect, actual, verbose=False):\n\n def smart_compare(exp, act):\n\n # Remove leading whitespaces\n exp = (re.sub(r'^\\s*', '', exp))\n act = (re.sub(r'^\\s*', '', act))\n\n if verbose:\n logger._log_to_console_and_log_file(\"expected after removing leading white space: %s\" % exp)\n logger._log_to_console_and_log_file(\"actual after removing leading white space: %s\" % act)\n\n missing = [e for e in exp if (e not in act) and (e is not '')]\n extra = [a for a in act if (a not in exp)]\n\n rc = 1 # True (good, until proven otherwise)\n if extra:\n logger._log_to_console_and_log_file(\"extra item found: %s\" % extra)\n rc = 0\n else:\n logger._log_to_console_and_log_file(\"No Extra found.\")\n\n if missing:\n logger._log_to_console_and_log_file(\"missing item: %s\" % missing)\n rc = 0\n else:\n logger._log_to_console_and_log_file(\"No Missing found.\")\n\n return rc\n\n# Need to delete some items.\n actual = re.sub(r'\\n\\r', '\\n', actual)\n\n# get rid of the stuff from actual up to the first header. 
Extra info not compared.\n# for example, the first three lines below.\n# hpMCTP 2.3.0-4\n# Copyright (c) 2015-2016 Hewlett-Packard - All Rights Reserved\n# -------------------------------------------------------------\n# <ISCSI-Boot-Cats>\n headerEnd = actual.index('<ISCSI-Boot-Cats>')\n actual = '\\n' + actual[headerEnd:]\n\n if verbose:\n logger._log_to_console_and_log_file(\"Actual now: %s\" % actual)\n logger._log_to_console_and_log_file(\"Expect now: %s\" % expect)\n\n# Start comparing the expected vs the actual\n # if as a string they match, then no need to do a smart compare\n if expect == actual:\n return logger._log_to_console_and_log_file(\"expect == actual. String equal, no further compare needed.\")\n\n else:\n logger._log_to_console_and_log_file(\"expect != actual, will do smart compare\")\n\n # split into single lines.\n eList = expect.split('\\n')\n aList = actual.split('\\n')\n logger._log_to_console_and_log_file(\"Split on: %s into %s sections\" % ('\\n', len(eList) - 1))\n if len(aList) != len(eList):\n errMsg = \"aList and eList counts diff. Problem with split. a: %s, e: %s\" % (len(aList) - 1, len(eList) - 1)\n logger._log_to_console_and_log_file(errMsg)\n raise AssertionError(errMsg)\n\n for i in xrange(1, len(eList)):\n if eList[i] == aList[i]:\n logger._log_to_console_and_log_file(\"Sections %s are equal.\" % i)\n if verbose:\n logger._log_to_console_and_log_file(\"expect: %s\" % eList[i])\n logger._log_to_console_and_log_file(\"actual: %s\" % aList[i])\n else:\n logger._log_to_console_and_log_file(\"Section %s requires a smart compare.\" % i)\n if verbose:\n logger._log_to_console_and_log_file(\"expect: %s\" % eList[i])\n logger._log_to_console_and_log_file(\"actual: %s\" % aList[i])\n\n if not smart_compare(eList[i], aList[i]):\n errMsg = \"Expected: '%s' does not match '%s'\" % (eList[i], aList[i])\n logger._log_to_console_and_log_file(errMsg)\n raise AssertionError(errMsg)\n",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fibonacciModified(t1, t2, n):
    """Return term n (1-based) of the modified Fibonacci sequence
    t(i+2) = t(i) + t(i+1)**2, with t(1) = t1 and t(2) = t2.

    Fix: the original raised UnboundLocalError for n < 3 because the loop
    body never ran; the first two terms are now returned directly.
    """
    if n == 1:
        return t1
    if n == 2:
        return t2
    ti, ti_1 = t1, t2
    for _ in range(2, n):
        # Advance the window: the new term is t(i) + t(i+1)**2.
        ti, ti_1 = ti_1, ti + ti_1 ** 2
    return ti_1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fibonacciModified(t1, t2, n):
    """Term n of the modified Fibonacci sequence t(i+2) = t(i) + t(i+1)**2,
    where t(1) = t1 and t(2) = t2."""
    a, b = t1, t2
    for _ in range(2, n):
        nxt = a + b ** 2
        a, b = b, nxt
    return nxt
if __name__ == '__main__':
    # Read "t1 t2 n" from stdin as three space-separated integers.
    t1, t2, n = input().strip().split(' ')
    t1, t2, n = [int(t1), int(t2), int(n)]
    result = fibonacciModified(t1, t2, n)
    print(result)
<|reserved_special_token_1|>
import sys
def fibonacciModified(t1, t2, n):
    """Return term n of the modified Fibonacci sequence
    t(i+2) = t(i) + t(i+1)**2, with t(1) = t1 and t(2) = t2.

    NOTE(review): for n < 3 the loop body never runs and ti_2 is unbound,
    raising UnboundLocalError -- confirm callers always pass n >= 3.
    """
    ti = t1
    ti_1 = t2
    for i in range(2, n):
        # New term, then slide the two-term window forward.
        ti_2 = ti + ti_1 ** 2
        ti = ti_1
        ti_1 = ti_2
    return ti_2
if __name__ == '__main__':
    # Read "t1 t2 n" from stdin as three space-separated integers.
    t1, t2, n = input().strip().split(' ')
    t1, t2, n = [int(t1), int(t2), int(n)]
    result = fibonacciModified(t1, t2, n)
    print(result)
<|reserved_special_token_1|>
#!/bin/python3
import sys
def fibonacciModified(t1, t2, n):
    """Term n of the modified Fibonacci sequence t(i+2) = t(i) + t(i+1)**2,
    seeded with t(1) = t1 and t(2) = t2."""
    older, newer = t1, t2
    i = 2
    while i < n:
        latest = older + newer * newer
        older, newer = newer, latest
        i += 1
    return latest
if __name__ == "__main__":
    # Read "t1 t2 n" from stdin as three space-separated integers.
    t1, t2, n = input().strip().split(' ')
    t1, t2, n = [int(t1), int(t2), int(n)]
    result = fibonacciModified(t1, t2, n)
    print(result)
|
flexible
|
{
"blob_id": "3838df627318b25767738da912f44e494cef40f3",
"index": 6833,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef fibonacciModified(t1, t2, n):\n ti = t1\n ti_1 = t2\n for i in range(2, n):\n ti_2 = ti + ti_1 ** 2\n ti = ti_1\n ti_1 = ti_2\n return ti_2\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef fibonacciModified(t1, t2, n):\n ti = t1\n ti_1 = t2\n for i in range(2, n):\n ti_2 = ti + ti_1 ** 2\n ti = ti_1\n ti_1 = ti_2\n return ti_2\n\n\nif __name__ == '__main__':\n t1, t2, n = input().strip().split(' ')\n t1, t2, n = [int(t1), int(t2), int(n)]\n result = fibonacciModified(t1, t2, n)\n print(result)\n",
"step-4": "import sys\n\n\ndef fibonacciModified(t1, t2, n):\n ti = t1\n ti_1 = t2\n for i in range(2, n):\n ti_2 = ti + ti_1 ** 2\n ti = ti_1\n ti_1 = ti_2\n return ti_2\n\n\nif __name__ == '__main__':\n t1, t2, n = input().strip().split(' ')\n t1, t2, n = [int(t1), int(t2), int(n)]\n result = fibonacciModified(t1, t2, n)\n print(result)\n",
"step-5": "#!/bin/python3\n\nimport sys\n\ndef fibonacciModified(t1, t2, n):\n ti = t1\n ti_1 = t2\n for i in range (2, n):\n ti_2 = ti + ti_1**2\n ti = ti_1\n ti_1 = ti_2\n return ti_2\n\nif __name__ == \"__main__\":\n t1, t2, n = input().strip().split(' ')\n t1, t2, n = [int(t1), int(t2), int(n)]\n result = fibonacciModified(t1, t2, n)\n print(result)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# coding=utf-8
# __author__ = 'lyl'
import json
import csv
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def read_json(filename):
    """Read a JSON file and return the parsed Python object.

    :param filename: path of the JSON file
    :return: parsed content, e.g. [{}, {}, {}, ...]
    """
    # Fix: the original left the file handle open (open(...).read());
    # the context manager guarantees it is closed.
    with open(filename) as f:
        return json.load(f)
def write_csv(filename, data_list):
    """Write a list of dicts [{}, {}, ...] to a CSV file.

    The header row comes from the keys of the first dict; each dict is
    then written as one data row.

    :param filename: path of the CSV file to create
    :param data_list: list of dicts, e.g. [{}, {}, ...]
    :return: None
    """
    # Guard: an empty list would crash on data_list[0] below.
    if not data_list:
        return
    with open(filename, 'w') as f:
        dict_writer = csv.DictWriter(f, data_list[0].keys())
        dict_writer.writeheader()
        dict_writer.writerows(data_list)
def write_csv2(filename, content_list):
    """Like write_csv: dump a list of dicts to a CSV file, but via the
    plain csv.writer (header from the first dict's keys, one row per dict).

    :param filename: path of the CSV file to create
    :param content_list: list of dicts
    :return: None
    """
    with open(filename, 'w') as out:
        writer = csv.writer(out)
        writer.writerow(content_list[0].keys())
        writer.writerows(row.values() for row in content_list)
if __name__ == "__main__":
    # Load the scraped records from the JSON dump...
    content_list = read_json('lagou_info_lin3.json')
    # ...and re-export them as CSV.
    write_csv( "lagou_info_lin3.csv", content_list)
|
normal
|
{
"blob_id": "7531480f629c1b3d28210afac4ef84b06edcd420",
"index": 3825,
"step-1": "<mask token>\n\n\ndef write_csv(filename, data_list):\n \"\"\"\n 将python对象 [{}, {}. {}, {} ...] 写入到csv文件中\n :param filename: 生成的csv文件名\n :param data_list: [{}, {}. {}, {} ...]\n :return: None\n \"\"\"\n with open(filename, 'w') as f:\n dict_writer = csv.DictWriter(f, data_list[0].keys())\n dict_writer.writeheader()\n dict_writer.writerows(data_list)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_json(filename):\n \"\"\"\n 读取json格式的文件\n :param filename: json文件的文件名\n :return: [{}, {}, {}, {}, {},{} ......]\n \"\"\"\n return json.loads(open(filename).read())\n\n\ndef write_csv(filename, data_list):\n \"\"\"\n 将python对象 [{}, {}. {}, {} ...] 写入到csv文件中\n :param filename: 生成的csv文件名\n :param data_list: [{}, {}. {}, {} ...]\n :return: None\n \"\"\"\n with open(filename, 'w') as f:\n dict_writer = csv.DictWriter(f, data_list[0].keys())\n dict_writer.writeheader()\n dict_writer.writerows(data_list)\n\n\ndef write_csv2(filename, content_list):\n \"\"\"\n 与 write_csv 类似\n :param filename:\n :param content_list:\n :return:\n \"\"\"\n with open(filename, 'w') as f:\n csv_writer = csv.writer(f)\n head_list = content_list[0].keys()\n data_list = [content.values() for content in content_list]\n csv_writer.writerow(head_list)\n csv_writer.writerows(data_list)\n\n\n<mask token>\n",
"step-3": "<mask token>\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\ndef read_json(filename):\n \"\"\"\n 读取json格式的文件\n :param filename: json文件的文件名\n :return: [{}, {}, {}, {}, {},{} ......]\n \"\"\"\n return json.loads(open(filename).read())\n\n\ndef write_csv(filename, data_list):\n \"\"\"\n 将python对象 [{}, {}. {}, {} ...] 写入到csv文件中\n :param filename: 生成的csv文件名\n :param data_list: [{}, {}. {}, {} ...]\n :return: None\n \"\"\"\n with open(filename, 'w') as f:\n dict_writer = csv.DictWriter(f, data_list[0].keys())\n dict_writer.writeheader()\n dict_writer.writerows(data_list)\n\n\ndef write_csv2(filename, content_list):\n \"\"\"\n 与 write_csv 类似\n :param filename:\n :param content_list:\n :return:\n \"\"\"\n with open(filename, 'w') as f:\n csv_writer = csv.writer(f)\n head_list = content_list[0].keys()\n data_list = [content.values() for content in content_list]\n csv_writer.writerow(head_list)\n csv_writer.writerows(data_list)\n\n\nif __name__ == '__main__':\n content_list = read_json('lagou_info_lin3.json')\n write_csv('lagou_info_lin3.csv', content_list)\n",
"step-4": "import json\nimport csv\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\ndef read_json(filename):\n \"\"\"\n 读取json格式的文件\n :param filename: json文件的文件名\n :return: [{}, {}, {}, {}, {},{} ......]\n \"\"\"\n return json.loads(open(filename).read())\n\n\ndef write_csv(filename, data_list):\n \"\"\"\n 将python对象 [{}, {}. {}, {} ...] 写入到csv文件中\n :param filename: 生成的csv文件名\n :param data_list: [{}, {}. {}, {} ...]\n :return: None\n \"\"\"\n with open(filename, 'w') as f:\n dict_writer = csv.DictWriter(f, data_list[0].keys())\n dict_writer.writeheader()\n dict_writer.writerows(data_list)\n\n\ndef write_csv2(filename, content_list):\n \"\"\"\n 与 write_csv 类似\n :param filename:\n :param content_list:\n :return:\n \"\"\"\n with open(filename, 'w') as f:\n csv_writer = csv.writer(f)\n head_list = content_list[0].keys()\n data_list = [content.values() for content in content_list]\n csv_writer.writerow(head_list)\n csv_writer.writerows(data_list)\n\n\nif __name__ == '__main__':\n content_list = read_json('lagou_info_lin3.json')\n write_csv('lagou_info_lin3.csv', content_list)\n",
"step-5": "# coding=utf-8\r\n# __author__ = 'lyl'\r\n\r\nimport json\r\nimport csv\r\n\r\nimport sys\r\nreload(sys)\r\nsys.setdefaultencoding('utf-8')\r\n\r\n\r\ndef read_json(filename):\r\n \"\"\"\r\n 读取json格式的文件\r\n :param filename: json文件的文件名\r\n :return: [{}, {}, {}, {}, {},{} ......]\r\n \"\"\"\r\n return json.loads(open(filename).read())\r\n\r\ndef write_csv(filename, data_list):\r\n \"\"\"\r\n 将python对象 [{}, {}. {}, {} ...] 写入到csv文件中\r\n :param filename: 生成的csv文件名\r\n :param data_list: [{}, {}. {}, {} ...]\r\n :return: None\r\n \"\"\"\r\n with open(filename,'w') as f:\r\n dict_writer = csv.DictWriter(f, data_list[0].keys())\r\n dict_writer.writeheader()\r\n dict_writer.writerows(data_list)\r\n\r\n\r\ndef write_csv2(filename, content_list):\r\n \"\"\"\r\n 与 write_csv 类似\r\n :param filename:\r\n :param content_list:\r\n :return:\r\n \"\"\"\r\n with open(filename, 'w') as f:\r\n csv_writer = csv.writer(f)\r\n\r\n head_list = content_list[0].keys()\r\n data_list = [content.values() for content in content_list]\r\n csv_writer.writerow(head_list)\r\n csv_writer.writerows(data_list)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # 读出json数据内容\r\n content_list = read_json('lagou_info_lin3.json')\r\n # 将数据写入到csv文件\r\n write_csv( \"lagou_info_lin3.csv\", content_list)",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class AIns:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class CIns:
comp = {'0': '101010', '1': '111111', '-1': '111010', 'D': '001100',
'A': '110000', 'M': '110000', '!D': '001101', '!A': '110001', '!M':
'110001', '-D': '001111', '-A': '110011', '-M': '110011', 'D+1':
'011111', 'A+1': '110111', 'M+1': '110111', 'D-1': '001110', 'A-1':
'110010', 'M-1': '110010', 'D+A': '000010', 'D+M': '000010', 'D-A':
'010011', 'D-M': '010011', 'A-D': '000111', 'M-D': '000111', 'D&A':
'000000', 'D&M': '000000', 'D|A': '010101', 'D|M': '010101'}
jmp = {'JGT': '001', 'JEQ': '010', 'JGE': '011', 'JLT': '100', 'JNE':
'101', 'JLE': '110', 'JMP': '111'}
def __init__(self, token):
self.raw_instruction = token
token = token.replace(' ', '')
self.dest = ''
self.comp = ''
self.jmp = ''
if '=' in token:
self.dest, token = token.split('=', 1)
if ';' in token:
self.comp, self.jmp = token.split(';', 1)
else:
self.comp = token
def get_binary(self):
head = '111'
a = '0'
comp = '000000'
dst = ['0', '0', '0']
jmp = '000'
if self.dest:
if len(self.dest) > 3:
raise ParseError('Wrong dest length')
if 'A' in self.dest:
dst[0] = '1'
if 'D' in self.dest:
dst[1] = '1'
if 'M' in self.dest:
dst[2] = '1'
if self.jmp:
try:
jmp = CIns.jmp[self.jmp]
except KeyError:
raise ParseError('Wrong jmp instruction')
try:
comp = CIns.comp[self.comp]
except KeyError:
raise ParseError('Wrong comp instruction')
if 'M' in self.comp:
a = '1'
ret = '{}{}{}{}{}'.format(head, a, comp, ''.join(dst), jmp)
if len(ret) > 16:
raise ParseError(
"CInstruction binary contruction error, command was '{}'".
format(self.raw_instruction))
return ret
<|reserved_special_token_0|>
class Symbols:
def __init__(self):
self.memptr = 16
self.symbols = {'R0': 0, 'R1': 1, 'R2': 2, 'R3': 3, 'R4': 4, 'R5':
5, 'R6': 6, 'R7': 7, 'R8': 8, 'R9': 9, 'R10': 10, 'R11': 11,
'R12': 12, 'R13': 13, 'R14': 14, 'R15': 15, 'SCREEN': 16384,
'KBD': 24576, 'SP': 0, 'LCL': 1, 'ARG': 2, 'THIS': 3, 'THAT': 4}
def fill_with_labels(self, contents):
ret = []
pos = 0
for l in contents:
ls = l.strip()
if ls.startswith('(') and ls.endswith(')'):
label = ls[1:-1]
if label in self.symbols:
raise ParseError('Label redefinition')
else:
self.symbols[label] = pos
else:
ret.append(l)
pos += 1
return ret
def add(self, symbol):
if symbol in self.symbols:
raise ParseError('Variable redefinition')
self.symbols[symbol] = self.memptr
self.memptr += 1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ParseError(Exception):
    """Raised for any malformed assembly construct (bad mnemonic,
    oversized A-value, redefined label or variable)."""
    pass
def remove_inline_comments(text):
    """Strip /* ... */ block comments from *text*; exits the process with
    status 2 on an unterminated comment.

    NOTE(review): the '> 0' tests below miss a match at index 0, so a
    comment opening at the very start of *text* is not stripped --
    presumably '!= -1' was intended; confirm before relying on this.
    """
    ret = []
    in_comment_block = False
    p = 0
    while True:
        # Look for the next comment opener from position p.
        if (op := text.find('/*', p)) > 0:
            in_comment_block = True
            if op != p:
                ret.append(text[p:op])
            p = op + 2
        else:
            # No (further) opener: keep the remaining text and stop.
            ret.append(text[p:])
            break
        # Look for the matching closer.
        if (op := text.find('*/', p)) > 0:
            p = op + 2
            in_comment_block = False
            continue
        else:
            break
    if in_comment_block:
        # Unterminated comment block: abort the whole program.
        exit(2)
    return ''.join(ret)
def remove_comments(contents):
    """Drop '//' line comments from a list of source lines.

    NOTE(review): the index from lstrip.find('//') is computed on the
    *stripped* line but used to slice the original line, so trailing
    comments on indented lines are cut at the wrong column -- verify.
    """
    ret = []
    for l in contents:
        lstrip = l.strip()
        # Whole-line comment (possibly indented): drop the line entirely.
        if lstrip.startswith('//'):
            continue
        if (com := lstrip.find('//')) > 0:
            ret.append(l[0:com])
            continue
        ret.append(l)
    return ret
class AIns:
    """One Hack A-instruction (@value or @symbol)."""
    def __init__(self, token, symbols):
        # token is the text after '@'.  Decimal literals are used directly;
        # anything else is resolved through the symbol table, creating a
        # new variable entry on first sight.
        if token.isdecimal():
            self.value = int(token)
            # The value must fit in the 15-bit payload.
            if self.value > 2 ** 15 - 1:
                raise ParseError('A instruction value is too high')
        elif token in symbols.symbols:
            self.value = symbols.symbols[token]
        else:
            symbols.add(token)
            self.value = symbols.symbols[token]
    def get_binary(self):
        # Leading 0 (A-instruction opcode) + 15-bit zero-padded value.
        return '0{:015b}'.format(self.value)
class CIns:
    """One Hack C-instruction (dest=comp;jump); encodes as 111 a cccccc ddd jjj."""
    # comp mnemonic -> the six c-bits; the a-bit is derived later from
    # whether the mnemonic contains 'M'.
    comp = {'0': '101010', '1': '111111', '-1': '111010', 'D': '001100',
        'A': '110000', 'M': '110000', '!D': '001101', '!A': '110001', '!M':
        '110001', '-D': '001111', '-A': '110011', '-M': '110011', 'D+1':
        '011111', 'A+1': '110111', 'M+1': '110111', 'D-1': '001110', 'A-1':
        '110010', 'M-1': '110010', 'D+A': '000010', 'D+M': '000010', 'D-A':
        '010011', 'D-M': '010011', 'A-D': '000111', 'M-D': '000111', 'D&A':
        '000000', 'D&M': '000000', 'D|A': '010101', 'D|M': '010101'}
    # jump mnemonic -> the three j-bits.
    jmp = {'JGT': '001', 'JEQ': '010', 'JGE': '011', 'JLT': '100', 'JNE':
        '101', 'JLE': '110', 'JMP': '111'}
    def __init__(self, token):
        """Split 'dest=comp;jump' (dest and jump optional) into parts."""
        self.raw_instruction = token
        token = token.replace(' ', '')
        self.dest = ''
        self.comp = ''
        self.jmp = ''
        if '=' in token:
            self.dest, token = token.split('=', 1)
        if ';' in token:
            self.comp, self.jmp = token.split(';', 1)
        else:
            self.comp = token
    def get_binary(self):
        """Return the 16-character binary string for this instruction."""
        head = '111'
        a = '0'
        comp = '000000'
        dst = ['0', '0', '0']
        jmp = '000'
        if self.dest:
            if len(self.dest) > 3:
                raise ParseError('Wrong dest length')
            # d-bits in A, D, M order.
            if 'A' in self.dest:
                dst[0] = '1'
            if 'D' in self.dest:
                dst[1] = '1'
            if 'M' in self.dest:
                dst[2] = '1'
        if self.jmp:
            try:
                jmp = CIns.jmp[self.jmp]
            except KeyError:
                raise ParseError('Wrong jmp instruction')
        try:
            # Note: self.comp (instance string) indexes CIns.comp (class dict).
            comp = CIns.comp[self.comp]
        except KeyError:
            raise ParseError('Wrong comp instruction')
        # a-bit: comp mnemonics containing M operate on RAM[A] instead of A.
        if 'M' in self.comp:
            a = '1'
        ret = '{}{}{}{}{}'.format(head, a, comp, ''.join(dst), jmp)
        if len(ret) > 16:
            raise ParseError(
                "CInstruction binary contruction error, command was '{}'".
                format(self.raw_instruction))
        return ret
<|reserved_special_token_0|>
class Symbols:
    """Hack assembler symbol table: predefined symbols plus user labels
    and variables."""
    def __init__(self):
        # Next free RAM address for newly created variables (Hack
        # convention: variables start at address 16).
        self.memptr = 16
        # Predefined symbols: R0-R15, memory-mapped I/O, VM pointers.
        self.symbols = {'R0': 0, 'R1': 1, 'R2': 2, 'R3': 3, 'R4': 4, 'R5':
            5, 'R6': 6, 'R7': 7, 'R8': 8, 'R9': 9, 'R10': 10, 'R11': 11,
            'R12': 12, 'R13': 13, 'R14': 14, 'R15': 15, 'SCREEN': 16384,
            'KBD': 24576, 'SP': 0, 'LCL': 1, 'ARG': 2, 'THIS': 3, 'THAT': 4}
    def fill_with_labels(self, contents):
        """First pass: record (LABEL) lines in the table and return the
        contents with label lines removed."""
        ret = []
        pos = 0
        for l in contents:
            ls = l.strip()
            if ls.startswith('(') and ls.endswith(')'):
                label = ls[1:-1]
                if label in self.symbols:
                    raise ParseError('Label redefinition')
                else:
                    self.symbols[label] = pos
            else:
                ret.append(l)
                pos += 1
        return ret
    def add(self, symbol):
        """Allocate the next free RAM slot for a new variable symbol."""
        if symbol in self.symbols:
            raise ParseError('Variable redefinition')
        self.symbols[symbol] = self.memptr
        self.memptr += 1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ParseError(Exception):
    """Raised for any malformed assembly construct (bad mnemonic,
    oversized A-value, redefined label or variable)."""
def remove_inline_comments(text):
    """Strip /* ... */ block comments from *text*.

    Exits the process with status 2 on an unterminated comment (behavior
    kept from the original; consider raising ParseError instead).

    Fix: the original tested `text.find(...) > 0`, which treats a match at
    index 0 as "not found" (str.find returns 0 there, -1 only when absent),
    so a comment at the very start of the text was never stripped.
    """
    ret = []
    in_comment_block = False
    p = 0
    while True:
        # Find the next comment opener from position p.
        if (op := text.find('/*', p)) != -1:
            in_comment_block = True
            if op != p:
                ret.append(text[p:op])
            p = op + 2
        else:
            # No (further) opener: keep the remaining text and stop.
            ret.append(text[p:])
            break
        # Find the matching closer.
        if (op := text.find('*/', p)) != -1:
            p = op + 2
            in_comment_block = False
            continue
        else:
            break
    if in_comment_block:
        # Unterminated comment block: abort the whole program.
        exit(2)
    return ''.join(ret)
def remove_comments(contents):
    """Drop '//' line comments from a list of source lines.

    Whole-comment lines are removed entirely; a trailing comment is cut
    off, keeping the text (and whitespace) before '//'.

    Fix: the original searched in the *stripped* line but sliced the
    original line with that index, truncating indented lines at the wrong
    column.  Search the original line instead; '//' at index 0 is
    impossible here because the startswith branch already caught it.
    """
    ret = []
    for l in contents:
        lstrip = l.strip()
        # Whole-line comment (possibly indented): drop the line entirely.
        if lstrip.startswith('//'):
            continue
        if (com := l.find('//')) != -1:
            ret.append(l[0:com])
            continue
        ret.append(l)
    return ret
class AIns:
    """One Hack A-instruction (@value or @symbol)."""

    def __init__(self, token, symbols):
        # token is the text after '@'.  A decimal literal is used directly;
        # any other token is resolved through the symbol table, creating a
        # new variable entry on first sight.
        if token.isdecimal():
            numeric = int(token)
            if numeric > 2 ** 15 - 1:
                raise ParseError('A instruction value is too high')
            self.value = numeric
        else:
            if token not in symbols.symbols:
                symbols.add(token)
            self.value = symbols.symbols[token]

    def get_binary(self):
        """Return the 16-bit encoding: opcode bit 0 plus 15-bit value."""
        return '0{:015b}'.format(self.value)
class CIns:
    """One Hack C-instruction (dest=comp;jump); encodes as 111 a cccccc ddd jjj."""

    # comp mnemonic -> the six c-bits; the a-bit is derived separately
    # from whether the mnemonic contains 'M'.
    comp = {
        '0': '101010', '1': '111111', '-1': '111010', 'D': '001100',
        'A': '110000', 'M': '110000', '!D': '001101', '!A': '110001',
        '!M': '110001', '-D': '001111', '-A': '110011', '-M': '110011',
        'D+1': '011111', 'A+1': '110111', 'M+1': '110111', 'D-1': '001110',
        'A-1': '110010', 'M-1': '110010', 'D+A': '000010', 'D+M': '000010',
        'D-A': '010011', 'D-M': '010011', 'A-D': '000111', 'M-D': '000111',
        'D&A': '000000', 'D&M': '000000', 'D|A': '010101', 'D|M': '010101'}
    # jump mnemonic -> the three j-bits.
    jmp = {'JGT': '001', 'JEQ': '010', 'JGE': '011', 'JLT': '100',
           'JNE': '101', 'JLE': '110', 'JMP': '111'}

    def __init__(self, token):
        """Split 'dest=comp;jump' (dest and jump optional) into parts."""
        self.raw_instruction = token
        compact = token.replace(' ', '')
        self.dest, self.comp, self.jmp = '', '', ''
        if '=' in compact:
            self.dest, compact = compact.split('=', 1)
        if ';' in compact:
            self.comp, self.jmp = compact.split(';', 1)
        else:
            self.comp = compact

    def get_binary(self):
        """Return the 16-character binary string for this instruction."""
        dest_bits = ['0', '0', '0']
        if self.dest:
            if len(self.dest) > 3:
                raise ParseError('Wrong dest length')
            # d-bits in A, D, M order.
            for pos, reg in enumerate('ADM'):
                if reg in self.dest:
                    dest_bits[pos] = '1'
        jump_bits = '000'
        if self.jmp:
            try:
                jump_bits = CIns.jmp[self.jmp]
            except KeyError:
                raise ParseError('Wrong jmp instruction')
        try:
            # self.comp (instance string) indexes CIns.comp (class dict).
            comp_bits = CIns.comp[self.comp]
        except KeyError:
            raise ParseError('Wrong comp instruction')
        # a-bit: comp mnemonics containing M operate on RAM[A] instead of A.
        a_bit = '1' if 'M' in self.comp else '0'
        encoded = '111{}{}{}{}'.format(a_bit, comp_bits, ''.join(dest_bits), jump_bits)
        if len(encoded) > 16:
            raise ParseError(
                "CInstruction binary contruction error, command was '{}'".
                format(self.raw_instruction))
        return encoded
def parse(contents, symbols):
    """Translate source lines into instruction objects.

    Lines starting with '@' (after stripping) become AIns, everything
    else becomes CIns; label lines are assumed to be gone already.
    """
    def to_instruction(line):
        stripped = line.strip()
        if stripped.startswith('@'):
            return AIns(stripped[1:], symbols)
        return CIns(stripped)

    return [to_instruction(line) for line in contents]
class Symbols:
    """Hack assembler symbol table: predefined symbols plus user labels
    and variables."""
    def __init__(self):
        # Next free RAM address for newly created variables (Hack
        # convention: variables start at address 16).
        self.memptr = 16
        # Predefined symbols: R0-R15, memory-mapped I/O, VM pointers.
        self.symbols = {'R0': 0, 'R1': 1, 'R2': 2, 'R3': 3, 'R4': 4, 'R5':
            5, 'R6': 6, 'R7': 7, 'R8': 8, 'R9': 9, 'R10': 10, 'R11': 11,
            'R12': 12, 'R13': 13, 'R14': 14, 'R15': 15, 'SCREEN': 16384,
            'KBD': 24576, 'SP': 0, 'LCL': 1, 'ARG': 2, 'THIS': 3, 'THAT': 4}
    def fill_with_labels(self, contents):
        """First pass: record (LABEL) lines in the table and return the
        contents with label lines removed."""
        ret = []
        pos = 0
        for l in contents:
            ls = l.strip()
            if ls.startswith('(') and ls.endswith(')'):
                label = ls[1:-1]
                if label in self.symbols:
                    raise ParseError('Label redefinition')
                else:
                    self.symbols[label] = pos
            else:
                ret.append(l)
                pos += 1
        return ret
    def add(self, symbol):
        """Allocate the next free RAM slot for a new variable symbol."""
        if symbol in self.symbols:
            raise ParseError('Variable redefinition')
        self.symbols[symbol] = self.memptr
        self.memptr += 1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ParseError(Exception):
    """Raised when assembler source contains an invalid instruction or symbol."""
    pass
def remove_inline_comments(text):
    """Strip every complete /* ... */ comment block from *text*.

    Exits with status 2 when a comment block is left unterminated.
    """
    ret = []
    in_comment_block = False
    p = 0
    while True:
        # BUG FIX: str.find() returns 0 for a match at the start of the
        # text, so the original `> 0` test missed a comment opening at
        # position 0.
        if (op := text.find('/*', p)) != -1:
            in_comment_block = True
            if op != p:
                ret.append(text[p:op])
            p = op + 2
        else:
            ret.append(text[p:])
            break
        if (op := text.find('*/', p)) != -1:
            p = op + 2
            in_comment_block = False
            continue
        else:
            break
    if in_comment_block:
        exit(2)
    return ''.join(ret)
def remove_comments(contents):
    """Drop // line comments from a list of source lines.

    Whole-comment lines are removed entirely; trailing comments are cut off.
    """
    ret = []
    for l in contents:
        if l.strip().startswith('//'):
            continue
        # BUG FIX: the original searched for '//' in the *stripped* line
        # but sliced the original line with that offset, truncating code
        # whenever the line had leading whitespace.
        com = l.find('//')
        if com != -1:
            ret.append(l[0:com])
            continue
        ret.append(l)
    return ret
class AIns:
    """An @value / @symbol A-instruction."""

    def __init__(self, token, symbols):
        if token.isdecimal():
            self.value = int(token)
            if self.value > 2 ** 15 - 1:
                raise ParseError('A instruction value is too high')
        else:
            # Unknown symbols get a fresh RAM address from the table.
            if token not in symbols.symbols:
                symbols.add(token)
            self.value = symbols.symbols[token]

    def get_binary(self):
        """Return the instruction as a 16-bit string (leading opcode 0)."""
        return '0{:015b}'.format(self.value)
class CIns:
    """A dest=comp;jump C-instruction and its binary encoding tables."""

    comp = {'0': '101010', '1': '111111', '-1': '111010', 'D': '001100',
            'A': '110000', 'M': '110000', '!D': '001101', '!A': '110001',
            '!M': '110001', '-D': '001111', '-A': '110011', '-M': '110011',
            'D+1': '011111', 'A+1': '110111', 'M+1': '110111',
            'D-1': '001110', 'A-1': '110010', 'M-1': '110010',
            'D+A': '000010', 'D+M': '000010', 'D-A': '010011',
            'D-M': '010011', 'A-D': '000111', 'M-D': '000111',
            'D&A': '000000', 'D&M': '000000', 'D|A': '010101',
            'D|M': '010101'}

    jmp = {'JGT': '001', 'JEQ': '010', 'JGE': '011', 'JLT': '100',
           'JNE': '101', 'JLE': '110', 'JMP': '111'}

    def __init__(self, token):
        self.raw_instruction = token
        body = token.replace(' ', '')
        self.dest = ''
        self.comp = ''
        self.jmp = ''
        if '=' in body:
            self.dest, body = body.split('=', 1)
        if ';' in body:
            self.comp, self.jmp = body.split(';', 1)
        else:
            self.comp = body

    def get_binary(self):
        """Encode as 16 bits: 111 a cccccc ddd jjj."""
        dest_bits = ['0', '0', '0']
        if self.dest:
            if len(self.dest) > 3:
                raise ParseError('Wrong dest length')
            for pos, reg in enumerate('ADM'):
                if reg in self.dest:
                    dest_bits[pos] = '1'
        jmp_bits = '000'
        if self.jmp:
            try:
                jmp_bits = CIns.jmp[self.jmp]
            except KeyError:
                raise ParseError('Wrong jmp instruction')
        try:
            comp_bits = CIns.comp[self.comp]
        except KeyError:
            raise ParseError('Wrong comp instruction')
        # The 'a' bit selects the M variant of the comp table.
        a_bit = '1' if 'M' in self.comp else '0'
        result = '111' + a_bit + comp_bits + ''.join(dest_bits) + jmp_bits
        if len(result) > 16:
            raise ParseError(
                "CInstruction binary contruction error, command was '{}'".
                format(self.raw_instruction))
        return result
def parse(contents, symbols):
    """Build instruction objects: AIns for @-lines, CIns for all others."""
    return [AIns(s[1:], symbols) if s.startswith('@') else CIns(s)
            for s in (line.strip() for line in contents)]
class Symbols:
    """Symbol table pre-seeded with the Hack predefined symbols.

    Variables are allocated consecutive RAM addresses starting at 16.
    """

    def __init__(self):
        self.memptr = 16  # next RAM address handed out to a variable
        table = {'SP': 0, 'LCL': 1, 'ARG': 2, 'THIS': 3, 'THAT': 4,
                 'SCREEN': 16384, 'KBD': 24576}
        table.update({'R{}'.format(i): i for i in range(16)})
        self.symbols = table

    def fill_with_labels(self, contents):
        """Record (LABEL) lines and return the stream without them."""
        kept = []
        pos = 0
        for line in contents:
            stripped = line.strip()
            if stripped.startswith('(') and stripped.endswith(')'):
                name = stripped[1:-1]
                if name in self.symbols:
                    raise ParseError('Label redefinition')
                self.symbols[name] = pos
            else:
                kept.append(line)
                pos += 1  # labels do not occupy an instruction slot
        return kept

    def add(self, symbol):
        """Allocate the next free RAM address for a new variable."""
        if symbol in self.symbols:
            raise ParseError('Variable redefinition')
        self.symbols[symbol] = self.memptr
        self.memptr += 1
def main():
    """Assemble the .asm file named on the command line into a .hack file."""
    # BUG FIX: sys.argv always contains at least the program name, so the
    # original `len(sys.argv) < 1` check could never fire; an input file
    # is required at argv[1].
    if len(sys.argv) < 2:
        exit(1)
    filename = sys.argv[1]
    with open(filename) as f:
        text = f.read()
    contents = remove_inline_comments(text).split('\n')
    contents = filter(None, remove_comments(contents))
    symbols = Symbols()
    contents = symbols.fill_with_labels(contents)
    parsed = parse(contents, symbols)
    out_filename = '{}.hack'.format(os.path.splitext(filename)[0])
    with open(out_filename, 'w') as f:
        for i in parsed:
            try:
                f.write('{}\n'.format(i.get_binary()))
            except ParseError as e:
                print(e)
                exit(1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#!/usr/bin/python
import sys
import os
class ParseError(Exception):
    """Raised when assembler source contains an invalid instruction or symbol."""
    pass
def remove_inline_comments(text):
    """Strip every complete /* ... */ comment block from *text*.

    Exits with status 2 when a comment block is left unterminated.
    """
    ret = []
    in_comment_block = False
    p = 0
    while True:
        # BUG FIX: str.find() returns 0 for a match at the start of the
        # text, so the original `> 0` test missed a comment opening at
        # position 0.
        if (op := text.find('/*', p)) != -1:
            in_comment_block = True
            if op != p:
                ret.append(text[p:op])
            p = op + 2
        else:
            ret.append(text[p:])
            break
        if (op := text.find('*/', p)) != -1:
            p = op + 2
            in_comment_block = False
            continue
        else:
            break
    if in_comment_block:
        exit(2)
    return ''.join(ret)
def remove_comments(contents):
    """Drop // line comments from a list of source lines.

    Whole-comment lines are removed entirely; trailing comments are cut off.
    """
    ret = []
    for l in contents:
        if l.strip().startswith('//'):
            continue
        # BUG FIX: the original searched for '//' in the *stripped* line
        # but sliced the original line with that offset, truncating code
        # whenever the line had leading whitespace.
        com = l.find('//')
        if com != -1:
            ret.append(l[0:com])
            continue
        ret.append(l)
    return ret
class AIns:
    """An @value / @symbol A-instruction."""

    def __init__(self, token, symbols):
        if token.isdecimal():
            self.value = int(token)
            if self.value > 2 ** 15 - 1:
                raise ParseError("A instruction value is too high")
        else:
            # Unknown symbols get a fresh RAM address from the table.
            if token not in symbols.symbols:
                symbols.add(token)
            self.value = symbols.symbols[token]

    def get_binary(self):
        """Return the instruction as a 16-bit string (leading opcode 0)."""
        return "0{:015b}".format(self.value)
class CIns:
    """A dest=comp;jump C-instruction and its binary encoding tables."""

    comp = {'0': '101010', '1': '111111', '-1': '111010', 'D': '001100',
            'A': '110000', 'M': '110000', '!D': '001101', '!A': '110001',
            '!M': '110001', '-D': '001111', '-A': '110011', '-M': '110011',
            'D+1': '011111', 'A+1': '110111', 'M+1': '110111',
            'D-1': '001110', 'A-1': '110010', 'M-1': '110010',
            'D+A': '000010', 'D+M': '000010', 'D-A': '010011',
            'D-M': '010011', 'A-D': '000111', 'M-D': '000111',
            'D&A': '000000', 'D&M': '000000', 'D|A': '010101',
            'D|M': '010101'}

    jmp = {'JGT': '001', 'JEQ': '010', 'JGE': '011', 'JLT': '100',
           'JNE': '101', 'JLE': '110', 'JMP': '111'}

    def __init__(self, token):
        self.raw_instruction = token
        body = token.replace(' ', '')
        self.dest = ''
        self.comp = ''
        self.jmp = ''
        if '=' in body:
            self.dest, body = body.split('=', 1)
        if ';' in body:
            self.comp, self.jmp = body.split(';', 1)
        else:
            self.comp = body

    def get_binary(self):
        """Encode as 16 bits: 111 a cccccc ddd jjj."""
        dest_bits = ['0', '0', '0']
        if self.dest:
            if len(self.dest) > 3:
                raise ParseError('Wrong dest length')
            for pos, reg in enumerate('ADM'):
                if reg in self.dest:
                    dest_bits[pos] = '1'
        jmp_bits = '000'
        if self.jmp:
            try:
                jmp_bits = CIns.jmp[self.jmp]
            except KeyError:
                raise ParseError('Wrong jmp instruction')
        try:
            comp_bits = CIns.comp[self.comp]
        except KeyError:
            raise ParseError("Wrong comp instruction")
        # The 'a' bit selects the M variant of the comp table.
        a_bit = '1' if 'M' in self.comp else '0'
        result = "111" + a_bit + comp_bits + ''.join(dest_bits) + jmp_bits
        if len(result) > 16:
            raise ParseError("CInstruction binary contruction error, command was '{}'".format(self.raw_instruction))
        return result
def parse(contents, symbols):
    """Build instruction objects: AIns for @-lines, CIns for all others."""
    return [AIns(s[1:], symbols) if s.startswith('@') else CIns(s)
            for s in (line.strip() for line in contents)]
class Symbols:
    """Symbol table pre-seeded with the Hack predefined symbols.

    Variables are allocated consecutive RAM addresses starting at 16.
    """

    def __init__(self):
        self.memptr = 16  # next RAM address handed out to a variable
        table = {'SP': 0, 'LCL': 1, 'ARG': 2, 'THIS': 3, 'THAT': 4,
                 'SCREEN': 16384, 'KBD': 24576}
        table.update({'R{}'.format(i): i for i in range(16)})
        self.symbols = table

    def fill_with_labels(self, contents):
        """Record (LABEL) lines and return the stream without them."""
        kept = []
        pos = 0
        for line in contents:
            stripped = line.strip()
            if stripped.startswith('(') and stripped.endswith(')'):
                name = stripped[1:-1]
                if name in self.symbols:
                    raise ParseError('Label redefinition')
                self.symbols[name] = pos
            else:
                kept.append(line)
                pos += 1  # labels do not occupy an instruction slot
        return kept

    def add(self, symbol):
        """Allocate the next free RAM address for a new variable."""
        if symbol in self.symbols:
            raise ParseError('Variable redefinition')
        self.symbols[symbol] = self.memptr
        self.memptr += 1
def main():
    """Assemble the .asm file named on the command line into a .hack file."""
    # BUG FIX: sys.argv always contains at least the program name, so the
    # original `len(sys.argv) < 1` check could never fire; an input file
    # is required at argv[1].
    if len(sys.argv) < 2:
        exit(1)
    filename = sys.argv[1]
    with open(filename) as f:
        text = f.read()
    contents = remove_inline_comments(text).split('\n')
    contents = filter(None, remove_comments(contents))
    symbols = Symbols()
    contents = symbols.fill_with_labels(contents)
    parsed = parse(contents, symbols)
    out_filename = "{}.hack".format(os.path.splitext(filename)[0])
    with open(out_filename, 'w') as f:
        for i in parsed:
            try:
                f.write("{}\n".format(i.get_binary()))
            except ParseError as e:
                print(e)
                exit(1)
# Entry point guard: run the assembler only when executed as a script.
if __name__ == "__main__":
    main()
|
flexible
|
{
"blob_id": "11e9e4dd5c9c6158fed40080d4cc221f28a0eba0",
"index": 8097,
"step-1": "<mask token>\n\n\nclass AIns:\n <mask token>\n <mask token>\n\n\nclass CIns:\n comp = {'0': '101010', '1': '111111', '-1': '111010', 'D': '001100',\n 'A': '110000', 'M': '110000', '!D': '001101', '!A': '110001', '!M':\n '110001', '-D': '001111', '-A': '110011', '-M': '110011', 'D+1':\n '011111', 'A+1': '110111', 'M+1': '110111', 'D-1': '001110', 'A-1':\n '110010', 'M-1': '110010', 'D+A': '000010', 'D+M': '000010', 'D-A':\n '010011', 'D-M': '010011', 'A-D': '000111', 'M-D': '000111', 'D&A':\n '000000', 'D&M': '000000', 'D|A': '010101', 'D|M': '010101'}\n jmp = {'JGT': '001', 'JEQ': '010', 'JGE': '011', 'JLT': '100', 'JNE':\n '101', 'JLE': '110', 'JMP': '111'}\n\n def __init__(self, token):\n self.raw_instruction = token\n token = token.replace(' ', '')\n self.dest = ''\n self.comp = ''\n self.jmp = ''\n if '=' in token:\n self.dest, token = token.split('=', 1)\n if ';' in token:\n self.comp, self.jmp = token.split(';', 1)\n else:\n self.comp = token\n\n def get_binary(self):\n head = '111'\n a = '0'\n comp = '000000'\n dst = ['0', '0', '0']\n jmp = '000'\n if self.dest:\n if len(self.dest) > 3:\n raise ParseError('Wrong dest length')\n if 'A' in self.dest:\n dst[0] = '1'\n if 'D' in self.dest:\n dst[1] = '1'\n if 'M' in self.dest:\n dst[2] = '1'\n if self.jmp:\n try:\n jmp = CIns.jmp[self.jmp]\n except KeyError:\n raise ParseError('Wrong jmp instruction')\n try:\n comp = CIns.comp[self.comp]\n except KeyError:\n raise ParseError('Wrong comp instruction')\n if 'M' in self.comp:\n a = '1'\n ret = '{}{}{}{}{}'.format(head, a, comp, ''.join(dst), jmp)\n if len(ret) > 16:\n raise ParseError(\n \"CInstruction binary contruction error, command was '{}'\".\n format(self.raw_instruction))\n return ret\n\n\n<mask token>\n\n\nclass Symbols:\n\n def __init__(self):\n self.memptr = 16\n self.symbols = {'R0': 0, 'R1': 1, 'R2': 2, 'R3': 3, 'R4': 4, 'R5': \n 5, 'R6': 6, 'R7': 7, 'R8': 8, 'R9': 9, 'R10': 10, 'R11': 11,\n 'R12': 12, 'R13': 13, 'R14': 14, 'R15': 15, 
'SCREEN': 16384,\n 'KBD': 24576, 'SP': 0, 'LCL': 1, 'ARG': 2, 'THIS': 3, 'THAT': 4}\n\n def fill_with_labels(self, contents):\n ret = []\n pos = 0\n for l in contents:\n ls = l.strip()\n if ls.startswith('(') and ls.endswith(')'):\n label = ls[1:-1]\n if label in self.symbols:\n raise ParseError('Label redefinition')\n else:\n self.symbols[label] = pos\n else:\n ret.append(l)\n pos += 1\n return ret\n\n def add(self, symbol):\n if symbol in self.symbols:\n raise ParseError('Variable redefinition')\n self.symbols[symbol] = self.memptr\n self.memptr += 1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ParseError(Exception):\n pass\n\n\ndef remove_inline_comments(text):\n ret = []\n in_comment_block = False\n p = 0\n while True:\n if (op := text.find('/*', p)) > 0:\n in_comment_block = True\n if op != p:\n ret.append(text[p:op])\n p = op + 2\n else:\n ret.append(text[p:])\n break\n if (op := text.find('*/', p)) > 0:\n p = op + 2\n in_comment_block = False\n continue\n else:\n break\n if in_comment_block:\n exit(2)\n return ''.join(ret)\n\n\ndef remove_comments(contents):\n ret = []\n for l in contents:\n lstrip = l.strip()\n if lstrip.startswith('//'):\n continue\n if (com := lstrip.find('//')) > 0:\n ret.append(l[0:com])\n continue\n ret.append(l)\n return ret\n\n\nclass AIns:\n\n def __init__(self, token, symbols):\n if token.isdecimal():\n self.value = int(token)\n if self.value > 2 ** 15 - 1:\n raise ParseError('A instruction value is too high')\n elif token in symbols.symbols:\n self.value = symbols.symbols[token]\n else:\n symbols.add(token)\n self.value = symbols.symbols[token]\n\n def get_binary(self):\n return '0{:015b}'.format(self.value)\n\n\nclass CIns:\n comp = {'0': '101010', '1': '111111', '-1': '111010', 'D': '001100',\n 'A': '110000', 'M': '110000', '!D': '001101', '!A': '110001', '!M':\n '110001', '-D': '001111', '-A': '110011', '-M': '110011', 'D+1':\n '011111', 'A+1': '110111', 'M+1': '110111', 'D-1': '001110', 'A-1':\n '110010', 'M-1': '110010', 'D+A': '000010', 'D+M': '000010', 'D-A':\n '010011', 'D-M': '010011', 'A-D': '000111', 'M-D': '000111', 'D&A':\n '000000', 'D&M': '000000', 'D|A': '010101', 'D|M': '010101'}\n jmp = {'JGT': '001', 'JEQ': '010', 'JGE': '011', 'JLT': '100', 'JNE':\n '101', 'JLE': '110', 'JMP': '111'}\n\n def __init__(self, token):\n self.raw_instruction = token\n token = token.replace(' ', '')\n self.dest = ''\n self.comp = ''\n self.jmp = ''\n if '=' in token:\n self.dest, token = token.split('=', 1)\n if ';' in token:\n self.comp, self.jmp = token.split(';', 1)\n else:\n self.comp = 
token\n\n def get_binary(self):\n head = '111'\n a = '0'\n comp = '000000'\n dst = ['0', '0', '0']\n jmp = '000'\n if self.dest:\n if len(self.dest) > 3:\n raise ParseError('Wrong dest length')\n if 'A' in self.dest:\n dst[0] = '1'\n if 'D' in self.dest:\n dst[1] = '1'\n if 'M' in self.dest:\n dst[2] = '1'\n if self.jmp:\n try:\n jmp = CIns.jmp[self.jmp]\n except KeyError:\n raise ParseError('Wrong jmp instruction')\n try:\n comp = CIns.comp[self.comp]\n except KeyError:\n raise ParseError('Wrong comp instruction')\n if 'M' in self.comp:\n a = '1'\n ret = '{}{}{}{}{}'.format(head, a, comp, ''.join(dst), jmp)\n if len(ret) > 16:\n raise ParseError(\n \"CInstruction binary contruction error, command was '{}'\".\n format(self.raw_instruction))\n return ret\n\n\n<mask token>\n\n\nclass Symbols:\n\n def __init__(self):\n self.memptr = 16\n self.symbols = {'R0': 0, 'R1': 1, 'R2': 2, 'R3': 3, 'R4': 4, 'R5': \n 5, 'R6': 6, 'R7': 7, 'R8': 8, 'R9': 9, 'R10': 10, 'R11': 11,\n 'R12': 12, 'R13': 13, 'R14': 14, 'R15': 15, 'SCREEN': 16384,\n 'KBD': 24576, 'SP': 0, 'LCL': 1, 'ARG': 2, 'THIS': 3, 'THAT': 4}\n\n def fill_with_labels(self, contents):\n ret = []\n pos = 0\n for l in contents:\n ls = l.strip()\n if ls.startswith('(') and ls.endswith(')'):\n label = ls[1:-1]\n if label in self.symbols:\n raise ParseError('Label redefinition')\n else:\n self.symbols[label] = pos\n else:\n ret.append(l)\n pos += 1\n return ret\n\n def add(self, symbol):\n if symbol in self.symbols:\n raise ParseError('Variable redefinition')\n self.symbols[symbol] = self.memptr\n self.memptr += 1\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ParseError(Exception):\n pass\n\n\ndef remove_inline_comments(text):\n ret = []\n in_comment_block = False\n p = 0\n while True:\n if (op := text.find('/*', p)) > 0:\n in_comment_block = True\n if op != p:\n ret.append(text[p:op])\n p = op + 2\n else:\n ret.append(text[p:])\n break\n if (op := text.find('*/', p)) > 0:\n p = op + 2\n in_comment_block = False\n continue\n else:\n break\n if in_comment_block:\n exit(2)\n return ''.join(ret)\n\n\ndef remove_comments(contents):\n ret = []\n for l in contents:\n lstrip = l.strip()\n if lstrip.startswith('//'):\n continue\n if (com := lstrip.find('//')) > 0:\n ret.append(l[0:com])\n continue\n ret.append(l)\n return ret\n\n\nclass AIns:\n\n def __init__(self, token, symbols):\n if token.isdecimal():\n self.value = int(token)\n if self.value > 2 ** 15 - 1:\n raise ParseError('A instruction value is too high')\n elif token in symbols.symbols:\n self.value = symbols.symbols[token]\n else:\n symbols.add(token)\n self.value = symbols.symbols[token]\n\n def get_binary(self):\n return '0{:015b}'.format(self.value)\n\n\nclass CIns:\n comp = {'0': '101010', '1': '111111', '-1': '111010', 'D': '001100',\n 'A': '110000', 'M': '110000', '!D': '001101', '!A': '110001', '!M':\n '110001', '-D': '001111', '-A': '110011', '-M': '110011', 'D+1':\n '011111', 'A+1': '110111', 'M+1': '110111', 'D-1': '001110', 'A-1':\n '110010', 'M-1': '110010', 'D+A': '000010', 'D+M': '000010', 'D-A':\n '010011', 'D-M': '010011', 'A-D': '000111', 'M-D': '000111', 'D&A':\n '000000', 'D&M': '000000', 'D|A': '010101', 'D|M': '010101'}\n jmp = {'JGT': '001', 'JEQ': '010', 'JGE': '011', 'JLT': '100', 'JNE':\n '101', 'JLE': '110', 'JMP': '111'}\n\n def __init__(self, token):\n self.raw_instruction = token\n token = token.replace(' ', '')\n self.dest = ''\n self.comp = ''\n self.jmp = ''\n if '=' in token:\n self.dest, token = token.split('=', 1)\n if ';' in token:\n self.comp, self.jmp = token.split(';', 1)\n else:\n self.comp = 
token\n\n def get_binary(self):\n head = '111'\n a = '0'\n comp = '000000'\n dst = ['0', '0', '0']\n jmp = '000'\n if self.dest:\n if len(self.dest) > 3:\n raise ParseError('Wrong dest length')\n if 'A' in self.dest:\n dst[0] = '1'\n if 'D' in self.dest:\n dst[1] = '1'\n if 'M' in self.dest:\n dst[2] = '1'\n if self.jmp:\n try:\n jmp = CIns.jmp[self.jmp]\n except KeyError:\n raise ParseError('Wrong jmp instruction')\n try:\n comp = CIns.comp[self.comp]\n except KeyError:\n raise ParseError('Wrong comp instruction')\n if 'M' in self.comp:\n a = '1'\n ret = '{}{}{}{}{}'.format(head, a, comp, ''.join(dst), jmp)\n if len(ret) > 16:\n raise ParseError(\n \"CInstruction binary contruction error, command was '{}'\".\n format(self.raw_instruction))\n return ret\n\n\ndef parse(contents, symbols):\n ret = []\n for l in contents:\n ls = l.strip()\n if ls.startswith('@'):\n ret.append(AIns(ls[1:], symbols))\n else:\n ret.append(CIns(ls))\n return ret\n\n\nclass Symbols:\n\n def __init__(self):\n self.memptr = 16\n self.symbols = {'R0': 0, 'R1': 1, 'R2': 2, 'R3': 3, 'R4': 4, 'R5': \n 5, 'R6': 6, 'R7': 7, 'R8': 8, 'R9': 9, 'R10': 10, 'R11': 11,\n 'R12': 12, 'R13': 13, 'R14': 14, 'R15': 15, 'SCREEN': 16384,\n 'KBD': 24576, 'SP': 0, 'LCL': 1, 'ARG': 2, 'THIS': 3, 'THAT': 4}\n\n def fill_with_labels(self, contents):\n ret = []\n pos = 0\n for l in contents:\n ls = l.strip()\n if ls.startswith('(') and ls.endswith(')'):\n label = ls[1:-1]\n if label in self.symbols:\n raise ParseError('Label redefinition')\n else:\n self.symbols[label] = pos\n else:\n ret.append(l)\n pos += 1\n return ret\n\n def add(self, symbol):\n if symbol in self.symbols:\n raise ParseError('Variable redefinition')\n self.symbols[symbol] = self.memptr\n self.memptr += 1\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ParseError(Exception):\n pass\n\n\ndef remove_inline_comments(text):\n ret = []\n in_comment_block = False\n p = 0\n while True:\n if (op := text.find('/*', p)) > 0:\n in_comment_block = True\n if op != p:\n ret.append(text[p:op])\n p = op + 2\n else:\n ret.append(text[p:])\n break\n if (op := text.find('*/', p)) > 0:\n p = op + 2\n in_comment_block = False\n continue\n else:\n break\n if in_comment_block:\n exit(2)\n return ''.join(ret)\n\n\ndef remove_comments(contents):\n ret = []\n for l in contents:\n lstrip = l.strip()\n if lstrip.startswith('//'):\n continue\n if (com := lstrip.find('//')) > 0:\n ret.append(l[0:com])\n continue\n ret.append(l)\n return ret\n\n\nclass AIns:\n\n def __init__(self, token, symbols):\n if token.isdecimal():\n self.value = int(token)\n if self.value > 2 ** 15 - 1:\n raise ParseError('A instruction value is too high')\n elif token in symbols.symbols:\n self.value = symbols.symbols[token]\n else:\n symbols.add(token)\n self.value = symbols.symbols[token]\n\n def get_binary(self):\n return '0{:015b}'.format(self.value)\n\n\nclass CIns:\n comp = {'0': '101010', '1': '111111', '-1': '111010', 'D': '001100',\n 'A': '110000', 'M': '110000', '!D': '001101', '!A': '110001', '!M':\n '110001', '-D': '001111', '-A': '110011', '-M': '110011', 'D+1':\n '011111', 'A+1': '110111', 'M+1': '110111', 'D-1': '001110', 'A-1':\n '110010', 'M-1': '110010', 'D+A': '000010', 'D+M': '000010', 'D-A':\n '010011', 'D-M': '010011', 'A-D': '000111', 'M-D': '000111', 'D&A':\n '000000', 'D&M': '000000', 'D|A': '010101', 'D|M': '010101'}\n jmp = {'JGT': '001', 'JEQ': '010', 'JGE': '011', 'JLT': '100', 'JNE':\n '101', 'JLE': '110', 'JMP': '111'}\n\n def __init__(self, token):\n self.raw_instruction = token\n token = token.replace(' ', '')\n self.dest = ''\n self.comp = ''\n self.jmp = ''\n if '=' in token:\n self.dest, token = token.split('=', 1)\n if ';' in token:\n self.comp, self.jmp = token.split(';', 1)\n else:\n self.comp = 
token\n\n def get_binary(self):\n head = '111'\n a = '0'\n comp = '000000'\n dst = ['0', '0', '0']\n jmp = '000'\n if self.dest:\n if len(self.dest) > 3:\n raise ParseError('Wrong dest length')\n if 'A' in self.dest:\n dst[0] = '1'\n if 'D' in self.dest:\n dst[1] = '1'\n if 'M' in self.dest:\n dst[2] = '1'\n if self.jmp:\n try:\n jmp = CIns.jmp[self.jmp]\n except KeyError:\n raise ParseError('Wrong jmp instruction')\n try:\n comp = CIns.comp[self.comp]\n except KeyError:\n raise ParseError('Wrong comp instruction')\n if 'M' in self.comp:\n a = '1'\n ret = '{}{}{}{}{}'.format(head, a, comp, ''.join(dst), jmp)\n if len(ret) > 16:\n raise ParseError(\n \"CInstruction binary contruction error, command was '{}'\".\n format(self.raw_instruction))\n return ret\n\n\ndef parse(contents, symbols):\n ret = []\n for l in contents:\n ls = l.strip()\n if ls.startswith('@'):\n ret.append(AIns(ls[1:], symbols))\n else:\n ret.append(CIns(ls))\n return ret\n\n\nclass Symbols:\n\n def __init__(self):\n self.memptr = 16\n self.symbols = {'R0': 0, 'R1': 1, 'R2': 2, 'R3': 3, 'R4': 4, 'R5': \n 5, 'R6': 6, 'R7': 7, 'R8': 8, 'R9': 9, 'R10': 10, 'R11': 11,\n 'R12': 12, 'R13': 13, 'R14': 14, 'R15': 15, 'SCREEN': 16384,\n 'KBD': 24576, 'SP': 0, 'LCL': 1, 'ARG': 2, 'THIS': 3, 'THAT': 4}\n\n def fill_with_labels(self, contents):\n ret = []\n pos = 0\n for l in contents:\n ls = l.strip()\n if ls.startswith('(') and ls.endswith(')'):\n label = ls[1:-1]\n if label in self.symbols:\n raise ParseError('Label redefinition')\n else:\n self.symbols[label] = pos\n else:\n ret.append(l)\n pos += 1\n return ret\n\n def add(self, symbol):\n if symbol in self.symbols:\n raise ParseError('Variable redefinition')\n self.symbols[symbol] = self.memptr\n self.memptr += 1\n\n\ndef main():\n if len(sys.argv) < 1:\n exit(1)\n filename = sys.argv[1]\n contents = []\n with open(filename) as f:\n text = f.read()\n contents = remove_inline_comments(text).split('\\n')\n contents = filter(None, 
remove_comments(contents))\n symbols = Symbols()\n contents = symbols.fill_with_labels(contents)\n parsed = parse(contents, symbols)\n out_filename = '{}.hack'.format(os.path.splitext(filename)[0])\n with open(out_filename, 'w') as f:\n for i in parsed:\n try:\n f.write('{}\\n'.format(i.get_binary()))\n except ParseError as e:\n print(e)\n exit(1)\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/python\n\nimport sys\nimport os\n\n\nclass ParseError(Exception):\n pass\n\n\ndef remove_inline_comments(text):\n ret = []\n in_comment_block = False\n p = 0\n while True:\n if (op := text.find('/*', p)) > 0:\n in_comment_block = True\n if op != p:\n ret.append(text[p:op])\n p = op + 2\n else:\n ret.append(text[p:])\n break\n\n if (op := text.find('*/', p)) > 0:\n p = op + 2\n in_comment_block = False\n continue\n else:\n break\n\n if in_comment_block:\n exit(2)\n\n return ''.join(ret)\n\n\ndef remove_comments(contents):\n ret = []\n for l in contents:\n lstrip = l.strip()\n if lstrip.startswith('//'):\n continue\n if (com := lstrip.find('//')) > 0:\n ret.append(l[0:com])\n continue\n\n ret.append(l)\n\n return ret\n\n\nclass AIns:\n def __init__(self, token, symbols):\n if token.isdecimal():\n self.value = int(token)\n if self.value > 2**15-1:\n raise ParseError(\"A instruction value is too high\")\n\n elif token in symbols.symbols:\n self.value = symbols.symbols[token]\n else:\n symbols.add(token)\n self.value = symbols.symbols[token]\n\n def get_binary(self):\n return \"0{:015b}\".format(self.value)\n\n\nclass CIns:\n\n comp = {\n '0': '101010',\n '1': '111111',\n '-1': '111010',\n 'D': '001100',\n 'A': '110000',\n 'M': '110000',\n '!D': '001101',\n '!A': '110001',\n '!M': '110001',\n '-D': '001111',\n '-A': '110011',\n '-M': '110011',\n 'D+1': '011111',\n 'A+1': '110111',\n 'M+1': '110111',\n 'D-1': '001110',\n 'A-1': '110010',\n 'M-1': '110010',\n 'D+A': '000010',\n 'D+M': '000010',\n 'D-A': '010011',\n 'D-M': '010011',\n 'A-D': '000111',\n 'M-D': '000111',\n 'D&A': '000000',\n 'D&M': '000000',\n 'D|A': '010101',\n 'D|M': '010101',\n }\n\n jmp = {\n 'JGT': '001',\n 'JEQ': '010',\n 'JGE': '011',\n 'JLT': '100',\n 'JNE': '101',\n 'JLE': '110',\n 'JMP': '111',\n }\n\n def __init__(self, token):\n self.raw_instruction = token\n token = token.replace(' ', '')\n self.dest = ''\n self.comp = ''\n self.jmp = ''\n if '=' in token:\n self.dest, 
token = token.split('=', 1)\n if ';' in token:\n self.comp, self.jmp = token.split(';', 1)\n else:\n self.comp = token\n\n def get_binary(self):\n head = '111'\n a='0'\n comp = '000000'\n dst = ['0', '0', '0']\n jmp = '000'\n\n if self.dest:\n if len(self.dest) > 3:\n raise ParseError('Wrong dest length')\n if 'A' in self.dest:\n dst[0] = '1'\n if 'D' in self.dest:\n dst[1] = '1'\n if 'M' in self.dest:\n dst[2] = '1'\n\n if self.jmp:\n try:\n jmp = CIns.jmp[self.jmp]\n except KeyError:\n raise ParseError('Wrong jmp instruction')\n\n try:\n comp = CIns.comp[self.comp]\n except KeyError:\n raise ParseError(\"Wrong comp instruction\")\n\n if 'M' in self.comp:\n a = '1'\n\n ret = \"{}{}{}{}{}\".format(head, a, comp, ''.join(dst), jmp)\n if len(ret) > 16:\n raise ParseError(\"CInstruction binary contruction error, command was '{}'\".format(self.raw_instruction))\n return ret\n\n\ndef parse(contents, symbols):\n ret = []\n for l in contents:\n ls = l.strip()\n if ls.startswith('@'):\n ret.append(AIns(ls[1:], symbols))\n else:\n ret.append(CIns(ls))\n\n return ret\n\n\nclass Symbols:\n def __init__(self):\n self.memptr = 16\n self.symbols = {\n 'R0': 0,\n 'R1': 1,\n 'R2': 2,\n 'R3': 3,\n 'R4': 4,\n 'R5': 5,\n 'R6': 6,\n 'R7': 7,\n 'R8': 8,\n 'R9': 9,\n 'R10': 10,\n 'R11': 11,\n 'R12': 12,\n 'R13': 13,\n 'R14': 14,\n 'R15': 15,\n 'SCREEN': 16384,\n 'KBD': 24576,\n 'SP': 0,\n 'LCL': 1,\n 'ARG': 2,\n 'THIS': 3,\n 'THAT': 4,\n }\n\n def fill_with_labels(self, contents):\n ret = []\n pos = 0\n for l in contents:\n ls = l.strip()\n if ls.startswith('(') and ls.endswith(')'):\n label = ls[1:-1]\n if label in self.symbols:\n raise ParseError('Label redefinition')\n else:\n self.symbols[label] = pos\n else:\n ret.append(l)\n pos += 1\n\n return ret\n\n def add(self, symbol):\n if symbol in self.symbols:\n raise ParseError('Variable redefinition')\n self.symbols[symbol] = self.memptr\n self.memptr += 1\n\n\ndef main():\n if len(sys.argv) < 1:\n exit(1)\n\n filename = 
sys.argv[1]\n\n contents = []\n with open(filename) as f:\n text = f.read()\n\n contents = (remove_inline_comments(text)).split('\\n')\n\n contents = filter(None, remove_comments(contents))\n\n symbols = Symbols()\n contents = symbols.fill_with_labels(contents)\n\n parsed = parse(contents, symbols)\n\n out_filename = \"{}.hack\".format(os.path.splitext(filename)[0])\n\n with open(out_filename, 'w') as f:\n for i in parsed:\n try:\n f.write(\"{}\\n\".format(i.get_binary()))\n except ParseError as e:\n print(e)\n exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
9,
14,
15,
16,
19
]
}
|
[
9,
14,
15,
16,
19
] |
"""
Utilities used by other modules.
"""
import csv
import datetime
import hashlib
import json
import re
import string
import subprocess
import uuid
import xml.etree.ElementTree as ET
from alta import ConfigurationFromYamlFile
from pkg_resources import resource_filename
from ..__details__ import __appname__
from appdirs import *
from comoda import ensure_dir
from shutil import copyfile
# Sample counts that indicate a run without barcodes — assumed convention;
# TODO confirm exact meaning against the callers of this module.
SAMPLES_WITHOUT_BARCODES = [2, 8]
# Fallback index-read lengths (cycles, as strings) when RunInfo values are absent.
DEFAULT_INDEX_CYCLES = dict(index='8', index1='8')
# Canonical progress labels used for pipeline bookkeeping.
PROGRESS_STATUS = dict(COMPLETED='completed', STARTED='started', TODO='todo')
class IEMRunInfoReader:
    """
    Illumina Experimental Manager RunInfo xml reader.
    """

    def __init__(self, f):
        # Parse RunInfo.xml once; keep the tree so set_index_cycles can
        # write modifications back to the same file.
        self.xml_file = f
        self.tree = ET.parse(self.xml_file)
        self.root = self.tree.getroot()

    def get_reads(self):
        """Return the attribute dicts of every <Read> element."""
        return [r.attrib for r in self.root.iter('Read')]

    def get_indexed_reads(self):
        """Return the reads flagged as index reads (IsIndexedRead == "Y").

        BUG FIX: returns a list instead of a lazy filter object so
        callers (e.g. get_index_cycles) can scan it more than once.
        """
        return [item for item in self.get_reads()
                if item["IsIndexedRead"] == "Y"]

    def get_index_cycles(self):
        """Return NumCycles for the index reads.

        'index' is the index read with Number == "2", 'index1' the other
        one; a missing read maps to None.
        """
        indexed_reads = self.get_indexed_reads()
        return dict(
            index=next((item['NumCycles'] for item in indexed_reads
                        if item['Number'] == "2"), None),
            index1=next((item['NumCycles'] for item in indexed_reads
                         if item['Number'] != "2"), None))

    @staticmethod
    def get_default_index_cycles():
        """Fallback cycle counts used when RunInfo values are unavailable."""
        return DEFAULT_INDEX_CYCLES

    def set_index_cycles(self, index_cycles, write=True):
        """Overwrite NumCycles of the index reads; optionally save the XML.

        :param index_cycles: dict with 'index' (read Number "2") and
            'index1' (the other index read) cycle counts.
        :param write: when True, write the modified tree back to disk.
        """
        for read in self.root.iter('Read'):
            if read.attrib["IsIndexedRead"] == "Y":
                # BUG FIX: the original used the 'index' key for *both*
                # branches, so the 'index1' value could never be applied.
                key = 'index' if read.attrib['Number'] == '2' else 'index1'
                value = index_cycles.get(key)
                if value is None:
                    value = DEFAULT_INDEX_CYCLES[key]
                read.attrib.update(NumCycles=value)
        if write:
            self.tree.write(self.xml_file)

    def is_paired_end_sequencing(self):
        """True when the run has more than one non-index (template) read."""
        # BUG FIX: materialize the filtered reads so len() works on
        # Python 3 (filter() returns an iterator there).
        reads = [item for item in self.get_reads()
                 if item["IsIndexedRead"] == "N"]
        return len(reads) != 1
class LogBook:
    """
    Logbook manager
    """

    def __init__(self, filename):
        self.filename = filename
        self.logfile = None
        self.logbook = dict()

    def dump(self):
        """Append the current logbook entry to the JSON log file."""
        if not os.path.isfile(self.filename):
            entries = [self.logbook]
        else:
            with open(self.filename) as feedsjson:
                entries = json.load(feedsjson)
            entries.append(self.logbook)
        # default=str serializes datetime/timedelta values as strings.
        with open(self.filename, mode='w') as f:
            f.write(json.dumps(entries, indent=4, sort_keys=True, default=str))

    def start(self, task_name, args=None):
        """Record the task name, its args and the start timestamp."""
        self.logbook.update(task_name=task_name)
        self.logbook.update(args=args)
        self.logbook.update(start_time=datetime.datetime.now())

    def end(self):
        """Record the end timestamp and execution time, then persist."""
        self.logbook.update(end_time=datetime.datetime.now())
        elapsed = self.logbook.get('end_time') - self.logbook.get('start_time')
        self.logbook.update(execution_time=elapsed)
        self.dump()
class IEMSampleSheetReader(csv.DictReader):
    """
    Illumina Experimental Manager SampleSheet reader.
    """

    def __init__(self, f):
        csv.DictReader.__init__(self, f, delimiter=',')
        self.header = ''
        self.data = ''

        first_line = f.readline()
        if not first_line.startswith('[Header]'):
            # BUG FIX: the original mixed a %-style placeholder with
            # str.format, so the message always contained a literal '%s'
            # instead of the file name.
            raise ValueError('{} is not an IEM samplesheet'.format(f.name))
        header = [first_line.strip()]
        l = f.readline()
        while not l.startswith('[Data]'):
            header.append(l.strip())  # ms-dos
            l = f.readline()
        else:
            header.append(l.strip())
        self.header = header

        # Rows after [Data]; NOTE: this reader is consumed by
        # get_body()/get_barcode_mask().
        self.data = csv.DictReader(f.readlines(), delimiter=',')

    def barcodes_have_the_same_size(self):
        """True when every lane uses a single (index, index1) length pair."""
        return False if self.get_barcode_mask() is None else True

    def get_body(self, label='Sample_Name', new_value='', replace=True):
        """Reassemble the samplesheet text, sanitizing sample/project names.

        When *replace* is True the *label* column is replaced by
        *new_value* in every row.
        """

        def sanitize(mystr):
            """
            Sanitize string in accordance with Illumina's documentation
            bcl2fastq2 Conversion Software v2.17 Guide
            """
            retainlist = "_-"
            return re.sub(r'[^\w' + retainlist + ']', '_', mystr)

        body = []
        for i in self.header:
            body.append(i)
            body.append('\n')
        # BUG FIX: string.join() only exists on Python 2; str.join is
        # equivalent and works on both Python 2 and 3.
        body.append(','.join(self.data.fieldnames))
        body.append('\n')
        to_be_sanitized = ['Sample_Project', 'Sample_Name']
        for row in self.data:
            for f in self.data.fieldnames:
                if replace and f == label:
                    body.append(new_value)
                else:
                    if f in to_be_sanitized and row[f]:
                        body.append(sanitize(row[f]))
                    else:
                        body.append(row[f])
                body.append(',')
            body.append('\n')
        return body

    def get_barcode_mask(self):
        """Per-lane index lengths, or None when a lane's lengths disagree."""
        barcodes_mask = dict()
        for row in self.data:
            index = len(row['index']) if 'index' in row else None
            index1 = None
            if 'index1' in row or 'index2' in row:
                index1 = len(row['index2']) if 'index2' in row else len(row['index1'])

            if row['Lane'] not in barcodes_mask:
                barcodes_mask[row['Lane']] = dict(
                    index=index,
                    index1=index1,
                )
            else:
                if index != barcodes_mask[row['Lane']]['index'] or \
                        index1 != barcodes_mask[row['Lane']]['index1']:
                    return None
        return barcodes_mask
class WeightedPath(object):
    """A filesystem path paired with a numeric weight used for ordering."""

    def __init__(self, path, weight):
        self.path = path
        self.weight = weight

    def __repr__(self):
        cls_name = type(self).__name__
        return '{}: {} {}'.format(cls_name, self.path, self.weight)

    def __cmp__(self, other):
        # Python 2 rich-comparison hook: order by weight when the other
        # object has one (no explicit return otherwise, as before).
        if hasattr(other, 'weight'):
            return self.weight.__cmp__(other.weight)
def get_conf(logger, config_file_from_cli=None, profile=None):
    """
    Load the YAML configuration for the requested profile.

    :param logger: logger
    :param config_file_from_cli: optional config file path from cli args
    :param profile: 'presta' or 'celery'; unknown values fall back to 'presta'
    :return: ConfigurationFromYamlFile instance
    """
    profile_files = {'presta': 'presta_config.yml',
                     'celery': 'celery_config.yml'}
    cf_label = profile_files.get(profile, profile_files['presta'])

    cf_path = config_file_setup(logger, cf_label,
                                cf_from_cli=config_file_from_cli)

    # Parse the chosen YAML file into a configuration object
    return ConfigurationFromYamlFile(cf_path)
def path_exists(path, logger, force=True):
    """
    Check whether *path* exists (after '~' expansion).

    :param path: path to check
    :param logger: logger
    :param force: when True, a missing path is fatal (log error, sys.exit);
                  when False, log a warning and return False
    :return: True if the path exists, False otherwise (force=False only)
    """
    if os.path.exists(os.path.expanduser(path)):
        return True

    msg = "path - {} - doesn't exists".format(path)
    if force:
        logger.error(msg)
        sys.exit()
    logger.warning(msg)
    return False
def sanitize_filename(filename):
    """Strip every character that is not a letter, digit, '-', '_' or '.'."""
    allowed = set('-_.' + string.ascii_letters + string.digits)
    return ''.join(ch for ch in filename if ch in allowed)
def format_dataset_filename(sample_label, lane=None, read=None, ext=None, uid=False):
    """
    Build a sanitized dataset filename from its components.

    :param sample_label: base sample name
    :param lane: optional lane token, used only when *read* is given
    :param read: optional read token
    :param ext: optional file extension (without leading dot)
    :param uid: when True, append a random uuid4 segment
    :return: sanitized filename string
    """
    parts = [sanitize_filename(sample_label)]
    if read:
        if lane:
            parts.append(lane)
        parts.append(read)
    filename = '_'.join(parts)

    if uid:
        filename = '.'.join([filename, str(uuid.uuid4())])
    if ext:
        filename = '.'.join([filename, ext])

    return sanitize_filename(filename)
def config_file_setup(logger, cf_label, cf_from_cli=None):
    """
    Create a config file if does not exists, copying it from the package
    default into the user_config_dir.
    Return a configuration file path from cli args if present, otherwise return
    a path from the user_config_dir
    :param logger: logger
    :param cf_label: label of the configuration file (required)
    :param cf_from_cli: path to configuration file from cli arg
    :return: Path
    """
    presta_config_dir = os.path.join(user_config_dir(__appname__))
    home_cf = os.path.join(presta_config_dir, cf_label)

    # Seed the user config dir from the packaged default on first run
    if not path_exists(home_cf, logger, force=False):
        logger.info('Creating config path {}'.format(presta_config_dir))
        ensure_dir(presta_config_dir)
        packaged_cf = resource_filename(__appname__,
                                        '/'.join(['config', cf_label]))
        copyfile(packaged_cf, home_cf)

    # Lowest weight wins: a CLI-supplied path beats the home-dir one
    candidates = []
    if cf_from_cli and path_exists(cf_from_cli, logger, force=False):
        candidates.append(WeightedPath(cf_from_cli, 0))
    if path_exists(home_cf, logger, force=False):
        candidates.append(WeightedPath(home_cf, 1))
    logger.debug("config file paths: {}".format(candidates))

    chosen = sorted(candidates)[0].path
    logger.info('Reading configuration from {}'.format(chosen))
    return chosen
def touch(path, logger):
    """
    Create *path* if missing and refresh its access/modification times.

    IOErrors are logged through *logger* rather than raised.
    """
    try:
        handle = open(path, 'a')
        try:
            os.utime(path, None)
        finally:
            handle.close()
    except IOError as e:
        logger.error("While touching {} file: {}".format(path, e.strerror))
def read_chunks(file_handle, chunk_size=8192):
    """
    Yield successive chunks of up to *chunk_size* bytes from *file_handle*
    until the stream is exhausted.
    """
    chunk = file_handle.read(chunk_size)
    while chunk:
        yield chunk
        chunk = file_handle.read(chunk_size)
def get_md5(file_handle):
    """Return the hex MD5 digest of a file-like object's contents."""
    digest = hashlib.md5()
    # Stream in 8 KiB chunks so arbitrarily large files fit in memory
    while True:
        block = file_handle.read(8192)
        if not block:
            break
        digest.update(block)
    return digest.hexdigest()
def check_progress_status(root_path, started_file, completed_file):
    """
    Classify a job's progress from its marker files directly under *root_path*.

    :param root_path: directory holding the marker files
    :param started_file: filename of the 'started' marker
    :param completed_file: filename of the 'completed' marker
    :return: PROGRESS_STATUS value: 'todo' when the started marker is absent,
             'started' when only the started marker exists or it is newer than
             the completed one, 'completed' otherwise
    """
    # BUG FIX: generator.next() was removed in Python 3; use the builtin next()
    localroot, dirnames, filenames = next(os.walk(root_path))
    if started_file not in filenames:
        return PROGRESS_STATUS.get('TODO')
    elif completed_file not in filenames:
        return PROGRESS_STATUS.get('STARTED')
    else:
        started_file = os.path.join(root_path, started_file)
        completed_file = os.path.join(root_path, completed_file)
        # A started marker newer than the completed one means the job restarted
        if os.path.getmtime(started_file) > os.path.getmtime(completed_file):
            return PROGRESS_STATUS.get('STARTED')
        return PROGRESS_STATUS.get('COMPLETED')
def runJob(cmd, logger):
    """
    Run *cmd* as a subprocess, capturing stdout and stderr together.

    :param cmd: command and its arguments as a list
    :param logger: logger used to report failures
    :return: True when the command exits with status 0, False otherwise
    """
    try:
        # BUG FIX: Popen never raises CalledProcessError, so the handler below
        # was unreachable and failing commands were reported as success;
        # check_output restores the intended error path.
        subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        return True
    except subprocess.CalledProcessError as e:
        logger.info(e)
        if e.output:
            logger.info("command output: %s", e.output)
        else:
            logger.info("no command output available")
        return False
|
normal
|
{
"blob_id": "b16c847912944e0563492d35768b5b5bf3a506c7",
"index": 1569,
"step-1": "<mask token>\n\n\nclass IEMRunInfoReader:\n \"\"\"\n Illumina Experimental Manager RunInfo xml reader.\n \"\"\"\n\n def __init__(self, f):\n self.xml_file = f\n self.tree = ET.parse(self.xml_file)\n self.root = self.tree.getroot()\n\n def get_reads(self):\n reads = [r.attrib for r in self.root.iter('Read')]\n return reads\n\n def get_indexed_reads(self):\n reads = self.get_reads()\n return filter(lambda item: item['IsIndexedRead'] == 'Y', reads)\n\n def get_index_cycles(self):\n indexed_reads = self.get_indexed_reads()\n return dict(index=next((item['NumCycles'] for item in indexed_reads if\n item['IsIndexedRead'] == 'Y' and item['Number'] == '2'), None),\n index1=next((item['NumCycles'] for item in indexed_reads if \n item['IsIndexedRead'] == 'Y' and item['Number'] != '2'), None))\n\n @staticmethod\n def get_default_index_cycles():\n return DEFAULT_INDEX_CYCLES\n\n def set_index_cycles(self, index_cycles, write=True):\n for read in self.root.iter('Read'):\n if read.attrib['IsIndexedRead'] == 'Y':\n if read.attrib['Number'] == '2':\n read.attrib.update(NumCycles=index_cycles.get('index',\n DEFAULT_INDEX_CYCLES['index']))\n else:\n read.attrib.update(NumCycles=index_cycles.get('index',\n DEFAULT_INDEX_CYCLES['index']))\n if write:\n self.tree.write(self.xml_file)\n\n def is_paired_end_sequencing(self):\n reads = self.get_reads()\n reads = filter(lambda item: item['IsIndexedRead'] == 'N', reads)\n if len(reads) == 1:\n return False\n return True\n\n\nclass LogBook:\n \"\"\"\n Logbook manager\n \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n self.logfile = None\n self.logbook = dict()\n\n def dump(self):\n a = []\n if not os.path.isfile(self.filename):\n a.append(self.logbook)\n with open(self.filename, mode='w') as f:\n f.write(json.dumps(a, indent=4, sort_keys=True, default=str))\n else:\n with open(self.filename) as feedsjson:\n feeds = json.load(feedsjson)\n feeds.append(self.logbook)\n with open(self.filename, mode='w') as f:\n 
f.write(json.dumps(feeds, indent=4, sort_keys=True, default\n =str))\n\n def start(self, task_name, args=None):\n self.logbook.update(task_name=task_name)\n self.logbook.update(args=args)\n self.logbook.update(start_time=datetime.datetime.now())\n\n def end(self):\n self.logbook.update(end_time=datetime.datetime.now())\n execution_time = self.logbook.get('end_time') - self.logbook.get(\n 'start_time')\n self.logbook.update(execution_time=execution_time)\n self.dump()\n\n\nclass IEMSampleSheetReader(csv.DictReader):\n \"\"\"\n Illumina Experimental Manager SampleSheet reader.\n \"\"\"\n\n def __init__(self, f):\n csv.DictReader.__init__(self, f, delimiter=',')\n self.header = ''\n self.data = ''\n first_line = f.readline()\n if not first_line.startswith('[Header]'):\n raise ValueError('%s is not an IEM samplesheet'.format(f.name))\n header = [first_line.strip()]\n l = f.readline()\n while not l.startswith('[Data]'):\n header.append(l.strip())\n l = f.readline()\n else:\n header.append(l.strip())\n self.header = header\n self.data = csv.DictReader(f.readlines(), delimiter=',')\n\n def barcodes_have_the_same_size(self):\n return False if self.get_barcode_mask() is None else True\n\n def get_body(self, label='Sample_Name', new_value='', replace=True):\n\n def sanitize(mystr):\n \"\"\"\n Sanitize string in accordance with Illumina's documentation\n bcl2fastq2 Conversion Software v2.17 Guide\n \"\"\"\n retainlist = '_-'\n return re.sub('[^\\\\w' + retainlist + ']', '_', mystr)\n body = []\n for i in self.header:\n body.append(i)\n body.append('\\n')\n body.append(string.join(self.data.fieldnames, ','))\n body.append('\\n')\n to_be_sanitized = ['Sample_Project', 'Sample_Name']\n for row in self.data:\n for f in self.data.fieldnames:\n if replace and f == label:\n body.append(new_value)\n elif f in to_be_sanitized and row[f]:\n body.append(sanitize(row[f]))\n else:\n body.append(row[f])\n body.append(',')\n body.append('\\n')\n return body\n\n def get_barcode_mask(self):\n 
barcodes_mask = dict()\n for row in self.data:\n index = len(row['index']) if 'index' in row else None\n index1 = None\n if 'index1' in row or 'index2' in row:\n index1 = len(row['index2']) if 'index2' in row else len(row\n ['index1'])\n if row['Lane'] not in barcodes_mask:\n barcodes_mask[row['Lane']] = dict(index=index, index1=index1)\n elif index != barcodes_mask[row['Lane']]['index'\n ] or index1 != barcodes_mask[row['Lane']]['index1']:\n return None\n return barcodes_mask\n\n\nclass WeightedPath(object):\n\n def __init__(self, path, weight):\n self.path = path\n self.weight = weight\n\n def __repr__(self):\n return '{}: {} {}'.format(self.__class__.__name__, self.path, self.\n weight)\n\n def __cmp__(self, other):\n if hasattr(other, 'weight'):\n return self.weight.__cmp__(other.weight)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass IEMRunInfoReader:\n \"\"\"\n Illumina Experimental Manager RunInfo xml reader.\n \"\"\"\n\n def __init__(self, f):\n self.xml_file = f\n self.tree = ET.parse(self.xml_file)\n self.root = self.tree.getroot()\n\n def get_reads(self):\n reads = [r.attrib for r in self.root.iter('Read')]\n return reads\n\n def get_indexed_reads(self):\n reads = self.get_reads()\n return filter(lambda item: item['IsIndexedRead'] == 'Y', reads)\n\n def get_index_cycles(self):\n indexed_reads = self.get_indexed_reads()\n return dict(index=next((item['NumCycles'] for item in indexed_reads if\n item['IsIndexedRead'] == 'Y' and item['Number'] == '2'), None),\n index1=next((item['NumCycles'] for item in indexed_reads if \n item['IsIndexedRead'] == 'Y' and item['Number'] != '2'), None))\n\n @staticmethod\n def get_default_index_cycles():\n return DEFAULT_INDEX_CYCLES\n\n def set_index_cycles(self, index_cycles, write=True):\n for read in self.root.iter('Read'):\n if read.attrib['IsIndexedRead'] == 'Y':\n if read.attrib['Number'] == '2':\n read.attrib.update(NumCycles=index_cycles.get('index',\n DEFAULT_INDEX_CYCLES['index']))\n else:\n read.attrib.update(NumCycles=index_cycles.get('index',\n DEFAULT_INDEX_CYCLES['index']))\n if write:\n self.tree.write(self.xml_file)\n\n def is_paired_end_sequencing(self):\n reads = self.get_reads()\n reads = filter(lambda item: item['IsIndexedRead'] == 'N', reads)\n if len(reads) == 1:\n return False\n return True\n\n\nclass LogBook:\n \"\"\"\n Logbook manager\n \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n self.logfile = None\n self.logbook = dict()\n\n def dump(self):\n a = []\n if not os.path.isfile(self.filename):\n a.append(self.logbook)\n with open(self.filename, mode='w') as f:\n f.write(json.dumps(a, indent=4, sort_keys=True, default=str))\n else:\n with open(self.filename) as feedsjson:\n feeds = json.load(feedsjson)\n feeds.append(self.logbook)\n with open(self.filename, mode='w') as f:\n 
f.write(json.dumps(feeds, indent=4, sort_keys=True, default\n =str))\n\n def start(self, task_name, args=None):\n self.logbook.update(task_name=task_name)\n self.logbook.update(args=args)\n self.logbook.update(start_time=datetime.datetime.now())\n\n def end(self):\n self.logbook.update(end_time=datetime.datetime.now())\n execution_time = self.logbook.get('end_time') - self.logbook.get(\n 'start_time')\n self.logbook.update(execution_time=execution_time)\n self.dump()\n\n\nclass IEMSampleSheetReader(csv.DictReader):\n \"\"\"\n Illumina Experimental Manager SampleSheet reader.\n \"\"\"\n\n def __init__(self, f):\n csv.DictReader.__init__(self, f, delimiter=',')\n self.header = ''\n self.data = ''\n first_line = f.readline()\n if not first_line.startswith('[Header]'):\n raise ValueError('%s is not an IEM samplesheet'.format(f.name))\n header = [first_line.strip()]\n l = f.readline()\n while not l.startswith('[Data]'):\n header.append(l.strip())\n l = f.readline()\n else:\n header.append(l.strip())\n self.header = header\n self.data = csv.DictReader(f.readlines(), delimiter=',')\n\n def barcodes_have_the_same_size(self):\n return False if self.get_barcode_mask() is None else True\n\n def get_body(self, label='Sample_Name', new_value='', replace=True):\n\n def sanitize(mystr):\n \"\"\"\n Sanitize string in accordance with Illumina's documentation\n bcl2fastq2 Conversion Software v2.17 Guide\n \"\"\"\n retainlist = '_-'\n return re.sub('[^\\\\w' + retainlist + ']', '_', mystr)\n body = []\n for i in self.header:\n body.append(i)\n body.append('\\n')\n body.append(string.join(self.data.fieldnames, ','))\n body.append('\\n')\n to_be_sanitized = ['Sample_Project', 'Sample_Name']\n for row in self.data:\n for f in self.data.fieldnames:\n if replace and f == label:\n body.append(new_value)\n elif f in to_be_sanitized and row[f]:\n body.append(sanitize(row[f]))\n else:\n body.append(row[f])\n body.append(',')\n body.append('\\n')\n return body\n\n def get_barcode_mask(self):\n 
barcodes_mask = dict()\n for row in self.data:\n index = len(row['index']) if 'index' in row else None\n index1 = None\n if 'index1' in row or 'index2' in row:\n index1 = len(row['index2']) if 'index2' in row else len(row\n ['index1'])\n if row['Lane'] not in barcodes_mask:\n barcodes_mask[row['Lane']] = dict(index=index, index1=index1)\n elif index != barcodes_mask[row['Lane']]['index'\n ] or index1 != barcodes_mask[row['Lane']]['index1']:\n return None\n return barcodes_mask\n\n\nclass WeightedPath(object):\n\n def __init__(self, path, weight):\n self.path = path\n self.weight = weight\n\n def __repr__(self):\n return '{}: {} {}'.format(self.__class__.__name__, self.path, self.\n weight)\n\n def __cmp__(self, other):\n if hasattr(other, 'weight'):\n return self.weight.__cmp__(other.weight)\n\n\n<mask token>\n\n\ndef format_dataset_filename(sample_label, lane=None, read=None, ext=None,\n uid=False):\n filename = sanitize_filename(sample_label)\n if read:\n filename = '_'.join([filename, lane, read]) if lane else '_'.join([\n filename, read])\n if uid:\n filename = '.'.join([filename, str(uuid.uuid4())])\n if ext:\n filename = '.'.join([filename, ext])\n return sanitize_filename(filename)\n\n\n<mask token>\n\n\ndef touch(path, logger):\n try:\n with open(path, 'a'):\n os.utime(path, None)\n except IOError as e:\n logger.error('While touching {} file: {}'.format(path, e.strerror))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass IEMRunInfoReader:\n \"\"\"\n Illumina Experimental Manager RunInfo xml reader.\n \"\"\"\n\n def __init__(self, f):\n self.xml_file = f\n self.tree = ET.parse(self.xml_file)\n self.root = self.tree.getroot()\n\n def get_reads(self):\n reads = [r.attrib for r in self.root.iter('Read')]\n return reads\n\n def get_indexed_reads(self):\n reads = self.get_reads()\n return filter(lambda item: item['IsIndexedRead'] == 'Y', reads)\n\n def get_index_cycles(self):\n indexed_reads = self.get_indexed_reads()\n return dict(index=next((item['NumCycles'] for item in indexed_reads if\n item['IsIndexedRead'] == 'Y' and item['Number'] == '2'), None),\n index1=next((item['NumCycles'] for item in indexed_reads if \n item['IsIndexedRead'] == 'Y' and item['Number'] != '2'), None))\n\n @staticmethod\n def get_default_index_cycles():\n return DEFAULT_INDEX_CYCLES\n\n def set_index_cycles(self, index_cycles, write=True):\n for read in self.root.iter('Read'):\n if read.attrib['IsIndexedRead'] == 'Y':\n if read.attrib['Number'] == '2':\n read.attrib.update(NumCycles=index_cycles.get('index',\n DEFAULT_INDEX_CYCLES['index']))\n else:\n read.attrib.update(NumCycles=index_cycles.get('index',\n DEFAULT_INDEX_CYCLES['index']))\n if write:\n self.tree.write(self.xml_file)\n\n def is_paired_end_sequencing(self):\n reads = self.get_reads()\n reads = filter(lambda item: item['IsIndexedRead'] == 'N', reads)\n if len(reads) == 1:\n return False\n return True\n\n\nclass LogBook:\n \"\"\"\n Logbook manager\n \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n self.logfile = None\n self.logbook = dict()\n\n def dump(self):\n a = []\n if not os.path.isfile(self.filename):\n a.append(self.logbook)\n with open(self.filename, mode='w') as f:\n f.write(json.dumps(a, indent=4, sort_keys=True, default=str))\n else:\n with open(self.filename) as feedsjson:\n feeds = json.load(feedsjson)\n feeds.append(self.logbook)\n with open(self.filename, mode='w') as f:\n 
f.write(json.dumps(feeds, indent=4, sort_keys=True, default\n =str))\n\n def start(self, task_name, args=None):\n self.logbook.update(task_name=task_name)\n self.logbook.update(args=args)\n self.logbook.update(start_time=datetime.datetime.now())\n\n def end(self):\n self.logbook.update(end_time=datetime.datetime.now())\n execution_time = self.logbook.get('end_time') - self.logbook.get(\n 'start_time')\n self.logbook.update(execution_time=execution_time)\n self.dump()\n\n\nclass IEMSampleSheetReader(csv.DictReader):\n \"\"\"\n Illumina Experimental Manager SampleSheet reader.\n \"\"\"\n\n def __init__(self, f):\n csv.DictReader.__init__(self, f, delimiter=',')\n self.header = ''\n self.data = ''\n first_line = f.readline()\n if not first_line.startswith('[Header]'):\n raise ValueError('%s is not an IEM samplesheet'.format(f.name))\n header = [first_line.strip()]\n l = f.readline()\n while not l.startswith('[Data]'):\n header.append(l.strip())\n l = f.readline()\n else:\n header.append(l.strip())\n self.header = header\n self.data = csv.DictReader(f.readlines(), delimiter=',')\n\n def barcodes_have_the_same_size(self):\n return False if self.get_barcode_mask() is None else True\n\n def get_body(self, label='Sample_Name', new_value='', replace=True):\n\n def sanitize(mystr):\n \"\"\"\n Sanitize string in accordance with Illumina's documentation\n bcl2fastq2 Conversion Software v2.17 Guide\n \"\"\"\n retainlist = '_-'\n return re.sub('[^\\\\w' + retainlist + ']', '_', mystr)\n body = []\n for i in self.header:\n body.append(i)\n body.append('\\n')\n body.append(string.join(self.data.fieldnames, ','))\n body.append('\\n')\n to_be_sanitized = ['Sample_Project', 'Sample_Name']\n for row in self.data:\n for f in self.data.fieldnames:\n if replace and f == label:\n body.append(new_value)\n elif f in to_be_sanitized and row[f]:\n body.append(sanitize(row[f]))\n else:\n body.append(row[f])\n body.append(',')\n body.append('\\n')\n return body\n\n def get_barcode_mask(self):\n 
barcodes_mask = dict()\n for row in self.data:\n index = len(row['index']) if 'index' in row else None\n index1 = None\n if 'index1' in row or 'index2' in row:\n index1 = len(row['index2']) if 'index2' in row else len(row\n ['index1'])\n if row['Lane'] not in barcodes_mask:\n barcodes_mask[row['Lane']] = dict(index=index, index1=index1)\n elif index != barcodes_mask[row['Lane']]['index'\n ] or index1 != barcodes_mask[row['Lane']]['index1']:\n return None\n return barcodes_mask\n\n\nclass WeightedPath(object):\n\n def __init__(self, path, weight):\n self.path = path\n self.weight = weight\n\n def __repr__(self):\n return '{}: {} {}'.format(self.__class__.__name__, self.path, self.\n weight)\n\n def __cmp__(self, other):\n if hasattr(other, 'weight'):\n return self.weight.__cmp__(other.weight)\n\n\n<mask token>\n\n\ndef format_dataset_filename(sample_label, lane=None, read=None, ext=None,\n uid=False):\n filename = sanitize_filename(sample_label)\n if read:\n filename = '_'.join([filename, lane, read]) if lane else '_'.join([\n filename, read])\n if uid:\n filename = '.'.join([filename, str(uuid.uuid4())])\n if ext:\n filename = '.'.join([filename, ext])\n return sanitize_filename(filename)\n\n\ndef config_file_setup(logger, cf_label, cf_from_cli=None):\n \"\"\"\n Create a config file if does not exists, copying it from the package\n default into the user_config_dir.\n Return a configuration file path from cli args if present, otherwise return\n a path from the user_config_dir\n :param logger: logger\n :param cf_label: label of the configuration file (required)\n :param cf_from_cli: path to configuration file from cli arg\n :return: Path\n \"\"\"\n presta_config_dir = os.path.join(user_config_dir(__appname__))\n config_file_from_home = os.path.join(presta_config_dir, cf_label)\n if not path_exists(config_file_from_home, logger, force=False):\n logger.info('Creating config path {}'.format(presta_config_dir))\n ensure_dir(presta_config_dir)\n config_file_path = 
'/'.join(['config', cf_label])\n config_file_from_package = resource_filename(__appname__,\n config_file_path)\n copyfile(config_file_from_package, config_file_from_home)\n config_file_paths = []\n if cf_from_cli and path_exists(cf_from_cli, logger, force=False):\n config_file_paths.append(WeightedPath(cf_from_cli, 0))\n if path_exists(config_file_from_home, logger, force=False):\n config_file_paths.append(WeightedPath(config_file_from_home, 1))\n logger.debug('config file paths: {}'.format(config_file_paths))\n config_file_path = sorted(config_file_paths)[0].path\n logger.info('Reading configuration from {}'.format(config_file_path))\n return config_file_path\n\n\ndef touch(path, logger):\n try:\n with open(path, 'a'):\n os.utime(path, None)\n except IOError as e:\n logger.error('While touching {} file: {}'.format(path, e.strerror))\n\n\n<mask token>\n",
"step-4": "<mask token>\nSAMPLES_WITHOUT_BARCODES = [2, 8]\nDEFAULT_INDEX_CYCLES = dict(index='8', index1='8')\nPROGRESS_STATUS = dict(COMPLETED='completed', STARTED='started', TODO='todo')\n\n\nclass IEMRunInfoReader:\n \"\"\"\n Illumina Experimental Manager RunInfo xml reader.\n \"\"\"\n\n def __init__(self, f):\n self.xml_file = f\n self.tree = ET.parse(self.xml_file)\n self.root = self.tree.getroot()\n\n def get_reads(self):\n reads = [r.attrib for r in self.root.iter('Read')]\n return reads\n\n def get_indexed_reads(self):\n reads = self.get_reads()\n return filter(lambda item: item['IsIndexedRead'] == 'Y', reads)\n\n def get_index_cycles(self):\n indexed_reads = self.get_indexed_reads()\n return dict(index=next((item['NumCycles'] for item in indexed_reads if\n item['IsIndexedRead'] == 'Y' and item['Number'] == '2'), None),\n index1=next((item['NumCycles'] for item in indexed_reads if \n item['IsIndexedRead'] == 'Y' and item['Number'] != '2'), None))\n\n @staticmethod\n def get_default_index_cycles():\n return DEFAULT_INDEX_CYCLES\n\n def set_index_cycles(self, index_cycles, write=True):\n for read in self.root.iter('Read'):\n if read.attrib['IsIndexedRead'] == 'Y':\n if read.attrib['Number'] == '2':\n read.attrib.update(NumCycles=index_cycles.get('index',\n DEFAULT_INDEX_CYCLES['index']))\n else:\n read.attrib.update(NumCycles=index_cycles.get('index',\n DEFAULT_INDEX_CYCLES['index']))\n if write:\n self.tree.write(self.xml_file)\n\n def is_paired_end_sequencing(self):\n reads = self.get_reads()\n reads = filter(lambda item: item['IsIndexedRead'] == 'N', reads)\n if len(reads) == 1:\n return False\n return True\n\n\nclass LogBook:\n \"\"\"\n Logbook manager\n \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n self.logfile = None\n self.logbook = dict()\n\n def dump(self):\n a = []\n if not os.path.isfile(self.filename):\n a.append(self.logbook)\n with open(self.filename, mode='w') as f:\n f.write(json.dumps(a, indent=4, sort_keys=True, 
default=str))\n else:\n with open(self.filename) as feedsjson:\n feeds = json.load(feedsjson)\n feeds.append(self.logbook)\n with open(self.filename, mode='w') as f:\n f.write(json.dumps(feeds, indent=4, sort_keys=True, default\n =str))\n\n def start(self, task_name, args=None):\n self.logbook.update(task_name=task_name)\n self.logbook.update(args=args)\n self.logbook.update(start_time=datetime.datetime.now())\n\n def end(self):\n self.logbook.update(end_time=datetime.datetime.now())\n execution_time = self.logbook.get('end_time') - self.logbook.get(\n 'start_time')\n self.logbook.update(execution_time=execution_time)\n self.dump()\n\n\nclass IEMSampleSheetReader(csv.DictReader):\n \"\"\"\n Illumina Experimental Manager SampleSheet reader.\n \"\"\"\n\n def __init__(self, f):\n csv.DictReader.__init__(self, f, delimiter=',')\n self.header = ''\n self.data = ''\n first_line = f.readline()\n if not first_line.startswith('[Header]'):\n raise ValueError('%s is not an IEM samplesheet'.format(f.name))\n header = [first_line.strip()]\n l = f.readline()\n while not l.startswith('[Data]'):\n header.append(l.strip())\n l = f.readline()\n else:\n header.append(l.strip())\n self.header = header\n self.data = csv.DictReader(f.readlines(), delimiter=',')\n\n def barcodes_have_the_same_size(self):\n return False if self.get_barcode_mask() is None else True\n\n def get_body(self, label='Sample_Name', new_value='', replace=True):\n\n def sanitize(mystr):\n \"\"\"\n Sanitize string in accordance with Illumina's documentation\n bcl2fastq2 Conversion Software v2.17 Guide\n \"\"\"\n retainlist = '_-'\n return re.sub('[^\\\\w' + retainlist + ']', '_', mystr)\n body = []\n for i in self.header:\n body.append(i)\n body.append('\\n')\n body.append(string.join(self.data.fieldnames, ','))\n body.append('\\n')\n to_be_sanitized = ['Sample_Project', 'Sample_Name']\n for row in self.data:\n for f in self.data.fieldnames:\n if replace and f == label:\n body.append(new_value)\n elif f in 
to_be_sanitized and row[f]:\n body.append(sanitize(row[f]))\n else:\n body.append(row[f])\n body.append(',')\n body.append('\\n')\n return body\n\n def get_barcode_mask(self):\n barcodes_mask = dict()\n for row in self.data:\n index = len(row['index']) if 'index' in row else None\n index1 = None\n if 'index1' in row or 'index2' in row:\n index1 = len(row['index2']) if 'index2' in row else len(row\n ['index1'])\n if row['Lane'] not in barcodes_mask:\n barcodes_mask[row['Lane']] = dict(index=index, index1=index1)\n elif index != barcodes_mask[row['Lane']]['index'\n ] or index1 != barcodes_mask[row['Lane']]['index1']:\n return None\n return barcodes_mask\n\n\nclass WeightedPath(object):\n\n def __init__(self, path, weight):\n self.path = path\n self.weight = weight\n\n def __repr__(self):\n return '{}: {} {}'.format(self.__class__.__name__, self.path, self.\n weight)\n\n def __cmp__(self, other):\n if hasattr(other, 'weight'):\n return self.weight.__cmp__(other.weight)\n\n\ndef get_conf(logger, config_file_from_cli=None, profile=None):\n profiles = {'presta': 'presta_config.yml', 'celery': 'celery_config.yml'}\n default_config_file_label = profiles.get(profile, profiles['presta'])\n config_file_path = config_file_setup(logger, default_config_file_label,\n cf_from_cli=config_file_from_cli)\n return ConfigurationFromYamlFile(config_file_path)\n\n\ndef path_exists(path, logger, force=True):\n\n def file_missing(path, logger, force):\n msg = \"path - {} - doesn't exists\".format(path)\n if force:\n logger.error(msg)\n sys.exit()\n logger.warning(msg)\n return False\n return True if os.path.exists(os.path.expanduser(path)) else file_missing(\n path, logger, force)\n\n\ndef sanitize_filename(filename):\n valid_chars = '-_.%s%s' % (string.ascii_letters, string.digits)\n return ''.join(c for c in filename if c in valid_chars)\n\n\ndef format_dataset_filename(sample_label, lane=None, read=None, ext=None,\n uid=False):\n filename = sanitize_filename(sample_label)\n if read:\n 
filename = '_'.join([filename, lane, read]) if lane else '_'.join([\n filename, read])\n if uid:\n filename = '.'.join([filename, str(uuid.uuid4())])\n if ext:\n filename = '.'.join([filename, ext])\n return sanitize_filename(filename)\n\n\ndef config_file_setup(logger, cf_label, cf_from_cli=None):\n \"\"\"\n Create a config file if does not exists, copying it from the package\n default into the user_config_dir.\n Return a configuration file path from cli args if present, otherwise return\n a path from the user_config_dir\n :param logger: logger\n :param cf_label: label of the configuration file (required)\n :param cf_from_cli: path to configuration file from cli arg\n :return: Path\n \"\"\"\n presta_config_dir = os.path.join(user_config_dir(__appname__))\n config_file_from_home = os.path.join(presta_config_dir, cf_label)\n if not path_exists(config_file_from_home, logger, force=False):\n logger.info('Creating config path {}'.format(presta_config_dir))\n ensure_dir(presta_config_dir)\n config_file_path = '/'.join(['config', cf_label])\n config_file_from_package = resource_filename(__appname__,\n config_file_path)\n copyfile(config_file_from_package, config_file_from_home)\n config_file_paths = []\n if cf_from_cli and path_exists(cf_from_cli, logger, force=False):\n config_file_paths.append(WeightedPath(cf_from_cli, 0))\n if path_exists(config_file_from_home, logger, force=False):\n config_file_paths.append(WeightedPath(config_file_from_home, 1))\n logger.debug('config file paths: {}'.format(config_file_paths))\n config_file_path = sorted(config_file_paths)[0].path\n logger.info('Reading configuration from {}'.format(config_file_path))\n return config_file_path\n\n\ndef touch(path, logger):\n try:\n with open(path, 'a'):\n os.utime(path, None)\n except IOError as e:\n logger.error('While touching {} file: {}'.format(path, e.strerror))\n\n\ndef read_chunks(file_handle, chunk_size=8192):\n while True:\n data = file_handle.read(chunk_size)\n if not data:\n break\n 
yield data\n\n\ndef get_md5(file_handle):\n hasher = hashlib.md5()\n for chunk in read_chunks(file_handle):\n hasher.update(chunk)\n return hasher.hexdigest()\n\n\ndef check_progress_status(root_path, started_file, completed_file):\n localroot, dirnames, filenames = os.walk(root_path).next()\n if started_file not in filenames:\n return PROGRESS_STATUS.get('TODO')\n elif completed_file not in filenames:\n return PROGRESS_STATUS.get('STARTED')\n else:\n started_file = os.path.join(root_path, started_file)\n completed_file = os.path.join(root_path, completed_file)\n if os.path.getmtime(started_file) > os.path.getmtime(completed_file):\n return PROGRESS_STATUS.get('STARTED')\n return PROGRESS_STATUS.get('COMPLETED')\n\n\ndef runJob(cmd, logger):\n try:\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=\n subprocess.STDOUT)\n output = process.communicate()[0]\n ret = process.wait()\n return True\n except subprocess.CalledProcessError as e:\n logger.info(e)\n if e.output:\n logger.info('command output: %s', e.output)\n else:\n logger.info('no command output available')\n return False\n",
"step-5": "\"\"\"\nUtilities used by other modules.\n\"\"\"\n\nimport csv\nimport datetime\nimport hashlib\nimport json\nimport re\nimport string\nimport subprocess\nimport uuid\n\nimport xml.etree.ElementTree as ET\nfrom alta import ConfigurationFromYamlFile\nfrom pkg_resources import resource_filename\nfrom ..__details__ import __appname__\nfrom appdirs import *\nfrom comoda import ensure_dir\nfrom shutil import copyfile\n\n\nSAMPLES_WITHOUT_BARCODES = [2, 8]\nDEFAULT_INDEX_CYCLES = dict(index='8', index1='8')\nPROGRESS_STATUS = dict(COMPLETED='completed', STARTED='started', TODO='todo')\n\n\nclass IEMRunInfoReader:\n \"\"\"\n Illumina Experimental Manager RunInfo xml reader.\n \"\"\"\n\n def __init__(self, f):\n self.xml_file = f\n self.tree = ET.parse(self.xml_file)\n self.root = self.tree.getroot()\n\n def get_reads(self):\n reads = [r.attrib for r in self.root.iter('Read')]\n return reads\n\n def get_indexed_reads(self):\n reads = self.get_reads()\n return filter(lambda item: item[\"IsIndexedRead\"] == \"Y\", reads)\n\n def get_index_cycles(self):\n indexed_reads = self.get_indexed_reads()\n return dict(\n index=next((item['NumCycles'] for item in indexed_reads\n if item[\"IsIndexedRead\"] == \"Y\" and item['Number'] == \"2\"), None),\n index1=next((item['NumCycles'] for item in indexed_reads\n if item[\"IsIndexedRead\"] == \"Y\" and item['Number'] != \"2\"), None))\n\n @staticmethod\n def get_default_index_cycles():\n return DEFAULT_INDEX_CYCLES\n\n def set_index_cycles(self, index_cycles, write=True):\n\n for read in self.root.iter('Read'):\n if read.attrib[\"IsIndexedRead\"] == \"Y\":\n if read.attrib['Number'] == '2':\n read.attrib.update(NumCycles=index_cycles.get('index', DEFAULT_INDEX_CYCLES['index']))\n else:\n read.attrib.update(NumCycles=index_cycles.get('index', DEFAULT_INDEX_CYCLES['index']))\n if write:\n self.tree.write(self.xml_file)\n\n def is_paired_end_sequencing(self):\n reads = self.get_reads()\n reads = filter(lambda item: 
item[\"IsIndexedRead\"] == \"N\", reads)\n\n if len(reads) == 1:\n return False\n\n return True\n\n\nclass LogBook:\n \"\"\"\n Logbook manager\n \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n self.logfile = None\n self.logbook = dict()\n\n def dump(self):\n a = []\n if not os.path.isfile(self.filename):\n a.append(self.logbook)\n with open(self.filename, mode='w') as f:\n f.write(json.dumps(a, indent=4, sort_keys=True, default=str))\n else:\n with open(self.filename) as feedsjson:\n feeds = json.load(feedsjson)\n\n feeds.append(self.logbook)\n with open(self.filename, mode='w') as f:\n f.write(json.dumps(feeds, indent=4, sort_keys=True, default=str))\n\n def start(self, task_name, args=None):\n self.logbook.update(task_name=task_name)\n self.logbook.update(args=args)\n self.logbook.update(start_time=datetime.datetime.now())\n\n def end(self):\n self.logbook.update(end_time=datetime.datetime.now())\n execution_time = self.logbook.get('end_time') - self.logbook.get('start_time')\n self.logbook.update(execution_time=execution_time)\n self.dump()\n\n\nclass IEMSampleSheetReader(csv.DictReader):\n \"\"\"\n Illumina Experimental Manager SampleSheet reader.\n \"\"\"\n\n def __init__(self, f):\n csv.DictReader.__init__(self, f, delimiter=',')\n self.header = ''\n self.data = ''\n\n first_line = f.readline()\n if not first_line.startswith('[Header]'):\n raise ValueError('%s is not an IEM samplesheet'.format(f.name))\n header = [first_line.strip()]\n l = f.readline()\n while not l.startswith('[Data]'):\n header.append(l.strip()) # ms-dos\n l = f.readline()\n else:\n header.append(l.strip())\n self.header = header\n\n self.data = csv.DictReader(f.readlines(), delimiter=',')\n\n def barcodes_have_the_same_size(self):\n return False if self.get_barcode_mask() is None else True\n\n def get_body(self, label='Sample_Name', new_value='', replace=True):\n def sanitize(mystr):\n \"\"\"\n Sanitize string in accordance with Illumina's documentation\n bcl2fastq2 
Conversion Software v2.17 Guide\n \"\"\"\n retainlist = \"_-\"\n return re.sub(r'[^\\w' + retainlist + ']', '_', mystr)\n\n body = []\n for i in self.header:\n body.append(i)\n body.append('\\n')\n body.append(string.join(self.data.fieldnames, ','))\n body.append('\\n')\n\n to_be_sanitized = ['Sample_Project', 'Sample_Name']\n\n for row in self.data:\n for f in self.data.fieldnames:\n if replace and f == label:\n body.append(new_value)\n else:\n if f in to_be_sanitized and row[f]:\n body.append(sanitize(row[f]))\n else:\n body.append(row[f])\n body.append(',')\n body.append('\\n')\n\n return body\n\n def get_barcode_mask(self):\n barcodes_mask = dict()\n\n for row in self.data:\n index = len(row['index']) if 'index' in row else None\n index1 = None\n\n if 'index1' in row or 'index2' in row:\n index1 = len(row['index2']) if 'index2' in row else len(row['index1'])\n\n if row['Lane'] not in barcodes_mask:\n barcodes_mask[row['Lane']] = dict(\n index=index,\n index1=index1,\n )\n else:\n if index != barcodes_mask[row['Lane']]['index'] or index1 != barcodes_mask[row['Lane']]['index1']:\n return None\n\n return barcodes_mask\n\n\nclass WeightedPath(object):\n def __init__(self, path, weight):\n self.path = path\n self.weight = weight\n\n def __repr__(self):\n return '{}: {} {}'.format(self.__class__.__name__,\n self.path,\n self.weight)\n\n def __cmp__(self, other):\n if hasattr(other, 'weight'):\n return self.weight.__cmp__(other.weight)\n\n\ndef get_conf(logger, config_file_from_cli=None, profile=None):\n profiles = {'presta': 'presta_config.yml',\n 'celery': 'celery_config.yml'}\n default_config_file_label = profiles.get(profile, profiles['presta'])\n\n config_file_path = config_file_setup(logger, default_config_file_label,\n cf_from_cli=config_file_from_cli)\n\n # Load YAML configuration file\n return ConfigurationFromYamlFile(config_file_path)\n\n\ndef path_exists(path, logger, force=True):\n def file_missing(path, logger, force):\n msg = \"path - {} - doesn't 
exists\".format(path)\n if force:\n logger.error(msg)\n sys.exit()\n logger.warning(msg)\n return False\n\n return True if os.path.exists(os.path.expanduser(path)) else file_missing(path,\n logger,\n force)\n\n\ndef sanitize_filename(filename):\n valid_chars = \"-_.%s%s\" % (string.ascii_letters, string.digits)\n return ''.join(c for c in filename if c in valid_chars)\n\n\ndef format_dataset_filename(sample_label, lane=None, read=None, ext=None, uid=False):\n filename = sanitize_filename(sample_label)\n\n if read:\n filename = '_'.join(\n [filename, lane, read]) if lane else '_'.join(\n [filename, read])\n\n if uid:\n filename = '.'.join([filename, str(uuid.uuid4())])\n\n if ext:\n filename = '.'.join([filename, ext])\n\n return sanitize_filename(filename)\n\n\ndef config_file_setup(logger, cf_label, cf_from_cli=None):\n \"\"\"\n Create a config file if does not exists, copying it from the package\n default into the user_config_dir.\n Return a configuration file path from cli args if present, otherwise return\n a path from the user_config_dir\n :param logger: logger\n :param cf_label: label of the configuration file (required)\n :param cf_from_cli: path to configuration file from cli arg\n :return: Path\n \"\"\"\n presta_config_dir = os.path.join(user_config_dir(__appname__))\n config_file_from_home = os.path.join(presta_config_dir, cf_label)\n\n if not path_exists(config_file_from_home, logger, force=False):\n logger.info('Creating config path {}'.format(presta_config_dir))\n ensure_dir(presta_config_dir)\n config_file_path = '/'.join(['config', cf_label])\n config_file_from_package = resource_filename(__appname__,\n config_file_path)\n copyfile(config_file_from_package, config_file_from_home)\n\n config_file_paths = []\n if cf_from_cli and path_exists(cf_from_cli, logger, force=False):\n config_file_paths.append(WeightedPath(cf_from_cli, 0))\n if path_exists(config_file_from_home, logger, force=False):\n 
config_file_paths.append(WeightedPath(config_file_from_home, 1))\n\n logger.debug(\"config file paths: {}\".format(config_file_paths))\n\n config_file_path = sorted(config_file_paths)[0].path\n logger.info('Reading configuration from {}'.format(config_file_path))\n return config_file_path\n\n\ndef touch(path, logger):\n try:\n with open(path, 'a'):\n os.utime(path, None)\n except IOError as e:\n logger.error(\"While touching {} file: {}\".format(path, e.strerror))\n\n\ndef read_chunks(file_handle, chunk_size=8192):\n while True:\n data = file_handle.read(chunk_size)\n if not data:\n break\n yield data\n\n\ndef get_md5(file_handle):\n hasher = hashlib.md5()\n for chunk in read_chunks(file_handle):\n hasher.update(chunk)\n return hasher.hexdigest()\n\n\ndef check_progress_status(root_path, started_file, completed_file):\n localroot, dirnames, filenames = os.walk(root_path).next()\n\n if started_file not in filenames:\n return PROGRESS_STATUS.get('TODO')\n elif completed_file not in filenames:\n return PROGRESS_STATUS.get('STARTED')\n else:\n started_file = os.path.join(root_path, started_file)\n completed_file = os.path.join(root_path, completed_file)\n\n if os.path.getmtime(started_file) > os.path.getmtime(completed_file):\n return PROGRESS_STATUS.get('STARTED')\n\n return PROGRESS_STATUS.get('COMPLETED')\n\n\ndef runJob(cmd, logger):\n try:\n # subprocess.check_output(cmd)\n process = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n output = process.communicate()[0]\n ret = process.wait()\n return True\n except subprocess.CalledProcessError as e:\n logger.info(e)\n if e.output:\n logger.info(\"command output: %s\", e.output)\n else:\n logger.info(\"no command output available\")\n return False\n\n",
"step-ids": [
25,
27,
28,
36,
38
]
}
|
[
25,
27,
28,
36,
38
] |
from flask import render_template, request, Response
from flask.views import MethodView, View
from flask.views import View
from repo import ClassifierRepo
from services import PredictDigitService
from settings import CLASSIFIER_STORAGE
class IndexView(View):
    """Serve the static landing page of the app."""

    def dispatch_request(self):
        # Flask invokes dispatch_request for every request routed here.
        template_name = 'index.html'
        return render_template(template_name)
class PredictDigitView(MethodView):
    """Accept a POSTed image data-URI and return the predicted digit."""

    def post(self):
        # Wire the persistence layer into the prediction service.
        classifier_repo = ClassifierRepo(CLASSIFIER_STORAGE)
        digit_service = PredictDigitService(classifier_repo)
        # The client submits the drawing under the 'image' JSON key.
        data_uri = request.json['image']
        predicted_digit = digit_service.handle(data_uri)
        return Response(str(predicted_digit).encode(), status=200)
|
normal
|
{
"blob_id": "3ea42e7ad5301314a39bf522280c084342cd18c5",
"index": 332,
"step-1": "<mask token>\n\n\nclass PredictDigitView(MethodView):\n\n def post(self):\n repo = ClassifierRepo(CLASSIFIER_STORAGE)\n service = PredictDigitService(repo)\n image_data_uri = request.json['image']\n prediction = service.handle(image_data_uri)\n return Response(str(prediction).encode(), status=200)\n",
"step-2": "<mask token>\n\n\nclass IndexView(View):\n <mask token>\n\n\nclass PredictDigitView(MethodView):\n\n def post(self):\n repo = ClassifierRepo(CLASSIFIER_STORAGE)\n service = PredictDigitService(repo)\n image_data_uri = request.json['image']\n prediction = service.handle(image_data_uri)\n return Response(str(prediction).encode(), status=200)\n",
"step-3": "<mask token>\n\n\nclass IndexView(View):\n\n def dispatch_request(self):\n return render_template('index.html')\n\n\nclass PredictDigitView(MethodView):\n\n def post(self):\n repo = ClassifierRepo(CLASSIFIER_STORAGE)\n service = PredictDigitService(repo)\n image_data_uri = request.json['image']\n prediction = service.handle(image_data_uri)\n return Response(str(prediction).encode(), status=200)\n",
"step-4": "from flask import render_template, request, Response\nfrom flask.views import MethodView, View\nfrom flask.views import View\nfrom repo import ClassifierRepo\nfrom services import PredictDigitService\nfrom settings import CLASSIFIER_STORAGE\n\n\nclass IndexView(View):\n\n def dispatch_request(self):\n return render_template('index.html')\n\n\nclass PredictDigitView(MethodView):\n\n def post(self):\n repo = ClassifierRepo(CLASSIFIER_STORAGE)\n service = PredictDigitService(repo)\n image_data_uri = request.json['image']\n prediction = service.handle(image_data_uri)\n return Response(str(prediction).encode(), status=200)\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class ErrorResponseCollection(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class ResponseCollection(object):
def __init__(self, message=None, data=None):
self.message = message
self.data = data
if self.message == None:
self.message = ' '
def as_md(self):
return '\n\n> **%s**\n\n```json\n%s\n\n```' % (self.message, pprint
.pformat(self.data, width=20, indent=4))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ErrorResponseCollection(object):
<|reserved_special_token_0|>
def as_md(self):
return '\n\n> **%s**\n\n```\n{\n\n\t"%s": "%s"\n\n}\n\n```' % (self
.message, self.param, self.message)
<|reserved_special_token_0|>
class ResponseCollection(object):
def __init__(self, message=None, data=None):
self.message = message
self.data = data
if self.message == None:
self.message = ' '
def as_md(self):
return '\n\n> **%s**\n\n```json\n%s\n\n```' % (self.message, pprint
.pformat(self.data, width=20, indent=4))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ErrorResponseCollection(object):
def __init__(self, status, message, param='message'):
self.status = status
self.message = message
self.param = param
def as_md(self):
return '\n\n> **%s**\n\n```\n{\n\n\t"%s": "%s"\n\n}\n\n```' % (self
.message, self.param, self.message)
<|reserved_special_token_0|>
class ResponseCollection(object):
def __init__(self, message=None, data=None):
self.message = message
self.data = data
if self.message == None:
self.message = ' '
def as_md(self):
return '\n\n> **%s**\n\n```json\n%s\n\n```' % (self.message, pprint
.pformat(self.data, width=20, indent=4))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ErrorResponseCollection(object):
def __init__(self, status, message, param='message'):
self.status = status
self.message = message
self.param = param
def as_md(self):
return '\n\n> **%s**\n\n```\n{\n\n\t"%s": "%s"\n\n}\n\n```' % (self
.message, self.param, self.message)
GET_401 = ErrorResponseCollection(status=401, message=
'Authentication credentials were not provided.', param='detail')
GET_REPO_STATUS_404 = ErrorResponseCollection(status=404, message='NOT FOUND')
class ResponseCollection(object):
def __init__(self, message=None, data=None):
self.message = message
self.data = data
if self.message == None:
self.message = ' '
def as_md(self):
return '\n\n> **%s**\n\n```json\n%s\n\n```' % (self.message, pprint
.pformat(self.data, width=20, indent=4))
GET_BRANCH_STATUS_200 = ResponseCollection(message='HTTP_200_OK', data=dict
(branches=['master', 'develop', 'feature/get_repo']))
GET_REPO_STATUS_200 = ResponseCollection(message='HTTP_200_OK', data={
'repositories': [{'name': 'dogproject', 'url':
'https://github.com/<user>~~~~~~.git', 'latest_commit': '2019-09-12',
'latest_scan': '2019-09-15'}, {'name': 'catproject1234533', 'url':
'https://github.com/<user>~~~~~~.git', 'latest_commit': '2019-10-11',
'latest_scan': '2019-10-11'}], 'repository_size': 31})
GET_COMMIT_STATUS_200 = ResponseCollection(message='HTTP_200_OK', data={
'commit': [{'sha': '123133010b97571286b568432f63395d18a49e05',
'message': 'fix : remove comments and fix code'}, {'sha':
'312313fc750cdea348e23145948d2ee58e29f483b', 'message':
'Update : korea_api crawling and yara convert Update : korea_api crawling and yara rule convert'
}, {'sha': '464d238123137e8502a455f97dca165cb2d28612', 'message':
'Initial commit'}]})
GET_CODE_DETECT_STATUS_200 = ResponseCollection(message='HTTP_200_OK', data
={'category': ['log_', 'Token', '룰추가따라 늘어남', '...'], 'log_': [{
'file_name': '.gitignore', 'line_number': 1, 'strings': 'a', 'line1':
'', 'line2':
'# Created by https://www.gitignore.io/api/git,python,django,pycharm+all',
'line3': '## HUFORMATION ##'}], 'Token': [{'file_name': '파일이름',
'line_number': 10, 'strings': 'ddddd', 'line1': '탐지 줄 앞', 'line2':
'탐지된 줄', 'line3': '탐지줄 다음'}, {'file_name': '.gitignore', 'line_number':
1, 'strings': 'a', 'line1': '', 'line2':
'# Created by https://www.gitignore.io/api/git,python,django,pycharm+all',
'line3': '## HUFORMATION ##'}], '룰추가따라 늘어남': [{'file_name': '파일이름',
'line_number': 302, 'strings': 'ddddd', 'line1': '탐지 줄 앞', 'line2':
'탐지된 줄', 'line3': '탐지줄 다음'}, {'file_name': '.gitignore', 'line_number':
1, 'strings': 'a', 'line1': 'aa', 'line2': '~~a~~~', 'line3': '다음줄'}, {
'file_name': '.gitignore', 'line_number': 1, 'strings': 'a', 'line1':
'aa', 'line2': '~~a~~~', 'line3': '다음줄'}], '...': [{'file_name':
'.gitignore', 'line_number': 1, 'strings': 'a', 'line1': 'aa', 'line2':
'~~a~~~', 'line3': '다음줄'}]})
<|reserved_special_token_1|>
import pprint
class ErrorResponseCollection(object):
    """An HTTP error response rendered as a markdown documentation snippet.

    Holds the status code, the human-readable message, and the JSON key
    (`param`) the message is reported under.
    """

    def __init__(self, status, message, param="message"):
        self.status = status
        self.message = message
        self.param = param

    def as_md(self):
        """Render the error as a markdown blockquote plus a JSON body."""
        template = '\n\n> **%s**\n\n```\n{\n\n\t"%s": "%s"\n\n}\n\n```'
        return template % (self.message, self.param, self.message)
# Canned 401 response: DRF-style auth-failure payload keyed by "detail".
GET_401 = ErrorResponseCollection(
    status= 401,
    message = "Authentication credentials were not provided.",
    param = "detail"
)

# Canned 404 response for repository lookups.
GET_REPO_STATUS_404 = ErrorResponseCollection(
    status = 404,
    message = "NOT FOUND"
)
class ResponseCollection(object):
    """A successful HTTP response rendered as a markdown documentation snippet.

    Parameters
    ----------
    message : str, optional
        Headline shown in the blockquote; defaults to a single space so the
        rendered blockquote is never empty.
    data : object, optional
        Payload pretty-printed into the fenced ```json``` block.
    """

    def __init__(self, message=None, data=None):
        self.message = message
        self.data = data
        # `is None` is the correct identity check; `== None` can misfire on
        # objects that define a custom __eq__.
        if self.message is None:
            self.message = " "

    def as_md(self):
        """Render the response as a markdown blockquote plus a json block."""
        return '\n\n> **%s**\n\n```json\n%s\n\n```' % \
            (self.message, pprint.pformat(self.data, width=20, indent=4))
# Canned success payloads rendered into the API documentation.
GET_BRANCH_STATUS_200 = ResponseCollection(
    message='HTTP_200_OK',
    data={'branches': ['master', 'develop', 'feature/get_repo']},
)

GET_REPO_STATUS_200 = ResponseCollection(
    message='HTTP_200_OK',
    data={
        'repositories': [
            {
                'name': 'dogproject',
                'url': 'https://github.com/<user>~~~~~~.git',
                'latest_commit': '2019-09-12',
                'latest_scan': '2019-09-15',
            },
            {
                'name': 'catproject1234533',
                'url': 'https://github.com/<user>~~~~~~.git',
                'latest_commit': '2019-10-11',
                'latest_scan': '2019-10-11',
            },
        ],
        'repository_size': 31,
    },
)

GET_COMMIT_STATUS_200 = ResponseCollection(
    message='HTTP_200_OK',
    data={
        'commit': [
            {'sha': '123133010b97571286b568432f63395d18a49e05',
             'message': 'fix : remove comments and fix code'},
            {'sha': '312313fc750cdea348e23145948d2ee58e29f483b',
             'message': 'Update : korea_api crawling and yara convert Update : korea_api crawling and yara rule convert'},
            {'sha': '464d238123137e8502a455f97dca165cb2d28612',
             'message': 'Initial commit'},
        ],
    },
)

# Example detection payload: keys under `data` after 'category' mirror the
# category names; each entry lists the matched file, line, matched strings
# and the surrounding three lines of context.
GET_CODE_DETECT_STATUS_200 = ResponseCollection(
    message='HTTP_200_OK',
    data={
        'category': ['log_', 'Token', '룰추가따라 늘어남', '...'],
        'log_': [
            {
                'file_name': '.gitignore',
                'line_number': 1,
                'strings': 'a',
                'line1': '',
                'line2': '# Created by https://www.gitignore.io/api/git,python,django,pycharm+all',
                'line3': '## HUFORMATION ##',
            },
        ],
        'Token': [
            {
                'file_name': '파일이름',
                'line_number': 10,
                'strings': 'ddddd',
                'line1': '탐지 줄 앞',
                'line2': '탐지된 줄',
                'line3': '탐지줄 다음',
            },
            {
                'file_name': '.gitignore',
                'line_number': 1,
                'strings': 'a',
                'line1': '',
                'line2': '# Created by https://www.gitignore.io/api/git,python,django,pycharm+all',
                'line3': '## HUFORMATION ##',
            },
        ],
        '룰추가따라 늘어남': [
            {
                'file_name': '파일이름',
                'line_number': 302,
                'strings': 'ddddd',
                'line1': '탐지 줄 앞',
                'line2': '탐지된 줄',
                'line3': '탐지줄 다음',
            },
            {
                'file_name': '.gitignore',
                'line_number': 1,
                'strings': 'a',
                'line1': 'aa',
                'line2': '~~a~~~',
                'line3': '다음줄',
            },
            {
                'file_name': '.gitignore',
                'line_number': 1,
                'strings': 'a',
                'line1': 'aa',
                'line2': '~~a~~~',
                'line3': '다음줄',
            },
        ],
        '...': [
            {
                'file_name': '.gitignore',
                'line_number': 1,
                'strings': 'a',
                'line1': 'aa',
                'line2': '~~a~~~',
                'line3': '다음줄',
            },
        ],
    },
)
|
flexible
|
{
"blob_id": "ade4d797a83eaa06e8bde90972a56376d7e0f55a",
"index": 6086,
"step-1": "<mask token>\n\n\nclass ErrorResponseCollection(object):\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass ResponseCollection(object):\n\n def __init__(self, message=None, data=None):\n self.message = message\n self.data = data\n if self.message == None:\n self.message = ' '\n\n def as_md(self):\n return '\\n\\n> **%s**\\n\\n```json\\n%s\\n\\n```' % (self.message, pprint\n .pformat(self.data, width=20, indent=4))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ErrorResponseCollection(object):\n <mask token>\n\n def as_md(self):\n return '\\n\\n> **%s**\\n\\n```\\n{\\n\\n\\t\"%s\": \"%s\"\\n\\n}\\n\\n```' % (self\n .message, self.param, self.message)\n\n\n<mask token>\n\n\nclass ResponseCollection(object):\n\n def __init__(self, message=None, data=None):\n self.message = message\n self.data = data\n if self.message == None:\n self.message = ' '\n\n def as_md(self):\n return '\\n\\n> **%s**\\n\\n```json\\n%s\\n\\n```' % (self.message, pprint\n .pformat(self.data, width=20, indent=4))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ErrorResponseCollection(object):\n\n def __init__(self, status, message, param='message'):\n self.status = status\n self.message = message\n self.param = param\n\n def as_md(self):\n return '\\n\\n> **%s**\\n\\n```\\n{\\n\\n\\t\"%s\": \"%s\"\\n\\n}\\n\\n```' % (self\n .message, self.param, self.message)\n\n\n<mask token>\n\n\nclass ResponseCollection(object):\n\n def __init__(self, message=None, data=None):\n self.message = message\n self.data = data\n if self.message == None:\n self.message = ' '\n\n def as_md(self):\n return '\\n\\n> **%s**\\n\\n```json\\n%s\\n\\n```' % (self.message, pprint\n .pformat(self.data, width=20, indent=4))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ErrorResponseCollection(object):\n\n def __init__(self, status, message, param='message'):\n self.status = status\n self.message = message\n self.param = param\n\n def as_md(self):\n return '\\n\\n> **%s**\\n\\n```\\n{\\n\\n\\t\"%s\": \"%s\"\\n\\n}\\n\\n```' % (self\n .message, self.param, self.message)\n\n\nGET_401 = ErrorResponseCollection(status=401, message=\n 'Authentication credentials were not provided.', param='detail')\nGET_REPO_STATUS_404 = ErrorResponseCollection(status=404, message='NOT FOUND')\n\n\nclass ResponseCollection(object):\n\n def __init__(self, message=None, data=None):\n self.message = message\n self.data = data\n if self.message == None:\n self.message = ' '\n\n def as_md(self):\n return '\\n\\n> **%s**\\n\\n```json\\n%s\\n\\n```' % (self.message, pprint\n .pformat(self.data, width=20, indent=4))\n\n\nGET_BRANCH_STATUS_200 = ResponseCollection(message='HTTP_200_OK', data=dict\n (branches=['master', 'develop', 'feature/get_repo']))\nGET_REPO_STATUS_200 = ResponseCollection(message='HTTP_200_OK', data={\n 'repositories': [{'name': 'dogproject', 'url':\n 'https://github.com/<user>~~~~~~.git', 'latest_commit': '2019-09-12',\n 'latest_scan': '2019-09-15'}, {'name': 'catproject1234533', 'url':\n 'https://github.com/<user>~~~~~~.git', 'latest_commit': '2019-10-11',\n 'latest_scan': '2019-10-11'}], 'repository_size': 31})\nGET_COMMIT_STATUS_200 = ResponseCollection(message='HTTP_200_OK', data={\n 'commit': [{'sha': '123133010b97571286b568432f63395d18a49e05',\n 'message': 'fix : remove comments and fix code'}, {'sha':\n '312313fc750cdea348e23145948d2ee58e29f483b', 'message':\n 'Update : korea_api crawling and yara convert Update : korea_api crawling and yara rule convert'\n }, {'sha': '464d238123137e8502a455f97dca165cb2d28612', 'message':\n 'Initial commit'}]})\nGET_CODE_DETECT_STATUS_200 = ResponseCollection(message='HTTP_200_OK', data\n ={'category': ['log_', 'Token', '룰추가따라 늘어남', '...'], 'log_': [{\n 
'file_name': '.gitignore', 'line_number': 1, 'strings': 'a', 'line1':\n '', 'line2':\n '# Created by https://www.gitignore.io/api/git,python,django,pycharm+all',\n 'line3': '## HUFORMATION ##'}], 'Token': [{'file_name': '파일이름',\n 'line_number': 10, 'strings': 'ddddd', 'line1': '탐지 줄 앞', 'line2':\n '탐지된 줄', 'line3': '탐지줄 다음'}, {'file_name': '.gitignore', 'line_number':\n 1, 'strings': 'a', 'line1': '', 'line2':\n '# Created by https://www.gitignore.io/api/git,python,django,pycharm+all',\n 'line3': '## HUFORMATION ##'}], '룰추가따라 늘어남': [{'file_name': '파일이름',\n 'line_number': 302, 'strings': 'ddddd', 'line1': '탐지 줄 앞', 'line2':\n '탐지된 줄', 'line3': '탐지줄 다음'}, {'file_name': '.gitignore', 'line_number':\n 1, 'strings': 'a', 'line1': 'aa', 'line2': '~~a~~~', 'line3': '다음줄'}, {\n 'file_name': '.gitignore', 'line_number': 1, 'strings': 'a', 'line1':\n 'aa', 'line2': '~~a~~~', 'line3': '다음줄'}], '...': [{'file_name':\n '.gitignore', 'line_number': 1, 'strings': 'a', 'line1': 'aa', 'line2':\n '~~a~~~', 'line3': '다음줄'}]})\n",
"step-5": "import pprint\r\n\r\nclass ErrorResponseCollection(object):\r\n def __init__(self, status, message, param = \"message\"):\r\n self.status = status\r\n self.message = message\r\n self.param = param\r\n\r\n def as_md(self):\r\n return '\\n\\n> **%s**\\n\\n```\\n{\\n\\n\\t\"%s\": \"%s\"\\n\\n}\\n\\n```' % \\\r\n (self.message, self.param, self.message)\r\n\r\nGET_401 = ErrorResponseCollection(\r\n status= 401,\r\n message = \"Authentication credentials were not provided.\",\r\n param = \"detail\"\r\n)\r\n\r\nGET_REPO_STATUS_404 = ErrorResponseCollection(\r\n status = 404,\r\n message = \"NOT FOUND\"\r\n)\r\n\r\n\r\n\r\nclass ResponseCollection(object):\r\n def __init__(self, message=None, data=None):\r\n self.message = message\r\n self.data = data\r\n if self.message == None:\r\n self.message = \" \"\r\n def as_md(self):\r\n return '\\n\\n> **%s**\\n\\n```json\\n%s\\n\\n```' % \\\r\n (self.message, pprint.pformat(self.data, width=20, indent=4))\r\n\r\nGET_BRANCH_STATUS_200 = ResponseCollection(\r\n message = \"HTTP_200_OK\",\r\n data = dict(branches=[\r\n 'master',\r\n 'develop',\r\n 'feature/get_repo'\r\n ])\r\n)\r\n\r\nGET_REPO_STATUS_200 = ResponseCollection(\r\n message = \"HTTP_200_OK\",\r\n data = {\r\n\t\"repositories\": [\r\n\t\t{\r\n\t\t\t\"name\": \"dogproject\",\r\n\t\t\t\"url\": \"https://github.com/<user>~~~~~~.git\",\r\n\t\t\t\"latest_commit\": \"2019-09-12\",\r\n\t\t\t\"latest_scan\": \"2019-09-15\",\r\n\t\t\t\r\n\t\t},\r\n\t\t{\r\n\t\t\t\"name\": \"catproject1234533\",\r\n\t\t\t\"url\": \"https://github.com/<user>~~~~~~.git\",\r\n\t\t\t\"latest_commit\": \"2019-10-11\",\r\n\t\t\t\"latest_scan\": \"2019-10-11\",\r\n\t\t},\r\n \r\n\t],\r\n \"repository_size\": 31\r\n }\r\n)\r\n\r\nGET_COMMIT_STATUS_200 = ResponseCollection(\r\n message = \"HTTP_200_OK\",\r\n data ={\r\n 'commit': [\r\n {'sha': '123133010b97571286b568432f63395d18a49e05', \r\n 'message': 'fix : remove comments and fix code'}, \r\n {'sha': 
'312313fc750cdea348e23145948d2ee58e29f483b', \r\n 'message': 'Update : korea_api crawling and yara convert Update : korea_api crawling and yara rule convert'}, \r\n {'sha': '464d238123137e8502a455f97dca165cb2d28612', 'message': 'Initial commit'}]\r\n \r\n }\r\n)\r\n\r\n\r\nGET_CODE_DETECT_STATUS_200 = ResponseCollection(\r\n message = \"HTTP_200_OK\",\r\n data = {\r\n \"category\": [\r\n \"log_\",\r\n \"Token\",\r\n \"룰추가따라 늘어남\",\r\n \"...\"\r\n ],\r\n\r\n \"log_\": [\r\n {\r\n \"file_name\": \".gitignore\",\r\n \"line_number\": 1,\r\n \"strings\": \"a\",\r\n \"line1\": \"\",\r\n \"line2\": \"# Created by https://www.gitignore.io/api/git,python,django,pycharm+all\",\r\n \"line3\": \"## HUFORMATION ##\"\r\n }\r\n ],\r\n \"Token\": [\r\n {\r\n \"file_name\": \"파일이름\",\r\n \"line_number\": 10,\r\n \"strings\": \"ddddd\",\r\n \"line1\": \"탐지 줄 앞\",\r\n \"line2\": \"탐지된 줄\",\r\n \"line3\": \"탐지줄 다음\"\r\n },\r\n {\r\n \"file_name\": \".gitignore\",\r\n \"line_number\": 1,\r\n \"strings\": \"a\",\r\n \"line1\": \"\",\r\n \"line2\": \"# Created by https://www.gitignore.io/api/git,python,django,pycharm+all\",\r\n \"line3\": \"## HUFORMATION ##\"\r\n }\r\n ],\r\n \"룰추가따라 늘어남\": [\r\n {\r\n \"file_name\": \"파일이름\",\r\n \"line_number\": 302,\r\n \"strings\": \"ddddd\",\r\n \"line1\": \"탐지 줄 앞\",\r\n \"line2\": \"탐지된 줄\",\r\n \"line3\": \"탐지줄 다음\"\r\n },\r\n {\r\n \"file_name\": \".gitignore\",\r\n \"line_number\": 1,\r\n \"strings\": \"a\",\r\n \"line1\": \"aa\",\r\n \"line2\": \"~~a~~~\",\r\n \"line3\": \"다음줄\"\r\n },\r\n {\r\n \"file_name\": \".gitignore\",\r\n \"line_number\": 1,\r\n \"strings\": \"a\",\r\n \"line1\": \"aa\",\r\n \"line2\": \"~~a~~~\",\r\n \"line3\": \"다음줄\"\r\n },\r\n ],\r\n \"...\": [\r\n {\r\n \"file_name\": \".gitignore\",\r\n \"line_number\": 1,\r\n \"strings\": \"a\",\r\n \"line1\": \"aa\",\r\n \"line2\": \"~~a~~~\",\r\n \"line3\": \"다음줄\"\r\n },\r\n ]\r\n }\r\n)",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
root.title('Expanding GUI')
<|reserved_special_token_0|>
my_label.pack()
<|reserved_special_token_0|>
buttonquit.pack()
root.mainloop()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
root = Tk()
root.title('Expanding GUI')
my_img = ImageTk.PhotoImage(Image.open('googledrive.png'))
my_label = Label(image=my_img)
my_label.pack()
buttonquit = Button(root, text='Exit program', command=root.quit)
buttonquit.pack()
root.mainloop()
<|reserved_special_token_1|>
from tkinter import *
from PIL import ImageTk, Image
import sys, os
root = Tk()
root.title('Expanding GUI')
my_img = ImageTk.PhotoImage(Image.open('googledrive.png'))
my_label = Label(image=my_img)
my_label.pack()
buttonquit = Button(root, text='Exit program', command=root.quit)
buttonquit.pack()
root.mainloop()
<|reserved_special_token_1|>
from tkinter import *
from PIL import ImageTk,Image

import sys, os

# Minimal Tk demo: show an image (loaded via Pillow) plus an exit button.

root = Tk()
root.title("Expanding GUI")

# NOTE(review): the iconbitmap/iconphoto attempts below failed on Ubuntu and
# were left disabled; the window keeps the default Tk icon.
#root.iconbitmap('@/home/gxgarciat/Documents/Tkinter/gdrive.ico')
#root.iconphoto(True, PhotoImage(file="@/home/gxgarciat/Documents/Tkinter/gdrive.ico"))
#root.iconbitmap(os.path.join(sys.path[0], "/home/gxgarciat/Documents/Tkinter/gdrive.ico"))
#root.iconbitmap('~home/gxgarciat/Documents/Tkinter/gdrive.ico')
#root.iconphoto(False, Tk.PhotoImage(file='/home/gxgarciat/Documents/Tkinter/gdrive.ico'))


# Load the PNG with Pillow, wrap it for Tk, and keep a module-level reference
# so the image is not garbage-collected while displayed.
my_img = ImageTk.PhotoImage(Image.open("googledrive.png"))
my_label = Label(image=my_img)
my_label.pack()

# Quit button ends the root.mainloop() call below.
buttonquit = Button(root,text="Exit program",command=root.quit)
buttonquit.pack()

root.mainloop()
|
flexible
|
{
"blob_id": "2da10163a40c9720ca9deecd9afb0e39aa885546",
"index": 5523,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nroot.title('Expanding GUI')\n<mask token>\nmy_label.pack()\n<mask token>\nbuttonquit.pack()\nroot.mainloop()\n",
"step-3": "<mask token>\nroot = Tk()\nroot.title('Expanding GUI')\nmy_img = ImageTk.PhotoImage(Image.open('googledrive.png'))\nmy_label = Label(image=my_img)\nmy_label.pack()\nbuttonquit = Button(root, text='Exit program', command=root.quit)\nbuttonquit.pack()\nroot.mainloop()\n",
"step-4": "from tkinter import *\nfrom PIL import ImageTk, Image\nimport sys, os\nroot = Tk()\nroot.title('Expanding GUI')\nmy_img = ImageTk.PhotoImage(Image.open('googledrive.png'))\nmy_label = Label(image=my_img)\nmy_label.pack()\nbuttonquit = Button(root, text='Exit program', command=root.quit)\nbuttonquit.pack()\nroot.mainloop()\n",
"step-5": "from tkinter import *\nfrom PIL import ImageTk,Image\n\nimport sys, os\n\n# This will display images and icon\n\nroot = Tk()\nroot.title(\"Expanding GUI\")\n\n# With ubuntu, it did not work the icon part\n#root.iconbitmap('@/home/gxgarciat/Documents/Tkinter/gdrive.ico')\n#root.iconphoto(True, PhotoImage(file=\"@/home/gxgarciat/Documents/Tkinter/gdrive.ico\"))\n#root.iconbitmap(os.path.join(sys.path[0], \"/home/gxgarciat/Documents/Tkinter/gdrive.ico\"))\n#root.iconbitmap('~home/gxgarciat/Documents/Tkinter/gdrive.ico')\n#root.iconphoto(False, Tk.PhotoImage(file='/home/gxgarciat/Documents/Tkinter/gdrive.ico'))\n\n\n# Importing images is a 3 step process here.\nmy_img = ImageTk.PhotoImage(Image.open(\"googledrive.png\"))\nmy_label = Label(image=my_img)\nmy_label.pack()\n\n# Adding a quit button\nbuttonquit = Button(root,text=\"Exit program\",command=root.quit)\nbuttonquit.pack()\n\nroot.mainloop()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sgc
import multiprocessing as mp
# import json
import argparse
import os
import re
#Process arguments passed to the script
parser = argparse.ArgumentParser(description='Execute commands parallel on remote servers')
parser.add_argument('-f', action='store', required=True, dest='file', help='servers list')
group = parser.add_mutually_exclusive_group()
group.add_argument('-c', action='store', dest='commands', help='commands need to execute')
group.add_argument('-S', action='store', dest='script', help='local script which need to execute on remote servers')
options = parser.parse_args()

#Exit if input file is zero bytes
if os.path.getsize(options.file) == 0:
    print("Error: server list file is empty")
    exit(2)

#Process the input file and store the servers, deduplicated and in order,
#in the list variable `servers`.  `with` guarantees the handle is closed.
servers = []
with open(options.file, 'r') as file:
    for line in file:
        line = line.strip('\n')
        if len(line) == 0 or line in servers:
            continue
        servers.append(line)

#Exit the script if the servers list is empty
if not servers:
    print("Error: server list file is empty")
    exit(2)

#Process the commands passed into the script (comma-separated, quotes stripped)
commands = []
if options.commands and re.match(r'[a-zA-Z0-9]', options.commands):
    for item in options.commands.split(','):
        item = item.replace('"', '')
        commands.append(item)

if options.script:
    # worker() uploads the script to /tmp on each server, so run it from there.
    commands = ['/tmp/'+os.path.basename(options.script)]

# BUG FIX: this check previously ran BEFORE the options.script branch above,
# so invoking with -S alone always exited with "command list is empty".
if not commands:
    print("Error: command list is empty")
    parser.print_help()
    exit(2)

# Queue used by worker processes to ship per-server results to the parent.
queue = mp.Queue()
def worker(server, commands):
    """Child-process entry point: run *commands* on *server* over SSH.

    Always pushes exactly one result dict onto the module-level ``queue``:
      'server'   -- the server name,
      'commands' -- whatever sgc's execute() returned on success, an error
                    message/exception on failure, or the string 'Down' when
                    the host does not answer ping.

    When ``options.script`` is set, the script is first uploaded to /tmp on
    the remote host and made executable, and becomes the single command run.
    """
    # NOTE(review): assumes sgc.Ssh exposes .ping / .connect() /
    # .connection / .connection_error / .Sftp() / .execute() -- confirm
    # against the sgc module.
    output = {'server': server}
    session = sgc.Ssh(server=server)

    if session.ping != 'Alive':
        output['commands'] = 'Down'
        queue.put(output)
        return

    session.connect()
    if session.connection == False:  # sgc flags a failed connect with False
        output['commands'] = session.connection_error
        queue.put(output)
        return

    if options.script:
        if not os.path.exists(options.script):
            # Bug fix: record the problem and stop.  Previously this error
            # was overwritten by an unconditional session.execute() below.
            output['commands'] = "Error: the script location {} not exists".format(options.script)
            print("Error: the script location {} not exists".format(options.script))
            queue.put(output)
            return
        folder, script_name = os.path.split(options.script)
        if not folder:
            folder = os.getcwd()
        try:
            # We run in a child process, so chdir does not leak to the parent.
            os.chdir(folder)
            sftp = session.Sftp()
            sftp.chdir('/tmp')
            sftp.put(script_name, script_name)
            commands = ('/tmp/' + script_name,)
            session.execute(('/bin/chmod a+x /tmp/' + script_name, ))
        except Exception as error:
            # Bug fix: report the upload failure instead of clobbering it
            # with the result of executing a script that never arrived.
            output['commands'] = error
            queue.put(output)
            return

    output['commands'] = session.execute(commands)
    queue.put(output)
def _print_result(item):
    """Pretty-print one worker result dict (see worker()) to stdout."""
    if item['commands'] == 'Down':
        print("Server: {} : Unable to ping".format(item['server']))
        return
    if not isinstance(item['commands'], dict):
        # Connection/upload failures arrive as a string or an exception.
        print("Server: {} : {}".format(item['server'], item['commands']))
        return
    print("Server: {}".format(item['server']))
    for command in commands:
        # Each entry is indexed [0] for stdout-like output, [1] for errors,
        # matching what the original report code printed.
        if item['commands'][command][0] != "":
            if options.script:
                print("Output of Command: {}".format(options.script))
            else:
                print("Output of Command: {}".format(command))
            print(item['commands'][command][0])
        if item['commands'][command][1] != "":
            print("Error occurred on command: {}".format(command))
            print(item['commands'][command][1])
    print("**************************************************************************")


# Fan the work out, never keeping more live children than CPU cores.
procs = []
limits = mp.cpu_count()
while servers:
    if len(mp.active_children()) < limits:
        server = servers.pop()
        proc = mp.Process(target=worker, args=(server, commands), name=server)
        procs.append(proc)
        proc.start()

# Report results as they arrive while children are still running.
while mp.active_children():
    if not queue.empty():
        _print_result(queue.get())

# Bug fix: drain anything enqueued after the last child exited.  The old
# loop stopped the moment active_children() was empty and could silently
# drop late results still sitting in the queue.
while not queue.empty():
    _print_result(queue.get())

# Reap every child explicitly.
for proc in procs:
    proc.join()
|
normal
|
{
"blob_id": "ace7e5676fcb01c3542952eaacdada9963b8467a",
"index": 5168,
"step-1": "<mask token>\n\n\ndef worker(server, commands):\n output = {}\n output['server'] = server\n session = sgc.Ssh(server=server)\n if session.ping == 'Alive':\n session.connect()\n if session.connection == False:\n output['commands'] = session.connection_error\n else:\n if options.script:\n if not os.path.exists(options.script):\n output['commands'\n ] = 'Error: the script location {} not exists'.format(\n options.script)\n print('Error: the script location {} not exists'.format\n (options.script))\n else:\n curdir = os.getcwd()\n folder, file = os.path.split(options.script)\n if not folder:\n folder = curdir\n try:\n os.chdir(folder)\n sftp = session.Sftp()\n sftp.chdir('/tmp')\n sftp.put(file, file)\n commands = '/tmp/' + file,\n session.execute(('/bin/chmod a+x /tmp/' + file,))\n except Exception as error:\n output['commands'] = error\n output['commands'] = session.execute(commands)\n else:\n output['commands'] = 'Down'\n queue.put(output)\n\n\n<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('-f', action='store', required=True, dest='file', help=\n 'servers list')\n<mask token>\ngroup.add_argument('-c', action='store', dest='commands', help=\n 'commands need to execute')\ngroup.add_argument('-S', action='store', dest='script', help=\n 'local script which need to execute on remote servers')\n<mask token>\nif os.path.getsize(options.file) == 0:\n print('Error: server list file is empty')\n exit(2)\n<mask token>\nfor line in file:\n line = line.strip('\\n')\n if len(line) == 0 or line in servers:\n continue\n servers.append(line)\nif not servers:\n print('Error: server list file is empty')\n exit(2)\n<mask token>\nif options.commands and re.match('[a-zA-Z0-9]', options.commands):\n for item in options.commands.split(','):\n item = item.replace('\"', '')\n commands.append(item)\n if not commands:\n print('Error: command list is empty')\n parser.print_help()\n exit(2)\nif options.script:\n commands = ['/tmp/' + os.path.basename(options.script)]\n<mask token>\n\n\ndef worker(server, commands):\n output = {}\n output['server'] = server\n session = sgc.Ssh(server=server)\n if session.ping == 'Alive':\n session.connect()\n if session.connection == False:\n output['commands'] = session.connection_error\n else:\n if options.script:\n if not os.path.exists(options.script):\n output['commands'\n ] = 'Error: the script location {} not exists'.format(\n options.script)\n print('Error: the script location {} not exists'.format\n (options.script))\n else:\n curdir = os.getcwd()\n folder, file = os.path.split(options.script)\n if not folder:\n folder = curdir\n try:\n os.chdir(folder)\n sftp = session.Sftp()\n sftp.chdir('/tmp')\n sftp.put(file, file)\n commands = '/tmp/' + file,\n session.execute(('/bin/chmod a+x /tmp/' + file,))\n except Exception as error:\n output['commands'] = error\n output['commands'] = session.execute(commands)\n else:\n output['commands'] = 'Down'\n queue.put(output)\n\n\n<mask token>\nwhile 
servers:\n if len(mp.active_children()) < limits:\n server = servers.pop()\n proc = mp.Process(target=worker, args=(server, commands), name=server)\n procs.append(proc)\n proc.start()\nwhile mp.active_children():\n if not queue.empty():\n item = queue.get()\n if item['commands'] == 'Down':\n print('Server: {} : Unable to ping'.format(item['server']))\n continue\n if type(item['commands']) != type(dict()):\n print('Server: {} : {}'.format(item['server'], item['commands']))\n continue\n print('Server: {}'.format(item['server']))\n for command in commands:\n if item['commands'][command][0] != '':\n if options.script:\n print('Output of Command: {}'.format(options.script))\n else:\n print('Output of Command: {}'.format(command))\n print(item['commands'][command][0])\n if item['commands'][command][1] != '':\n print('Error occurred on command: {}'.format(command))\n print(item['commands'][command][1])\n print(\n '**************************************************************************'\n )\n",
"step-3": "<mask token>\nparser = argparse.ArgumentParser(description=\n 'Execute commands parallel on remote servers')\nparser.add_argument('-f', action='store', required=True, dest='file', help=\n 'servers list')\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument('-c', action='store', dest='commands', help=\n 'commands need to execute')\ngroup.add_argument('-S', action='store', dest='script', help=\n 'local script which need to execute on remote servers')\noptions = parser.parse_args()\nif os.path.getsize(options.file) == 0:\n print('Error: server list file is empty')\n exit(2)\nfile = open(options.file, 'r')\nservers = []\nfor line in file:\n line = line.strip('\\n')\n if len(line) == 0 or line in servers:\n continue\n servers.append(line)\nif not servers:\n print('Error: server list file is empty')\n exit(2)\ncommands = []\nif options.commands and re.match('[a-zA-Z0-9]', options.commands):\n for item in options.commands.split(','):\n item = item.replace('\"', '')\n commands.append(item)\n if not commands:\n print('Error: command list is empty')\n parser.print_help()\n exit(2)\nif options.script:\n commands = ['/tmp/' + os.path.basename(options.script)]\nqueue = mp.Queue()\n\n\ndef worker(server, commands):\n output = {}\n output['server'] = server\n session = sgc.Ssh(server=server)\n if session.ping == 'Alive':\n session.connect()\n if session.connection == False:\n output['commands'] = session.connection_error\n else:\n if options.script:\n if not os.path.exists(options.script):\n output['commands'\n ] = 'Error: the script location {} not exists'.format(\n options.script)\n print('Error: the script location {} not exists'.format\n (options.script))\n else:\n curdir = os.getcwd()\n folder, file = os.path.split(options.script)\n if not folder:\n folder = curdir\n try:\n os.chdir(folder)\n sftp = session.Sftp()\n sftp.chdir('/tmp')\n sftp.put(file, file)\n commands = '/tmp/' + file,\n session.execute(('/bin/chmod a+x /tmp/' + file,))\n except 
Exception as error:\n output['commands'] = error\n output['commands'] = session.execute(commands)\n else:\n output['commands'] = 'Down'\n queue.put(output)\n\n\nprocs = []\nlimits = mp.cpu_count()\nwhile servers:\n if len(mp.active_children()) < limits:\n server = servers.pop()\n proc = mp.Process(target=worker, args=(server, commands), name=server)\n procs.append(proc)\n proc.start()\nwhile mp.active_children():\n if not queue.empty():\n item = queue.get()\n if item['commands'] == 'Down':\n print('Server: {} : Unable to ping'.format(item['server']))\n continue\n if type(item['commands']) != type(dict()):\n print('Server: {} : {}'.format(item['server'], item['commands']))\n continue\n print('Server: {}'.format(item['server']))\n for command in commands:\n if item['commands'][command][0] != '':\n if options.script:\n print('Output of Command: {}'.format(options.script))\n else:\n print('Output of Command: {}'.format(command))\n print(item['commands'][command][0])\n if item['commands'][command][1] != '':\n print('Error occurred on command: {}'.format(command))\n print(item['commands'][command][1])\n print(\n '**************************************************************************'\n )\n",
"step-4": "import sgc\nimport multiprocessing as mp\nimport argparse\nimport os\nimport re\nparser = argparse.ArgumentParser(description=\n 'Execute commands parallel on remote servers')\nparser.add_argument('-f', action='store', required=True, dest='file', help=\n 'servers list')\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument('-c', action='store', dest='commands', help=\n 'commands need to execute')\ngroup.add_argument('-S', action='store', dest='script', help=\n 'local script which need to execute on remote servers')\noptions = parser.parse_args()\nif os.path.getsize(options.file) == 0:\n print('Error: server list file is empty')\n exit(2)\nfile = open(options.file, 'r')\nservers = []\nfor line in file:\n line = line.strip('\\n')\n if len(line) == 0 or line in servers:\n continue\n servers.append(line)\nif not servers:\n print('Error: server list file is empty')\n exit(2)\ncommands = []\nif options.commands and re.match('[a-zA-Z0-9]', options.commands):\n for item in options.commands.split(','):\n item = item.replace('\"', '')\n commands.append(item)\n if not commands:\n print('Error: command list is empty')\n parser.print_help()\n exit(2)\nif options.script:\n commands = ['/tmp/' + os.path.basename(options.script)]\nqueue = mp.Queue()\n\n\ndef worker(server, commands):\n output = {}\n output['server'] = server\n session = sgc.Ssh(server=server)\n if session.ping == 'Alive':\n session.connect()\n if session.connection == False:\n output['commands'] = session.connection_error\n else:\n if options.script:\n if not os.path.exists(options.script):\n output['commands'\n ] = 'Error: the script location {} not exists'.format(\n options.script)\n print('Error: the script location {} not exists'.format\n (options.script))\n else:\n curdir = os.getcwd()\n folder, file = os.path.split(options.script)\n if not folder:\n folder = curdir\n try:\n os.chdir(folder)\n sftp = session.Sftp()\n sftp.chdir('/tmp')\n sftp.put(file, file)\n commands = '/tmp/' + 
file,\n session.execute(('/bin/chmod a+x /tmp/' + file,))\n except Exception as error:\n output['commands'] = error\n output['commands'] = session.execute(commands)\n else:\n output['commands'] = 'Down'\n queue.put(output)\n\n\nprocs = []\nlimits = mp.cpu_count()\nwhile servers:\n if len(mp.active_children()) < limits:\n server = servers.pop()\n proc = mp.Process(target=worker, args=(server, commands), name=server)\n procs.append(proc)\n proc.start()\nwhile mp.active_children():\n if not queue.empty():\n item = queue.get()\n if item['commands'] == 'Down':\n print('Server: {} : Unable to ping'.format(item['server']))\n continue\n if type(item['commands']) != type(dict()):\n print('Server: {} : {}'.format(item['server'], item['commands']))\n continue\n print('Server: {}'.format(item['server']))\n for command in commands:\n if item['commands'][command][0] != '':\n if options.script:\n print('Output of Command: {}'.format(options.script))\n else:\n print('Output of Command: {}'.format(command))\n print(item['commands'][command][0])\n if item['commands'][command][1] != '':\n print('Error occurred on command: {}'.format(command))\n print(item['commands'][command][1])\n print(\n '**************************************************************************'\n )\n",
"step-5": "import sgc\nimport multiprocessing as mp\n# import json\nimport argparse\nimport os\nimport re\n\n\n\n#Process argument passed to the script\nparser = argparse.ArgumentParser(description='Execute commands parallel on remote servers')\nparser.add_argument('-f', action='store', required=True, dest='file', help='servers list')\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument('-c', action='store', dest='commands', help='commands need to execute')\ngroup.add_argument('-S', action='store', dest='script', help='local script which need to execute on remote servers')\n\noptions = parser.parse_args()\n\n#Exit if input file is zero\nif os.path.getsize(options.file) == 0:\n print(\"Error: server list file is empty\")\n exit(2)\n\n#Process the input file and store the server in list variable servers\nfile = open(options.file, 'r')\nservers = []\nfor line in file:\n line = line.strip('\\n')\n if len(line) == 0 or line in servers:\n continue\n servers.append(line)\n\n#Exit the script if the servers list is empty\nif not servers:\n print(\"Error: server list file is empty\")\n exit(2)\n\n#Process the commands passed into the script\ncommands = []\n\nif options.commands and re.match(r'[a-zA-Z0-9]', options.commands):\n for item in options.commands.split(','):\n item = item.replace('\"', '')\n commands.append(item)\n #Exit the script if command list is empty\n if not commands:\n print(\"Error: command list is empty\")\n parser.print_help()\n exit(2)\n\nif options.script:\n commands = ['/tmp/'+os.path.basename(options.script)]\n\n#servers = ['localhost', 'centos6web', 'fedora.kannan.lab', '127.0.0.1', '127.0.0.2', '127.0.0.3', '127.0.0.4',\n# '127.0.0.100', '127.0.0.200', '127.0.0.150', '127.0.0.10', '127.0.0.20', '127.0.0.30']\n# servers = ['centos6web', 'fedora.kannan.lab']\n# commands = ('sudo shutdown -h 0',)\n# commands = ('uptime', 'uname -a', 'sudo fdisk -l')\nqueue = mp.Queue()\ndef worker(server, commands):\n # print(mp.current_process().name)\n 
output = {}\n output['server'] = server\n session = sgc.Ssh(server=server)\n\n # print(\"Connected to server {}\".format(server))\n # else:\n # print(\"Unable to connect to server {}\\n{}\".format(server, session.connection_error))\n if session.ping == 'Alive':\n session.connect()\n # print(session.connection)\n if session.connection == False:\n output['commands'] = session.connection_error\n else:\n if options.script:\n if not os.path.exists(options.script):\n output['commands'] = \"Error: the script location {} not exists\".format(options.script)\n print(\"Error: the script location {} not exists\".format(options.script))\n else:\n curdir = os.getcwd()\n folder, file = os.path.split(options.script)\n if not folder:\n folder = curdir\n try:\n os.chdir(folder)\n sftp = session.Sftp()\n sftp.chdir('/tmp')\n sftp.put(file, file)\n commands = ('/tmp/'+file,)\n session.execute(('/bin/chmod a+x /tmp/'+file, ))\n except Exception as error:\n output['commands'] = error\n output['commands'] = session.execute(commands)\n else:\n output['commands'] = 'Down'\n\n queue.put(output)\n # if output != None:\n # print(\"Server {}\".format(server))\n # for key in output:\n # print(key, output[key])\n\n# pool = mp.Pool(processes=mp.cpu_count())\n# result = pool.map_async(worker, servers)\n# for item in result.get():\n# print(json.dumps(item, indent=4))\nprocs = []\nlimits = mp.cpu_count()\nwhile servers:\n if len(mp.active_children()) < limits:\n server = servers.pop()\n proc = mp.Process(target=worker, args=(server, commands), name=server)\n procs.append(proc)\n proc.start()\nwhile mp.active_children() :\n if not queue.empty():\n item = queue.get()\n\n if item['commands'] == 'Down':\n print(\"Server: {} : Unable to ping\".format(item['server']))\n continue\n if type(item['commands']) != type(dict()):\n print(\"Server: {} : {}\".format(item['server'], item['commands']))\n continue\n\n print(\"Server: {}\".format(item['server']))\n for command in commands:\n if 
item['commands'][command][0] != \"\":\n if options.script:\n print(\"Output of Command: {}\".format(options.script))\n else:\n print(\"Output of Command: {}\".format(command))\n print(item['commands'][command][0])\n if item['commands'][command][1] != \"\":\n print(\"Error occurred on command: {}\".format(command))\n print(item['commands'][command][1])\n print(\"**************************************************************************\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# helper functions to handle intcode
from collections import defaultdict
def read_code(string):
    """
    Parse *string*, a comma-separated intcode program, into memory.

    Returns a defaultdict(int) keyed by address, so any address the
    program never set reads as 0.
    """
    tokens = string.split(',')
    return defaultdict(int, ((address, int(token)) for address, token in enumerate(tokens)))
def to_ascii(line):
    """
    Encode *line* as a list of ASCII code points, with a trailing
    newline (code 10) appended.
    """
    return [ord(ch) for ch in line] + [10]
class IntCode:
    """A resumable Intcode virtual machine (Advent of Code 2019).

    Memory (*code*) maps address -> int; a defaultdict(int) makes any
    unwritten address read as 0.  run() executes until the program halts
    (opcode 99) or needs an input that was not supplied, so a machine can
    be driven incrementally across several run() calls.
    """

    def __init__(self, code):
        self.code = code
        # relative base used by mode-2 parameters (adjusted by opcode 9)
        self.base = 0

        # instruction pointer
        self.idx = 0
        self.terminated = False

    @staticmethod
    def load_code(code_string):
        """Build a machine from a comma-separated program string."""
        return IntCode(read_code(code_string))

    @staticmethod
    def load_from_file(filename):
        """Build a machine from a file containing the program text."""
        return IntCode.load_code(open(filename, 'r').read())

    def copy(self):
        """
        Returns a fresh copy of the code, **in the same state**.
        """
        # Bug fix: the old version copied only the memory and silently
        # reset idx/base/terminated, contradicting this docstring for any
        # machine that had already run.
        clone = IntCode(self.code.copy())
        clone.base = self.base
        clone.idx = self.idx
        clone.terminated = self.terminated
        return clone

    def get_value(self, mode, value):
        """Resolve a *read* parameter according to its addressing mode."""
        if mode == 0:
            # position mode: value is an address
            return self.code[value]
        elif mode == 1:
            # immediate mode: value is the operand itself
            return value
        elif mode == 2:
            # relative mode: value is an offset from the relative base
            return self.code[value + self.base]
        # Bug fix: an unknown mode used to silently return None.
        raise ValueError("unknown parameter mode: {}".format(mode))

    def get_values(self, modes):
        """Resolve the parameters following the current opcode, one per mode."""
        return [
            self.get_value(mode, self.code[self.idx + i])
            for i, mode in enumerate(modes, start=1)
        ]

    def get_modes(self, value, n_modes):
        """Extract *n_modes* addressing modes from instruction *value*.

        Modes occupy the digits above the two-digit opcode, least
        significant (first parameter) first.
        """
        value = value // 100

        modes = []
        for _ in range(n_modes):
            modes.append(int(value % 10))
            value //= 10

        return modes

    def write_to(self, mode, param, value):
        """
        write value to the location given by param, based on the mode.
        """
        if mode == 0:
            # position mode
            self.code[param] = value
        elif mode == 1:
            # write targets cannot be in immediate mode
            raise ValueError
        elif mode == 2:
            # relative mode
            self.code[param + self.base] = value
        else:
            # Bug fix: an unknown mode used to be silently ignored.
            raise ValueError("unknown parameter mode: {}".format(mode))

    def run(self, inputs=None, print_outputs=False):
        """
        Resumes the code from the current instruction, using the
        given 'inputs' for any required inputs.

        When it halts, the outputs from this run are returned.

        If the program has terminated, the 'terminated' flag is set.
        """
        input_idx = 0
        outputs = []
        while True:
            # decode: last two digits are the opcode, the rest are modes
            value = self.code[self.idx]
            opcode = value % 100
            if opcode == 1:
                # add: [p3] = p1 + p2
                modes = self.get_modes(value, 3)
                values = self.get_values(modes)
                self.write_to(modes[2], self.code[self.idx + 3], values[0] + values[1])
                self.idx += 4
            elif opcode == 2:
                # multiply: [p3] = p1 * p2
                modes = self.get_modes(value, 3)
                values = self.get_values(modes)
                self.write_to(modes[2], self.code[self.idx + 3], values[0] * values[1])
                self.idx += 4
            elif opcode == 3:
                # input: halt (resumable) if we are out of supplied inputs
                if inputs is None or input_idx >= len(inputs):
                    return outputs
                input_val = inputs[input_idx]
                input_idx += 1
                modes = self.get_modes(value, 1)
                self.write_to(modes[0], self.code[self.idx + 1], input_val)
                self.idx += 2
            elif opcode == 4:
                # output p1
                modes = self.get_modes(value, 1)
                v = self.get_value(modes[0], self.code[self.idx + 1])
                outputs.append(v)
                if print_outputs:
                    print(v)
                self.idx += 2
            elif opcode == 5:
                # jump-if-true: jump to p2 when p1 != 0
                modes = self.get_modes(value, 2)
                values = self.get_values(modes)
                if values[0] != 0:
                    self.idx = values[1]
                else:
                    self.idx += 3
            elif opcode == 6:
                # jump-if-false: jump to p2 when p1 == 0
                modes = self.get_modes(value, 2)
                values = self.get_values(modes)
                if values[0] == 0:
                    self.idx = values[1]
                else:
                    self.idx += 3
            elif opcode == 7:
                # less-than: [p3] = 1 if p1 < p2 else 0
                modes = self.get_modes(value, 3)
                values = self.get_values(modes)
                compare_val = 1 if values[0] < values[1] else 0
                self.write_to(modes[2], self.code[self.idx + 3], compare_val)
                self.idx += 4
            elif opcode == 8:
                # equals: [p3] = 1 if p1 == p2 else 0
                modes = self.get_modes(value, 3)
                values = self.get_values(modes)
                compare_val = 1 if values[0] == values[1] else 0
                self.write_to(modes[2], self.code[self.idx + 3], compare_val)
                self.idx += 4
            elif opcode == 9:
                # adjust relative base by p1
                modes = self.get_modes(value, 1)
                values = self.get_values(modes)
                self.base += values[0]
                self.idx += 2
            elif opcode == 99:
                # halt for good
                self.terminated = True
                return outputs
            else:
                raise ValueError
|
normal
|
{
"blob_id": "68c2fd1d8ca9e1dd9373ca9f641c2920c87b2392",
"index": 1346,
"step-1": "<mask token>\n\n\nclass IntCode:\n\n def __init__(self, code):\n self.code = code\n self.base = 0\n self.idx = 0\n self.terminated = False\n\n @staticmethod\n def load_code(code_string):\n return IntCode(read_code(code_string))\n\n @staticmethod\n def load_from_file(filename):\n return IntCode.load_code(open(filename, 'r').read())\n\n def copy(self):\n \"\"\"\n Returns a fresh copy of the code, **in the same state**.\n \"\"\"\n return IntCode(self.code.copy())\n\n def get_value(self, mode, value):\n if mode == 0:\n return self.code[value]\n elif mode == 1:\n return value\n elif mode == 2:\n return self.code[value + self.base]\n\n def get_values(self, modes):\n return [self.get_value(mode, self.code[self.idx + i]) for i, mode in\n enumerate(modes, start=1)]\n\n def get_modes(self, value, n_modes):\n value = value // 100\n modes = []\n for _ in range(n_modes):\n modes.append(int(value % 10))\n value //= 10\n return modes\n\n def write_to(self, mode, param, value):\n \"\"\"\n write value to the location given by param, based on the mode.\n \"\"\"\n if mode == 0:\n self.code[param] = value\n elif mode == 1:\n raise ValueError\n elif mode == 2:\n self.code[param + self.base] = value\n\n def run(self, inputs=None, print_outputs=False):\n \"\"\"\n Resumes the code from the current instruction, using the\n given 'inputs' for any required inputs.\n\n When it halts, the outputs from this run are returned.\n\n If the program has terminated, the 'terminated' flag is set.\n \"\"\"\n input_idx = 0\n outputs = []\n while True:\n value = self.code[self.idx]\n opcode = value % 100\n if opcode == 1:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n self.write_to(modes[2], self.code[self.idx + 3], values[0] +\n values[1])\n self.idx += 4\n elif opcode == 2:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n self.write_to(modes[2], self.code[self.idx + 3], values[0] *\n values[1])\n self.idx += 4\n elif opcode == 3:\n if inputs is 
None or input_idx >= len(inputs):\n return outputs\n input_val = inputs[input_idx]\n input_idx += 1\n modes = self.get_modes(value, 1)\n self.write_to(modes[0], self.code[self.idx + 1], input_val)\n self.idx += 2\n elif opcode == 4:\n modes = self.get_modes(value, 1)\n v = self.get_value(modes[0], self.code[self.idx + 1])\n outputs.append(v)\n if print_outputs:\n print(v)\n self.idx += 2\n elif opcode == 5:\n modes = self.get_modes(value, 2)\n values = self.get_values(modes)\n if values[0] != 0:\n self.idx = values[1]\n else:\n self.idx += 3\n elif opcode == 6:\n modes = self.get_modes(value, 2)\n values = self.get_values(modes)\n if values[0] == 0:\n self.idx = values[1]\n else:\n self.idx += 3\n elif opcode == 7:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n compare_val = 1 if values[0] < values[1] else 0\n self.write_to(modes[2], self.code[self.idx + 3], compare_val)\n self.idx += 4\n elif opcode == 8:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n compare_val = 1 if values[0] == values[1] else 0\n self.write_to(modes[2], self.code[self.idx + 3], compare_val)\n self.idx += 4\n elif opcode == 9:\n modes = self.get_modes(value, 1)\n values = self.get_values(modes)\n self.base += values[0]\n self.idx += 2\n elif opcode == 99:\n self.terminated = True\n return outputs\n else:\n raise ValueError\n",
"step-2": "<mask token>\n\n\ndef to_ascii(line):\n \"\"\"\n Writes a string as ASCII code. Appends a newline at the end.\n \"\"\"\n data = [ord(c) for c in line]\n data.append(10)\n return data\n\n\nclass IntCode:\n\n def __init__(self, code):\n self.code = code\n self.base = 0\n self.idx = 0\n self.terminated = False\n\n @staticmethod\n def load_code(code_string):\n return IntCode(read_code(code_string))\n\n @staticmethod\n def load_from_file(filename):\n return IntCode.load_code(open(filename, 'r').read())\n\n def copy(self):\n \"\"\"\n Returns a fresh copy of the code, **in the same state**.\n \"\"\"\n return IntCode(self.code.copy())\n\n def get_value(self, mode, value):\n if mode == 0:\n return self.code[value]\n elif mode == 1:\n return value\n elif mode == 2:\n return self.code[value + self.base]\n\n def get_values(self, modes):\n return [self.get_value(mode, self.code[self.idx + i]) for i, mode in\n enumerate(modes, start=1)]\n\n def get_modes(self, value, n_modes):\n value = value // 100\n modes = []\n for _ in range(n_modes):\n modes.append(int(value % 10))\n value //= 10\n return modes\n\n def write_to(self, mode, param, value):\n \"\"\"\n write value to the location given by param, based on the mode.\n \"\"\"\n if mode == 0:\n self.code[param] = value\n elif mode == 1:\n raise ValueError\n elif mode == 2:\n self.code[param + self.base] = value\n\n def run(self, inputs=None, print_outputs=False):\n \"\"\"\n Resumes the code from the current instruction, using the\n given 'inputs' for any required inputs.\n\n When it halts, the outputs from this run are returned.\n\n If the program has terminated, the 'terminated' flag is set.\n \"\"\"\n input_idx = 0\n outputs = []\n while True:\n value = self.code[self.idx]\n opcode = value % 100\n if opcode == 1:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n self.write_to(modes[2], self.code[self.idx + 3], values[0] +\n values[1])\n self.idx += 4\n elif opcode == 2:\n modes = 
self.get_modes(value, 3)\n values = self.get_values(modes)\n self.write_to(modes[2], self.code[self.idx + 3], values[0] *\n values[1])\n self.idx += 4\n elif opcode == 3:\n if inputs is None or input_idx >= len(inputs):\n return outputs\n input_val = inputs[input_idx]\n input_idx += 1\n modes = self.get_modes(value, 1)\n self.write_to(modes[0], self.code[self.idx + 1], input_val)\n self.idx += 2\n elif opcode == 4:\n modes = self.get_modes(value, 1)\n v = self.get_value(modes[0], self.code[self.idx + 1])\n outputs.append(v)\n if print_outputs:\n print(v)\n self.idx += 2\n elif opcode == 5:\n modes = self.get_modes(value, 2)\n values = self.get_values(modes)\n if values[0] != 0:\n self.idx = values[1]\n else:\n self.idx += 3\n elif opcode == 6:\n modes = self.get_modes(value, 2)\n values = self.get_values(modes)\n if values[0] == 0:\n self.idx = values[1]\n else:\n self.idx += 3\n elif opcode == 7:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n compare_val = 1 if values[0] < values[1] else 0\n self.write_to(modes[2], self.code[self.idx + 3], compare_val)\n self.idx += 4\n elif opcode == 8:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n compare_val = 1 if values[0] == values[1] else 0\n self.write_to(modes[2], self.code[self.idx + 3], compare_val)\n self.idx += 4\n elif opcode == 9:\n modes = self.get_modes(value, 1)\n values = self.get_values(modes)\n self.base += values[0]\n self.idx += 2\n elif opcode == 99:\n self.terminated = True\n return outputs\n else:\n raise ValueError\n",
"step-3": "<mask token>\n\n\ndef read_code(string):\n \"\"\"\n string should be a comma-separated string.\n \"\"\"\n code = defaultdict(int)\n for i, x in enumerate(string.split(',')):\n code[i] = int(x)\n return code\n\n\ndef to_ascii(line):\n \"\"\"\n Writes a string as ASCII code. Appends a newline at the end.\n \"\"\"\n data = [ord(c) for c in line]\n data.append(10)\n return data\n\n\nclass IntCode:\n\n def __init__(self, code):\n self.code = code\n self.base = 0\n self.idx = 0\n self.terminated = False\n\n @staticmethod\n def load_code(code_string):\n return IntCode(read_code(code_string))\n\n @staticmethod\n def load_from_file(filename):\n return IntCode.load_code(open(filename, 'r').read())\n\n def copy(self):\n \"\"\"\n Returns a fresh copy of the code, **in the same state**.\n \"\"\"\n return IntCode(self.code.copy())\n\n def get_value(self, mode, value):\n if mode == 0:\n return self.code[value]\n elif mode == 1:\n return value\n elif mode == 2:\n return self.code[value + self.base]\n\n def get_values(self, modes):\n return [self.get_value(mode, self.code[self.idx + i]) for i, mode in\n enumerate(modes, start=1)]\n\n def get_modes(self, value, n_modes):\n value = value // 100\n modes = []\n for _ in range(n_modes):\n modes.append(int(value % 10))\n value //= 10\n return modes\n\n def write_to(self, mode, param, value):\n \"\"\"\n write value to the location given by param, based on the mode.\n \"\"\"\n if mode == 0:\n self.code[param] = value\n elif mode == 1:\n raise ValueError\n elif mode == 2:\n self.code[param + self.base] = value\n\n def run(self, inputs=None, print_outputs=False):\n \"\"\"\n Resumes the code from the current instruction, using the\n given 'inputs' for any required inputs.\n\n When it halts, the outputs from this run are returned.\n\n If the program has terminated, the 'terminated' flag is set.\n \"\"\"\n input_idx = 0\n outputs = []\n while True:\n value = self.code[self.idx]\n opcode = value % 100\n if opcode == 1:\n modes = 
self.get_modes(value, 3)\n values = self.get_values(modes)\n self.write_to(modes[2], self.code[self.idx + 3], values[0] +\n values[1])\n self.idx += 4\n elif opcode == 2:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n self.write_to(modes[2], self.code[self.idx + 3], values[0] *\n values[1])\n self.idx += 4\n elif opcode == 3:\n if inputs is None or input_idx >= len(inputs):\n return outputs\n input_val = inputs[input_idx]\n input_idx += 1\n modes = self.get_modes(value, 1)\n self.write_to(modes[0], self.code[self.idx + 1], input_val)\n self.idx += 2\n elif opcode == 4:\n modes = self.get_modes(value, 1)\n v = self.get_value(modes[0], self.code[self.idx + 1])\n outputs.append(v)\n if print_outputs:\n print(v)\n self.idx += 2\n elif opcode == 5:\n modes = self.get_modes(value, 2)\n values = self.get_values(modes)\n if values[0] != 0:\n self.idx = values[1]\n else:\n self.idx += 3\n elif opcode == 6:\n modes = self.get_modes(value, 2)\n values = self.get_values(modes)\n if values[0] == 0:\n self.idx = values[1]\n else:\n self.idx += 3\n elif opcode == 7:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n compare_val = 1 if values[0] < values[1] else 0\n self.write_to(modes[2], self.code[self.idx + 3], compare_val)\n self.idx += 4\n elif opcode == 8:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n compare_val = 1 if values[0] == values[1] else 0\n self.write_to(modes[2], self.code[self.idx + 3], compare_val)\n self.idx += 4\n elif opcode == 9:\n modes = self.get_modes(value, 1)\n values = self.get_values(modes)\n self.base += values[0]\n self.idx += 2\n elif opcode == 99:\n self.terminated = True\n return outputs\n else:\n raise ValueError\n",
"step-4": "from collections import defaultdict\n\n\ndef read_code(string):\n \"\"\"\n string should be a comma-separated string.\n \"\"\"\n code = defaultdict(int)\n for i, x in enumerate(string.split(',')):\n code[i] = int(x)\n return code\n\n\ndef to_ascii(line):\n \"\"\"\n Writes a string as ASCII code. Appends a newline at the end.\n \"\"\"\n data = [ord(c) for c in line]\n data.append(10)\n return data\n\n\nclass IntCode:\n\n def __init__(self, code):\n self.code = code\n self.base = 0\n self.idx = 0\n self.terminated = False\n\n @staticmethod\n def load_code(code_string):\n return IntCode(read_code(code_string))\n\n @staticmethod\n def load_from_file(filename):\n return IntCode.load_code(open(filename, 'r').read())\n\n def copy(self):\n \"\"\"\n Returns a fresh copy of the code, **in the same state**.\n \"\"\"\n return IntCode(self.code.copy())\n\n def get_value(self, mode, value):\n if mode == 0:\n return self.code[value]\n elif mode == 1:\n return value\n elif mode == 2:\n return self.code[value + self.base]\n\n def get_values(self, modes):\n return [self.get_value(mode, self.code[self.idx + i]) for i, mode in\n enumerate(modes, start=1)]\n\n def get_modes(self, value, n_modes):\n value = value // 100\n modes = []\n for _ in range(n_modes):\n modes.append(int(value % 10))\n value //= 10\n return modes\n\n def write_to(self, mode, param, value):\n \"\"\"\n write value to the location given by param, based on the mode.\n \"\"\"\n if mode == 0:\n self.code[param] = value\n elif mode == 1:\n raise ValueError\n elif mode == 2:\n self.code[param + self.base] = value\n\n def run(self, inputs=None, print_outputs=False):\n \"\"\"\n Resumes the code from the current instruction, using the\n given 'inputs' for any required inputs.\n\n When it halts, the outputs from this run are returned.\n\n If the program has terminated, the 'terminated' flag is set.\n \"\"\"\n input_idx = 0\n outputs = []\n while True:\n value = self.code[self.idx]\n opcode = value % 100\n if 
opcode == 1:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n self.write_to(modes[2], self.code[self.idx + 3], values[0] +\n values[1])\n self.idx += 4\n elif opcode == 2:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n self.write_to(modes[2], self.code[self.idx + 3], values[0] *\n values[1])\n self.idx += 4\n elif opcode == 3:\n if inputs is None or input_idx >= len(inputs):\n return outputs\n input_val = inputs[input_idx]\n input_idx += 1\n modes = self.get_modes(value, 1)\n self.write_to(modes[0], self.code[self.idx + 1], input_val)\n self.idx += 2\n elif opcode == 4:\n modes = self.get_modes(value, 1)\n v = self.get_value(modes[0], self.code[self.idx + 1])\n outputs.append(v)\n if print_outputs:\n print(v)\n self.idx += 2\n elif opcode == 5:\n modes = self.get_modes(value, 2)\n values = self.get_values(modes)\n if values[0] != 0:\n self.idx = values[1]\n else:\n self.idx += 3\n elif opcode == 6:\n modes = self.get_modes(value, 2)\n values = self.get_values(modes)\n if values[0] == 0:\n self.idx = values[1]\n else:\n self.idx += 3\n elif opcode == 7:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n compare_val = 1 if values[0] < values[1] else 0\n self.write_to(modes[2], self.code[self.idx + 3], compare_val)\n self.idx += 4\n elif opcode == 8:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n compare_val = 1 if values[0] == values[1] else 0\n self.write_to(modes[2], self.code[self.idx + 3], compare_val)\n self.idx += 4\n elif opcode == 9:\n modes = self.get_modes(value, 1)\n values = self.get_values(modes)\n self.base += values[0]\n self.idx += 2\n elif opcode == 99:\n self.terminated = True\n return outputs\n else:\n raise ValueError\n",
"step-5": "# helper functions to handle intcode\n\nfrom collections import defaultdict\n\ndef read_code(string):\n \"\"\"\n string should be a comma-separated string.\n \"\"\"\n\n code = defaultdict(int)\n for i, x in enumerate(string.split(',')):\n code[i] = int(x)\n return code\n\n\ndef to_ascii(line):\n \"\"\"\n Writes a string as ASCII code. Appends a newline at the end.\n \"\"\"\n data = [ord(c) for c in line]\n data.append(10)\n return data\n\n\nclass IntCode:\n def __init__(self, code):\n self.code = code\n self.base = 0\n\n # instruction pointer\n self.idx = 0\n self.terminated = False\n\n @staticmethod\n def load_code(code_string):\n return IntCode(read_code(code_string))\n\n @staticmethod\n def load_from_file(filename):\n return IntCode.load_code(open(filename, 'r').read())\n\n def copy(self):\n \"\"\"\n Returns a fresh copy of the code, **in the same state**.\n \"\"\"\n return IntCode(self.code.copy())\n\n def get_value(self, mode, value):\n if mode == 0:\n # position mode\n return self.code[value]\n elif mode == 1:\n # immediate mode\n return value\n elif mode == 2:\n # relative mode\n return self.code[value + self.base]\n\n def get_values(self, modes):\n return [\n self.get_value(mode, self.code[self.idx + i])\n for i, mode in enumerate(modes, start=1)\n ]\n\n def get_modes(self, value, n_modes):\n value = value // 100\n\n modes = []\n for _ in range(n_modes):\n modes.append(int(value % 10))\n value //= 10\n \n return modes\n\n def write_to(self, mode, param, value):\n \"\"\"\n write value to the location given by param, based on the mode.\n \"\"\"\n if mode == 0:\n # position mode\n self.code[param] = value\n elif mode == 1:\n # cannot be in immediate mode\n raise ValueError\n elif mode == 2:\n # relative mode\n self.code[param + self.base] = value\n\n def run(self, inputs=None, print_outputs=False):\n \"\"\"\n Resumes the code from the current instruction, using the\n given 'inputs' for any required inputs.\n\n When it halts, the outputs from this 
run are returned.\n\n If the program has terminated, the 'terminated' flag is set.\n \"\"\"\n input_idx = 0\n outputs = []\n\n while True:\n # parse the value\n value = self.code[self.idx]\n opcode = value % 100\n\n if opcode == 1:\n # Day 2\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n self.write_to(modes[2], self.code[self.idx+3], values[0] + values[1])\n\n self.idx += 4\n\n elif opcode == 2:\n # Day 2\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n self.write_to(modes[2], self.code[self.idx+3], values[0] * values[1])\n\n self.idx += 4\n\n elif opcode == 3:\n # Day 5\n if inputs is None or input_idx >= len(inputs):\n # halt if we are expecting an input, resume later\n return outputs\n\n input_val = inputs[input_idx]\n input_idx += 1\n\n modes = self.get_modes(value, 1)\n self.write_to(modes[0], self.code[self.idx+1], input_val)\n\n self.idx += 2\n\n elif opcode == 4:\n # Day 5\n modes = self.get_modes(value, 1)\n v = self.get_value(modes[0], self.code[self.idx+1])\n outputs.append(v)\n if print_outputs:\n print(v)\n\n self.idx += 2\n\n elif opcode == 5:\n # Day 5\n modes = self.get_modes(value, 2)\n values = self.get_values(modes)\n if values[0] != 0:\n self.idx = values[1]\n else:\n self.idx += 3\n\n elif opcode == 6:\n # Day 5\n modes = self.get_modes(value, 2)\n values = self.get_values(modes)\n if values[0] == 0:\n self.idx = values[1]\n else:\n self.idx += 3\n\n elif opcode == 7:\n # Day 5\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n\n compare_val = 1 if values[0] < values[1] else 0\n self.write_to(modes[2], self.code[self.idx+3], compare_val)\n\n self.idx += 4\n\n elif opcode == 8:\n # Day 5\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n\n compare_val = 1 if values[0] == values[1] else 0\n self.write_to(modes[2], self.code[self.idx+3], compare_val)\n\n self.idx += 4\n\n elif opcode == 9:\n # Day 9\n modes = self.get_modes(value, 1)\n values = 
self.get_values(modes)\n self.base += values[0]\n\n self.idx += 2\n\n elif opcode == 99:\n self.terminated = True\n return outputs\n\n else:\n raise ValueError",
"step-ids": [
10,
11,
12,
13,
14
]
}
|
[
10,
11,
12,
13,
14
] |
import subprocess
class Audio:
    """Drives the SoX voice-changer pipeline and manages the PulseAudio
    null-sink / remap-source modules that expose it as a virtual mic."""

    def __init__(self):
        # Handle to the running sox subprocess, or None when idle.
        self.sox_process = None

    def kill_sox(self, timeout=1):
        """Stop the running sox process, if any.

        Tries a graceful terminate first, escalating to a kill when the
        process does not exit within `timeout` seconds.
        """
        if self.sox_process is not None:
            self.sox_process.terminate()
            try:
                self.sox_process.wait(timeout=timeout)
            except subprocess.TimeoutExpired:
                # Graceful shutdown failed; force-kill and reap.
                self.sox_process.kill()
                self.sox_process.wait(timeout=timeout)
            self.sox_process = None

    # trying a lower buffer size
    def run_sox(self, scale, preset, buffer=20):
        '''
        Builds a sox command from a preset object and starts it, piping
        the default PulseAudio source into the Lyrebird-Output sink.

        scale  -- pitch shift amount; multiplied by 100 for sox's pitch scale
        preset -- object with volume_boost (dB value or None) and
                  downsample_amount (int or None) attributes
        buffer -- requested buffer size (currently overridden, see below)
        '''
        # NOTE: deliberately overrides the argument while a lower buffer
        # size is being trialled for latency.
        buffer = 17
        multiplier = 100
        command_effects = []

        command_effects += ["pitch", str(scale * multiplier)]

        # Volume boosting
        if preset.volume_boost is not None:
            command_effects += ["vol", str(preset.volume_boost) + "dB"]
        else:
            # Fix a bug where SoX uses the last given volume
            command_effects += ["vol", "0"]

        # Downsampling
        if preset.downsample_amount is not None:
            command_effects += ["downsample", str(preset.downsample_amount)]
        else:
            # Append downsample of 1 to fix a bug where the downsample isn't
            # being reverted when we disable the effect with it on.
            command_effects += ["downsample", "1"]

        command = ["sox", "--buffer", str(buffer), "-q", "-t", "pulseaudio", "default", "-t", "pulseaudio", "Lyrebird-Output"] + command_effects
        self.sox_process = subprocess.Popen(command)

    def get_sink_name(self, attribute):
        """Return the value of a ('sink_name', value) or ('source_name',
        value) attribute tuple, or None for any other key.

        (Parameter renamed from `tuple`, which shadowed the builtin.)
        """
        if attribute[0] in ("sink_name", "source_name"):
            return attribute[1]
        return None

    def load_pa_modules(self):
        """Load the null sink and remap source that make up Lyrebird's
        virtual input device.  Raises CalledProcessError on failure."""
        self.null_sink = subprocess.check_call(
            'pactl load-module module-null-sink sink_name=Lyrebird-Output node.description="Lyrebird Output"'.split(' ')
        )
        self.remap_sink = subprocess.check_call(
            'pactl load-module module-remap-source source_name=Lyrebird-Input master=Lyrebird-Output.monitor node.description="Lyrebird Virtual Input"'\
            .split(' ')
        )

    def get_pactl_modules(self):
        '''
        Parses `pactl list short` into tuples containing the module ID,
        the module type and the attributes of the module. It is designed
        only for named modules and as such junk data may be included in
        the returned list.

        Returns an array of tuples that take the form:
            (module_id (str), module_type (str), attributes (attribute tuples))

        The attribute tuples:
            (key (str), value (str))

        An example output might look like:
        [
            ( '30', 'module-null-sink', [('sink_name', 'Lyrebird-Output')] ),
            ( '31', 'module-remap-source', [('source_name', 'Lyrebird-Input'), ('master', 'Lyrebird-Output.monitor')] )
        ]
        '''
        pactl_list = subprocess.run(["pactl", "list", "short"], capture_output=True, encoding="utf8")
        data = []
        for line in pactl_list.stdout.split("\n"):
            info = line.split("\t")
            if len(info) <= 2:
                continue
            if info[2] and len(info[2]) > 0:
                # Third column is a space-separated list of key=value pairs.
                key_values = [tuple(kv.split("=")) for kv in info[2].split(" ")]
                data.append((info[0], info[1], key_values))
            else:
                data.append((info[0], info[1], []))
        return data

    def unload_pa_modules(self):
        '''
        Unloads all Lyrebird null sinks and remap sources.
        '''
        lyrebird_module_ids = []
        for module in self.get_pactl_modules():
            # Skip malformed or attribute-less entries.
            if len(module) < 3 or len(module[2]) < 1:
                continue
            if module[1] == "module-null-sink":
                if self.get_sink_name(module[2][0]) == "Lyrebird-Output":
                    lyrebird_module_ids.append(module[0])
            elif module[1] == "module-remap-source":
                if self.get_sink_name(module[2][0]) == "Lyrebird-Input":
                    lyrebird_module_ids.append(module[0])
        for id in lyrebird_module_ids:
            subprocess.run(["pactl", "unload-module", str(id)])
|
normal
|
{
"blob_id": "d35d26cc50da9a3267edd2da706a4b6e653d22ac",
"index": 6555,
"step-1": "<mask token>\n\n\nclass Audio:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Audio:\n\n def __init__(self):\n self.sox_process = None\n\n def kill_sox(self, timeout=1):\n if self.sox_process is not None:\n self.sox_process.terminate()\n try:\n self.sox_process.wait(timeout=timeout)\n except subprocess.TimeoutExpired:\n self.sox_process.kill()\n self.sox_process.wait(timeout=timeout)\n self.sox_process = None\n\n def run_sox(self, scale, preset, buffer=20):\n \"\"\"\n Builds and returns a sox command from a preset object\n \"\"\"\n buffer = 17\n multiplier = 100\n command_effects = []\n command_effects += ['pitch', str(scale * multiplier)]\n if preset.volume_boost != None:\n command_effects += ['vol', str(preset.volume_boost) + 'dB']\n else:\n command_effects += ['vol', '0']\n if preset.downsample_amount != None:\n command_effects += ['downsample', str(preset.downsample_amount)]\n else:\n command_effects += ['downsample', '1']\n command = ['sox', '--buffer', str(buffer), '-q', '-t', 'pulseaudio',\n 'default', '-t', 'pulseaudio', 'Lyrebird-Output'] + command_effects\n self.sox_process = subprocess.Popen(command)\n <mask token>\n\n def load_pa_modules(self):\n self.null_sink = subprocess.check_call(\n 'pactl load-module module-null-sink sink_name=Lyrebird-Output node.description=\"Lyrebird Output\"'\n .split(' '))\n self.remap_sink = subprocess.check_call(\n 'pactl load-module module-remap-source source_name=Lyrebird-Input master=Lyrebird-Output.monitor node.description=\"Lyrebird Virtual Input\"'\n .split(' '))\n\n def get_pactl_modules(self):\n \"\"\"\n Parses `pactl info short` into tuples containing the module ID,\n the module type and the attributes of the module. 
It is designed\n only for named modules and as such junk data may be included in\n the returned list.\n \n Returns an array of tuples that take the form:\n (module_id (str), module_type (str), attributes (attribute tuples))\n \n The attribute tuples:\n (key (str), value (str))\n \n An example output might look like:\n [\n ( '30', 'module-null-sink', [('sink_name', 'Lyrebird-Output')] ),\n ( '31', 'module-remap-source', [('source_name', 'Lyrebird-Input'), ('master', 'Lyrebird-Output.monitor')] )\n ]\n \"\"\"\n pactl_list = subprocess.run(['pactl', 'list', 'short'],\n capture_output=True, encoding='utf8')\n lines = pactl_list.stdout\n data = []\n split_lines = lines.split('\\n')\n for line in split_lines:\n info = line.split('\\t')\n if len(info) <= 2:\n continue\n if info[2] and len(info[2]) > 0:\n key_values = list(map(lambda key_value: tuple(key_value.\n split('=')), info[2].split(' ')))\n data.append((info[0], info[1], key_values))\n else:\n data.append((info[0], info[1], []))\n return data\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Audio:\n\n def __init__(self):\n self.sox_process = None\n\n def kill_sox(self, timeout=1):\n if self.sox_process is not None:\n self.sox_process.terminate()\n try:\n self.sox_process.wait(timeout=timeout)\n except subprocess.TimeoutExpired:\n self.sox_process.kill()\n self.sox_process.wait(timeout=timeout)\n self.sox_process = None\n\n def run_sox(self, scale, preset, buffer=20):\n \"\"\"\n Builds and returns a sox command from a preset object\n \"\"\"\n buffer = 17\n multiplier = 100\n command_effects = []\n command_effects += ['pitch', str(scale * multiplier)]\n if preset.volume_boost != None:\n command_effects += ['vol', str(preset.volume_boost) + 'dB']\n else:\n command_effects += ['vol', '0']\n if preset.downsample_amount != None:\n command_effects += ['downsample', str(preset.downsample_amount)]\n else:\n command_effects += ['downsample', '1']\n command = ['sox', '--buffer', str(buffer), '-q', '-t', 'pulseaudio',\n 'default', '-t', 'pulseaudio', 'Lyrebird-Output'] + command_effects\n self.sox_process = subprocess.Popen(command)\n\n def get_sink_name(self, tuple):\n if tuple[0] == 'sink_name':\n return tuple[1]\n elif tuple[0] == 'source_name':\n return tuple[1]\n else:\n return None\n\n def load_pa_modules(self):\n self.null_sink = subprocess.check_call(\n 'pactl load-module module-null-sink sink_name=Lyrebird-Output node.description=\"Lyrebird Output\"'\n .split(' '))\n self.remap_sink = subprocess.check_call(\n 'pactl load-module module-remap-source source_name=Lyrebird-Input master=Lyrebird-Output.monitor node.description=\"Lyrebird Virtual Input\"'\n .split(' '))\n\n def get_pactl_modules(self):\n \"\"\"\n Parses `pactl info short` into tuples containing the module ID,\n the module type and the attributes of the module. 
It is designed\n only for named modules and as such junk data may be included in\n the returned list.\n \n Returns an array of tuples that take the form:\n (module_id (str), module_type (str), attributes (attribute tuples))\n \n The attribute tuples:\n (key (str), value (str))\n \n An example output might look like:\n [\n ( '30', 'module-null-sink', [('sink_name', 'Lyrebird-Output')] ),\n ( '31', 'module-remap-source', [('source_name', 'Lyrebird-Input'), ('master', 'Lyrebird-Output.monitor')] )\n ]\n \"\"\"\n pactl_list = subprocess.run(['pactl', 'list', 'short'],\n capture_output=True, encoding='utf8')\n lines = pactl_list.stdout\n data = []\n split_lines = lines.split('\\n')\n for line in split_lines:\n info = line.split('\\t')\n if len(info) <= 2:\n continue\n if info[2] and len(info[2]) > 0:\n key_values = list(map(lambda key_value: tuple(key_value.\n split('=')), info[2].split(' ')))\n data.append((info[0], info[1], key_values))\n else:\n data.append((info[0], info[1], []))\n return data\n\n def unload_pa_modules(self):\n \"\"\"\n Unloads all Lyrebird null sinks.\n \"\"\"\n modules = self.get_pactl_modules()\n lyrebird_module_ids = []\n for module in modules:\n if len(module) < 3:\n continue\n if len(module[2]) < 1:\n continue\n if module[1] == 'module-null-sink':\n sink_name = self.get_sink_name(module[2][0])\n if sink_name == 'Lyrebird-Output':\n lyrebird_module_ids.append(module[0])\n elif module[1] == 'module-remap-source':\n sink_name = self.get_sink_name(module[2][0])\n if sink_name == 'Lyrebird-Input':\n lyrebird_module_ids.append(module[0])\n for id in lyrebird_module_ids:\n subprocess.run(['pactl', 'unload-module', str(id)])\n",
"step-4": "import subprocess\n\n\nclass Audio:\n\n def __init__(self):\n self.sox_process = None\n\n def kill_sox(self, timeout=1):\n if self.sox_process is not None:\n self.sox_process.terminate()\n try:\n self.sox_process.wait(timeout=timeout)\n except subprocess.TimeoutExpired:\n self.sox_process.kill()\n self.sox_process.wait(timeout=timeout)\n self.sox_process = None\n\n def run_sox(self, scale, preset, buffer=20):\n \"\"\"\n Builds and returns a sox command from a preset object\n \"\"\"\n buffer = 17\n multiplier = 100\n command_effects = []\n command_effects += ['pitch', str(scale * multiplier)]\n if preset.volume_boost != None:\n command_effects += ['vol', str(preset.volume_boost) + 'dB']\n else:\n command_effects += ['vol', '0']\n if preset.downsample_amount != None:\n command_effects += ['downsample', str(preset.downsample_amount)]\n else:\n command_effects += ['downsample', '1']\n command = ['sox', '--buffer', str(buffer), '-q', '-t', 'pulseaudio',\n 'default', '-t', 'pulseaudio', 'Lyrebird-Output'] + command_effects\n self.sox_process = subprocess.Popen(command)\n\n def get_sink_name(self, tuple):\n if tuple[0] == 'sink_name':\n return tuple[1]\n elif tuple[0] == 'source_name':\n return tuple[1]\n else:\n return None\n\n def load_pa_modules(self):\n self.null_sink = subprocess.check_call(\n 'pactl load-module module-null-sink sink_name=Lyrebird-Output node.description=\"Lyrebird Output\"'\n .split(' '))\n self.remap_sink = subprocess.check_call(\n 'pactl load-module module-remap-source source_name=Lyrebird-Input master=Lyrebird-Output.monitor node.description=\"Lyrebird Virtual Input\"'\n .split(' '))\n\n def get_pactl_modules(self):\n \"\"\"\n Parses `pactl info short` into tuples containing the module ID,\n the module type and the attributes of the module. 
It is designed\n only for named modules and as such junk data may be included in\n the returned list.\n \n Returns an array of tuples that take the form:\n (module_id (str), module_type (str), attributes (attribute tuples))\n \n The attribute tuples:\n (key (str), value (str))\n \n An example output might look like:\n [\n ( '30', 'module-null-sink', [('sink_name', 'Lyrebird-Output')] ),\n ( '31', 'module-remap-source', [('source_name', 'Lyrebird-Input'), ('master', 'Lyrebird-Output.monitor')] )\n ]\n \"\"\"\n pactl_list = subprocess.run(['pactl', 'list', 'short'],\n capture_output=True, encoding='utf8')\n lines = pactl_list.stdout\n data = []\n split_lines = lines.split('\\n')\n for line in split_lines:\n info = line.split('\\t')\n if len(info) <= 2:\n continue\n if info[2] and len(info[2]) > 0:\n key_values = list(map(lambda key_value: tuple(key_value.\n split('=')), info[2].split(' ')))\n data.append((info[0], info[1], key_values))\n else:\n data.append((info[0], info[1], []))\n return data\n\n def unload_pa_modules(self):\n \"\"\"\n Unloads all Lyrebird null sinks.\n \"\"\"\n modules = self.get_pactl_modules()\n lyrebird_module_ids = []\n for module in modules:\n if len(module) < 3:\n continue\n if len(module[2]) < 1:\n continue\n if module[1] == 'module-null-sink':\n sink_name = self.get_sink_name(module[2][0])\n if sink_name == 'Lyrebird-Output':\n lyrebird_module_ids.append(module[0])\n elif module[1] == 'module-remap-source':\n sink_name = self.get_sink_name(module[2][0])\n if sink_name == 'Lyrebird-Input':\n lyrebird_module_ids.append(module[0])\n for id in lyrebird_module_ids:\n subprocess.run(['pactl', 'unload-module', str(id)])\n",
"step-5": "import subprocess\n\nclass Audio:\n def __init__(self):\n self.sox_process = None\n\n def kill_sox(self, timeout=1):\n if self.sox_process is not None:\n self.sox_process.terminate()\n try:\n self.sox_process.wait(timeout=timeout)\n except subprocess.TimeoutExpired:\n self.sox_process.kill()\n self.sox_process.wait(timeout=timeout)\n self.sox_process = None\n\n # trying a lower buffer size\n def run_sox(self, scale, preset, buffer=20):\n '''\n Builds and returns a sox command from a preset object\n '''\n buffer = 17\n multiplier = 100\n command_effects = []\n\n command_effects += [\"pitch\", str(scale * multiplier)]\n\n # Volume boosting\n if preset.volume_boost != None:\n command_effects += [\"vol\", str(preset.volume_boost) + \"dB\"]\n else:\n # Fix a bug where SoX uses last given volumne\n command_effects += [\"vol\", \"0\"]\n\n # Downsampling\n if preset.downsample_amount != None:\n command_effects += [\"downsample\", str(preset.downsample_amount)]\n else:\n # Append downsample of 1 to fix a bug where the downsample isn't being reverted\n # when we disable the effect with it on.\n command_effects += [\"downsample\", \"1\"]\n\n command = [\"sox\", \"--buffer\", str(buffer), \"-q\", \"-t\", \"pulseaudio\", \"default\", \"-t\", \"pulseaudio\", \"Lyrebird-Output\"] + command_effects\n self.sox_process = subprocess.Popen(command)\n\n def get_sink_name(self, tuple):\n if tuple[0] == \"sink_name\":\n return tuple[1]\n elif tuple[0] == \"source_name\":\n return tuple[1]\n else:\n return None\n\n def load_pa_modules(self):\n self.null_sink = subprocess.check_call(\n 'pactl load-module module-null-sink sink_name=Lyrebird-Output node.description=\"Lyrebird Output\"'.split(' ')\n )\n self.remap_sink = subprocess.check_call(\n 'pactl load-module module-remap-source source_name=Lyrebird-Input master=Lyrebird-Output.monitor node.description=\"Lyrebird Virtual Input\"'\\\n .split(' ')\n )\n\n def get_pactl_modules(self):\n '''\n Parses `pactl info short` into tuples 
containing the module ID,\n the module type and the attributes of the module. It is designed\n only for named modules and as such junk data may be included in\n the returned list.\n \n Returns an array of tuples that take the form:\n (module_id (str), module_type (str), attributes (attribute tuples))\n \n The attribute tuples:\n (key (str), value (str))\n \n An example output might look like:\n [\n ( '30', 'module-null-sink', [('sink_name', 'Lyrebird-Output')] ),\n ( '31', 'module-remap-source', [('source_name', 'Lyrebird-Input'), ('master', 'Lyrebird-Output.monitor')] )\n ]\n '''\n pactl_list = subprocess.run([\"pactl\", \"list\", \"short\"], capture_output=True, encoding=\"utf8\")\n lines = pactl_list.stdout\n data = []\n split_lines = lines.split(\"\\n\")\n for line in split_lines:\n info = line.split(\"\\t\")\n if len(info) <= 2:\n continue\n \n if info[2] and len(info[2]) > 0:\n key_values = list(map(lambda key_value: tuple(key_value.split(\"=\")), info[2].split(\" \")))\n data.append((info[0], info[1], key_values))\n else:\n data.append((info[0], info[1], []))\n return data\n\n def unload_pa_modules(self):\n '''\n Unloads all Lyrebird null sinks.\n '''\n modules = self.get_pactl_modules()\n lyrebird_module_ids = []\n for module in modules:\n if len(module) < 3:\n continue;\n if len(module[2]) < 1:\n continue;\n\n if module[1] == \"module-null-sink\":\n sink_name = self.get_sink_name(module[2][0])\n if sink_name == \"Lyrebird-Output\":\n lyrebird_module_ids.append(module[0])\n elif module[1] == \"module-remap-source\":\n sink_name = self.get_sink_name(module[2][0])\n if sink_name == \"Lyrebird-Input\":\n lyrebird_module_ids.append(module[0])\n\n for id in lyrebird_module_ids:\n subprocess.run([\"pactl\", \"unload-module\", str(id)])\n",
"step-ids": [
1,
6,
8,
9,
10
]
}
|
[
1,
6,
8,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
dbutils.widgets.removeAll()
dbutils.widgets.text('input_path', 'Not found', 'input_path')
<|reserved_special_token_0|>
dbutils.widgets.text('model_path', 'Not found', 'model_path')
<|reserved_special_token_0|>
if test:
print(dbutils.widgets.get('input_path'))
print(dbutils.widgets.get('model_path'))
if input_path == 'Not found':
input_path = '/mnt/<mount-name>/<path>/temperature/data/*.csv'
if model_path == 'Not found':
model_path = (
'/dbfs/mnt/<mount-name>/<path>/temperature/model/temperature-model.pkl'
)
<|reserved_special_token_0|>
if test:
display(input_df)
<|reserved_special_token_0|>
if test:
display(input_df)
<|reserved_special_token_0|>
for i in range(div_data.shape[1] - 6):
if X is None:
X = div_data[:, i:i + 3]
y = div_data[:, i + 3]
elif None not in div_data[:, i:i + 3] or None not in div_data[:, i + 3]:
X = np.concatenate((X, div_data[:, i:i + 3]), axis=0)
y = np.concatenate((y, div_data[:, i + 3]), axis=0)
<|reserved_special_token_0|>
if test:
print(X_train)
<|reserved_special_token_0|>
clf.fit(X_train, y_train)
<|reserved_special_token_0|>
mean_absolute_error(y_test, y_pred)
pickle.dump(clf, open(model_path, 'wb'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Databricks job script: fit an SVR that predicts a day's mean temperature
# from the previous three days, then pickle the trained model to DBFS.
# Toggle for interactive debugging output (print/display calls below).
test = True
# Read input/output locations from notebook widgets.
dbutils.widgets.removeAll()
dbutils.widgets.text('input_path', 'Not found', 'input_path')
input_path = dbutils.widgets.get('input_path')
dbutils.widgets.text('model_path', 'Not found', 'model_path')
model_path = dbutils.widgets.get('model_path')
if test:
    print(dbutils.widgets.get('input_path'))
    print(dbutils.widgets.get('model_path'))
    # NOTE(review): these 'Not found' fallbacks only apply when test is
    # True -- confirm non-test runs always supply both widget values.
    if input_path == 'Not found':
        input_path = '/mnt/<mount-name>/<path>/temperature/data/*.csv'
    if model_path == 'Not found':
        model_path = (
            '/dbfs/mnt/<mount-name>/<path>/temperature/model/temperature-model.pkl'
            )
# Load the raw CSVs with Spark, inferring column types from the data.
input_df = spark.read.option('inferSchema', 'true').option('header', 'true'
    ).csv(input_path)
if test:
    display(input_df)
# Key each row by a combined Year+Month string; keep only what the pivot needs.
input_df = input_df.withColumn('Year_Month', concat(col('Year'), col('Month')))
cols = ['Year_Month', 'Day', 'Mean_Temperature']
input_df = input_df[cols]
if test:
    display(input_df)
# Pivot to one row per month with one column per day-of-month.
input_pivot_df = input_df.groupBy('Year_Month').pivot('Day').sum(
    'Mean_Temperature')
# Collect to a NumPy matrix (rows = months, columns = days); missing days
# come back as None.
div_data = np.asarray(input_pivot_df.select([c for c in input_pivot_df.
    columns if c not in {'Year_Month'}]).collect())
# Build a sliding-window training set: columns i..i+2 are the features (X)
# and column i+3 is the target (y) for each window position i.
X = None
y = None
for i in range(div_data.shape[1] - 6):
    if X is None:
        # First window seeds the arrays (no gap check on this one).
        X = div_data[:, i:i + 3]
        y = div_data[:, i + 3]
    # NOTE(review): `or` admits windows where one side still contains None —
    # looks like `and` was intended; confirm before changing.
    elif None not in div_data[:, i:i + 3] or None not in div_data[:, i + 3]:
        X = np.concatenate((X, div_data[:, i:i + 3]), axis=0)
        y = np.concatenate((y, div_data[:, i + 3]), axis=0)
# Hold out 20% of the windows for evaluation; fixed seed for repeatability.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
    random_state=42)
if test:
    print(X_train)
# Train the support-vector regressor and score mean absolute error.
clf = SVR(gamma='auto', C=0.1, epsilon=0.2)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
mean_absolute_error(y_test, y_pred)
# Persist the fitted model for the downstream scoring job.
pickle.dump(clf, open(model_path, 'wb'))
<|reserved_special_token_1|>
# Databricks job script: fit an SVR that predicts a day's mean temperature
# from the previous three days, then pickle the trained model to DBFS.
import numpy as np
import pickle
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error
from pyspark.sql.functions import split, concat, col
from sklearn.svm import SVR
# Toggle for interactive debugging output (print/display calls below).
test = True
# Read input/output locations from notebook widgets.
dbutils.widgets.removeAll()
dbutils.widgets.text('input_path', 'Not found', 'input_path')
input_path = dbutils.widgets.get('input_path')
dbutils.widgets.text('model_path', 'Not found', 'model_path')
model_path = dbutils.widgets.get('model_path')
if test:
    print(dbutils.widgets.get('input_path'))
    print(dbutils.widgets.get('model_path'))
    # NOTE(review): these 'Not found' fallbacks only apply when test is
    # True -- confirm non-test runs always supply both widget values.
    if input_path == 'Not found':
        input_path = '/mnt/<mount-name>/<path>/temperature/data/*.csv'
    if model_path == 'Not found':
        model_path = (
            '/dbfs/mnt/<mount-name>/<path>/temperature/model/temperature-model.pkl'
            )
# Load the raw CSVs with Spark, inferring column types from the data.
input_df = spark.read.option('inferSchema', 'true').option('header', 'true'
    ).csv(input_path)
if test:
    display(input_df)
# Key each row by a combined Year+Month string; keep only what the pivot needs.
input_df = input_df.withColumn('Year_Month', concat(col('Year'), col('Month')))
cols = ['Year_Month', 'Day', 'Mean_Temperature']
input_df = input_df[cols]
if test:
    display(input_df)
# Pivot to one row per month with one column per day-of-month.
input_pivot_df = input_df.groupBy('Year_Month').pivot('Day').sum(
    'Mean_Temperature')
# Collect to a NumPy matrix (rows = months, columns = days); missing days
# come back as None.
div_data = np.asarray(input_pivot_df.select([c for c in input_pivot_df.
    columns if c not in {'Year_Month'}]).collect())
# Build a sliding-window training set: columns i..i+2 are the features (X)
# and column i+3 is the target (y) for each window position i.
X = None
y = None
for i in range(div_data.shape[1] - 6):
    if X is None:
        # First window seeds the arrays (no gap check on this one).
        X = div_data[:, i:i + 3]
        y = div_data[:, i + 3]
    # NOTE(review): `or` admits windows where one side still contains None —
    # looks like `and` was intended; confirm before changing.
    elif None not in div_data[:, i:i + 3] or None not in div_data[:, i + 3]:
        X = np.concatenate((X, div_data[:, i:i + 3]), axis=0)
        y = np.concatenate((y, div_data[:, i + 3]), axis=0)
# Hold out 20% of the windows for evaluation; fixed seed for repeatability.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
    random_state=42)
if test:
    print(X_train)
# Train the support-vector regressor and score mean absolute error.
clf = SVR(gamma='auto', C=0.1, epsilon=0.2)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
mean_absolute_error(y_test, y_pred)
# Persist the fitted model for the downstream scoring job.
pickle.dump(clf, open(model_path, 'wb'))
<|reserved_special_token_1|>
#!/usr/bin/env python
# coding: utf-8

# Databricks notebook export: fit an SVR that predicts a day's mean
# temperature from the previous three days, then pickle the model to DBFS.

# In[ ]:


import numpy as np
import pickle
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error
from pyspark.sql.functions import split, concat,col
from sklearn.svm import SVR

# Toggle for interactive debugging output (print/display calls below).
test = True


# In[ ]:


# Read input/output locations from notebook widgets.
dbutils.widgets.removeAll()

dbutils.widgets.text("input_path", "Not found", "input_path")
input_path = dbutils.widgets.get("input_path")

dbutils.widgets.text("model_path", "Not found", "model_path")
model_path = dbutils.widgets.get("model_path")

if test:
    print(dbutils.widgets.get("input_path"))
    print(dbutils.widgets.get("model_path"))

    # NOTE(review): these 'Not found' fallbacks only apply when test is
    # True -- confirm non-test runs always supply both widget values.
    if input_path == 'Not found':
        input_path = '/mnt/<mount-name>/<path>/temperature/data/*.csv'
    if model_path == 'Not found':
        model_path = '/dbfs/mnt/<mount-name>/<path>/temperature/model/temperature-model.pkl'


# In[ ]:


# Load the raw CSVs with Spark, inferring column types from the data.
input_df = spark.read.option("inferSchema","true").option("header", "true").csv(input_path)

if test:
    display(input_df)


# In[ ]:


# Key each row by a combined Year+Month string; keep only what the pivot needs.
input_df = input_df.withColumn('Year_Month', concat(col('Year'), col('Month')))
cols = ['Year_Month','Day','Mean_Temperature']
input_df = input_df[cols]

if test:
    display(input_df)


# In[ ]:


# Pivot to one row per month with one column per day-of-month.
input_pivot_df = input_df.groupBy("Year_Month").pivot("Day").sum("Mean_Temperature")


# In[ ]:


# Collect to a NumPy matrix (rows = months, columns = days); missing days
# come back as None.
div_data = np.asarray(input_pivot_df.select([c for c in input_pivot_df.columns if c not in {'Year_Month'}]).collect())

# Build a sliding-window training set: columns i..i+2 are the features (X)
# and column i+3 is the target (y) for each window position i.
X = None; y = None
for i in range(div_data.shape[1]-6):
    if X is None:
        # First window seeds the arrays (no gap check on this one).
        X = div_data[:, i:i+3]
        y = div_data[:, i+3]
    else:
        # NOTE(review): `or` admits windows where one side still contains
        # None — looks like `and` was intended; confirm before changing.
        if None not in div_data[:, i:i+3] or None not in div_data[:, i+3]:
            X = np.concatenate((X, div_data[:, i:i+3]), axis=0)
            y = np.concatenate((y, div_data[:, i+3]), axis=0)

# Hold out 20% of the windows for evaluation; fixed seed for repeatability.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)


# In[ ]:


if test:
    print(X_train)


# In[ ]:


# Train the support-vector regressor and score mean absolute error.
clf = SVR(gamma='auto', C=0.1, epsilon=0.2)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
mean_absolute_error(y_test, y_pred)


# In[ ]:


# Persist the fitted model for the downstream scoring job.
pickle.dump(clf, open(model_path, 'wb'))
|
flexible
|
{
"blob_id": "e48addecdde632607a9c782ff78a769122daab6f",
"index": 1738,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndbutils.widgets.removeAll()\ndbutils.widgets.text('input_path', 'Not found', 'input_path')\n<mask token>\ndbutils.widgets.text('model_path', 'Not found', 'model_path')\n<mask token>\nif test:\n print(dbutils.widgets.get('input_path'))\n print(dbutils.widgets.get('model_path'))\n if input_path == 'Not found':\n input_path = '/mnt/<mount-name>/<path>/temperature/data/*.csv'\n if model_path == 'Not found':\n model_path = (\n '/dbfs/mnt/<mount-name>/<path>/temperature/model/temperature-model.pkl'\n )\n<mask token>\nif test:\n display(input_df)\n<mask token>\nif test:\n display(input_df)\n<mask token>\nfor i in range(div_data.shape[1] - 6):\n if X is None:\n X = div_data[:, i:i + 3]\n y = div_data[:, i + 3]\n elif None not in div_data[:, i:i + 3] or None not in div_data[:, i + 3]:\n X = np.concatenate((X, div_data[:, i:i + 3]), axis=0)\n y = np.concatenate((y, div_data[:, i + 3]), axis=0)\n<mask token>\nif test:\n print(X_train)\n<mask token>\nclf.fit(X_train, y_train)\n<mask token>\nmean_absolute_error(y_test, y_pred)\npickle.dump(clf, open(model_path, 'wb'))\n",
"step-3": "<mask token>\ntest = True\ndbutils.widgets.removeAll()\ndbutils.widgets.text('input_path', 'Not found', 'input_path')\ninput_path = dbutils.widgets.get('input_path')\ndbutils.widgets.text('model_path', 'Not found', 'model_path')\nmodel_path = dbutils.widgets.get('model_path')\nif test:\n print(dbutils.widgets.get('input_path'))\n print(dbutils.widgets.get('model_path'))\n if input_path == 'Not found':\n input_path = '/mnt/<mount-name>/<path>/temperature/data/*.csv'\n if model_path == 'Not found':\n model_path = (\n '/dbfs/mnt/<mount-name>/<path>/temperature/model/temperature-model.pkl'\n )\ninput_df = spark.read.option('inferSchema', 'true').option('header', 'true'\n ).csv(input_path)\nif test:\n display(input_df)\ninput_df = input_df.withColumn('Year_Month', concat(col('Year'), col('Month')))\ncols = ['Year_Month', 'Day', 'Mean_Temperature']\ninput_df = input_df[cols]\nif test:\n display(input_df)\ninput_pivot_df = input_df.groupBy('Year_Month').pivot('Day').sum(\n 'Mean_Temperature')\ndiv_data = np.asarray(input_pivot_df.select([c for c in input_pivot_df.\n columns if c not in {'Year_Month'}]).collect())\nX = None\ny = None\nfor i in range(div_data.shape[1] - 6):\n if X is None:\n X = div_data[:, i:i + 3]\n y = div_data[:, i + 3]\n elif None not in div_data[:, i:i + 3] or None not in div_data[:, i + 3]:\n X = np.concatenate((X, div_data[:, i:i + 3]), axis=0)\n y = np.concatenate((y, div_data[:, i + 3]), axis=0)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,\n random_state=42)\nif test:\n print(X_train)\nclf = SVR(gamma='auto', C=0.1, epsilon=0.2)\nclf.fit(X_train, y_train)\ny_pred = clf.predict(X_test)\nmean_absolute_error(y_test, y_pred)\npickle.dump(clf, open(model_path, 'wb'))\n",
"step-4": "import numpy as np\nimport pickle\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_absolute_error\nfrom pyspark.sql.functions import split, concat, col\nfrom sklearn.svm import SVR\ntest = True\ndbutils.widgets.removeAll()\ndbutils.widgets.text('input_path', 'Not found', 'input_path')\ninput_path = dbutils.widgets.get('input_path')\ndbutils.widgets.text('model_path', 'Not found', 'model_path')\nmodel_path = dbutils.widgets.get('model_path')\nif test:\n print(dbutils.widgets.get('input_path'))\n print(dbutils.widgets.get('model_path'))\n if input_path == 'Not found':\n input_path = '/mnt/<mount-name>/<path>/temperature/data/*.csv'\n if model_path == 'Not found':\n model_path = (\n '/dbfs/mnt/<mount-name>/<path>/temperature/model/temperature-model.pkl'\n )\ninput_df = spark.read.option('inferSchema', 'true').option('header', 'true'\n ).csv(input_path)\nif test:\n display(input_df)\ninput_df = input_df.withColumn('Year_Month', concat(col('Year'), col('Month')))\ncols = ['Year_Month', 'Day', 'Mean_Temperature']\ninput_df = input_df[cols]\nif test:\n display(input_df)\ninput_pivot_df = input_df.groupBy('Year_Month').pivot('Day').sum(\n 'Mean_Temperature')\ndiv_data = np.asarray(input_pivot_df.select([c for c in input_pivot_df.\n columns if c not in {'Year_Month'}]).collect())\nX = None\ny = None\nfor i in range(div_data.shape[1] - 6):\n if X is None:\n X = div_data[:, i:i + 3]\n y = div_data[:, i + 3]\n elif None not in div_data[:, i:i + 3] or None not in div_data[:, i + 3]:\n X = np.concatenate((X, div_data[:, i:i + 3]), axis=0)\n y = np.concatenate((y, div_data[:, i + 3]), axis=0)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,\n random_state=42)\nif test:\n print(X_train)\nclf = SVR(gamma='auto', C=0.1, epsilon=0.2)\nclf.fit(X_train, y_train)\ny_pred = clf.predict(X_test)\nmean_absolute_error(y_test, y_pred)\npickle.dump(clf, open(model_path, 'wb'))\n",
"step-5": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport numpy as np\nimport pickle\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_absolute_error\nfrom pyspark.sql.functions import split, concat,col\nfrom sklearn.svm import SVR\n\ntest = True\n\n\n# In[ ]:\n\n\ndbutils.widgets.removeAll()\n\ndbutils.widgets.text(\"input_path\", \"Not found\", \"input_path\")\ninput_path = dbutils.widgets.get(\"input_path\")\n\ndbutils.widgets.text(\"model_path\", \"Not found\", \"model_path\")\nmodel_path = dbutils.widgets.get(\"model_path\")\n\nif test:\n print(dbutils.widgets.get(\"input_path\"))\n print(dbutils.widgets.get(\"model_path\"))\n \n if input_path == 'Not found':\n input_path = '/mnt/<mount-name>/<path>/temperature/data/*.csv'\n if model_path == 'Not found':\n model_path = '/dbfs/mnt/<mount-name>/<path>/temperature/model/temperature-model.pkl'\n\n\n# In[ ]:\n\n\ninput_df = spark.read.option(\"inferSchema\",\"true\").option(\"header\", \"true\").csv(input_path)\n\nif test:\n display(input_df)\n\n\n# In[ ]:\n\n\ninput_df = input_df.withColumn('Year_Month', concat(col('Year'), col('Month')))\ncols = ['Year_Month','Day','Mean_Temperature']\ninput_df = input_df[cols]\n\nif test:\n display(input_df)\n\n\n# In[ ]:\n\n\ninput_pivot_df = input_df.groupBy(\"Year_Month\").pivot(\"Day\").sum(\"Mean_Temperature\")\n\n\n# In[ ]:\n\n\ndiv_data = np.asarray(input_pivot_df.select([c for c in input_pivot_df.columns if c not in {'Year_Month'}]).collect())\n\nX = None; y = None\nfor i in range(div_data.shape[1]-6):\n if X is None:\n X = div_data[:, i:i+3]\n y = div_data[:, i+3]\n else:\n if None not in div_data[:, i:i+3] or None not in div_data[:, i+3]:\n X = np.concatenate((X, div_data[:, i:i+3]), axis=0)\n y = np.concatenate((y, div_data[:, i+3]), axis=0)\n \nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n\n\n# In[ ]:\n\n\nif test:\n print(X_train)\n\n\n# In[ ]:\n\n\nclf = 
SVR(gamma='auto', C=0.1, epsilon=0.2)\nclf.fit(X_train, y_train) \ny_pred = clf.predict(X_test)\nmean_absolute_error(y_test, y_pred)\n\n\n# In[ ]:\n\n\npickle.dump(clf, open(model_path, 'wb'))\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from pymongo import MongoClient, GEOSPHERE, GEO2D
import os, sys, json, pprint
sys.path.insert(0, '../utils')
import path_functions
client = MongoClient( 'localhost', 27017 )
db = client[ 'nfcdata' ]
json_files_path_list = path_functions.get_json_files('../../ftp-data/geojson-files/quikscat-l2b12')
for json_file in json_files_path_list:
current_collection = 'GeoJSON-quikscat-l2b12-' + path_functions.get_file_name( json_file )
print(current_collection)
collection_list = db.collection_names()
if current_collection not in collection_list:
collection = db[current_collection]
collection.create_index([( "geometry", GEOSPHERE )])
json_docs = json.load( open( json_file ) )
for doc in json_docs['features']:
collection.insert( doc )
# -- DROP COLLECTIONS --
# collection_list = db.collection_names()
# for collection in collection_list:
# db.drop_collection(collection)
# -- PRINT COLLECTIONS --
print( db.collection_names() )
# # -- PRINT INDEXES --
# collection_list = db.collection_names()
# for current_collection in collection_list:
# collection = db[current_collection]
# print( 'Index: ', sorted( list( collection.index_information() ) ) )
# -- PRINT DATA --
# collection = db['GeoJSON-quikscat-l2b12-005']
# cursor = collection.find({})
# for document in cursor:
# print('\n - - - - - - - DOCUMENTO - - - - - - - \n')
# print(document)
# -- SPATIAL QUERYING USING 2D INDEX
collection_list = db.collection_names()
for current_collection in collection_list:
collection = db[ current_collection ]
for doc in collection.find(
{ "geometry": {
"$geoWithin": {
"$geometry" : {
"type": "Polygon" ,
"coordinates" : [
[
[-77.49, -89.70],
[0.00, 0.00],
[10.00, 10.00],
[-77.49, -89.70]
]
]
} } } } ):
pprint.pprint( doc )
# -- TEMPORAL QUERYING USING 2D INDEX
collection_list = db.collection_names()
for current_collection in collection_list:
collection = db[current_collection]
for doc in collection.find( { "properties.time": 2009002 } ).limit(3):
pprint.pprint(doc)
# -- TEMPORAL-SPATIAL QUERYING USING 2D INDEX
collection_list = db.collection_names()
for current_collection in collection_list:
collection = db[ current_collection ]
for doc in collection.find(
{ "geometry": {
"$geoWithin": {
"$geometry" : {
"type": "Polygon" ,
"coordinates" : [
[
[-77.49, -89.70],
[0.00, 0.00],
[10.00, 10.00],
[-77.49, -89.70]
]
]
} } }, "properties.time": 2009003 } ):
pprint.pprint( doc )
# collection = db['quikscat-l2b12-001']
# cursor = collection.find({})
# for document in cursor:
# pprint.pprint( document )
|
normal
|
{
"blob_id": "cceda9a8a0188499ae0aa588701bb8104b5ed313",
"index": 1041,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.insert(0, '../utils')\n<mask token>\nfor json_file in json_files_path_list:\n current_collection = ('GeoJSON-quikscat-l2b12-' + path_functions.\n get_file_name(json_file))\n print(current_collection)\n collection_list = db.collection_names()\n if current_collection not in collection_list:\n collection = db[current_collection]\n collection.create_index([('geometry', GEOSPHERE)])\n json_docs = json.load(open(json_file))\n for doc in json_docs['features']:\n collection.insert(doc)\nprint(db.collection_names())\n<mask token>\nfor current_collection in collection_list:\n collection = db[current_collection]\n for doc in collection.find({'geometry': {'$geoWithin': {'$geometry': {\n 'type': 'Polygon', 'coordinates': [[[-77.49, -89.7], [0.0, 0.0], [\n 10.0, 10.0], [-77.49, -89.7]]]}}}}):\n pprint.pprint(doc)\n<mask token>\nfor current_collection in collection_list:\n collection = db[current_collection]\n for doc in collection.find({'properties.time': 2009002}).limit(3):\n pprint.pprint(doc)\n<mask token>\nfor current_collection in collection_list:\n collection = db[current_collection]\n for doc in collection.find({'geometry': {'$geoWithin': {'$geometry': {\n 'type': 'Polygon', 'coordinates': [[[-77.49, -89.7], [0.0, 0.0], [\n 10.0, 10.0], [-77.49, -89.7]]]}}}, 'properties.time': 2009003}):\n pprint.pprint(doc)\n",
"step-3": "<mask token>\nsys.path.insert(0, '../utils')\n<mask token>\nclient = MongoClient('localhost', 27017)\ndb = client['nfcdata']\njson_files_path_list = path_functions.get_json_files(\n '../../ftp-data/geojson-files/quikscat-l2b12')\nfor json_file in json_files_path_list:\n current_collection = ('GeoJSON-quikscat-l2b12-' + path_functions.\n get_file_name(json_file))\n print(current_collection)\n collection_list = db.collection_names()\n if current_collection not in collection_list:\n collection = db[current_collection]\n collection.create_index([('geometry', GEOSPHERE)])\n json_docs = json.load(open(json_file))\n for doc in json_docs['features']:\n collection.insert(doc)\nprint(db.collection_names())\ncollection_list = db.collection_names()\nfor current_collection in collection_list:\n collection = db[current_collection]\n for doc in collection.find({'geometry': {'$geoWithin': {'$geometry': {\n 'type': 'Polygon', 'coordinates': [[[-77.49, -89.7], [0.0, 0.0], [\n 10.0, 10.0], [-77.49, -89.7]]]}}}}):\n pprint.pprint(doc)\ncollection_list = db.collection_names()\nfor current_collection in collection_list:\n collection = db[current_collection]\n for doc in collection.find({'properties.time': 2009002}).limit(3):\n pprint.pprint(doc)\ncollection_list = db.collection_names()\nfor current_collection in collection_list:\n collection = db[current_collection]\n for doc in collection.find({'geometry': {'$geoWithin': {'$geometry': {\n 'type': 'Polygon', 'coordinates': [[[-77.49, -89.7], [0.0, 0.0], [\n 10.0, 10.0], [-77.49, -89.7]]]}}}, 'properties.time': 2009003}):\n pprint.pprint(doc)\n",
"step-4": "from pymongo import MongoClient, GEOSPHERE, GEO2D\nimport os, sys, json, pprint\nsys.path.insert(0, '../utils')\nimport path_functions\nclient = MongoClient('localhost', 27017)\ndb = client['nfcdata']\njson_files_path_list = path_functions.get_json_files(\n '../../ftp-data/geojson-files/quikscat-l2b12')\nfor json_file in json_files_path_list:\n current_collection = ('GeoJSON-quikscat-l2b12-' + path_functions.\n get_file_name(json_file))\n print(current_collection)\n collection_list = db.collection_names()\n if current_collection not in collection_list:\n collection = db[current_collection]\n collection.create_index([('geometry', GEOSPHERE)])\n json_docs = json.load(open(json_file))\n for doc in json_docs['features']:\n collection.insert(doc)\nprint(db.collection_names())\ncollection_list = db.collection_names()\nfor current_collection in collection_list:\n collection = db[current_collection]\n for doc in collection.find({'geometry': {'$geoWithin': {'$geometry': {\n 'type': 'Polygon', 'coordinates': [[[-77.49, -89.7], [0.0, 0.0], [\n 10.0, 10.0], [-77.49, -89.7]]]}}}}):\n pprint.pprint(doc)\ncollection_list = db.collection_names()\nfor current_collection in collection_list:\n collection = db[current_collection]\n for doc in collection.find({'properties.time': 2009002}).limit(3):\n pprint.pprint(doc)\ncollection_list = db.collection_names()\nfor current_collection in collection_list:\n collection = db[current_collection]\n for doc in collection.find({'geometry': {'$geoWithin': {'$geometry': {\n 'type': 'Polygon', 'coordinates': [[[-77.49, -89.7], [0.0, 0.0], [\n 10.0, 10.0], [-77.49, -89.7]]]}}}, 'properties.time': 2009003}):\n pprint.pprint(doc)\n",
"step-5": "\nfrom pymongo import MongoClient, GEOSPHERE, GEO2D\n\nimport os, sys, json, pprint\nsys.path.insert(0, '../utils') \nimport path_functions \n\n\nclient = MongoClient( 'localhost', 27017 )\ndb = client[ 'nfcdata' ]\n\njson_files_path_list = path_functions.get_json_files('../../ftp-data/geojson-files/quikscat-l2b12')\n\nfor json_file in json_files_path_list:\n \n current_collection = 'GeoJSON-quikscat-l2b12-' + path_functions.get_file_name( json_file )\n print(current_collection)\n collection_list = db.collection_names()\n\n if current_collection not in collection_list:\n collection = db[current_collection]\n collection.create_index([( \"geometry\", GEOSPHERE )])\n\n json_docs = json.load( open( json_file ) )\n for doc in json_docs['features']:\n collection.insert( doc )\n\n\n# -- DROP COLLECTIONS --\n# collection_list = db.collection_names()\n# for collection in collection_list:\n# db.drop_collection(collection)\n\n# -- PRINT COLLECTIONS --\nprint( db.collection_names() )\n\n# # -- PRINT INDEXES --\n# collection_list = db.collection_names()\n# for current_collection in collection_list:\n# collection = db[current_collection]\n# print( 'Index: ', sorted( list( collection.index_information() ) ) )\n\n# -- PRINT DATA --\n# collection = db['GeoJSON-quikscat-l2b12-005']\n# cursor = collection.find({})\n# for document in cursor:\n# print('\\n - - - - - - - DOCUMENTO - - - - - - - \\n')\n# print(document) \n\n# -- SPATIAL QUERYING USING 2D INDEX\ncollection_list = db.collection_names()\nfor current_collection in collection_list:\n collection = db[ current_collection ]\n\n for doc in collection.find( \n { \"geometry\": { \n \"$geoWithin\": {\n \"$geometry\" : {\n \"type\": \"Polygon\" , \n \"coordinates\" : [ \n [\n [-77.49, -89.70],\n [0.00, 0.00],\n [10.00, 10.00],\n [-77.49, -89.70]\n ]\n ]\n } } } } ):\n pprint.pprint( doc )\n\n# -- TEMPORAL QUERYING USING 2D INDEX\ncollection_list = db.collection_names()\nfor current_collection in collection_list:\n 
collection = db[current_collection]\n for doc in collection.find( { \"properties.time\": 2009002 } ).limit(3):\n pprint.pprint(doc)\n\n# -- TEMPORAL-SPATIAL QUERYING USING 2D INDEX\ncollection_list = db.collection_names()\nfor current_collection in collection_list:\n collection = db[ current_collection ]\n\n for doc in collection.find( \n { \"geometry\": { \n \"$geoWithin\": {\n \"$geometry\" : {\n \"type\": \"Polygon\" , \n \"coordinates\" : [ \n [\n [-77.49, -89.70],\n [0.00, 0.00],\n [10.00, 10.00],\n [-77.49, -89.70]\n ]\n ]\n } } }, \"properties.time\": 2009003 } ):\n pprint.pprint( doc )\n\n# collection = db['quikscat-l2b12-001']\n# cursor = collection.find({})\n# for document in cursor:\n# pprint.pprint( document )\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
DEFAULT_ROOT_PATH = Path(os.path.expanduser(os.getenv('PLOTTER_ROOT',
'~/.plotter/mainnet'))).resolve()
<|reserved_special_token_1|>
import os
from pathlib import Path
DEFAULT_ROOT_PATH = Path(os.path.expanduser(os.getenv('PLOTTER_ROOT',
'~/.plotter/mainnet'))).resolve()
<|reserved_special_token_1|>
import os
from pathlib import Path
DEFAULT_ROOT_PATH = Path(os.path.expanduser(os.getenv("PLOTTER_ROOT", "~/.plotter/mainnet"))).resolve()
|
flexible
|
{
"blob_id": "3a8164299fa51b7d781f2b80d77cfba05b5f6915",
"index": 4157,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nDEFAULT_ROOT_PATH = Path(os.path.expanduser(os.getenv('PLOTTER_ROOT',\n '~/.plotter/mainnet'))).resolve()\n",
"step-3": "import os\nfrom pathlib import Path\nDEFAULT_ROOT_PATH = Path(os.path.expanduser(os.getenv('PLOTTER_ROOT',\n '~/.plotter/mainnet'))).resolve()\n",
"step-4": "import os\nfrom pathlib import Path\n\nDEFAULT_ROOT_PATH = Path(os.path.expanduser(os.getenv(\"PLOTTER_ROOT\", \"~/.plotter/mainnet\"))).resolve()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for song in songs:
track, title = song
print(' track number {}\t, title {}'.format(track, title))
<|reserved_special_token_1|>
Album, artist, year, songs = 'More Mayhem', 'Imelda May', 2001, ((1,
'pulling the rug'), (2, 'psycho'), (3, 'mayhem'), (4,
'kentisch town waltz'))
for song in songs:
track, title = song
print(' track number {}\t, title {}'.format(track, title))
<|reserved_special_token_1|>
Album,artist,year,songs="More Mayhem","Imelda May",2001,((1,"pulling the rug"),(2,"psycho"),(3,"mayhem"),(4,"kentisch town waltz"))
for song in songs:
track,title=song
print(" track number {}\t, title {}".format(track,title))
|
flexible
|
{
"blob_id": "30f02b956af68960804f0cb57695bdbf8510bc43",
"index": 7290,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor song in songs:\n track, title = song\n print(' track number {}\\t, title {}'.format(track, title))\n",
"step-3": "Album, artist, year, songs = 'More Mayhem', 'Imelda May', 2001, ((1,\n 'pulling the rug'), (2, 'psycho'), (3, 'mayhem'), (4,\n 'kentisch town waltz'))\nfor song in songs:\n track, title = song\n print(' track number {}\\t, title {}'.format(track, title))\n",
"step-4": "Album,artist,year,songs=\"More Mayhem\",\"Imelda May\",2001,((1,\"pulling the rug\"),(2,\"psycho\"),(3,\"mayhem\"),(4,\"kentisch town waltz\"))\nfor song in songs:\n track,title=song\n print(\" track number {}\\t, title {}\".format(track,title))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class DrawingBrush:
def __init__(self, size, color, radius):
self.drawSurface = pygame.Surface(size, pygame.SRCALPHA, 32
).convert_alpha()
self.drawColor = color
self.size = radius
self.winSize = size
self.winSurface = pygame.display.get_surface()
def Draw(self, pos):
pygame.draw.circle(self.drawSurface, self.drawColor, pos, self.size)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DrawingBrush:
def __init__(self, size, color, radius):
self.drawSurface = pygame.Surface(size, pygame.SRCALPHA, 32
).convert_alpha()
self.drawColor = color
self.size = radius
self.winSize = size
self.winSurface = pygame.display.get_surface()
def Draw(self, pos):
pygame.draw.circle(self.drawSurface, self.drawColor, pos, self.size)
def Clear(self):
self.drawSurface = pygame.Surface(self.winSize, pygame.SRCALPHA, 32
).convert_alpha()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DrawingBrush:
def __init__(self, size, color, radius):
self.drawSurface = pygame.Surface(size, pygame.SRCALPHA, 32
).convert_alpha()
self.drawColor = color
self.size = radius
self.winSize = size
self.winSurface = pygame.display.get_surface()
def Draw(self, pos):
pygame.draw.circle(self.drawSurface, self.drawColor, pos, self.size)
def Clear(self):
self.drawSurface = pygame.Surface(self.winSize, pygame.SRCALPHA, 32
).convert_alpha()
def Update(self):
self.winSurface.blit(self.drawSurface, [0, 0])
<|reserved_special_token_1|>
import pygame
class DrawingBrush:
def __init__(self, size, color, radius):
self.drawSurface = pygame.Surface(size, pygame.SRCALPHA, 32
).convert_alpha()
self.drawColor = color
self.size = radius
self.winSize = size
self.winSurface = pygame.display.get_surface()
def Draw(self, pos):
pygame.draw.circle(self.drawSurface, self.drawColor, pos, self.size)
def Clear(self):
self.drawSurface = pygame.Surface(self.winSize, pygame.SRCALPHA, 32
).convert_alpha()
def Update(self):
self.winSurface.blit(self.drawSurface, [0, 0])
<|reserved_special_token_1|>
import pygame
class DrawingBrush():
def __init__(self, size, color, radius):
self.drawSurface = pygame.Surface(size, pygame.SRCALPHA, 32).convert_alpha()
self.drawColor = color
self.size = radius
self.winSize = size
self.winSurface = pygame.display.get_surface()
def Draw(self, pos):
pygame.draw.circle(self.drawSurface, self.drawColor, pos, self.size)
def Clear(self):
self.drawSurface = pygame.Surface(self.winSize, pygame.SRCALPHA, 32).convert_alpha()
def Update(self):
self.winSurface.blit(self.drawSurface, [0,0])
|
flexible
|
{
"blob_id": "45658cdfcd1529bbf803294cd7cec32d6d2c2198",
"index": 7638,
"step-1": "<mask token>\n\n\nclass DrawingBrush:\n\n def __init__(self, size, color, radius):\n self.drawSurface = pygame.Surface(size, pygame.SRCALPHA, 32\n ).convert_alpha()\n self.drawColor = color\n self.size = radius\n self.winSize = size\n self.winSurface = pygame.display.get_surface()\n\n def Draw(self, pos):\n pygame.draw.circle(self.drawSurface, self.drawColor, pos, self.size)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DrawingBrush:\n\n def __init__(self, size, color, radius):\n self.drawSurface = pygame.Surface(size, pygame.SRCALPHA, 32\n ).convert_alpha()\n self.drawColor = color\n self.size = radius\n self.winSize = size\n self.winSurface = pygame.display.get_surface()\n\n def Draw(self, pos):\n pygame.draw.circle(self.drawSurface, self.drawColor, pos, self.size)\n\n def Clear(self):\n self.drawSurface = pygame.Surface(self.winSize, pygame.SRCALPHA, 32\n ).convert_alpha()\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass DrawingBrush:\n\n def __init__(self, size, color, radius):\n self.drawSurface = pygame.Surface(size, pygame.SRCALPHA, 32\n ).convert_alpha()\n self.drawColor = color\n self.size = radius\n self.winSize = size\n self.winSurface = pygame.display.get_surface()\n\n def Draw(self, pos):\n pygame.draw.circle(self.drawSurface, self.drawColor, pos, self.size)\n\n def Clear(self):\n self.drawSurface = pygame.Surface(self.winSize, pygame.SRCALPHA, 32\n ).convert_alpha()\n\n def Update(self):\n self.winSurface.blit(self.drawSurface, [0, 0])\n",
"step-4": "import pygame\n\n\nclass DrawingBrush:\n\n def __init__(self, size, color, radius):\n self.drawSurface = pygame.Surface(size, pygame.SRCALPHA, 32\n ).convert_alpha()\n self.drawColor = color\n self.size = radius\n self.winSize = size\n self.winSurface = pygame.display.get_surface()\n\n def Draw(self, pos):\n pygame.draw.circle(self.drawSurface, self.drawColor, pos, self.size)\n\n def Clear(self):\n self.drawSurface = pygame.Surface(self.winSize, pygame.SRCALPHA, 32\n ).convert_alpha()\n\n def Update(self):\n self.winSurface.blit(self.drawSurface, [0, 0])\n",
"step-5": "import pygame\n\nclass DrawingBrush():\n def __init__(self, size, color, radius):\n self.drawSurface = pygame.Surface(size, pygame.SRCALPHA, 32).convert_alpha()\n self.drawColor = color\n self.size = radius\n self.winSize = size\n self.winSurface = pygame.display.get_surface()\n\n def Draw(self, pos):\n pygame.draw.circle(self.drawSurface, self.drawColor, pos, self.size)\n\n def Clear(self):\n self.drawSurface = pygame.Surface(self.winSize, pygame.SRCALPHA, 32).convert_alpha()\n\n def Update(self):\n self.winSurface.blit(self.drawSurface, [0,0])\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
"""CPU functionality."""
import sys
HLT = 0b00000001
LDI = 0b10000010
PRN = 0b01000111
MUL = 0b10100010
PUSH = 0b01000101
POP = 0b01000110
CMP = 0b10100111
CALL = 0b01010000
RET = 0b00010001
ADD = 0b10100000
CMP = 0b10100111
JMP = 0b01010100
JEQ = 0b01010101
JNE = 0b01010110
AND = 0b10101000
NOT = 0b01101001
OR = 0b10101010
XOR = 0b10101011
SHL = 0b10101100
SHR = 0b10101101
MOD = 0b10100100
class CPU:
"""Main CPU class."""
def __init__(self):
"""Construct a new CPU."""
self.reg = [0] * 8
self.pc = 0
self.ram = [0] * 256
self.running = True
self.reg[7] = 0xf4
self.sp = self.reg[7]
self.fl = 0b00000000
self.branchtable = {}
self.branchtable[HLT] = self.op_hlt
self.branchtable[LDI] = self.op_ldi
self.branchtable[PRN] = self.op_prn
self.branchtable[MUL] = self.op_mul
self.branchtable[PUSH] = self.op_push
self.branchtable[POP] = self.op_pop
self.branchtable[CALL] = self.op_call
self.branchtable[RET] = self.op_ret
self.branchtable[ADD] = self.op_add
self.branchtable[CMP] = self.op_cmp
self.branchtable[JMP] = self.op_jmp
self.branchtable[JEQ] = self.op_jeq
self.branchtable[JNE] = self.op_jne
self.branchtable[AND] = self.op_and
self.branchtable[NOT] = self.op_not
self.branchtable[OR] = self.op_or
self.branchtable[XOR] = self.op_xor
self.branchtable[SHL] = self.op_shl
self.branchtable[SHR] = self.op_shr
self.branchtable[MOD] = self.op_mod
def ram_read(self, MAR):
return self.ram[MAR]
def ram_write(self, MAR, MDR):
self.ram[MAR] = MDR
def op_hlt(self, operand_a, operand_b):
self.running = False
def op_ldi(self, operand_a, operand_b):
self.reg[operand_a] = operand_b
# self.pc += 3
def op_prn(self, operand_a, operand_b):
print('prn:', self.reg[operand_a])
# self.pc += 2
def op_mul(self, operand_a, operand_b):
self.alu('MUL', operand_a, operand_b)
# self.pc += 3
def op_push(self, operand_a, operand_b):
self.sp -= 1
val = self.reg[operand_a]
self.ram_write(self.sp, val)
# self.pc += 2
def op_pop(self, operand_a, operand_b):
self.reg[operand_a] = self.ram_read(self.sp)
# self.pc += 2
self.sp += 1
def op_call(self, operand_a, operand_b):
ret_addr = self.pc + 2
self.sp -= 1
self.ram_write(self.sp, ret_addr) # write sp and pc location to ram
sub_addr = self.reg[operand_a]
self.pc = sub_addr
def op_ret(self, operand_a, operand_b):
ret_addr = self.ram_read(self.sp) # set ret_addr to location in ram
self.sp += 1
self.pc = ret_addr
def op_add(self, operand_a, operand_b):
self.alu('ADD', operand_a, operand_b)
def op_cmp(self, operand_a, operand_b):
self.alu('CMP', operand_a, operand_b)
def op_jmp(self, operand_a, operand_b):
self.pc = self.reg[operand_a]
def op_jeq(self, operand_a, operand_b):
if self.fl == 0b00000001:
self.op_jmp(operand_a, operand_b)
else:
self.pc += 2
def op_jne(self, operand_a, operand_b):
if self.fl != 0b00000001:
self.op_jmp(operand_a, operand_b)
else:
self.pc += 2
def op_and(self, operand_a, operand_b):
self.alu('AND', operand_a, operand_b)
def op_or(self, operand_a, operand_b):
self.alu('ADD', operand_a, operand_b)
def op_xor(self, operand_a, operand_b):
self.alu('CMP', operand_a, operand_b)
def op_not(self, operand_a, operand_b):
self.alu('ADD', operand_a, operand_b)
def op_shl(self, operand_a, operand_b):
self.alu('CMP', operand_a, operand_b)
def op_shr(self, operand_a, operand_b):
self.alu('ADD', operand_a, operand_b)
def op_mod(self, operand_a, operand_b):
self.alu('CMP', operand_a, operand_b)
def load(self, filename):
"""Load a program into memory."""
address = 0
with open(filename) as file:
for line in file:
val = line.split("#")[0].strip()
if val == '':
continue
instruction = int(val, 2)
self.ram[address] = instruction
address += 1
# For now, we've just hardcoded a program:
# program = [
# # From print8.ls8
# 0b10000010, # LDI R0,8
# 0b00000000,
# 0b00001000,
# 0b01000111, # PRN R0
# 0b00000000,
# 0b00000001, # HLT
# ]
# for instruction in program:
# self.ram[address] = instruction
# address += 1
def alu(self, op, reg_a, reg_b):
"""ALU operations."""
if op == 'ADD':
self.reg[reg_a] = self.reg[reg_a] + self.reg[reg_b]
elif op == 'MUL':
self.reg[reg_a] = self.reg[reg_a] * self.reg[reg_b]
elif op == 'CMP':
if self.reg[reg_a] < self.reg[reg_b]:
self.fl = 0b00000100
elif self.reg[reg_a] > self.reg[reg_b]:
self.fl = 0b00000010
elif self.reg[reg_a] == self.reg[reg_b]:
self.fl = 0b00000001
elif op == 'AND':
self.reg[reg_a] = self.reg[reg_a] & self.reg[reg_b]
elif op == 'OR':
self.reg[reg_a] = self.reg[reg_a] | self.reg[reg_b]
elif op == 'XOR':
self.reg[reg_a] = self.reg[reg_a] ^ self.reg[reg_b]
elif op == 'NOT':
self.reg[reg_a] = ~self.reg[reg_a]
elif op == 'SHL':
self.reg[reg_a] = self.reg[reg_a] << self.reg[reg_b]
elif op == 'SHR':
self.reg[reg_a] = self.reg[reg_a] >> self.reg[reg_b]
elif op == 'MOD':
if self.reg[reg_b] == 0:
print('ERROR: divide by 0')
self.op_hlt()
else:
remainder = self.reg[reg_a] % self.reg[reg_b]
self.reg[reg_a] = remainder
else:
raise Exception("Unsupported ALU operation")
def trace(self):
"""
Handy function to print out the CPU state. You might want to call this
from run() if you need help debugging.
"""
print(f"TRACE: %02X | %02X %02X %02X |" % (
self.pc,
# self.fl,
# self.ie,
self.ram_read(self.pc),
self.ram_read(self.pc + 1),
self.ram_read(self.pc + 2)
), end='')
for i in range(8):
print(" %02X" % self.reg[i], end='')
print()
def run(self):
"""Run the CPU."""
self.trace()
while self.running is True:
IR = self.ram_read(self.pc)
operand_a = self.ram_read(self.pc + 1)
operand_b = self.ram_read(self.pc + 2)
# This increments the pc position automatically
op_size = IR >> 6
ins_set = ((IR >> 4) & 0b1) == 1
if not ins_set:
self.pc += op_size + 1
if IR in self.branchtable:
self.branchtable[IR](operand_a, operand_b)
# SAVE WHERE WE'RE COMING FROM TO THE STACK AND SET PC TO WHERE WE'RE GOING
|
normal
|
{
"blob_id": "58d144b2c6c307719cef0b5097945c8206135ccf",
"index": 6048,
"step-1": "<mask token>\n\n\nclass CPU:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def op_ldi(self, operand_a, operand_b):\n self.reg[operand_a] = operand_b\n\n def op_prn(self, operand_a, operand_b):\n print('prn:', self.reg[operand_a])\n\n def op_mul(self, operand_a, operand_b):\n self.alu('MUL', operand_a, operand_b)\n\n def op_push(self, operand_a, operand_b):\n self.sp -= 1\n val = self.reg[operand_a]\n self.ram_write(self.sp, val)\n\n def op_pop(self, operand_a, operand_b):\n self.reg[operand_a] = self.ram_read(self.sp)\n self.sp += 1\n\n def op_call(self, operand_a, operand_b):\n ret_addr = self.pc + 2\n self.sp -= 1\n self.ram_write(self.sp, ret_addr)\n sub_addr = self.reg[operand_a]\n self.pc = sub_addr\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def op_jne(self, operand_a, operand_b):\n if self.fl != 1:\n self.op_jmp(operand_a, operand_b)\n else:\n self.pc += 2\n\n def op_and(self, operand_a, operand_b):\n self.alu('AND', operand_a, operand_b)\n\n def op_or(self, operand_a, operand_b):\n self.alu('ADD', operand_a, operand_b)\n\n def op_xor(self, operand_a, operand_b):\n self.alu('CMP', operand_a, operand_b)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def trace(self):\n \"\"\"\n Handy function to print out the CPU state. 
You might want to call this\n from run() if you need help debugging.\n \"\"\"\n print(f'TRACE: %02X | %02X %02X %02X |' % (self.pc, self.ram_read(\n self.pc), self.ram_read(self.pc + 1), self.ram_read(self.pc + 2\n )), end='')\n for i in range(8):\n print(' %02X' % self.reg[i], end='')\n print()\n\n def run(self):\n \"\"\"Run the CPU.\"\"\"\n self.trace()\n while self.running is True:\n IR = self.ram_read(self.pc)\n operand_a = self.ram_read(self.pc + 1)\n operand_b = self.ram_read(self.pc + 2)\n op_size = IR >> 6\n ins_set = IR >> 4 & 1 == 1\n if not ins_set:\n self.pc += op_size + 1\n if IR in self.branchtable:\n self.branchtable[IR](operand_a, operand_b)\n",
"step-2": "<mask token>\n\n\nclass CPU:\n <mask token>\n\n def __init__(self):\n \"\"\"Construct a new CPU.\"\"\"\n self.reg = [0] * 8\n self.pc = 0\n self.ram = [0] * 256\n self.running = True\n self.reg[7] = 244\n self.sp = self.reg[7]\n self.fl = 0\n self.branchtable = {}\n self.branchtable[HLT] = self.op_hlt\n self.branchtable[LDI] = self.op_ldi\n self.branchtable[PRN] = self.op_prn\n self.branchtable[MUL] = self.op_mul\n self.branchtable[PUSH] = self.op_push\n self.branchtable[POP] = self.op_pop\n self.branchtable[CALL] = self.op_call\n self.branchtable[RET] = self.op_ret\n self.branchtable[ADD] = self.op_add\n self.branchtable[CMP] = self.op_cmp\n self.branchtable[JMP] = self.op_jmp\n self.branchtable[JEQ] = self.op_jeq\n self.branchtable[JNE] = self.op_jne\n self.branchtable[AND] = self.op_and\n self.branchtable[NOT] = self.op_not\n self.branchtable[OR] = self.op_or\n self.branchtable[XOR] = self.op_xor\n self.branchtable[SHL] = self.op_shl\n self.branchtable[SHR] = self.op_shr\n self.branchtable[MOD] = self.op_mod\n <mask token>\n\n def ram_write(self, MAR, MDR):\n self.ram[MAR] = MDR\n\n def op_hlt(self, operand_a, operand_b):\n self.running = False\n\n def op_ldi(self, operand_a, operand_b):\n self.reg[operand_a] = operand_b\n\n def op_prn(self, operand_a, operand_b):\n print('prn:', self.reg[operand_a])\n\n def op_mul(self, operand_a, operand_b):\n self.alu('MUL', operand_a, operand_b)\n\n def op_push(self, operand_a, operand_b):\n self.sp -= 1\n val = self.reg[operand_a]\n self.ram_write(self.sp, val)\n\n def op_pop(self, operand_a, operand_b):\n self.reg[operand_a] = self.ram_read(self.sp)\n self.sp += 1\n\n def op_call(self, operand_a, operand_b):\n ret_addr = self.pc + 2\n self.sp -= 1\n self.ram_write(self.sp, ret_addr)\n sub_addr = self.reg[operand_a]\n self.pc = sub_addr\n\n def op_ret(self, operand_a, operand_b):\n ret_addr = self.ram_read(self.sp)\n self.sp += 1\n self.pc = ret_addr\n <mask token>\n\n def op_cmp(self, operand_a, operand_b):\n 
self.alu('CMP', operand_a, operand_b)\n <mask token>\n <mask token>\n\n def op_jne(self, operand_a, operand_b):\n if self.fl != 1:\n self.op_jmp(operand_a, operand_b)\n else:\n self.pc += 2\n\n def op_and(self, operand_a, operand_b):\n self.alu('AND', operand_a, operand_b)\n\n def op_or(self, operand_a, operand_b):\n self.alu('ADD', operand_a, operand_b)\n\n def op_xor(self, operand_a, operand_b):\n self.alu('CMP', operand_a, operand_b)\n <mask token>\n\n def op_shl(self, operand_a, operand_b):\n self.alu('CMP', operand_a, operand_b)\n\n def op_shr(self, operand_a, operand_b):\n self.alu('ADD', operand_a, operand_b)\n\n def op_mod(self, operand_a, operand_b):\n self.alu('CMP', operand_a, operand_b)\n <mask token>\n\n def alu(self, op, reg_a, reg_b):\n \"\"\"ALU operations.\"\"\"\n if op == 'ADD':\n self.reg[reg_a] = self.reg[reg_a] + self.reg[reg_b]\n elif op == 'MUL':\n self.reg[reg_a] = self.reg[reg_a] * self.reg[reg_b]\n elif op == 'CMP':\n if self.reg[reg_a] < self.reg[reg_b]:\n self.fl = 4\n elif self.reg[reg_a] > self.reg[reg_b]:\n self.fl = 2\n elif self.reg[reg_a] == self.reg[reg_b]:\n self.fl = 1\n elif op == 'AND':\n self.reg[reg_a] = self.reg[reg_a] & self.reg[reg_b]\n elif op == 'OR':\n self.reg[reg_a] = self.reg[reg_a] | self.reg[reg_b]\n elif op == 'XOR':\n self.reg[reg_a] = self.reg[reg_a] ^ self.reg[reg_b]\n elif op == 'NOT':\n self.reg[reg_a] = ~self.reg[reg_a]\n elif op == 'SHL':\n self.reg[reg_a] = self.reg[reg_a] << self.reg[reg_b]\n elif op == 'SHR':\n self.reg[reg_a] = self.reg[reg_a] >> self.reg[reg_b]\n elif op == 'MOD':\n if self.reg[reg_b] == 0:\n print('ERROR: divide by 0')\n self.op_hlt()\n else:\n remainder = self.reg[reg_a] % self.reg[reg_b]\n self.reg[reg_a] = remainder\n else:\n raise Exception('Unsupported ALU operation')\n\n def trace(self):\n \"\"\"\n Handy function to print out the CPU state. 
You might want to call this\n from run() if you need help debugging.\n \"\"\"\n print(f'TRACE: %02X | %02X %02X %02X |' % (self.pc, self.ram_read(\n self.pc), self.ram_read(self.pc + 1), self.ram_read(self.pc + 2\n )), end='')\n for i in range(8):\n print(' %02X' % self.reg[i], end='')\n print()\n\n def run(self):\n \"\"\"Run the CPU.\"\"\"\n self.trace()\n while self.running is True:\n IR = self.ram_read(self.pc)\n operand_a = self.ram_read(self.pc + 1)\n operand_b = self.ram_read(self.pc + 2)\n op_size = IR >> 6\n ins_set = IR >> 4 & 1 == 1\n if not ins_set:\n self.pc += op_size + 1\n if IR in self.branchtable:\n self.branchtable[IR](operand_a, operand_b)\n",
"step-3": "<mask token>\n\n\nclass CPU:\n <mask token>\n\n def __init__(self):\n \"\"\"Construct a new CPU.\"\"\"\n self.reg = [0] * 8\n self.pc = 0\n self.ram = [0] * 256\n self.running = True\n self.reg[7] = 244\n self.sp = self.reg[7]\n self.fl = 0\n self.branchtable = {}\n self.branchtable[HLT] = self.op_hlt\n self.branchtable[LDI] = self.op_ldi\n self.branchtable[PRN] = self.op_prn\n self.branchtable[MUL] = self.op_mul\n self.branchtable[PUSH] = self.op_push\n self.branchtable[POP] = self.op_pop\n self.branchtable[CALL] = self.op_call\n self.branchtable[RET] = self.op_ret\n self.branchtable[ADD] = self.op_add\n self.branchtable[CMP] = self.op_cmp\n self.branchtable[JMP] = self.op_jmp\n self.branchtable[JEQ] = self.op_jeq\n self.branchtable[JNE] = self.op_jne\n self.branchtable[AND] = self.op_and\n self.branchtable[NOT] = self.op_not\n self.branchtable[OR] = self.op_or\n self.branchtable[XOR] = self.op_xor\n self.branchtable[SHL] = self.op_shl\n self.branchtable[SHR] = self.op_shr\n self.branchtable[MOD] = self.op_mod\n <mask token>\n\n def ram_write(self, MAR, MDR):\n self.ram[MAR] = MDR\n\n def op_hlt(self, operand_a, operand_b):\n self.running = False\n\n def op_ldi(self, operand_a, operand_b):\n self.reg[operand_a] = operand_b\n\n def op_prn(self, operand_a, operand_b):\n print('prn:', self.reg[operand_a])\n\n def op_mul(self, operand_a, operand_b):\n self.alu('MUL', operand_a, operand_b)\n\n def op_push(self, operand_a, operand_b):\n self.sp -= 1\n val = self.reg[operand_a]\n self.ram_write(self.sp, val)\n\n def op_pop(self, operand_a, operand_b):\n self.reg[operand_a] = self.ram_read(self.sp)\n self.sp += 1\n\n def op_call(self, operand_a, operand_b):\n ret_addr = self.pc + 2\n self.sp -= 1\n self.ram_write(self.sp, ret_addr)\n sub_addr = self.reg[operand_a]\n self.pc = sub_addr\n\n def op_ret(self, operand_a, operand_b):\n ret_addr = self.ram_read(self.sp)\n self.sp += 1\n self.pc = ret_addr\n\n def op_add(self, operand_a, operand_b):\n self.alu('ADD', 
operand_a, operand_b)\n\n def op_cmp(self, operand_a, operand_b):\n self.alu('CMP', operand_a, operand_b)\n <mask token>\n <mask token>\n\n def op_jne(self, operand_a, operand_b):\n if self.fl != 1:\n self.op_jmp(operand_a, operand_b)\n else:\n self.pc += 2\n\n def op_and(self, operand_a, operand_b):\n self.alu('AND', operand_a, operand_b)\n\n def op_or(self, operand_a, operand_b):\n self.alu('ADD', operand_a, operand_b)\n\n def op_xor(self, operand_a, operand_b):\n self.alu('CMP', operand_a, operand_b)\n <mask token>\n\n def op_shl(self, operand_a, operand_b):\n self.alu('CMP', operand_a, operand_b)\n\n def op_shr(self, operand_a, operand_b):\n self.alu('ADD', operand_a, operand_b)\n\n def op_mod(self, operand_a, operand_b):\n self.alu('CMP', operand_a, operand_b)\n <mask token>\n\n def alu(self, op, reg_a, reg_b):\n \"\"\"ALU operations.\"\"\"\n if op == 'ADD':\n self.reg[reg_a] = self.reg[reg_a] + self.reg[reg_b]\n elif op == 'MUL':\n self.reg[reg_a] = self.reg[reg_a] * self.reg[reg_b]\n elif op == 'CMP':\n if self.reg[reg_a] < self.reg[reg_b]:\n self.fl = 4\n elif self.reg[reg_a] > self.reg[reg_b]:\n self.fl = 2\n elif self.reg[reg_a] == self.reg[reg_b]:\n self.fl = 1\n elif op == 'AND':\n self.reg[reg_a] = self.reg[reg_a] & self.reg[reg_b]\n elif op == 'OR':\n self.reg[reg_a] = self.reg[reg_a] | self.reg[reg_b]\n elif op == 'XOR':\n self.reg[reg_a] = self.reg[reg_a] ^ self.reg[reg_b]\n elif op == 'NOT':\n self.reg[reg_a] = ~self.reg[reg_a]\n elif op == 'SHL':\n self.reg[reg_a] = self.reg[reg_a] << self.reg[reg_b]\n elif op == 'SHR':\n self.reg[reg_a] = self.reg[reg_a] >> self.reg[reg_b]\n elif op == 'MOD':\n if self.reg[reg_b] == 0:\n print('ERROR: divide by 0')\n self.op_hlt()\n else:\n remainder = self.reg[reg_a] % self.reg[reg_b]\n self.reg[reg_a] = remainder\n else:\n raise Exception('Unsupported ALU operation')\n\n def trace(self):\n \"\"\"\n Handy function to print out the CPU state. 
You might want to call this\n from run() if you need help debugging.\n \"\"\"\n print(f'TRACE: %02X | %02X %02X %02X |' % (self.pc, self.ram_read(\n self.pc), self.ram_read(self.pc + 1), self.ram_read(self.pc + 2\n )), end='')\n for i in range(8):\n print(' %02X' % self.reg[i], end='')\n print()\n\n def run(self):\n \"\"\"Run the CPU.\"\"\"\n self.trace()\n while self.running is True:\n IR = self.ram_read(self.pc)\n operand_a = self.ram_read(self.pc + 1)\n operand_b = self.ram_read(self.pc + 2)\n op_size = IR >> 6\n ins_set = IR >> 4 & 1 == 1\n if not ins_set:\n self.pc += op_size + 1\n if IR in self.branchtable:\n self.branchtable[IR](operand_a, operand_b)\n",
"step-4": "<mask token>\n\n\nclass CPU:\n <mask token>\n\n def __init__(self):\n \"\"\"Construct a new CPU.\"\"\"\n self.reg = [0] * 8\n self.pc = 0\n self.ram = [0] * 256\n self.running = True\n self.reg[7] = 244\n self.sp = self.reg[7]\n self.fl = 0\n self.branchtable = {}\n self.branchtable[HLT] = self.op_hlt\n self.branchtable[LDI] = self.op_ldi\n self.branchtable[PRN] = self.op_prn\n self.branchtable[MUL] = self.op_mul\n self.branchtable[PUSH] = self.op_push\n self.branchtable[POP] = self.op_pop\n self.branchtable[CALL] = self.op_call\n self.branchtable[RET] = self.op_ret\n self.branchtable[ADD] = self.op_add\n self.branchtable[CMP] = self.op_cmp\n self.branchtable[JMP] = self.op_jmp\n self.branchtable[JEQ] = self.op_jeq\n self.branchtable[JNE] = self.op_jne\n self.branchtable[AND] = self.op_and\n self.branchtable[NOT] = self.op_not\n self.branchtable[OR] = self.op_or\n self.branchtable[XOR] = self.op_xor\n self.branchtable[SHL] = self.op_shl\n self.branchtable[SHR] = self.op_shr\n self.branchtable[MOD] = self.op_mod\n\n def ram_read(self, MAR):\n return self.ram[MAR]\n\n def ram_write(self, MAR, MDR):\n self.ram[MAR] = MDR\n\n def op_hlt(self, operand_a, operand_b):\n self.running = False\n\n def op_ldi(self, operand_a, operand_b):\n self.reg[operand_a] = operand_b\n\n def op_prn(self, operand_a, operand_b):\n print('prn:', self.reg[operand_a])\n\n def op_mul(self, operand_a, operand_b):\n self.alu('MUL', operand_a, operand_b)\n\n def op_push(self, operand_a, operand_b):\n self.sp -= 1\n val = self.reg[operand_a]\n self.ram_write(self.sp, val)\n\n def op_pop(self, operand_a, operand_b):\n self.reg[operand_a] = self.ram_read(self.sp)\n self.sp += 1\n\n def op_call(self, operand_a, operand_b):\n ret_addr = self.pc + 2\n self.sp -= 1\n self.ram_write(self.sp, ret_addr)\n sub_addr = self.reg[operand_a]\n self.pc = sub_addr\n\n def op_ret(self, operand_a, operand_b):\n ret_addr = self.ram_read(self.sp)\n self.sp += 1\n self.pc = ret_addr\n\n def op_add(self, 
operand_a, operand_b):\n self.alu('ADD', operand_a, operand_b)\n\n def op_cmp(self, operand_a, operand_b):\n self.alu('CMP', operand_a, operand_b)\n <mask token>\n\n def op_jeq(self, operand_a, operand_b):\n if self.fl == 1:\n self.op_jmp(operand_a, operand_b)\n else:\n self.pc += 2\n\n def op_jne(self, operand_a, operand_b):\n if self.fl != 1:\n self.op_jmp(operand_a, operand_b)\n else:\n self.pc += 2\n\n def op_and(self, operand_a, operand_b):\n self.alu('AND', operand_a, operand_b)\n\n def op_or(self, operand_a, operand_b):\n self.alu('ADD', operand_a, operand_b)\n\n def op_xor(self, operand_a, operand_b):\n self.alu('CMP', operand_a, operand_b)\n\n def op_not(self, operand_a, operand_b):\n self.alu('ADD', operand_a, operand_b)\n\n def op_shl(self, operand_a, operand_b):\n self.alu('CMP', operand_a, operand_b)\n\n def op_shr(self, operand_a, operand_b):\n self.alu('ADD', operand_a, operand_b)\n\n def op_mod(self, operand_a, operand_b):\n self.alu('CMP', operand_a, operand_b)\n\n def load(self, filename):\n \"\"\"Load a program into memory.\"\"\"\n address = 0\n with open(filename) as file:\n for line in file:\n val = line.split('#')[0].strip()\n if val == '':\n continue\n instruction = int(val, 2)\n self.ram[address] = instruction\n address += 1\n\n def alu(self, op, reg_a, reg_b):\n \"\"\"ALU operations.\"\"\"\n if op == 'ADD':\n self.reg[reg_a] = self.reg[reg_a] + self.reg[reg_b]\n elif op == 'MUL':\n self.reg[reg_a] = self.reg[reg_a] * self.reg[reg_b]\n elif op == 'CMP':\n if self.reg[reg_a] < self.reg[reg_b]:\n self.fl = 4\n elif self.reg[reg_a] > self.reg[reg_b]:\n self.fl = 2\n elif self.reg[reg_a] == self.reg[reg_b]:\n self.fl = 1\n elif op == 'AND':\n self.reg[reg_a] = self.reg[reg_a] & self.reg[reg_b]\n elif op == 'OR':\n self.reg[reg_a] = self.reg[reg_a] | self.reg[reg_b]\n elif op == 'XOR':\n self.reg[reg_a] = self.reg[reg_a] ^ self.reg[reg_b]\n elif op == 'NOT':\n self.reg[reg_a] = ~self.reg[reg_a]\n elif op == 'SHL':\n self.reg[reg_a] = 
self.reg[reg_a] << self.reg[reg_b]\n elif op == 'SHR':\n self.reg[reg_a] = self.reg[reg_a] >> self.reg[reg_b]\n elif op == 'MOD':\n if self.reg[reg_b] == 0:\n print('ERROR: divide by 0')\n self.op_hlt()\n else:\n remainder = self.reg[reg_a] % self.reg[reg_b]\n self.reg[reg_a] = remainder\n else:\n raise Exception('Unsupported ALU operation')\n\n def trace(self):\n \"\"\"\n Handy function to print out the CPU state. You might want to call this\n from run() if you need help debugging.\n \"\"\"\n print(f'TRACE: %02X | %02X %02X %02X |' % (self.pc, self.ram_read(\n self.pc), self.ram_read(self.pc + 1), self.ram_read(self.pc + 2\n )), end='')\n for i in range(8):\n print(' %02X' % self.reg[i], end='')\n print()\n\n def run(self):\n \"\"\"Run the CPU.\"\"\"\n self.trace()\n while self.running is True:\n IR = self.ram_read(self.pc)\n operand_a = self.ram_read(self.pc + 1)\n operand_b = self.ram_read(self.pc + 2)\n op_size = IR >> 6\n ins_set = IR >> 4 & 1 == 1\n if not ins_set:\n self.pc += op_size + 1\n if IR in self.branchtable:\n self.branchtable[IR](operand_a, operand_b)\n",
"step-5": "\"\"\"CPU functionality.\"\"\"\n\nimport sys\nHLT = 0b00000001\nLDI = 0b10000010\nPRN = 0b01000111\nMUL = 0b10100010\nPUSH = 0b01000101\nPOP = 0b01000110\nCMP = 0b10100111\nCALL = 0b01010000\nRET = 0b00010001\nADD = 0b10100000\nCMP = 0b10100111\nJMP = 0b01010100\nJEQ = 0b01010101\nJNE = 0b01010110\nAND = 0b10101000\nNOT = 0b01101001\nOR = 0b10101010\nXOR = 0b10101011\nSHL = 0b10101100\nSHR = 0b10101101\nMOD = 0b10100100\n\n\nclass CPU:\n \"\"\"Main CPU class.\"\"\"\n\n def __init__(self):\n \"\"\"Construct a new CPU.\"\"\"\n self.reg = [0] * 8\n self.pc = 0\n self.ram = [0] * 256\n self.running = True\n self.reg[7] = 0xf4\n self.sp = self.reg[7]\n self.fl = 0b00000000\n self.branchtable = {}\n self.branchtable[HLT] = self.op_hlt\n self.branchtable[LDI] = self.op_ldi\n self.branchtable[PRN] = self.op_prn\n self.branchtable[MUL] = self.op_mul\n self.branchtable[PUSH] = self.op_push\n self.branchtable[POP] = self.op_pop\n self.branchtable[CALL] = self.op_call\n self.branchtable[RET] = self.op_ret\n self.branchtable[ADD] = self.op_add\n self.branchtable[CMP] = self.op_cmp\n self.branchtable[JMP] = self.op_jmp\n self.branchtable[JEQ] = self.op_jeq\n self.branchtable[JNE] = self.op_jne\n self.branchtable[AND] = self.op_and\n self.branchtable[NOT] = self.op_not\n self.branchtable[OR] = self.op_or\n self.branchtable[XOR] = self.op_xor\n self.branchtable[SHL] = self.op_shl\n self.branchtable[SHR] = self.op_shr\n self.branchtable[MOD] = self.op_mod\n\n def ram_read(self, MAR):\n return self.ram[MAR]\n\n def ram_write(self, MAR, MDR):\n self.ram[MAR] = MDR\n\n def op_hlt(self, operand_a, operand_b):\n self.running = False\n\n def op_ldi(self, operand_a, operand_b):\n self.reg[operand_a] = operand_b\n # self.pc += 3\n\n def op_prn(self, operand_a, operand_b):\n print('prn:', self.reg[operand_a])\n # self.pc += 2\n\n def op_mul(self, operand_a, operand_b):\n self.alu('MUL', operand_a, operand_b)\n # self.pc += 3\n\n def op_push(self, operand_a, operand_b):\n self.sp 
-= 1\n val = self.reg[operand_a]\n self.ram_write(self.sp, val)\n # self.pc += 2\n\n def op_pop(self, operand_a, operand_b):\n self.reg[operand_a] = self.ram_read(self.sp)\n # self.pc += 2\n self.sp += 1\n\n def op_call(self, operand_a, operand_b):\n ret_addr = self.pc + 2\n self.sp -= 1\n self.ram_write(self.sp, ret_addr) # write sp and pc location to ram\n sub_addr = self.reg[operand_a]\n self.pc = sub_addr\n\n def op_ret(self, operand_a, operand_b):\n ret_addr = self.ram_read(self.sp) # set ret_addr to location in ram\n self.sp += 1\n self.pc = ret_addr\n\n def op_add(self, operand_a, operand_b):\n self.alu('ADD', operand_a, operand_b)\n\n def op_cmp(self, operand_a, operand_b):\n self.alu('CMP', operand_a, operand_b)\n\n def op_jmp(self, operand_a, operand_b):\n self.pc = self.reg[operand_a]\n\n def op_jeq(self, operand_a, operand_b):\n if self.fl == 0b00000001:\n self.op_jmp(operand_a, operand_b)\n else:\n self.pc += 2\n\n def op_jne(self, operand_a, operand_b):\n if self.fl != 0b00000001:\n self.op_jmp(operand_a, operand_b)\n else:\n self.pc += 2\n\n def op_and(self, operand_a, operand_b):\n self.alu('AND', operand_a, operand_b)\n\n def op_or(self, operand_a, operand_b):\n self.alu('ADD', operand_a, operand_b)\n\n def op_xor(self, operand_a, operand_b):\n self.alu('CMP', operand_a, operand_b)\n\n def op_not(self, operand_a, operand_b):\n self.alu('ADD', operand_a, operand_b)\n\n def op_shl(self, operand_a, operand_b):\n self.alu('CMP', operand_a, operand_b)\n\n def op_shr(self, operand_a, operand_b):\n self.alu('ADD', operand_a, operand_b)\n\n def op_mod(self, operand_a, operand_b):\n self.alu('CMP', operand_a, operand_b)\n\n def load(self, filename):\n \"\"\"Load a program into memory.\"\"\"\n\n address = 0\n\n with open(filename) as file:\n for line in file:\n val = line.split(\"#\")[0].strip()\n if val == '':\n continue\n instruction = int(val, 2)\n self.ram[address] = instruction\n address += 1\n\n # For now, we've just hardcoded a program:\n # program = 
[\n # # From print8.ls8\n # 0b10000010, # LDI R0,8\n # 0b00000000,\n # 0b00001000,\n # 0b01000111, # PRN R0\n # 0b00000000,\n # 0b00000001, # HLT\n # ]\n # for instruction in program:\n # self.ram[address] = instruction\n # address += 1\n\n def alu(self, op, reg_a, reg_b):\n \"\"\"ALU operations.\"\"\"\n\n if op == 'ADD':\n self.reg[reg_a] = self.reg[reg_a] + self.reg[reg_b]\n elif op == 'MUL':\n self.reg[reg_a] = self.reg[reg_a] * self.reg[reg_b]\n elif op == 'CMP':\n if self.reg[reg_a] < self.reg[reg_b]:\n self.fl = 0b00000100\n elif self.reg[reg_a] > self.reg[reg_b]:\n self.fl = 0b00000010\n elif self.reg[reg_a] == self.reg[reg_b]:\n self.fl = 0b00000001\n elif op == 'AND':\n self.reg[reg_a] = self.reg[reg_a] & self.reg[reg_b]\n elif op == 'OR':\n self.reg[reg_a] = self.reg[reg_a] | self.reg[reg_b]\n elif op == 'XOR':\n self.reg[reg_a] = self.reg[reg_a] ^ self.reg[reg_b]\n elif op == 'NOT':\n self.reg[reg_a] = ~self.reg[reg_a]\n elif op == 'SHL':\n self.reg[reg_a] = self.reg[reg_a] << self.reg[reg_b]\n elif op == 'SHR':\n self.reg[reg_a] = self.reg[reg_a] >> self.reg[reg_b]\n elif op == 'MOD':\n if self.reg[reg_b] == 0:\n print('ERROR: divide by 0')\n self.op_hlt()\n else:\n remainder = self.reg[reg_a] % self.reg[reg_b]\n self.reg[reg_a] = remainder\n\n else:\n raise Exception(\"Unsupported ALU operation\")\n\n def trace(self):\n \"\"\"\n Handy function to print out the CPU state. 
You might want to call this\n from run() if you need help debugging.\n \"\"\"\n\n print(f\"TRACE: %02X | %02X %02X %02X |\" % (\n self.pc,\n # self.fl,\n # self.ie,\n self.ram_read(self.pc),\n self.ram_read(self.pc + 1),\n self.ram_read(self.pc + 2)\n ), end='')\n\n for i in range(8):\n print(\" %02X\" % self.reg[i], end='')\n\n print()\n\n def run(self):\n \"\"\"Run the CPU.\"\"\"\n self.trace()\n\n while self.running is True:\n IR = self.ram_read(self.pc)\n operand_a = self.ram_read(self.pc + 1)\n operand_b = self.ram_read(self.pc + 2)\n\n # This increments the pc position automatically\n op_size = IR >> 6\n ins_set = ((IR >> 4) & 0b1) == 1\n if not ins_set:\n self.pc += op_size + 1\n\n if IR in self.branchtable:\n self.branchtable[IR](operand_a, operand_b)\n\n# SAVE WHERE WE'RE COMING FROM TO THE STACK AND SET PC TO WHERE WE'RE GOING\n",
"step-ids": [
13,
22,
23,
27,
32
]
}
|
[
13,
22,
23,
27,
32
] |
# models.py- Team
from django.db import models
class Team(models.Model):
    """A team; seasons and registrants are expected to reference this model."""

    # Human-readable team name.
    teamName = models.TextField()

    #Seasons associated
    #Registrants unique
    # BUG FIX: removed a stray module-level `return` that followed this
    # class — `return` outside a function is a SyntaxError.
|
normal
|
{
"blob_id": "331b5f0a34db4d12d713439db3d2818e8c922310",
"index": 4236,
"step-1": "<mask token>\n\n\nclass Team(models.Model):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Team(models.Model):\n teamName = models.TextField()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Team(models.Model):\n teamName = models.TextField()\n\n\nreturn\n",
"step-4": "from django.db import models\n\n\nclass Team(models.Model):\n teamName = models.TextField()\n\n\nreturn\n",
"step-5": "# models.py- Team\nfrom django.db import models\n\n\nclass Team(models.Model):\n \n teamName = models.TextField()\n\n #Seasons associated\n #Registrants unique\n\nreturn \n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [url('^$', views.homepage, name='homepage'), url('^search/',
views.my_search_view, name='article_detail')]
<|reserved_special_token_1|>
from django.conf.urls import url
from myapp import views
urlpatterns = [url('^$', views.homepage, name='homepage'), url('^search/',
views.my_search_view, name='article_detail')]
<|reserved_special_token_1|>
from django.conf.urls import url
from myapp import views

# Route table: the site root and the search page.
urlpatterns = [
    url(r'^$', views.homepage, name='homepage'),
    url(r'^search/', views.my_search_view, name='article_detail'),
]
|
flexible
|
{
"blob_id": "388e43850a2e114cfe7869293ee814831a088b3e",
"index": 8468,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('^$', views.homepage, name='homepage'), url('^search/',\n views.my_search_view, name='article_detail')]\n",
"step-3": "from django.conf.urls import url\nfrom myapp import views\nurlpatterns = [url('^$', views.homepage, name='homepage'), url('^search/',\n views.my_search_view, name='article_detail')]\n",
"step-4": "from django.conf.urls import url\nfrom myapp import views\n\nurlpatterns = [\n url(r'^$', views.homepage, name='homepage'),\n url(r'^search/', views.my_search_view, name = 'article_detail')\n ]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class StackFrontExtractor(FrontExtractorOp):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class StackFrontExtractor(FrontExtractorOp):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@classmethod
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
update_attrs = {'axis': attrs.int('axis', 0)}
PackOp.update_node_stat(node, update_attrs)
return cls.enabled
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class StackFrontExtractor(FrontExtractorOp):
op = 'stack'
enabled = True
@classmethod
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
update_attrs = {'axis': attrs.int('axis', 0)}
PackOp.update_node_stat(node, update_attrs)
return cls.enabled
<|reserved_special_token_1|>
from openvino.tools.mo.ops.pack import PackOp
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
class StackFrontExtractor(FrontExtractorOp):
op = 'stack'
enabled = True
@classmethod
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
update_attrs = {'axis': attrs.int('axis', 0)}
PackOp.update_node_stat(node, update_attrs)
return cls.enabled
<|reserved_special_token_1|>
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.ops.pack import PackOp
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
class StackFrontExtractor(FrontExtractorOp):
    """Front extractor mapping the MXNet `stack` symbol to the Pack op."""

    op = 'stack'
    enabled = True

    @classmethod
    def extract(cls, node):
        # Read the symbol attributes; `axis` defaults to 0 when absent.
        axis = get_mxnet_layer_attrs(node.symbol_dict).int('axis', 0)
        # update the attributes of the node
        PackOp.update_node_stat(node, {'axis': axis})
        return cls.enabled
|
flexible
|
{
"blob_id": "dd71feda1ed5ff7ef9dee1573ad63939a3e09691",
"index": 7526,
"step-1": "<mask token>\n\n\nclass StackFrontExtractor(FrontExtractorOp):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass StackFrontExtractor(FrontExtractorOp):\n <mask token>\n <mask token>\n\n @classmethod\n def extract(cls, node):\n attrs = get_mxnet_layer_attrs(node.symbol_dict)\n update_attrs = {'axis': attrs.int('axis', 0)}\n PackOp.update_node_stat(node, update_attrs)\n return cls.enabled\n",
"step-3": "<mask token>\n\n\nclass StackFrontExtractor(FrontExtractorOp):\n op = 'stack'\n enabled = True\n\n @classmethod\n def extract(cls, node):\n attrs = get_mxnet_layer_attrs(node.symbol_dict)\n update_attrs = {'axis': attrs.int('axis', 0)}\n PackOp.update_node_stat(node, update_attrs)\n return cls.enabled\n",
"step-4": "from openvino.tools.mo.ops.pack import PackOp\nfrom openvino.tools.mo.front.extractor import FrontExtractorOp\nfrom openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs\n\n\nclass StackFrontExtractor(FrontExtractorOp):\n op = 'stack'\n enabled = True\n\n @classmethod\n def extract(cls, node):\n attrs = get_mxnet_layer_attrs(node.symbol_dict)\n update_attrs = {'axis': attrs.int('axis', 0)}\n PackOp.update_node_stat(node, update_attrs)\n return cls.enabled\n",
"step-5": "# Copyright (C) 2018-2023 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nfrom openvino.tools.mo.ops.pack import PackOp\nfrom openvino.tools.mo.front.extractor import FrontExtractorOp\nfrom openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs\n\n\nclass StackFrontExtractor(FrontExtractorOp):\n op = 'stack'\n enabled = True\n\n @classmethod\n def extract(cls, node):\n attrs = get_mxnet_layer_attrs(node.symbol_dict)\n\n update_attrs = {\n 'axis': attrs.int('axis', 0)\n }\n\n # update the attributes of the node\n PackOp.update_node_stat(node, update_attrs)\n\n return cls.enabled\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from __future__ import annotations
from VersionControl.Branch import Branch
from Branches.Actions.Actions import Actions
from VersionControl.Git.Branches.Develop.Init import Init
class Develop(Branch):
    """Develop-branch workflow: dispatches the requested action."""

    def process(self):
        # Guard clause: only INIT is implemented for the develop branch.
        if self.action is not Actions.INIT:
            raise NotImplementedError
        self.start_message('Develop Init')
        Init(self.state_handler, self.config_handler).process()
|
normal
|
{
"blob_id": "338bf2406c233d857e1a688391161d58e1dab23c",
"index": 8910,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Develop(Branch):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Develop(Branch):\n\n def process(self):\n if self.action is Actions.INIT:\n self.start_message('Develop Init')\n Init(self.state_handler, self.config_handler).process()\n else:\n raise NotImplementedError\n",
"step-4": "from __future__ import annotations\nfrom VersionControl.Branch import Branch\nfrom Branches.Actions.Actions import Actions\nfrom VersionControl.Git.Branches.Develop.Init import Init\n\n\nclass Develop(Branch):\n\n def process(self):\n if self.action is Actions.INIT:\n self.start_message('Develop Init')\n Init(self.state_handler, self.config_handler).process()\n else:\n raise NotImplementedError\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class settings:
def __init__(self, xmax, xmin, ymax, ymin, yrange, xrange):
self.xmax = xmax
self.xmin = xmin
self.ymax = ymax
self.ymin = ymin
self.yrange = yrange
self.xrange = xrange
pass
<|reserved_special_token_0|>
class MapMinMaxApplier(object):
def __init__(self, slope, intercept):
self.slope = slope
self.intercept = intercept
def __call__(self, x):
return x * self.slope + self.intercept
def reverse(self, y):
return (y - self.intercept) / self.slope
def mapminmax_rev(x, ymin=-1, ymax=+1):
x = np.asanyarray(x)
xmax = x.max(axis=-1)
xmin = x.min(axis=-1)
if (xmax == xmin).any():
raise ValueError('some rows have no variation')
slope = ((ymax - ymin) / (xmax - xmin))[:, np.newaxis]
intercept = (-xmin * (ymax - ymin) / (xmax - xmin))[:, np.newaxis] + ymin
ps = MapMinMaxApplier(slope, intercept)
return ps(x), ps
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class settings:
def __init__(self, xmax, xmin, ymax, ymin, yrange, xrange):
self.xmax = xmax
self.xmin = xmin
self.ymax = ymax
self.ymin = ymin
self.yrange = yrange
self.xrange = xrange
pass
<|reserved_special_token_0|>
def create(x, ymin, ymax):
xrows = x.shape[0]
xmin = x.min(1)
xmax = x.max(1)
xrange = xmax - xmin
yrows = xrows
yrange = ymax - ymin
gain = yrange / xrange
fix = np.nonzero(~np.isfinite(xrange) | (xrange == 0))
if not all(fix):
None
else:
gain[fix] = 1
xmin[fix] = ymin
return [mapminmax_apply(x, xrange, xmin, yrange, ymin), settings(xmax=
xmax, xmin=xmin, ymax=ymax, ymin=ymin, yrange=yrange, xrange=xrange)]
<|reserved_special_token_0|>
class MapMinMaxApplier(object):
def __init__(self, slope, intercept):
self.slope = slope
self.intercept = intercept
def __call__(self, x):
return x * self.slope + self.intercept
def reverse(self, y):
return (y - self.intercept) / self.slope
def mapminmax_rev(x, ymin=-1, ymax=+1):
x = np.asanyarray(x)
xmax = x.max(axis=-1)
xmin = x.min(axis=-1)
if (xmax == xmin).any():
raise ValueError('some rows have no variation')
slope = ((ymax - ymin) / (xmax - xmin))[:, np.newaxis]
intercept = (-xmin * (ymax - ymin) / (xmax - xmin))[:, np.newaxis] + ymin
ps = MapMinMaxApplier(slope, intercept)
return ps(x), ps
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class settings:
def __init__(self, xmax, xmin, ymax, ymin, yrange, xrange):
self.xmax = xmax
self.xmin = xmin
self.ymax = ymax
self.ymin = ymin
self.yrange = yrange
self.xrange = xrange
pass
<|reserved_special_token_0|>
def create(x, ymin, ymax):
xrows = x.shape[0]
xmin = x.min(1)
xmax = x.max(1)
xrange = xmax - xmin
yrows = xrows
yrange = ymax - ymin
gain = yrange / xrange
fix = np.nonzero(~np.isfinite(xrange) | (xrange == 0))
if not all(fix):
None
else:
gain[fix] = 1
xmin[fix] = ymin
return [mapminmax_apply(x, xrange, xmin, yrange, ymin), settings(xmax=
xmax, xmin=xmin, ymax=ymax, ymin=ymin, yrange=yrange, xrange=xrange)]
def mapminmax_apply(x, xrange, xmin, yrange, ymin):
gain = yrange / xrange
fix = np.nonzero(~np.isfinite(xrange) | (xrange == 0))
if not all(fix):
None
else:
gain[fix] = 1
xmin[fix] = ymin
cd = np.multiply(np.ones((x.shape[0], x.shape[1])), xmin.values.reshape
(x.shape[0], 1))
a = x - cd
b = np.multiply(np.ones((x.shape[0], x.shape[1])), gain.values.reshape(
x.shape[0], 1))
return np.multiply(a, b) + ymin
class MapMinMaxApplier(object):
def __init__(self, slope, intercept):
self.slope = slope
self.intercept = intercept
def __call__(self, x):
return x * self.slope + self.intercept
def reverse(self, y):
return (y - self.intercept) / self.slope
def mapminmax_rev(x, ymin=-1, ymax=+1):
x = np.asanyarray(x)
xmax = x.max(axis=-1)
xmin = x.min(axis=-1)
if (xmax == xmin).any():
raise ValueError('some rows have no variation')
slope = ((ymax - ymin) / (xmax - xmin))[:, np.newaxis]
intercept = (-xmin * (ymax - ymin) / (xmax - xmin))[:, np.newaxis] + ymin
ps = MapMinMaxApplier(slope, intercept)
return ps(x), ps
<|reserved_special_token_1|>
import numpy as np
class settings:
def __init__(self, xmax, xmin, ymax, ymin, yrange, xrange):
self.xmax = xmax
self.xmin = xmin
self.ymax = ymax
self.ymin = ymin
self.yrange = yrange
self.xrange = xrange
pass
def mapminmax(x, ymin=-1.0, ymax=1.0):
return create(x, ymin, ymax)
def create(x, ymin, ymax):
xrows = x.shape[0]
xmin = x.min(1)
xmax = x.max(1)
xrange = xmax - xmin
yrows = xrows
yrange = ymax - ymin
gain = yrange / xrange
fix = np.nonzero(~np.isfinite(xrange) | (xrange == 0))
if not all(fix):
None
else:
gain[fix] = 1
xmin[fix] = ymin
return [mapminmax_apply(x, xrange, xmin, yrange, ymin), settings(xmax=
xmax, xmin=xmin, ymax=ymax, ymin=ymin, yrange=yrange, xrange=xrange)]
def mapminmax_apply(x, xrange, xmin, yrange, ymin):
gain = yrange / xrange
fix = np.nonzero(~np.isfinite(xrange) | (xrange == 0))
if not all(fix):
None
else:
gain[fix] = 1
xmin[fix] = ymin
cd = np.multiply(np.ones((x.shape[0], x.shape[1])), xmin.values.reshape
(x.shape[0], 1))
a = x - cd
b = np.multiply(np.ones((x.shape[0], x.shape[1])), gain.values.reshape(
x.shape[0], 1))
return np.multiply(a, b) + ymin
class MapMinMaxApplier(object):
def __init__(self, slope, intercept):
self.slope = slope
self.intercept = intercept
def __call__(self, x):
return x * self.slope + self.intercept
def reverse(self, y):
return (y - self.intercept) / self.slope
def mapminmax_rev(x, ymin=-1, ymax=+1):
x = np.asanyarray(x)
xmax = x.max(axis=-1)
xmin = x.min(axis=-1)
if (xmax == xmin).any():
raise ValueError('some rows have no variation')
slope = ((ymax - ymin) / (xmax - xmin))[:, np.newaxis]
intercept = (-xmin * (ymax - ymin) / (xmax - xmin))[:, np.newaxis] + ymin
ps = MapMinMaxApplier(slope, intercept)
return ps(x), ps
<|reserved_special_token_1|>
import numpy as np
class settings:
def __init__(self, xmax, xmin, ymax, ymin, yrange, xrange):
self.xmax = xmax
self.xmin = xmin
self.ymax = ymax
self.ymin = ymin
self.yrange = yrange
self.xrange = xrange
pass
def mapminmax(x, ymin=-1.0, ymax=1.0):
return create(x, ymin, ymax)
def create(x, ymin, ymax):
xrows = x.shape[0]
xmin = x.min(1)
xmax = x.max(1)
xrange = xmax - xmin
yrows = xrows
yrange = ymax - ymin
gain = yrange / xrange
fix = np.nonzero(~np.isfinite(xrange) | (xrange == 0))
if(not all(fix)):
None
else:
gain[fix] = 1
xmin[fix] = ymin
return [mapminmax_apply(x, xrange, xmin, yrange, ymin),
settings(xmax=xmax, xmin=xmin, ymax=ymax, ymin=ymin, yrange=yrange, xrange=xrange)]
def mapminmax_apply(x, xrange, xmin, yrange, ymin):
gain = yrange / xrange
fix = np.nonzero(~np.isfinite(xrange) | (xrange == 0))
if(not all(fix)):
None
else:
gain[fix] = 1
xmin[fix] = ymin
cd = np.multiply((np.ones((x.shape[0], x.shape[1]))), xmin.values.reshape(x.shape[0], 1))
a = x - cd
b = np.multiply((np.ones((x.shape[0], x.shape[1]))), gain.values.reshape(x.shape[0], 1))
return np.multiply(a, b) + ymin
class MapMinMaxApplier(object):
def __init__(self, slope, intercept):
self.slope = slope
self.intercept = intercept
def __call__(self, x):
return x * self.slope + self.intercept
def reverse(self, y):
return (y-self.intercept) / self.slope
def mapminmax_rev(x, ymin=-1, ymax=+1):
x = np.asanyarray(x)
xmax = x.max(axis=-1)
xmin = x.min(axis=-1)
if (xmax==xmin).any():
raise ValueError("some rows have no variation")
slope = ((ymax-ymin) / (xmax - xmin))[:,np.newaxis]
intercept = (-xmin*(ymax-ymin)/(xmax-xmin))[:,np.newaxis] + ymin
ps = MapMinMaxApplier(slope, intercept)
return ps(x), ps
|
flexible
|
{
"blob_id": "e4a66617adbe863459e33f77c32c89e901f66995",
"index": 2309,
"step-1": "<mask token>\n\n\nclass settings:\n\n def __init__(self, xmax, xmin, ymax, ymin, yrange, xrange):\n self.xmax = xmax\n self.xmin = xmin\n self.ymax = ymax\n self.ymin = ymin\n self.yrange = yrange\n self.xrange = xrange\n pass\n\n\n<mask token>\n\n\nclass MapMinMaxApplier(object):\n\n def __init__(self, slope, intercept):\n self.slope = slope\n self.intercept = intercept\n\n def __call__(self, x):\n return x * self.slope + self.intercept\n\n def reverse(self, y):\n return (y - self.intercept) / self.slope\n\n\ndef mapminmax_rev(x, ymin=-1, ymax=+1):\n x = np.asanyarray(x)\n xmax = x.max(axis=-1)\n xmin = x.min(axis=-1)\n if (xmax == xmin).any():\n raise ValueError('some rows have no variation')\n slope = ((ymax - ymin) / (xmax - xmin))[:, np.newaxis]\n intercept = (-xmin * (ymax - ymin) / (xmax - xmin))[:, np.newaxis] + ymin\n ps = MapMinMaxApplier(slope, intercept)\n return ps(x), ps\n",
"step-2": "<mask token>\n\n\nclass settings:\n\n def __init__(self, xmax, xmin, ymax, ymin, yrange, xrange):\n self.xmax = xmax\n self.xmin = xmin\n self.ymax = ymax\n self.ymin = ymin\n self.yrange = yrange\n self.xrange = xrange\n pass\n\n\n<mask token>\n\n\ndef create(x, ymin, ymax):\n xrows = x.shape[0]\n xmin = x.min(1)\n xmax = x.max(1)\n xrange = xmax - xmin\n yrows = xrows\n yrange = ymax - ymin\n gain = yrange / xrange\n fix = np.nonzero(~np.isfinite(xrange) | (xrange == 0))\n if not all(fix):\n None\n else:\n gain[fix] = 1\n xmin[fix] = ymin\n return [mapminmax_apply(x, xrange, xmin, yrange, ymin), settings(xmax=\n xmax, xmin=xmin, ymax=ymax, ymin=ymin, yrange=yrange, xrange=xrange)]\n\n\n<mask token>\n\n\nclass MapMinMaxApplier(object):\n\n def __init__(self, slope, intercept):\n self.slope = slope\n self.intercept = intercept\n\n def __call__(self, x):\n return x * self.slope + self.intercept\n\n def reverse(self, y):\n return (y - self.intercept) / self.slope\n\n\ndef mapminmax_rev(x, ymin=-1, ymax=+1):\n x = np.asanyarray(x)\n xmax = x.max(axis=-1)\n xmin = x.min(axis=-1)\n if (xmax == xmin).any():\n raise ValueError('some rows have no variation')\n slope = ((ymax - ymin) / (xmax - xmin))[:, np.newaxis]\n intercept = (-xmin * (ymax - ymin) / (xmax - xmin))[:, np.newaxis] + ymin\n ps = MapMinMaxApplier(slope, intercept)\n return ps(x), ps\n",
"step-3": "<mask token>\n\n\nclass settings:\n\n def __init__(self, xmax, xmin, ymax, ymin, yrange, xrange):\n self.xmax = xmax\n self.xmin = xmin\n self.ymax = ymax\n self.ymin = ymin\n self.yrange = yrange\n self.xrange = xrange\n pass\n\n\n<mask token>\n\n\ndef create(x, ymin, ymax):\n xrows = x.shape[0]\n xmin = x.min(1)\n xmax = x.max(1)\n xrange = xmax - xmin\n yrows = xrows\n yrange = ymax - ymin\n gain = yrange / xrange\n fix = np.nonzero(~np.isfinite(xrange) | (xrange == 0))\n if not all(fix):\n None\n else:\n gain[fix] = 1\n xmin[fix] = ymin\n return [mapminmax_apply(x, xrange, xmin, yrange, ymin), settings(xmax=\n xmax, xmin=xmin, ymax=ymax, ymin=ymin, yrange=yrange, xrange=xrange)]\n\n\ndef mapminmax_apply(x, xrange, xmin, yrange, ymin):\n gain = yrange / xrange\n fix = np.nonzero(~np.isfinite(xrange) | (xrange == 0))\n if not all(fix):\n None\n else:\n gain[fix] = 1\n xmin[fix] = ymin\n cd = np.multiply(np.ones((x.shape[0], x.shape[1])), xmin.values.reshape\n (x.shape[0], 1))\n a = x - cd\n b = np.multiply(np.ones((x.shape[0], x.shape[1])), gain.values.reshape(\n x.shape[0], 1))\n return np.multiply(a, b) + ymin\n\n\nclass MapMinMaxApplier(object):\n\n def __init__(self, slope, intercept):\n self.slope = slope\n self.intercept = intercept\n\n def __call__(self, x):\n return x * self.slope + self.intercept\n\n def reverse(self, y):\n return (y - self.intercept) / self.slope\n\n\ndef mapminmax_rev(x, ymin=-1, ymax=+1):\n x = np.asanyarray(x)\n xmax = x.max(axis=-1)\n xmin = x.min(axis=-1)\n if (xmax == xmin).any():\n raise ValueError('some rows have no variation')\n slope = ((ymax - ymin) / (xmax - xmin))[:, np.newaxis]\n intercept = (-xmin * (ymax - ymin) / (xmax - xmin))[:, np.newaxis] + ymin\n ps = MapMinMaxApplier(slope, intercept)\n return ps(x), ps\n",
"step-4": "import numpy as np\n\n\nclass settings:\n\n def __init__(self, xmax, xmin, ymax, ymin, yrange, xrange):\n self.xmax = xmax\n self.xmin = xmin\n self.ymax = ymax\n self.ymin = ymin\n self.yrange = yrange\n self.xrange = xrange\n pass\n\n\ndef mapminmax(x, ymin=-1.0, ymax=1.0):\n return create(x, ymin, ymax)\n\n\ndef create(x, ymin, ymax):\n xrows = x.shape[0]\n xmin = x.min(1)\n xmax = x.max(1)\n xrange = xmax - xmin\n yrows = xrows\n yrange = ymax - ymin\n gain = yrange / xrange\n fix = np.nonzero(~np.isfinite(xrange) | (xrange == 0))\n if not all(fix):\n None\n else:\n gain[fix] = 1\n xmin[fix] = ymin\n return [mapminmax_apply(x, xrange, xmin, yrange, ymin), settings(xmax=\n xmax, xmin=xmin, ymax=ymax, ymin=ymin, yrange=yrange, xrange=xrange)]\n\n\ndef mapminmax_apply(x, xrange, xmin, yrange, ymin):\n gain = yrange / xrange\n fix = np.nonzero(~np.isfinite(xrange) | (xrange == 0))\n if not all(fix):\n None\n else:\n gain[fix] = 1\n xmin[fix] = ymin\n cd = np.multiply(np.ones((x.shape[0], x.shape[1])), xmin.values.reshape\n (x.shape[0], 1))\n a = x - cd\n b = np.multiply(np.ones((x.shape[0], x.shape[1])), gain.values.reshape(\n x.shape[0], 1))\n return np.multiply(a, b) + ymin\n\n\nclass MapMinMaxApplier(object):\n\n def __init__(self, slope, intercept):\n self.slope = slope\n self.intercept = intercept\n\n def __call__(self, x):\n return x * self.slope + self.intercept\n\n def reverse(self, y):\n return (y - self.intercept) / self.slope\n\n\ndef mapminmax_rev(x, ymin=-1, ymax=+1):\n x = np.asanyarray(x)\n xmax = x.max(axis=-1)\n xmin = x.min(axis=-1)\n if (xmax == xmin).any():\n raise ValueError('some rows have no variation')\n slope = ((ymax - ymin) / (xmax - xmin))[:, np.newaxis]\n intercept = (-xmin * (ymax - ymin) / (xmax - xmin))[:, np.newaxis] + ymin\n ps = MapMinMaxApplier(slope, intercept)\n return ps(x), ps\n",
"step-5": "\r\nimport numpy as np\r\n\r\nclass settings:\r\n def __init__(self, xmax, xmin, ymax, ymin, yrange, xrange):\r\n self.xmax = xmax\r\n self.xmin = xmin\r\n self.ymax = ymax\r\n self.ymin = ymin\r\n self.yrange = yrange\r\n self.xrange = xrange\r\n pass\r\n\r\n\r\ndef mapminmax(x, ymin=-1.0, ymax=1.0):\r\n return create(x, ymin, ymax)\r\n\r\n\r\ndef create(x, ymin, ymax):\r\n xrows = x.shape[0]\r\n xmin = x.min(1)\r\n xmax = x.max(1)\r\n\r\n xrange = xmax - xmin\r\n yrows = xrows\r\n yrange = ymax - ymin\r\n\r\n gain = yrange / xrange\r\n\r\n fix = np.nonzero(~np.isfinite(xrange) | (xrange == 0))\r\n\r\n if(not all(fix)):\r\n None\r\n else:\r\n gain[fix] = 1\r\n xmin[fix] = ymin\r\n\r\n return [mapminmax_apply(x, xrange, xmin, yrange, ymin),\r\n settings(xmax=xmax, xmin=xmin, ymax=ymax, ymin=ymin, yrange=yrange, xrange=xrange)]\r\n\r\n\r\ndef mapminmax_apply(x, xrange, xmin, yrange, ymin):\r\n gain = yrange / xrange\r\n\r\n fix = np.nonzero(~np.isfinite(xrange) | (xrange == 0))\r\n if(not all(fix)):\r\n None\r\n else:\r\n gain[fix] = 1\r\n xmin[fix] = ymin\r\n\r\n cd = np.multiply((np.ones((x.shape[0], x.shape[1]))), xmin.values.reshape(x.shape[0], 1))\r\n a = x - cd\r\n\r\n b = np.multiply((np.ones((x.shape[0], x.shape[1]))), gain.values.reshape(x.shape[0], 1))\r\n return np.multiply(a, b) + ymin\r\n\r\n\r\nclass MapMinMaxApplier(object):\r\n def __init__(self, slope, intercept):\r\n self.slope = slope\r\n self.intercept = intercept\r\n def __call__(self, x):\r\n return x * self.slope + self.intercept\r\n def reverse(self, y):\r\n return (y-self.intercept) / self.slope\r\n \r\ndef mapminmax_rev(x, ymin=-1, ymax=+1):\r\n x = np.asanyarray(x)\r\n xmax = x.max(axis=-1)\r\n xmin = x.min(axis=-1)\r\n if (xmax==xmin).any():\r\n raise ValueError(\"some rows have no variation\")\r\n slope = ((ymax-ymin) / (xmax - xmin))[:,np.newaxis]\r\n intercept = (-xmin*(ymax-ymin)/(xmax-xmin))[:,np.newaxis] + ymin\r\n ps = MapMinMaxApplier(slope, intercept)\r\n return ps(x), 
ps",
"step-ids": [
7,
8,
9,
11,
12
]
}
|
[
7,
8,
9,
11,
12
] |
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
import data_helpers
def write_to_file(file,line):
file.write(line+"\n")
def cat_map():
catmap={}
id=1
f=open("cat")
cat=set([s.strip() for s in list(f.readlines())])
for i in cat:
catmap[i]=id
id=id+1
return catmap
tree = ET.ElementTree(file="test.xml")
root = tree.getroot()
cnn=open("cnn","a")
lstm=open("lstm","a")
cat=open("cat","a")
for vespaadd in root:
document = vespaadd.find("document")
if(document!=None):
subject = document.find("subject")
content = document.find("content")
maincat = document.find("maincat")
if(subject==None):
continue
if(content==None):
content=subject
if(maincat==None):
continue
write_to_file(cnn,data_helpers.clean_str(subject.text))
write_to_file(lstm,data_helpers.clean_str(content.text))
write_to_file(cat,data_helpers.clean_str(maincat.text))
cnn.close()
lstm.close()
cat.close()
|
normal
|
{
"blob_id": "04538cc5c9c68582cc9aa2959faae2d7547ab2ee",
"index": 302,
"step-1": "<mask token>\n\n\ndef write_to_file(file, line):\n file.write(line + '\\n')\n\n\n<mask token>\n",
"step-2": "try:\n import xml.etree.cElementTree as ET\nexcept ImportError:\n import xml.etree.ElementTree as ET\n<mask token>\n\n\ndef write_to_file(file, line):\n file.write(line + '\\n')\n\n\ndef cat_map():\n catmap = {}\n id = 1\n f = open('cat')\n cat = set([s.strip() for s in list(f.readlines())])\n for i in cat:\n catmap[i] = id\n id = id + 1\n return catmap\n\n\n<mask token>\nfor vespaadd in root:\n document = vespaadd.find('document')\n if document != None:\n subject = document.find('subject')\n content = document.find('content')\n maincat = document.find('maincat')\n if subject == None:\n continue\n if content == None:\n content = subject\n if maincat == None:\n continue\n write_to_file(cnn, data_helpers.clean_str(subject.text))\n write_to_file(lstm, data_helpers.clean_str(content.text))\n write_to_file(cat, data_helpers.clean_str(maincat.text))\ncnn.close()\nlstm.close()\ncat.close()\n",
"step-3": "try:\n import xml.etree.cElementTree as ET\nexcept ImportError:\n import xml.etree.ElementTree as ET\n<mask token>\n\n\ndef write_to_file(file, line):\n file.write(line + '\\n')\n\n\ndef cat_map():\n catmap = {}\n id = 1\n f = open('cat')\n cat = set([s.strip() for s in list(f.readlines())])\n for i in cat:\n catmap[i] = id\n id = id + 1\n return catmap\n\n\ntree = ET.ElementTree(file='test.xml')\nroot = tree.getroot()\ncnn = open('cnn', 'a')\nlstm = open('lstm', 'a')\ncat = open('cat', 'a')\nfor vespaadd in root:\n document = vespaadd.find('document')\n if document != None:\n subject = document.find('subject')\n content = document.find('content')\n maincat = document.find('maincat')\n if subject == None:\n continue\n if content == None:\n content = subject\n if maincat == None:\n continue\n write_to_file(cnn, data_helpers.clean_str(subject.text))\n write_to_file(lstm, data_helpers.clean_str(content.text))\n write_to_file(cat, data_helpers.clean_str(maincat.text))\ncnn.close()\nlstm.close()\ncat.close()\n",
"step-4": "try:\n import xml.etree.cElementTree as ET\nexcept ImportError:\n import xml.etree.ElementTree as ET\nimport data_helpers\n\n\ndef write_to_file(file, line):\n file.write(line + '\\n')\n\n\ndef cat_map():\n catmap = {}\n id = 1\n f = open('cat')\n cat = set([s.strip() for s in list(f.readlines())])\n for i in cat:\n catmap[i] = id\n id = id + 1\n return catmap\n\n\ntree = ET.ElementTree(file='test.xml')\nroot = tree.getroot()\ncnn = open('cnn', 'a')\nlstm = open('lstm', 'a')\ncat = open('cat', 'a')\nfor vespaadd in root:\n document = vespaadd.find('document')\n if document != None:\n subject = document.find('subject')\n content = document.find('content')\n maincat = document.find('maincat')\n if subject == None:\n continue\n if content == None:\n content = subject\n if maincat == None:\n continue\n write_to_file(cnn, data_helpers.clean_str(subject.text))\n write_to_file(lstm, data_helpers.clean_str(content.text))\n write_to_file(cat, data_helpers.clean_str(maincat.text))\ncnn.close()\nlstm.close()\ncat.close()\n",
"step-5": "try:\n\timport xml.etree.cElementTree as ET\nexcept ImportError:\n\timport xml.etree.ElementTree as ET\nimport data_helpers\n\ndef write_to_file(file,line):\n\tfile.write(line+\"\\n\")\n\ndef cat_map():\n\tcatmap={}\n\tid=1\n\tf=open(\"cat\")\n\tcat=set([s.strip() for s in list(f.readlines())])\n\tfor i in cat:\n\t\tcatmap[i]=id\n\t\tid=id+1\n\treturn catmap\n\ntree = ET.ElementTree(file=\"test.xml\")\nroot = tree.getroot()\ncnn=open(\"cnn\",\"a\")\nlstm=open(\"lstm\",\"a\")\ncat=open(\"cat\",\"a\")\nfor vespaadd in root:\n\tdocument = vespaadd.find(\"document\")\n\tif(document!=None):\n\t\tsubject = document.find(\"subject\")\n\t\tcontent = document.find(\"content\")\n\t\tmaincat = document.find(\"maincat\")\n\t\tif(subject==None):\n\t\t\tcontinue\n\t\tif(content==None):\n\t\t\tcontent=subject\n\t\tif(maincat==None):\n\t\t\tcontinue\n\t\twrite_to_file(cnn,data_helpers.clean_str(subject.text))\n\t\twrite_to_file(lstm,data_helpers.clean_str(content.text))\n\t\twrite_to_file(cat,data_helpers.clean_str(maincat.text))\ncnn.close()\nlstm.close()\ncat.close()",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
# Taken from: https://github.com/flyyufelix/cnn_finetune/blob/master/vgg16.py
# based on: https://gist.github.com/baraldilorenzo/07d7802847aaad0a35d3
# -*- coding: utf-8 -*-
import keras
import itertools
import sys
from sklearn.metrics import confusion_matrix
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from keras.models import Sequential
from keras.optimizers import SGD
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Dropout, Flatten, merge, Reshape, Activation
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras import backend as K
from keras.callbacks import LearningRateScheduler, ModelCheckpoint, EarlyStopping, CSVLogger
from keras.preprocessing.image import ImageDataGenerator
from skimage import io, color, exposure, transform
from sklearn.model_selection import train_test_split
from sklearn.cross_validation import train_test_split
from sklearn.metrics import log_loss
from new_load_GTSRB_Inception import load_GTSRB_data_1
class AccuracyHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.acc = []
def on_epoch_end(self, batch, logs={}):
self.acc.append(logs.get('acc'))
history = AccuracyHistory()
# Variables to run the script with a bat-script
dropout_rate= float(sys.argv[1])#0.5
lr= float(sys.argv[2] )#1e-3
batch_size= int(sys.argv[3])#10
weights_filename= 'vgg16_weights_frac_0_3_lr_' + str(lr) +'_batch'+str(batch_size)+'_drop_'+str(dropout_rate)+'_epochs_30.h5'
matrix_filename= 'conf_matrix_frac_0_3_lr_' + str(lr) +'_batch'+str(batch_size)+'_drop_'+str(dropout_rate)+'_epochs_30.png'
log_filename='log_frac_0_3_lr_' + str(lr) +'_batch'+str(batch_size)+'_drop_'+str(dropout_rate)+'_epochs_30'
result_file='result_frac_0_3_lr_' + str(lr) +'_batch'+str(batch_size)+'_drop_'+str(dropout_rate)+'_epochs_30.txt'
def conv2d_bn(x, nb_filter, nb_row, nb_col,
border_mode='same', subsample=(1, 1),
name=None):
#Utility function to apply conv + BN for Inception V3.
if name is not None:
bn_name = name + '_bn'
conv_name = name + '_conv'
else:
bn_name = None
conv_name = None
bn_axis = 1
x = Convolution2D(nb_filter, nb_row, nb_col,
subsample=subsample,
activation='relu',
border_mode=border_mode,
name=conv_name)(x)
x = BatchNormalization(axis=bn_axis, name=bn_name)(x)
return x
def inception_v3_model(img_rows, img_cols, channel=1, num_classes=None):
channel_axis = 1
img_input = Input(shape=(channel, img_rows, img_cols))
x = conv2d_bn(img_input, 32, 3, 3, subsample=(2, 2), border_mode='valid')
x = conv2d_bn(x, 32, 3, 3, border_mode='valid')
x = conv2d_bn(x, 64, 3, 3)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv2d_bn(x, 80, 1, 1, border_mode='valid')
x = conv2d_bn(x, 192, 3, 3, border_mode='valid')
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
# mixed 0, 1, 2: 35 x 35 x 256
for i in range(3):
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
x = merge([branch1x1, branch5x5, branch3x3dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed' + str(i))
# mixed 3: 17 x 17 x 768
branch3x3 = conv2d_bn(x, 384, 3, 3, subsample=(2, 2), border_mode='valid')
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3,
subsample=(2, 2), border_mode='valid')
branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = merge([branch3x3, branch3x3dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed3')
# mixed 4: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 128, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 128, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed4')
# mixed 5, 6: 17 x 17 x 768
for i in range(2):
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 160, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 160, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed' + str(5 + i))
# mixed 7: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 160, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed7')
# mixed 8: 8 x 8 x 1280
branch3x3 = conv2d_bn(x, 192, 1, 1)
branch3x3 = conv2d_bn(branch3x3, 320, 3, 3,
subsample=(2, 2), border_mode='valid')
branch7x7x3 = conv2d_bn(x, 192, 1, 1)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 3, 3,
subsample=(2, 2), border_mode='valid')
branch_pool = AveragePooling2D((3, 3), strides=(2, 2))(x)
x = merge([branch3x3, branch7x7x3, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed8')
# mixed 9: 8 x 8 x 2048
for i in range(2):
branch1x1 = conv2d_bn(x, 320, 1, 1)
branch3x3 = conv2d_bn(x, 384, 1, 1)
branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
branch3x3 = merge([branch3x3_1, branch3x3_2],
mode='concat', concat_axis=channel_axis,
name='mixed9_' + str(i))
branch3x3dbl = conv2d_bn(x, 448, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
branch3x3dbl = merge([branch3x3dbl_1, branch3x3dbl_2],
mode='concat', concat_axis=channel_axis)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = merge([branch1x1, branch3x3, branch3x3dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed' + str(9 + i))
# Fully Connected Softmax Layer
x_fc = AveragePooling2D((8, 8), strides=(8, 8), name='avg_pool')(x)
x_fc = Flatten(name='flatten')(x_fc)
x_fc = Dense(1000, activation='softmax', name='predictions')(x_fc)
# Create model
model = Model(img_input, x_fc)
# Load ImageNet pre-trained data
model.load_weights('imagenet_models/inception_v3_weights_th_dim_ordering_th_kernels.h5')
# Truncate and replace softmax layer for transfer learning
# Cannot use model.layers.pop() since model is not of Sequential() type
# The method below works since pre-trained weights are stored in layers but not in the model
x_newfc = AveragePooling2D((8, 8), strides=(8, 8), name='avg_pool')(x)
x_newfc = Flatten(name='flatten')(x_newfc)
x_newfc = Dense(num_classes, activation='softmax', name='predictions')(x_newfc)
# Create another model with our customized softmax
model = Model(img_input, x_newfc)
# Learning rate is changed to 0.001
sgd = SGD(lr, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
return model
def lr_schedule(epoch): # function that takes an epoch index as input and returns a new learning rate as output
return lr*(0.1**int(epoch/10))
if __name__ == '__main__':
    # Input geometry and class count for the GTSRB traffic-sign dataset.
    img_rows, img_cols = 299, 299  # Resolution of inputs
    channel = 3
    num_classes = 43
    nb_epoch = 30  # batch_size comes from sys.argv at module level

    # Load data. Please implement your own load_data() module for your dataset.
    X_train, Y_train, X_valid, Y_valid, Y_test = load_GTSRB_data_1(img_rows, img_cols)

    print("loading model")
    model = inception_v3_model(img_rows, img_cols, channel, num_classes)

    # Stream per-epoch results to a CSV file alongside the other callbacks.
    csv_logger = CSVLogger('training.log')

    print("start fine tuning")
    model.fit(
        X_train, Y_train,
        batch_size=batch_size,
        nb_epoch=nb_epoch,
        shuffle=True,
        verbose=1,
        validation_split=0.2,  # fraction of the data held out for validation
        callbacks=[
            LearningRateScheduler(lr_schedule),
            history,
            csv_logger,
            ModelCheckpoint(weights_filename, monitor='val_acc', verbose=1,
                            save_best_only=True, mode='max'),
        ],
    )

    # Evaluate on the held-out split and persist the raw numbers.
    y_pred = model.predict_classes(X_valid)
    print("Predictions: ", y_pred)
    y_eval = model.evaluate(X_valid, Y_valid)
    print("Evaluation: ", y_eval)

    with open(result_file, 'w') as out:
        out.write('Y_pred: ' + str(y_pred))
        out.write('Y_eval: ' + str(y_eval))

    # Confusion matrix is rendered to an image file rather than shown.
    cm = confusion_matrix(Y_test, y_pred)
    print(cm)
    plt.matshow(cm)
    plt.title('Confusion matrix InceptionV3')
    plt.colorbar()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.savefig(matrix_filename)
    plt.close()

    print("Done!")
|
normal
|
{
"blob_id": "906b7f02d6a7968bbf4780e682d4f9a92526326a",
"index": 9123,
"step-1": "\r\n# Taken from: https://github.com/flyyufelix/cnn_finetune/blob/master/vgg16.py \r\n# based on: https://gist.github.com/baraldilorenzo/07d7802847aaad0a35d3\r\n\r\n# -*- coding: utf-8 -*-\r\nimport keras\r\nimport itertools\r\nimport sys\r\nfrom sklearn.metrics import confusion_matrix\r\nimport numpy as np\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as mpimg\r\nfrom keras.models import Sequential\r\nfrom keras.optimizers import SGD\r\nfrom keras.layers import Input, Dense, Convolution2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Dropout, Flatten, merge, Reshape, Activation\r\nfrom keras.layers.normalization import BatchNormalization\r\nfrom keras.models import Model\r\nfrom keras import backend as K\r\nfrom keras.callbacks import LearningRateScheduler, ModelCheckpoint, EarlyStopping, CSVLogger\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom skimage import io, color, exposure, transform\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.cross_validation import train_test_split\r\nfrom sklearn.metrics import log_loss\r\n\r\nfrom new_load_GTSRB_Inception import load_GTSRB_data_1\r\n\r\nclass AccuracyHistory(keras.callbacks.Callback):\r\n def on_train_begin(self, logs={}):\r\n self.acc = []\r\n\r\n def on_epoch_end(self, batch, logs={}):\r\n self.acc.append(logs.get('acc'))\r\n\r\nhistory = AccuracyHistory()\r\n\r\n# Variables to run the script with a bat-script\r\ndropout_rate= float(sys.argv[1])#0.5\r\nlr= float(sys.argv[2] )#1e-3\r\nbatch_size= int(sys.argv[3])#10\r\nweights_filename= 'vgg16_weights_frac_0_3_lr_' + str(lr) +'_batch'+str(batch_size)+'_drop_'+str(dropout_rate)+'_epochs_30.h5'\r\nmatrix_filename= 'conf_matrix_frac_0_3_lr_' + str(lr) +'_batch'+str(batch_size)+'_drop_'+str(dropout_rate)+'_epochs_30.png'\r\nlog_filename='log_frac_0_3_lr_' + str(lr) +'_batch'+str(batch_size)+'_drop_'+str(dropout_rate)+'_epochs_30'\r\nresult_file='result_frac_0_3_lr_' + str(lr) 
+'_batch'+str(batch_size)+'_drop_'+str(dropout_rate)+'_epochs_30.txt'\r\n \r\n\r\ndef conv2d_bn(x, nb_filter, nb_row, nb_col,\r\n border_mode='same', subsample=(1, 1),\r\n name=None):\r\n \r\n #Utility function to apply conv + BN for Inception V3.\r\n\r\n if name is not None:\r\n bn_name = name + '_bn'\r\n conv_name = name + '_conv'\r\n else:\r\n bn_name = None\r\n conv_name = None\r\n bn_axis = 1\r\n x = Convolution2D(nb_filter, nb_row, nb_col,\r\n subsample=subsample,\r\n activation='relu',\r\n border_mode=border_mode,\r\n name=conv_name)(x)\r\n x = BatchNormalization(axis=bn_axis, name=bn_name)(x)\r\n return x\r\n\r\ndef inception_v3_model(img_rows, img_cols, channel=1, num_classes=None):\r\n channel_axis = 1\r\n img_input = Input(shape=(channel, img_rows, img_cols))\r\n x = conv2d_bn(img_input, 32, 3, 3, subsample=(2, 2), border_mode='valid')\r\n x = conv2d_bn(x, 32, 3, 3, border_mode='valid')\r\n x = conv2d_bn(x, 64, 3, 3)\r\n x = MaxPooling2D((3, 3), strides=(2, 2))(x)\r\n\r\n x = conv2d_bn(x, 80, 1, 1, border_mode='valid')\r\n x = conv2d_bn(x, 192, 3, 3, border_mode='valid')\r\n x = MaxPooling2D((3, 3), strides=(2, 2))(x)\r\n\r\n # mixed 0, 1, 2: 35 x 35 x 256\r\n for i in range(3):\r\n branch1x1 = conv2d_bn(x, 64, 1, 1)\r\n\r\n branch5x5 = conv2d_bn(x, 48, 1, 1)\r\n branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)\r\n\r\n branch3x3dbl = conv2d_bn(x, 64, 1, 1)\r\n branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)\r\n branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)\r\n\r\n branch_pool = AveragePooling2D(\r\n (3, 3), strides=(1, 1), border_mode='same')(x)\r\n branch_pool = conv2d_bn(branch_pool, 32, 1, 1)\r\n x = merge([branch1x1, branch5x5, branch3x3dbl, branch_pool],\r\n mode='concat', concat_axis=channel_axis,\r\n name='mixed' + str(i))\r\n\r\n # mixed 3: 17 x 17 x 768\r\n branch3x3 = conv2d_bn(x, 384, 3, 3, subsample=(2, 2), border_mode='valid')\r\n\r\n branch3x3dbl = conv2d_bn(x, 64, 1, 1)\r\n branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)\r\n branch3x3dbl = 
conv2d_bn(branch3x3dbl, 96, 3, 3,\r\n subsample=(2, 2), border_mode='valid')\r\n\r\n branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)\r\n x = merge([branch3x3, branch3x3dbl, branch_pool],\r\n mode='concat', concat_axis=channel_axis,\r\n name='mixed3')\r\n\r\n # mixed 4: 17 x 17 x 768\r\n branch1x1 = conv2d_bn(x, 192, 1, 1)\r\n\r\n branch7x7 = conv2d_bn(x, 128, 1, 1)\r\n branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)\r\n branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)\r\n\r\n branch7x7dbl = conv2d_bn(x, 128, 1, 1)\r\n branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)\r\n branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)\r\n branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)\r\n branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)\r\n\r\n branch_pool = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(x)\r\n branch_pool = conv2d_bn(branch_pool, 192, 1, 1)\r\n x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],\r\n mode='concat', concat_axis=channel_axis,\r\n name='mixed4')\r\n\r\n # mixed 5, 6: 17 x 17 x 768\r\n for i in range(2):\r\n branch1x1 = conv2d_bn(x, 192, 1, 1)\r\n\r\n branch7x7 = conv2d_bn(x, 160, 1, 1)\r\n branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)\r\n branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)\r\n\r\n branch7x7dbl = conv2d_bn(x, 160, 1, 1)\r\n branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)\r\n branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)\r\n branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)\r\n branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)\r\n\r\n branch_pool = AveragePooling2D(\r\n (3, 3), strides=(1, 1), border_mode='same')(x)\r\n branch_pool = conv2d_bn(branch_pool, 192, 1, 1)\r\n x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],\r\n mode='concat', concat_axis=channel_axis,\r\n name='mixed' + str(5 + i))\r\n\r\n # mixed 7: 17 x 17 x 768\r\n branch1x1 = conv2d_bn(x, 192, 1, 1)\r\n\r\n branch7x7 = conv2d_bn(x, 192, 1, 1)\r\n branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)\r\n branch7x7 = conv2d_bn(branch7x7, 192, 7, 
1)\r\n\r\n branch7x7dbl = conv2d_bn(x, 160, 1, 1)\r\n branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)\r\n branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)\r\n branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)\r\n branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)\r\n\r\n branch_pool = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(x)\r\n branch_pool = conv2d_bn(branch_pool, 192, 1, 1)\r\n x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],\r\n mode='concat', concat_axis=channel_axis,\r\n name='mixed7')\r\n\r\n # mixed 8: 8 x 8 x 1280\r\n branch3x3 = conv2d_bn(x, 192, 1, 1)\r\n branch3x3 = conv2d_bn(branch3x3, 320, 3, 3,\r\n subsample=(2, 2), border_mode='valid')\r\n\r\n branch7x7x3 = conv2d_bn(x, 192, 1, 1)\r\n branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)\r\n branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)\r\n branch7x7x3 = conv2d_bn(branch7x7x3, 192, 3, 3,\r\n subsample=(2, 2), border_mode='valid')\r\n\r\n branch_pool = AveragePooling2D((3, 3), strides=(2, 2))(x)\r\n x = merge([branch3x3, branch7x7x3, branch_pool],\r\n mode='concat', concat_axis=channel_axis,\r\n name='mixed8')\r\n\r\n # mixed 9: 8 x 8 x 2048\r\n for i in range(2):\r\n branch1x1 = conv2d_bn(x, 320, 1, 1)\r\n\r\n branch3x3 = conv2d_bn(x, 384, 1, 1)\r\n branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)\r\n branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)\r\n branch3x3 = merge([branch3x3_1, branch3x3_2],\r\n mode='concat', concat_axis=channel_axis,\r\n name='mixed9_' + str(i))\r\n\r\n branch3x3dbl = conv2d_bn(x, 448, 1, 1)\r\n branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)\r\n branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)\r\n branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)\r\n branch3x3dbl = merge([branch3x3dbl_1, branch3x3dbl_2],\r\n mode='concat', concat_axis=channel_axis)\r\n\r\n branch_pool = AveragePooling2D(\r\n (3, 3), strides=(1, 1), border_mode='same')(x)\r\n branch_pool = conv2d_bn(branch_pool, 192, 1, 1)\r\n x = merge([branch1x1, branch3x3, branch3x3dbl, 
branch_pool],\r\n mode='concat', concat_axis=channel_axis,\r\n name='mixed' + str(9 + i))\r\n\r\n # Fully Connected Softmax Layer\r\n x_fc = AveragePooling2D((8, 8), strides=(8, 8), name='avg_pool')(x)\r\n x_fc = Flatten(name='flatten')(x_fc)\r\n x_fc = Dense(1000, activation='softmax', name='predictions')(x_fc)\r\n\r\n # Create model\r\n model = Model(img_input, x_fc)\r\n\r\n # Load ImageNet pre-trained data \r\n model.load_weights('imagenet_models/inception_v3_weights_th_dim_ordering_th_kernels.h5')\r\n\r\n # Truncate and replace softmax layer for transfer learning\r\n # Cannot use model.layers.pop() since model is not of Sequential() type\r\n # The method below works since pre-trained weights are stored in layers but not in the model\r\n x_newfc = AveragePooling2D((8, 8), strides=(8, 8), name='avg_pool')(x)\r\n x_newfc = Flatten(name='flatten')(x_newfc)\r\n x_newfc = Dense(num_classes, activation='softmax', name='predictions')(x_newfc)\r\n\r\n # Create another model with our customized softmax\r\n model = Model(img_input, x_newfc)\r\n\r\n # Learning rate is changed to 0.001\r\n sgd = SGD(lr, decay=1e-6, momentum=0.9, nesterov=True)\r\n model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])\r\n\r\n return model \r\n\r\ndef lr_schedule(epoch): # function that takes an epoch index as input and returns a new learning rate as output\r\n return lr*(0.1**int(epoch/10))\r\n\r\nif __name__ == '__main__':\r\n\r\n img_rows, img_cols = 299, 299 # Resolution of inputs\r\n channel = 3\r\n num_classes = 43 \r\n # batch_size = 10 # 20\r\n nb_epoch = 30\r\n\r\n # Load data. 
Please implement your own load_data() module for your own dataset\r\n X_train, Y_train, X_valid, Y_valid, Y_test = load_GTSRB_data_1(img_rows, img_cols)\r\n\r\n # Load our model\r\n print(\"loading model\")\r\n model = inception_v3_model(img_rows, img_cols, channel, num_classes)\r\n\r\n csv_logger=CSVLogger('training.log') # callback that streams epoch results to a csv file\r\n\r\n print(\"start fine tuning\")\r\n # Start Fine-tuning\r\n model.fit(X_train, Y_train,\r\n batch_size=batch_size,\r\n nb_epoch=nb_epoch,\r\n shuffle=True,\r\n verbose=1,\r\n validation_split=0.2, # fraction of the data held-out for validation\r\n callbacks=[LearningRateScheduler(lr_schedule), history,csv_logger,\r\n ModelCheckpoint(weights_filename, monitor='val_acc', verbose=1, save_best_only=True, mode='max')] \r\n )\r\n # ModelCheckpoint('incep_weights.{epoch:02d}-{val_loss:.2f}.h5',\r\n #EarlyStopping(monitor='val_loss', patience=2, verbose=0),\r\n \r\n #Get history of accuracy and plot it\r\n # print(\"hhistory acc: \",history.acc)\r\n # print(\" history acc type: \", type(history.acc))\r\n #np.save('history_acc_inception', history.acc)\r\n #plt.plot(range(1,nb_epoch+1), history.acc)\r\n #plt.xlabel('Epochs')\r\n #plt.ylabel('Accuracy')\r\n #plt.title(\"Inception\")\r\n #plt.show()\r\n\r\n y_pred= model.predict_classes(X_valid)\r\n print(\"Predictions: \", y_pred)\r\n model.metrics_names\r\n \r\n y_eval=model.evaluate(X_valid,Y_valid)\r\n print(\"Evaluation: \", y_eval)\r\n\r\n f=open(result_file, 'w')\r\n f.write('Y_pred: ' + str(y_pred) )\r\n f.write('Y_eval: ' + str(y_eval))\r\n f.close()\r\n \r\n cm=confusion_matrix(Y_test, y_pred) # confusion matrix\r\n print(cm)\r\n\r\n plt.matshow(cm)\r\n plt.title('Confusion matrix InceptionV3')\r\n plt.colorbar()\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label')\r\n #plt.show()\r\n plt.savefig(matrix_filename)\r\n plt.close()\r\n\r\n print(\"Done!\")\r\n\r\n \r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from rest_framework import serializers
from django.contrib.auth import password_validation
from rest_framework.validators import UniqueValidator
from .models import CustomUser, Role, Permission, ActionEntity
from .utils import create_permission
class ActionEntitySerializer(serializers.ModelSerializer):
    """Serialises ActionEntity rows for the nested permission payload."""

    # Writable so nested role updates can address existing rows by primary key.
    id = serializers.IntegerField(required=False)

    class Meta:
        model = ActionEntity
        fields = '__all__'
class PermissionSerializer(serializers.ModelSerializer):
    """Serialises Permission rows together with their nested action set."""

    actionEntitySet = ActionEntitySerializer(many=True)

    class Meta:
        model = Permission
        fields = '__all__'
class RoleSerializer(serializers.ModelSerializer):
    """Serialises Role together with its nested permission tree.

    ``create`` builds the default permission tree via ``create_permission``;
    ``update`` only toggles the per-action ``enable`` flags.
    """

    id = serializers.ReadOnlyField()
    permissions = PermissionSerializer(many=True)
    name = serializers.CharField(
        max_length=32,
        validators=[UniqueValidator(queryset=Role.objects.all())])
    status = serializers.IntegerField()
    describe = serializers.CharField(required=False, allow_null=True,
                                     max_length=128)

    class Meta:
        model = Role
        fields = '__all__'

    def create(self, validated_data):
        # Pop the nested payload so it is not passed to Role.objects.create();
        # the default permission tree is generated by create_permission().
        validated_data.pop('permissions', None)
        role = Role.objects.create(**validated_data)
        create_permission(role)
        return role

    def update(self, instance, validated_data):
        # Guard against a partial update that omits 'permissions' — the
        # original code iterated None and raised TypeError in that case.
        permissions = validated_data.pop('permissions', None) or []
        for permission_data in permissions:
            for action_data in permission_data.get('actionEntitySet') or []:
                action = ActionEntity.objects.get(pk=action_data.get('id'))
                action.enable = action_data.get('enable')
                action.save()
        super().update(instance, validated_data)
        return instance
class SelfChangePasswordSerializer(serializers.Serializer):
    """Lets an authenticated user change their own password.

    Requires the current password and runs the new one through Django's
    configured password validators.
    """

    old_password = serializers.CharField(required=True)
    new_password = serializers.CharField(required=True)

    def get_current_user(self):
        # The request is injected via the serializer context by the view.
        return self.context['request'].user

    def validate(self, data):
        old_password = data.get('old_password', None)
        new_password = data.get('new_password', None)
        wrong_old = (old_password is not None and
                     not self.get_current_user().check_password(old_password))
        if wrong_old:
            raise serializers.ValidationError({'old_password': 'Your old password was entered incorrectly. Please enter it again.'})
        if new_password is not None:
            password_validation.validate_password(new_password)
        return super().validate(data)
class ChangePasswordSerializer(serializers.Serializer):
    """Password reset that needs only the new password (no old-password check)."""

    new_password = serializers.CharField(max_length=128)

    def get_current_user(self):
        # Request user comes from the serializer context set by the view.
        return self.context['request'].user

    def validate(self, data):
        candidate = data.get('new_password', None)
        if candidate is not None:
            password_validation.validate_password(candidate)
        return super().validate(data)
class UserCreateSerializer(serializers.ModelSerializer):
    """Creates users, re-hashing the password after the model row is created."""

    username = serializers.CharField(
        min_length=5, max_length=150,
        validators=[UniqueValidator(queryset=CustomUser.objects.all())])
    password = serializers.CharField(max_length=128)
    price_level = serializers.IntegerField(min_value=1, max_value=5)
    balance = serializers.DecimalField(max_digits=10, decimal_places=2,
                                       min_value=0.0)
    role_id = serializers.IntegerField(required=False, allow_null=True)

    class Meta:
        model = CustomUser
        fields = '__all__'

    def validate(self, data):
        # Run Django's password validators; the other fields are already
        # checked by the declared serializer fields above. (The original
        # also fetched 'username' here but never used it.)
        password = data.get('password', None)
        if password is not None:
            password_validation.validate_password(password)
        return super().validate(data)

    def create(self, validated_data):
        # super().create() stores the raw password; replace it with the hash.
        user = super().create(validated_data)
        user.set_password(validated_data['password'])
        user.save()
        return user
class UserSerializer(serializers.ModelSerializer):
    """Read/update serializer for users; never exposes or updates passwords."""

    role = RoleSerializer(read_only=True)
    role_id = serializers.IntegerField(required=False, allow_null=True)

    class Meta:
        model = CustomUser
        exclude = ('password',)

    def update(self, instance, validated_data):
        # Password changes must go through the dedicated password serializers.
        validated_data.pop('password', None)
        return super().update(instance, validated_data)
class UserSimpleSerializer(serializers.ModelSerializer):
    """Minimal user representation for embedding in other payloads."""

    class Meta:
        model = CustomUser
        fields = ('id', 'username', 'price_level')
|
normal
|
{
"blob_id": "b10a50ce649650542d176a2f6fb8c35c500fbc38",
"index": 3644,
"step-1": "<mask token>\n\n\nclass UserCreateSerializer(serializers.ModelSerializer):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = CustomUser\n fields = '__all__'\n\n def validate(self, data):\n password = data.get('password', None)\n username = data.get('username', None)\n if password is not None:\n password_validation.validate_password(password)\n return super().validate(data)\n\n def create(self, validated_data):\n user = super().create(validated_data)\n user.set_password(validated_data['password'])\n user.save()\n return user\n\n\nclass UserSerializer(serializers.ModelSerializer):\n role = RoleSerializer(read_only=True)\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n\n class Meta:\n model = CustomUser\n exclude = 'password',\n\n def update(self, instance, validated_data):\n validated_data.pop('password', None)\n return super().update(instance, validated_data)\n\n\nclass UserSimpleSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CustomUser\n fields = 'id', 'username', 'price_level'\n",
"step-2": "<mask token>\n\n\nclass RoleSerializer(serializers.ModelSerializer):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = Role\n fields = '__all__'\n <mask token>\n <mask token>\n\n\nclass SelfChangePasswordSerializer(serializers.Serializer):\n old_password = serializers.CharField(required=True)\n new_password = serializers.CharField(required=True)\n\n def get_current_user(self):\n return self.context['request'].user\n\n def validate(self, data):\n old_password = data.get('old_password', None)\n new_password = data.get('new_password', None)\n if old_password is not None and not self.get_current_user(\n ).check_password(old_password):\n raise serializers.ValidationError({'old_password':\n 'Your old password was entered incorrectly. Please enter it again.'\n })\n if new_password is not None:\n password_validation.validate_password(new_password)\n return super().validate(data)\n\n\nclass ChangePasswordSerializer(serializers.Serializer):\n new_password = serializers.CharField(max_length=128)\n\n def get_current_user(self):\n return self.context['request'].user\n\n def validate(self, data):\n new_password = data.get('new_password', None)\n if new_password is not None:\n password_validation.validate_password(new_password)\n return super().validate(data)\n\n\nclass UserCreateSerializer(serializers.ModelSerializer):\n username = serializers.CharField(min_length=5, max_length=150,\n validators=[UniqueValidator(queryset=CustomUser.objects.all())])\n password = serializers.CharField(max_length=128)\n price_level = serializers.IntegerField(min_value=1, max_value=5)\n balance = serializers.DecimalField(max_digits=10, decimal_places=2,\n min_value=0.0)\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n\n class Meta:\n model = CustomUser\n fields = '__all__'\n\n def validate(self, data):\n password = data.get('password', None)\n username = data.get('username', None)\n if password is not None:\n 
password_validation.validate_password(password)\n return super().validate(data)\n\n def create(self, validated_data):\n user = super().create(validated_data)\n user.set_password(validated_data['password'])\n user.save()\n return user\n\n\nclass UserSerializer(serializers.ModelSerializer):\n role = RoleSerializer(read_only=True)\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n\n class Meta:\n model = CustomUser\n exclude = 'password',\n\n def update(self, instance, validated_data):\n validated_data.pop('password', None)\n return super().update(instance, validated_data)\n\n\nclass UserSimpleSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CustomUser\n fields = 'id', 'username', 'price_level'\n",
"step-3": "<mask token>\n\n\nclass RoleSerializer(serializers.ModelSerializer):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = Role\n fields = '__all__'\n <mask token>\n\n def update(self, instance, validated_data):\n permissions = validated_data.pop('permissions', None)\n for permissionData in permissions:\n for actionData in permissionData.get('actionEntitySet'):\n action = ActionEntity.objects.get(pk=actionData.get('id'))\n action.enable = actionData.get('enable')\n action.save()\n super().update(instance, validated_data)\n return instance\n\n\nclass SelfChangePasswordSerializer(serializers.Serializer):\n old_password = serializers.CharField(required=True)\n new_password = serializers.CharField(required=True)\n\n def get_current_user(self):\n return self.context['request'].user\n\n def validate(self, data):\n old_password = data.get('old_password', None)\n new_password = data.get('new_password', None)\n if old_password is not None and not self.get_current_user(\n ).check_password(old_password):\n raise serializers.ValidationError({'old_password':\n 'Your old password was entered incorrectly. 
Please enter it again.'\n })\n if new_password is not None:\n password_validation.validate_password(new_password)\n return super().validate(data)\n\n\nclass ChangePasswordSerializer(serializers.Serializer):\n new_password = serializers.CharField(max_length=128)\n\n def get_current_user(self):\n return self.context['request'].user\n\n def validate(self, data):\n new_password = data.get('new_password', None)\n if new_password is not None:\n password_validation.validate_password(new_password)\n return super().validate(data)\n\n\nclass UserCreateSerializer(serializers.ModelSerializer):\n username = serializers.CharField(min_length=5, max_length=150,\n validators=[UniqueValidator(queryset=CustomUser.objects.all())])\n password = serializers.CharField(max_length=128)\n price_level = serializers.IntegerField(min_value=1, max_value=5)\n balance = serializers.DecimalField(max_digits=10, decimal_places=2,\n min_value=0.0)\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n\n class Meta:\n model = CustomUser\n fields = '__all__'\n\n def validate(self, data):\n password = data.get('password', None)\n username = data.get('username', None)\n if password is not None:\n password_validation.validate_password(password)\n return super().validate(data)\n\n def create(self, validated_data):\n user = super().create(validated_data)\n user.set_password(validated_data['password'])\n user.save()\n return user\n\n\nclass UserSerializer(serializers.ModelSerializer):\n role = RoleSerializer(read_only=True)\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n\n class Meta:\n model = CustomUser\n exclude = 'password',\n\n def update(self, instance, validated_data):\n validated_data.pop('password', None)\n return super().update(instance, validated_data)\n\n\nclass UserSimpleSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CustomUser\n fields = 'id', 'username', 'price_level'\n",
"step-4": "<mask token>\n\n\nclass ActionEntitySerializer(serializers.ModelSerializer):\n <mask token>\n\n\n class Meta:\n model = ActionEntity\n fields = '__all__'\n\n\nclass PermissionSerializer(serializers.ModelSerializer):\n actionEntitySet = ActionEntitySerializer(many=True)\n\n\n class Meta:\n model = Permission\n fields = '__all__'\n\n\nclass RoleSerializer(serializers.ModelSerializer):\n id = serializers.ReadOnlyField()\n permissions = PermissionSerializer(many=True)\n name = serializers.CharField(max_length=32, validators=[UniqueValidator\n (queryset=Role.objects.all())])\n status = serializers.IntegerField()\n describe = serializers.CharField(required=False, allow_null=True,\n max_length=128)\n\n\n class Meta:\n model = Role\n fields = '__all__'\n\n def create(self, validated_data):\n permissions = validated_data.pop('permissions', None)\n role = Role.objects.create(**validated_data)\n create_permission(role)\n return role\n\n def update(self, instance, validated_data):\n permissions = validated_data.pop('permissions', None)\n for permissionData in permissions:\n for actionData in permissionData.get('actionEntitySet'):\n action = ActionEntity.objects.get(pk=actionData.get('id'))\n action.enable = actionData.get('enable')\n action.save()\n super().update(instance, validated_data)\n return instance\n\n\nclass SelfChangePasswordSerializer(serializers.Serializer):\n old_password = serializers.CharField(required=True)\n new_password = serializers.CharField(required=True)\n\n def get_current_user(self):\n return self.context['request'].user\n\n def validate(self, data):\n old_password = data.get('old_password', None)\n new_password = data.get('new_password', None)\n if old_password is not None and not self.get_current_user(\n ).check_password(old_password):\n raise serializers.ValidationError({'old_password':\n 'Your old password was entered incorrectly. 
Please enter it again.'\n })\n if new_password is not None:\n password_validation.validate_password(new_password)\n return super().validate(data)\n\n\nclass ChangePasswordSerializer(serializers.Serializer):\n new_password = serializers.CharField(max_length=128)\n\n def get_current_user(self):\n return self.context['request'].user\n\n def validate(self, data):\n new_password = data.get('new_password', None)\n if new_password is not None:\n password_validation.validate_password(new_password)\n return super().validate(data)\n\n\nclass UserCreateSerializer(serializers.ModelSerializer):\n username = serializers.CharField(min_length=5, max_length=150,\n validators=[UniqueValidator(queryset=CustomUser.objects.all())])\n password = serializers.CharField(max_length=128)\n price_level = serializers.IntegerField(min_value=1, max_value=5)\n balance = serializers.DecimalField(max_digits=10, decimal_places=2,\n min_value=0.0)\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n\n class Meta:\n model = CustomUser\n fields = '__all__'\n\n def validate(self, data):\n password = data.get('password', None)\n username = data.get('username', None)\n if password is not None:\n password_validation.validate_password(password)\n return super().validate(data)\n\n def create(self, validated_data):\n user = super().create(validated_data)\n user.set_password(validated_data['password'])\n user.save()\n return user\n\n\nclass UserSerializer(serializers.ModelSerializer):\n role = RoleSerializer(read_only=True)\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n\n class Meta:\n model = CustomUser\n exclude = 'password',\n\n def update(self, instance, validated_data):\n validated_data.pop('password', None)\n return super().update(instance, validated_data)\n\n\nclass UserSimpleSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CustomUser\n fields = 'id', 'username', 'price_level'\n",
"step-5": "from rest_framework import serializers\nfrom django.contrib.auth import password_validation\nfrom rest_framework.validators import UniqueValidator\n\nfrom .models import CustomUser, Role, Permission, ActionEntity\nfrom .utils import create_permission\n\nclass ActionEntitySerializer(serializers.ModelSerializer):\n\n id = serializers.IntegerField(required=False)\n \n class Meta:\n model = ActionEntity\n fields = '__all__'\n\nclass PermissionSerializer(serializers.ModelSerializer):\n\n actionEntitySet = ActionEntitySerializer(many=True)\n\n class Meta:\n model = Permission\n fields = '__all__'\n\nclass RoleSerializer(serializers.ModelSerializer):\n\n id = serializers.ReadOnlyField()\n\n permissions = PermissionSerializer(many=True)\n\n name = serializers.CharField(max_length=32, validators=[UniqueValidator(queryset=Role.objects.all())])\n\n status = serializers.IntegerField() \n\n describe = serializers.CharField(required=False, allow_null=True, max_length=128)\n\n class Meta:\n model = Role\n fields = '__all__'\n\n def create(self, validated_data):\n permissions = validated_data.pop('permissions', None)\n role = Role.objects.create(**validated_data)\n create_permission(role)\n return role\n\n def update(self, instance, validated_data):\n permissions = validated_data.pop('permissions', None)\n for permissionData in permissions:\n for actionData in permissionData.get('actionEntitySet'):\n action = ActionEntity.objects.get(pk=actionData.get('id'))\n action.enable = actionData.get('enable')\n action.save()\n\n super().update(instance, validated_data)\n\n return instance\n\nclass SelfChangePasswordSerializer(serializers.Serializer):\n\n old_password = serializers.CharField(required=True)\n new_password = serializers.CharField(required=True)\n\n def get_current_user(self):\n return self.context['request'].user\n\n def validate(self, data):\n old_password = data.get('old_password', None)\n new_password = data.get('new_password', None)\n\n if old_password is not 
None and not self.get_current_user().check_password(old_password):\n raise serializers.ValidationError({'old_password': 'Your old password was entered incorrectly. Please enter it again.'})\n \n if new_password is not None:\n password_validation.validate_password(new_password)\n\n return super().validate(data)\n\nclass ChangePasswordSerializer(serializers.Serializer):\n\n new_password = serializers.CharField(max_length=128)\n\n def get_current_user(self):\n return self.context['request'].user\n\n def validate(self, data):\n new_password = data.get('new_password', None)\n\n if new_password is not None:\n password_validation.validate_password(new_password)\n\n return super().validate(data)\n\nclass UserCreateSerializer(serializers.ModelSerializer):\n username = serializers.CharField(min_length= 5, max_length=150, validators=[UniqueValidator(queryset=CustomUser.objects.all())])\n password = serializers.CharField(max_length=128)\n price_level = serializers.IntegerField(min_value=1, max_value=5)\n balance = serializers.DecimalField(max_digits=10, decimal_places=2, min_value=0.0)\n\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n class Meta:\n model = CustomUser\n fields = '__all__'\n\n def validate(self, data):\n password = data.get('password', None)\n username = data.get('username', None)\n \n if password is not None:\n password_validation.validate_password(password)\n\n return super().validate(data)\n\n def create(self, validated_data):\n user = super().create(validated_data)\n user.set_password(validated_data['password'])\n user.save()\n return user\n\nclass UserSerializer(serializers.ModelSerializer):\n\n role = RoleSerializer(read_only=True)\n\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n class Meta:\n model = CustomUser\n exclude = (\n 'password',\n )\n\n def update(self, instance, validated_data):\n validated_data.pop('password', None)\n return super().update(instance, validated_data)\n\nclass 
UserSimpleSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = CustomUser\n fields = (\n 'id', 'username', 'price_level'\n )",
"step-ids": [
7,
17,
18,
23,
26
]
}
|
[
7,
17,
18,
23,
26
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(1, n + 1):
if i % 2 == 1:
print('1 ', end='')
else:
print('0 ', end='')
<|reserved_special_token_1|>
# Print n alternating digits starting with 1, e.g. "1 0 1 0 " for n=4.
n = int(input("Please input the number of 1's and 0's you want to print:"))
for index in range(1, n + 1):
    digit = '1' if index % 2 == 1 else '0'
    print(digit, end=' ')
<|reserved_special_token_1|>
# Emit "1 0 1 0 ..." for n digits; odd positions get 1, even positions 0.
n = int(input("Please input the number of 1's and 0's you want to print:"))
print(''.join('1 ' if i % 2 else '0 ' for i in range(1, n + 1)), end='')
|
flexible
|
{
"blob_id": "bd96b31c5de2f0ad4bbc28c876b86ec238db3184",
"index": 9108,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(1, n + 1):\n if i % 2 == 1:\n print('1 ', end='')\n else:\n print('0 ', end='')\n",
"step-3": "n = int(input(\"Please input the number of 1's and 0's you want to print:\"))\nfor i in range(1, n + 1):\n if i % 2 == 1:\n print('1 ', end='')\n else:\n print('0 ', end='')\n",
"step-4": "n = int(input(\"Please input the number of 1's and 0's you want to print:\"))\n\nfor i in range (1, n+1):\n if i%2 == 1:\n print (\"1 \", end = \"\")\n else:\n print (\"0 \", end = \"\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sys
def read(inp):
    """Parse a fractional-knapsack instance from a file-like object.

    The first line holds "<item count> <capacity>"; each of the following
    <item count> lines holds "<value> <weight>".

    Returns (capacity, items) where items is a list of (value, weight) tuples.
    """
    count, capacity = map(int, inp.readline().split())
    items = []
    for _ in range(count):
        value, weight = map(int, inp.readline().split())
        items.append((value, weight))
    return capacity, items
def solve(v, items):
    """Greedy fractional knapsack.

    items is a sequence of (value, weight) pairs, assumed already sorted by
    value/weight ratio descending; v is the remaining capacity.  Whole items
    are taken while they fit; the first item that does not fit is taken
    fractionally and the scan stops.
    """
    total = 0
    capacity_left = v
    for value, weight in items:
        if capacity_left > weight:
            # The whole item fits: take all of its value.
            total += value
            capacity_left -= weight
        else:
            # Take only the fraction that still fits, then stop.
            total += value * (capacity_left / weight)
            break
    return total
if __name__ == '__main__':
    # Read the knapsack instance from a local file named '1'; swap in the
    # commented-out line below to read from standard input instead.
    inp = open('1', 'r')
    # inp = sys.stdin
    v, items = read(inp)
    # Greedy order: highest value-per-weight ratio first, as the
    # fractional-knapsack algorithm requires.
    s_items = sorted(items, key=lambda i: i[0]/i[1], reverse=True)
    res = solve(v, s_items)
    print(res)
|
normal
|
{
"blob_id": "8b0e7e8f2031df217894e980758e15d7401c0981",
"index": 2750,
"step-1": "<mask token>\n\n\ndef read(inp):\n res = []\n n, v = map(int, inp.readline().split())\n for i in range(n):\n x, y = map(int, inp.readline().split())\n res.append((x, y))\n return v, res\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef read(inp):\n res = []\n n, v = map(int, inp.readline().split())\n for i in range(n):\n x, y = map(int, inp.readline().split())\n res.append((x, y))\n return v, res\n\n\ndef solve(v, items):\n res = 0\n rem_v = v\n for item in items:\n if rem_v > item[1]:\n res += item[0]\n rem_v -= item[1]\n else:\n res += item[0] * (rem_v / item[1])\n break\n return res\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef read(inp):\n res = []\n n, v = map(int, inp.readline().split())\n for i in range(n):\n x, y = map(int, inp.readline().split())\n res.append((x, y))\n return v, res\n\n\ndef solve(v, items):\n res = 0\n rem_v = v\n for item in items:\n if rem_v > item[1]:\n res += item[0]\n rem_v -= item[1]\n else:\n res += item[0] * (rem_v / item[1])\n break\n return res\n\n\nif __name__ == '__main__':\n inp = open('1', 'r')\n v, items = read(inp)\n s_items = sorted(items, key=lambda i: i[0] / i[1], reverse=True)\n res = solve(v, s_items)\n print(res)\n",
"step-4": "import sys\n\n\ndef read(inp):\n res = []\n n, v = map(int, inp.readline().split())\n for i in range(n):\n x, y = map(int, inp.readline().split())\n res.append((x, y))\n return v, res\n\n\ndef solve(v, items):\n res = 0\n rem_v = v\n for item in items:\n if rem_v > item[1]:\n res += item[0]\n rem_v -= item[1]\n else:\n res += item[0] * (rem_v / item[1])\n break\n return res\n\n\nif __name__ == '__main__':\n inp = open('1', 'r')\n v, items = read(inp)\n s_items = sorted(items, key=lambda i: i[0] / i[1], reverse=True)\n res = solve(v, s_items)\n print(res)\n",
"step-5": "import sys\n\n\ndef read(inp):\n res = []\n n, v = map(int, inp.readline().split())\n for i in range(n):\n x, y = map(int, inp.readline().split())\n res.append((x, y))\n\n return v, res\n\n\ndef solve(v, items):\n res = 0\n rem_v = v\n\n for item in items:\n if rem_v > item[1]:\n res += item[0]\n rem_v -= item[1]\n else:\n res += item[0] * (rem_v/item[1])\n break\n\n return res\n\n\nif __name__ == '__main__':\n inp = open('1', 'r')\n # inp = sys.stdin\n\n v, items = read(inp)\n s_items = sorted(items, key=lambda i: i[0]/i[1], reverse=True)\n res = solve(v, s_items)\n\n print(res)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(2 * int(input())):
a.append(int(input()))
if 1 in a:
c = a.index(max(a))
if a[c + 1] == 1:
print(c)
else:
del a[c]
s = a.index(max(a))
if a[s + 1] == 1:
print(s)
else:
print('-1')
<|reserved_special_token_1|>
# Read 2*k integers, then report the index of the (first) maximum that is
# directly followed by a 1; prints -1 only when no 1 occurs in the input.
a = []
for i in range(2 * int(input())):
    a.append(int(input()))
if 1 in a:
    c = a.index(max(a))  # index of the first occurrence of the maximum
    # NOTE(review): a[c + 1] raises IndexError when the maximum is the
    # last element -- confirm the inputs rule that out.
    if a[c + 1] == 1:
        print(c)
    else:
        # Drop the first maximum and retry with the next-largest value.
        del a[c]
        s = a.index(max(a))
        if a[s + 1] == 1:
            print(s)
        # NOTE(review): nothing is printed when this second check also
        # fails -- presumably the input guarantees a hit; verify.
else:
    print('-1')
<|reserved_special_token_1|>
# Read 2*k integers, then report the index of the (first) maximum that is
# directly followed by a 1; prints -1 only when no 1 occurs in the input.
a = []
for i in range((2 * int(input()))):
    a.append(int(input()))
if 1 in a:
    c = a.index(max(a))  # index of the first occurrence of the maximum
    # NOTE(review): a[c + 1] raises IndexError when the maximum is the
    # last element -- confirm the inputs rule that out.
    if a[c + 1] == 1:
        print(c)
    else:
        # Drop the first maximum and retry with the next-largest value.
        del a[c]
        s = a.index(max(a))
        if a[s + 1] == 1:
            print(s)
        # NOTE(review): no output is produced when this check fails too.
else:
    print('-1')
|
flexible
|
{
"blob_id": "e3e50df47ef074f13382e249832c065ebdce18a6",
"index": 8406,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(2 * int(input())):\n a.append(int(input()))\nif 1 in a:\n c = a.index(max(a))\n if a[c + 1] == 1:\n print(c)\n else:\n del a[c]\n s = a.index(max(a))\n if a[s + 1] == 1:\n print(s)\nelse:\n print('-1')\n",
"step-3": "a = []\nfor i in range(2 * int(input())):\n a.append(int(input()))\nif 1 in a:\n c = a.index(max(a))\n if a[c + 1] == 1:\n print(c)\n else:\n del a[c]\n s = a.index(max(a))\n if a[s + 1] == 1:\n print(s)\nelse:\n print('-1')\n",
"step-4": "a = []\nfor i in range((2 * int(input()))):\n a.append(int(input()))\nif 1 in a:\n c = a.index(max(a))\n if a[c + 1] == 1:\n print(c)\n else:\n del a[c]\n s = a.index(max(a))\n if a[s + 1] == 1:\n print(s)\nelse:\n print('-1')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while True:
print('\nPlayer Items:')
for item in player.items:
print('\t', item)
print('Room - ', player.current_room)
print('Items in Room:')
for item in player.current_room.items:
print('\t', item)
userInput = input(
"""What would you like to do?
Enter [n], [s], [e] or [w] to move across rooms
Enter "take [item_name]" or "drop [item_name]" to add or remove items
Enter [q] to quit the game
"""
)
userInputWords = userInput.split(' ')
if userInput == 'q':
print('You chose to quit!')
break
elif len(userInputWords) == 1:
player.move(userInput)
elif len(userInputWords) == 2:
verb = userInputWords[0]
itemName = userInputWords[1]
if itemName in itemList:
player.action(verb, itemList[itemName])
else:
print('Invalid item choice')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
room = {'outside': Room('Outside Cave Entrance',
'North of you, the cave mount beckons'), 'foyer': Room('Foyer',
"""Dim light filters in from the south. Dusty
passages run north and east."""
), 'overlook': Room('Grand Overlook',
"""A steep cliff appears before you, falling
into the darkness. Ahead to the north, a light flickers in
the distance, but there is no way across the chasm."""
), 'narrow': Room('Narrow Passage',
"""The narrow passage bends here from west
to north. The smell of gold permeates the air."""
), 'treasure': Room('Treasure Chamber',
"""You've found the long-lost treasure
chamber! Sadly, it has already been completely emptied by
earlier adventurers. The only exit is to the south."""
)}
room['outside'].n_to = room['foyer']
room['foyer'].s_to = room['outside']
room['foyer'].n_to = room['overlook']
room['foyer'].e_to = room['narrow']
room['overlook'].s_to = room['foyer']
room['narrow'].w_to = room['foyer']
room['narrow'].n_to = room['treasure']
room['treasure'].s_to = room['narrow']
itemList = {'Brick': Item('Brick', 'Build settlement and roads'), 'Wood':
Item('Wood', 'Build settlement and roads'), 'Sheep': Item('Sheep',
'Build settlement and get development cards'), 'Grain': Item('Grain',
'Build settlement, cities and get development cards'), 'Stone': Item(
'Stone', 'Build cities and get development cards'), 'DCard': Item(
'Development Cards', 'Get special powers')}
room['outside'].items = [itemList['Brick'], itemList['Wood']]
room['foyer'].items = [itemList['Brick'], itemList['Grain'], itemList['Sheep']]
room['overlook'].items = [itemList['Wood'], itemList['Sheep'], itemList[
'Grain']]
room['narrow'].items = [itemList['Stone'], itemList['Grain']]
room['treasure'].items = [itemList['Brick'], itemList['Grain'], itemList[
'Wood']]
playerName = input('Hello, What is your name?\n')
player = Player(playerName, room['outside'])
while True:
print('\nPlayer Items:')
for item in player.items:
print('\t', item)
print('Room - ', player.current_room)
print('Items in Room:')
for item in player.current_room.items:
print('\t', item)
userInput = input(
"""What would you like to do?
Enter [n], [s], [e] or [w] to move across rooms
Enter "take [item_name]" or "drop [item_name]" to add or remove items
Enter [q] to quit the game
"""
)
userInputWords = userInput.split(' ')
if userInput == 'q':
print('You chose to quit!')
break
elif len(userInputWords) == 1:
player.move(userInput)
elif len(userInputWords) == 2:
verb = userInputWords[0]
itemName = userInputWords[1]
if itemName in itemList:
player.action(verb, itemList[itemName])
else:
print('Invalid item choice')
<|reserved_special_token_1|>
# Bootstrap for a small text adventure: build five rooms, wire up their
# exits, seed them with items, then run the player's command loop.
from room import Room
from player import Player
from item import Item
# The world map, keyed by a short room name.
room = {'outside': Room('Outside Cave Entrance',
    'North of you, the cave mount beckons'), 'foyer': Room('Foyer',
    """Dim light filters in from the south. Dusty
passages run north and east."""
    ), 'overlook': Room('Grand Overlook',
    """A steep cliff appears before you, falling
into the darkness. Ahead to the north, a light flickers in
the distance, but there is no way across the chasm."""
    ), 'narrow': Room('Narrow Passage',
    """The narrow passage bends here from west
to north. The smell of gold permeates the air."""
    ), 'treasure': Room('Treasure Chamber',
    """You've found the long-lost treasure
chamber! Sadly, it has already been completely emptied by
earlier adventurers. The only exit is to the south."""
    )}
# Connect the rooms (n_to/s_to/e_to/w_to appear to be the exit slots on
# Room -- confirm against the Room class definition).
room['outside'].n_to = room['foyer']
room['foyer'].s_to = room['outside']
room['foyer'].n_to = room['overlook']
room['foyer'].e_to = room['narrow']
room['overlook'].s_to = room['foyer']
room['narrow'].w_to = room['foyer']
room['narrow'].n_to = room['treasure']
room['treasure'].s_to = room['narrow']
# Catalogue of item objects, keyed by the name the player types.
itemList = {'Brick': Item('Brick', 'Build settlement and roads'), 'Wood':
    Item('Wood', 'Build settlement and roads'), 'Sheep': Item('Sheep',
    'Build settlement and get development cards'), 'Grain': Item('Grain',
    'Build settlement, cities and get development cards'), 'Stone': Item(
    'Stone', 'Build cities and get development cards'), 'DCard': Item(
    'Development Cards', 'Get special powers')}
# Stock each room.  NOTE(review): rooms share the SAME Item instances from
# itemList, so taking/dropping may alias state across rooms -- confirm.
room['outside'].items = [itemList['Brick'], itemList['Wood']]
room['foyer'].items = [itemList['Brick'], itemList['Grain'], itemList['Sheep']]
room['overlook'].items = [itemList['Wood'], itemList['Sheep'], itemList[
    'Grain']]
room['narrow'].items = [itemList['Stone'], itemList['Grain']]
room['treasure'].items = [itemList['Brick'], itemList['Grain'], itemList[
    'Wood']]
# Create the player in the starting room.
playerName = input('Hello, What is your name?\n')
player = Player(playerName, room['outside'])
# Read-eval loop: show the player's inventory and the current room, then
# dispatch on the number of words typed (1 word = move/quit, 2 = verb+item).
while True:
    print('\nPlayer Items:')
    for item in player.items:
        print('\t', item)
    print('Room - ', player.current_room)
    print('Items in Room:')
    for item in player.current_room.items:
        print('\t', item)
    userInput = input(
        """What would you like to do? 
	Enter [n], [s], [e] or [w] to move across rooms 
	Enter "take [item_name]" or "drop [item_name]" to add or remove items 
	Enter [q] to quit the game
"""
        )
    userInputWords = userInput.split(' ')
    if userInput == 'q':
        print('You chose to quit!')
        break
    elif len(userInputWords) == 1:
        # One word: treated as a direction; Player.move presumably
        # validates it -- confirm how invalid directions are reported.
        player.move(userInput)
    elif len(userInputWords) == 2:
        verb = userInputWords[0]
        itemName = userInputWords[1]
        if itemName in itemList:
            player.action(verb, itemList[itemName])
        else:
            print('Invalid item choice')
<|reserved_special_token_1|>
# Text-adventure "main" script: declares the map, links exits, stocks the
# rooms with items, then runs the interactive game loop until the user quits.
from room import Room
from player import Player
from item import Item
# Declare all the rooms
room = {
    'outside': Room("Outside Cave Entrance",
                    "North of you, the cave mount beckons"),
    'foyer': Room("Foyer", """Dim light filters in from the south. Dusty
passages run north and east."""),
    'overlook': Room("Grand Overlook", """A steep cliff appears before you, falling
into the darkness. Ahead to the north, a light flickers in
the distance, but there is no way across the chasm."""),
    'narrow': Room("Narrow Passage", """The narrow passage bends here from west
to north. The smell of gold permeates the air."""),
    'treasure': Room("Treasure Chamber", """You've found the long-lost treasure
chamber! Sadly, it has already been completely emptied by
earlier adventurers. The only exit is to the south."""),
}
# Link rooms together (n_to/s_to/e_to/w_to appear to be the exit slots on
# Room -- confirm against the Room class).
room['outside'].n_to = room['foyer']
room['foyer'].s_to = room['outside']
room['foyer'].n_to = room['overlook']
room['foyer'].e_to = room['narrow']
room['overlook'].s_to = room['foyer']
room['narrow'].w_to = room['foyer']
room['narrow'].n_to = room['treasure']
room['treasure'].s_to = room['narrow']
# list of items, keyed by the name the player will type
itemList = {
    'Brick': Item('Brick', 'Build settlement and roads'),
    'Wood': Item('Wood', 'Build settlement and roads'),
    'Sheep': Item('Sheep', 'Build settlement and get development cards'),
    'Grain': Item('Grain', 'Build settlement, cities and get development cards'),
    'Stone': Item('Stone', 'Build cities and get development cards'),
    'DCard': Item('Development Cards', 'Get special powers')
}
# items assignment to rooms
# NOTE(review): rooms share the SAME Item instances from itemList, so a
# take/drop in one room may alias state in another -- confirm intended.
room['outside'].items = [itemList['Brick'], itemList['Wood']]
room['foyer'].items = [itemList['Brick'], itemList['Grain'], itemList['Sheep']]
room['overlook'].items = [itemList['Wood'], itemList['Sheep'], itemList['Grain']]
room['narrow'].items = [itemList['Stone'], itemList['Grain']]
room['treasure'].items = [itemList['Brick'], itemList['Grain'], itemList['Wood']]
#
# Main
#
# Make a new player object that is currently in the 'outside' room.
# Write a loop that:
#
# * Prints the current room name
# * Prints the current description (the textwrap module might be useful here).
# * Waits for user input and decides what to do.
#
# If the user enters a cardinal direction, attempt to move to the room there.
# Print an error message if the movement isn't allowed.
#
# If the user enters "q", quit the game.
# Get Player Name
playerName = input('Hello, What is your name?\n')
# Initialize player with given name
player = Player(playerName, room['outside'])
while True:
    # print player item inventory
    print('\nPlayer Items:')
    for item in player.items:
        print('\t', item)
    # print current room and items available in the room
    print('Room - ', player.current_room)
    print('Items in Room:')
    for item in player.current_room.items:
        print('\t', item)
    # Get the User Input
    userInput = input('What would you like to do? \n\tEnter [n], [s], [e] or [w] to move across rooms \n\tEnter \"take [item_name]\" or \"drop [item_name]\" to add or remove items \n\tEnter [q] to quit the game\n')
    userInputWords = userInput.split(' ')
    if userInput == 'q':
        print('You chose to quit!')
        break
    elif len(userInputWords) == 1:
        # One word: interpreted as a movement direction ('q' handled above).
        player.move(userInput)
    elif len(userInputWords) == 2:
        # Two words: "<verb> <item>", e.g. "take Brick" / "drop Wood".
        verb = userInputWords[0]
        itemName = userInputWords[1]
        if itemName in itemList:
            player.action(verb, itemList[itemName])
        else:
            print('Invalid item choice')
|
flexible
|
{
"blob_id": "beb536b6d8883daaa7e41da03145dd98aa223cbf",
"index": 5036,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n print('\\nPlayer Items:')\n for item in player.items:\n print('\\t', item)\n print('Room - ', player.current_room)\n print('Items in Room:')\n for item in player.current_room.items:\n print('\\t', item)\n userInput = input(\n \"\"\"What would you like to do? \n\tEnter [n], [s], [e] or [w] to move across rooms \n\tEnter \"take [item_name]\" or \"drop [item_name]\" to add or remove items \n\tEnter [q] to quit the game\n\"\"\"\n )\n userInputWords = userInput.split(' ')\n if userInput == 'q':\n print('You chose to quit!')\n break\n elif len(userInputWords) == 1:\n player.move(userInput)\n elif len(userInputWords) == 2:\n verb = userInputWords[0]\n itemName = userInputWords[1]\n if itemName in itemList:\n player.action(verb, itemList[itemName])\n else:\n print('Invalid item choice')\n",
"step-3": "<mask token>\nroom = {'outside': Room('Outside Cave Entrance',\n 'North of you, the cave mount beckons'), 'foyer': Room('Foyer',\n \"\"\"Dim light filters in from the south. Dusty\npassages run north and east.\"\"\"\n ), 'overlook': Room('Grand Overlook',\n \"\"\"A steep cliff appears before you, falling\ninto the darkness. Ahead to the north, a light flickers in\nthe distance, but there is no way across the chasm.\"\"\"\n ), 'narrow': Room('Narrow Passage',\n \"\"\"The narrow passage bends here from west\nto north. The smell of gold permeates the air.\"\"\"\n ), 'treasure': Room('Treasure Chamber',\n \"\"\"You've found the long-lost treasure\nchamber! Sadly, it has already been completely emptied by\nearlier adventurers. The only exit is to the south.\"\"\"\n )}\nroom['outside'].n_to = room['foyer']\nroom['foyer'].s_to = room['outside']\nroom['foyer'].n_to = room['overlook']\nroom['foyer'].e_to = room['narrow']\nroom['overlook'].s_to = room['foyer']\nroom['narrow'].w_to = room['foyer']\nroom['narrow'].n_to = room['treasure']\nroom['treasure'].s_to = room['narrow']\nitemList = {'Brick': Item('Brick', 'Build settlement and roads'), 'Wood':\n Item('Wood', 'Build settlement and roads'), 'Sheep': Item('Sheep',\n 'Build settlement and get development cards'), 'Grain': Item('Grain',\n 'Build settlement, cities and get development cards'), 'Stone': Item(\n 'Stone', 'Build cities and get development cards'), 'DCard': Item(\n 'Development Cards', 'Get special powers')}\nroom['outside'].items = [itemList['Brick'], itemList['Wood']]\nroom['foyer'].items = [itemList['Brick'], itemList['Grain'], itemList['Sheep']]\nroom['overlook'].items = [itemList['Wood'], itemList['Sheep'], itemList[\n 'Grain']]\nroom['narrow'].items = [itemList['Stone'], itemList['Grain']]\nroom['treasure'].items = [itemList['Brick'], itemList['Grain'], itemList[\n 'Wood']]\nplayerName = input('Hello, What is your name?\\n')\nplayer = Player(playerName, room['outside'])\nwhile True:\n 
print('\\nPlayer Items:')\n for item in player.items:\n print('\\t', item)\n print('Room - ', player.current_room)\n print('Items in Room:')\n for item in player.current_room.items:\n print('\\t', item)\n userInput = input(\n \"\"\"What would you like to do? \n\tEnter [n], [s], [e] or [w] to move across rooms \n\tEnter \"take [item_name]\" or \"drop [item_name]\" to add or remove items \n\tEnter [q] to quit the game\n\"\"\"\n )\n userInputWords = userInput.split(' ')\n if userInput == 'q':\n print('You chose to quit!')\n break\n elif len(userInputWords) == 1:\n player.move(userInput)\n elif len(userInputWords) == 2:\n verb = userInputWords[0]\n itemName = userInputWords[1]\n if itemName in itemList:\n player.action(verb, itemList[itemName])\n else:\n print('Invalid item choice')\n",
"step-4": "from room import Room\nfrom player import Player\nfrom item import Item\nroom = {'outside': Room('Outside Cave Entrance',\n 'North of you, the cave mount beckons'), 'foyer': Room('Foyer',\n \"\"\"Dim light filters in from the south. Dusty\npassages run north and east.\"\"\"\n ), 'overlook': Room('Grand Overlook',\n \"\"\"A steep cliff appears before you, falling\ninto the darkness. Ahead to the north, a light flickers in\nthe distance, but there is no way across the chasm.\"\"\"\n ), 'narrow': Room('Narrow Passage',\n \"\"\"The narrow passage bends here from west\nto north. The smell of gold permeates the air.\"\"\"\n ), 'treasure': Room('Treasure Chamber',\n \"\"\"You've found the long-lost treasure\nchamber! Sadly, it has already been completely emptied by\nearlier adventurers. The only exit is to the south.\"\"\"\n )}\nroom['outside'].n_to = room['foyer']\nroom['foyer'].s_to = room['outside']\nroom['foyer'].n_to = room['overlook']\nroom['foyer'].e_to = room['narrow']\nroom['overlook'].s_to = room['foyer']\nroom['narrow'].w_to = room['foyer']\nroom['narrow'].n_to = room['treasure']\nroom['treasure'].s_to = room['narrow']\nitemList = {'Brick': Item('Brick', 'Build settlement and roads'), 'Wood':\n Item('Wood', 'Build settlement and roads'), 'Sheep': Item('Sheep',\n 'Build settlement and get development cards'), 'Grain': Item('Grain',\n 'Build settlement, cities and get development cards'), 'Stone': Item(\n 'Stone', 'Build cities and get development cards'), 'DCard': Item(\n 'Development Cards', 'Get special powers')}\nroom['outside'].items = [itemList['Brick'], itemList['Wood']]\nroom['foyer'].items = [itemList['Brick'], itemList['Grain'], itemList['Sheep']]\nroom['overlook'].items = [itemList['Wood'], itemList['Sheep'], itemList[\n 'Grain']]\nroom['narrow'].items = [itemList['Stone'], itemList['Grain']]\nroom['treasure'].items = [itemList['Brick'], itemList['Grain'], itemList[\n 'Wood']]\nplayerName = input('Hello, What is your name?\\n')\nplayer = 
Player(playerName, room['outside'])\nwhile True:\n print('\\nPlayer Items:')\n for item in player.items:\n print('\\t', item)\n print('Room - ', player.current_room)\n print('Items in Room:')\n for item in player.current_room.items:\n print('\\t', item)\n userInput = input(\n \"\"\"What would you like to do? \n\tEnter [n], [s], [e] or [w] to move across rooms \n\tEnter \"take [item_name]\" or \"drop [item_name]\" to add or remove items \n\tEnter [q] to quit the game\n\"\"\"\n )\n userInputWords = userInput.split(' ')\n if userInput == 'q':\n print('You chose to quit!')\n break\n elif len(userInputWords) == 1:\n player.move(userInput)\n elif len(userInputWords) == 2:\n verb = userInputWords[0]\n itemName = userInputWords[1]\n if itemName in itemList:\n player.action(verb, itemList[itemName])\n else:\n print('Invalid item choice')\n",
"step-5": "from room import Room\nfrom player import Player\nfrom item import Item\n# Declare all the rooms\n\nroom = {\n 'outside': Room(\"Outside Cave Entrance\",\n \"North of you, the cave mount beckons\"),\n\n 'foyer': Room(\"Foyer\", \"\"\"Dim light filters in from the south. Dusty\npassages run north and east.\"\"\"),\n\n 'overlook': Room(\"Grand Overlook\", \"\"\"A steep cliff appears before you, falling\ninto the darkness. Ahead to the north, a light flickers in\nthe distance, but there is no way across the chasm.\"\"\"),\n\n 'narrow': Room(\"Narrow Passage\", \"\"\"The narrow passage bends here from west\nto north. The smell of gold permeates the air.\"\"\"),\n\n 'treasure': Room(\"Treasure Chamber\", \"\"\"You've found the long-lost treasure\nchamber! Sadly, it has already been completely emptied by\nearlier adventurers. The only exit is to the south.\"\"\"),\n}\n\n\n# Link rooms together\n\nroom['outside'].n_to = room['foyer']\nroom['foyer'].s_to = room['outside']\nroom['foyer'].n_to = room['overlook']\nroom['foyer'].e_to = room['narrow']\nroom['overlook'].s_to = room['foyer']\nroom['narrow'].w_to = room['foyer']\nroom['narrow'].n_to = room['treasure']\nroom['treasure'].s_to = room['narrow']\n\n# list of items\nitemList = {\n 'Brick': Item('Brick', 'Build settlement and roads'),\n 'Wood': Item('Wood', 'Build settlement and roads'),\n 'Sheep': Item('Sheep', 'Build settlement and get development cards'),\n 'Grain': Item('Grain', 'Build settlement, cities and get development cards'),\n 'Stone': Item('Stone', 'Build cities and get development cards'),\n 'DCard': Item('Development Cards', 'Get special powers')\n}\n\n# items assignment to rooms\nroom['outside'].items = [itemList['Brick'], itemList['Wood']]\nroom['foyer'].items = [itemList['Brick'], itemList['Grain'], itemList['Sheep']]\nroom['overlook'].items = [itemList['Wood'], itemList['Sheep'], itemList['Grain']]\nroom['narrow'].items = [itemList['Stone'], itemList['Grain']]\nroom['treasure'].items = 
[itemList['Brick'], itemList['Grain'], itemList['Wood']]\n#\n# Main\n#\n\n# Make a new player object that is currently in the 'outside' room.\n\n# Write a loop that:\n#\n# * Prints the current room name\n# * Prints the current description (the textwrap module might be useful here).\n# * Waits for user input and decides what to do.\n#\n# If the user enters a cardinal direction, attempt to move to the room there.\n# Print an error message if the movement isn't allowed.\n#\n# If the user enters \"q\", quit the game.\n\n# Get Player Name\nplayerName = input('Hello, What is your name?\\n')\n\n# Initialize player with given name\nplayer = Player(playerName, room['outside'])\n\nwhile True:\n # print player item inventory\n print('\\nPlayer Items:')\n for item in player.items:\n print('\\t', item)\n\n # print current room and items available in the room\n print('Room - ', player.current_room)\n print('Items in Room:')\n for item in player.current_room.items:\n print('\\t', item)\n\n\n # Get the User Input\n userInput = input('What would you like to do? \\n\\tEnter [n], [s], [e] or [w] to move across rooms \\n\\tEnter \\\"take [item_name]\\\" or \\\"drop [item_name]\\\" to add or remove items \\n\\tEnter [q] to quit the game\\n')\n\n userInputWords = userInput.split(' ')\n\n if userInput == 'q':\n print('You chose to quit!')\n break\n elif len(userInputWords) == 1:\n player.move(userInput)\n elif len(userInputWords) == 2:\n verb = userInputWords[0]\n itemName = userInputWords[1]\n if itemName in itemList:\n player.action(verb, itemList[itemName])\n else:\n print('Invalid item choice')\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def check_orthogonal(u, v):
    """Return True iff vectors u and v are orthogonal (dot product is zero)."""
    inner = u.dot(v)
    return inner == 0
def check_p():
    """Grader-style heuristic: True unless the caller's local variables,
    rendered as a string, contain the pattern ``p = 0``.

    NOTE(review): ``str(f_locals)`` is a dict repr such as ``{'p': 0}``,
    which uses ``:`` rather than ``=``, so this regex may never match as
    written -- confirm what the calling frame actually contains.
    """
    import inspect
    import re
    # Locals of the *calling* frame (f_back), not of check_p itself.
    local_vars = inspect.currentframe().f_back.f_locals
    return len(re.findall("p\\s*=\\s*0", str(local_vars))) == 0
|
normal
|
{
"blob_id": "36e538ca7fbdbf6e2e6ca1ae126e4e75940bb5cd",
"index": 4316,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef check_p():\n import inspect\n import re\n local_vars = inspect.currentframe().f_back.f_locals\n return len(re.findall('p\\\\s*=\\\\s*0', str(local_vars))) == 0\n",
"step-3": "def check_orthogonal(u, v):\n return u.dot(v) == 0\n\n\ndef check_p():\n import inspect\n import re\n local_vars = inspect.currentframe().f_back.f_locals\n return len(re.findall('p\\\\s*=\\\\s*0', str(local_vars))) == 0\n",
"step-4": "def check_orthogonal(u, v):\n return u.dot(v) == 0\n\n\ndef check_p():\n import inspect\n import re\n local_vars = inspect.currentframe().f_back.f_locals\n return len(re.findall(\"p\\\\s*=\\\\s*0\", str(local_vars))) == 0\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sys

# Read ten integer heights (one per line) from standard input and print
# the three largest, tallest first, one per line.
heights = [int(sys.stdin.readline()) for _ in range(10)]
heights.sort(reverse=True)
for tall in heights[:3]:
    print(tall)
|
normal
|
{
"blob_id": "3e48de2e3b12965de1b3b5cb6c3cf68c90ec6212",
"index": 2274,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(10):\n line = sys.stdin.readline()\n height = int(line)\n heights.append(height)\nheights.sort()\nheights.reverse()\nfor i in range(3):\n print(heights[i])\n",
"step-3": "<mask token>\nheights = []\nfor i in range(10):\n line = sys.stdin.readline()\n height = int(line)\n heights.append(height)\nheights.sort()\nheights.reverse()\nfor i in range(3):\n print(heights[i])\n",
"step-4": "import sys\nheights = []\nfor i in range(10):\n line = sys.stdin.readline()\n height = int(line)\n heights.append(height)\nheights.sort()\nheights.reverse()\nfor i in range(3):\n print(heights[i])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
"""Tiny demo script: print the sum of two numbers."""

import sys


def add_them(a, b):
    """Return the sum of a and b."""
    return a + b


def main():
    # Python 3 fix: `print` is a function -- the original used the
    # Python 2 print *statement*, which is a SyntaxError under Python 3.
    print(add_them(10, 21))


if __name__ == '__main__':
    sys.exit(main())
|
normal
|
{
"blob_id": "aebf1d64923c5f325c9d429be092deaa06f20963",
"index": 6232,
"step-1": "#!/usr/bin/env python\n\nimport sys\n\ndef add_them(a, b):\n return a + b\n\ndef main():\n print add_them(10, 21)\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#Merge Sort
#O(nlogn)
#Merge Part
from __future__ import division #use for python2
def merge(A, B):
    """Merge two already-sorted lists A and B into a single sorted list.

    Returns a new list of length len(A) + len(B); A and B are not modified.

    Bug fix: the original compared heads only with `<` and `>` and fell
    through to a bare `pass` when A[i] == B[j], so the loop made no
    progress and spun forever on ties.  Taking from A on ties (`<=`)
    terminates and keeps the merge stable.
    """
    C, m, n = [], len(A), len(B)
    i, j = 0, 0
    while (i + j) < (m + n):  # i+j is the number of elements merged so far
        if i == m:  # A exhausted: drain B
            C.append(B[j])
            j += 1
        elif j == n:  # B exhausted: drain A
            C.append(A[i])
            i += 1
        elif A[i] <= B[j]:  # smaller (or equal) head comes from A
            C.append(A[i])
            i += 1
        else:  # strictly smaller head comes from B
            C.append(B[j])
            j += 1
    return C
#A = range(0, 100, 2) # generate the lists
#B = range(1, 75, 2) # generate the lists
#print merge(A, B)
#print "\n"
#print len(A) + len(B)
#Sort Part
def mergeSort(A, left, right):
    """Return a sorted copy of the slice A[left:right] via recursive merge sort."""
    span = right - left
    if span <= 1:
        # A slice of zero or one elements is already sorted.
        return A[left:right]
    mid = (left + right) // 2
    # Sort each half independently, then merge the two sorted halves.
    return merge(mergeSort(A, left, mid), mergeSort(A, mid, right))
# Demo data: odd numbers 1..99 followed by even numbers 0..98.
# Python 3 fix: range objects no longer support `+` (that was a Python 2
# list concatenation), so materialize both ranges as lists first.
a = list(range(1, 100, 2)) + list(range(0, 100, 2))
# print(a)
# print(mergeSort(a, 0, len(a)))
|
normal
|
{
"blob_id": "7b4c2689ad1d4601a108dd8aa6e3c4d1e9730dc5",
"index": 5257,
"step-1": "<mask token>\n\n\ndef merge(A, B):\n C, m, n = [], len(A), len(B)\n i, j = 0, 0\n while i + j < m + n:\n if i == m:\n C.append(B[j])\n j = j + 1\n elif j == n:\n C.append(A[i])\n i = i + 1\n elif A[i] < B[j]:\n C.append(A[i])\n i = i + 1\n elif A[i] > B[j]:\n C.append(B[j])\n j = j + 1\n else:\n pass\n return C\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef merge(A, B):\n C, m, n = [], len(A), len(B)\n i, j = 0, 0\n while i + j < m + n:\n if i == m:\n C.append(B[j])\n j = j + 1\n elif j == n:\n C.append(A[i])\n i = i + 1\n elif A[i] < B[j]:\n C.append(A[i])\n i = i + 1\n elif A[i] > B[j]:\n C.append(B[j])\n j = j + 1\n else:\n pass\n return C\n\n\ndef mergeSort(A, left, right):\n if right - left <= 1:\n return A[left:right]\n if right - left > 1:\n mid = (left + right) // 2\n L = mergeSort(A, left, mid)\n R = mergeSort(A, mid, right)\n return merge(L, R)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef merge(A, B):\n C, m, n = [], len(A), len(B)\n i, j = 0, 0\n while i + j < m + n:\n if i == m:\n C.append(B[j])\n j = j + 1\n elif j == n:\n C.append(A[i])\n i = i + 1\n elif A[i] < B[j]:\n C.append(A[i])\n i = i + 1\n elif A[i] > B[j]:\n C.append(B[j])\n j = j + 1\n else:\n pass\n return C\n\n\ndef mergeSort(A, left, right):\n if right - left <= 1:\n return A[left:right]\n if right - left > 1:\n mid = (left + right) // 2\n L = mergeSort(A, left, mid)\n R = mergeSort(A, mid, right)\n return merge(L, R)\n\n\na = range(1, 100, 2) + range(0, 100, 2)\n",
"step-4": "from __future__ import division\n\n\ndef merge(A, B):\n C, m, n = [], len(A), len(B)\n i, j = 0, 0\n while i + j < m + n:\n if i == m:\n C.append(B[j])\n j = j + 1\n elif j == n:\n C.append(A[i])\n i = i + 1\n elif A[i] < B[j]:\n C.append(A[i])\n i = i + 1\n elif A[i] > B[j]:\n C.append(B[j])\n j = j + 1\n else:\n pass\n return C\n\n\ndef mergeSort(A, left, right):\n if right - left <= 1:\n return A[left:right]\n if right - left > 1:\n mid = (left + right) // 2\n L = mergeSort(A, left, mid)\n R = mergeSort(A, mid, right)\n return merge(L, R)\n\n\na = range(1, 100, 2) + range(0, 100, 2)\n",
"step-5": "\n#Merge Sort\n#O(nlogn)\n\n#Merge Part\n\nfrom __future__ import division #use for python2\n\ndef merge(A, B): #Merge A[0:m], B[0,n]\n (C, m, n) = ([], len(A), len(B))\n (i, j) = (0, 0) #Current positions in A, B\n\n while (i + j) < (m + n): #i+j is no. of elements merged so far\n if i == m: #case 1: A is empty\n C.append(B[j])\n j = j+1\n elif j == n: #case 2: B is empty\n C.append(A[i])\n i = i+1\n elif A[i] < B[j]: #case 3: Head of A is smaller\n C.append(A[i])\n i = i+1\n elif A[i] > B[j]: #case 4: Head of B is smaller\n C.append(B[j])\n j = j+1\n else:\n pass\n\n return C\n\n#A = range(0, 100, 2) # generate the lists\n#B = range(1, 75, 2) # generate the lists\n\n#print merge(A, B)\n#print \"\\n\"\n#print len(A) + len(B)\n\n#Sort Part\n\ndef mergeSort(A, left, right):\n #Sort the slice A[left:right]\n\n if (right - left) <= 1: #Base Case\n return A[left:right]\n\n if (right - left) > 1: #Recursive call\n mid = (left + right)//2\n\n L = mergeSort(A, left, mid)\n R = mergeSort(A, mid, right)\n\n return (merge(L,R))\n\na = range(1, 100, 2) + range(0, 100, 2)\n\n#print a\n\n#print mergeSort(a, 0, len(a))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def solution(A):
    """Return the smallest starting index of a slice of A (length >= 2)
    whose average is minimal (Codility "MinAvgTwoSlice").

    Any slice with minimal average contains a 2- or 3-element sub-slice
    whose average is at least as small, so only those are examined: O(N).

    Fixes vs. the original:
      * `xrange` (Python 2 only; NameError under Python 3) -> `range`.
      * 2- and 3-slices are now examined together per starting index with
        a strict `<`, so ties resolve to the SMALLEST start.  The original
        scanned all 2-slices first, so a 3-slice at an earlier index that
        tied the minimum was never taken.
    """
    N = len(A)
    min_avg = (A[0] + A[1]) / 2.0  # every valid input has at least 2 elements
    min_idx = 0
    for i in range(N - 1):
        avg2 = (A[i] + A[i + 1]) / 2.0
        if avg2 < min_avg:
            min_avg = avg2
            min_idx = i
        if i + 2 < N:
            avg3 = (A[i] + A[i + 1] + A[i + 2]) / 3.0
            if avg3 < min_avg:
                min_avg = avg3
                min_idx = i
    return min_idx
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#!/usr/bin/python
# -*- coding: utf-8 -*-
# you can use print for debugging purposes, e.g.
# print "this is a debug message"


def solution(A):
    """Return the smallest start index of the slice (length >= 2) of A with
    the minimal average.

    Checking only slices of length 2 and 3 is sufficient: a longer slice's
    average can never undercut all of its length-2/3 sub-slices, which keeps
    the scan O(N). Fixed to use ``range`` (``xrange`` is Python 2 only).
    """
    N = len(A)
    # min_avg = min( (A[0] + A[1]) / 2, (A[0] + A[1] + A[2]) / 3)
    min_avg = (A[0] + A[1]) / 2.0  # seed with the slice (0, 1)
    min_idx = 0

    # Length-2 slices starting at 1 .. N-2.
    for i in range(1, N - 1):
        now_avg = (A[i] + A[i + 1]) / 2.0
        if now_avg < min_avg:
            min_avg = now_avg
            min_idx = i

    # Length-3 slices starting at 0 .. N-3.
    if N > 2:
        for i in range(N - 2):
            now_avg = (A[i] + A[i + 1] + A[i + 2]) / 3.0
            if now_avg < min_avg:
                min_avg = now_avg
                min_idx = i

    return min_idx
"""
non-empty zero-indexed array A consisting of N integers is given. A pair of integers (P, Q), such that 0 ≤ P < Q < N, is called a slice of array A (notice that the slice contains at least two elements). The average of a slice (P, Q) is the sum of A[P] + A[P + 1] + ... + A[Q] divided by the length of the slice. To be precise, the average equals (A[P] + A[P + 1] + ... + A[Q]) / (Q − P + 1).
For example, array A such that:
A[0] = 4
A[1] = 2
A[2] = 2
A[3] = 5
A[4] = 1
A[5] = 5
A[6] = 8
contains the following example slices:
slice (1, 2), whose average is (2 + 2) / 2 = 2;
slice (3, 4), whose average is (5 + 1) / 2 = 3;
slice (1, 4), whose average is (2 + 2 + 5 + 1) / 4 = 2.5.
The goal is to find the starting position of a slice whose average is minimal.
Write a function:
def solution(A)
that, given a non-empty zero-indexed array A consisting of N integers, returns the starting position of the slice with the minimal average. If there is more than one slice with a minimal average, you should return the smallest starting position of such a slice.
For example, given array A such that:
A[0] = 4
A[1] = 2
A[2] = 2
A[3] = 5
A[4] = 1
A[5] = 5
A[6] = 8
the function should return 1, as explained above.
Assume that:
N is an integer within the range [2..100,000];
each element of array A is an integer within the range [−10,000..10,000].
Complexity:
expected worst-case time complexity is O(N);
expected worst-case space complexity is O(N), beyond input storage (not counting the storage required for input arguments).
Elements of input arrays can be modified.
"""
"""
Analysis
Detected time complexity:
O(N)
collapse all
Example tests
▶
example
example test
✔
OK
1.
0.067 s
OK
collapse all
Correctness tests
▶
double_quadruple
two or four elements
✔
OK
1.
0.066 s
OK
2.
0.067 s
OK
3.
0.067 s
OK
4.
0.066 s
OK
▶
simple1
simple test, the best slice has length 3
✔
OK
1.
0.066 s
OK
2.
0.065 s
OK
▶
simple2
simple test, the best slice has length 3
✔
OK
1.
0.067 s
OK
▶
small_random
random, length = 100
✔
OK
1.
0.067 s
OK
▶
medium_range
increasing, decreasing (legth = ~100) and small functional
✔
OK
1.
0.066 s
OK
2.
0.067 s
OK
3.
0.067 s
OK
collapse all
Performance tests
▶
medium_random
random, N = ~700
✔
OK
1.
0.066 s
OK
▶
large_ones
numbers from -1 to 1, N = ~100,000
✔
OK
1.
0.168 s
OK
2.
0.143 s
OK
▶
large_random
random, N = ~100,000
✔
OK
1.
0.178 s
OK
▶
extreme_values
all maximal values, N = ~100,000
✔
OK
1.
0.184 s
OK
2.
0.181 s
OK
3.
0.175 s
OK
▶
large_sequence
many seqeneces, N = ~100,000
✔
OK
1.
0.168 s
OK
2.
0.142 s
OK
"""
|
flexible
|
{
"blob_id": "caa92eb5582135f60a6034cb83d364501361d00e",
"index": 7726,
"step-1": "<mask token>\n",
"step-2": "def solution(A):\n N = len(A)\n min_avg = (A[0] + A[1]) / 2.0\n min_idx = 0\n now_avg = 0.0\n for i in xrange(1, N - 1):\n now_avg = (A[i] + A[i + 1]) / 2.0\n if now_avg < min_avg:\n min_avg = now_avg\n min_idx = i\n if N > 2:\n for i in xrange(N - 2):\n now_avg = (A[i] + A[i + 1] + A[i + 2]) / 3.0\n if now_avg < min_avg:\n min_avg = now_avg\n min_idx = i\n return min_idx\n\n\n<mask token>\n",
"step-3": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# you can use print for debugging purposes, e.g.\n# print \"this is a debug message\"\n\n\ndef solution(A):\n N = len (A)\n #min_avg = min( (A[0] + A[1]) / 2, (A[0] + A[1] + A[2]) / 3)\n min_avg = (A[0] + A[1]) / 2.0\n min_idx = 0\n now_avg = 0.0\n \n \n for i in xrange(1,N-1):\n now_avg = (A[i] + A[i+1]) / 2.0\n if now_avg < min_avg:\n min_avg = now_avg\n min_idx = i \n \n if N > 2: \n for i in xrange(N-2):\n now_avg = (A[i] + A[i+1] + A[i+2]) / 3.0\n if now_avg < min_avg:\n min_avg = now_avg\n min_idx = i\n \n return min_idx\n\n\n\"\"\"\n\n non-empty zero-indexed array A consisting of N integers is given. A pair of integers (P, Q), such that 0 ≤ P < Q < N, is called a slice of array A (notice that the slice contains at least two elements). The average of a slice (P, Q) is the sum of A[P] + A[P + 1] + ... + A[Q] divided by the length of the slice. To be precise, the average equals (A[P] + A[P + 1] + ... + A[Q]) / (Q − P + 1).\n\nFor example, array A such that:\n A[0] = 4\n A[1] = 2\n A[2] = 2\n A[3] = 5\n A[4] = 1\n A[5] = 5\n A[6] = 8\n\ncontains the following example slices:\n\n slice (1, 2), whose average is (2 + 2) / 2 = 2;\n slice (3, 4), whose average is (5 + 1) / 2 = 3;\n slice (1, 4), whose average is (2 + 2 + 5 + 1) / 4 = 2.5.\n\nThe goal is to find the starting position of a slice whose average is minimal.\n\nWrite a function:\n\n def solution(A)\n\nthat, given a non-empty zero-indexed array A consisting of N integers, returns the starting position of the slice with the minimal average. 
If there is more than one slice with a minimal average, you should return the smallest starting position of such a slice.\n\nFor example, given array A such that:\n A[0] = 4\n A[1] = 2\n A[2] = 2\n A[3] = 5\n A[4] = 1\n A[5] = 5\n A[6] = 8\n\nthe function should return 1, as explained above.\n\nAssume that:\n\n N is an integer within the range [2..100,000];\n each element of array A is an integer within the range [−10,000..10,000].\n\nComplexity:\n\n expected worst-case time complexity is O(N);\n expected worst-case space complexity is O(N), beyond input storage (not counting the storage required for input arguments).\n\nElements of input arrays can be modified.\n\"\"\"\n\n\"\"\"\nAnalysis\nDetected time complexity:\nO(N)\ncollapse all\nExample tests\n▶\nexample\nexample test\n✔\nOK\n1.\n0.067 s\nOK\ncollapse all\nCorrectness tests\n▶\ndouble_quadruple\ntwo or four elements\n✔\nOK\n1.\n0.066 s\nOK\n2.\n0.067 s\nOK\n3.\n0.067 s\nOK\n4.\n0.066 s\nOK\n▶\nsimple1\nsimple test, the best slice has length 3\n✔\nOK\n1.\n0.066 s\nOK\n2.\n0.065 s\nOK\n▶\nsimple2\nsimple test, the best slice has length 3\n✔\nOK\n1.\n0.067 s\nOK\n▶\nsmall_random\nrandom, length = 100\n✔\nOK\n1.\n0.067 s\nOK\n▶\nmedium_range\nincreasing, decreasing (legth = ~100) and small functional\n✔\nOK\n1.\n0.066 s\nOK\n2.\n0.067 s\nOK\n3.\n0.067 s\nOK\ncollapse all\nPerformance tests\n▶\nmedium_random\nrandom, N = ~700\n✔\nOK\n1.\n0.066 s\nOK\n▶\nlarge_ones\nnumbers from -1 to 1, N = ~100,000\n✔\nOK\n1.\n0.168 s\nOK\n2.\n0.143 s\nOK\n▶\nlarge_random\nrandom, N = ~100,000\n✔\nOK\n1.\n0.178 s\nOK\n▶\nextreme_values\nall maximal values, N = ~100,000\n✔\nOK\n1.\n0.184 s\nOK\n2.\n0.181 s\nOK\n3.\n0.175 s\nOK\n▶\nlarge_sequence\nmany seqeneces, N = ~100,000\n✔\nOK\n1.\n0.168 s\nOK\n2.\n0.142 s\nOK\n\n\"\"\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class VehicleViewSet(viewsets.ModelViewSet):
    """Full CRUD REST endpoints for Vehicle records, ordered by id.

    Access is restricted to authenticated users.
    """
    queryset = Vehicle.objects.all().order_by('id')
    serializer_class = VehicleSerializer
    permission_classes = [permissions.IsAuthenticated]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class OwnerViewSet(viewsets.ModelViewSet):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class VehicleViewSet(viewsets.ModelViewSet):
    """Full CRUD REST endpoints for Vehicle records, ordered by id.

    Access is restricted to authenticated users.
    """
    queryset = Vehicle.objects.all().order_by('id')
    serializer_class = VehicleSerializer
    permission_classes = [permissions.IsAuthenticated]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class OwnerViewSet(viewsets.ModelViewSet):
    """Full CRUD REST endpoints for Owner records, ordered by id.

    Access is restricted to authenticated users.
    """
    queryset = Owner.objects.all().order_by('id')
    serializer_class = OwnerSerializer
    permission_classes = [permissions.IsAuthenticated]
class VehicleViewSet(viewsets.ModelViewSet):
    """Full CRUD REST endpoints for Vehicle records, ordered by id.

    Access is restricted to authenticated users.
    """
    queryset = Vehicle.objects.all().order_by('id')
    serializer_class = VehicleSerializer
    permission_classes = [permissions.IsAuthenticated]
<|reserved_special_token_1|>
from .models import Owner, Vehicle
from rest_framework import viewsets, permissions
from .serializers import OwnerSerializer, VehicleSerializer
class OwnerViewSet(viewsets.ModelViewSet):
    """Full CRUD REST endpoints for Owner records, ordered by id.

    Access is restricted to authenticated users.
    """
    queryset = Owner.objects.all().order_by('id')
    serializer_class = OwnerSerializer
    permission_classes = [permissions.IsAuthenticated]
class VehicleViewSet(viewsets.ModelViewSet):
    """Full CRUD REST endpoints for Vehicle records, ordered by id.

    Access is restricted to authenticated users.
    """
    queryset = Vehicle.objects.all().order_by('id')
    serializer_class = VehicleSerializer
    permission_classes = [permissions.IsAuthenticated]
|
flexible
|
{
"blob_id": "9290294b5df081ef0cae5450a9ea3baef789c041",
"index": 6421,
"step-1": "<mask token>\n\n\nclass VehicleViewSet(viewsets.ModelViewSet):\n queryset = Vehicle.objects.all().order_by('id')\n serializer_class = VehicleSerializer\n permission_classes = [permissions.IsAuthenticated]\n",
"step-2": "<mask token>\n\n\nclass OwnerViewSet(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass VehicleViewSet(viewsets.ModelViewSet):\n queryset = Vehicle.objects.all().order_by('id')\n serializer_class = VehicleSerializer\n permission_classes = [permissions.IsAuthenticated]\n",
"step-3": "<mask token>\n\n\nclass OwnerViewSet(viewsets.ModelViewSet):\n queryset = Owner.objects.all().order_by('id')\n serializer_class = OwnerSerializer\n permission_classes = [permissions.IsAuthenticated]\n\n\nclass VehicleViewSet(viewsets.ModelViewSet):\n queryset = Vehicle.objects.all().order_by('id')\n serializer_class = VehicleSerializer\n permission_classes = [permissions.IsAuthenticated]\n",
"step-4": "from .models import Owner, Vehicle\nfrom rest_framework import viewsets, permissions\nfrom .serializers import OwnerSerializer, VehicleSerializer\n\n\nclass OwnerViewSet(viewsets.ModelViewSet):\n queryset = Owner.objects.all().order_by('id')\n serializer_class = OwnerSerializer\n permission_classes = [permissions.IsAuthenticated]\n\n\nclass VehicleViewSet(viewsets.ModelViewSet):\n queryset = Vehicle.objects.all().order_by('id')\n serializer_class = VehicleSerializer\n permission_classes = [permissions.IsAuthenticated]\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Sorts:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Sorts:
    """Collection of sorting algorithms."""

    def quick_sort(self, elements):
        """Randomized quicksort: return a new sorted list built from *elements*.

        Shuffles the (sub)list in place so the head element is an effectively
        random pivot, prints the chosen pivot, then recurses on the
        <=-pivot and >-pivot partitions (duplicates of the pivot go left).
        """
        # Guard clause: 0/1 elements are already sorted.
        if len(elements) < 2:
            return elements
        shuffle(elements)
        pivot = elements[0]
        print('pivot is : ', pivot)
        smaller, larger = [], []
        for item in elements[1:]:
            if item <= pivot:
                smaller.append(item)
            else:
                larger.append(item)
        return self.quick_sort(smaller) + [pivot] + self.quick_sort(larger)
<|reserved_special_token_1|>
from random import shuffle
<|reserved_special_token_0|>
class Sorts:
    """Collection of sorting algorithms."""

    def quick_sort(self, elements):
        """quick sort implementation"""
        # Base case: 0- or 1-element lists are already sorted.
        if len(elements) < 2:
            return elements
        else:
            # In-place shuffle makes the fixed head-of-list pivot effectively
            # random, avoiding quadratic behavior on pre-sorted input.
            shuffle(elements)
            pivot = elements[0]
            print('pivot is : ', pivot)
            # '<=' sends duplicates of the pivot to the left partition.
            less = [i for i in elements[1:] if i <= pivot]
            more = [i for i in elements[1:] if i > pivot]
            return self.quick_sort(less) + [pivot] + self.quick_sort(more)
<|reserved_special_token_1|>
from random import shuffle
"""all sorting algorithm implementation"""
class Sorts:
def quick_sort(self, elements):
"""quick sort implementation"""
if len(elements) < 2:
return elements
else:
shuffle(elements)
pivot = elements[0]
print("pivot is : ", pivot)
less = [i for i in elements[1:] if i <= pivot]
more = [i for i in elements[1:] if i > pivot]
return self.quick_sort(less) + [pivot] + self.quick_sort(more)
|
flexible
|
{
"blob_id": "2044140fb2678f9507946007fdfb7edbaf11798e",
"index": 5683,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Sorts:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Sorts:\n\n def quick_sort(self, elements):\n \"\"\"quick sort implementation\"\"\"\n if len(elements) < 2:\n return elements\n else:\n shuffle(elements)\n pivot = elements[0]\n print('pivot is : ', pivot)\n less = [i for i in elements[1:] if i <= pivot]\n more = [i for i in elements[1:] if i > pivot]\n return self.quick_sort(less) + [pivot] + self.quick_sort(more)\n",
"step-4": "from random import shuffle\n<mask token>\n\n\nclass Sorts:\n\n def quick_sort(self, elements):\n \"\"\"quick sort implementation\"\"\"\n if len(elements) < 2:\n return elements\n else:\n shuffle(elements)\n pivot = elements[0]\n print('pivot is : ', pivot)\n less = [i for i in elements[1:] if i <= pivot]\n more = [i for i in elements[1:] if i > pivot]\n return self.quick_sort(less) + [pivot] + self.quick_sort(more)\n",
"step-5": "from random import shuffle\r\"\"\"all sorting algorithm implementation\"\"\"\r\r\rclass Sorts:\r def quick_sort(self, elements):\r \"\"\"quick sort implementation\"\"\"\r if len(elements) < 2:\r return elements\r else:\r shuffle(elements)\r pivot = elements[0]\r print(\"pivot is : \", pivot)\r less = [i for i in elements[1:] if i <= pivot]\r more = [i for i in elements[1:] if i > pivot]\r return self.quick_sort(less) + [pivot] + self.quick_sort(more)\r",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
# project: fshell
# author: s0nnet
# time: 2017-01-08
# desc: data_fuzzhash
import sys
sys.path.append("./dao")
from fss_data_fuzzhash_dao import *
class FssFuzzHash:
    """Thin service layer over FssFuzzHashDao for fuzzy-hash records."""

    @staticmethod
    def insert_node(agent_id, data):
        # Delegates straight to the DAO layer; returns whatever the DAO
        # returns (presumably an insert status/id — confirm in
        # fss_data_fuzzhash_dao).
        return FssFuzzHashDao.insert_node(agent_id, data)
|
normal
|
{
"blob_id": "398f9f52b83ffddfb452abbeaad2e83610580fee",
"index": 9763,
"step-1": "<mask token>\n\n\nclass FssFuzzHash:\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass FssFuzzHash:\n\n @staticmethod\n def insert_node(agent_id, data):\n return FssFuzzHashDao.insert_node(agent_id, data)\n",
"step-3": "<mask token>\nsys.path.append('./dao')\n<mask token>\n\n\nclass FssFuzzHash:\n\n @staticmethod\n def insert_node(agent_id, data):\n return FssFuzzHashDao.insert_node(agent_id, data)\n",
"step-4": "import sys\nsys.path.append('./dao')\nfrom fss_data_fuzzhash_dao import *\n\n\nclass FssFuzzHash:\n\n @staticmethod\n def insert_node(agent_id, data):\n return FssFuzzHashDao.insert_node(agent_id, data)\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# project: fshell\n# author: s0nnet\n# time: 2017-01-08\n# desc: data_fuzzhash\n\n\nimport sys\nsys.path.append(\"./dao\")\nfrom fss_data_fuzzhash_dao import *\n\n\nclass FssFuzzHash:\n \n @staticmethod\n def insert_node(agent_id, data):\n\n return FssFuzzHashDao.insert_node(agent_id, data)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
from yuancloud import models, fields, api, _
import yuancloud.addons.decimal_precision as dp
from yuancloud.exceptions import UserError
from yuancloud.osv import fields as old_fields
class event_event(models.Model):
    """Extension of ``event.event``: new events are pre-populated with a
    single free 'Subscription' ticket tied to the generic event product."""
    _inherit = 'event.event'

    event_ticket_ids = fields.One2many(
        'event.event.ticket', 'event_id', string='Event Ticket',
        default=lambda rec: rec._default_tickets(), copy=True)

    @api.model
    def _default_tickets(self):
        """Default ticket values for a new event.

        Returns a one-element list of ticket values, or an empty
        ``event.event.ticket`` recordset when the ``event_sale`` product
        record is not installed.
        """
        try:
            product = self.env.ref('event_sale.product_product_event')
            ticket_values = {
                'name': _('Subscription'),
                'product_id': product.id,
                'price': 0,
            }
            return [ticket_values]
        except ValueError:
            # env.ref raises ValueError when the xml-id does not exist.
            return self.env['event.event.ticket']
class event_ticket(models.Model):
    """Sellable ticket type attached to an event: name, backing product,
    price, sales deadline and seat accounting (reserved/used/available)."""
    _name = 'event.event.ticket'
    _description = 'Event Ticket'
    name = fields.Char('Name', required=True, translate=True)
    event_id = fields.Many2one('event.event', "Event", required=True, ondelete='cascade')
    product_id = fields.Many2one(
        'product.product', 'Product',
        required=True, domain=[("event_type_id", "!=", False)],
        default=lambda self: self._default_product_id())
    registration_ids = fields.One2many('event.registration', 'event_ticket_id', 'Registrations')
    price = fields.Float('Price', digits=dp.get_precision('Product Price'))
    deadline = fields.Date("Sales End")
    is_expired = fields.Boolean('Is Expired', compute='_is_expired')
    @api.model
    def _default_product_id(self):
        # Default to the generic event product shipped with event_sale;
        # returns False (no default) when that data record is absent.
        try:
            product = self.env['ir.model.data'].get_object('event_sale', 'product_product_event')
            return product.id
        except ValueError:
            return False
    @api.one
    @api.depends('deadline')
    def _is_expired(self):
        # Compare the deadline against "today" evaluated in the event's
        # timezone, so tickets do not expire early/late across timezones.
        if self.deadline:
            current_date = fields.Date.context_today(self.with_context({'tz': self.event_id.date_tz}))
            self.is_expired = self.deadline < current_date
        else:
            self.is_expired = False
    # FIXME non-stored fields wont ends up in _columns (and thus _all_columns), which forbid them
    # to be used in qweb views. Waiting a fix, we create an old function field directly.
    """
    price_reduce = fields.Float("Price Reduce", compute="_get_price_reduce", store=False,
        digits=dp.get_precision('Product Price'))
    @api.one
    @api.depends('price', 'product_id.lst_price', 'product_id.price')
    def _get_price_reduce(self):
        product = self.product_id
        discount = product.lst_price and (product.lst_price - product.price) / product.lst_price or 0.0
        self.price_reduce = (1.0 - discount) * self.price
    """
    def _get_price_reduce(self, cr, uid, ids, field_name, arg, context=None):
        # Old-API compute for the 'price_reduce' function field declared in
        # _columns below: the ticket price with the product's list-price
        # discount ratio applied.
        res = dict.fromkeys(ids, 0.0)
        for ticket in self.browse(cr, uid, ids, context=context):
            product = ticket.product_id
            discount = product.lst_price and (product.lst_price - product.price) / product.lst_price or 0.0
            res[ticket.id] = (1.0 - discount) * ticket.price
        return res
    _columns = {
        'price_reduce': old_fields.function(_get_price_reduce, type='float', string='Price Reduce',
            digits_compute=dp.get_precision('Product Price')),
    }
    # seats fields
    seats_availability = fields.Selection(
        [('limited', 'Limited'), ('unlimited', 'Unlimited')],
        'Available Seat', required=True, store=True, compute='_compute_seats', default="limited")
    seats_max = fields.Integer('Maximum Available Seats',
        help="Define the number of available tickets. If you have too much registrations you will "
        "not be able to sell tickets anymore. Set 0 to ignore this rule set as unlimited.")
    seats_reserved = fields.Integer(string='Reserved Seats', compute='_compute_seats', store=True)
    seats_available = fields.Integer(string='Available Seats', compute='_compute_seats', store=True)
    seats_unconfirmed = fields.Integer(string='Unconfirmed Seat Reservations', compute='_compute_seats', store=True)
    seats_used = fields.Integer(compute='_compute_seats', store=True)
    @api.multi
    @api.depends('seats_max', 'registration_ids.state')
    def _compute_seats(self):
        """ Determine reserved, available, reserved but unconfirmed and used seats. """
        # initialize fields to 0 + compute seats availability
        # (seats_max == 0 is the sentinel for "unlimited")
        for ticket in self:
            ticket.seats_availability = 'unlimited' if ticket.seats_max == 0 else 'limited'
            ticket.seats_unconfirmed = ticket.seats_reserved = ticket.seats_used = ticket.seats_available = 0
        # aggregate registrations by ticket and by state
        # (single grouped SQL query instead of one ORM read per ticket)
        if self.ids:
            state_field = {
                'draft': 'seats_unconfirmed',
                'open': 'seats_reserved',
                'done': 'seats_used',
            }
            query = """ SELECT event_ticket_id, state, count(event_id)
                        FROM event_registration
                        WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')
                        GROUP BY event_ticket_id, state
                    """
            self._cr.execute(query, (tuple(self.ids),))
            for event_ticket_id, state, num in self._cr.fetchall():
                ticket = self.browse(event_ticket_id)
                ticket[state_field[state]] += num
        # compute seats_available (only meaningful for limited tickets)
        for ticket in self:
            if ticket.seats_max > 0:
                ticket.seats_available = ticket.seats_max - (ticket.seats_reserved + ticket.seats_used)
    @api.one
    @api.constrains('registration_ids', 'seats_max')
    def _check_seats_limit(self):
        # Reject any change that oversells a limited ticket.
        if self.seats_max and self.seats_available < 0:
            raise UserError(_('No more available seats for the ticket'))
    @api.onchange('product_id')
    def onchange_product_id(self):
        # UI helper: refresh the ticket price from the selected product.
        price = self.product_id.list_price if self.product_id else 0
        return {'value': {'price': price}}
class event_registration(models.Model):
    """Extension of ``event.registration``: links an attendee to the sale
    order / order line that created it and enforces per-ticket seat limits."""
    _inherit = 'event.registration'
    event_ticket_id = fields.Many2one('event.event.ticket', 'Event Ticket')
    # in addition to origin generic fields, add real relational fields to correctly
    # handle attendees linked to sale orders and their lines
    # TDE FIXME: maybe add an onchange on sale_order_id + origin
    sale_order_id = fields.Many2one('sale.order', 'Source Sale Order', ondelete='cascade')
    sale_order_line_id = fields.Many2one('sale.order.line', 'Sale Order Line', ondelete='cascade')

    @api.one
    @api.constrains('event_ticket_id', 'state')
    def _check_ticket_seats_limit(self):
        """Reject registrations that oversell a limited ticket."""
        if self.event_ticket_id.seats_max and self.event_ticket_id.seats_available < 0:
            raise UserError(_('No more available seats for this ticket'))

    @api.multi
    def _check_auto_confirmation(self):
        """Disable auto-confirmation while the source sale order is a draft.

        Confirmation then happens when the order itself is confirmed.
        """
        res = super(event_registration, self)._check_auto_confirmation()
        if res:
            orders = self.env['sale.order'].search([('state', '=', 'draft'), ('id', 'in', self.mapped('sale_order_id').ids)], limit=1)
            if orders:
                res = False
        return res

    @api.model
    def create(self, vals):
        """Create the registration and log its sale-order origin in the chatter."""
        res = super(event_registration, self).create(vals)
        if res.origin or res.sale_order_id:
            message = _("The registration has been created for event %(event_name)s%(ticket)s from sale order %(order)s") % ({
                'event_name': '<i>%s</i>' % res.event_id.name,
                'ticket': res.event_ticket_id and _(' with ticket %s') % (('<i>%s</i>') % res.event_ticket_id.name) or '',
                'order': res.origin or res.sale_order_id.name})
            res.message_post(body=message)
        return res

    @api.model
    def _prepare_attendee_values(self, registration):
        """ Override to add sale related stuff """
        line_id = registration.get('sale_order_line_id')
        if line_id:
            # Default the partner from the sale order when not given explicitly.
            registration.setdefault('partner_id', line_id.order_id.partner_id)
        att_data = super(event_registration, self)._prepare_attendee_values(registration)
        if line_id:
            # Fix: the original dict literal repeated the 'event_id' key with
            # the same value; the redundant duplicate is removed.
            att_data.update({
                'event_id': line_id.event_id.id,
                'event_ticket_id': line_id.event_ticket_id.id,
                'origin': line_id.order_id.name,
                'sale_order_id': line_id.order_id.id,
                'sale_order_line_id': line_id.id,
            })
        return att_data
|
normal
|
{
"blob_id": "bddba2fd710829db17c6419878ce535df0aba01c",
"index": 2760,
"step-1": "<mask token>\n\n\nclass event_ticket(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass event_registration(models.Model):\n _inherit = 'event.registration'\n event_ticket_id = fields.Many2one('event.event.ticket', 'Event Ticket')\n sale_order_id = fields.Many2one('sale.order', 'Source Sale Order',\n ondelete='cascade')\n sale_order_line_id = fields.Many2one('sale.order.line',\n 'Sale Order Line', ondelete='cascade')\n\n @api.one\n @api.constrains('event_ticket_id', 'state')\n def _check_ticket_seats_limit(self):\n if (self.event_ticket_id.seats_max and self.event_ticket_id.\n seats_available < 0):\n raise UserError(_('No more available seats for this ticket'))\n\n @api.multi\n def _check_auto_confirmation(self):\n res = super(event_registration, self)._check_auto_confirmation()\n if res:\n orders = self.env['sale.order'].search([('state', '=', 'draft'),\n ('id', 'in', self.mapped('sale_order_id').ids)], limit=1)\n if orders:\n res = False\n return res\n\n @api.model\n def create(self, vals):\n res = super(event_registration, self).create(vals)\n if res.origin or res.sale_order_id:\n message = _(\n 'The registration has been created for event %(event_name)s%(ticket)s from sale order %(order)s'\n ) % {'event_name': '<i>%s</i>' % res.event_id.name,\n 'ticket': res.event_ticket_id and _(' with ticket %s') % (\n '<i>%s</i>' % res.event_ticket_id.name) or '', 'order': res\n .origin or res.sale_order_id.name}\n res.message_post(body=message)\n return res\n\n @api.model\n def _prepare_attendee_values(self, registration):\n \"\"\" Override to add sale related stuff \"\"\"\n line_id = registration.get('sale_order_line_id')\n if line_id:\n 
registration.setdefault('partner_id', line_id.order_id.partner_id)\n att_data = super(event_registration, self)._prepare_attendee_values(\n registration)\n if line_id:\n att_data.update({'event_id': line_id.event_id.id, 'event_id':\n line_id.event_id.id, 'event_ticket_id': line_id.\n event_ticket_id.id, 'origin': line_id.order_id.name,\n 'sale_order_id': line_id.order_id.id, 'sale_order_line_id':\n line_id.id})\n return att_data\n",
"step-2": "<mask token>\n\n\nclass event_ticket(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @api.model\n def _default_product_id(self):\n try:\n product = self.env['ir.model.data'].get_object('event_sale',\n 'product_product_event')\n return product.id\n except ValueError:\n return False\n <mask token>\n <mask token>\n\n def _get_price_reduce(self, cr, uid, ids, field_name, arg, context=None):\n res = dict.fromkeys(ids, 0.0)\n for ticket in self.browse(cr, uid, ids, context=context):\n product = ticket.product_id\n discount = product.lst_price and (product.lst_price - product.price\n ) / product.lst_price or 0.0\n res[ticket.id] = (1.0 - discount) * ticket.price\n return res\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @api.multi\n @api.depends('seats_max', 'registration_ids.state')\n def _compute_seats(self):\n \"\"\" Determine reserved, available, reserved but unconfirmed and used seats. 
\"\"\"\n for ticket in self:\n ticket.seats_availability = ('unlimited' if ticket.seats_max ==\n 0 else 'limited')\n (ticket.seats_unconfirmed) = (ticket.seats_reserved) = (ticket.\n seats_used) = (ticket.seats_available) = 0\n if self.ids:\n state_field = {'draft': 'seats_unconfirmed', 'open':\n 'seats_reserved', 'done': 'seats_used'}\n query = \"\"\" SELECT event_ticket_id, state, count(event_id)\n FROM event_registration\n WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')\n GROUP BY event_ticket_id, state\n \"\"\"\n self._cr.execute(query, (tuple(self.ids),))\n for event_ticket_id, state, num in self._cr.fetchall():\n ticket = self.browse(event_ticket_id)\n ticket[state_field[state]] += num\n for ticket in self:\n if ticket.seats_max > 0:\n ticket.seats_available = ticket.seats_max - (ticket.\n seats_reserved + ticket.seats_used)\n\n @api.one\n @api.constrains('registration_ids', 'seats_max')\n def _check_seats_limit(self):\n if self.seats_max and self.seats_available < 0:\n raise UserError(_('No more available seats for the ticket'))\n\n @api.onchange('product_id')\n def onchange_product_id(self):\n price = self.product_id.list_price if self.product_id else 0\n return {'value': {'price': price}}\n\n\nclass event_registration(models.Model):\n _inherit = 'event.registration'\n event_ticket_id = fields.Many2one('event.event.ticket', 'Event Ticket')\n sale_order_id = fields.Many2one('sale.order', 'Source Sale Order',\n ondelete='cascade')\n sale_order_line_id = fields.Many2one('sale.order.line',\n 'Sale Order Line', ondelete='cascade')\n\n @api.one\n @api.constrains('event_ticket_id', 'state')\n def _check_ticket_seats_limit(self):\n if (self.event_ticket_id.seats_max and self.event_ticket_id.\n seats_available < 0):\n raise UserError(_('No more available seats for this ticket'))\n\n @api.multi\n def _check_auto_confirmation(self):\n res = super(event_registration, self)._check_auto_confirmation()\n if res:\n orders = 
self.env['sale.order'].search([('state', '=', 'draft'),\n ('id', 'in', self.mapped('sale_order_id').ids)], limit=1)\n if orders:\n res = False\n return res\n\n @api.model\n def create(self, vals):\n res = super(event_registration, self).create(vals)\n if res.origin or res.sale_order_id:\n message = _(\n 'The registration has been created for event %(event_name)s%(ticket)s from sale order %(order)s'\n ) % {'event_name': '<i>%s</i>' % res.event_id.name,\n 'ticket': res.event_ticket_id and _(' with ticket %s') % (\n '<i>%s</i>' % res.event_ticket_id.name) or '', 'order': res\n .origin or res.sale_order_id.name}\n res.message_post(body=message)\n return res\n\n @api.model\n def _prepare_attendee_values(self, registration):\n \"\"\" Override to add sale related stuff \"\"\"\n line_id = registration.get('sale_order_line_id')\n if line_id:\n registration.setdefault('partner_id', line_id.order_id.partner_id)\n att_data = super(event_registration, self)._prepare_attendee_values(\n registration)\n if line_id:\n att_data.update({'event_id': line_id.event_id.id, 'event_id':\n line_id.event_id.id, 'event_ticket_id': line_id.\n event_ticket_id.id, 'origin': line_id.order_id.name,\n 'sale_order_id': line_id.order_id.id, 'sale_order_line_id':\n line_id.id})\n return att_data\n",
"step-3": "<mask token>\n\n\nclass event_ticket(models.Model):\n _name = 'event.event.ticket'\n _description = 'Event Ticket'\n name = fields.Char('Name', required=True, translate=True)\n event_id = fields.Many2one('event.event', 'Event', required=True,\n ondelete='cascade')\n product_id = fields.Many2one('product.product', 'Product', required=\n True, domain=[('event_type_id', '!=', False)], default=lambda self:\n self._default_product_id())\n registration_ids = fields.One2many('event.registration',\n 'event_ticket_id', 'Registrations')\n price = fields.Float('Price', digits=dp.get_precision('Product Price'))\n deadline = fields.Date('Sales End')\n is_expired = fields.Boolean('Is Expired', compute='_is_expired')\n\n @api.model\n def _default_product_id(self):\n try:\n product = self.env['ir.model.data'].get_object('event_sale',\n 'product_product_event')\n return product.id\n except ValueError:\n return False\n\n @api.one\n @api.depends('deadline')\n def _is_expired(self):\n if self.deadline:\n current_date = fields.Date.context_today(self.with_context({\n 'tz': self.event_id.date_tz}))\n self.is_expired = self.deadline < current_date\n else:\n self.is_expired = False\n <mask token>\n\n def _get_price_reduce(self, cr, uid, ids, field_name, arg, context=None):\n res = dict.fromkeys(ids, 0.0)\n for ticket in self.browse(cr, uid, ids, context=context):\n product = ticket.product_id\n discount = product.lst_price and (product.lst_price - product.price\n ) / product.lst_price or 0.0\n res[ticket.id] = (1.0 - discount) * ticket.price\n return res\n _columns = {'price_reduce': old_fields.function(_get_price_reduce, type\n ='float', string='Price Reduce', digits_compute=dp.get_precision(\n 'Product Price'))}\n seats_availability = fields.Selection([('limited', 'Limited'), (\n 'unlimited', 'Unlimited')], 'Available Seat', required=True, store=\n True, compute='_compute_seats', default='limited')\n seats_max = fields.Integer('Maximum Available Seats', help=\n 'Define the 
number of available tickets. If you have too much registrations you will not be able to sell tickets anymore. Set 0 to ignore this rule set as unlimited.'\n )\n seats_reserved = fields.Integer(string='Reserved Seats', compute=\n '_compute_seats', store=True)\n seats_available = fields.Integer(string='Available Seats', compute=\n '_compute_seats', store=True)\n seats_unconfirmed = fields.Integer(string=\n 'Unconfirmed Seat Reservations', compute='_compute_seats', store=True)\n seats_used = fields.Integer(compute='_compute_seats', store=True)\n\n @api.multi\n @api.depends('seats_max', 'registration_ids.state')\n def _compute_seats(self):\n \"\"\" Determine reserved, available, reserved but unconfirmed and used seats. \"\"\"\n for ticket in self:\n ticket.seats_availability = ('unlimited' if ticket.seats_max ==\n 0 else 'limited')\n (ticket.seats_unconfirmed) = (ticket.seats_reserved) = (ticket.\n seats_used) = (ticket.seats_available) = 0\n if self.ids:\n state_field = {'draft': 'seats_unconfirmed', 'open':\n 'seats_reserved', 'done': 'seats_used'}\n query = \"\"\" SELECT event_ticket_id, state, count(event_id)\n FROM event_registration\n WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')\n GROUP BY event_ticket_id, state\n \"\"\"\n self._cr.execute(query, (tuple(self.ids),))\n for event_ticket_id, state, num in self._cr.fetchall():\n ticket = self.browse(event_ticket_id)\n ticket[state_field[state]] += num\n for ticket in self:\n if ticket.seats_max > 0:\n ticket.seats_available = ticket.seats_max - (ticket.\n seats_reserved + ticket.seats_used)\n\n @api.one\n @api.constrains('registration_ids', 'seats_max')\n def _check_seats_limit(self):\n if self.seats_max and self.seats_available < 0:\n raise UserError(_('No more available seats for the ticket'))\n\n @api.onchange('product_id')\n def onchange_product_id(self):\n price = self.product_id.list_price if self.product_id else 0\n return {'value': {'price': price}}\n\n\nclass 
event_registration(models.Model):\n _inherit = 'event.registration'\n event_ticket_id = fields.Many2one('event.event.ticket', 'Event Ticket')\n sale_order_id = fields.Many2one('sale.order', 'Source Sale Order',\n ondelete='cascade')\n sale_order_line_id = fields.Many2one('sale.order.line',\n 'Sale Order Line', ondelete='cascade')\n\n @api.one\n @api.constrains('event_ticket_id', 'state')\n def _check_ticket_seats_limit(self):\n if (self.event_ticket_id.seats_max and self.event_ticket_id.\n seats_available < 0):\n raise UserError(_('No more available seats for this ticket'))\n\n @api.multi\n def _check_auto_confirmation(self):\n res = super(event_registration, self)._check_auto_confirmation()\n if res:\n orders = self.env['sale.order'].search([('state', '=', 'draft'),\n ('id', 'in', self.mapped('sale_order_id').ids)], limit=1)\n if orders:\n res = False\n return res\n\n @api.model\n def create(self, vals):\n res = super(event_registration, self).create(vals)\n if res.origin or res.sale_order_id:\n message = _(\n 'The registration has been created for event %(event_name)s%(ticket)s from sale order %(order)s'\n ) % {'event_name': '<i>%s</i>' % res.event_id.name,\n 'ticket': res.event_ticket_id and _(' with ticket %s') % (\n '<i>%s</i>' % res.event_ticket_id.name) or '', 'order': res\n .origin or res.sale_order_id.name}\n res.message_post(body=message)\n return res\n\n @api.model\n def _prepare_attendee_values(self, registration):\n \"\"\" Override to add sale related stuff \"\"\"\n line_id = registration.get('sale_order_line_id')\n if line_id:\n registration.setdefault('partner_id', line_id.order_id.partner_id)\n att_data = super(event_registration, self)._prepare_attendee_values(\n registration)\n if line_id:\n att_data.update({'event_id': line_id.event_id.id, 'event_id':\n line_id.event_id.id, 'event_ticket_id': line_id.\n event_ticket_id.id, 'origin': line_id.order_id.name,\n 'sale_order_id': line_id.order_id.id, 'sale_order_line_id':\n line_id.id})\n return 
att_data\n",
"step-4": "from yuancloud import models, fields, api, _\nimport yuancloud.addons.decimal_precision as dp\nfrom yuancloud.exceptions import UserError\nfrom yuancloud.osv import fields as old_fields\n\n\nclass event_event(models.Model):\n _inherit = 'event.event'\n event_ticket_ids = fields.One2many('event.event.ticket', 'event_id',\n string='Event Ticket', default=lambda rec: rec._default_tickets(),\n copy=True)\n\n @api.model\n def _default_tickets(self):\n try:\n product = self.env.ref('event_sale.product_product_event')\n return [{'name': _('Subscription'), 'product_id': product.id,\n 'price': 0}]\n except ValueError:\n return self.env['event.event.ticket']\n\n\nclass event_ticket(models.Model):\n _name = 'event.event.ticket'\n _description = 'Event Ticket'\n name = fields.Char('Name', required=True, translate=True)\n event_id = fields.Many2one('event.event', 'Event', required=True,\n ondelete='cascade')\n product_id = fields.Many2one('product.product', 'Product', required=\n True, domain=[('event_type_id', '!=', False)], default=lambda self:\n self._default_product_id())\n registration_ids = fields.One2many('event.registration',\n 'event_ticket_id', 'Registrations')\n price = fields.Float('Price', digits=dp.get_precision('Product Price'))\n deadline = fields.Date('Sales End')\n is_expired = fields.Boolean('Is Expired', compute='_is_expired')\n\n @api.model\n def _default_product_id(self):\n try:\n product = self.env['ir.model.data'].get_object('event_sale',\n 'product_product_event')\n return product.id\n except ValueError:\n return False\n\n @api.one\n @api.depends('deadline')\n def _is_expired(self):\n if self.deadline:\n current_date = fields.Date.context_today(self.with_context({\n 'tz': self.event_id.date_tz}))\n self.is_expired = self.deadline < current_date\n else:\n self.is_expired = False\n \"\"\"\n price_reduce = fields.Float(\"Price Reduce\", compute=\"_get_price_reduce\", store=False,\n digits=dp.get_precision('Product Price'))\n @api.one\n 
@api.depends('price', 'product_id.lst_price', 'product_id.price')\n def _get_price_reduce(self):\n product = self.product_id\n discount = product.lst_price and (product.lst_price - product.price) / product.lst_price or 0.0\n self.price_reduce = (1.0 - discount) * self.price\n \"\"\"\n\n def _get_price_reduce(self, cr, uid, ids, field_name, arg, context=None):\n res = dict.fromkeys(ids, 0.0)\n for ticket in self.browse(cr, uid, ids, context=context):\n product = ticket.product_id\n discount = product.lst_price and (product.lst_price - product.price\n ) / product.lst_price or 0.0\n res[ticket.id] = (1.0 - discount) * ticket.price\n return res\n _columns = {'price_reduce': old_fields.function(_get_price_reduce, type\n ='float', string='Price Reduce', digits_compute=dp.get_precision(\n 'Product Price'))}\n seats_availability = fields.Selection([('limited', 'Limited'), (\n 'unlimited', 'Unlimited')], 'Available Seat', required=True, store=\n True, compute='_compute_seats', default='limited')\n seats_max = fields.Integer('Maximum Available Seats', help=\n 'Define the number of available tickets. If you have too much registrations you will not be able to sell tickets anymore. Set 0 to ignore this rule set as unlimited.'\n )\n seats_reserved = fields.Integer(string='Reserved Seats', compute=\n '_compute_seats', store=True)\n seats_available = fields.Integer(string='Available Seats', compute=\n '_compute_seats', store=True)\n seats_unconfirmed = fields.Integer(string=\n 'Unconfirmed Seat Reservations', compute='_compute_seats', store=True)\n seats_used = fields.Integer(compute='_compute_seats', store=True)\n\n @api.multi\n @api.depends('seats_max', 'registration_ids.state')\n def _compute_seats(self):\n \"\"\" Determine reserved, available, reserved but unconfirmed and used seats. 
\"\"\"\n for ticket in self:\n ticket.seats_availability = ('unlimited' if ticket.seats_max ==\n 0 else 'limited')\n (ticket.seats_unconfirmed) = (ticket.seats_reserved) = (ticket.\n seats_used) = (ticket.seats_available) = 0\n if self.ids:\n state_field = {'draft': 'seats_unconfirmed', 'open':\n 'seats_reserved', 'done': 'seats_used'}\n query = \"\"\" SELECT event_ticket_id, state, count(event_id)\n FROM event_registration\n WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')\n GROUP BY event_ticket_id, state\n \"\"\"\n self._cr.execute(query, (tuple(self.ids),))\n for event_ticket_id, state, num in self._cr.fetchall():\n ticket = self.browse(event_ticket_id)\n ticket[state_field[state]] += num\n for ticket in self:\n if ticket.seats_max > 0:\n ticket.seats_available = ticket.seats_max - (ticket.\n seats_reserved + ticket.seats_used)\n\n @api.one\n @api.constrains('registration_ids', 'seats_max')\n def _check_seats_limit(self):\n if self.seats_max and self.seats_available < 0:\n raise UserError(_('No more available seats for the ticket'))\n\n @api.onchange('product_id')\n def onchange_product_id(self):\n price = self.product_id.list_price if self.product_id else 0\n return {'value': {'price': price}}\n\n\nclass event_registration(models.Model):\n _inherit = 'event.registration'\n event_ticket_id = fields.Many2one('event.event.ticket', 'Event Ticket')\n sale_order_id = fields.Many2one('sale.order', 'Source Sale Order',\n ondelete='cascade')\n sale_order_line_id = fields.Many2one('sale.order.line',\n 'Sale Order Line', ondelete='cascade')\n\n @api.one\n @api.constrains('event_ticket_id', 'state')\n def _check_ticket_seats_limit(self):\n if (self.event_ticket_id.seats_max and self.event_ticket_id.\n seats_available < 0):\n raise UserError(_('No more available seats for this ticket'))\n\n @api.multi\n def _check_auto_confirmation(self):\n res = super(event_registration, self)._check_auto_confirmation()\n if res:\n orders = 
self.env['sale.order'].search([('state', '=', 'draft'),\n ('id', 'in', self.mapped('sale_order_id').ids)], limit=1)\n if orders:\n res = False\n return res\n\n @api.model\n def create(self, vals):\n res = super(event_registration, self).create(vals)\n if res.origin or res.sale_order_id:\n message = _(\n 'The registration has been created for event %(event_name)s%(ticket)s from sale order %(order)s'\n ) % {'event_name': '<i>%s</i>' % res.event_id.name,\n 'ticket': res.event_ticket_id and _(' with ticket %s') % (\n '<i>%s</i>' % res.event_ticket_id.name) or '', 'order': res\n .origin or res.sale_order_id.name}\n res.message_post(body=message)\n return res\n\n @api.model\n def _prepare_attendee_values(self, registration):\n \"\"\" Override to add sale related stuff \"\"\"\n line_id = registration.get('sale_order_line_id')\n if line_id:\n registration.setdefault('partner_id', line_id.order_id.partner_id)\n att_data = super(event_registration, self)._prepare_attendee_values(\n registration)\n if line_id:\n att_data.update({'event_id': line_id.event_id.id, 'event_id':\n line_id.event_id.id, 'event_ticket_id': line_id.\n event_ticket_id.id, 'origin': line_id.order_id.name,\n 'sale_order_id': line_id.order_id.id, 'sale_order_line_id':\n line_id.id})\n return att_data\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom yuancloud import models, fields, api, _\nimport yuancloud.addons.decimal_precision as dp\nfrom yuancloud.exceptions import UserError\nfrom yuancloud.osv import fields as old_fields\n\n\nclass event_event(models.Model):\n _inherit = 'event.event'\n\n event_ticket_ids = fields.One2many(\n 'event.event.ticket', 'event_id', string='Event Ticket',\n default=lambda rec: rec._default_tickets(), copy=True)\n\n @api.model\n def _default_tickets(self):\n try:\n product = self.env.ref('event_sale.product_product_event')\n return [{\n 'name': _('Subscription'),\n 'product_id': product.id,\n 'price': 0,\n }]\n except ValueError:\n return self.env['event.event.ticket']\n\n\nclass event_ticket(models.Model):\n _name = 'event.event.ticket'\n _description = 'Event Ticket'\n\n name = fields.Char('Name', required=True, translate=True)\n event_id = fields.Many2one('event.event', \"Event\", required=True, ondelete='cascade')\n product_id = fields.Many2one(\n 'product.product', 'Product',\n required=True, domain=[(\"event_type_id\", \"!=\", False)],\n default=lambda self: self._default_product_id())\n registration_ids = fields.One2many('event.registration', 'event_ticket_id', 'Registrations')\n price = fields.Float('Price', digits=dp.get_precision('Product Price'))\n deadline = fields.Date(\"Sales End\")\n is_expired = fields.Boolean('Is Expired', compute='_is_expired')\n\n @api.model\n def _default_product_id(self):\n try:\n product = self.env['ir.model.data'].get_object('event_sale', 'product_product_event')\n return product.id\n except ValueError:\n return False\n\n @api.one\n @api.depends('deadline')\n def _is_expired(self):\n if self.deadline:\n current_date = fields.Date.context_today(self.with_context({'tz': self.event_id.date_tz}))\n self.is_expired = self.deadline < current_date\n else:\n self.is_expired = False\n\n # FIXME non-stored fields wont ends up in _columns (and thus _all_columns), which forbid them\n # to be used in qweb 
views. Waiting a fix, we create an old function field directly.\n \"\"\"\n price_reduce = fields.Float(\"Price Reduce\", compute=\"_get_price_reduce\", store=False,\n digits=dp.get_precision('Product Price'))\n @api.one\n @api.depends('price', 'product_id.lst_price', 'product_id.price')\n def _get_price_reduce(self):\n product = self.product_id\n discount = product.lst_price and (product.lst_price - product.price) / product.lst_price or 0.0\n self.price_reduce = (1.0 - discount) * self.price\n \"\"\"\n def _get_price_reduce(self, cr, uid, ids, field_name, arg, context=None):\n res = dict.fromkeys(ids, 0.0)\n for ticket in self.browse(cr, uid, ids, context=context):\n product = ticket.product_id\n discount = product.lst_price and (product.lst_price - product.price) / product.lst_price or 0.0\n res[ticket.id] = (1.0 - discount) * ticket.price\n return res\n\n _columns = {\n 'price_reduce': old_fields.function(_get_price_reduce, type='float', string='Price Reduce',\n digits_compute=dp.get_precision('Product Price')),\n }\n\n # seats fields\n seats_availability = fields.Selection(\n [('limited', 'Limited'), ('unlimited', 'Unlimited')],\n 'Available Seat', required=True, store=True, compute='_compute_seats', default=\"limited\")\n seats_max = fields.Integer('Maximum Available Seats',\n help=\"Define the number of available tickets. If you have too much registrations you will \"\n \"not be able to sell tickets anymore. 
Set 0 to ignore this rule set as unlimited.\")\n seats_reserved = fields.Integer(string='Reserved Seats', compute='_compute_seats', store=True)\n seats_available = fields.Integer(string='Available Seats', compute='_compute_seats', store=True)\n seats_unconfirmed = fields.Integer(string='Unconfirmed Seat Reservations', compute='_compute_seats', store=True)\n seats_used = fields.Integer(compute='_compute_seats', store=True)\n\n @api.multi\n @api.depends('seats_max', 'registration_ids.state')\n def _compute_seats(self):\n \"\"\" Determine reserved, available, reserved but unconfirmed and used seats. \"\"\"\n # initialize fields to 0 + compute seats availability\n for ticket in self:\n ticket.seats_availability = 'unlimited' if ticket.seats_max == 0 else 'limited'\n ticket.seats_unconfirmed = ticket.seats_reserved = ticket.seats_used = ticket.seats_available = 0\n # aggregate registrations by ticket and by state\n if self.ids:\n state_field = {\n 'draft': 'seats_unconfirmed',\n 'open': 'seats_reserved',\n 'done': 'seats_used',\n }\n query = \"\"\" SELECT event_ticket_id, state, count(event_id)\n FROM event_registration\n WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')\n GROUP BY event_ticket_id, state\n \"\"\"\n self._cr.execute(query, (tuple(self.ids),))\n for event_ticket_id, state, num in self._cr.fetchall():\n ticket = self.browse(event_ticket_id)\n ticket[state_field[state]] += num\n # compute seats_available\n for ticket in self:\n if ticket.seats_max > 0:\n ticket.seats_available = ticket.seats_max - (ticket.seats_reserved + ticket.seats_used)\n\n @api.one\n @api.constrains('registration_ids', 'seats_max')\n def _check_seats_limit(self):\n if self.seats_max and self.seats_available < 0:\n raise UserError(_('No more available seats for the ticket'))\n\n @api.onchange('product_id')\n def onchange_product_id(self):\n price = self.product_id.list_price if self.product_id else 0\n return {'value': {'price': price}}\n\n\nclass 
event_registration(models.Model):\n _inherit = 'event.registration'\n\n event_ticket_id = fields.Many2one('event.event.ticket', 'Event Ticket')\n # in addition to origin generic fields, add real relational fields to correctly\n # handle attendees linked to sale orders and their lines\n # TDE FIXME: maybe add an onchange on sale_order_id + origin\n sale_order_id = fields.Many2one('sale.order', 'Source Sale Order', ondelete='cascade')\n sale_order_line_id = fields.Many2one('sale.order.line', 'Sale Order Line', ondelete='cascade')\n\n @api.one\n @api.constrains('event_ticket_id', 'state')\n def _check_ticket_seats_limit(self):\n if self.event_ticket_id.seats_max and self.event_ticket_id.seats_available < 0:\n raise UserError(_('No more available seats for this ticket'))\n\n @api.multi\n def _check_auto_confirmation(self):\n res = super(event_registration, self)._check_auto_confirmation()\n if res:\n orders = self.env['sale.order'].search([('state', '=', 'draft'), ('id', 'in', self.mapped('sale_order_id').ids)], limit=1)\n if orders:\n res = False\n return res\n\n @api.model\n def create(self, vals):\n res = super(event_registration, self).create(vals)\n if res.origin or res.sale_order_id:\n message = _(\"The registration has been created for event %(event_name)s%(ticket)s from sale order %(order)s\") % ({\n 'event_name': '<i>%s</i>' % res.event_id.name,\n 'ticket': res.event_ticket_id and _(' with ticket %s') % (('<i>%s</i>') % res.event_ticket_id.name) or '',\n 'order': res.origin or res.sale_order_id.name})\n res.message_post(body=message)\n return res\n\n @api.model\n def _prepare_attendee_values(self, registration):\n \"\"\" Override to add sale related stuff \"\"\"\n line_id = registration.get('sale_order_line_id')\n if line_id:\n registration.setdefault('partner_id', line_id.order_id.partner_id)\n att_data = super(event_registration, self)._prepare_attendee_values(registration)\n if line_id:\n att_data.update({\n 'event_id': line_id.event_id.id,\n 'event_id': 
line_id.event_id.id,\n 'event_ticket_id': line_id.event_ticket_id.id,\n 'origin': line_id.order_id.name,\n 'sale_order_id': line_id.order_id.id,\n 'sale_order_line_id': line_id.id,\n })\n return att_data\n",
"step-ids": [
7,
12,
14,
19,
20
]
}
|
[
7,
12,
14,
19,
20
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if A <= X and A + B >= X:
print('YES')
else:
print('NO')
<|reserved_special_token_1|>
a = input().split(' ')
A = int(a[0])
B = int(a[1])
X = int(a[2])
if A <= X and A + B >= X:
print('YES')
else:
print('NO')
|
flexible
|
{
"blob_id": "9a60449aa13bc5e7e413d0e47a1972d93ccfe69f",
"index": 7194,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif A <= X and A + B >= X:\n print('YES')\nelse:\n print('NO')\n",
"step-3": "a = input().split(' ')\nA = int(a[0])\nB = int(a[1])\nX = int(a[2])\nif A <= X and A + B >= X:\n print('YES')\nelse:\n print('NO')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from pig_util import outputSchema
@outputSchema('word:chararray')
def reverse(word):
"""
Return the reverse text of the provided word
"""
return word[::-1]
@outputSchema('length:int')
def num_chars(word):
"""
Return the length of the provided word
"""
return len(word)
|
normal
|
{
"blob_id": "94560d8f6528a222e771ca6aa60349d9682e8f4b",
"index": 6558,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@outputSchema('word:chararray')\ndef reverse(word):\n \"\"\"\n Return the reverse text of the provided word\n \"\"\"\n return word[::-1]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@outputSchema('word:chararray')\ndef reverse(word):\n \"\"\"\n Return the reverse text of the provided word\n \"\"\"\n return word[::-1]\n\n\n@outputSchema('length:int')\ndef num_chars(word):\n \"\"\"\n Return the length of the provided word\n \"\"\"\n return len(word)\n",
"step-4": "from pig_util import outputSchema\n\n\n@outputSchema('word:chararray')\ndef reverse(word):\n \"\"\"\n Return the reverse text of the provided word\n \"\"\"\n return word[::-1]\n\n\n@outputSchema('length:int')\ndef num_chars(word):\n \"\"\"\n Return the length of the provided word\n \"\"\"\n return len(word)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Primera consulta SQL Server')
<|reserved_special_token_0|>
print('Intentando conectar...')
<|reserved_special_token_0|>
print('Conectado!!!')
<|reserved_special_token_0|>
cursor.execute(sql)
<|reserved_special_token_0|>
print(row)
<|reserved_special_token_0|>
print(row)
<|reserved_special_token_0|>
print(row)
<|reserved_special_token_0|>
print(row)
conexion.close()
print('Fin de programa')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Primera consulta SQL Server')
servidor = 'LOCALHOST\\SQLEXPRESS'
bbdd = 'HOSPITAL'
usuario = 'SA'
password = 'azure'
cadenaconexion = ('DRIVER={ODBC Driver 17 for SQL Server};SERVER=' +
servidor + '; DATABASE=' + bbdd + '; UID=' + usuario + '; PWD=' + password)
print('Intentando conectar...')
conexion = pyodbc.connect(cadenaconexion)
print('Conectado!!!')
cursor = conexion.cursor()
sql = 'select * from dept'
cursor.execute(sql)
row = cursor.fetchone()
print(row)
row = cursor.fetchone()
print(row)
row = cursor.fetchone()
print(row)
row = cursor.fetchone()
print(row)
conexion.close()
print('Fin de programa')
<|reserved_special_token_1|>
import pyodbc
print('Primera consulta SQL Server')
servidor = 'LOCALHOST\\SQLEXPRESS'
bbdd = 'HOSPITAL'
usuario = 'SA'
password = 'azure'
cadenaconexion = ('DRIVER={ODBC Driver 17 for SQL Server};SERVER=' +
servidor + '; DATABASE=' + bbdd + '; UID=' + usuario + '; PWD=' + password)
print('Intentando conectar...')
conexion = pyodbc.connect(cadenaconexion)
print('Conectado!!!')
cursor = conexion.cursor()
sql = 'select * from dept'
cursor.execute(sql)
row = cursor.fetchone()
print(row)
row = cursor.fetchone()
print(row)
row = cursor.fetchone()
print(row)
row = cursor.fetchone()
print(row)
conexion.close()
print('Fin de programa')
<|reserved_special_token_1|>
import pyodbc
print("Primera consulta SQL Server")
servidor="LOCALHOST\SQLEXPRESS"
bbdd="HOSPITAL"
usuario="SA"
password="azure"
#CADENA CONEXION CON SEGURIDAD SQL SERVER (REMOTO)
cadenaconexion=("DRIVER={ODBC Driver 17 for SQL Server};SERVER=" + servidor
+ "; DATABASE=" + bbdd + "; UID=" + usuario + "; PWD=" + password)
print("Intentando conectar...")
conexion = pyodbc.connect(cadenaconexion)
print("Conectado!!!")
#CURSOR se crea con una conexión abierta
cursor = conexion.cursor()
#Necesitamos una consulta, el cursor maneja tanto consultas de selección (SELECT)
#como consultas de acción, no le importa
#Creamos la consulta select
sql = "select * from dept"
#El cursor ejecutará la consulta
cursor.execute(sql)
#Podemos, por ejemplo, recuperar una fila
row = cursor.fetchone()
#Vamos a dibujar la fila
print(row)
#Vamos a escribir otra vez fetchone()
row = cursor.fetchone()
print(row)
#Cada vez que ejecutamos el método fetch
#el cursor se mueve una fila
#No podemos volver a la fila anterior,
#tendríamos que ejecutar otra vez el método
#execute() de la conexión
#Vamos a pasarnos de filas a ver que sucede
row = cursor.fetchone()
print(row)
row = cursor.fetchone()
print(row)
#Siempre debemos cerrar el cursor despues de leer
conexion.close()
print("Fin de programa")
|
flexible
|
{
"blob_id": "0438f92aa9a36eaf1059244ec3be4397381f7a86",
"index": 6703,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Primera consulta SQL Server')\n<mask token>\nprint('Intentando conectar...')\n<mask token>\nprint('Conectado!!!')\n<mask token>\ncursor.execute(sql)\n<mask token>\nprint(row)\n<mask token>\nprint(row)\n<mask token>\nprint(row)\n<mask token>\nprint(row)\nconexion.close()\nprint('Fin de programa')\n",
"step-3": "<mask token>\nprint('Primera consulta SQL Server')\nservidor = 'LOCALHOST\\\\SQLEXPRESS'\nbbdd = 'HOSPITAL'\nusuario = 'SA'\npassword = 'azure'\ncadenaconexion = ('DRIVER={ODBC Driver 17 for SQL Server};SERVER=' +\n servidor + '; DATABASE=' + bbdd + '; UID=' + usuario + '; PWD=' + password)\nprint('Intentando conectar...')\nconexion = pyodbc.connect(cadenaconexion)\nprint('Conectado!!!')\ncursor = conexion.cursor()\nsql = 'select * from dept'\ncursor.execute(sql)\nrow = cursor.fetchone()\nprint(row)\nrow = cursor.fetchone()\nprint(row)\nrow = cursor.fetchone()\nprint(row)\nrow = cursor.fetchone()\nprint(row)\nconexion.close()\nprint('Fin de programa')\n",
"step-4": "import pyodbc\nprint('Primera consulta SQL Server')\nservidor = 'LOCALHOST\\\\SQLEXPRESS'\nbbdd = 'HOSPITAL'\nusuario = 'SA'\npassword = 'azure'\ncadenaconexion = ('DRIVER={ODBC Driver 17 for SQL Server};SERVER=' +\n servidor + '; DATABASE=' + bbdd + '; UID=' + usuario + '; PWD=' + password)\nprint('Intentando conectar...')\nconexion = pyodbc.connect(cadenaconexion)\nprint('Conectado!!!')\ncursor = conexion.cursor()\nsql = 'select * from dept'\ncursor.execute(sql)\nrow = cursor.fetchone()\nprint(row)\nrow = cursor.fetchone()\nprint(row)\nrow = cursor.fetchone()\nprint(row)\nrow = cursor.fetchone()\nprint(row)\nconexion.close()\nprint('Fin de programa')\n",
"step-5": "import pyodbc\n\nprint(\"Primera consulta SQL Server\")\nservidor=\"LOCALHOST\\SQLEXPRESS\"\nbbdd=\"HOSPITAL\"\nusuario=\"SA\"\npassword=\"azure\"\n\n#CADENA CONEXION CON SEGURIDAD SQL SERVER (REMOTO)\ncadenaconexion=(\"DRIVER={ODBC Driver 17 for SQL Server};SERVER=\" + servidor\n+ \"; DATABASE=\" + bbdd + \"; UID=\" + usuario + \"; PWD=\" + password)\n\nprint(\"Intentando conectar...\")\nconexion = pyodbc.connect(cadenaconexion)\nprint(\"Conectado!!!\")\n\n#CURSOR se crea con una conexión abierta\ncursor = conexion.cursor()\n#Necesitamos una consulta, el cursor maneja tanto consultas de selección (SELECT)\n#como consultas de acción, no le importa\n#Creamos la consulta select\nsql = \"select * from dept\"\n#El cursor ejecutará la consulta\ncursor.execute(sql)\n#Podemos, por ejemplo, recuperar una fila\nrow = cursor.fetchone()\n#Vamos a dibujar la fila\nprint(row)\n#Vamos a escribir otra vez fetchone()\nrow = cursor.fetchone()\nprint(row)\n#Cada vez que ejecutamos el método fetch\n#el cursor se mueve una fila\n#No podemos volver a la fila anterior,\n#tendríamos que ejecutar otra vez el método\n#execute() de la conexión\n#Vamos a pasarnos de filas a ver que sucede\nrow = cursor.fetchone()\nprint(row)\nrow = cursor.fetchone()\nprint(row)\n#Siempre debemos cerrar el cursor despues de leer\nconexion.close()\nprint(\"Fin de programa\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def save(parameters):
with open('saves/save.zs', 'wb') as game_save:
dump(parameters, game_save)
game_save.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def save(parameters):
with open('saves/save.zs', 'wb') as game_save:
dump(parameters, game_save)
game_save.close()
def load_settings():
try:
with open('saves/save.zs', 'rb') as game_save:
return load(game_save)
except FileNotFoundError:
return False
<|reserved_special_token_1|>
from pickle import dump, load
def save(parameters):
with open('saves/save.zs', 'wb') as game_save:
dump(parameters, game_save)
game_save.close()
def load_settings():
try:
with open('saves/save.zs', 'rb') as game_save:
return load(game_save)
except FileNotFoundError:
return False
<|reserved_special_token_1|>
from pickle import dump, load
def save(parameters):
# Функция сохранения прогресса в файл
with open('saves/save.zs', 'wb') as game_save:
dump(parameters, game_save)
game_save.close()
def load_settings():
# Функция загрузки сохранения при выборе опции продолжения игры
try:
with open('saves/save.zs', 'rb') as game_save:
return load(game_save)
except FileNotFoundError:
return False
|
flexible
|
{
"blob_id": "9d27b8844ab4070bb53afd89620177b89013956e",
"index": 4164,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef save(parameters):\n with open('saves/save.zs', 'wb') as game_save:\n dump(parameters, game_save)\n game_save.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef save(parameters):\n with open('saves/save.zs', 'wb') as game_save:\n dump(parameters, game_save)\n game_save.close()\n\n\ndef load_settings():\n try:\n with open('saves/save.zs', 'rb') as game_save:\n return load(game_save)\n except FileNotFoundError:\n return False\n",
"step-4": "from pickle import dump, load\n\n\ndef save(parameters):\n with open('saves/save.zs', 'wb') as game_save:\n dump(parameters, game_save)\n game_save.close()\n\n\ndef load_settings():\n try:\n with open('saves/save.zs', 'rb') as game_save:\n return load(game_save)\n except FileNotFoundError:\n return False\n",
"step-5": "from pickle import dump, load\r\n\r\n\r\ndef save(parameters):\r\n # Функция сохранения прогресса в файл\r\n with open('saves/save.zs', 'wb') as game_save:\r\n dump(parameters, game_save)\r\n game_save.close()\r\n\r\n\r\ndef load_settings():\r\n # Функция загрузки сохранения при выборе опции продолжения игры\r\n try:\r\n with open('saves/save.zs', 'rb') as game_save:\r\n return load(game_save)\r\n except FileNotFoundError:\r\n return False\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def compare(long, short):
print(len(long) > len(short))
<|reserved_special_token_0|>
def exchange(a, b):
b = b - a
a = a + b
b = a - b
print('a=', a, 'b=', b)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def compare(long, short):
print(len(long) > len(short))
compare(long_phrase, short_phrase)
<|reserved_special_token_0|>
for letter in text:
if letter not in d:
d[letter] = 1
if letter in d:
d[letter] += 1
<|reserved_special_token_0|>
print(result)
if len(text.replace('и', '')) < len(text.replace('а', '')):
print('В строке больше букв "и"')
else:
print('В строке больше букв "а"')
<|reserved_special_token_0|>
print('Объем файла равен {}Mb'.format(megabyte))
<|reserved_special_token_0|>
print(sin)
<|reserved_special_token_0|>
def exchange(a, b):
b = b - a
a = a + b
b = a - b
print('a=', a, 'b=', b)
exchange(120, 1)
<|reserved_special_token_0|>
st.split()
<|reserved_special_token_0|>
print(l)
<|reserved_special_token_0|>
for i in st:
k = k + 1
i = int(i) * 2 ** (l - k)
print(i)
new_num.append(i)
<|reserved_special_token_0|>
print(result)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
long_phrase = 'Насколько проще было бы писать программы, если бы не заказчики'
short_phrase = '640Кб должно хватить для любых задач. Билл Гейтс (по легенде)'
def compare(long, short):
print(len(long) > len(short))
compare(long_phrase, short_phrase)
text = 'Если программист в 9-00 утра на работе, значит, он там и ночевал'
d = dict()
for letter in text:
if letter not in d:
d[letter] = 1
if letter in d:
d[letter] += 1
result = 'В строке text {} букв "а" и {} букв "и"'.format(d['а'], d['и'])
print(result)
if len(text.replace('и', '')) < len(text.replace('а', '')):
print('В строке больше букв "и"')
else:
print('В строке больше букв "а"')
byte = 213680000
megabyte = byte / 10 ** 6
print('Объем файла равен {}Mb'.format(megabyte))
sin = math.sin(math.pi / 6)
print(sin)
<|reserved_special_token_0|>
def exchange(a, b):
b = b - a
a = a + b
b = a - b
print('a=', a, 'b=', b)
exchange(120, 1)
num = 10011
st = str(num)
st.split()
l = len(st) - 1
print(l)
new_num = list()
k = -1
for i in st:
k = k + 1
i = int(i) * 2 ** (l - k)
print(i)
new_num.append(i)
result = sum(new_num)
print(result)
<|reserved_special_token_1|>
import math
long_phrase = 'Насколько проще было бы писать программы, если бы не заказчики'
short_phrase = '640Кб должно хватить для любых задач. Билл Гейтс (по легенде)'
def compare(long, short):
print(len(long) > len(short))
compare(long_phrase, short_phrase)
text = 'Если программист в 9-00 утра на работе, значит, он там и ночевал'
d = dict()
for letter in text:
if letter not in d:
d[letter] = 1
if letter in d:
d[letter] += 1
result = 'В строке text {} букв "а" и {} букв "и"'.format(d['а'], d['и'])
print(result)
if len(text.replace('и', '')) < len(text.replace('а', '')):
print('В строке больше букв "и"')
else:
print('В строке больше букв "а"')
byte = 213680000
megabyte = byte / 10 ** 6
print('Объем файла равен {}Mb'.format(megabyte))
sin = math.sin(math.pi / 6)
print(sin)
<|reserved_special_token_0|>
def exchange(a, b):
b = b - a
a = a + b
b = a - b
print('a=', a, 'b=', b)
exchange(120, 1)
num = 10011
st = str(num)
st.split()
l = len(st) - 1
print(l)
new_num = list()
k = -1
for i in st:
k = k + 1
i = int(i) * 2 ** (l - k)
print(i)
new_num.append(i)
result = sum(new_num)
print(result)
<|reserved_special_token_1|>
import math
# 1
long_phrase = 'Насколько проще было бы писать программы, если бы не заказчики'
short_phrase = '640Кб должно хватить для любых задач. Билл Гейтс (по легенде)'
def compare (long, short):
print(len(long)>len(short))
compare(long_phrase, short_phrase)
# 2.1
text = 'Если программист в 9-00 утра на работе, значит, он там и ночевал'
d=dict()
for letter in text:
if letter not in d:
d[letter]=1
if letter in d:
d[letter]+=1
result='В строке text {} букв "а" и {} букв "и"'.format(d['а'], d['и'])
print(result)
# 2.2
if len(text.replace('и','')) < len(text.replace('а','')):
print('В строке больше букв "и"')
else:
print('В строке больше букв "а"')
# 3
byte=213680000
megabyte=byte/(10**6)
print('Объем файла равен {}Mb'.format(megabyte))
# 4
sin=math.sin(math.pi/6)
print(sin)
''' 5 дробные числа не могут быть представлены в точности в бинарном виде,
поэтому значения округляются, и такие операции,
как 0.1+0.2, дают неточный результат '''
# 5: swap two values and print the result.
def exchange(a, b):
    """Swap *a* and *b*, then print the swapped pair."""
    a, b = b, a
    print('a=', a, 'b=', b)


exchange(120, 1)
# 6: treat the digits of `num` as a binary numeral and print its decimal
# value (10011 -> 19).
num = 10011
st = str(num)
# NOTE: the original called st.split() and threw the result away — a no-op
# on a whitespace-free string; removed as dead code.
l = len(st) - 1  # exponent of the most significant digit
print(l)
# collect each digit's weighted value
new_num = list()
for k, digit in enumerate(st):
    # weight each digit by 2 ** (its position from the right)
    i = int(digit) * 2 ** (l - k)
    print(i)
    new_num.append(i)
result = sum(new_num)
print(result)
|
flexible
|
{
"blob_id": "f29637cd670524baebac6549962a1c50fc1b91c6",
"index": 6835,
"step-1": "<mask token>\n\n\ndef compare(long, short):\n print(len(long) > len(short))\n\n\n<mask token>\n\n\ndef exchange(a, b):\n b = b - a\n a = a + b\n b = a - b\n print('a=', a, 'b=', b)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef compare(long, short):\n print(len(long) > len(short))\n\n\ncompare(long_phrase, short_phrase)\n<mask token>\nfor letter in text:\n if letter not in d:\n d[letter] = 1\n if letter in d:\n d[letter] += 1\n<mask token>\nprint(result)\nif len(text.replace('и', '')) < len(text.replace('а', '')):\n print('В строке больше букв \"и\"')\nelse:\n print('В строке больше букв \"а\"')\n<mask token>\nprint('Объем файла равен {}Mb'.format(megabyte))\n<mask token>\nprint(sin)\n<mask token>\n\n\ndef exchange(a, b):\n b = b - a\n a = a + b\n b = a - b\n print('a=', a, 'b=', b)\n\n\nexchange(120, 1)\n<mask token>\nst.split()\n<mask token>\nprint(l)\n<mask token>\nfor i in st:\n k = k + 1\n i = int(i) * 2 ** (l - k)\n print(i)\n new_num.append(i)\n<mask token>\nprint(result)\n",
"step-3": "<mask token>\nlong_phrase = 'Насколько проще было бы писать программы, если бы не заказчики'\nshort_phrase = '640Кб должно хватить для любых задач. Билл Гейтс (по легенде)'\n\n\ndef compare(long, short):\n print(len(long) > len(short))\n\n\ncompare(long_phrase, short_phrase)\ntext = 'Если программист в 9-00 утра на работе, значит, он там и ночевал'\nd = dict()\nfor letter in text:\n if letter not in d:\n d[letter] = 1\n if letter in d:\n d[letter] += 1\nresult = 'В строке text {} букв \"а\" и {} букв \"и\"'.format(d['а'], d['и'])\nprint(result)\nif len(text.replace('и', '')) < len(text.replace('а', '')):\n print('В строке больше букв \"и\"')\nelse:\n print('В строке больше букв \"а\"')\nbyte = 213680000\nmegabyte = byte / 10 ** 6\nprint('Объем файла равен {}Mb'.format(megabyte))\nsin = math.sin(math.pi / 6)\nprint(sin)\n<mask token>\n\n\ndef exchange(a, b):\n b = b - a\n a = a + b\n b = a - b\n print('a=', a, 'b=', b)\n\n\nexchange(120, 1)\nnum = 10011\nst = str(num)\nst.split()\nl = len(st) - 1\nprint(l)\nnew_num = list()\nk = -1\nfor i in st:\n k = k + 1\n i = int(i) * 2 ** (l - k)\n print(i)\n new_num.append(i)\nresult = sum(new_num)\nprint(result)\n",
"step-4": "import math\nlong_phrase = 'Насколько проще было бы писать программы, если бы не заказчики'\nshort_phrase = '640Кб должно хватить для любых задач. Билл Гейтс (по легенде)'\n\n\ndef compare(long, short):\n print(len(long) > len(short))\n\n\ncompare(long_phrase, short_phrase)\ntext = 'Если программист в 9-00 утра на работе, значит, он там и ночевал'\nd = dict()\nfor letter in text:\n if letter not in d:\n d[letter] = 1\n if letter in d:\n d[letter] += 1\nresult = 'В строке text {} букв \"а\" и {} букв \"и\"'.format(d['а'], d['и'])\nprint(result)\nif len(text.replace('и', '')) < len(text.replace('а', '')):\n print('В строке больше букв \"и\"')\nelse:\n print('В строке больше букв \"а\"')\nbyte = 213680000\nmegabyte = byte / 10 ** 6\nprint('Объем файла равен {}Mb'.format(megabyte))\nsin = math.sin(math.pi / 6)\nprint(sin)\n<mask token>\n\n\ndef exchange(a, b):\n b = b - a\n a = a + b\n b = a - b\n print('a=', a, 'b=', b)\n\n\nexchange(120, 1)\nnum = 10011\nst = str(num)\nst.split()\nl = len(st) - 1\nprint(l)\nnew_num = list()\nk = -1\nfor i in st:\n k = k + 1\n i = int(i) * 2 ** (l - k)\n print(i)\n new_num.append(i)\nresult = sum(new_num)\nprint(result)\n",
"step-5": "import math\n\n# 1\nlong_phrase = 'Насколько проще было бы писать программы, если бы не заказчики'\nshort_phrase = '640Кб должно хватить для любых задач. Билл Гейтс (по легенде)'\ndef compare (long, short):\n\tprint(len(long)>len(short))\n\ncompare(long_phrase, short_phrase)\n\n# 2.1\ntext = 'Если программист в 9-00 утра на работе, значит, он там и ночевал'\nd=dict()\nfor letter in text:\n if letter not in d:\n d[letter]=1\n if letter in d:\n d[letter]+=1\nresult='В строке text {} букв \"а\" и {} букв \"и\"'.format(d['а'], d['и'])\nprint(result)\n\n# 2.2\n\nif len(text.replace('и','')) < len(text.replace('а','')):\n print('В строке больше букв \"и\"')\nelse:\n print('В строке больше букв \"а\"')\n\n# 3\nbyte=213680000\nmegabyte=byte/(10**6)\nprint('Объем файла равен {}Mb'.format(megabyte))\n\n# 4\nsin=math.sin(math.pi/6)\nprint(sin)\n\n''' 5 дробные числа не могут быть представлены в точности в бинарном виде,\nпоэтому значения округляются, и такие операции,\nкак 0.1+0.2, дают неточный результат '''\n\n\n# 5\ndef exchange (a, b):\n b=b-a\n a=a+b\n b=a-b\n print('a=',a,'b=',b)\nexchange(120,1)\n\n# 6\n# разбиваем число на элементы, получаем спискок\nnum=10011\nst=str(num)\nst.split()\nl=len(st)-1\nprint(l)\n# создаем новый список куда добавим вычисляемые значения\nnew_num=list()\n# вычисляем каждый элемент (умножение на 2 в степени номера эл-та)\nk=-1\nfor i in st:\n k=k+1\n i=int(i)*(2**(l-k))\n print(i)\n new_num.append(i)\nresult=sum(new_num)\nprint(result)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# NOTE(review): masked fragment — n, m, li and the `max` accumulator are
# defined above this excerpt (expected: count n, limit m, int list li,
# max = 0 — confirm against the full variant below).
# Scans every 3-combination of li and keeps the largest sum that is <= m.
for i in range(0, n):
    for j in range(i + 1, n):
        for k in range(j + 1, n):
            tmp = li[i] + li[j] + li[k]  # sum of the current triple
            if tmp <= m and max < tmp:
                max = tmp  # best sum found so far that fits the limit
print(max)
<|reserved_special_token_1|>
# Read n (count) and m (limit), then a list of n integers; print the
# largest sum of any three distinct elements that does not exceed m.
n, m = map(int, input().split())
li = list(map(int, input().split()))
# BUGFIX: renamed the accumulator from `max` to `best` — the original
# shadowed the builtin max() for the rest of the module.
best = 0
for i in range(0, n):
    for j in range(i + 1, n):
        for k in range(j + 1, n):
            tmp = li[i] + li[j] + li[k]
            if tmp <= m and best < tmp:
                best = tmp
print(best)
<|reserved_special_token_1|>
# Largest sum of three distinct list elements that does not exceed m.
n, m = map(int, input().split())
li = list(map(int, input().split()))
max = 0
for a in range(n):
    for b in range(a + 1, n):
        for c in range(b + 1, n):
            # candidate sum for this 3-combination
            triple_sum = li[a] + li[b] + li[c]
            if triple_sum <= m and triple_sum > max:
                max = triple_sum
print(max)
|
flexible
|
{
"blob_id": "83d0a32ef2d365d17caa9d311c367ed5828559ac",
"index": 4153,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(0, n):\n for j in range(i + 1, n):\n for k in range(j + 1, n):\n tmp = li[i] + li[j] + li[k]\n if tmp <= m and max < tmp:\n max = tmp\nprint(max)\n",
"step-3": "n, m = map(int, input().split())\nli = list(map(int, input().split()))\nmax = 0\nfor i in range(0, n):\n for j in range(i + 1, n):\n for k in range(j + 1, n):\n tmp = li[i] + li[j] + li[k]\n if tmp <= m and max < tmp:\n max = tmp\nprint(max)\n",
"step-4": "n, m = map(int, input().split())\nli = list(map(int, input().split()))\n\nmax = 0\nfor i in range(0, n):\n for j in range(i+1, n):\n for k in range(j+1, n):\n tmp = li[i] + li[j] + li[k]\n if(tmp <= m and max < tmp):\n max = tmp\n\nprint(max)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class EtherminePool(BasePool):
    """Pool API adapter for Ethermine.

    Fetches per-worker and per-miner statistics from the Ethermine REST
    API (https://ethermine.org/api/worker) and folds them into the shared
    results object. Masked members restored from the complete variants of
    this class elsewhere in the file.
    """

    # Per-worker stats endpoint; {MINER} and {WORKER} are substituted later.
    _MINER_URL_PER_WORKER = (
        'https://api.ethermine.org/miner/:{MINER}/worker/:{WORKER}/currentStats'
        )
    # Account-wide (per-miner) stats endpoint.
    _MINER_URL_PER_MINER = (
        'https://api.ethermine.org/miner/:{MINER}/currentStats')
    # Ethermine also runs ETC/ZEC pools; this adapter assumes ETH.
    _DEFAULT_COIN_ = 'ETH'

    def __init__(self, pool, pool_attrs):
        super(EtherminePool, self).__init__(pool, pool_attrs)

    def build_creation_parameters(self, pool, pool_attrs, pool_classname):
        """Extend the base creation parameters with a stable, human-readable
        unique_id derived from the pool hostname's server location."""
        params = super(EtherminePool, self).build_creation_parameters(pool,
            pool_attrs, pool_classname)
        server_location = 'US'
        if pool.startswith('eu1.etc') or pool.startswith('eu1.eth'):
            server_location = 'Europe'
        elif pool.startswith('us1-etc'):
            server_location = 'US'
        elif pool.startswith('us1.eth'):
            server_location = 'US East'
        elif pool.startswith('us2.eth'):
            server_location = 'US West'
        elif pool.startswith('asia1.eth'):
            server_location = 'Asia'
        # Key the pool by name, not URL/IP, which may change over time.
        params['unique_id'
            ] = 'ETHERMINE - ' + server_location + ' (' + self._DEFAULT_COIN_ + ')'
        return params

    def _clean_coin_address(self, miner):
        """Return the miner's coin address lower-cased, with any leading
        '0x' or '#0x' prefix stripped, as the Ethermine API expects."""
        coin_address = miner.coin_address.lower()
        if coin_address.startswith('0x'):
            coin_address = coin_address[2:]
        elif coin_address.startswith('#0x'):
            coin_address = coin_address[3:]
        return coin_address

    def get_worker_stats(self, miner, worker):
        """Fetch the current stats JSON for one worker of this miner."""
        url = self._MINER_URL_PER_WORKER.replace('{MINER}', self.
            _clean_coin_address(miner)).replace('{WORKER}', worker)
        api = RestAPI(url=url, port=80)
        return api.get_json()

    def get_miner_stats(self, miner):
        """Fetch the current account-wide stats JSON for this miner."""
        url = self._MINER_URL_PER_MINER.replace('{MINER}', self.
            _clean_coin_address(miner))
        api = RestAPI(url=url, port=80)
        return api.get_json()

    def get_pool_stats(self, results, miner, worker, algo, pool_id, pool_url):
        """Resolve algo/coin indices, fetch worker stats, populate results.

        Returns True when stats were parsed into `results`, else False.
        """
        if algo == 'ethash':
            # Ethermine reports ethash under its NiceHash name.
            algo_idx = get_algo_index('daggerhashimoto')
        else:
            algo_idx = get_algo_index(algo)
        # BUGFIX: compare with '==' — 'is' on an int literal relies on
        # CPython small-int caching and is not a correctness guarantee.
        if algo_idx == -1:
            return False
        coin_idx = get_coin_index(self._DEFAULT_COIN_)
        # TODO: read the fiat currency from config instead of assuming USD.
        coin_cost = get_coin_cost(self._DEFAULT_COIN_, 'USD')
        success = False
        worker_json = self.get_worker_stats(miner, worker)
        if worker_json:
            success = self.parse_json(worker_json, results, miner, worker,
                pool_id, algo, algo_idx, coin_idx, coin_cost)
        return success

    def parse_json(self, json, results, miner, worker, pool, algo, algo_idx,
        coin_idx, coin_cost):
        """Parse an Ethermine worker-stats payload into `results`.

        Returns True when results were populated, False on a 'NO DATA'
        response (which may also clear a stale coin address).
        """
        record = json['data']
        if record == 'NO DATA':
            # Possible coin switch: drop the stored address so it gets
            # re-resolved on the next polling cycle.
            miner_coin_idx = None
            if hasattr(miner, 'coin_idx'):
                miner_coin_idx = miner.coin
            if miner_coin_idx is None or miner_coin_idx != coin_idx:
                miner.coin_address = ''
            return False
        speed_suffix = 'H'  # Ethermine reports speeds in plain hashes/s
        try:
            speed_accepted = float(record['currentHashrate'])
        except (KeyError, TypeError, ValueError):
            speed_accepted = 0.0
        try:
            speed_reported = float(record['reportedHashrate'])
        except (KeyError, TypeError, ValueError):
            speed_reported = None
        json_miner_stats = self.get_miner_stats(miner)
        record_miner_stats = json_miner_stats['data']
        try:
            coins_per_minute = float(record_miner_stats['coinsPerMin'])
        except (KeyError, TypeError, ValueError):
            coins_per_minute = 0.0
        try:
            active_workers = float(record_miner_stats['activeWorkers'])
        except (KeyError, TypeError, ValueError):
            active_workers = 1
        if not active_workers:
            # avoid dividing by zero when the API reports 0 active workers
            active_workers = 1
        # Profitability is coin / hash-unit / day. Ethermine only reports
        # coins per minute per MINER, so average over the active workers.
        # BUGFIX: guard the division — speed_accepted defaults to 0.0 above
        # and the original raised ZeroDivisionError in that case.
        if speed_accepted:
            profitability = coins_per_minute * (60 * 24
                ) / speed_accepted / active_workers
        else:
            profitability = 0.0
        results.populate_pool_results(miner, worker, pool, algo, algo_idx,
            coin_idx, coin_cost, profitability, speed_accepted,
            speed_reported, speed_suffix)
        return True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EtherminePool(BasePool):
    """Pool API adapter for Ethermine.

    Fetches per-worker and per-miner statistics from the Ethermine REST
    API (https://ethermine.org/api/worker) and folds them into the shared
    results object. Masked class attributes restored from the complete
    variants of this class elsewhere in the file.
    """

    # Per-worker stats endpoint; {MINER} and {WORKER} are substituted later.
    _MINER_URL_PER_WORKER = (
        'https://api.ethermine.org/miner/:{MINER}/worker/:{WORKER}/currentStats'
        )
    # Account-wide (per-miner) stats endpoint.
    _MINER_URL_PER_MINER = (
        'https://api.ethermine.org/miner/:{MINER}/currentStats')
    # Ethermine also runs ETC/ZEC pools; this adapter assumes ETH.
    _DEFAULT_COIN_ = 'ETH'

    def __init__(self, pool, pool_attrs):
        super(EtherminePool, self).__init__(pool, pool_attrs)

    def build_creation_parameters(self, pool, pool_attrs, pool_classname):
        """Extend the base creation parameters with a stable, human-readable
        unique_id derived from the pool hostname's server location."""
        params = super(EtherminePool, self).build_creation_parameters(pool,
            pool_attrs, pool_classname)
        server_location = 'US'
        if pool.startswith('eu1.etc') or pool.startswith('eu1.eth'):
            server_location = 'Europe'
        elif pool.startswith('us1-etc'):
            server_location = 'US'
        elif pool.startswith('us1.eth'):
            server_location = 'US East'
        elif pool.startswith('us2.eth'):
            server_location = 'US West'
        elif pool.startswith('asia1.eth'):
            server_location = 'Asia'
        # Key the pool by name, not URL/IP, which may change over time.
        params['unique_id'
            ] = 'ETHERMINE - ' + server_location + ' (' + self._DEFAULT_COIN_ + ')'
        return params

    def _clean_coin_address(self, miner):
        """Return the miner's coin address lower-cased, with any leading
        '0x' or '#0x' prefix stripped, as the Ethermine API expects."""
        coin_address = miner.coin_address.lower()
        if coin_address.startswith('0x'):
            coin_address = coin_address[2:]
        elif coin_address.startswith('#0x'):
            coin_address = coin_address[3:]
        return coin_address

    def get_worker_stats(self, miner, worker):
        """Fetch the current stats JSON for one worker of this miner."""
        url = self._MINER_URL_PER_WORKER.replace('{MINER}', self.
            _clean_coin_address(miner)).replace('{WORKER}', worker)
        api = RestAPI(url=url, port=80)
        return api.get_json()

    def get_miner_stats(self, miner):
        """Fetch the current account-wide stats JSON for this miner."""
        url = self._MINER_URL_PER_MINER.replace('{MINER}', self.
            _clean_coin_address(miner))
        api = RestAPI(url=url, port=80)
        return api.get_json()

    def get_pool_stats(self, results, miner, worker, algo, pool_id, pool_url):
        """Resolve algo/coin indices, fetch worker stats, populate results.

        Returns True when stats were parsed into `results`, else False.
        """
        if algo == 'ethash':
            # Ethermine reports ethash under its NiceHash name.
            algo_idx = get_algo_index('daggerhashimoto')
        else:
            algo_idx = get_algo_index(algo)
        # BUGFIX: compare with '==' — 'is' on an int literal relies on
        # CPython small-int caching and is not a correctness guarantee.
        if algo_idx == -1:
            return False
        coin_idx = get_coin_index(self._DEFAULT_COIN_)
        # TODO: read the fiat currency from config instead of assuming USD.
        coin_cost = get_coin_cost(self._DEFAULT_COIN_, 'USD')
        success = False
        worker_json = self.get_worker_stats(miner, worker)
        if worker_json:
            success = self.parse_json(worker_json, results, miner, worker,
                pool_id, algo, algo_idx, coin_idx, coin_cost)
        return success

    def parse_json(self, json, results, miner, worker, pool, algo, algo_idx,
        coin_idx, coin_cost):
        """Parse an Ethermine worker-stats payload into `results`.

        Returns True when results were populated, False on a 'NO DATA'
        response (which may also clear a stale coin address).
        """
        record = json['data']
        if record == 'NO DATA':
            # Possible coin switch: drop the stored address so it gets
            # re-resolved on the next polling cycle.
            miner_coin_idx = None
            if hasattr(miner, 'coin_idx'):
                miner_coin_idx = miner.coin
            if miner_coin_idx is None or miner_coin_idx != coin_idx:
                miner.coin_address = ''
            return False
        speed_suffix = 'H'  # Ethermine reports speeds in plain hashes/s
        try:
            speed_accepted = float(record['currentHashrate'])
        except (KeyError, TypeError, ValueError):
            speed_accepted = 0.0
        try:
            speed_reported = float(record['reportedHashrate'])
        except (KeyError, TypeError, ValueError):
            speed_reported = None
        json_miner_stats = self.get_miner_stats(miner)
        record_miner_stats = json_miner_stats['data']
        try:
            coins_per_minute = float(record_miner_stats['coinsPerMin'])
        except (KeyError, TypeError, ValueError):
            coins_per_minute = 0.0
        try:
            active_workers = float(record_miner_stats['activeWorkers'])
        except (KeyError, TypeError, ValueError):
            active_workers = 1
        if not active_workers:
            # avoid dividing by zero when the API reports 0 active workers
            active_workers = 1
        # Profitability is coin / hash-unit / day. Ethermine only reports
        # coins per minute per MINER, so average over the active workers.
        # BUGFIX: guard the division — speed_accepted defaults to 0.0 above
        # and the original raised ZeroDivisionError in that case.
        if speed_accepted:
            profitability = coins_per_minute * (60 * 24
                ) / speed_accepted / active_workers
        else:
            profitability = 0.0
        results.populate_pool_results(miner, worker, pool, algo, algo_idx,
            coin_idx, coin_cost, profitability, speed_accepted,
            speed_reported, speed_suffix)
        return True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EtherminePool(BasePool):
    """Pool API adapter for Ethermine.

    Fetches per-worker and per-miner statistics from the Ethermine REST
    API (https://ethermine.org/api/worker) and folds them into the shared
    results object.
    """

    # Per-worker stats endpoint; {MINER} and {WORKER} are substituted later.
    _MINER_URL_PER_WORKER = (
        'https://api.ethermine.org/miner/:{MINER}/worker/:{WORKER}/currentStats'
        )
    # Account-wide (per-miner) stats endpoint.
    _MINER_URL_PER_MINER = (
        'https://api.ethermine.org/miner/:{MINER}/currentStats')
    # Ethermine also runs ETC/ZEC pools; this adapter assumes ETH.
    _DEFAULT_COIN_ = 'ETH'

    def __init__(self, pool, pool_attrs):
        super(EtherminePool, self).__init__(pool, pool_attrs)

    def build_creation_parameters(self, pool, pool_attrs, pool_classname):
        """Extend the base creation parameters with a stable, human-readable
        unique_id derived from the pool hostname's server location."""
        params = super(EtherminePool, self).build_creation_parameters(pool,
            pool_attrs, pool_classname)
        server_location = 'US'
        if pool.startswith('eu1.etc') or pool.startswith('eu1.eth'):
            server_location = 'Europe'
        elif pool.startswith('us1-etc'):
            server_location = 'US'
        elif pool.startswith('us1.eth'):
            server_location = 'US East'
        elif pool.startswith('us2.eth'):
            server_location = 'US West'
        elif pool.startswith('asia1.eth'):
            server_location = 'Asia'
        # Key the pool by name, not URL/IP, which may change over time.
        params['unique_id'
            ] = 'ETHERMINE - ' + server_location + ' (' + self._DEFAULT_COIN_ + ')'
        return params

    def _clean_coin_address(self, miner):
        """Return the miner's coin address lower-cased, with any leading
        '0x' or '#0x' prefix stripped, as the Ethermine API expects."""
        coin_address = miner.coin_address.lower()
        if coin_address.startswith('0x'):
            coin_address = coin_address[2:]
        elif coin_address.startswith('#0x'):
            coin_address = coin_address[3:]
        return coin_address

    def get_worker_stats(self, miner, worker):
        """Fetch the current stats JSON for one worker of this miner."""
        url = self._MINER_URL_PER_WORKER.replace('{MINER}', self.
            _clean_coin_address(miner)).replace('{WORKER}', worker)
        api = RestAPI(url=url, port=80)
        return api.get_json()

    def get_miner_stats(self, miner):
        """Fetch the current account-wide stats JSON for this miner."""
        url = self._MINER_URL_PER_MINER.replace('{MINER}', self.
            _clean_coin_address(miner))
        api = RestAPI(url=url, port=80)
        return api.get_json()

    def get_pool_stats(self, results, miner, worker, algo, pool_id, pool_url):
        """Resolve algo/coin indices, fetch worker stats, populate results.

        Returns True when stats were parsed into `results`, else False.
        """
        if algo == 'ethash':
            # Ethermine reports ethash under its NiceHash name.
            algo_idx = get_algo_index('daggerhashimoto')
        else:
            algo_idx = get_algo_index(algo)
        # BUGFIX: compare with '==' — 'is' on an int literal relies on
        # CPython small-int caching and is not a correctness guarantee.
        if algo_idx == -1:
            return False
        coin_idx = get_coin_index(self._DEFAULT_COIN_)
        # TODO: read the fiat currency from config instead of assuming USD.
        coin_cost = get_coin_cost(self._DEFAULT_COIN_, 'USD')
        success = False
        worker_json = self.get_worker_stats(miner, worker)
        if worker_json:
            success = self.parse_json(worker_json, results, miner, worker,
                pool_id, algo, algo_idx, coin_idx, coin_cost)
        return success

    def parse_json(self, json, results, miner, worker, pool, algo, algo_idx,
        coin_idx, coin_cost):
        """Parse an Ethermine worker-stats payload into `results`.

        Returns True when results were populated, False on a 'NO DATA'
        response (which may also clear a stale coin address).
        """
        record = json['data']
        if record == 'NO DATA':
            # Possible coin switch: drop the stored address so it gets
            # re-resolved on the next polling cycle.
            miner_coin_idx = None
            if hasattr(miner, 'coin_idx'):
                miner_coin_idx = miner.coin
            if miner_coin_idx is None or miner_coin_idx != coin_idx:
                miner.coin_address = ''
            return False
        speed_suffix = 'H'  # Ethermine reports speeds in plain hashes/s
        try:
            speed_accepted = float(record['currentHashrate'])
        except (KeyError, TypeError, ValueError):
            speed_accepted = 0.0
        try:
            speed_reported = float(record['reportedHashrate'])
        except (KeyError, TypeError, ValueError):
            speed_reported = None
        json_miner_stats = self.get_miner_stats(miner)
        record_miner_stats = json_miner_stats['data']
        try:
            coins_per_minute = float(record_miner_stats['coinsPerMin'])
        except (KeyError, TypeError, ValueError):
            coins_per_minute = 0.0
        try:
            active_workers = float(record_miner_stats['activeWorkers'])
        except (KeyError, TypeError, ValueError):
            active_workers = 1
        if not active_workers:
            # avoid dividing by zero when the API reports 0 active workers
            active_workers = 1
        # Profitability is coin / hash-unit / day. Ethermine only reports
        # coins per minute per MINER, so average over the active workers.
        # BUGFIX: guard the division — speed_accepted defaults to 0.0 above
        # and the original raised ZeroDivisionError in that case.
        if speed_accepted:
            profitability = coins_per_minute * (60 * 24
                ) / speed_accepted / active_workers
        else:
            profitability = 0.0
        results.populate_pool_results(miner, worker, pool, algo, algo_idx,
            coin_idx, coin_cost, profitability, speed_accepted,
            speed_reported, speed_suffix)
        return True
<|reserved_special_token_1|>
from minermedic.pools.base_pool import BasePool
from phenome_core.util.rest_api import RestAPI
from minermedic.pools.helper import get_algo_index, get_coin_index, get_coin_cost
<|reserved_special_token_0|>
class EtherminePool(BasePool):
    """Pool API adapter for Ethermine.

    Fetches per-worker and per-miner statistics from the Ethermine REST
    API (https://ethermine.org/api/worker) and folds them into the shared
    results object.
    """

    # Per-worker stats endpoint; {MINER} and {WORKER} are substituted later.
    _MINER_URL_PER_WORKER = (
        'https://api.ethermine.org/miner/:{MINER}/worker/:{WORKER}/currentStats'
        )
    # Account-wide (per-miner) stats endpoint.
    _MINER_URL_PER_MINER = (
        'https://api.ethermine.org/miner/:{MINER}/currentStats')
    # Ethermine also runs ETC/ZEC pools; this adapter assumes ETH.
    _DEFAULT_COIN_ = 'ETH'

    def __init__(self, pool, pool_attrs):
        super(EtherminePool, self).__init__(pool, pool_attrs)

    def build_creation_parameters(self, pool, pool_attrs, pool_classname):
        """Extend the base creation parameters with a stable, human-readable
        unique_id derived from the pool hostname's server location."""
        params = super(EtherminePool, self).build_creation_parameters(pool,
            pool_attrs, pool_classname)
        server_location = 'US'
        if pool.startswith('eu1.etc') or pool.startswith('eu1.eth'):
            server_location = 'Europe'
        elif pool.startswith('us1-etc'):
            server_location = 'US'
        elif pool.startswith('us1.eth'):
            server_location = 'US East'
        elif pool.startswith('us2.eth'):
            server_location = 'US West'
        elif pool.startswith('asia1.eth'):
            server_location = 'Asia'
        # Key the pool by name, not URL/IP, which may change over time.
        params['unique_id'
            ] = 'ETHERMINE - ' + server_location + ' (' + self._DEFAULT_COIN_ + ')'
        return params

    def _clean_coin_address(self, miner):
        """Return the miner's coin address lower-cased, with any leading
        '0x' or '#0x' prefix stripped, as the Ethermine API expects."""
        coin_address = miner.coin_address.lower()
        if coin_address.startswith('0x'):
            coin_address = coin_address[2:]
        elif coin_address.startswith('#0x'):
            coin_address = coin_address[3:]
        return coin_address

    def get_worker_stats(self, miner, worker):
        """Fetch the current stats JSON for one worker of this miner."""
        url = self._MINER_URL_PER_WORKER.replace('{MINER}', self.
            _clean_coin_address(miner)).replace('{WORKER}', worker)
        api = RestAPI(url=url, port=80)
        return api.get_json()

    def get_miner_stats(self, miner):
        """Fetch the current account-wide stats JSON for this miner."""
        url = self._MINER_URL_PER_MINER.replace('{MINER}', self.
            _clean_coin_address(miner))
        api = RestAPI(url=url, port=80)
        return api.get_json()

    def get_pool_stats(self, results, miner, worker, algo, pool_id, pool_url):
        """Resolve algo/coin indices, fetch worker stats, populate results.

        Returns True when stats were parsed into `results`, else False.
        """
        if algo == 'ethash':
            # Ethermine reports ethash under its NiceHash name.
            algo_idx = get_algo_index('daggerhashimoto')
        else:
            algo_idx = get_algo_index(algo)
        # BUGFIX: compare with '==' — 'is' on an int literal relies on
        # CPython small-int caching and is not a correctness guarantee.
        if algo_idx == -1:
            return False
        coin_idx = get_coin_index(self._DEFAULT_COIN_)
        # TODO: read the fiat currency from config instead of assuming USD.
        coin_cost = get_coin_cost(self._DEFAULT_COIN_, 'USD')
        success = False
        worker_json = self.get_worker_stats(miner, worker)
        if worker_json:
            success = self.parse_json(worker_json, results, miner, worker,
                pool_id, algo, algo_idx, coin_idx, coin_cost)
        return success

    def parse_json(self, json, results, miner, worker, pool, algo, algo_idx,
        coin_idx, coin_cost):
        """Parse an Ethermine worker-stats payload into `results`.

        Returns True when results were populated, False on a 'NO DATA'
        response (which may also clear a stale coin address).
        """
        record = json['data']
        if record == 'NO DATA':
            # Possible coin switch: drop the stored address so it gets
            # re-resolved on the next polling cycle.
            miner_coin_idx = None
            if hasattr(miner, 'coin_idx'):
                miner_coin_idx = miner.coin
            if miner_coin_idx is None or miner_coin_idx != coin_idx:
                miner.coin_address = ''
            return False
        speed_suffix = 'H'  # Ethermine reports speeds in plain hashes/s
        try:
            speed_accepted = float(record['currentHashrate'])
        except (KeyError, TypeError, ValueError):
            speed_accepted = 0.0
        try:
            speed_reported = float(record['reportedHashrate'])
        except (KeyError, TypeError, ValueError):
            speed_reported = None
        json_miner_stats = self.get_miner_stats(miner)
        record_miner_stats = json_miner_stats['data']
        try:
            coins_per_minute = float(record_miner_stats['coinsPerMin'])
        except (KeyError, TypeError, ValueError):
            coins_per_minute = 0.0
        try:
            active_workers = float(record_miner_stats['activeWorkers'])
        except (KeyError, TypeError, ValueError):
            active_workers = 1
        if not active_workers:
            # avoid dividing by zero when the API reports 0 active workers
            active_workers = 1
        # Profitability is coin / hash-unit / day. Ethermine only reports
        # coins per minute per MINER, so average over the active workers.
        # BUGFIX: guard the division — speed_accepted defaults to 0.0 above
        # and the original raised ZeroDivisionError in that case.
        if speed_accepted:
            profitability = coins_per_minute * (60 * 24
                ) / speed_accepted / active_workers
        else:
            profitability = 0.0
        results.populate_pool_results(miner, worker, pool, algo, algo_idx,
            coin_idx, coin_cost, profitability, speed_accepted,
            speed_reported, speed_suffix)
        return True
<|reserved_special_token_1|>
# ethermine.py, Copyright (c) 2019, Nicholas Saparoff <nick.saparoff@gmail.com>: Original implementation
from minermedic.pools.base_pool import BasePool
from phenome_core.util.rest_api import RestAPI
from minermedic.pools.helper import get_algo_index, get_coin_index, get_coin_cost
"""
EtherminePool
This is the main Pool API for Ethermine.
SEE: https://ethermine.org/api/worker#monitoring
"""
class EtherminePool(BasePool):
    """Pool API adapter for Ethermine.

    Fetches per-worker and per-miner statistics from the Ethermine REST
    API (https://ethermine.org/api/worker) and folds them into the shared
    results object.
    """

    # Per-worker stats endpoint; {MINER} and {WORKER} are substituted later.
    _MINER_URL_PER_WORKER = (
        'https://api.ethermine.org/miner/:{MINER}/worker/:{WORKER}/currentStats'
        )
    # Account-wide (per-miner) stats endpoint.
    _MINER_URL_PER_MINER = (
        'https://api.ethermine.org/miner/:{MINER}/currentStats')
    # With Ethermine the coin is usually ETH, but could be ETC or ZCASH.
    _DEFAULT_COIN_ = 'ETH'

    def __init__(self, pool, pool_attrs):
        super(EtherminePool, self).__init__(pool, pool_attrs)

    def build_creation_parameters(self, pool, pool_attrs, pool_classname):
        """Extend the base creation parameters with a stable, human-readable
        unique_id ("POOL - LOCATION (COIN)") derived from the hostname."""
        params = super(EtherminePool, self).build_creation_parameters(pool,
            pool_attrs, pool_classname)
        server_location = 'US'
        if pool.startswith('eu1.etc') or pool.startswith('eu1.eth'):
            server_location = 'Europe'
        elif pool.startswith('us1-etc'):
            server_location = 'US'
        elif pool.startswith('us1.eth'):
            server_location = 'US East'
        elif pool.startswith('us2.eth'):
            server_location = 'US West'
        elif pool.startswith('asia1.eth'):
            server_location = 'Asia'
        # Give the pool a NAME as its unique id, as the URL/IP may change.
        params['unique_id'
            ] = 'ETHERMINE - ' + server_location + ' (' + self._DEFAULT_COIN_ + ')'
        return params

    def _clean_coin_address(self, miner):
        """Return the miner's coin address lower-cased, with any leading
        '0x' or '#0x' prefix stripped, as the Ethermine API expects."""
        coin_address = miner.coin_address.lower()
        if coin_address.startswith('0x'):
            coin_address = coin_address[2:]
        elif coin_address.startswith('#0x'):
            coin_address = coin_address[3:]
        return coin_address

    def get_worker_stats(self, miner, worker):
        """Fetch the current stats JSON for one worker of this miner."""
        url = self._MINER_URL_PER_WORKER.replace('{MINER}', self.
            _clean_coin_address(miner)).replace('{WORKER}', worker)
        api = RestAPI(url=url, port=80)
        return api.get_json()

    def get_miner_stats(self, miner):
        """Fetch the current account-wide stats JSON for this miner."""
        url = self._MINER_URL_PER_MINER.replace('{MINER}', self.
            _clean_coin_address(miner))
        api = RestAPI(url=url, port=80)
        return api.get_json()

    def get_pool_stats(self, results, miner, worker, algo, pool_id, pool_url):
        """Resolve algo/coin indices, fetch worker stats, populate results.

        Returns True when stats were parsed into `results`, else False.
        """
        if algo == 'ethash':
            # Ethermine reports ethash under its NiceHash name.
            algo_idx = get_algo_index('daggerhashimoto')
        else:
            algo_idx = get_algo_index(algo)
        # BUGFIX: compare with '==' — 'is' on an int literal relies on
        # CPython small-int caching and is not a correctness guarantee.
        if algo_idx == -1:
            return False
        coin_idx = get_coin_index(self._DEFAULT_COIN_)
        # TODO: read the fiat currency from config instead of assuming USD.
        coin_cost = get_coin_cost(self._DEFAULT_COIN_, 'USD')
        success = False
        worker_json = self.get_worker_stats(miner, worker)
        if worker_json:
            success = self.parse_json(worker_json, results, miner, worker,
                pool_id, algo, algo_idx, coin_idx, coin_cost)
        return success

    def parse_json(self, json, results, miner, worker, pool, algo, algo_idx,
        coin_idx, coin_cost):
        """Parse an Ethermine worker-stats payload into `results`.

        Returns True when results were populated, False on a 'NO DATA'
        response (which may also clear a stale coin address).
        """
        record = json['data']
        if record == 'NO DATA':
            # Possible coin switch: drop the stored address so it gets
            # re-resolved on the next polling cycle.
            miner_coin_idx = None
            if hasattr(miner, 'coin_idx'):
                # we have been mining so far
                miner_coin_idx = miner.coin
            if miner_coin_idx is None or miner_coin_idx != coin_idx:
                miner.coin_address = ''
            return False
        speed_suffix = 'H'  # Ethermine reports speeds in plain hashes/s
        try:
            speed_accepted = float(record['currentHashrate'])
        except (KeyError, TypeError, ValueError):
            speed_accepted = 0.0
        try:
            speed_reported = float(record['reportedHashrate'])
        except (KeyError, TypeError, ValueError):
            speed_reported = None
        # account-wide stats are needed for the profitability estimate
        json_miner_stats = self.get_miner_stats(miner)
        record_miner_stats = json_miner_stats['data']
        try:
            coins_per_minute = float(record_miner_stats['coinsPerMin'])
        except (KeyError, TypeError, ValueError):
            coins_per_minute = 0.0
        try:
            active_workers = float(record_miner_stats['activeWorkers'])
        except (KeyError, TypeError, ValueError):
            active_workers = 1
        if not active_workers:
            # avoid dividing by zero when the API reports 0 active workers
            active_workers = 1
        # Profitability is coin / hash-unit / day. Ethermine only reports
        # coins per minute per MINER, not per WORKER, so average over the
        # active workers.
        # BUGFIX: guard the division — speed_accepted defaults to 0.0 above
        # and the original raised ZeroDivisionError in that case.
        if speed_accepted:
            profitability = coins_per_minute * (60 * 24
                ) / speed_accepted / active_workers
        else:
            profitability = 0.0
        results.populate_pool_results(miner, worker, pool, algo, algo_idx,
            coin_idx, coin_cost, profitability, speed_accepted,
            speed_reported, speed_suffix)
        return True
|
flexible
|
{
"blob_id": "921c7255fad46c767f2ec1030ef9498da05b9bb1",
"index": 9958,
"step-1": "<mask token>\n\n\nclass EtherminePool(BasePool):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def build_creation_parameters(self, pool, pool_attrs, pool_classname):\n params = super(EtherminePool, self).build_creation_parameters(pool,\n pool_attrs, pool_classname)\n server_location = 'US'\n if pool.startswith('eu1.etc') or pool.startswith('eu1.eth'):\n server_location = 'Europe'\n elif pool.startswith('us1-etc'):\n server_location = 'US'\n elif pool.startswith('us1.eth'):\n server_location = 'US East'\n elif pool.startswith('us2.eth'):\n server_location = 'US West'\n elif pool.startswith('asia1.eth'):\n server_location = 'Asia'\n params['unique_id'\n ] = 'ETHERMINE - ' + server_location + ' (' + self._DEFAULT_COIN_ + ')'\n return params\n <mask token>\n\n def get_worker_stats(self, miner, worker):\n url = self._MINER_URL_PER_WORKER.replace('{MINER}', self.\n _clean_coin_address(miner)).replace('{WORKER}', worker)\n api = RestAPI(url=url, port=80)\n return api.get_json()\n\n def get_miner_stats(self, miner):\n url = self._MINER_URL_PER_MINER.replace('{MINER}', self.\n _clean_coin_address(miner))\n api = RestAPI(url=url, port=80)\n return api.get_json()\n\n def get_pool_stats(self, results, miner, worker, algo, pool_id, pool_url):\n if algo == 'ethash':\n algo_idx = get_algo_index('daggerhashimoto')\n else:\n algo_idx = get_algo_index(algo)\n if algo_idx is -1:\n return False\n coin_idx = get_coin_index(self._DEFAULT_COIN_)\n coin_cost = get_coin_cost(self._DEFAULT_COIN_, 'USD')\n success = False\n json = self.get_worker_stats(miner, worker)\n if json:\n success = self.parse_json(json, results, miner, worker, pool_id,\n algo, algo_idx, coin_idx, coin_cost)\n return success\n\n def parse_json(self, json, results, miner, worker, pool, algo, algo_idx,\n coin_idx, coin_cost):\n record = json['data']\n if record == 'NO DATA':\n miner_coin_idx = None\n if hasattr(miner, 'coin_idx'):\n miner_coin_idx = miner.coin\n if miner_coin_idx is None or 
miner_coin_idx != coin_idx:\n miner.coin_address = ''\n return False\n speed_suffix = 'H'\n try:\n speed_accepted = float(record['currentHashrate'])\n except:\n speed_accepted = 0.0\n try:\n speed_reported = float(record['reportedHashrate'])\n except:\n speed_reported = None\n json_miner_stats = self.get_miner_stats(miner)\n record_miner_stats = json_miner_stats['data']\n try:\n coins_per_minute = float(record_miner_stats['coinsPerMin'])\n except:\n coins_per_minute = 0.0\n try:\n active_workers = float(record_miner_stats['activeWorkers'])\n except:\n active_workers = 1\n profitability = coins_per_minute * (60 * 24\n ) / speed_accepted / active_workers\n results.populate_pool_results(miner, worker, pool, algo, algo_idx,\n coin_idx, coin_cost, profitability, speed_accepted,\n speed_reported, speed_suffix)\n return True\n",
"step-2": "<mask token>\n\n\nclass EtherminePool(BasePool):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, pool, pool_attrs):\n super(EtherminePool, self).__init__(pool, pool_attrs)\n\n def build_creation_parameters(self, pool, pool_attrs, pool_classname):\n params = super(EtherminePool, self).build_creation_parameters(pool,\n pool_attrs, pool_classname)\n server_location = 'US'\n if pool.startswith('eu1.etc') or pool.startswith('eu1.eth'):\n server_location = 'Europe'\n elif pool.startswith('us1-etc'):\n server_location = 'US'\n elif pool.startswith('us1.eth'):\n server_location = 'US East'\n elif pool.startswith('us2.eth'):\n server_location = 'US West'\n elif pool.startswith('asia1.eth'):\n server_location = 'Asia'\n params['unique_id'\n ] = 'ETHERMINE - ' + server_location + ' (' + self._DEFAULT_COIN_ + ')'\n return params\n\n def _clean_coin_address(self, miner):\n coin_address = miner.coin_address.lower()\n if coin_address.startswith('0x'):\n coin_address = coin_address[2:]\n elif coin_address.startswith('#0x'):\n coin_address = coin_address[3:]\n return coin_address\n\n def get_worker_stats(self, miner, worker):\n url = self._MINER_URL_PER_WORKER.replace('{MINER}', self.\n _clean_coin_address(miner)).replace('{WORKER}', worker)\n api = RestAPI(url=url, port=80)\n return api.get_json()\n\n def get_miner_stats(self, miner):\n url = self._MINER_URL_PER_MINER.replace('{MINER}', self.\n _clean_coin_address(miner))\n api = RestAPI(url=url, port=80)\n return api.get_json()\n\n def get_pool_stats(self, results, miner, worker, algo, pool_id, pool_url):\n if algo == 'ethash':\n algo_idx = get_algo_index('daggerhashimoto')\n else:\n algo_idx = get_algo_index(algo)\n if algo_idx is -1:\n return False\n coin_idx = get_coin_index(self._DEFAULT_COIN_)\n coin_cost = get_coin_cost(self._DEFAULT_COIN_, 'USD')\n success = False\n json = self.get_worker_stats(miner, worker)\n if json:\n success = self.parse_json(json, results, miner, worker, pool_id,\n 
algo, algo_idx, coin_idx, coin_cost)\n return success\n\n def parse_json(self, json, results, miner, worker, pool, algo, algo_idx,\n coin_idx, coin_cost):\n record = json['data']\n if record == 'NO DATA':\n miner_coin_idx = None\n if hasattr(miner, 'coin_idx'):\n miner_coin_idx = miner.coin\n if miner_coin_idx is None or miner_coin_idx != coin_idx:\n miner.coin_address = ''\n return False\n speed_suffix = 'H'\n try:\n speed_accepted = float(record['currentHashrate'])\n except:\n speed_accepted = 0.0\n try:\n speed_reported = float(record['reportedHashrate'])\n except:\n speed_reported = None\n json_miner_stats = self.get_miner_stats(miner)\n record_miner_stats = json_miner_stats['data']\n try:\n coins_per_minute = float(record_miner_stats['coinsPerMin'])\n except:\n coins_per_minute = 0.0\n try:\n active_workers = float(record_miner_stats['activeWorkers'])\n except:\n active_workers = 1\n profitability = coins_per_minute * (60 * 24\n ) / speed_accepted / active_workers\n results.populate_pool_results(miner, worker, pool, algo, algo_idx,\n coin_idx, coin_cost, profitability, speed_accepted,\n speed_reported, speed_suffix)\n return True\n",
"step-3": "<mask token>\n\n\nclass EtherminePool(BasePool):\n _MINER_URL_PER_WORKER = (\n 'https://api.ethermine.org/miner/:{MINER}/worker/:{WORKER}/currentStats'\n )\n _MINER_URL_PER_MINER = (\n 'https://api.ethermine.org/miner/:{MINER}/currentStats')\n _DEFAULT_COIN_ = 'ETH'\n\n def __init__(self, pool, pool_attrs):\n super(EtherminePool, self).__init__(pool, pool_attrs)\n\n def build_creation_parameters(self, pool, pool_attrs, pool_classname):\n params = super(EtherminePool, self).build_creation_parameters(pool,\n pool_attrs, pool_classname)\n server_location = 'US'\n if pool.startswith('eu1.etc') or pool.startswith('eu1.eth'):\n server_location = 'Europe'\n elif pool.startswith('us1-etc'):\n server_location = 'US'\n elif pool.startswith('us1.eth'):\n server_location = 'US East'\n elif pool.startswith('us2.eth'):\n server_location = 'US West'\n elif pool.startswith('asia1.eth'):\n server_location = 'Asia'\n params['unique_id'\n ] = 'ETHERMINE - ' + server_location + ' (' + self._DEFAULT_COIN_ + ')'\n return params\n\n def _clean_coin_address(self, miner):\n coin_address = miner.coin_address.lower()\n if coin_address.startswith('0x'):\n coin_address = coin_address[2:]\n elif coin_address.startswith('#0x'):\n coin_address = coin_address[3:]\n return coin_address\n\n def get_worker_stats(self, miner, worker):\n url = self._MINER_URL_PER_WORKER.replace('{MINER}', self.\n _clean_coin_address(miner)).replace('{WORKER}', worker)\n api = RestAPI(url=url, port=80)\n return api.get_json()\n\n def get_miner_stats(self, miner):\n url = self._MINER_URL_PER_MINER.replace('{MINER}', self.\n _clean_coin_address(miner))\n api = RestAPI(url=url, port=80)\n return api.get_json()\n\n def get_pool_stats(self, results, miner, worker, algo, pool_id, pool_url):\n if algo == 'ethash':\n algo_idx = get_algo_index('daggerhashimoto')\n else:\n algo_idx = get_algo_index(algo)\n if algo_idx is -1:\n return False\n coin_idx = get_coin_index(self._DEFAULT_COIN_)\n coin_cost = 
get_coin_cost(self._DEFAULT_COIN_, 'USD')\n success = False\n json = self.get_worker_stats(miner, worker)\n if json:\n success = self.parse_json(json, results, miner, worker, pool_id,\n algo, algo_idx, coin_idx, coin_cost)\n return success\n\n def parse_json(self, json, results, miner, worker, pool, algo, algo_idx,\n coin_idx, coin_cost):\n record = json['data']\n if record == 'NO DATA':\n miner_coin_idx = None\n if hasattr(miner, 'coin_idx'):\n miner_coin_idx = miner.coin\n if miner_coin_idx is None or miner_coin_idx != coin_idx:\n miner.coin_address = ''\n return False\n speed_suffix = 'H'\n try:\n speed_accepted = float(record['currentHashrate'])\n except:\n speed_accepted = 0.0\n try:\n speed_reported = float(record['reportedHashrate'])\n except:\n speed_reported = None\n json_miner_stats = self.get_miner_stats(miner)\n record_miner_stats = json_miner_stats['data']\n try:\n coins_per_minute = float(record_miner_stats['coinsPerMin'])\n except:\n coins_per_minute = 0.0\n try:\n active_workers = float(record_miner_stats['activeWorkers'])\n except:\n active_workers = 1\n profitability = coins_per_minute * (60 * 24\n ) / speed_accepted / active_workers\n results.populate_pool_results(miner, worker, pool, algo, algo_idx,\n coin_idx, coin_cost, profitability, speed_accepted,\n speed_reported, speed_suffix)\n return True\n",
"step-4": "from minermedic.pools.base_pool import BasePool\nfrom phenome_core.util.rest_api import RestAPI\nfrom minermedic.pools.helper import get_algo_index, get_coin_index, get_coin_cost\n<mask token>\n\n\nclass EtherminePool(BasePool):\n _MINER_URL_PER_WORKER = (\n 'https://api.ethermine.org/miner/:{MINER}/worker/:{WORKER}/currentStats'\n )\n _MINER_URL_PER_MINER = (\n 'https://api.ethermine.org/miner/:{MINER}/currentStats')\n _DEFAULT_COIN_ = 'ETH'\n\n def __init__(self, pool, pool_attrs):\n super(EtherminePool, self).__init__(pool, pool_attrs)\n\n def build_creation_parameters(self, pool, pool_attrs, pool_classname):\n params = super(EtherminePool, self).build_creation_parameters(pool,\n pool_attrs, pool_classname)\n server_location = 'US'\n if pool.startswith('eu1.etc') or pool.startswith('eu1.eth'):\n server_location = 'Europe'\n elif pool.startswith('us1-etc'):\n server_location = 'US'\n elif pool.startswith('us1.eth'):\n server_location = 'US East'\n elif pool.startswith('us2.eth'):\n server_location = 'US West'\n elif pool.startswith('asia1.eth'):\n server_location = 'Asia'\n params['unique_id'\n ] = 'ETHERMINE - ' + server_location + ' (' + self._DEFAULT_COIN_ + ')'\n return params\n\n def _clean_coin_address(self, miner):\n coin_address = miner.coin_address.lower()\n if coin_address.startswith('0x'):\n coin_address = coin_address[2:]\n elif coin_address.startswith('#0x'):\n coin_address = coin_address[3:]\n return coin_address\n\n def get_worker_stats(self, miner, worker):\n url = self._MINER_URL_PER_WORKER.replace('{MINER}', self.\n _clean_coin_address(miner)).replace('{WORKER}', worker)\n api = RestAPI(url=url, port=80)\n return api.get_json()\n\n def get_miner_stats(self, miner):\n url = self._MINER_URL_PER_MINER.replace('{MINER}', self.\n _clean_coin_address(miner))\n api = RestAPI(url=url, port=80)\n return api.get_json()\n\n def get_pool_stats(self, results, miner, worker, algo, pool_id, pool_url):\n if algo == 'ethash':\n algo_idx = 
get_algo_index('daggerhashimoto')\n else:\n algo_idx = get_algo_index(algo)\n if algo_idx is -1:\n return False\n coin_idx = get_coin_index(self._DEFAULT_COIN_)\n coin_cost = get_coin_cost(self._DEFAULT_COIN_, 'USD')\n success = False\n json = self.get_worker_stats(miner, worker)\n if json:\n success = self.parse_json(json, results, miner, worker, pool_id,\n algo, algo_idx, coin_idx, coin_cost)\n return success\n\n def parse_json(self, json, results, miner, worker, pool, algo, algo_idx,\n coin_idx, coin_cost):\n record = json['data']\n if record == 'NO DATA':\n miner_coin_idx = None\n if hasattr(miner, 'coin_idx'):\n miner_coin_idx = miner.coin\n if miner_coin_idx is None or miner_coin_idx != coin_idx:\n miner.coin_address = ''\n return False\n speed_suffix = 'H'\n try:\n speed_accepted = float(record['currentHashrate'])\n except:\n speed_accepted = 0.0\n try:\n speed_reported = float(record['reportedHashrate'])\n except:\n speed_reported = None\n json_miner_stats = self.get_miner_stats(miner)\n record_miner_stats = json_miner_stats['data']\n try:\n coins_per_minute = float(record_miner_stats['coinsPerMin'])\n except:\n coins_per_minute = 0.0\n try:\n active_workers = float(record_miner_stats['activeWorkers'])\n except:\n active_workers = 1\n profitability = coins_per_minute * (60 * 24\n ) / speed_accepted / active_workers\n results.populate_pool_results(miner, worker, pool, algo, algo_idx,\n coin_idx, coin_cost, profitability, speed_accepted,\n speed_reported, speed_suffix)\n return True\n",
"step-5": "# ethermine.py, Copyright (c) 2019, Nicholas Saparoff <nick.saparoff@gmail.com>: Original implementation\n\nfrom minermedic.pools.base_pool import BasePool\nfrom phenome_core.util.rest_api import RestAPI\nfrom minermedic.pools.helper import get_algo_index, get_coin_index, get_coin_cost\n\n\"\"\"\n\nEtherminePool\n\n This is the main Pool API for Ethermine.\n SEE: https://ethermine.org/api/worker#monitoring\n \n\"\"\"\n\n\nclass EtherminePool(BasePool):\n\n # PER WORKER\n _MINER_URL_PER_WORKER = \"https://api.ethermine.org/miner/:{MINER}/worker/:{WORKER}/currentStats\"\n\n # PER MINER\n _MINER_URL_PER_MINER = \"https://api.ethermine.org/miner/:{MINER}/currentStats\"\n\n # with Ethermine, the coin is Usually ETH, but could be ETC or ZCASH\n _DEFAULT_COIN_ = \"ETH\"\n\n def __init__(self, pool, pool_attrs):\n super(EtherminePool, self).__init__(pool, pool_attrs)\n\n def build_creation_parameters(self, pool, pool_attrs, pool_classname):\n\n # get the default creation parameters\n params = super(EtherminePool, self).build_creation_parameters(pool, pool_attrs, pool_classname)\n\n server_location = \"US\"\n\n if pool.startswith(\"eu1.etc\") or pool.startswith(\"eu1.eth\"):\n server_location = \"Europe\"\n elif pool.startswith(\"us1-etc\"):\n server_location = \"US\"\n elif pool.startswith(\"us1.eth\"):\n server_location = \"US East\"\n elif pool.startswith(\"us2.eth\"):\n server_location = \"US West\"\n elif pool.startswith(\"asia1.eth\"):\n server_location = \"Asia\"\n\n # Set the unique ID of the pool (give it a NAME, as the URL/IP may change)\n # POOL - LOCATION (COIN)\n params['unique_id'] = \"ETHERMINE - \" + server_location + \" (\" + self._DEFAULT_COIN_ + \")\"\n\n return params\n\n def _clean_coin_address(self, miner):\n\n coin_address = miner.coin_address.lower()\n if coin_address.startswith('0x'):\n coin_address = coin_address[2:]\n elif coin_address.startswith('#0x'):\n coin_address = coin_address[3:]\n\n return coin_address\n\n def 
get_worker_stats(self, miner, worker):\n\n # build the miner URL\n url = self._MINER_URL_PER_WORKER.replace(\"{MINER}\",self._clean_coin_address(miner)).replace(\"{WORKER}\",worker)\n\n api = RestAPI(url=url, port=80)\n\n return api.get_json()\n\n def get_miner_stats(self, miner):\n\n # build the miner URL\n url = self._MINER_URL_PER_MINER.replace(\"{MINER}\", self._clean_coin_address(miner))\n\n api = RestAPI(url=url, port=80)\n\n return api.get_json()\n\n def get_pool_stats(self, results, miner, worker, algo, pool_id, pool_url):\n\n if algo == 'ethash':\n algo_idx = get_algo_index('daggerhashimoto')\n else:\n algo_idx = get_algo_index(algo)\n\n if algo_idx is -1:\n return False\n\n coin_idx = get_coin_index(self._DEFAULT_COIN_)\n\n # get the cost of the coin\n # TODO - get the currency from the config, do not assume USD\n coin_cost = get_coin_cost(self._DEFAULT_COIN_,'USD')\n\n success = False\n\n json = self.get_worker_stats(miner, worker)\n\n if json:\n success = self.parse_json(json, results, miner, worker, pool_id, algo, algo_idx, coin_idx, coin_cost)\n\n return success\n\n def parse_json(self, json, results, miner, worker, pool, algo, algo_idx, coin_idx, coin_cost):\n\n # get the record\n record = json['data']\n\n if record == 'NO DATA':\n\n # check coin switch?\n miner_coin_idx = None\n\n if hasattr(miner, 'coin_idx'):\n # we have been mining so far\n miner_coin_idx = miner.coin\n\n if miner_coin_idx is None or miner_coin_idx != coin_idx:\n # reset the coin address, maybe switched coin\n miner.coin_address = ''\n\n # no data, just fail\n return False\n\n # API call results, speed is in units of Hashes\n speed_suffix = 'H'\n\n try:\n # get accepted hashrate\n speed_accepted = float(record['currentHashrate'])\n except:\n speed_accepted = 0.0\n\n try:\n # get \"reported\" hashrate\n speed_reported = float(record['reportedHashrate'])\n except:\n speed_reported = None\n\n # now get the miner stats for profitability\n json_miner_stats = 
self.get_miner_stats(miner)\n\n # get the record\n record_miner_stats = json_miner_stats['data']\n\n try:\n coins_per_minute = float(record_miner_stats['coinsPerMin'])\n except:\n coins_per_minute = 0.0\n\n try:\n active_workers = float(record_miner_stats['activeWorkers'])\n except:\n active_workers = 1\n\n # profitability is a measure of COIN / speed suffix / per DAY\n # ETHERMINE only gives coin estimates per MINER per MINUTE, not per WORKER\n # so we need to average it out by dividing by the # of active workers\n profitability = ((coins_per_minute * (60 * 24))/speed_accepted)/active_workers\n\n # finally set the API results into the main results object\n results.populate_pool_results(miner, worker, pool, algo, algo_idx, coin_idx, coin_cost, profitability,\n speed_accepted, speed_reported, speed_suffix)\n\n # if we got here, we were successful\n return True\n\n",
"step-ids": [
6,
8,
9,
10,
11
]
}
|
[
6,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
class DISTRICT:
def __init__(self, cdcode, county, district, street, city, zipcode,
state, mailstreet, mailcity, mailzip, mailstate, phone, extphone,
faxnumber, email, admfname, admlname, admemail, lat, long,
distrownercode, doctype, statustype, lastupdate):
self.cdcode = cdcode
self.county = county
self.district = district
self.street = street
self.city = city
self.zipcode = zipcode
self.state = state
self.mailstreet = mailstreet
self.mailcity = mailcity
self.mailzip = mailzip
self.mailstate = mailstate
self.phone = phone
self.extphone = extphone
self.faxnumber = faxnumber
self.email = email
self.admfname = admfname
self.admlname = admlname
self.admemail = admemail
self.lat = lat
self.long = long
self.distrownercode = distrownercode
self.doctype = doctype
self.statustype = statustype
self.lastupdate = lastupdate
def get_district_name(self):
print(self.district)
def get_district_cdcode(self):
print(self.cdcode)
def get_district_statustype(self):
print(self.statustype)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DISTRICT:
def __init__(self, cdcode, county, district, street, city, zipcode,
state, mailstreet, mailcity, mailzip, mailstate, phone, extphone,
faxnumber, email, admfname, admlname, admemail, lat, long,
distrownercode, doctype, statustype, lastupdate):
self.cdcode = cdcode
self.county = county
self.district = district
self.street = street
self.city = city
self.zipcode = zipcode
self.state = state
self.mailstreet = mailstreet
self.mailcity = mailcity
self.mailzip = mailzip
self.mailstate = mailstate
self.phone = phone
self.extphone = extphone
self.faxnumber = faxnumber
self.email = email
self.admfname = admfname
self.admlname = admlname
self.admemail = admemail
self.lat = lat
self.long = long
self.distrownercode = distrownercode
self.doctype = doctype
self.statustype = statustype
self.lastupdate = lastupdate
def get_district_name(self):
print(self.district)
def get_district_cdcode(self):
print(self.cdcode)
def get_district_statustype(self):
print(self.statustype)
def start_end_timer():
print(time.perf_counter())
def read_text_file(strfile):
f = open(strfile, 'r')
f.read()
def print_text_file(strfile):
f = open(strfile, 'r')
print(f.read(3))
def load_text_file_to_class(strfile):
t = open('/home/student/Desktop/schooldata/copiedfile.txt', 'w')
f = open(strfile, 'r')
next(f)
for line in f:
d = []
d = line.split('\t')
district = DISTRICT(d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7],
d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15], d[16], d[
17], d[18], d[19], d[20], d[21], d[22], d[23])
district.get_district_name()
district.get_district_cdcode()
district.get_district_statustype()
f.close()
t.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DISTRICT:
def __init__(self, cdcode, county, district, street, city, zipcode,
state, mailstreet, mailcity, mailzip, mailstate, phone, extphone,
faxnumber, email, admfname, admlname, admemail, lat, long,
distrownercode, doctype, statustype, lastupdate):
self.cdcode = cdcode
self.county = county
self.district = district
self.street = street
self.city = city
self.zipcode = zipcode
self.state = state
self.mailstreet = mailstreet
self.mailcity = mailcity
self.mailzip = mailzip
self.mailstate = mailstate
self.phone = phone
self.extphone = extphone
self.faxnumber = faxnumber
self.email = email
self.admfname = admfname
self.admlname = admlname
self.admemail = admemail
self.lat = lat
self.long = long
self.distrownercode = distrownercode
self.doctype = doctype
self.statustype = statustype
self.lastupdate = lastupdate
def get_district_name(self):
print(self.district)
def get_district_cdcode(self):
print(self.cdcode)
def get_district_statustype(self):
print(self.statustype)
def start_end_timer():
print(time.perf_counter())
def read_text_file(strfile):
f = open(strfile, 'r')
f.read()
def print_text_file(strfile):
f = open(strfile, 'r')
print(f.read(3))
def load_text_file_to_class(strfile):
t = open('/home/student/Desktop/schooldata/copiedfile.txt', 'w')
f = open(strfile, 'r')
next(f)
for line in f:
d = []
d = line.split('\t')
district = DISTRICT(d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7],
d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15], d[16], d[
17], d[18], d[19], d[20], d[21], d[22], d[23])
district.get_district_name()
district.get_district_cdcode()
district.get_district_statustype()
f.close()
t.close()
start_end_timer()
<|reserved_special_token_0|>
load_text_file_to_class(strfile)
start_end_timer()
<|reserved_special_token_1|>
import time
class DISTRICT:
def __init__(self, cdcode, county, district, street, city, zipcode,
state, mailstreet, mailcity, mailzip, mailstate, phone, extphone,
faxnumber, email, admfname, admlname, admemail, lat, long,
distrownercode, doctype, statustype, lastupdate):
self.cdcode = cdcode
self.county = county
self.district = district
self.street = street
self.city = city
self.zipcode = zipcode
self.state = state
self.mailstreet = mailstreet
self.mailcity = mailcity
self.mailzip = mailzip
self.mailstate = mailstate
self.phone = phone
self.extphone = extphone
self.faxnumber = faxnumber
self.email = email
self.admfname = admfname
self.admlname = admlname
self.admemail = admemail
self.lat = lat
self.long = long
self.distrownercode = distrownercode
self.doctype = doctype
self.statustype = statustype
self.lastupdate = lastupdate
def get_district_name(self):
print(self.district)
def get_district_cdcode(self):
print(self.cdcode)
def get_district_statustype(self):
print(self.statustype)
def start_end_timer():
print(time.perf_counter())
def read_text_file(strfile):
f = open(strfile, 'r')
f.read()
def print_text_file(strfile):
f = open(strfile, 'r')
print(f.read(3))
def load_text_file_to_class(strfile):
t = open('/home/student/Desktop/schooldata/copiedfile.txt', 'w')
f = open(strfile, 'r')
next(f)
for line in f:
d = []
d = line.split('\t')
district = DISTRICT(d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7],
d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15], d[16], d[
17], d[18], d[19], d[20], d[21], d[22], d[23])
district.get_district_name()
district.get_district_cdcode()
district.get_district_statustype()
f.close()
t.close()
start_end_timer()
strfile = '/home/student/Desktop/schooldata/pubdistricts.txt'
load_text_file_to_class(strfile)
start_end_timer()
<|reserved_special_token_1|>
import time
class DISTRICT:
def __init__(
self, cdcode, county, district, street, city, zipcode,
state, mailstreet, mailcity, mailzip, mailstate, phone, extphone,
faxnumber, email, admfname, admlname, admemail, lat, long,
distrownercode, doctype, statustype, lastupdate):
self.cdcode = cdcode
self.county = county
self.district = district
self.street = street
self.city = city
self.zipcode = zipcode
self.state = state
self.mailstreet = mailstreet
self.mailcity = mailcity
self.mailzip = mailzip
self.mailstate = mailstate
self.phone = phone
self.extphone = extphone
self.faxnumber = faxnumber
self.email = email
self.admfname = admfname
self.admlname = admlname
self.admemail = admemail
self.lat = lat
self.long = long
self.distrownercode = distrownercode
self.doctype = doctype
self.statustype = statustype
self.lastupdate = lastupdate
def get_district_name(self):
print(self.district)
def get_district_cdcode(self):
print(self.cdcode)
def get_district_statustype(self):
print(self.statustype)
def start_end_timer():
print(time.perf_counter())
def read_text_file(strfile):
f = open(strfile, "r")
f.read()
def print_text_file(strfile):
f = open(strfile, "r")
print(f.read(3))
def load_text_file_to_class(strfile):
t = open("/home/student/Desktop/schooldata/copiedfile.txt", "w")
f = open(strfile, "r")
next(f)
for line in f:
d = []
d = line.split("\t")
# print(d)
# t.write(d)
district = DISTRICT(d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], d[8], d[9], d[10], d[11],
d[12], d[13], d[14], d[15], d[16], d[17], d[18], d[19], d[20], d[21], d[22], d[23])
district.get_district_name()
district.get_district_cdcode()
district.get_district_statustype()
f.close()
t.close()
start_end_timer()
strfile = "/home/student/Desktop/schooldata/pubdistricts.txt"
load_text_file_to_class(strfile)
start_end_timer()
|
flexible
|
{
"blob_id": "462d73195680118d19a3d4e8a855e65aaeecb3c6",
"index": 892,
"step-1": "<mask token>\n\n\nclass DISTRICT:\n\n def __init__(self, cdcode, county, district, street, city, zipcode,\n state, mailstreet, mailcity, mailzip, mailstate, phone, extphone,\n faxnumber, email, admfname, admlname, admemail, lat, long,\n distrownercode, doctype, statustype, lastupdate):\n self.cdcode = cdcode\n self.county = county\n self.district = district\n self.street = street\n self.city = city\n self.zipcode = zipcode\n self.state = state\n self.mailstreet = mailstreet\n self.mailcity = mailcity\n self.mailzip = mailzip\n self.mailstate = mailstate\n self.phone = phone\n self.extphone = extphone\n self.faxnumber = faxnumber\n self.email = email\n self.admfname = admfname\n self.admlname = admlname\n self.admemail = admemail\n self.lat = lat\n self.long = long\n self.distrownercode = distrownercode\n self.doctype = doctype\n self.statustype = statustype\n self.lastupdate = lastupdate\n\n def get_district_name(self):\n print(self.district)\n\n def get_district_cdcode(self):\n print(self.cdcode)\n\n def get_district_statustype(self):\n print(self.statustype)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass DISTRICT:\n\n def __init__(self, cdcode, county, district, street, city, zipcode,\n state, mailstreet, mailcity, mailzip, mailstate, phone, extphone,\n faxnumber, email, admfname, admlname, admemail, lat, long,\n distrownercode, doctype, statustype, lastupdate):\n self.cdcode = cdcode\n self.county = county\n self.district = district\n self.street = street\n self.city = city\n self.zipcode = zipcode\n self.state = state\n self.mailstreet = mailstreet\n self.mailcity = mailcity\n self.mailzip = mailzip\n self.mailstate = mailstate\n self.phone = phone\n self.extphone = extphone\n self.faxnumber = faxnumber\n self.email = email\n self.admfname = admfname\n self.admlname = admlname\n self.admemail = admemail\n self.lat = lat\n self.long = long\n self.distrownercode = distrownercode\n self.doctype = doctype\n self.statustype = statustype\n self.lastupdate = lastupdate\n\n def get_district_name(self):\n print(self.district)\n\n def get_district_cdcode(self):\n print(self.cdcode)\n\n def get_district_statustype(self):\n print(self.statustype)\n\n\ndef start_end_timer():\n print(time.perf_counter())\n\n\ndef read_text_file(strfile):\n f = open(strfile, 'r')\n f.read()\n\n\ndef print_text_file(strfile):\n f = open(strfile, 'r')\n print(f.read(3))\n\n\ndef load_text_file_to_class(strfile):\n t = open('/home/student/Desktop/schooldata/copiedfile.txt', 'w')\n f = open(strfile, 'r')\n next(f)\n for line in f:\n d = []\n d = line.split('\\t')\n district = DISTRICT(d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7],\n d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15], d[16], d[\n 17], d[18], d[19], d[20], d[21], d[22], d[23])\n district.get_district_name()\n district.get_district_cdcode()\n district.get_district_statustype()\n f.close()\n t.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass DISTRICT:\n\n def __init__(self, cdcode, county, district, street, city, zipcode,\n state, mailstreet, mailcity, mailzip, mailstate, phone, extphone,\n faxnumber, email, admfname, admlname, admemail, lat, long,\n distrownercode, doctype, statustype, lastupdate):\n self.cdcode = cdcode\n self.county = county\n self.district = district\n self.street = street\n self.city = city\n self.zipcode = zipcode\n self.state = state\n self.mailstreet = mailstreet\n self.mailcity = mailcity\n self.mailzip = mailzip\n self.mailstate = mailstate\n self.phone = phone\n self.extphone = extphone\n self.faxnumber = faxnumber\n self.email = email\n self.admfname = admfname\n self.admlname = admlname\n self.admemail = admemail\n self.lat = lat\n self.long = long\n self.distrownercode = distrownercode\n self.doctype = doctype\n self.statustype = statustype\n self.lastupdate = lastupdate\n\n def get_district_name(self):\n print(self.district)\n\n def get_district_cdcode(self):\n print(self.cdcode)\n\n def get_district_statustype(self):\n print(self.statustype)\n\n\ndef start_end_timer():\n print(time.perf_counter())\n\n\ndef read_text_file(strfile):\n f = open(strfile, 'r')\n f.read()\n\n\ndef print_text_file(strfile):\n f = open(strfile, 'r')\n print(f.read(3))\n\n\ndef load_text_file_to_class(strfile):\n t = open('/home/student/Desktop/schooldata/copiedfile.txt', 'w')\n f = open(strfile, 'r')\n next(f)\n for line in f:\n d = []\n d = line.split('\\t')\n district = DISTRICT(d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7],\n d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15], d[16], d[\n 17], d[18], d[19], d[20], d[21], d[22], d[23])\n district.get_district_name()\n district.get_district_cdcode()\n district.get_district_statustype()\n f.close()\n t.close()\n\n\nstart_end_timer()\n<mask token>\nload_text_file_to_class(strfile)\nstart_end_timer()\n",
"step-4": "import time\n\n\nclass DISTRICT:\n\n def __init__(self, cdcode, county, district, street, city, zipcode,\n state, mailstreet, mailcity, mailzip, mailstate, phone, extphone,\n faxnumber, email, admfname, admlname, admemail, lat, long,\n distrownercode, doctype, statustype, lastupdate):\n self.cdcode = cdcode\n self.county = county\n self.district = district\n self.street = street\n self.city = city\n self.zipcode = zipcode\n self.state = state\n self.mailstreet = mailstreet\n self.mailcity = mailcity\n self.mailzip = mailzip\n self.mailstate = mailstate\n self.phone = phone\n self.extphone = extphone\n self.faxnumber = faxnumber\n self.email = email\n self.admfname = admfname\n self.admlname = admlname\n self.admemail = admemail\n self.lat = lat\n self.long = long\n self.distrownercode = distrownercode\n self.doctype = doctype\n self.statustype = statustype\n self.lastupdate = lastupdate\n\n def get_district_name(self):\n print(self.district)\n\n def get_district_cdcode(self):\n print(self.cdcode)\n\n def get_district_statustype(self):\n print(self.statustype)\n\n\ndef start_end_timer():\n print(time.perf_counter())\n\n\ndef read_text_file(strfile):\n f = open(strfile, 'r')\n f.read()\n\n\ndef print_text_file(strfile):\n f = open(strfile, 'r')\n print(f.read(3))\n\n\ndef load_text_file_to_class(strfile):\n t = open('/home/student/Desktop/schooldata/copiedfile.txt', 'w')\n f = open(strfile, 'r')\n next(f)\n for line in f:\n d = []\n d = line.split('\\t')\n district = DISTRICT(d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7],\n d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15], d[16], d[\n 17], d[18], d[19], d[20], d[21], d[22], d[23])\n district.get_district_name()\n district.get_district_cdcode()\n district.get_district_statustype()\n f.close()\n t.close()\n\n\nstart_end_timer()\nstrfile = '/home/student/Desktop/schooldata/pubdistricts.txt'\nload_text_file_to_class(strfile)\nstart_end_timer()\n",
"step-5": "import time\n\n\nclass DISTRICT:\n\n def __init__(\n self, cdcode, county, district, street, city, zipcode,\n state, mailstreet, mailcity, mailzip, mailstate, phone, extphone,\n faxnumber, email, admfname, admlname, admemail, lat, long,\n distrownercode, doctype, statustype, lastupdate):\n\n self.cdcode = cdcode\n self.county = county\n self.district = district\n self.street = street\n self.city = city\n self.zipcode = zipcode\n self.state = state\n self.mailstreet = mailstreet\n self.mailcity = mailcity\n self.mailzip = mailzip\n self.mailstate = mailstate\n self.phone = phone\n self.extphone = extphone\n self.faxnumber = faxnumber\n self.email = email\n self.admfname = admfname\n self.admlname = admlname\n self.admemail = admemail\n self.lat = lat\n self.long = long\n self.distrownercode = distrownercode\n self.doctype = doctype\n self.statustype = statustype\n self.lastupdate = lastupdate\n\n def get_district_name(self):\n print(self.district)\n\n def get_district_cdcode(self):\n print(self.cdcode)\n\n def get_district_statustype(self):\n print(self.statustype)\n\ndef start_end_timer():\n print(time.perf_counter())\n\n\ndef read_text_file(strfile):\n f = open(strfile, \"r\")\n f.read()\n\n\ndef print_text_file(strfile):\n f = open(strfile, \"r\")\n print(f.read(3))\n\n\ndef load_text_file_to_class(strfile):\n t = open(\"/home/student/Desktop/schooldata/copiedfile.txt\", \"w\")\n f = open(strfile, \"r\")\n next(f)\n\n\n for line in f:\n d = []\n d = line.split(\"\\t\")\n # print(d)\n # t.write(d)\n district = DISTRICT(d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], d[8], d[9], d[10], d[11],\n d[12], d[13], d[14], d[15], d[16], d[17], d[18], d[19], d[20], d[21], d[22], d[23])\n district.get_district_name()\n district.get_district_cdcode()\n district.get_district_statustype()\n\n f.close()\n t.close()\n\n\nstart_end_timer()\nstrfile = \"/home/student/Desktop/schooldata/pubdistricts.txt\"\nload_text_file_to_class(strfile)\nstart_end_timer()\n\n",
"step-ids": [
5,
9,
10,
12,
13
]
}
|
[
5,
9,
10,
12,
13
] |
<|reserved_special_token_0|>
class TestTask(unittest.TestCase):
<|reserved_special_token_0|>
def test_init_bad_invalid_filename(self):
task_attr = [
'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\sadfsda.py'
, 'frequency=1D', 'time=09:45', 'description=invalid filename']
task = Task(task_attr)
self.assertFalse(task.is_good())
task_attr = ['filename=', 'frequency=1D', 'time=09:45',
'description=invalid filename']
task = Task(task_attr)
self.assertFalse(task.is_good())
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestTask(unittest.TestCase):
def test_init(self):
task_attr = [
'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py'
, 'frequency=1D', 'time=09:45', 'description=invalid time']
task = Task(task_attr)
self.assertEqual(task.basename(), 'dummy_task2.py')
self.assertEqual(task.frequency, str('1D').lower())
self.assertEqual(task.filename,
'C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py'
)
self.assertEqual(task.time, '09:45')
self.assertTrue(task.is_good())
def test_init_bad_invalid_filename(self):
task_attr = [
'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\sadfsda.py'
, 'frequency=1D', 'time=09:45', 'description=invalid filename']
task = Task(task_attr)
self.assertFalse(task.is_good())
task_attr = ['filename=', 'frequency=1D', 'time=09:45',
'description=invalid filename']
task = Task(task_attr)
self.assertFalse(task.is_good())
def test_init_bad_invalid_time(self):
task_attr = [
'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py'
, 'frequency=1D', 'time=0ssss9:45', 'description=invalid time']
task = Task(task_attr)
self.assertFalse(task.is_good())
task_attr = [
'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py'
, 'frequency=1D', 'time=09:45:924355435',
'description=invalid time']
task = Task(task_attr)
self.assertFalse(task.is_good())
task_attr = [
'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py'
, 'frequency=1D', 'time=09924355435', 'description=invalid time']
task = Task(task_attr)
self.assertFalse(task.is_good())
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_init_good_zero_frequency(self):
task_attr = [
'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py'
, 'frequency=0minute', 'time=', 'description=invalid frequency']
task = Task(task_attr)
self.assertTrue(task.is_good())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestTask(unittest.TestCase):
def test_init(self):
task_attr = [
'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py'
, 'frequency=1D', 'time=09:45', 'description=invalid time']
task = Task(task_attr)
self.assertEqual(task.basename(), 'dummy_task2.py')
self.assertEqual(task.frequency, str('1D').lower())
self.assertEqual(task.filename,
'C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py'
)
self.assertEqual(task.time, '09:45')
self.assertTrue(task.is_good())
def test_init_bad_invalid_filename(self):
task_attr = [
'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\sadfsda.py'
, 'frequency=1D', 'time=09:45', 'description=invalid filename']
task = Task(task_attr)
self.assertFalse(task.is_good())
task_attr = ['filename=', 'frequency=1D', 'time=09:45',
'description=invalid filename']
task = Task(task_attr)
self.assertFalse(task.is_good())
def test_init_bad_invalid_time(self):
task_attr = [
'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py'
, 'frequency=1D', 'time=0ssss9:45', 'description=invalid time']
task = Task(task_attr)
self.assertFalse(task.is_good())
task_attr = [
'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py'
, 'frequency=1D', 'time=09:45:924355435',
'description=invalid time']
task = Task(task_attr)
self.assertFalse(task.is_good())
task_attr = [
'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py'
, 'frequency=1D', 'time=09924355435', 'description=invalid time']
task = Task(task_attr)
self.assertFalse(task.is_good())
<|reserved_special_token_0|>
def test_init_bad_invalid_frequency(self):
task_attr = [
'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py'
, 'frequency=1Dhhhh', 'time=', 'description=invalid frequency']
task = Task(task_attr)
self.assertFalse(task.is_good())
task_attr = [
'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py'
, 'frequency=D', 'time=', 'description=invalid frequency']
task = Task(task_attr)
self.assertFalse(task.is_good())
task_attr = [
'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py'
, 'frequency=2S', 'time=', 'description=invalid frequency']
task = Task(task_attr)
self.assertFalse(task.is_good())
task_attr = [
'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py'
, 'frequency=', 'time=', 'description=invalid frequency']
task = Task(task_attr)
self.assertFalse(task.is_good())
def test_init_good_zero_frequency(self):
task_attr = [
'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py'
, 'frequency=0minute', 'time=', 'description=invalid frequency']
task = Task(task_attr)
self.assertTrue(task.is_good())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestTask(unittest.TestCase):
    """Validate Task construction from 'key=value' attribute lists."""

    def test_init(self):
        """A fully valid attribute list yields a usable Task."""
        attrs = [
            'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py',
            'frequency=1D',
            'time=09:45',
            'description=invalid time',
        ]
        task = Task(attrs)
        self.assertEqual(task.basename(), 'dummy_task2.py')
        self.assertEqual(task.frequency, str('1D').lower())
        self.assertEqual(
            task.filename,
            'C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py',
        )
        self.assertEqual(task.time, '09:45')
        self.assertTrue(task.is_good())

    def test_init_bad_invalid_filename(self):
        """A nonexistent or empty filename marks the task as bad."""
        for fname in (
            'C:\\Users\\kcheng\\PycharmProjects\\first_project\\sadfsda.py',
            '',
        ):
            attrs = ['filename=' + fname, 'frequency=1D', 'time=09:45',
                     'description=invalid filename']
            self.assertFalse(Task(attrs).is_good())

    def test_init_bad_invalid_time(self):
        """Malformed time strings mark the task as bad."""
        for bad_time in ('0ssss9:45', '09:45:924355435', '09924355435'):
            attrs = [
                'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py',
                'frequency=1D',
                'time=' + bad_time,
                'description=invalid time',
            ]
            self.assertFalse(Task(attrs).is_good())

    def test_init_good_empty_time(self):
        """An empty time (and description) is still acceptable."""
        attrs = [
            'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py',
            'frequency=1D',
            'time=',
            'description=',
        ]
        self.assertTrue(Task(attrs).is_good())

    def test_init_bad_invalid_frequency(self):
        """Unparseable frequency values mark the task as bad."""
        for bad_freq in ('1Dhhhh', 'D', '2S', ''):
            attrs = [
                'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py',
                'frequency=' + bad_freq,
                'time=',
                'description=invalid frequency',
            ]
            self.assertFalse(Task(attrs).is_good())

    def test_init_good_zero_frequency(self):
        """A zero frequency ('0minute') is treated as valid."""
        attrs = [
            'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py',
            'frequency=0minute',
            'time=',
            'description=invalid frequency',
        ]
        self.assertTrue(Task(attrs).is_good())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import unittest
from dispatcher.task import *
from mock import *
class TestTask(unittest.TestCase):
    """Validate Task construction from 'key=value' attribute lists."""

    def test_init(self):
        """A fully valid attribute list yields a usable Task."""
        attrs = [
            'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py',
            'frequency=1D',
            'time=09:45',
            'description=invalid time',
        ]
        task = Task(attrs)
        self.assertEqual(task.basename(), 'dummy_task2.py')
        self.assertEqual(task.frequency, str('1D').lower())
        self.assertEqual(
            task.filename,
            'C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py',
        )
        self.assertEqual(task.time, '09:45')
        self.assertTrue(task.is_good())

    def test_init_bad_invalid_filename(self):
        """A nonexistent or empty filename marks the task as bad."""
        for fname in (
            'C:\\Users\\kcheng\\PycharmProjects\\first_project\\sadfsda.py',
            '',
        ):
            attrs = ['filename=' + fname, 'frequency=1D', 'time=09:45',
                     'description=invalid filename']
            self.assertFalse(Task(attrs).is_good())

    def test_init_bad_invalid_time(self):
        """Malformed time strings mark the task as bad."""
        for bad_time in ('0ssss9:45', '09:45:924355435', '09924355435'):
            attrs = [
                'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py',
                'frequency=1D',
                'time=' + bad_time,
                'description=invalid time',
            ]
            self.assertFalse(Task(attrs).is_good())

    def test_init_good_empty_time(self):
        """An empty time (and description) is still acceptable."""
        attrs = [
            'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py',
            'frequency=1D',
            'time=',
            'description=',
        ]
        self.assertTrue(Task(attrs).is_good())

    def test_init_bad_invalid_frequency(self):
        """Unparseable frequency values mark the task as bad."""
        for bad_freq in ('1Dhhhh', 'D', '2S', ''):
            attrs = [
                'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py',
                'frequency=' + bad_freq,
                'time=',
                'description=invalid frequency',
            ]
            self.assertFalse(Task(attrs).is_good())

    def test_init_good_zero_frequency(self):
        """A zero frequency ('0minute') is treated as valid."""
        attrs = [
            'filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py',
            'frequency=0minute',
            'time=',
            'description=invalid frequency',
        ]
        self.assertTrue(Task(attrs).is_good())
# Allow running this test module directly: `python <module>.py`.
if __name__ == '__main__':
    unittest.main()
|
flexible
|
{
"blob_id": "86c053b7d4c752182965755ad5b6ba6937ce6f86",
"index": 5984,
"step-1": "<mask token>\n\n\nclass TestTask(unittest.TestCase):\n <mask token>\n\n def test_init_bad_invalid_filename(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\sadfsda.py'\n , 'frequency=1D', 'time=09:45', 'description=invalid filename']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = ['filename=', 'frequency=1D', 'time=09:45',\n 'description=invalid filename']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestTask(unittest.TestCase):\n\n def test_init(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=09:45', 'description=invalid time']\n task = Task(task_attr)\n self.assertEqual(task.basename(), 'dummy_task2.py')\n self.assertEqual(task.frequency, str('1D').lower())\n self.assertEqual(task.filename,\n 'C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n )\n self.assertEqual(task.time, '09:45')\n self.assertTrue(task.is_good())\n\n def test_init_bad_invalid_filename(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\sadfsda.py'\n , 'frequency=1D', 'time=09:45', 'description=invalid filename']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = ['filename=', 'frequency=1D', 'time=09:45',\n 'description=invalid filename']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n def test_init_bad_invalid_time(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=0ssss9:45', 'description=invalid time']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=09:45:924355435',\n 'description=invalid time']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=09924355435', 'description=invalid time']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n <mask token>\n <mask token>\n\n def test_init_good_zero_frequency(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=0minute', 'time=', 'description=invalid frequency']\n 
task = Task(task_attr)\n self.assertTrue(task.is_good())\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestTask(unittest.TestCase):\n\n def test_init(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=09:45', 'description=invalid time']\n task = Task(task_attr)\n self.assertEqual(task.basename(), 'dummy_task2.py')\n self.assertEqual(task.frequency, str('1D').lower())\n self.assertEqual(task.filename,\n 'C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n )\n self.assertEqual(task.time, '09:45')\n self.assertTrue(task.is_good())\n\n def test_init_bad_invalid_filename(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\sadfsda.py'\n , 'frequency=1D', 'time=09:45', 'description=invalid filename']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = ['filename=', 'frequency=1D', 'time=09:45',\n 'description=invalid filename']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n def test_init_bad_invalid_time(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=0ssss9:45', 'description=invalid time']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=09:45:924355435',\n 'description=invalid time']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=09924355435', 'description=invalid time']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n <mask token>\n\n def test_init_bad_invalid_frequency(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1Dhhhh', 'time=', 'description=invalid frequency']\n task = 
Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=D', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=2S', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n def test_init_good_zero_frequency(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=0minute', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertTrue(task.is_good())\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass TestTask(unittest.TestCase):\n\n def test_init(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=09:45', 'description=invalid time']\n task = Task(task_attr)\n self.assertEqual(task.basename(), 'dummy_task2.py')\n self.assertEqual(task.frequency, str('1D').lower())\n self.assertEqual(task.filename,\n 'C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n )\n self.assertEqual(task.time, '09:45')\n self.assertTrue(task.is_good())\n\n def test_init_bad_invalid_filename(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\sadfsda.py'\n , 'frequency=1D', 'time=09:45', 'description=invalid filename']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = ['filename=', 'frequency=1D', 'time=09:45',\n 'description=invalid filename']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n def test_init_bad_invalid_time(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=0ssss9:45', 'description=invalid time']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=09:45:924355435',\n 'description=invalid time']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=09924355435', 'description=invalid time']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n def test_init_good_empty_time(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=', 'description=']\n task = Task(task_attr)\n 
self.assertTrue(task.is_good())\n\n def test_init_bad_invalid_frequency(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1Dhhhh', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=D', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=2S', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n def test_init_good_zero_frequency(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=0minute', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertTrue(task.is_good())\n\n\n<mask token>\n",
"step-5": "import unittest\nfrom dispatcher.task import *\nfrom mock import *\n\nclass TestTask(unittest.TestCase):\n def test_init(self):\n task_attr = ['filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py',\n 'frequency=1D', 'time=09:45', 'description=invalid time']\n task = Task(task_attr)\n self.assertEqual(task.basename(), 'dummy_task2.py')\n self.assertEqual(task.frequency, str('1D').lower())\n self.assertEqual(task.filename, 'C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py')\n self.assertEqual(task.time, '09:45')\n self.assertTrue(task.is_good())\n\n def test_init_bad_invalid_filename(self):\n task_attr = ['filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\sadfsda.py',\n 'frequency=1D', 'time=09:45', 'description=invalid filename']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n task_attr = ['filename=',\n 'frequency=1D', 'time=09:45', 'description=invalid filename']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n def test_init_bad_invalid_time(self):\n task_attr = ['filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py',\n 'frequency=1D', 'time=0ssss9:45', 'description=invalid time']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n task_attr = ['filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py',\n 'frequency=1D', 'time=09:45:924355435', 'description=invalid time']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n task_attr = ['filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py',\n 'frequency=1D', 'time=09924355435', 'description=invalid time']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n def test_init_good_empty_time(self):\n task_attr = ['filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py',\n 'frequency=1D', 'time=', 'description=']\n task = Task(task_attr)\n 
self.assertTrue(task.is_good())\n\n def test_init_bad_invalid_frequency(self):\n task_attr = ['filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py',\n 'frequency=1Dhhhh', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n task_attr = ['filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py',\n 'frequency=D', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n task_attr = ['filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py',\n 'frequency=2S', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n task_attr = ['filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py',\n 'frequency=', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n def test_init_good_zero_frequency(self):\n task_attr = ['filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py',\n 'frequency=0minute', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertTrue(task.is_good())\n\nif __name__ == '__main__':\n unittest.main()",
"step-ids": [
2,
5,
6,
7,
10
]
}
|
[
2,
5,
6,
7,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_last(s):
    """Return max(s), or *s* itself when no maximum can be taken.

    Used as a pandas groupby transform over month strings such as
    '2018-03'; these sort lexicographically, so max() yields the most
    recent month.  Non-iterable or empty inputs are returned unchanged
    rather than raising.
    """
    try:
        return max(s)
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed.  TypeError (non-iterable input) and
        # ValueError (empty sequence) still hit this fallback.
        return s
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Output workbook: monthly in-hospital drug usage summary.
OUTPUT_EXCEL = '월별원내약품사용현황.xlsx'
# Directory holding one Excel file per month of usage statistics.
data_source_dir = '사용량월별통계/원내'
# Load every monthly spreadsheet; the file name (without extension) encodes
# the usage month and is carried along as the '사용(개시)년월' column.
dfs = []
for fname in os.listdir(data_source_dir):
    fn, ext = os.path.splitext(fname)
    if ext in ['.xls', '.xlsx']:
        df = pd.read_excel(os.path.join(data_source_dir, fname))
        df['사용(개시)년월'] = fn
        dfs.append(df)
use_amount_df = pd.concat(dfs, ignore_index=True)
# Reference data: drug-standard master (JSON, transposed so rows are drugs)
# and the in-house drug info sheet.
drug_standard_df = pd.read_json('drug.json').T
drug_info_df = pd.read_excel('약품정보.xls')
# Attach usage amounts to drug info (left join on drug code), then enrich
# with the drug-standard master via the EDI/insurance code.
use_amount_df = pd.merge(drug_info_df, use_amount_df[['사용량', '약품코드',
    '사용(개시)년월']], on='약품코드', how='left')
use_amount_df = pd.merge(use_amount_df, drug_standard_df[['보험코드', '제품명',
    '판매사', '성분/함량']], left_on='EDI코드', right_on='보험코드', how='left')
# Fall back to in-house values where the master has gaps.
use_amount_df['제품명'] = use_amount_df['제품명'].fillna(use_amount_df['약품명(한글)'])
# Derive a 'YYYY-MM' start month from the fee start date (YYYYMMDD-like),
# used when the usage month is missing.
use_amount_df['사용개시년월'] = use_amount_df['수가시작일자'].map(lambda x: str(x)[0:4] +
    '-' + str(x)[4:6])
use_amount_df['사용(개시)년월'] = use_amount_df['사용(개시)년월'].fillna(use_amount_df[
    '사용개시년월'])
use_amount_df['성분명'] = use_amount_df['성분명'].fillna(use_amount_df['성분/함량'])
# Decode numeric flags into labels (prescription scope; legal drug class).
use_amount_df['원내/원외 처방구분'] = use_amount_df['원내/원외 처방구분'].map({(1): '원외', (
    2): '원외/원내', (3): '원내'})
use_amount_df['약품법적구분'] = use_amount_df['약품법적구분'].map({(0): '일반', (1): '마약',
    (2): '향정약', (3): '독약', (4): '한방약', (5): '고가약'})
def get_last(s):
    """Return max(s), or *s* itself when no maximum can be taken.

    Used as a pandas groupby transform over month strings such as
    '2018-03'; these sort lexicographically, so max() yields the most
    recent month.  Non-iterable or empty inputs are returned unchanged
    rather than raising.
    """
    try:
        return max(s)
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed.  TypeError (non-iterable input) and
        # ValueError (empty sequence) still hit this fallback.
        return s
# Months present in the data, newest first; a drug's index into this list
# equals the number of months since it was last used.
months = use_amount_df['사용(개시)년월'].unique()
months = sorted(months.tolist(), reverse=1)
use_amount_df['최후사용월'] = use_amount_df.groupby(['제품명'])['사용(개시)년월'].transform(
    get_last)
use_amount_df['최근미사용월수'] = use_amount_df['최후사용월'].map(lambda x: months.
    index(x) if x in months else -1)
# Keep only in-hospital prescriptions (drop outpatient-only rows).
use_amount_in_df = use_amount_df[use_amount_df['원내/원외 처방구분'] != '원외']
use_amount_in_df['사용량'] = use_amount_in_df['사용량'].fillna('오픈후미사용')
# Regex stripping parenthesised notes and incentive/subsidy boilerplate
# from drug names.
pat = (
    '(\\(([^\\d].*?)\\)+\\s*)|퇴장방지\\s*|생산원가보전,*\\s*|사용장려(비\\s*\\d+원|및|비용지급,*\\s*)'
    )
use_amount_in_df = use_amount_in_df.rename(columns={'제품명': '약품명(드럭인포)',
    '약품명(한글)': '약품명(원내)'})
use_amount_in_df['약품명(드럭인포)'] = use_amount_in_df['약품명(드럭인포)'].str.replace(pat,
    '')
# Pivot: one row per drug, one column per usage month, amounts summed.
pvt = use_amount_in_df.pivot_table(index=['EDI코드', '약품명(드럭인포)', '성분명',
    '약품코드', '약품명(원내)', '효능코드명', '규격단위', '최근미사용월수'], columns=['사용(개시)년월'],
    values=['사용량'], aggfunc=sum)
pvt.to_excel(OUTPUT_EXCEL)
os.startfile(OUTPUT_EXCEL)  # Windows-only: opens the workbook in Excel.
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import numpy as np
from pandas import DataFrame, Series
import os, re
# Output workbook: monthly in-hospital drug usage summary.
OUTPUT_EXCEL = '월별원내약품사용현황.xlsx'
# Directory holding one Excel file per month of usage statistics.
data_source_dir = '사용량월별통계/원내'
# Load every monthly spreadsheet; the file name (without extension) encodes
# the usage month and is carried along as the '사용(개시)년월' column.
dfs = []
for fname in os.listdir(data_source_dir):
    fn, ext = os.path.splitext(fname)
    if ext in ['.xls', '.xlsx']:
        df = pd.read_excel(os.path.join(data_source_dir, fname))
        df['사용(개시)년월'] = fn
        dfs.append(df)
use_amount_df = pd.concat(dfs, ignore_index=True)
# Reference data: drug-standard master (JSON, transposed so rows are drugs)
# and the in-house drug info sheet.
drug_standard_df = pd.read_json('drug.json').T
drug_info_df = pd.read_excel('약품정보.xls')
# Attach usage amounts to drug info (left join on drug code), then enrich
# with the drug-standard master via the EDI/insurance code.
use_amount_df = pd.merge(drug_info_df, use_amount_df[['사용량', '약품코드',
    '사용(개시)년월']], on='약품코드', how='left')
use_amount_df = pd.merge(use_amount_df, drug_standard_df[['보험코드', '제품명',
    '판매사', '성분/함량']], left_on='EDI코드', right_on='보험코드', how='left')
# Fall back to in-house values where the master has gaps.
use_amount_df['제품명'] = use_amount_df['제품명'].fillna(use_amount_df['약품명(한글)'])
# Derive a 'YYYY-MM' start month from the fee start date (YYYYMMDD-like),
# used when the usage month is missing.
use_amount_df['사용개시년월'] = use_amount_df['수가시작일자'].map(lambda x: str(x)[0:4] +
    '-' + str(x)[4:6])
use_amount_df['사용(개시)년월'] = use_amount_df['사용(개시)년월'].fillna(use_amount_df[
    '사용개시년월'])
use_amount_df['성분명'] = use_amount_df['성분명'].fillna(use_amount_df['성분/함량'])
# Decode numeric flags into labels (prescription scope; legal drug class).
use_amount_df['원내/원외 처방구분'] = use_amount_df['원내/원외 처방구분'].map({(1): '원외', (
    2): '원외/원내', (3): '원내'})
use_amount_df['약품법적구분'] = use_amount_df['약품법적구분'].map({(0): '일반', (1): '마약',
    (2): '향정약', (3): '독약', (4): '한방약', (5): '고가약'})
def get_last(s):
    """Return max(s), or *s* itself when no maximum can be taken.

    Used as a pandas groupby transform over month strings such as
    '2018-03'; these sort lexicographically, so max() yields the most
    recent month.  Non-iterable or empty inputs are returned unchanged
    rather than raising.
    """
    try:
        return max(s)
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed.  TypeError (non-iterable input) and
        # ValueError (empty sequence) still hit this fallback.
        return s
# Months present in the data, newest first; a drug's index into this list
# equals the number of months since it was last used.
months = use_amount_df['사용(개시)년월'].unique()
months = sorted(months.tolist(), reverse=1)
use_amount_df['최후사용월'] = use_amount_df.groupby(['제품명'])['사용(개시)년월'].transform(
    get_last)
use_amount_df['최근미사용월수'] = use_amount_df['최후사용월'].map(lambda x: months.
    index(x) if x in months else -1)
# Keep only in-hospital prescriptions (drop outpatient-only rows).
use_amount_in_df = use_amount_df[use_amount_df['원내/원외 처방구분'] != '원외']
use_amount_in_df['사용량'] = use_amount_in_df['사용량'].fillna('오픈후미사용')
# Regex stripping parenthesised notes and incentive/subsidy boilerplate
# from drug names.
pat = (
    '(\\(([^\\d].*?)\\)+\\s*)|퇴장방지\\s*|생산원가보전,*\\s*|사용장려(비\\s*\\d+원|및|비용지급,*\\s*)'
    )
use_amount_in_df = use_amount_in_df.rename(columns={'제품명': '약품명(드럭인포)',
    '약품명(한글)': '약품명(원내)'})
use_amount_in_df['약품명(드럭인포)'] = use_amount_in_df['약품명(드럭인포)'].str.replace(pat,
    '')
# Pivot: one row per drug, one column per usage month, amounts summed.
pvt = use_amount_in_df.pivot_table(index=['EDI코드', '약품명(드럭인포)', '성분명',
    '약품코드', '약품명(원내)', '효능코드명', '규격단위', '최근미사용월수'], columns=['사용(개시)년월'],
    values=['사용량'], aggfunc=sum)
pvt.to_excel(OUTPUT_EXCEL)
os.startfile(OUTPUT_EXCEL)  # Windows-only: opens the workbook in Excel.
<|reserved_special_token_1|>
# coding: utf-8
# In[1]:
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import numpy as np
from pandas import DataFrame, Series
import os, re
# In[2]:
OUTPUT_EXCEL = '월별원내약품사용현황.xlsx'  # output workbook: monthly in-hospital drug usage

# In[3]:

# Prepare the dataset: one Excel file per month; the file name (without
# extension) is the usage month, carried as the '사용(개시)년월' column.
data_source_dir = '사용량월별통계/원내'
dfs = []
for fname in os.listdir(data_source_dir):
    fn, ext = os.path.splitext(fname)
    if ext in ['.xls', '.xlsx']:
        df = pd.read_excel(os.path.join(data_source_dir, fname))
        df['사용(개시)년월'] = fn
        dfs.append(df)
use_amount_df = pd.concat(dfs, ignore_index=True)

# In[4]:

# Enrich usage rows with the drug-standard master (JSON, transposed) and the
# in-house drug info sheet, then fill gaps from whichever source has a value.
drug_standard_df = pd.read_json('drug.json').T
drug_info_df = pd.read_excel('약품정보.xls')
use_amount_df = pd.merge(drug_info_df, use_amount_df[['사용량', '약품코드', '사용(개시)년월']], on='약품코드', how='left')
use_amount_df = pd.merge(use_amount_df, drug_standard_df[['보험코드', '제품명', '판매사', '성분/함량']], left_on='EDI코드', right_on='보험코드', how='left')
use_amount_df['제품명'] = use_amount_df['제품명'].fillna(use_amount_df['약품명(한글)'])
# Build 'YYYY-MM' from the fee start date (YYYYMMDD-like) and use it when
# the usage month is missing.
use_amount_df['사용개시년월'] = use_amount_df['수가시작일자'].map(lambda x: str(x)[0:4]+'-'+str(x)[4:6])
use_amount_df['사용(개시)년월'] = use_amount_df['사용(개시)년월'].fillna(use_amount_df['사용개시년월'])
use_amount_df['성분명'] = use_amount_df['성분명'].fillna(use_amount_df['성분/함량'])
# Decode numeric flags into labels (prescription scope; legal drug class).
use_amount_df['원내/원외 처방구분'] = use_amount_df['원내/원외 처방구분'].map({1: '원외', 2: '원외/원내', 3: '원내'})
use_amount_df['약품법적구분'] = use_amount_df['약품법적구분'].map({0: '일반', 1: '마약', 2: '향정약', 3: '독약', 4: '한방약', 5: '고가약'})
# In[5]:
def get_last(s):
    """Return max(s), or *s* itself when no maximum can be taken.

    Used as a pandas groupby transform over month strings such as
    '2018-03'; these sort lexicographically, so max() yields the most
    recent month.  Non-iterable or empty inputs are returned unchanged
    rather than raising.
    """
    try:
        return max(s)
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed.  TypeError (non-iterable input) and
        # ValueError (empty sequence) still hit this fallback.
        return s
# In[6]:
# Months present in the data, newest first; a drug's index into this list
# equals the number of months since it was last used.
months = use_amount_df['사용(개시)년월'].unique()
months = sorted(months.tolist(), reverse=1)
use_amount_df['최후사용월'] = use_amount_df.groupby(['제품명'])['사용(개시)년월'].transform(get_last)
use_amount_df['최근미사용월수'] = use_amount_df['최후사용월'].map(lambda x: months.index(x) if x in months else -1)

# In[7]:

# Keep only in-hospital prescriptions (drop outpatient-only rows).
use_amount_in_df = use_amount_df[use_amount_df['원내/원외 처방구분'] != '원외']

# In[8]:

use_amount_in_df['사용량'] = use_amount_in_df['사용량'].fillna('오픈후미사용')

# In[9]:

# Strip parenthesised notes and incentive/subsidy boilerplate from names.
pat = '(\(([^\d].*?)\)+\s*)|퇴장방지\s*|생산원가보전,*\s*|사용장려(비\s*\d+원|및|비용지급,*\s*)'
use_amount_in_df = use_amount_in_df.rename(columns={'제품명': '약품명(드럭인포)', '약품명(한글)': '약품명(원내)'})
use_amount_in_df['약품명(드럭인포)'] = use_amount_in_df['약품명(드럭인포)'].str.replace(pat, '')

# In[10]:

# Pivot: one row per drug, one column per usage month, amounts summed.
pvt = use_amount_in_df.pivot_table(index = ['EDI코드','약품명(드럭인포)', '성분명','약품코드','약품명(원내)','효능코드명','규격단위', '최근미사용월수'], columns=['사용(개시)년월'], values=['사용량'], aggfunc=sum)

# In[11]:

pvt.to_excel(OUTPUT_EXCEL)
os.startfile(OUTPUT_EXCEL)  # Windows-only: opens the workbook in Excel.
# In[ ]:
|
flexible
|
{
"blob_id": "16b425d7b8cde1aabe038ccae6922091afb84415",
"index": 411,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_last(s):\n try:\n return max(s)\n except:\n return s\n\n\n<mask token>\n",
"step-3": "<mask token>\nOUTPUT_EXCEL = '월별원내약품사용현황.xlsx'\ndata_source_dir = '사용량월별통계/원내'\ndfs = []\nfor fname in os.listdir(data_source_dir):\n fn, ext = os.path.splitext(fname)\n if ext in ['.xls', '.xlsx']:\n df = pd.read_excel(os.path.join(data_source_dir, fname))\n df['사용(개시)년월'] = fn\n dfs.append(df)\nuse_amount_df = pd.concat(dfs, ignore_index=True)\ndrug_standard_df = pd.read_json('drug.json').T\ndrug_info_df = pd.read_excel('약품정보.xls')\nuse_amount_df = pd.merge(drug_info_df, use_amount_df[['사용량', '약품코드',\n '사용(개시)년월']], on='약품코드', how='left')\nuse_amount_df = pd.merge(use_amount_df, drug_standard_df[['보험코드', '제품명',\n '판매사', '성분/함량']], left_on='EDI코드', right_on='보험코드', how='left')\nuse_amount_df['제품명'] = use_amount_df['제품명'].fillna(use_amount_df['약품명(한글)'])\nuse_amount_df['사용개시년월'] = use_amount_df['수가시작일자'].map(lambda x: str(x)[0:4] +\n '-' + str(x)[4:6])\nuse_amount_df['사용(개시)년월'] = use_amount_df['사용(개시)년월'].fillna(use_amount_df[\n '사용개시년월'])\nuse_amount_df['성분명'] = use_amount_df['성분명'].fillna(use_amount_df['성분/함량'])\nuse_amount_df['원내/원외 처방구분'] = use_amount_df['원내/원외 처방구분'].map({(1): '원외', (\n 2): '원외/원내', (3): '원내'})\nuse_amount_df['약품법적구분'] = use_amount_df['약품법적구분'].map({(0): '일반', (1): '마약',\n (2): '향정약', (3): '독약', (4): '한방약', (5): '고가약'})\n\n\ndef get_last(s):\n try:\n return max(s)\n except:\n return s\n\n\nmonths = use_amount_df['사용(개시)년월'].unique()\nmonths = sorted(months.tolist(), reverse=1)\nuse_amount_df['최후사용월'] = use_amount_df.groupby(['제품명'])['사용(개시)년월'].transform(\n get_last)\nuse_amount_df['최근미사용월수'] = use_amount_df['최후사용월'].map(lambda x: months.\n index(x) if x in months else -1)\nuse_amount_in_df = use_amount_df[use_amount_df['원내/원외 처방구분'] != '원외']\nuse_amount_in_df['사용량'] = use_amount_in_df['사용량'].fillna('오픈후미사용')\npat = (\n '(\\\\(([^\\\\d].*?)\\\\)+\\\\s*)|퇴장방지\\\\s*|생산원가보전,*\\\\s*|사용장려(비\\\\s*\\\\d+원|및|비용지급,*\\\\s*)'\n )\nuse_amount_in_df = use_amount_in_df.rename(columns={'제품명': '약품명(드럭인포)',\n '약품명(한글)': 
'약품명(원내)'})\nuse_amount_in_df['약품명(드럭인포)'] = use_amount_in_df['약품명(드럭인포)'].str.replace(pat,\n '')\npvt = use_amount_in_df.pivot_table(index=['EDI코드', '약품명(드럭인포)', '성분명',\n '약품코드', '약품명(원내)', '효능코드명', '규격단위', '최근미사용월수'], columns=['사용(개시)년월'],\n values=['사용량'], aggfunc=sum)\npvt.to_excel(OUTPUT_EXCEL)\nos.startfile(OUTPUT_EXCEL)\n",
"step-4": "import matplotlib.pyplot as plt\nimport matplotlib\nimport pandas as pd\nimport numpy as np\nfrom pandas import DataFrame, Series\nimport os, re\nOUTPUT_EXCEL = '월별원내약품사용현황.xlsx'\ndata_source_dir = '사용량월별통계/원내'\ndfs = []\nfor fname in os.listdir(data_source_dir):\n fn, ext = os.path.splitext(fname)\n if ext in ['.xls', '.xlsx']:\n df = pd.read_excel(os.path.join(data_source_dir, fname))\n df['사용(개시)년월'] = fn\n dfs.append(df)\nuse_amount_df = pd.concat(dfs, ignore_index=True)\ndrug_standard_df = pd.read_json('drug.json').T\ndrug_info_df = pd.read_excel('약품정보.xls')\nuse_amount_df = pd.merge(drug_info_df, use_amount_df[['사용량', '약품코드',\n '사용(개시)년월']], on='약품코드', how='left')\nuse_amount_df = pd.merge(use_amount_df, drug_standard_df[['보험코드', '제품명',\n '판매사', '성분/함량']], left_on='EDI코드', right_on='보험코드', how='left')\nuse_amount_df['제품명'] = use_amount_df['제품명'].fillna(use_amount_df['약품명(한글)'])\nuse_amount_df['사용개시년월'] = use_amount_df['수가시작일자'].map(lambda x: str(x)[0:4] +\n '-' + str(x)[4:6])\nuse_amount_df['사용(개시)년월'] = use_amount_df['사용(개시)년월'].fillna(use_amount_df[\n '사용개시년월'])\nuse_amount_df['성분명'] = use_amount_df['성분명'].fillna(use_amount_df['성분/함량'])\nuse_amount_df['원내/원외 처방구분'] = use_amount_df['원내/원외 처방구분'].map({(1): '원외', (\n 2): '원외/원내', (3): '원내'})\nuse_amount_df['약품법적구분'] = use_amount_df['약품법적구분'].map({(0): '일반', (1): '마약',\n (2): '향정약', (3): '독약', (4): '한방약', (5): '고가약'})\n\n\ndef get_last(s):\n try:\n return max(s)\n except:\n return s\n\n\nmonths = use_amount_df['사용(개시)년월'].unique()\nmonths = sorted(months.tolist(), reverse=1)\nuse_amount_df['최후사용월'] = use_amount_df.groupby(['제품명'])['사용(개시)년월'].transform(\n get_last)\nuse_amount_df['최근미사용월수'] = use_amount_df['최후사용월'].map(lambda x: months.\n index(x) if x in months else -1)\nuse_amount_in_df = use_amount_df[use_amount_df['원내/원외 처방구분'] != '원외']\nuse_amount_in_df['사용량'] = use_amount_in_df['사용량'].fillna('오픈후미사용')\npat = (\n 
'(\\\\(([^\\\\d].*?)\\\\)+\\\\s*)|퇴장방지\\\\s*|생산원가보전,*\\\\s*|사용장려(비\\\\s*\\\\d+원|및|비용지급,*\\\\s*)'\n )\nuse_amount_in_df = use_amount_in_df.rename(columns={'제품명': '약품명(드럭인포)',\n '약품명(한글)': '약품명(원내)'})\nuse_amount_in_df['약품명(드럭인포)'] = use_amount_in_df['약품명(드럭인포)'].str.replace(pat,\n '')\npvt = use_amount_in_df.pivot_table(index=['EDI코드', '약품명(드럭인포)', '성분명',\n '약품코드', '약품명(원내)', '효능코드명', '규격단위', '최근미사용월수'], columns=['사용(개시)년월'],\n values=['사용량'], aggfunc=sum)\npvt.to_excel(OUTPUT_EXCEL)\nos.startfile(OUTPUT_EXCEL)\n",
"step-5": "\n# coding: utf-8\n\n# In[1]:\n\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport pandas as pd\nimport numpy as np\nfrom pandas import DataFrame, Series\nimport os, re\n\n\n# In[2]:\n\nOUTPUT_EXCEL = '월별원내약품사용현황.xlsx'\n\n\n# In[3]:\n\n# 데이타셋 준비\ndata_source_dir = '사용량월별통계/원내'\ndfs = []\nfor fname in os.listdir(data_source_dir):\n fn, ext = os.path.splitext(fname)\n if ext in ['.xls', '.xlsx']:\n df = pd.read_excel(os.path.join(data_source_dir, fname))\n df['사용(개시)년월'] = fn\n dfs.append(df)\nuse_amount_df = pd.concat(dfs, ignore_index=True)\n\n\n# In[4]:\n\ndrug_standard_df = pd.read_json('drug.json').T\n\ndrug_info_df = pd.read_excel('약품정보.xls')\n\nuse_amount_df = pd.merge(drug_info_df, use_amount_df[['사용량', '약품코드', '사용(개시)년월']], on='약품코드', how='left')\n\nuse_amount_df = pd.merge(use_amount_df, drug_standard_df[['보험코드', '제품명', '판매사', '성분/함량']], left_on='EDI코드', right_on='보험코드', how='left')\n\nuse_amount_df['제품명'] = use_amount_df['제품명'].fillna(use_amount_df['약품명(한글)'])\n\nuse_amount_df['사용개시년월'] = use_amount_df['수가시작일자'].map(lambda x: str(x)[0:4]+'-'+str(x)[4:6])\n\nuse_amount_df['사용(개시)년월'] = use_amount_df['사용(개시)년월'].fillna(use_amount_df['사용개시년월'])\nuse_amount_df['성분명'] = use_amount_df['성분명'].fillna(use_amount_df['성분/함량'])\n\nuse_amount_df['원내/원외 처방구분'] = use_amount_df['원내/원외 처방구분'].map({1: '원외', 2: '원외/원내', 3: '원내'})\nuse_amount_df['약품법적구분'] = use_amount_df['약품법적구분'].map({0: '일반', 1: '마약', 2: '향정약', 3: '독약', 4: '한방약', 5: '고가약'})\n\n\n# In[5]:\n\ndef get_last(s):\n try:\n return max(s)\n except:\n return s\n\n\n# In[6]:\n\nmonths = use_amount_df['사용(개시)년월'].unique()\nmonths = sorted(months.tolist(), reverse=1)\nuse_amount_df['최후사용월'] = use_amount_df.groupby(['제품명'])['사용(개시)년월'].transform(get_last)\nuse_amount_df['최근미사용월수'] = use_amount_df['최후사용월'].map(lambda x: months.index(x) if x in months else -1)\n\n\n# In[7]:\n\nuse_amount_in_df = use_amount_df[use_amount_df['원내/원외 처방구분'] != '원외']\n\n\n# In[8]:\n\nuse_amount_in_df['사용량'] = 
use_amount_in_df['사용량'].fillna('오픈후미사용')\n\n\n# In[9]:\n\npat = '(\\(([^\\d].*?)\\)+\\s*)|퇴장방지\\s*|생산원가보전,*\\s*|사용장려(비\\s*\\d+원|및|비용지급,*\\s*)'\nuse_amount_in_df = use_amount_in_df.rename(columns={'제품명': '약품명(드럭인포)', '약품명(한글)': '약품명(원내)'})\nuse_amount_in_df['약품명(드럭인포)'] = use_amount_in_df['약품명(드럭인포)'].str.replace(pat, '')\n\n\n# In[10]:\n\npvt = use_amount_in_df.pivot_table(index = ['EDI코드','약품명(드럭인포)', '성분명','약품코드','약품명(원내)','효능코드명','규격단위', '최근미사용월수'], columns=['사용(개시)년월'], values=['사용량'], aggfunc=sum)\n\n\n# In[11]:\n\npvt.to_excel(OUTPUT_EXCEL)\nos.startfile(OUTPUT_EXCEL)\n\n\n# In[ ]:\n\n\n\n",
"step-ids": [
0,
1,
3,
4,
5
]
}
|
[
0,
1,
3,
4,
5
] |
<|reserved_special_token_0|>
class Beautyleg7Spider(scrapy.Spider):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def parse(self, response):
if self.db_session is None:
self.logger.error('db_session is None')
return None
repeated_count = 0
if response is None:
self.logger.warn('响应为空,不做处理!')
else:
album_nodes = response.css('.pic .item')
category = response.css('.sitepath a')[1].css('a::text'
).extract_first().strip()
is_persisted_last_item = self.redis_cmd.get(self.
album_last_item_redis_unique_key)
is_last_item_finished = False
if is_persisted_last_item is not None and int(
is_persisted_last_item):
is_last_item_finished = True
self.logger.info('已持久化最后一页的最后主题:%s' % self.
album_last_item_redis_unique_key)
album_last_page_url = response.meta.get('album_last_page_url')
if album_last_page_url is not None:
album_last_page_url_last_item_redis_suffix = album_nodes[-1
].css('.p a::attr(href)').extract_first()
self.album_last_item_redis_unique_key = (self.
ALBUM_URL_REDIS_KEY_PREFIX + self.REDIS_LIMITER + self.
sub_url_scheme(album_last_page_url, '') + self.
REDIS_LIMITER + self.sub_url_scheme(
album_last_page_url_last_item_redis_suffix, ''))
self.redis_cmd.setnx(self.album_last_item_redis_unique_key, 0)
for album_node in album_nodes:
album_url = album_node.css('.p a::attr(href)').extract_first(
).strip()
is_persisted = self.redis_cmd.get(album_url)
if is_persisted is not None and int(is_persisted):
self.logger.info('Redis中该url album_url:%s已持久化' % album_url)
continue
album_url_object_id = self.get_md5(album_url)
self.redis_cmd.setnx(album_url, 0)
count = 0
try:
count = self.db_session.query(func.count()).filter(
Album.album_url_object_id == album_url_object_id
).first()
if count:
count = count[0]
except Exception as e:
self.logger.error('查询数据库异常,原因:{}'.format(e))
finally:
self.db_session.rollback()
if count:
self.logger.info('数据库已有该数据album_url_object_id:%s' %
album_url_object_id)
repeated_count += 1
self.redis_cmd.set(album_url, 1, xx=True)
continue
else:
album_item = self.parse_album_item(album_node,
album_url, album_url_object_id, category)
yield response.follow(url=album_url, meta={'AlbumItem':
album_item}, callback=self.parse_detail)
selector_list = response.css('.page li a::attr(href)')
if not is_last_item_finished:
if selector_list:
last_page_url = None
current_url_page = response.xpath(
'//li[@class="thisclass"]//text()').extract_first()
if current_url_page and int(current_url_page) == 1:
last_page_url = selector_list[-1].extract()
next_url = selector_list[-2].extract()
if next_url == last_page_url:
album_last_page_url = response.urljoin(last_page_url)
self.logger.info('Last page:%s' % album_last_page_url)
else:
self.logger.info('Next page:%s' % response.urljoin(
next_url))
yield response.follow(url=next_url, meta={
'album_last_page_url': album_last_page_url},
callback=self.parse)
else:
self.logger.info('selector_list is None')
self.logger.info('重复次数:%s' % repeated_count)
else:
self.logger.info('Stop crawler. None Next page!')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_album_image_item_list(self, abs_next_page):
"""
使用下页绝对路径同步请求
:param abs_next_page:
:return:
"""
resp = requests.get(abs_next_page)
if resp.status_code == 200:
encoding = requests.utils.get_encodings_from_content(resp.text)
resp.encoding = encoding[0]
self.parse_album_image_item(etree.HTML(resp.text))
else:
self.logger.warn('下载此页{}失败,返回的状态码为{}'.format(abs_next_page,
resp.status_code))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Beautyleg7Spider(scrapy.Spider):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def parse(self, response):
if self.db_session is None:
self.logger.error('db_session is None')
return None
repeated_count = 0
if response is None:
self.logger.warn('响应为空,不做处理!')
else:
album_nodes = response.css('.pic .item')
category = response.css('.sitepath a')[1].css('a::text'
).extract_first().strip()
is_persisted_last_item = self.redis_cmd.get(self.
album_last_item_redis_unique_key)
is_last_item_finished = False
if is_persisted_last_item is not None and int(
is_persisted_last_item):
is_last_item_finished = True
self.logger.info('已持久化最后一页的最后主题:%s' % self.
album_last_item_redis_unique_key)
album_last_page_url = response.meta.get('album_last_page_url')
if album_last_page_url is not None:
album_last_page_url_last_item_redis_suffix = album_nodes[-1
].css('.p a::attr(href)').extract_first()
self.album_last_item_redis_unique_key = (self.
ALBUM_URL_REDIS_KEY_PREFIX + self.REDIS_LIMITER + self.
sub_url_scheme(album_last_page_url, '') + self.
REDIS_LIMITER + self.sub_url_scheme(
album_last_page_url_last_item_redis_suffix, ''))
self.redis_cmd.setnx(self.album_last_item_redis_unique_key, 0)
for album_node in album_nodes:
album_url = album_node.css('.p a::attr(href)').extract_first(
).strip()
is_persisted = self.redis_cmd.get(album_url)
if is_persisted is not None and int(is_persisted):
self.logger.info('Redis中该url album_url:%s已持久化' % album_url)
continue
album_url_object_id = self.get_md5(album_url)
self.redis_cmd.setnx(album_url, 0)
count = 0
try:
count = self.db_session.query(func.count()).filter(
Album.album_url_object_id == album_url_object_id
).first()
if count:
count = count[0]
except Exception as e:
self.logger.error('查询数据库异常,原因:{}'.format(e))
finally:
self.db_session.rollback()
if count:
self.logger.info('数据库已有该数据album_url_object_id:%s' %
album_url_object_id)
repeated_count += 1
self.redis_cmd.set(album_url, 1, xx=True)
continue
else:
album_item = self.parse_album_item(album_node,
album_url, album_url_object_id, category)
yield response.follow(url=album_url, meta={'AlbumItem':
album_item}, callback=self.parse_detail)
selector_list = response.css('.page li a::attr(href)')
if not is_last_item_finished:
if selector_list:
last_page_url = None
current_url_page = response.xpath(
'//li[@class="thisclass"]//text()').extract_first()
if current_url_page and int(current_url_page) == 1:
last_page_url = selector_list[-1].extract()
next_url = selector_list[-2].extract()
if next_url == last_page_url:
album_last_page_url = response.urljoin(last_page_url)
self.logger.info('Last page:%s' % album_last_page_url)
else:
self.logger.info('Next page:%s' % response.urljoin(
next_url))
yield response.follow(url=next_url, meta={
'album_last_page_url': album_last_page_url},
callback=self.parse)
else:
self.logger.info('selector_list is None')
self.logger.info('重复次数:%s' % repeated_count)
else:
self.logger.info('Stop crawler. None Next page!')
def parse_album_item(self, album_node, album_url, album_url_object_id,
category):
album_title = album_node.css('.p a img::attr(alt)').extract_first(
).strip()
cover_url = album_node.css('.p a img::attr(src)').extract_first(
).strip()
regex = '\\d+\\.\\d+.\\d+\\s+No\\.\\d+|\\d+\\-\\d+-\\d+\\s+No\\.\\d+'
number_group = re.findall(regex, album_title)
if len(number_group) > 0:
number = number_group[0]
else:
number = 'No.unknown'
create_date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
album_item = AlbumItem()
album_item['category'] = category
album_item['album_url'] = album_url
album_item['album_url_object_id'] = album_url_object_id
album_item['album_title'] = album_title
album_item['cover_url'] = cover_url
album_item['number'] = number
album_item['create_date'] = create_date
return album_item
def parse_detail(self, response):
self.album_item = response.meta.get('AlbumItem')
self.album_image_relation_item['album_item'] = self.album_item
self.parse_album_image_item(response)
relative_next_page_list = response.css('.page li a::attr(href)'
).extract()
next_page_threads = [self.gevent_pool.spawn(self.
get_album_image_item_list, response.urljoin(relative_next_page)
) for relative_next_page in relative_next_page_list[2:-1]]
gevent.joinall(next_page_threads)
self.album_image_relation_item['album_image_item_list'
] = self.album_image_item_list
self.album_image_item_list = []
yield self.album_image_relation_item
def get_album_image_item_list(self, abs_next_page):
"""
使用下页绝对路径同步请求
:param abs_next_page:
:return:
"""
resp = requests.get(abs_next_page)
if resp.status_code == 200:
encoding = requests.utils.get_encodings_from_content(resp.text)
resp.encoding = encoding[0]
self.parse_album_image_item(etree.HTML(resp.text))
else:
self.logger.warn('下载此页{}失败,返回的状态码为{}'.format(abs_next_page,
resp.status_code))
def parse_album_image_item(self, response):
"""
解析item并返回给pipelines
:param response: 如果response类型是继承自scrapy的TextResponse类则使用scrapy的Selector来解析,否则使用lxml来解析
:return:
"""
if isinstance(response, HtmlResponse):
item_title = response.xpath('//div[@class="content"]/h1/text()'
).extract_first().strip()
publish_date = response.xpath('//div[@class="tit"]/span/text()'
).extract_first().split(':')[1]
image_link_list = response.css('.contents a img::attr(src)'
).extract()
else:
item_title = response.xpath('//div[@class="content"]/h1/text()')[0
].strip()
publish_date = response.xpath('//div[@class="tit"]/span/text()')[0
].split(':')[1]
image_link_list = response.xpath('//div[@class="contents"]/a/img')
image_link_list = [image_link.attrib['src'] for image_link in
image_link_list]
regex = '\\s?\\w+[^\\w]?'
regex_group = re.findall(regex, item_title)
stage_name = 'unknown'
if len(regex_group) > 0:
str = regex_group[-1]
if '[' in str:
stage_name = str.split('[')[0].strip()
elif '(' in str:
stage_name = str.split('(')[0].strip()
elif re.match('[^\\d*]', str):
stage_name = re.match('[^\\d*]', str).group()
for image_url in image_link_list:
album_image_item = AlbumImageItem()
album_image_item['item_url'] = image_url
album_image_item['item_url_object_id'] = self.get_md5(image_url)
item_url_list_json = '{}'
album_image_item['item_url_list_json'] = item_url_list_json
album_image_item['item_title'] = item_title
album_image_item['stage_name'] = stage_name
album_image_item['publish_date'] = publish_date
self.album_image_item_list.append(album_image_item)
return self.album_image_item_list
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Beautyleg7Spider(scrapy.Spider):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def start_requests(self):
mysql_host = self.crawler.settings.get('MYSQL_HOST')
mysql_port = self.crawler.settings.get('MYSQL_PORT')
mysql_user = self.crawler.settings.get('MYSQL_USER')
mysql_password = self.crawler.settings.get('MYSQL_PASSWORD')
mysql_db_name = self.crawler.settings.get('MYSQL_DB_NAME')
engine = create_engine('mysql+mysqlconnector://{}:{}@{}:{}/{}'.
format(mysql_user, mysql_password, mysql_host, mysql_port,
mysql_db_name), pool_recycle=180, echo=False)
session_maker = sessionmaker(bind=engine)
self.db_session = session_maker()
for url in self.start_urls:
yield scrapy.Request(url)
def parse(self, response):
if self.db_session is None:
self.logger.error('db_session is None')
return None
repeated_count = 0
if response is None:
self.logger.warn('响应为空,不做处理!')
else:
album_nodes = response.css('.pic .item')
category = response.css('.sitepath a')[1].css('a::text'
).extract_first().strip()
is_persisted_last_item = self.redis_cmd.get(self.
album_last_item_redis_unique_key)
is_last_item_finished = False
if is_persisted_last_item is not None and int(
is_persisted_last_item):
is_last_item_finished = True
self.logger.info('已持久化最后一页的最后主题:%s' % self.
album_last_item_redis_unique_key)
album_last_page_url = response.meta.get('album_last_page_url')
if album_last_page_url is not None:
album_last_page_url_last_item_redis_suffix = album_nodes[-1
].css('.p a::attr(href)').extract_first()
self.album_last_item_redis_unique_key = (self.
ALBUM_URL_REDIS_KEY_PREFIX + self.REDIS_LIMITER + self.
sub_url_scheme(album_last_page_url, '') + self.
REDIS_LIMITER + self.sub_url_scheme(
album_last_page_url_last_item_redis_suffix, ''))
self.redis_cmd.setnx(self.album_last_item_redis_unique_key, 0)
for album_node in album_nodes:
album_url = album_node.css('.p a::attr(href)').extract_first(
).strip()
is_persisted = self.redis_cmd.get(album_url)
if is_persisted is not None and int(is_persisted):
self.logger.info('Redis中该url album_url:%s已持久化' % album_url)
continue
album_url_object_id = self.get_md5(album_url)
self.redis_cmd.setnx(album_url, 0)
count = 0
try:
count = self.db_session.query(func.count()).filter(
Album.album_url_object_id == album_url_object_id
).first()
if count:
count = count[0]
except Exception as e:
self.logger.error('查询数据库异常,原因:{}'.format(e))
finally:
self.db_session.rollback()
if count:
self.logger.info('数据库已有该数据album_url_object_id:%s' %
album_url_object_id)
repeated_count += 1
self.redis_cmd.set(album_url, 1, xx=True)
continue
else:
album_item = self.parse_album_item(album_node,
album_url, album_url_object_id, category)
yield response.follow(url=album_url, meta={'AlbumItem':
album_item}, callback=self.parse_detail)
selector_list = response.css('.page li a::attr(href)')
if not is_last_item_finished:
if selector_list:
last_page_url = None
current_url_page = response.xpath(
'//li[@class="thisclass"]//text()').extract_first()
if current_url_page and int(current_url_page) == 1:
last_page_url = selector_list[-1].extract()
next_url = selector_list[-2].extract()
if next_url == last_page_url:
album_last_page_url = response.urljoin(last_page_url)
self.logger.info('Last page:%s' % album_last_page_url)
else:
self.logger.info('Next page:%s' % response.urljoin(
next_url))
yield response.follow(url=next_url, meta={
'album_last_page_url': album_last_page_url},
callback=self.parse)
else:
self.logger.info('selector_list is None')
self.logger.info('重复次数:%s' % repeated_count)
else:
self.logger.info('Stop crawler. None Next page!')
def parse_album_item(self, album_node, album_url, album_url_object_id,
category):
album_title = album_node.css('.p a img::attr(alt)').extract_first(
).strip()
cover_url = album_node.css('.p a img::attr(src)').extract_first(
).strip()
regex = '\\d+\\.\\d+.\\d+\\s+No\\.\\d+|\\d+\\-\\d+-\\d+\\s+No\\.\\d+'
number_group = re.findall(regex, album_title)
if len(number_group) > 0:
number = number_group[0]
else:
number = 'No.unknown'
create_date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
album_item = AlbumItem()
album_item['category'] = category
album_item['album_url'] = album_url
album_item['album_url_object_id'] = album_url_object_id
album_item['album_title'] = album_title
album_item['cover_url'] = cover_url
album_item['number'] = number
album_item['create_date'] = create_date
return album_item
def parse_detail(self, response):
self.album_item = response.meta.get('AlbumItem')
self.album_image_relation_item['album_item'] = self.album_item
self.parse_album_image_item(response)
relative_next_page_list = response.css('.page li a::attr(href)'
).extract()
next_page_threads = [self.gevent_pool.spawn(self.
get_album_image_item_list, response.urljoin(relative_next_page)
) for relative_next_page in relative_next_page_list[2:-1]]
gevent.joinall(next_page_threads)
self.album_image_relation_item['album_image_item_list'
] = self.album_image_item_list
self.album_image_item_list = []
yield self.album_image_relation_item
def get_album_image_item_list(self, abs_next_page):
"""
使用下页绝对路径同步请求
:param abs_next_page:
:return:
"""
resp = requests.get(abs_next_page)
if resp.status_code == 200:
encoding = requests.utils.get_encodings_from_content(resp.text)
resp.encoding = encoding[0]
self.parse_album_image_item(etree.HTML(resp.text))
else:
self.logger.warn('下载此页{}失败,返回的状态码为{}'.format(abs_next_page,
resp.status_code))
def parse_album_image_item(self, response):
"""
解析item并返回给pipelines
:param response: 如果response类型是继承自scrapy的TextResponse类则使用scrapy的Selector来解析,否则使用lxml来解析
:return:
"""
if isinstance(response, HtmlResponse):
item_title = response.xpath('//div[@class="content"]/h1/text()'
).extract_first().strip()
publish_date = response.xpath('//div[@class="tit"]/span/text()'
).extract_first().split(':')[1]
image_link_list = response.css('.contents a img::attr(src)'
).extract()
else:
item_title = response.xpath('//div[@class="content"]/h1/text()')[0
].strip()
publish_date = response.xpath('//div[@class="tit"]/span/text()')[0
].split(':')[1]
image_link_list = response.xpath('//div[@class="contents"]/a/img')
image_link_list = [image_link.attrib['src'] for image_link in
image_link_list]
regex = '\\s?\\w+[^\\w]?'
regex_group = re.findall(regex, item_title)
stage_name = 'unknown'
if len(regex_group) > 0:
str = regex_group[-1]
if '[' in str:
stage_name = str.split('[')[0].strip()
elif '(' in str:
stage_name = str.split('(')[0].strip()
elif re.match('[^\\d*]', str):
stage_name = re.match('[^\\d*]', str).group()
for image_url in image_link_list:
album_image_item = AlbumImageItem()
album_image_item['item_url'] = image_url
album_image_item['item_url_object_id'] = self.get_md5(image_url)
item_url_list_json = '{}'
album_image_item['item_url_list_json'] = item_url_list_json
album_image_item['item_title'] = item_title
album_image_item['stage_name'] = stage_name
album_image_item['publish_date'] = publish_date
self.album_image_item_list.append(album_image_item)
return self.album_image_item_list
<|reserved_special_token_0|>
@staticmethod
def sub_url_scheme(website, replace_str):
scheme_regex = '^(http://|https://)'
return re.sub(scheme_regex, replace_str, website)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Beautyleg7Spider(scrapy.Spider):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def start_requests(self):
mysql_host = self.crawler.settings.get('MYSQL_HOST')
mysql_port = self.crawler.settings.get('MYSQL_PORT')
mysql_user = self.crawler.settings.get('MYSQL_USER')
mysql_password = self.crawler.settings.get('MYSQL_PASSWORD')
mysql_db_name = self.crawler.settings.get('MYSQL_DB_NAME')
engine = create_engine('mysql+mysqlconnector://{}:{}@{}:{}/{}'.
format(mysql_user, mysql_password, mysql_host, mysql_port,
mysql_db_name), pool_recycle=180, echo=False)
session_maker = sessionmaker(bind=engine)
self.db_session = session_maker()
for url in self.start_urls:
yield scrapy.Request(url)
def parse(self, response):
if self.db_session is None:
self.logger.error('db_session is None')
return None
repeated_count = 0
if response is None:
self.logger.warn('响应为空,不做处理!')
else:
album_nodes = response.css('.pic .item')
category = response.css('.sitepath a')[1].css('a::text'
).extract_first().strip()
is_persisted_last_item = self.redis_cmd.get(self.
album_last_item_redis_unique_key)
is_last_item_finished = False
if is_persisted_last_item is not None and int(
is_persisted_last_item):
is_last_item_finished = True
self.logger.info('已持久化最后一页的最后主题:%s' % self.
album_last_item_redis_unique_key)
album_last_page_url = response.meta.get('album_last_page_url')
if album_last_page_url is not None:
album_last_page_url_last_item_redis_suffix = album_nodes[-1
].css('.p a::attr(href)').extract_first()
self.album_last_item_redis_unique_key = (self.
ALBUM_URL_REDIS_KEY_PREFIX + self.REDIS_LIMITER + self.
sub_url_scheme(album_last_page_url, '') + self.
REDIS_LIMITER + self.sub_url_scheme(
album_last_page_url_last_item_redis_suffix, ''))
self.redis_cmd.setnx(self.album_last_item_redis_unique_key, 0)
for album_node in album_nodes:
album_url = album_node.css('.p a::attr(href)').extract_first(
).strip()
is_persisted = self.redis_cmd.get(album_url)
if is_persisted is not None and int(is_persisted):
self.logger.info('Redis中该url album_url:%s已持久化' % album_url)
continue
album_url_object_id = self.get_md5(album_url)
self.redis_cmd.setnx(album_url, 0)
count = 0
try:
count = self.db_session.query(func.count()).filter(
Album.album_url_object_id == album_url_object_id
).first()
if count:
count = count[0]
except Exception as e:
self.logger.error('查询数据库异常,原因:{}'.format(e))
finally:
self.db_session.rollback()
if count:
self.logger.info('数据库已有该数据album_url_object_id:%s' %
album_url_object_id)
repeated_count += 1
self.redis_cmd.set(album_url, 1, xx=True)
continue
else:
album_item = self.parse_album_item(album_node,
album_url, album_url_object_id, category)
yield response.follow(url=album_url, meta={'AlbumItem':
album_item}, callback=self.parse_detail)
selector_list = response.css('.page li a::attr(href)')
if not is_last_item_finished:
if selector_list:
last_page_url = None
current_url_page = response.xpath(
'//li[@class="thisclass"]//text()').extract_first()
if current_url_page and int(current_url_page) == 1:
last_page_url = selector_list[-1].extract()
next_url = selector_list[-2].extract()
if next_url == last_page_url:
album_last_page_url = response.urljoin(last_page_url)
self.logger.info('Last page:%s' % album_last_page_url)
else:
self.logger.info('Next page:%s' % response.urljoin(
next_url))
yield response.follow(url=next_url, meta={
'album_last_page_url': album_last_page_url},
callback=self.parse)
else:
self.logger.info('selector_list is None')
self.logger.info('重复次数:%s' % repeated_count)
else:
self.logger.info('Stop crawler. None Next page!')
def parse_album_item(self, album_node, album_url, album_url_object_id,
category):
album_title = album_node.css('.p a img::attr(alt)').extract_first(
).strip()
cover_url = album_node.css('.p a img::attr(src)').extract_first(
).strip()
regex = '\\d+\\.\\d+.\\d+\\s+No\\.\\d+|\\d+\\-\\d+-\\d+\\s+No\\.\\d+'
number_group = re.findall(regex, album_title)
if len(number_group) > 0:
number = number_group[0]
else:
number = 'No.unknown'
create_date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
album_item = AlbumItem()
album_item['category'] = category
album_item['album_url'] = album_url
album_item['album_url_object_id'] = album_url_object_id
album_item['album_title'] = album_title
album_item['cover_url'] = cover_url
album_item['number'] = number
album_item['create_date'] = create_date
return album_item
def parse_detail(self, response):
self.album_item = response.meta.get('AlbumItem')
self.album_image_relation_item['album_item'] = self.album_item
self.parse_album_image_item(response)
relative_next_page_list = response.css('.page li a::attr(href)'
).extract()
next_page_threads = [self.gevent_pool.spawn(self.
get_album_image_item_list, response.urljoin(relative_next_page)
) for relative_next_page in relative_next_page_list[2:-1]]
gevent.joinall(next_page_threads)
self.album_image_relation_item['album_image_item_list'
] = self.album_image_item_list
self.album_image_item_list = []
yield self.album_image_relation_item
def get_album_image_item_list(self, abs_next_page):
"""
使用下页绝对路径同步请求
:param abs_next_page:
:return:
"""
resp = requests.get(abs_next_page)
if resp.status_code == 200:
encoding = requests.utils.get_encodings_from_content(resp.text)
resp.encoding = encoding[0]
self.parse_album_image_item(etree.HTML(resp.text))
else:
self.logger.warn('下载此页{}失败,返回的状态码为{}'.format(abs_next_page,
resp.status_code))
def parse_album_image_item(self, response):
"""
解析item并返回给pipelines
:param response: 如果response类型是继承自scrapy的TextResponse类则使用scrapy的Selector来解析,否则使用lxml来解析
:return:
"""
if isinstance(response, HtmlResponse):
item_title = response.xpath('//div[@class="content"]/h1/text()'
).extract_first().strip()
publish_date = response.xpath('//div[@class="tit"]/span/text()'
).extract_first().split(':')[1]
image_link_list = response.css('.contents a img::attr(src)'
).extract()
else:
item_title = response.xpath('//div[@class="content"]/h1/text()')[0
].strip()
publish_date = response.xpath('//div[@class="tit"]/span/text()')[0
].split(':')[1]
image_link_list = response.xpath('//div[@class="contents"]/a/img')
image_link_list = [image_link.attrib['src'] for image_link in
image_link_list]
regex = '\\s?\\w+[^\\w]?'
regex_group = re.findall(regex, item_title)
stage_name = 'unknown'
if len(regex_group) > 0:
str = regex_group[-1]
if '[' in str:
stage_name = str.split('[')[0].strip()
elif '(' in str:
stage_name = str.split('(')[0].strip()
elif re.match('[^\\d*]', str):
stage_name = re.match('[^\\d*]', str).group()
for image_url in image_link_list:
album_image_item = AlbumImageItem()
album_image_item['item_url'] = image_url
album_image_item['item_url_object_id'] = self.get_md5(image_url)
item_url_list_json = '{}'
album_image_item['item_url_list_json'] = item_url_list_json
album_image_item['item_title'] = item_title
album_image_item['stage_name'] = stage_name
album_image_item['publish_date'] = publish_date
self.album_image_item_list.append(album_image_item)
return self.album_image_item_list
@staticmethod
def get_md5(param):
if isinstance(param, str):
param = param.encode()
m = hashlib.md5()
m.update(param)
return m.hexdigest()
@staticmethod
def sub_url_scheme(website, replace_str):
scheme_regex = '^(http://|https://)'
return re.sub(scheme_regex, replace_str, website)
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import hashlib
import re
from datetime import datetime
import gevent
import requests
import scrapy
from gevent.pool import Pool
from lxml import etree
from scrapy.http import HtmlResponse
from sqlalchemy import create_engine, func
from sqlalchemy.orm import sessionmaker
from ..items import Album, AlbumImageRelationItem, AlbumItem, AlbumImageItem
from ..utils.const import const
from ..utils.redis_util import get_redis_conn_from_pool
class Beautyleg7Spider(scrapy.Spider):
name = 'Beautyleg7Spider'
category_list = ['siwameitui', 'xingganmeinv', 'weimeixiezhen', 'ribenmeinv']
start_urls = [('http://www.beautyleg7.com/' + category) for category in category_list]
const.REPEATED_THRESHOLD = 10
def __init__(self, name=None, **kwargs):
super().__init__(name=None, **kwargs)
self.db_session = None
self.gevent_pool = Pool(32)
self.redis_cmd = get_redis_conn_from_pool()
self.ALBUM_URL_REDIS_KEY_PREFIX = "album_url"
self.REDIS_LIMITER = ":"
self.album_last_item_redis_unique_key = ""
self.album_item = None
self.album_image_item_list = []
self.album_image_relation_item = AlbumImageRelationItem()
def start_requests(self):
mysql_host = self.crawler.settings.get("MYSQL_HOST")
mysql_port = self.crawler.settings.get("MYSQL_PORT")
mysql_user = self.crawler.settings.get("MYSQL_USER")
mysql_password = self.crawler.settings.get("MYSQL_PASSWORD")
mysql_db_name = self.crawler.settings.get("MYSQL_DB_NAME")
engine = create_engine('mysql+mysqlconnector://{}:{}@{}:{}/{}'.format(mysql_user, mysql_password,
mysql_host, mysql_port,
mysql_db_name),
pool_recycle=180, echo=False)
session_maker = sessionmaker(bind=engine)
self.db_session = session_maker()
for url in self.start_urls:
yield scrapy.Request(url)
def parse(self, response):
if self.db_session is None:
self.logger.error("db_session is None")
return None
repeated_count = 0
if response is None:
self.logger.warn("响应为空,不做处理!")
else:
album_nodes = response.css('.pic .item')
category = response.css('.sitepath a')[1].css('a::text').extract_first().strip()
# 判断最后一页的最后主题是否被持久化
is_persisted_last_item = self.redis_cmd.get(self.album_last_item_redis_unique_key)
is_last_item_finished = False
if is_persisted_last_item is not None and int(is_persisted_last_item):
is_last_item_finished = True
self.logger.info("已持久化最后一页的最后主题:%s" % self.album_last_item_redis_unique_key)
# 如果是最后一页则设置Redis存储key:“最后一页页码:最后一条主题url”,value:is_persisted(取值为0或1,默认为0)
album_last_page_url = response.meta.get("album_last_page_url")
if album_last_page_url is not None:
album_last_page_url_last_item_redis_suffix = album_nodes[-1].css('.p a::attr(href)').extract_first()
self.album_last_item_redis_unique_key = self.ALBUM_URL_REDIS_KEY_PREFIX + self.REDIS_LIMITER + \
self.sub_url_scheme(album_last_page_url,
"") + self.REDIS_LIMITER + \
self.sub_url_scheme(album_last_page_url_last_item_redis_suffix,
"")
self.redis_cmd.setnx(self.album_last_item_redis_unique_key, 0)
for album_node in album_nodes:
album_url = album_node.css('.p a::attr(href)').extract_first().strip()
# 判断当前主题url是否已持久化
is_persisted = self.redis_cmd.get(album_url)
if is_persisted is not None and int(is_persisted):
self.logger.info("Redis中该url album_url:%s已持久化" % album_url)
continue
album_url_object_id = self.get_md5(album_url)
# 只有name不存在时,当前set操作才执行
self.redis_cmd.setnx(album_url, 0)
count = 0
try:
count = self.db_session.query(func.count()).filter(
Album.album_url_object_id == album_url_object_id).first()
if count:
count = count[0]
except Exception as e:
self.logger.error("查询数据库异常,原因:{}".format(e))
finally:
self.db_session.rollback()
if count:
self.logger.info("数据库已有该数据album_url_object_id:%s" % album_url_object_id)
repeated_count += 1
# 只有name存在时,当前set操作才执行
self.redis_cmd.set(album_url, 1, xx=True)
continue
else:
album_item = self.parse_album_item(album_node, album_url, album_url_object_id, category)
yield response.follow(url=album_url,
meta={"AlbumItem": album_item},
callback=self.parse_detail)
# 提取下一页并交给scrapy下载
selector_list = response.css('.page li a::attr(href)')
# 如果最后一页的最后一个主题url未被持久化则继续爬取
if not is_last_item_finished:
if selector_list:
last_page_url = None
current_url_page = response.xpath('//li[@class="thisclass"]//text()').extract_first()
# 如果当前页是第一页则获取最后一页url
if current_url_page and int(current_url_page) == 1:
last_page_url = selector_list[-1].extract()
next_url = selector_list[-2].extract()
if next_url == last_page_url:
album_last_page_url = response.urljoin(last_page_url)
self.logger.info("Last page:%s" % album_last_page_url)
else:
self.logger.info("Next page:%s" % response.urljoin(next_url))
yield response.follow(url=next_url,
meta={"album_last_page_url": album_last_page_url},
callback=self.parse)
else:
self.logger.info("selector_list is None")
self.logger.info("重复次数:%s" % repeated_count)
else:
self.logger.info("Stop crawler. None Next page!")
def parse_album_item(self, album_node, album_url, album_url_object_id, category):
album_title = album_node.css('.p a img::attr(alt)').extract_first().strip()
cover_url = album_node.css('.p a img::attr(src)').extract_first().strip()
regex = "\d+\.\d+.\d+\s+No\.\d+|\d+\-\d+-\d+\s+No\.\d+"
number_group = re.findall(regex, album_title)
if len(number_group) > 0:
number = number_group[0]
else:
number = "No.unknown"
create_date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
album_item = AlbumItem()
album_item['category'] = category
album_item['album_url'] = album_url
album_item['album_url_object_id'] = album_url_object_id
album_item['album_title'] = album_title
album_item['cover_url'] = cover_url
album_item['number'] = number
album_item['create_date'] = create_date
return album_item
def parse_detail(self, response):
self.album_item = response.meta.get("AlbumItem")
self.album_image_relation_item['album_item'] = self.album_item
self.parse_album_image_item(response)
# 详情页分页链接,循环生成所有子页面的请求
relative_next_page_list = response.css('.page li a::attr(href)').extract()
# 使用gevent协程池提升网络IO处理效率
next_page_threads = [
self.gevent_pool.spawn(self.get_album_image_item_list, response.urljoin(relative_next_page))
for relative_next_page in relative_next_page_list[2:-1]
]
gevent.joinall(next_page_threads)
self.album_image_relation_item['album_image_item_list'] = self.album_image_item_list
# 重新初始化
self.album_image_item_list = []
yield self.album_image_relation_item
def get_album_image_item_list(self, abs_next_page):
"""
使用下页绝对路径同步请求
:param abs_next_page:
:return:
"""
resp = requests.get(abs_next_page)
if resp.status_code == 200:
encoding = requests.utils.get_encodings_from_content(resp.text)
resp.encoding = encoding[0]
self.parse_album_image_item(etree.HTML(resp.text))
else:
self.logger.warn("下载此页{}失败,返回的状态码为{}".format(abs_next_page, resp.status_code))
def parse_album_image_item(self, response):
    """
    Parse image items from a detail (sub-)page and append them to
    self.album_image_item_list.

    :param response: a scrapy HtmlResponse (parsed with scrapy selectors)
                     or an lxml element tree (parsed with xpath/attrib)
    :return: the accumulated self.album_image_item_list
    """
    if isinstance(response, HtmlResponse):
        item_title = response.xpath('//div[@class="content"]/h1/text()').extract_first().strip()
        publish_date = response.xpath('//div[@class="tit"]/span/text()').extract_first().split(":")[1]
        image_link_list = response.css('.contents a img::attr(src)').extract()
    else:
        item_title = response.xpath('//div[@class="content"]/h1/text()')[0].strip()
        publish_date = response.xpath('//div[@class="tit"]/span/text()')[0].split(":")[1]
        image_link_list = response.xpath('//div[@class="contents"]/a/img')
        image_link_list = [image_link.attrib['src'] for image_link in image_link_list]
    # Raw string fixes invalid-escape warnings; pattern bytes unchanged.
    # Grabs the last word-ish token of the title as the model's stage name.
    regex = r"\s?\w+[^\w]?"
    regex_group = re.findall(regex, item_title)
    stage_name = "unknown"
    if len(regex_group) > 0:
        # Bug fix: the original bound this to `str`, shadowing the builtin.
        title_tail = regex_group[-1]
        if "[" in title_tail:
            stage_name = title_tail.split("[")[0].strip()
        elif "(" in title_tail:
            stage_name = title_tail.split("(")[0].strip()
        elif re.match(r'[^\d*]', title_tail):
            stage_name = re.match(r'[^\d*]', title_tail).group()
    # One item per image link found on the page.
    for image_url in image_link_list:
        album_image_item = AlbumImageItem()
        album_image_item['item_url'] = image_url
        album_image_item['item_url_object_id'] = self.get_md5(image_url)
        item_url_list_json = "{}"
        album_image_item['item_url_list_json'] = item_url_list_json
        album_image_item['item_title'] = item_title
        album_image_item['stage_name'] = stage_name
        album_image_item['publish_date'] = publish_date
        self.album_image_item_list.append(album_image_item)
    return self.album_image_item_list
@staticmethod
def get_md5(param):
if isinstance(param, str):
param = param.encode()
m = hashlib.md5()
m.update(param)
return m.hexdigest()
@staticmethod
def sub_url_scheme(website, replace_str):
scheme_regex = "^(http://|https://)"
return re.sub(scheme_regex, replace_str, website)
|
flexible
|
{
"blob_id": "eb853e430b996a81dc2ef20c320979a3e04d956a",
"index": 237,
"step-1": "<mask token>\n\n\nclass Beautyleg7Spider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self, response):\n if self.db_session is None:\n self.logger.error('db_session is None')\n return None\n repeated_count = 0\n if response is None:\n self.logger.warn('响应为空,不做处理!')\n else:\n album_nodes = response.css('.pic .item')\n category = response.css('.sitepath a')[1].css('a::text'\n ).extract_first().strip()\n is_persisted_last_item = self.redis_cmd.get(self.\n album_last_item_redis_unique_key)\n is_last_item_finished = False\n if is_persisted_last_item is not None and int(\n is_persisted_last_item):\n is_last_item_finished = True\n self.logger.info('已持久化最后一页的最后主题:%s' % self.\n album_last_item_redis_unique_key)\n album_last_page_url = response.meta.get('album_last_page_url')\n if album_last_page_url is not None:\n album_last_page_url_last_item_redis_suffix = album_nodes[-1\n ].css('.p a::attr(href)').extract_first()\n self.album_last_item_redis_unique_key = (self.\n ALBUM_URL_REDIS_KEY_PREFIX + self.REDIS_LIMITER + self.\n sub_url_scheme(album_last_page_url, '') + self.\n REDIS_LIMITER + self.sub_url_scheme(\n album_last_page_url_last_item_redis_suffix, ''))\n self.redis_cmd.setnx(self.album_last_item_redis_unique_key, 0)\n for album_node in album_nodes:\n album_url = album_node.css('.p a::attr(href)').extract_first(\n ).strip()\n is_persisted = self.redis_cmd.get(album_url)\n if is_persisted is not None and int(is_persisted):\n self.logger.info('Redis中该url album_url:%s已持久化' % album_url)\n continue\n album_url_object_id = self.get_md5(album_url)\n self.redis_cmd.setnx(album_url, 0)\n count = 0\n try:\n count = self.db_session.query(func.count()).filter(\n Album.album_url_object_id == album_url_object_id\n ).first()\n if count:\n count = count[0]\n except Exception as e:\n self.logger.error('查询数据库异常,原因:{}'.format(e))\n finally:\n self.db_session.rollback()\n if count:\n 
self.logger.info('数据库已有该数据album_url_object_id:%s' %\n album_url_object_id)\n repeated_count += 1\n self.redis_cmd.set(album_url, 1, xx=True)\n continue\n else:\n album_item = self.parse_album_item(album_node,\n album_url, album_url_object_id, category)\n yield response.follow(url=album_url, meta={'AlbumItem':\n album_item}, callback=self.parse_detail)\n selector_list = response.css('.page li a::attr(href)')\n if not is_last_item_finished:\n if selector_list:\n last_page_url = None\n current_url_page = response.xpath(\n '//li[@class=\"thisclass\"]//text()').extract_first()\n if current_url_page and int(current_url_page) == 1:\n last_page_url = selector_list[-1].extract()\n next_url = selector_list[-2].extract()\n if next_url == last_page_url:\n album_last_page_url = response.urljoin(last_page_url)\n self.logger.info('Last page:%s' % album_last_page_url)\n else:\n self.logger.info('Next page:%s' % response.urljoin(\n next_url))\n yield response.follow(url=next_url, meta={\n 'album_last_page_url': album_last_page_url},\n callback=self.parse)\n else:\n self.logger.info('selector_list is None')\n self.logger.info('重复次数:%s' % repeated_count)\n else:\n self.logger.info('Stop crawler. None Next page!')\n <mask token>\n <mask token>\n\n def get_album_image_item_list(self, abs_next_page):\n \"\"\"\n 使用下页绝对路径同步请求\n :param abs_next_page:\n :return:\n \"\"\"\n resp = requests.get(abs_next_page)\n if resp.status_code == 200:\n encoding = requests.utils.get_encodings_from_content(resp.text)\n resp.encoding = encoding[0]\n self.parse_album_image_item(etree.HTML(resp.text))\n else:\n self.logger.warn('下载此页{}失败,返回的状态码为{}'.format(abs_next_page,\n resp.status_code))\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Beautyleg7Spider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self, response):\n if self.db_session is None:\n self.logger.error('db_session is None')\n return None\n repeated_count = 0\n if response is None:\n self.logger.warn('响应为空,不做处理!')\n else:\n album_nodes = response.css('.pic .item')\n category = response.css('.sitepath a')[1].css('a::text'\n ).extract_first().strip()\n is_persisted_last_item = self.redis_cmd.get(self.\n album_last_item_redis_unique_key)\n is_last_item_finished = False\n if is_persisted_last_item is not None and int(\n is_persisted_last_item):\n is_last_item_finished = True\n self.logger.info('已持久化最后一页的最后主题:%s' % self.\n album_last_item_redis_unique_key)\n album_last_page_url = response.meta.get('album_last_page_url')\n if album_last_page_url is not None:\n album_last_page_url_last_item_redis_suffix = album_nodes[-1\n ].css('.p a::attr(href)').extract_first()\n self.album_last_item_redis_unique_key = (self.\n ALBUM_URL_REDIS_KEY_PREFIX + self.REDIS_LIMITER + self.\n sub_url_scheme(album_last_page_url, '') + self.\n REDIS_LIMITER + self.sub_url_scheme(\n album_last_page_url_last_item_redis_suffix, ''))\n self.redis_cmd.setnx(self.album_last_item_redis_unique_key, 0)\n for album_node in album_nodes:\n album_url = album_node.css('.p a::attr(href)').extract_first(\n ).strip()\n is_persisted = self.redis_cmd.get(album_url)\n if is_persisted is not None and int(is_persisted):\n self.logger.info('Redis中该url album_url:%s已持久化' % album_url)\n continue\n album_url_object_id = self.get_md5(album_url)\n self.redis_cmd.setnx(album_url, 0)\n count = 0\n try:\n count = self.db_session.query(func.count()).filter(\n Album.album_url_object_id == album_url_object_id\n ).first()\n if count:\n count = count[0]\n except Exception as e:\n self.logger.error('查询数据库异常,原因:{}'.format(e))\n finally:\n self.db_session.rollback()\n if count:\n 
self.logger.info('数据库已有该数据album_url_object_id:%s' %\n album_url_object_id)\n repeated_count += 1\n self.redis_cmd.set(album_url, 1, xx=True)\n continue\n else:\n album_item = self.parse_album_item(album_node,\n album_url, album_url_object_id, category)\n yield response.follow(url=album_url, meta={'AlbumItem':\n album_item}, callback=self.parse_detail)\n selector_list = response.css('.page li a::attr(href)')\n if not is_last_item_finished:\n if selector_list:\n last_page_url = None\n current_url_page = response.xpath(\n '//li[@class=\"thisclass\"]//text()').extract_first()\n if current_url_page and int(current_url_page) == 1:\n last_page_url = selector_list[-1].extract()\n next_url = selector_list[-2].extract()\n if next_url == last_page_url:\n album_last_page_url = response.urljoin(last_page_url)\n self.logger.info('Last page:%s' % album_last_page_url)\n else:\n self.logger.info('Next page:%s' % response.urljoin(\n next_url))\n yield response.follow(url=next_url, meta={\n 'album_last_page_url': album_last_page_url},\n callback=self.parse)\n else:\n self.logger.info('selector_list is None')\n self.logger.info('重复次数:%s' % repeated_count)\n else:\n self.logger.info('Stop crawler. 
None Next page!')\n\n def parse_album_item(self, album_node, album_url, album_url_object_id,\n category):\n album_title = album_node.css('.p a img::attr(alt)').extract_first(\n ).strip()\n cover_url = album_node.css('.p a img::attr(src)').extract_first(\n ).strip()\n regex = '\\\\d+\\\\.\\\\d+.\\\\d+\\\\s+No\\\\.\\\\d+|\\\\d+\\\\-\\\\d+-\\\\d+\\\\s+No\\\\.\\\\d+'\n number_group = re.findall(regex, album_title)\n if len(number_group) > 0:\n number = number_group[0]\n else:\n number = 'No.unknown'\n create_date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n album_item = AlbumItem()\n album_item['category'] = category\n album_item['album_url'] = album_url\n album_item['album_url_object_id'] = album_url_object_id\n album_item['album_title'] = album_title\n album_item['cover_url'] = cover_url\n album_item['number'] = number\n album_item['create_date'] = create_date\n return album_item\n\n def parse_detail(self, response):\n self.album_item = response.meta.get('AlbumItem')\n self.album_image_relation_item['album_item'] = self.album_item\n self.parse_album_image_item(response)\n relative_next_page_list = response.css('.page li a::attr(href)'\n ).extract()\n next_page_threads = [self.gevent_pool.spawn(self.\n get_album_image_item_list, response.urljoin(relative_next_page)\n ) for relative_next_page in relative_next_page_list[2:-1]]\n gevent.joinall(next_page_threads)\n self.album_image_relation_item['album_image_item_list'\n ] = self.album_image_item_list\n self.album_image_item_list = []\n yield self.album_image_relation_item\n\n def get_album_image_item_list(self, abs_next_page):\n \"\"\"\n 使用下页绝对路径同步请求\n :param abs_next_page:\n :return:\n \"\"\"\n resp = requests.get(abs_next_page)\n if resp.status_code == 200:\n encoding = requests.utils.get_encodings_from_content(resp.text)\n resp.encoding = encoding[0]\n self.parse_album_image_item(etree.HTML(resp.text))\n else:\n self.logger.warn('下载此页{}失败,返回的状态码为{}'.format(abs_next_page,\n resp.status_code))\n\n def 
parse_album_image_item(self, response):\n \"\"\"\n 解析item并返回给pipelines\n :param response: 如果response类型是继承自scrapy的TextResponse类则使用scrapy的Selector来解析,否则使用lxml来解析\n :return:\n \"\"\"\n if isinstance(response, HtmlResponse):\n item_title = response.xpath('//div[@class=\"content\"]/h1/text()'\n ).extract_first().strip()\n publish_date = response.xpath('//div[@class=\"tit\"]/span/text()'\n ).extract_first().split(':')[1]\n image_link_list = response.css('.contents a img::attr(src)'\n ).extract()\n else:\n item_title = response.xpath('//div[@class=\"content\"]/h1/text()')[0\n ].strip()\n publish_date = response.xpath('//div[@class=\"tit\"]/span/text()')[0\n ].split(':')[1]\n image_link_list = response.xpath('//div[@class=\"contents\"]/a/img')\n image_link_list = [image_link.attrib['src'] for image_link in\n image_link_list]\n regex = '\\\\s?\\\\w+[^\\\\w]?'\n regex_group = re.findall(regex, item_title)\n stage_name = 'unknown'\n if len(regex_group) > 0:\n str = regex_group[-1]\n if '[' in str:\n stage_name = str.split('[')[0].strip()\n elif '(' in str:\n stage_name = str.split('(')[0].strip()\n elif re.match('[^\\\\d*]', str):\n stage_name = re.match('[^\\\\d*]', str).group()\n for image_url in image_link_list:\n album_image_item = AlbumImageItem()\n album_image_item['item_url'] = image_url\n album_image_item['item_url_object_id'] = self.get_md5(image_url)\n item_url_list_json = '{}'\n album_image_item['item_url_list_json'] = item_url_list_json\n album_image_item['item_title'] = item_title\n album_image_item['stage_name'] = stage_name\n album_image_item['publish_date'] = publish_date\n self.album_image_item_list.append(album_image_item)\n return self.album_image_item_list\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Beautyleg7Spider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def start_requests(self):\n mysql_host = self.crawler.settings.get('MYSQL_HOST')\n mysql_port = self.crawler.settings.get('MYSQL_PORT')\n mysql_user = self.crawler.settings.get('MYSQL_USER')\n mysql_password = self.crawler.settings.get('MYSQL_PASSWORD')\n mysql_db_name = self.crawler.settings.get('MYSQL_DB_NAME')\n engine = create_engine('mysql+mysqlconnector://{}:{}@{}:{}/{}'.\n format(mysql_user, mysql_password, mysql_host, mysql_port,\n mysql_db_name), pool_recycle=180, echo=False)\n session_maker = sessionmaker(bind=engine)\n self.db_session = session_maker()\n for url in self.start_urls:\n yield scrapy.Request(url)\n\n def parse(self, response):\n if self.db_session is None:\n self.logger.error('db_session is None')\n return None\n repeated_count = 0\n if response is None:\n self.logger.warn('响应为空,不做处理!')\n else:\n album_nodes = response.css('.pic .item')\n category = response.css('.sitepath a')[1].css('a::text'\n ).extract_first().strip()\n is_persisted_last_item = self.redis_cmd.get(self.\n album_last_item_redis_unique_key)\n is_last_item_finished = False\n if is_persisted_last_item is not None and int(\n is_persisted_last_item):\n is_last_item_finished = True\n self.logger.info('已持久化最后一页的最后主题:%s' % self.\n album_last_item_redis_unique_key)\n album_last_page_url = response.meta.get('album_last_page_url')\n if album_last_page_url is not None:\n album_last_page_url_last_item_redis_suffix = album_nodes[-1\n ].css('.p a::attr(href)').extract_first()\n self.album_last_item_redis_unique_key = (self.\n ALBUM_URL_REDIS_KEY_PREFIX + self.REDIS_LIMITER + self.\n sub_url_scheme(album_last_page_url, '') + self.\n REDIS_LIMITER + self.sub_url_scheme(\n album_last_page_url_last_item_redis_suffix, ''))\n self.redis_cmd.setnx(self.album_last_item_redis_unique_key, 0)\n for album_node in album_nodes:\n album_url = 
album_node.css('.p a::attr(href)').extract_first(\n ).strip()\n is_persisted = self.redis_cmd.get(album_url)\n if is_persisted is not None and int(is_persisted):\n self.logger.info('Redis中该url album_url:%s已持久化' % album_url)\n continue\n album_url_object_id = self.get_md5(album_url)\n self.redis_cmd.setnx(album_url, 0)\n count = 0\n try:\n count = self.db_session.query(func.count()).filter(\n Album.album_url_object_id == album_url_object_id\n ).first()\n if count:\n count = count[0]\n except Exception as e:\n self.logger.error('查询数据库异常,原因:{}'.format(e))\n finally:\n self.db_session.rollback()\n if count:\n self.logger.info('数据库已有该数据album_url_object_id:%s' %\n album_url_object_id)\n repeated_count += 1\n self.redis_cmd.set(album_url, 1, xx=True)\n continue\n else:\n album_item = self.parse_album_item(album_node,\n album_url, album_url_object_id, category)\n yield response.follow(url=album_url, meta={'AlbumItem':\n album_item}, callback=self.parse_detail)\n selector_list = response.css('.page li a::attr(href)')\n if not is_last_item_finished:\n if selector_list:\n last_page_url = None\n current_url_page = response.xpath(\n '//li[@class=\"thisclass\"]//text()').extract_first()\n if current_url_page and int(current_url_page) == 1:\n last_page_url = selector_list[-1].extract()\n next_url = selector_list[-2].extract()\n if next_url == last_page_url:\n album_last_page_url = response.urljoin(last_page_url)\n self.logger.info('Last page:%s' % album_last_page_url)\n else:\n self.logger.info('Next page:%s' % response.urljoin(\n next_url))\n yield response.follow(url=next_url, meta={\n 'album_last_page_url': album_last_page_url},\n callback=self.parse)\n else:\n self.logger.info('selector_list is None')\n self.logger.info('重复次数:%s' % repeated_count)\n else:\n self.logger.info('Stop crawler. 
None Next page!')\n\n def parse_album_item(self, album_node, album_url, album_url_object_id,\n category):\n album_title = album_node.css('.p a img::attr(alt)').extract_first(\n ).strip()\n cover_url = album_node.css('.p a img::attr(src)').extract_first(\n ).strip()\n regex = '\\\\d+\\\\.\\\\d+.\\\\d+\\\\s+No\\\\.\\\\d+|\\\\d+\\\\-\\\\d+-\\\\d+\\\\s+No\\\\.\\\\d+'\n number_group = re.findall(regex, album_title)\n if len(number_group) > 0:\n number = number_group[0]\n else:\n number = 'No.unknown'\n create_date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n album_item = AlbumItem()\n album_item['category'] = category\n album_item['album_url'] = album_url\n album_item['album_url_object_id'] = album_url_object_id\n album_item['album_title'] = album_title\n album_item['cover_url'] = cover_url\n album_item['number'] = number\n album_item['create_date'] = create_date\n return album_item\n\n def parse_detail(self, response):\n self.album_item = response.meta.get('AlbumItem')\n self.album_image_relation_item['album_item'] = self.album_item\n self.parse_album_image_item(response)\n relative_next_page_list = response.css('.page li a::attr(href)'\n ).extract()\n next_page_threads = [self.gevent_pool.spawn(self.\n get_album_image_item_list, response.urljoin(relative_next_page)\n ) for relative_next_page in relative_next_page_list[2:-1]]\n gevent.joinall(next_page_threads)\n self.album_image_relation_item['album_image_item_list'\n ] = self.album_image_item_list\n self.album_image_item_list = []\n yield self.album_image_relation_item\n\n def get_album_image_item_list(self, abs_next_page):\n \"\"\"\n 使用下页绝对路径同步请求\n :param abs_next_page:\n :return:\n \"\"\"\n resp = requests.get(abs_next_page)\n if resp.status_code == 200:\n encoding = requests.utils.get_encodings_from_content(resp.text)\n resp.encoding = encoding[0]\n self.parse_album_image_item(etree.HTML(resp.text))\n else:\n self.logger.warn('下载此页{}失败,返回的状态码为{}'.format(abs_next_page,\n resp.status_code))\n\n def 
parse_album_image_item(self, response):\n \"\"\"\n 解析item并返回给pipelines\n :param response: 如果response类型是继承自scrapy的TextResponse类则使用scrapy的Selector来解析,否则使用lxml来解析\n :return:\n \"\"\"\n if isinstance(response, HtmlResponse):\n item_title = response.xpath('//div[@class=\"content\"]/h1/text()'\n ).extract_first().strip()\n publish_date = response.xpath('//div[@class=\"tit\"]/span/text()'\n ).extract_first().split(':')[1]\n image_link_list = response.css('.contents a img::attr(src)'\n ).extract()\n else:\n item_title = response.xpath('//div[@class=\"content\"]/h1/text()')[0\n ].strip()\n publish_date = response.xpath('//div[@class=\"tit\"]/span/text()')[0\n ].split(':')[1]\n image_link_list = response.xpath('//div[@class=\"contents\"]/a/img')\n image_link_list = [image_link.attrib['src'] for image_link in\n image_link_list]\n regex = '\\\\s?\\\\w+[^\\\\w]?'\n regex_group = re.findall(regex, item_title)\n stage_name = 'unknown'\n if len(regex_group) > 0:\n str = regex_group[-1]\n if '[' in str:\n stage_name = str.split('[')[0].strip()\n elif '(' in str:\n stage_name = str.split('(')[0].strip()\n elif re.match('[^\\\\d*]', str):\n stage_name = re.match('[^\\\\d*]', str).group()\n for image_url in image_link_list:\n album_image_item = AlbumImageItem()\n album_image_item['item_url'] = image_url\n album_image_item['item_url_object_id'] = self.get_md5(image_url)\n item_url_list_json = '{}'\n album_image_item['item_url_list_json'] = item_url_list_json\n album_image_item['item_title'] = item_title\n album_image_item['stage_name'] = stage_name\n album_image_item['publish_date'] = publish_date\n self.album_image_item_list.append(album_image_item)\n return self.album_image_item_list\n <mask token>\n\n @staticmethod\n def sub_url_scheme(website, replace_str):\n scheme_regex = '^(http://|https://)'\n return re.sub(scheme_regex, replace_str, website)\n",
"step-4": "<mask token>\n\n\nclass Beautyleg7Spider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def start_requests(self):\n mysql_host = self.crawler.settings.get('MYSQL_HOST')\n mysql_port = self.crawler.settings.get('MYSQL_PORT')\n mysql_user = self.crawler.settings.get('MYSQL_USER')\n mysql_password = self.crawler.settings.get('MYSQL_PASSWORD')\n mysql_db_name = self.crawler.settings.get('MYSQL_DB_NAME')\n engine = create_engine('mysql+mysqlconnector://{}:{}@{}:{}/{}'.\n format(mysql_user, mysql_password, mysql_host, mysql_port,\n mysql_db_name), pool_recycle=180, echo=False)\n session_maker = sessionmaker(bind=engine)\n self.db_session = session_maker()\n for url in self.start_urls:\n yield scrapy.Request(url)\n\n def parse(self, response):\n if self.db_session is None:\n self.logger.error('db_session is None')\n return None\n repeated_count = 0\n if response is None:\n self.logger.warn('响应为空,不做处理!')\n else:\n album_nodes = response.css('.pic .item')\n category = response.css('.sitepath a')[1].css('a::text'\n ).extract_first().strip()\n is_persisted_last_item = self.redis_cmd.get(self.\n album_last_item_redis_unique_key)\n is_last_item_finished = False\n if is_persisted_last_item is not None and int(\n is_persisted_last_item):\n is_last_item_finished = True\n self.logger.info('已持久化最后一页的最后主题:%s' % self.\n album_last_item_redis_unique_key)\n album_last_page_url = response.meta.get('album_last_page_url')\n if album_last_page_url is not None:\n album_last_page_url_last_item_redis_suffix = album_nodes[-1\n ].css('.p a::attr(href)').extract_first()\n self.album_last_item_redis_unique_key = (self.\n ALBUM_URL_REDIS_KEY_PREFIX + self.REDIS_LIMITER + self.\n sub_url_scheme(album_last_page_url, '') + self.\n REDIS_LIMITER + self.sub_url_scheme(\n album_last_page_url_last_item_redis_suffix, ''))\n self.redis_cmd.setnx(self.album_last_item_redis_unique_key, 0)\n for album_node in album_nodes:\n album_url = 
album_node.css('.p a::attr(href)').extract_first(\n ).strip()\n is_persisted = self.redis_cmd.get(album_url)\n if is_persisted is not None and int(is_persisted):\n self.logger.info('Redis中该url album_url:%s已持久化' % album_url)\n continue\n album_url_object_id = self.get_md5(album_url)\n self.redis_cmd.setnx(album_url, 0)\n count = 0\n try:\n count = self.db_session.query(func.count()).filter(\n Album.album_url_object_id == album_url_object_id\n ).first()\n if count:\n count = count[0]\n except Exception as e:\n self.logger.error('查询数据库异常,原因:{}'.format(e))\n finally:\n self.db_session.rollback()\n if count:\n self.logger.info('数据库已有该数据album_url_object_id:%s' %\n album_url_object_id)\n repeated_count += 1\n self.redis_cmd.set(album_url, 1, xx=True)\n continue\n else:\n album_item = self.parse_album_item(album_node,\n album_url, album_url_object_id, category)\n yield response.follow(url=album_url, meta={'AlbumItem':\n album_item}, callback=self.parse_detail)\n selector_list = response.css('.page li a::attr(href)')\n if not is_last_item_finished:\n if selector_list:\n last_page_url = None\n current_url_page = response.xpath(\n '//li[@class=\"thisclass\"]//text()').extract_first()\n if current_url_page and int(current_url_page) == 1:\n last_page_url = selector_list[-1].extract()\n next_url = selector_list[-2].extract()\n if next_url == last_page_url:\n album_last_page_url = response.urljoin(last_page_url)\n self.logger.info('Last page:%s' % album_last_page_url)\n else:\n self.logger.info('Next page:%s' % response.urljoin(\n next_url))\n yield response.follow(url=next_url, meta={\n 'album_last_page_url': album_last_page_url},\n callback=self.parse)\n else:\n self.logger.info('selector_list is None')\n self.logger.info('重复次数:%s' % repeated_count)\n else:\n self.logger.info('Stop crawler. 
None Next page!')\n\n def parse_album_item(self, album_node, album_url, album_url_object_id,\n category):\n album_title = album_node.css('.p a img::attr(alt)').extract_first(\n ).strip()\n cover_url = album_node.css('.p a img::attr(src)').extract_first(\n ).strip()\n regex = '\\\\d+\\\\.\\\\d+.\\\\d+\\\\s+No\\\\.\\\\d+|\\\\d+\\\\-\\\\d+-\\\\d+\\\\s+No\\\\.\\\\d+'\n number_group = re.findall(regex, album_title)\n if len(number_group) > 0:\n number = number_group[0]\n else:\n number = 'No.unknown'\n create_date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n album_item = AlbumItem()\n album_item['category'] = category\n album_item['album_url'] = album_url\n album_item['album_url_object_id'] = album_url_object_id\n album_item['album_title'] = album_title\n album_item['cover_url'] = cover_url\n album_item['number'] = number\n album_item['create_date'] = create_date\n return album_item\n\n def parse_detail(self, response):\n self.album_item = response.meta.get('AlbumItem')\n self.album_image_relation_item['album_item'] = self.album_item\n self.parse_album_image_item(response)\n relative_next_page_list = response.css('.page li a::attr(href)'\n ).extract()\n next_page_threads = [self.gevent_pool.spawn(self.\n get_album_image_item_list, response.urljoin(relative_next_page)\n ) for relative_next_page in relative_next_page_list[2:-1]]\n gevent.joinall(next_page_threads)\n self.album_image_relation_item['album_image_item_list'\n ] = self.album_image_item_list\n self.album_image_item_list = []\n yield self.album_image_relation_item\n\n def get_album_image_item_list(self, abs_next_page):\n \"\"\"\n 使用下页绝对路径同步请求\n :param abs_next_page:\n :return:\n \"\"\"\n resp = requests.get(abs_next_page)\n if resp.status_code == 200:\n encoding = requests.utils.get_encodings_from_content(resp.text)\n resp.encoding = encoding[0]\n self.parse_album_image_item(etree.HTML(resp.text))\n else:\n self.logger.warn('下载此页{}失败,返回的状态码为{}'.format(abs_next_page,\n resp.status_code))\n\n def 
parse_album_image_item(self, response):\n \"\"\"\n 解析item并返回给pipelines\n :param response: 如果response类型是继承自scrapy的TextResponse类则使用scrapy的Selector来解析,否则使用lxml来解析\n :return:\n \"\"\"\n if isinstance(response, HtmlResponse):\n item_title = response.xpath('//div[@class=\"content\"]/h1/text()'\n ).extract_first().strip()\n publish_date = response.xpath('//div[@class=\"tit\"]/span/text()'\n ).extract_first().split(':')[1]\n image_link_list = response.css('.contents a img::attr(src)'\n ).extract()\n else:\n item_title = response.xpath('//div[@class=\"content\"]/h1/text()')[0\n ].strip()\n publish_date = response.xpath('//div[@class=\"tit\"]/span/text()')[0\n ].split(':')[1]\n image_link_list = response.xpath('//div[@class=\"contents\"]/a/img')\n image_link_list = [image_link.attrib['src'] for image_link in\n image_link_list]\n regex = '\\\\s?\\\\w+[^\\\\w]?'\n regex_group = re.findall(regex, item_title)\n stage_name = 'unknown'\n if len(regex_group) > 0:\n str = regex_group[-1]\n if '[' in str:\n stage_name = str.split('[')[0].strip()\n elif '(' in str:\n stage_name = str.split('(')[0].strip()\n elif re.match('[^\\\\d*]', str):\n stage_name = re.match('[^\\\\d*]', str).group()\n for image_url in image_link_list:\n album_image_item = AlbumImageItem()\n album_image_item['item_url'] = image_url\n album_image_item['item_url_object_id'] = self.get_md5(image_url)\n item_url_list_json = '{}'\n album_image_item['item_url_list_json'] = item_url_list_json\n album_image_item['item_title'] = item_title\n album_image_item['stage_name'] = stage_name\n album_image_item['publish_date'] = publish_date\n self.album_image_item_list.append(album_image_item)\n return self.album_image_item_list\n\n @staticmethod\n def get_md5(param):\n if isinstance(param, str):\n param = param.encode()\n m = hashlib.md5()\n m.update(param)\n return m.hexdigest()\n\n @staticmethod\n def sub_url_scheme(website, replace_str):\n scheme_regex = '^(http://|https://)'\n return re.sub(scheme_regex, replace_str, 
website)\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\nimport hashlib\nimport re\nfrom datetime import datetime\n\nimport gevent\nimport requests\nimport scrapy\nfrom gevent.pool import Pool\nfrom lxml import etree\nfrom scrapy.http import HtmlResponse\nfrom sqlalchemy import create_engine, func\nfrom sqlalchemy.orm import sessionmaker\n\nfrom ..items import Album, AlbumImageRelationItem, AlbumItem, AlbumImageItem\nfrom ..utils.const import const\nfrom ..utils.redis_util import get_redis_conn_from_pool\n\n\nclass Beautyleg7Spider(scrapy.Spider):\n name = 'Beautyleg7Spider'\n category_list = ['siwameitui', 'xingganmeinv', 'weimeixiezhen', 'ribenmeinv']\n start_urls = [('http://www.beautyleg7.com/' + category) for category in category_list]\n\n const.REPEATED_THRESHOLD = 10\n\n def __init__(self, name=None, **kwargs):\n super().__init__(name=None, **kwargs)\n\n self.db_session = None\n\n self.gevent_pool = Pool(32)\n\n self.redis_cmd = get_redis_conn_from_pool()\n\n self.ALBUM_URL_REDIS_KEY_PREFIX = \"album_url\"\n self.REDIS_LIMITER = \":\"\n self.album_last_item_redis_unique_key = \"\"\n self.album_item = None\n self.album_image_item_list = []\n self.album_image_relation_item = AlbumImageRelationItem()\n\n def start_requests(self):\n mysql_host = self.crawler.settings.get(\"MYSQL_HOST\")\n mysql_port = self.crawler.settings.get(\"MYSQL_PORT\")\n mysql_user = self.crawler.settings.get(\"MYSQL_USER\")\n mysql_password = self.crawler.settings.get(\"MYSQL_PASSWORD\")\n mysql_db_name = self.crawler.settings.get(\"MYSQL_DB_NAME\")\n engine = create_engine('mysql+mysqlconnector://{}:{}@{}:{}/{}'.format(mysql_user, mysql_password,\n mysql_host, mysql_port,\n mysql_db_name),\n pool_recycle=180, echo=False)\n session_maker = sessionmaker(bind=engine)\n self.db_session = session_maker()\n\n for url in self.start_urls:\n yield scrapy.Request(url)\n\n def parse(self, response):\n if self.db_session is None:\n self.logger.error(\"db_session is None\")\n return None\n 
repeated_count = 0\n if response is None:\n self.logger.warn(\"响应为空,不做处理!\")\n else:\n album_nodes = response.css('.pic .item')\n category = response.css('.sitepath a')[1].css('a::text').extract_first().strip()\n\n # 判断最后一页的最后主题是否被持久化\n is_persisted_last_item = self.redis_cmd.get(self.album_last_item_redis_unique_key)\n is_last_item_finished = False\n if is_persisted_last_item is not None and int(is_persisted_last_item):\n is_last_item_finished = True\n self.logger.info(\"已持久化最后一页的最后主题:%s\" % self.album_last_item_redis_unique_key)\n\n # 如果是最后一页则设置Redis存储key:“最后一页页码:最后一条主题url”,value:is_persisted(取值为0或1,默认为0)\n album_last_page_url = response.meta.get(\"album_last_page_url\")\n if album_last_page_url is not None:\n album_last_page_url_last_item_redis_suffix = album_nodes[-1].css('.p a::attr(href)').extract_first()\n self.album_last_item_redis_unique_key = self.ALBUM_URL_REDIS_KEY_PREFIX + self.REDIS_LIMITER + \\\n self.sub_url_scheme(album_last_page_url,\n \"\") + self.REDIS_LIMITER + \\\n self.sub_url_scheme(album_last_page_url_last_item_redis_suffix,\n \"\")\n\n self.redis_cmd.setnx(self.album_last_item_redis_unique_key, 0)\n\n for album_node in album_nodes:\n album_url = album_node.css('.p a::attr(href)').extract_first().strip()\n # 判断当前主题url是否已持久化\n is_persisted = self.redis_cmd.get(album_url)\n if is_persisted is not None and int(is_persisted):\n self.logger.info(\"Redis中该url album_url:%s已持久化\" % album_url)\n continue\n\n album_url_object_id = self.get_md5(album_url)\n # 只有name不存在时,当前set操作才执行\n self.redis_cmd.setnx(album_url, 0)\n count = 0\n try:\n count = self.db_session.query(func.count()).filter(\n Album.album_url_object_id == album_url_object_id).first()\n if count:\n count = count[0]\n except Exception as e:\n self.logger.error(\"查询数据库异常,原因:{}\".format(e))\n finally:\n self.db_session.rollback()\n\n if count:\n self.logger.info(\"数据库已有该数据album_url_object_id:%s\" % album_url_object_id)\n repeated_count += 1\n # 只有name存在时,当前set操作才执行\n 
self.redis_cmd.set(album_url, 1, xx=True)\n continue\n else:\n album_item = self.parse_album_item(album_node, album_url, album_url_object_id, category)\n yield response.follow(url=album_url,\n meta={\"AlbumItem\": album_item},\n callback=self.parse_detail)\n\n # 提取下一页并交给scrapy下载\n selector_list = response.css('.page li a::attr(href)')\n # 如果最后一页的最后一个主题url未被持久化则继续爬取\n if not is_last_item_finished:\n if selector_list:\n last_page_url = None\n current_url_page = response.xpath('//li[@class=\"thisclass\"]//text()').extract_first()\n # 如果当前页是第一页则获取最后一页url\n if current_url_page and int(current_url_page) == 1:\n last_page_url = selector_list[-1].extract()\n\n next_url = selector_list[-2].extract()\n if next_url == last_page_url:\n album_last_page_url = response.urljoin(last_page_url)\n self.logger.info(\"Last page:%s\" % album_last_page_url)\n else:\n self.logger.info(\"Next page:%s\" % response.urljoin(next_url))\n yield response.follow(url=next_url,\n meta={\"album_last_page_url\": album_last_page_url},\n callback=self.parse)\n else:\n self.logger.info(\"selector_list is None\")\n self.logger.info(\"重复次数:%s\" % repeated_count)\n else:\n self.logger.info(\"Stop crawler. 
None Next page!\")\n\n def parse_album_item(self, album_node, album_url, album_url_object_id, category):\n album_title = album_node.css('.p a img::attr(alt)').extract_first().strip()\n cover_url = album_node.css('.p a img::attr(src)').extract_first().strip()\n regex = \"\\d+\\.\\d+.\\d+\\s+No\\.\\d+|\\d+\\-\\d+-\\d+\\s+No\\.\\d+\"\n number_group = re.findall(regex, album_title)\n if len(number_group) > 0:\n number = number_group[0]\n else:\n number = \"No.unknown\"\n create_date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n album_item = AlbumItem()\n album_item['category'] = category\n album_item['album_url'] = album_url\n album_item['album_url_object_id'] = album_url_object_id\n album_item['album_title'] = album_title\n album_item['cover_url'] = cover_url\n album_item['number'] = number\n album_item['create_date'] = create_date\n return album_item\n\n def parse_detail(self, response):\n self.album_item = response.meta.get(\"AlbumItem\")\n self.album_image_relation_item['album_item'] = self.album_item\n self.parse_album_image_item(response)\n # 详情页分页链接,循环生成所有子页面的请求\n relative_next_page_list = response.css('.page li a::attr(href)').extract()\n # 使用gevent协程池提升网络IO处理效率\n next_page_threads = [\n self.gevent_pool.spawn(self.get_album_image_item_list, response.urljoin(relative_next_page))\n for relative_next_page in relative_next_page_list[2:-1]\n ]\n gevent.joinall(next_page_threads)\n self.album_image_relation_item['album_image_item_list'] = self.album_image_item_list\n # 重新初始化\n self.album_image_item_list = []\n yield self.album_image_relation_item\n\n def get_album_image_item_list(self, abs_next_page):\n \"\"\"\n 使用下页绝对路径同步请求\n :param abs_next_page:\n :return:\n \"\"\"\n resp = requests.get(abs_next_page)\n if resp.status_code == 200:\n encoding = requests.utils.get_encodings_from_content(resp.text)\n resp.encoding = encoding[0]\n self.parse_album_image_item(etree.HTML(resp.text))\n else:\n self.logger.warn(\"下载此页{}失败,返回的状态码为{}\".format(abs_next_page, 
resp.status_code))\n\n def parse_album_image_item(self, response):\n \"\"\"\n 解析item并返回给pipelines\n :param response: 如果response类型是继承自scrapy的TextResponse类则使用scrapy的Selector来解析,否则使用lxml来解析\n :return:\n \"\"\"\n if isinstance(response, HtmlResponse):\n item_title = response.xpath('//div[@class=\"content\"]/h1/text()').extract_first().strip()\n publish_date = response.xpath('//div[@class=\"tit\"]/span/text()').extract_first().split(\":\")[1]\n image_link_list = response.css('.contents a img::attr(src)').extract()\n else:\n item_title = response.xpath('//div[@class=\"content\"]/h1/text()')[0].strip()\n publish_date = response.xpath('//div[@class=\"tit\"]/span/text()')[0].split(\":\")[1]\n image_link_list = response.xpath('//div[@class=\"contents\"]/a/img')\n image_link_list = [image_link.attrib['src'] for image_link in image_link_list]\n\n regex = \"\\s?\\w+[^\\w]?\"\n regex_group = re.findall(regex, item_title)\n stage_name = \"unknown\"\n if len(regex_group) > 0:\n str = regex_group[-1]\n if \"[\" in str:\n stage_name = str.split(\"[\")[0].strip()\n elif \"(\" in str:\n stage_name = str.split(\"(\")[0].strip()\n elif re.match('[^\\d*]', str):\n stage_name = re.match('[^\\d*]', str).group()\n\n # 详情页多个图片链接\n for image_url in image_link_list:\n album_image_item = AlbumImageItem()\n album_image_item['item_url'] = image_url\n album_image_item['item_url_object_id'] = self.get_md5(image_url)\n item_url_list_json = \"{}\"\n album_image_item['item_url_list_json'] = item_url_list_json\n album_image_item['item_title'] = item_title\n album_image_item['stage_name'] = stage_name\n album_image_item['publish_date'] = publish_date\n self.album_image_item_list.append(album_image_item)\n return self.album_image_item_list\n\n @staticmethod\n def get_md5(param):\n if isinstance(param, str):\n param = param.encode()\n m = hashlib.md5()\n m.update(param)\n return m.hexdigest()\n\n @staticmethod\n def sub_url_scheme(website, replace_str):\n scheme_regex = \"^(http://|https://)\"\n return 
re.sub(scheme_regex, replace_str, website)\n",
"step-ids": [
3,
6,
8,
9,
13
]
}
|
[
3,
6,
8,
9,
13
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def grouping(w):
d = dd(list)
for k, v in ((len([y for y in x if y.isupper()]), x) for x in sorted(w,
key=str.casefold)):
d[k].append(v)
return dict(sorted(d.items()))
<|reserved_special_token_1|>
from collections import defaultdict as dd
def grouping(w):
d = dd(list)
for k, v in ((len([y for y in x if y.isupper()]), x) for x in sorted(w,
key=str.casefold)):
d[k].append(v)
return dict(sorted(d.items()))
|
flexible
|
{
"blob_id": "545794cf4f0b2ab63b6a90951a78f8bdaca3c9e6",
"index": 390,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef grouping(w):\n d = dd(list)\n for k, v in ((len([y for y in x if y.isupper()]), x) for x in sorted(w,\n key=str.casefold)):\n d[k].append(v)\n return dict(sorted(d.items()))\n",
"step-3": "from collections import defaultdict as dd\n\n\ndef grouping(w):\n d = dd(list)\n for k, v in ((len([y for y in x if y.isupper()]), x) for x in sorted(w,\n key=str.casefold)):\n d[k].append(v)\n return dict(sorted(d.items()))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/python3
"""
list = list(range(97, 123)
for (i in list):
if (i % 2 == 0):
i = (i - 32)
"""
for letter in "zYxWvUtSrQpOnMlKjIhGfEdCbA":
print('{:s}'.format(letter), end = "")
|
normal
|
{
"blob_id": "55a061a1c0cd20e5ab7413c671bc03573de1bbdf",
"index": 7754,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor letter in 'zYxWvUtSrQpOnMlKjIhGfEdCbA':\n print('{:s}'.format(letter), end='')\n",
"step-3": "#!/usr/bin/python3\n\"\"\"\nlist = list(range(97, 123)\nfor (i in list):\n if (i % 2 == 0):\n i = (i - 32)\n\"\"\"\nfor letter in \"zYxWvUtSrQpOnMlKjIhGfEdCbA\":\n print('{:s}'.format(letter), end = \"\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [path('', ListUser.as_view(), name='list'), path('register/',
UserRegister.as_view(), name='register'), path('login/', UserLogin.
as_view(), name='login'), path('delete/', UserDelete.as_view(), name=
'delete'), path('update/', UserUpdate.as_view(), name='update')]
<|reserved_special_token_1|>
from django.urls import path
from .views import *
urlpatterns = [path('', ListUser.as_view(), name='list'), path('register/',
UserRegister.as_view(), name='register'), path('login/', UserLogin.
as_view(), name='login'), path('delete/', UserDelete.as_view(), name=
'delete'), path('update/', UserUpdate.as_view(), name='update')]
<|reserved_special_token_1|>
from django.urls import path
from .views import *
urlpatterns = [
path('', ListUser.as_view() , name = 'list'),
path('register/', UserRegister.as_view() , name = 'register'),
path('login/', UserLogin.as_view() , name = 'login'),
path('delete/' , UserDelete.as_view() , name ='delete'),
path('update/' , UserUpdate.as_view() , name = 'update'),
]
|
flexible
|
{
"blob_id": "5fe4f2738285d2f4b8bbfee2c4c6d15665737ea4",
"index": 8627,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', ListUser.as_view(), name='list'), path('register/',\n UserRegister.as_view(), name='register'), path('login/', UserLogin.\n as_view(), name='login'), path('delete/', UserDelete.as_view(), name=\n 'delete'), path('update/', UserUpdate.as_view(), name='update')]\n",
"step-3": "from django.urls import path\nfrom .views import *\nurlpatterns = [path('', ListUser.as_view(), name='list'), path('register/',\n UserRegister.as_view(), name='register'), path('login/', UserLogin.\n as_view(), name='login'), path('delete/', UserDelete.as_view(), name=\n 'delete'), path('update/', UserUpdate.as_view(), name='update')]\n",
"step-4": "from django.urls import path\nfrom .views import *\n\nurlpatterns = [\n path('', ListUser.as_view() , name = 'list'),\n path('register/', UserRegister.as_view() , name = 'register'),\n path('login/', UserLogin.as_view() , name = 'login'),\n path('delete/' , UserDelete.as_view() , name ='delete'),\n path('update/' , UserUpdate.as_view() , name = 'update'),\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sys
import random
#coming into existence, all does not begin and end at this moment;
#not yet fully conscious, you pick up only snippets of your environment
for line in sys.stdin:
line = line.strip()
randLow = random.randint(0, 10)
randHigh = random.randint(11, 20)
print line[randLow:randHigh]
|
normal
|
{
"blob_id": "f3d61a9aa4205e91811f17c4e9520811445cc6a9",
"index": 3957,
"step-1": "import sys\nimport random\n\n#coming into existence, all does not begin and end at this moment; \n#not yet fully conscious, you pick up only snippets of your environment\nfor line in sys.stdin:\n\tline = line.strip()\n\n\trandLow = random.randint(0, 10)\n\trandHigh = random.randint(11, 20)\n\n\tprint line[randLow:randHigh]",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def kepler(mean_anomaly, eccentricity):
mean_anomaly = np.ascontiguousarray(mean_anomaly, dtype=np.float64)
eccentricity = np.ascontiguousarray(eccentricity, dtype=np.float64)
sinf = np.empty_like(mean_anomaly)
cosf = np.empty_like(mean_anomaly)
driver.solve_kepler(mean_anomaly, eccentricity, sinf, cosf)
return sinf, cosf
<|reserved_special_token_0|>
def contact_points(a, e, cosw, sinw, cosi, sini, L):
a = np.ascontiguousarray(a, dtype=np.float64)
e = np.ascontiguousarray(e, dtype=np.float64)
cosw = np.ascontiguousarray(cosw, dtype=np.float64)
sinw = np.ascontiguousarray(sinw, dtype=np.float64)
cosi = np.ascontiguousarray(cosi, dtype=np.float64)
sini = np.ascontiguousarray(sini, dtype=np.float64)
L = np.ascontiguousarray(L, dtype=np.float64)
M_left = np.empty_like(a)
M_right = np.empty_like(a)
flag = np.empty_like(a, dtype=np.int32)
driver.contact_points(a, e, cosw, sinw, cosi, sini, L, M_left, M_right,
flag)
return M_left, M_right, flag
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def kepler(mean_anomaly, eccentricity):
mean_anomaly = np.ascontiguousarray(mean_anomaly, dtype=np.float64)
eccentricity = np.ascontiguousarray(eccentricity, dtype=np.float64)
sinf = np.empty_like(mean_anomaly)
cosf = np.empty_like(mean_anomaly)
driver.solve_kepler(mean_anomaly, eccentricity, sinf, cosf)
return sinf, cosf
def quad_solution_vector(b, r):
b = np.ascontiguousarray(b, dtype=np.float64)
r = np.ascontiguousarray(r, dtype=np.float64)
s = np.empty(r.shape + (3,), dtype=np.float64)
driver.quad_solution_vector(b, r, s)
return s
def contact_points(a, e, cosw, sinw, cosi, sini, L):
a = np.ascontiguousarray(a, dtype=np.float64)
e = np.ascontiguousarray(e, dtype=np.float64)
cosw = np.ascontiguousarray(cosw, dtype=np.float64)
sinw = np.ascontiguousarray(sinw, dtype=np.float64)
cosi = np.ascontiguousarray(cosi, dtype=np.float64)
sini = np.ascontiguousarray(sini, dtype=np.float64)
L = np.ascontiguousarray(L, dtype=np.float64)
M_left = np.empty_like(a)
M_right = np.empty_like(a)
flag = np.empty_like(a, dtype=np.int32)
driver.contact_points(a, e, cosw, sinw, cosi, sini, L, M_left, M_right,
flag)
return M_left, M_right, flag
<|reserved_special_token_1|>
__all__ = ['kepler', 'quad_solution_vector', 'contact_points']
<|reserved_special_token_0|>
def kepler(mean_anomaly, eccentricity):
mean_anomaly = np.ascontiguousarray(mean_anomaly, dtype=np.float64)
eccentricity = np.ascontiguousarray(eccentricity, dtype=np.float64)
sinf = np.empty_like(mean_anomaly)
cosf = np.empty_like(mean_anomaly)
driver.solve_kepler(mean_anomaly, eccentricity, sinf, cosf)
return sinf, cosf
def quad_solution_vector(b, r):
b = np.ascontiguousarray(b, dtype=np.float64)
r = np.ascontiguousarray(r, dtype=np.float64)
s = np.empty(r.shape + (3,), dtype=np.float64)
driver.quad_solution_vector(b, r, s)
return s
def contact_points(a, e, cosw, sinw, cosi, sini, L):
a = np.ascontiguousarray(a, dtype=np.float64)
e = np.ascontiguousarray(e, dtype=np.float64)
cosw = np.ascontiguousarray(cosw, dtype=np.float64)
sinw = np.ascontiguousarray(sinw, dtype=np.float64)
cosi = np.ascontiguousarray(cosi, dtype=np.float64)
sini = np.ascontiguousarray(sini, dtype=np.float64)
L = np.ascontiguousarray(L, dtype=np.float64)
M_left = np.empty_like(a)
M_right = np.empty_like(a)
flag = np.empty_like(a, dtype=np.int32)
driver.contact_points(a, e, cosw, sinw, cosi, sini, L, M_left, M_right,
flag)
return M_left, M_right, flag
<|reserved_special_token_1|>
__all__ = ['kepler', 'quad_solution_vector', 'contact_points']
import numpy as np
from .. import driver
def kepler(mean_anomaly, eccentricity):
mean_anomaly = np.ascontiguousarray(mean_anomaly, dtype=np.float64)
eccentricity = np.ascontiguousarray(eccentricity, dtype=np.float64)
sinf = np.empty_like(mean_anomaly)
cosf = np.empty_like(mean_anomaly)
driver.solve_kepler(mean_anomaly, eccentricity, sinf, cosf)
return sinf, cosf
def quad_solution_vector(b, r):
b = np.ascontiguousarray(b, dtype=np.float64)
r = np.ascontiguousarray(r, dtype=np.float64)
s = np.empty(r.shape + (3,), dtype=np.float64)
driver.quad_solution_vector(b, r, s)
return s
def contact_points(a, e, cosw, sinw, cosi, sini, L):
a = np.ascontiguousarray(a, dtype=np.float64)
e = np.ascontiguousarray(e, dtype=np.float64)
cosw = np.ascontiguousarray(cosw, dtype=np.float64)
sinw = np.ascontiguousarray(sinw, dtype=np.float64)
cosi = np.ascontiguousarray(cosi, dtype=np.float64)
sini = np.ascontiguousarray(sini, dtype=np.float64)
L = np.ascontiguousarray(L, dtype=np.float64)
M_left = np.empty_like(a)
M_right = np.empty_like(a)
flag = np.empty_like(a, dtype=np.int32)
driver.contact_points(a, e, cosw, sinw, cosi, sini, L, M_left, M_right,
flag)
return M_left, M_right, flag
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
__all__ = ["kepler", "quad_solution_vector", "contact_points"]
import numpy as np
from .. import driver
def kepler(mean_anomaly, eccentricity):
mean_anomaly = np.ascontiguousarray(mean_anomaly, dtype=np.float64)
eccentricity = np.ascontiguousarray(eccentricity, dtype=np.float64)
sinf = np.empty_like(mean_anomaly)
cosf = np.empty_like(mean_anomaly)
driver.solve_kepler(mean_anomaly, eccentricity, sinf, cosf)
return sinf, cosf
def quad_solution_vector(b, r):
b = np.ascontiguousarray(b, dtype=np.float64)
r = np.ascontiguousarray(r, dtype=np.float64)
s = np.empty(r.shape + (3,), dtype=np.float64)
driver.quad_solution_vector(b, r, s)
return s
def contact_points(a, e, cosw, sinw, cosi, sini, L):
a = np.ascontiguousarray(a, dtype=np.float64)
e = np.ascontiguousarray(e, dtype=np.float64)
cosw = np.ascontiguousarray(cosw, dtype=np.float64)
sinw = np.ascontiguousarray(sinw, dtype=np.float64)
cosi = np.ascontiguousarray(cosi, dtype=np.float64)
sini = np.ascontiguousarray(sini, dtype=np.float64)
L = np.ascontiguousarray(L, dtype=np.float64)
M_left = np.empty_like(a)
M_right = np.empty_like(a)
flag = np.empty_like(a, dtype=np.int32)
driver.contact_points(
a, e, cosw, sinw, cosi, sini, L, M_left, M_right, flag
)
return M_left, M_right, flag
|
flexible
|
{
"blob_id": "ccd32a6ca98c205a6f5d4936288392251522db29",
"index": 4896,
"step-1": "<mask token>\n\n\ndef kepler(mean_anomaly, eccentricity):\n mean_anomaly = np.ascontiguousarray(mean_anomaly, dtype=np.float64)\n eccentricity = np.ascontiguousarray(eccentricity, dtype=np.float64)\n sinf = np.empty_like(mean_anomaly)\n cosf = np.empty_like(mean_anomaly)\n driver.solve_kepler(mean_anomaly, eccentricity, sinf, cosf)\n return sinf, cosf\n\n\n<mask token>\n\n\ndef contact_points(a, e, cosw, sinw, cosi, sini, L):\n a = np.ascontiguousarray(a, dtype=np.float64)\n e = np.ascontiguousarray(e, dtype=np.float64)\n cosw = np.ascontiguousarray(cosw, dtype=np.float64)\n sinw = np.ascontiguousarray(sinw, dtype=np.float64)\n cosi = np.ascontiguousarray(cosi, dtype=np.float64)\n sini = np.ascontiguousarray(sini, dtype=np.float64)\n L = np.ascontiguousarray(L, dtype=np.float64)\n M_left = np.empty_like(a)\n M_right = np.empty_like(a)\n flag = np.empty_like(a, dtype=np.int32)\n driver.contact_points(a, e, cosw, sinw, cosi, sini, L, M_left, M_right,\n flag)\n return M_left, M_right, flag\n",
"step-2": "<mask token>\n\n\ndef kepler(mean_anomaly, eccentricity):\n mean_anomaly = np.ascontiguousarray(mean_anomaly, dtype=np.float64)\n eccentricity = np.ascontiguousarray(eccentricity, dtype=np.float64)\n sinf = np.empty_like(mean_anomaly)\n cosf = np.empty_like(mean_anomaly)\n driver.solve_kepler(mean_anomaly, eccentricity, sinf, cosf)\n return sinf, cosf\n\n\ndef quad_solution_vector(b, r):\n b = np.ascontiguousarray(b, dtype=np.float64)\n r = np.ascontiguousarray(r, dtype=np.float64)\n s = np.empty(r.shape + (3,), dtype=np.float64)\n driver.quad_solution_vector(b, r, s)\n return s\n\n\ndef contact_points(a, e, cosw, sinw, cosi, sini, L):\n a = np.ascontiguousarray(a, dtype=np.float64)\n e = np.ascontiguousarray(e, dtype=np.float64)\n cosw = np.ascontiguousarray(cosw, dtype=np.float64)\n sinw = np.ascontiguousarray(sinw, dtype=np.float64)\n cosi = np.ascontiguousarray(cosi, dtype=np.float64)\n sini = np.ascontiguousarray(sini, dtype=np.float64)\n L = np.ascontiguousarray(L, dtype=np.float64)\n M_left = np.empty_like(a)\n M_right = np.empty_like(a)\n flag = np.empty_like(a, dtype=np.int32)\n driver.contact_points(a, e, cosw, sinw, cosi, sini, L, M_left, M_right,\n flag)\n return M_left, M_right, flag\n",
"step-3": "__all__ = ['kepler', 'quad_solution_vector', 'contact_points']\n<mask token>\n\n\ndef kepler(mean_anomaly, eccentricity):\n mean_anomaly = np.ascontiguousarray(mean_anomaly, dtype=np.float64)\n eccentricity = np.ascontiguousarray(eccentricity, dtype=np.float64)\n sinf = np.empty_like(mean_anomaly)\n cosf = np.empty_like(mean_anomaly)\n driver.solve_kepler(mean_anomaly, eccentricity, sinf, cosf)\n return sinf, cosf\n\n\ndef quad_solution_vector(b, r):\n b = np.ascontiguousarray(b, dtype=np.float64)\n r = np.ascontiguousarray(r, dtype=np.float64)\n s = np.empty(r.shape + (3,), dtype=np.float64)\n driver.quad_solution_vector(b, r, s)\n return s\n\n\ndef contact_points(a, e, cosw, sinw, cosi, sini, L):\n a = np.ascontiguousarray(a, dtype=np.float64)\n e = np.ascontiguousarray(e, dtype=np.float64)\n cosw = np.ascontiguousarray(cosw, dtype=np.float64)\n sinw = np.ascontiguousarray(sinw, dtype=np.float64)\n cosi = np.ascontiguousarray(cosi, dtype=np.float64)\n sini = np.ascontiguousarray(sini, dtype=np.float64)\n L = np.ascontiguousarray(L, dtype=np.float64)\n M_left = np.empty_like(a)\n M_right = np.empty_like(a)\n flag = np.empty_like(a, dtype=np.int32)\n driver.contact_points(a, e, cosw, sinw, cosi, sini, L, M_left, M_right,\n flag)\n return M_left, M_right, flag\n",
"step-4": "__all__ = ['kepler', 'quad_solution_vector', 'contact_points']\nimport numpy as np\nfrom .. import driver\n\n\ndef kepler(mean_anomaly, eccentricity):\n mean_anomaly = np.ascontiguousarray(mean_anomaly, dtype=np.float64)\n eccentricity = np.ascontiguousarray(eccentricity, dtype=np.float64)\n sinf = np.empty_like(mean_anomaly)\n cosf = np.empty_like(mean_anomaly)\n driver.solve_kepler(mean_anomaly, eccentricity, sinf, cosf)\n return sinf, cosf\n\n\ndef quad_solution_vector(b, r):\n b = np.ascontiguousarray(b, dtype=np.float64)\n r = np.ascontiguousarray(r, dtype=np.float64)\n s = np.empty(r.shape + (3,), dtype=np.float64)\n driver.quad_solution_vector(b, r, s)\n return s\n\n\ndef contact_points(a, e, cosw, sinw, cosi, sini, L):\n a = np.ascontiguousarray(a, dtype=np.float64)\n e = np.ascontiguousarray(e, dtype=np.float64)\n cosw = np.ascontiguousarray(cosw, dtype=np.float64)\n sinw = np.ascontiguousarray(sinw, dtype=np.float64)\n cosi = np.ascontiguousarray(cosi, dtype=np.float64)\n sini = np.ascontiguousarray(sini, dtype=np.float64)\n L = np.ascontiguousarray(L, dtype=np.float64)\n M_left = np.empty_like(a)\n M_right = np.empty_like(a)\n flag = np.empty_like(a, dtype=np.int32)\n driver.contact_points(a, e, cosw, sinw, cosi, sini, L, M_left, M_right,\n flag)\n return M_left, M_right, flag\n",
"step-5": "# -*- coding: utf-8 -*-\n\n__all__ = [\"kepler\", \"quad_solution_vector\", \"contact_points\"]\n\n\nimport numpy as np\n\nfrom .. import driver\n\n\ndef kepler(mean_anomaly, eccentricity):\n mean_anomaly = np.ascontiguousarray(mean_anomaly, dtype=np.float64)\n eccentricity = np.ascontiguousarray(eccentricity, dtype=np.float64)\n sinf = np.empty_like(mean_anomaly)\n cosf = np.empty_like(mean_anomaly)\n driver.solve_kepler(mean_anomaly, eccentricity, sinf, cosf)\n return sinf, cosf\n\n\ndef quad_solution_vector(b, r):\n b = np.ascontiguousarray(b, dtype=np.float64)\n r = np.ascontiguousarray(r, dtype=np.float64)\n s = np.empty(r.shape + (3,), dtype=np.float64)\n driver.quad_solution_vector(b, r, s)\n return s\n\n\ndef contact_points(a, e, cosw, sinw, cosi, sini, L):\n a = np.ascontiguousarray(a, dtype=np.float64)\n e = np.ascontiguousarray(e, dtype=np.float64)\n cosw = np.ascontiguousarray(cosw, dtype=np.float64)\n sinw = np.ascontiguousarray(sinw, dtype=np.float64)\n cosi = np.ascontiguousarray(cosi, dtype=np.float64)\n sini = np.ascontiguousarray(sini, dtype=np.float64)\n L = np.ascontiguousarray(L, dtype=np.float64)\n M_left = np.empty_like(a)\n M_right = np.empty_like(a)\n flag = np.empty_like(a, dtype=np.int32)\n driver.contact_points(\n a, e, cosw, sinw, cosi, sini, L, M_left, M_right, flag\n )\n return M_left, M_right, flag\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
def calculate(self, s: str) ->int:
nums = []
ops = []
def cal():
a = nums.pop()
b = nums.pop()
c = ops.pop()
if c == '+':
nums.append(b + a)
elif c == '-':
nums.append(b - a)
elif c == '*':
nums.append(b * a)
else:
nums.append(int(b / a))
i = 0
while i < len(s):
if s[i] == ' ':
i += 1
continue
elif s[i].isdigit():
t = ''
while i < len(s) and s[i].isdigit():
t += s[i]
i += 1
nums.append(int(t))
elif not ops:
ops.append(s[i])
i += 1
elif s[i] == '+' or s[i] == '-':
while ops:
cal()
ops.append(s[i])
i += 1
else:
while ops and (ops[-1] == '*' or ops[-1] == '/'):
cal()
ops.append(s[i])
i += 1
while ops:
cal()
return nums[-1]
|
flexible
|
{
"blob_id": "0ff8743e54509a76e9a7add4be9da279bdee82a6",
"index": 5032,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def calculate(self, s: str) ->int:\n nums = []\n ops = []\n\n def cal():\n a = nums.pop()\n b = nums.pop()\n c = ops.pop()\n if c == '+':\n nums.append(b + a)\n elif c == '-':\n nums.append(b - a)\n elif c == '*':\n nums.append(b * a)\n else:\n nums.append(int(b / a))\n i = 0\n while i < len(s):\n if s[i] == ' ':\n i += 1\n continue\n elif s[i].isdigit():\n t = ''\n while i < len(s) and s[i].isdigit():\n t += s[i]\n i += 1\n nums.append(int(t))\n elif not ops:\n ops.append(s[i])\n i += 1\n elif s[i] == '+' or s[i] == '-':\n while ops:\n cal()\n ops.append(s[i])\n i += 1\n else:\n while ops and (ops[-1] == '*' or ops[-1] == '/'):\n cal()\n ops.append(s[i])\n i += 1\n while ops:\n cal()\n return nums[-1]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import random
import colorama
from termcolor import colored
from reusables.string_manipulation import int_to_words
from app.common_functions import comma_separated, add_dicts_together, remove_little_words, odds
from app.load_data import items, buildings, wild_mobs, names, adjectives
colorama.init()
def find_unique_names(quantity, name_list, taken_names):
    """Pick up to ``quantity`` not-yet-taken names from ``name_list``.

    The still-available candidates are shuffled so the selection is random;
    fewer than ``quantity`` names are returned when the pool is too small.
    """
    pool = [candidate for candidate in name_list if candidate not in taken_names]
    random.shuffle(pool)
    del pool[quantity:]
    return pool
def dropper(rarity):
    """Return a random drop count (0-10) weighted by the rarity tier.

    A random number of rolls (0-10) is made; each roll succeeds with
    probability 1/(ceiling+1), where rarer tiers have a higher ceiling.
    """
    results = {'super rare': 100,
               'rare': 50,
               'uncommon': 25,
               'common': 5,
               'super common': 2}
    ceiling = results[rarity]
    rolls = random.randint(0, 10)
    return sum(1 for _ in range(rolls) if random.randint(0, ceiling) == 1)
def drop_building(dictionary, p, limit=None):
    """Randomly generate Building objects for the player's current square.

    :param dictionary: mapping of building name -> attribute dict; each value
        must contain 'rarity' and 'category', and its remaining keys are
        forwarded to the Building constructor.
    :param p: the player; generated names are recorded on ``p.square`` so
        they stay unique within the square.
    :param limit: maximum total number of buildings to create; defaults to
        the number of available adjectives.
    :return: list of the newly created Building objects.
    """
    limit = limit or len(adjectives)
    drops_i = []
    for k, v in dictionary.items():
        quantity = dropper(v['rarity'])
        # Cap this building type at the remaining allowance, then spend it.
        quantity = quantity if quantity < limit else limit
        limit -= quantity
        if quantity:
            if quantity > 1 and v['category'] != 'residence':
                # Duplicate non-residences are split randomly between two
                # naming schemes: "<Name>'s <Shop>" and "the <adjective> <Shop>".
                n = random.randint(0, quantity)
                unique_names = find_unique_names(quantity - n, names, p.square.unique_building_names)
                p.square.unique_building_names += unique_names
                for i in range(0, quantity - n):
                    drops_i.append(Building(name=f"{unique_names[i]}'s {remove_little_words(k).capitalize()}", p=p, **v))
                unique_adjectives = find_unique_names(n, adjectives, p.square.unique_building_names)
                p.square.unique_building_names += unique_adjectives
                for i in range(0, n):
                    drops_i.append(Building(name=f"the {unique_adjectives[i]} {remove_little_words(k).capitalize()}", p=p, **v))
            elif quantity > 1 and v['category'] == 'residence':
                # Duplicate residences always use the "<Name>'s house" scheme.
                unique_house_names = find_unique_names(quantity, names, p.square.unique_house_names)
                p.square.unique_house_names += unique_house_names
                for i in range(0, quantity):
                    drops_i.append(Building(name=f"{unique_house_names[i]}'s {remove_little_words(k)}", p=p, **v))
            else:
                # A single building keeps its generic dictionary name.
                drops_i.append(Building(name=k, p=p, **v))
    return drops_i
def drop_mob(dictionary, p, limit=None, square=None):
    """Randomly generate Mob objects for a map square.

    :param dictionary: mapping of mob name -> attribute dict; each value must
        contain 'rarity', and its remaining keys are forwarded to Mob.
    :param p: the player; newly allocated names are appended to
        ``p.square.unique_mob_names``.
    :param limit: maximum total number of mobs to create; defaults to the
        number of names not yet used on ``square``.
    :param square: square whose name registry is consulted for uniqueness;
        defaults to the player's current square.
    :return: list of the newly created Mob objects.
    """
    square = square or p.square
    limit = limit or len(names) - len(square.unique_mob_names)
    drops_i = []
    for k, v in dictionary.items():
        quantity = dropper(v['rarity'])
        # Cap this mob type at the remaining allowance, then spend it.
        quantity = quantity if quantity < limit else limit
        limit -= quantity
        if quantity:
            if quantity > 1:
                # Duplicates each get a personal name: "<kind> named <Name>".
                # NOTE(review): names are read from `square` but recorded on
                # `p.square` -- when the caller passes a different square these
                # diverge; confirm which registry is intended.
                unique_names = find_unique_names(quantity, names, square.unique_mob_names)
                p.square.unique_mob_names += unique_names
                for i in range(0, len(unique_names)):
                    drops_i.append(Mob(name=f"{k} named {unique_names[i]}", p=p, **v))
            else:
                if k not in [n.name for n in p.square.mobs]:
                    drops_i.append(Mob(name=k, p=p, **v))
                else:
                    # The generic name is already on the square, so give the
                    # newcomer a personal name to distinguish it.
                    name = find_unique_names(1, names, square.unique_mob_names)[0]
                    drops_i.append(Mob(name=f"{k} named {name}", p=p, **v))
    return drops_i
def drop_item(dictionary):
    """Roll every entry's rarity and build Item objects for the non-zero drops."""
    drops = []
    for item_name, attributes in dictionary.items():
        rolled = dropper(attributes['rarity'])
        if rolled:
            drops.append(Item(name=item_name, quantity=rolled, **attributes))
    return drops
class MapSquare:
    """One tile of the world map.

    Holds the terrain type plus the items, buildings, and mobs currently on
    the square, along with the name registries used to keep generated
    mob/building names unique within the square.
    """

    # Terrain types a square may be assigned when none is supplied.
    SQUARE_TYPES = ("forest", "mountains", "desert", "city", "swamp", "ocean")

    def __init__(self, name="", square_type=None):
        # Fall back to a random terrain type when the caller does not pick one.
        self.square_type = square_type or random.choice(self.SQUARE_TYPES)
        self.name = name
        # Names already handed out on this square, so generated mobs,
        # buildings, and houses never collide.
        self.unique_mob_names = []
        self.unique_building_names = []
        self.unique_house_names = []
        # Per-instance contents.  These used to be mutable class attributes,
        # which meant every MapSquare shared the same three lists.
        self.mobs = []
        self.items = []
        self.buildings = []

    def generate_items(self):
        """Populate ``self.items`` with random drops for this terrain type."""
        self.items = drop_item(add_dicts_together(items["master"], items[self.square_type]))

    def generate_buildings(self, p):
        """Populate ``self.buildings`` with random buildings for this terrain type."""
        self.buildings = drop_building(add_dicts_together(buildings["master"], buildings[self.square_type]), p)

    def generate_mobs(self, p):
        """Populate ``self.mobs`` with random wild mobs for this terrain type."""
        self.mobs = drop_mob(add_dicts_together(wild_mobs["master"], wild_mobs[self.square_type]), p)

    def clean_up_map(self):
        """ Remove items with quantity of zero from the map inventory"""
        self.items = [i for i in self.items if i.quantity != 0]

    @staticmethod
    def map_picture(the_map, p):
        """With the player's location in the center, draw a 5 x 5 map with map square type
        and coordinates in each square"""
        # Top-left corner of the visible 5x5 window.
        xy = (p.location[0] - 2, p.location[1] + 2)
        map_coords = []
        for y in range(0, 5):
            row = [(xy[0] + x, xy[1] - y) for x in range(0, 5)]
            map_coords.append(row)
        pretty_map = []
        for r in map_coords:
            row = []
            for coordinates in r:
                if coordinates in the_map.keys():
                    # '*' marks the quest destination, '$' the job location.
                    if p.quest and p.job and p.quest[1] == coordinates and p.job.location == coordinates:
                        star = '*$ '
                    elif p.quest and p.quest[1] == coordinates:
                        star = ' * '
                    elif p.job and p.job.location == coordinates:
                        star = ' $ '
                    else:
                        star = '   '
                    row.append("|{!s:9}{}|".format(the_map[coordinates].square_type, star))
                else:
                    row.append("|{!s:12}|".format(' '))
            pretty_map.append(row)
        for row in pretty_map:
            print(''.join(row))
class Player:
def __init__(self, name, location):
self.name = name
self.location = location
self.square = None
self.money = 0
self.quest = None
self.job = None
self.phase = "day"
self.equipped_weapon = None
self.major_armor = None
self.minor_armor = None
self.building_local = None
self.inventory = []
self.skills = {}
self.health = 100
self.greeting_count = 0
self.body_count = 0
self.assassination_count = 0
self.hit_list = []
self.death_count = 0
# TODO increase insurance cost every death?
self.food_count = 0
self.run_away_count = 0
self.speed_bonus = False
self.game_won = False
def game_over(self):
if self.game_won is False:
self.game_won = True
print(colored("You have won the game!", "green"))
print("You may continue playing to earn more achievements if you wish.")
if self.run_away_count == 0:
print("Congratulations, you have achieved the True Bravery achievement, having won the game without ever running away from a fight.")
if self.run_away_count > 100:
print("Congratulations, you have achieved the True Cowardice achievement, having won the game after running away from over 100 battles.")
def clean_up_inventory(self):
""" Remove items with quantity of zero from the map inventory"""
self.inventory = [i for i in self.inventory if i.quantity != 0]
def phase_change(self, the_map):
self.phase = 'day' if self.phase == 'night' else 'night'
for k, square in the_map.items():
if self.location != k:
square.generate_items()
for b in square.buildings:
if b.ware_list:
b.wares = drop_item(b.ware_list)
while not b.wares:
b.wares = drop_item(b.ware_list)
if b.name not in ('a castle', 'a volcanic base'):
jobs = {}
buiding_dict = add_dicts_together(buildings['master'], buildings[square.square_type])
for key, v in buiding_dict.items():
if key == b.name and v.get('jobs'):
for name, values in v['jobs'].items():
jobs[name] = values
b.jobs = b.drop_job(jobs)
if self.phase == 'day':
self.speed_bonus = False
for mob in square.mobs:
mob.health = 100
mob.irritation_level = 0
mob.quest = None if self.quest is None else mob.quest
if not square.mobs:
square.mobs = drop_mob(add_dicts_together(wild_mobs["master"], wild_mobs[self.square.square_type]),
self, limit=len(names), square=square)
def formatted_inventory(self):
formatted = []
for item in self.inventory:
if item.quantity > 1:
formatted.append(f"{int_to_words(item.quantity)} {item.plural}")
else:
formatted.append(item.name)
if formatted:
return comma_separated(formatted)
else:
return "nothing"
def pretty_inventory(self):
w = self.equipped_weapon
major = self.major_armor.defense if self.major_armor else 0
minor = self.minor_armor.defense if self.minor_armor else 0
armor_defense = (major + minor) * 5
armors = [self.major_armor.name if self.major_armor else None, self.minor_armor.name if self.minor_armor else None]
inventory = {'inventory_items': f"You have {self.formatted_inventory()} in your inventory.",
'weapon': f"You are wielding {int_to_words(w.quantity)} "
f"{remove_little_words(w.name) if w.quantity == 1 else w.plural}." if w else None,
'armor': f"You are wearing {' and '.join(x for x in armors if x)}, "
f"giving you a {armor_defense}% reduction in incoming damage." if self.minor_armor or self.major_armor else None}
return '\n'.join(v for v in inventory.values() if v)
def status(self):
skills = [f"{k}: {v}%." for k, v in self.skills.items()]
job = f"You have a job as a {self.job.name}." if self.job else None
quest = "You have a quest." if self.quest else None
if job and quest:
job_string = "\n".join([job, quest])
elif job or quest:
job_string = job if job else quest
else:
job_string = "You do not have a job, and you are not contributing to society."
status_string = {
'health': f'Currently, you have {self.health} health.',
'location': f'You are located on map coordinates {self.location}, '
f'which is {self.square.square_type}.',
'building_local': f'You are inside {self.building_local.name}.' if self.building_local else None,
'skills': '\n'.join(skills) if skills else "You don't have any skills.",
'money': f"You have ${self.money} in your wallet.",
'job': job_string}
return '\n'.join(v for v in status_string.values() if v)
def statistics(self):
    """Print the player's lifetime gameplay counters, one per line."""
    counters = [
        f"You have killed {self.body_count} mobs.",
        f"You have ran away from {self.run_away_count} battles.",
        f"You have eaten {self.food_count} items.",
        f"You have performed {self.assassination_count} assassinations.",
        f"You have talked to mobs {self.greeting_count} times.",
    ]
    for line in counters:
        print(line)
def view_hit_list(self):
    """Print the outstanding assassination targets, or a note if there are none."""
    if not self.hit_list:
        print("Looks like you don't know of anyone who needs to be dead.")
        return
    print(f"If you ever run across these shady characters, be sure to take their names off your list: {comma_separated(self.hit_list)}")
def increase_skill(self, skill, increase):
    """Raise mastery of *skill* by *increase* percent, creating it at 0 if new."""
    # dict.get covers both the existing-skill and first-time cases.
    self.skills[skill] = self.skills.get(skill, 0) + increase
    print(f"You have increased your mastery of {skill} by {increase}% for a total of {self.skills[skill]}%.")
class Item:
    """A game object that can be carried, traded, equipped, or consumed.

    Optional attributes default to None; `x or None` normalises any falsy
    input (0, '', False) to None so presence checks stay simple.
    """

    def __init__(self, name, quantity, plural, category=None, perishable=None,
                 flammable=None, rarity=None, price=None, weapon_rating=None, defense=None):
        self.name = name
        self.quantity = quantity
        self.plural = plural
        self.category = category or None
        self.perishable = perishable or None
        self.flammable = flammable or None
        self.rarity = rarity or None
        self.price = price or None
        self.weapon_rating = weapon_rating or None
        self.defense = defense or None

    def copy(self):
        """Return a new Item with the same attribute values.

        Bug fix: `price` was previously omitted from the copy, so duplicated
        items silently lost their price.
        """
        return Item(name=self.name, quantity=self.quantity, plural=self.plural, category=self.category,
                    perishable=self.perishable, flammable=self.flammable, rarity=self.rarity,
                    price=self.price, weapon_rating=self.weapon_rating, defense=self.defense)
class Building(object):
    """A structure on a map square that may sell wares, house mobs, and offer jobs.

    Drops for wares, mobs, and jobs are rolled at construction time via the
    module-level `drop_item` / `drop_mob` helpers. The two end-game buildings
    ('a castle', 'a volcanic base') additionally receive a boss mob and a
    top-salary job via `boss_mobs_and_jobs`.
    """

    def __init__(self, name, p, plural, category=None, rarity=None, ware_list=None, mobs=None, jobs=None):
        # p: the player object — used for location and as context when
        # spawning jobs and mobs.
        self.name = name
        self.p = p
        self.quantity = 1
        self.plural = plural
        # `x or None` normalises falsy inputs (empty dict/list) to None.
        self.category = category or None
        self.rarity = rarity or None
        self.ware_list = ware_list
        self.wares = self.drop_wares()
        self.mobs = drop_mob(mobs, p) if mobs else None
        self.jobs = self.drop_job(jobs) if jobs else None
        # End-game buildings always host a boss and a matching endgame job.
        if self.name in ('a castle', 'a volcanic base'):
            self.boss_mobs_and_jobs()

    def drop_wares(self):
        """Randomly roll goods for sale; re-rolls until at least one ware drops.

        Returns an empty list when the building has no ware list at all.
        """
        if self.ware_list:
            wares = drop_item(self.ware_list)
            # drop_item can come back empty — keep rolling so a shop is
            # never created with nothing to sell.
            while not wares:
                wares = drop_item(self.ware_list)
            return wares
        else:
            return []

    def drop_job(self, jobs):
        """Build Job objects for a random subset of the candidate job dict.

        Each candidate is included when odds(2) fires — presumably a 1-in-2
        roll; confirm against the module's odds() helper.
        """
        drops_i = []
        for k, v in jobs.items():
            if odds(2):
                drops_i.append(Job(name=k, location=self.p.location, **v))
        return drops_i

    def boss_mobs_and_jobs(self):
        """Replace this building's mobs with a single 500-health boss in
        super-rare gear, and set the matching endgame job for castles and
        volcanic bases."""
        boss_major_armors = [Item('a coat of impervious dragon scales', plural='coats of dragon scales', quantity=1, category='major armor', rarity='super rare', defense=5),
                             Item('an enchanted leather duster', plural='enchanted leather dusters', quantity=1, category='major armor', defense=5, rarity='super rare'),
                             Item('a coat of actual live grizzly bears', plural='coats of actual live grizzly bears', quantity=1, category='major armor', defense=5, rarity='super rare')]
        boss_minor_armors = [Item('wings of an angel', plural='wings of angels', quantity=1, rarity='super rare', category='minor armor', defense=5),
                             Item('an OSHA approved hard hat', plural='OSHA approved hard hats', quantity=1, rarity='super rare', category='minor armor', defense=5),
                             Item('a pair boots that were made for walkin', plural='pairs of boots that were made for walkin', quantity=1, rarity='super rare', category='minor armor', defense=5)]
        boss_weapons = [Item('an apache helicopter', plural='apache helicopters', rarity='super rare', weapon_rating=6, quantity=1),
                        Item('a trebuchet', plural='trebuchets', weapon_rating=6, quantity=1, rarity='super rare'),
                        Item('an army of attacking wizards', plural='armies of attacking wizards', weapon_rating=6, quantity=1, rarity='super rare')]
        boss_names = ["the Terrifying Dragon of Soul Slaying", "the Great Salamander of Darkness", "the Squirrel of Destiny", ]
        # Shuffle each pool and take index 0: one random pick per category.
        random.shuffle(boss_names)
        random.shuffle(boss_weapons)
        random.shuffle(boss_major_armors)
        random.shuffle(boss_minor_armors)
        boss = Mob(boss_names[0], self.p, plural=boss_names[0], rarity='super rare')
        boss.health = 500
        boss.equipped_weapon = boss_weapons[0]
        boss.major_armor = boss_major_armors[0]
        boss.minor_armor = boss_minor_armors[0]
        boss.irritation_level = 10
        self.mobs = [boss]
        if self.name == 'a castle':
            self.jobs = [Job('king of the realm', location=self.p.location, salary=1100)]
        if self.name == 'a volcanic base':
            self.jobs = [Job('evil overlord', location=self.p.location, salary=1100)]
class Job:
    """Employment a player can hold, tied to a fixed map location."""

    def __init__(self, name, location, skills_needed=None, salary=0, skills_learned=None, inventory_needed=None):
        self.name = name
        self.location = location
        # Normalise falsy inputs: empty collections become None, and a
        # falsy salary collapses to 0.
        self.skills_needed = skills_needed if skills_needed else None
        self.salary = salary if salary else 0
        self.skills_learned = skills_learned if skills_learned else None
        self.inventory_needed = inventory_needed if inventory_needed else None
        # No applications have been made for this job yet.
        self.application_attempts = 0
class Mob:
    """A non-player character encountered in buildings or the wild.

    Mobs carry randomly dropped inventory, auto-equip their best weapon and
    the first armor piece of each category found, and may offer quests
    (fetch quests or assassination contracts) to the player.
    """

    def __init__(self, name, p, plural, rarity, inventory=None):
        # p is the player; used for biome-appropriate drops and quest text.
        self.name = name
        self.p = p
        self.plural = plural
        self.quantity = 1
        self.rarity = rarity
        # NOTE(review): this rebinds the attribute over the staticmethod of
        # the same name — after this line `self.skills` is a list of two
        # skill names, no longer callable on this instance.
        self.skills = self.skills()
        self.quest = None
        # Default inventory: random drops valid for the player's current
        # square type, merged with the biome-independent 'master' pool.
        self.inventory = inventory or drop_item(add_dicts_together(items['master'], items[p.square.square_type]))
        self.health = 100
        self.equipped_weapon = self.equip()
        # Wear the first item of each armor category present in inventory.
        major = [x for x in self.inventory if x.category == 'major armor']
        minor = [x for x in self.inventory if x.category == 'minor armor']
        self.major_armor = major[0] if major else None
        self.minor_armor = minor[0] if minor else None
        self.irritation_level = 0

    def equip(self):
        """Move the highest-rated weapon out of inventory and return it.

        Returns None when the mob carries no weapon at all.
        """
        nice_weapons = []
        for i in self.inventory:
            try:
                if i.weapon_rating:
                    nice_weapons.append(i)
            except AttributeError:
                # Defensive: tolerate inventory entries without a
                # weapon_rating attribute.
                pass
        # Best weapon first.
        nice_weapons.sort(key=lambda x: x.weapon_rating, reverse=True)
        if nice_weapons:
            self.inventory.remove(nice_weapons[0])
            return nice_weapons[0]
        else:
            return None

    @staticmethod
    def skills():
        """ Pick the skills for a mob, these determine what a player can get from completing a quest """
        all_skills = ["strength", "patience", "cleanliness", "leadership", "communication",
                      "science", "math", "engineering", "intelligence", "driving"]
        random.shuffle(all_skills)
        # Two random distinct skills per mob.
        return all_skills[0:2]

    def generate_quest(self):
        """
        inventory based
        bring me x of an object to learn a skill
        """
        # Fetch quest branch — odds(3) is presumably a 1-in-3 roll; confirm
        # against the module's odds() helper.
        if odds(3):
            quest_items = add_dicts_together(items["master"], items[self.p.square.square_type])
            quest_item = random.choice(list(quest_items.keys()))
            # Quantity 0: the mob advertises the item without stocking it.
            i = Item(quest_item, 0, **quest_items[quest_item])
            self.inventory.append(i)
            # Rarer items are requested in smaller quantities.
            quantity = {'super rare': '1',
                        'rare': '2',
                        'uncommon': '3',
                        'common': '6',
                        'super common': '15'}
            q = quantity[i.rarity]
            # quest is (item, required count, prompt text).
            self.quest = i, int(q), f"{self.p.name}, if you bring " \
                                    f"me {q} {i.plural if int(q) > 1 else remove_little_words(i.name)}, " \
                                    f"I will teach you a valuable skill."
            return
        elif odds(5):
            # Assassination contract: collect every known mob type from both
            # building definitions and wild spawns, then pair a random type
            # with a random name and add it to the player's hit list.
            mobs = []
            for biome, building in buildings.items():
                for b, attributes in building.items():
                    if attributes.get('mobs'):
                        for k in attributes['mobs'].keys():
                            mobs.append(k)
            for biome, mob in wild_mobs.items():
                for k in mob.keys():
                    mobs.append(k)
            target = f"{mobs[random.randint(0, len(mobs)-1)]} named {names[random.randint(0, len(names)-1)]}"
            print(f"Well, we'll keep this off the record, but I can arrange for some money to find its way "
                  f"into your account if you make {colored(target, 'yellow')} disappear, if you know what I mean...")
            self.p.hit_list.append(target)
            # NOTE(review): return values are inconsistent across branches
            # (implicit None / False / None) — callers appear to treat any
            # falsy result the same; verify before changing.
            return False
        else:
            return None
|
normal
|
{
"blob_id": "535c0975c688a19963e4c53f6029626d286b41d6",
"index": 5630,
"step-1": "<mask token>\n\n\nclass Player:\n\n def __init__(self, name, location):\n self.name = name\n self.location = location\n self.square = None\n self.money = 0\n self.quest = None\n self.job = None\n self.phase = 'day'\n self.equipped_weapon = None\n self.major_armor = None\n self.minor_armor = None\n self.building_local = None\n self.inventory = []\n self.skills = {}\n self.health = 100\n self.greeting_count = 0\n self.body_count = 0\n self.assassination_count = 0\n self.hit_list = []\n self.death_count = 0\n self.food_count = 0\n self.run_away_count = 0\n self.speed_bonus = False\n self.game_won = False\n\n def game_over(self):\n if self.game_won is False:\n self.game_won = True\n print(colored('You have won the game!', 'green'))\n print(\n 'You may continue playing to earn more achievements if you wish.'\n )\n if self.run_away_count == 0:\n print(\n 'Congratulations, you have achieved the True Bravery achievement, having won the game without ever running away from a fight.'\n )\n if self.run_away_count > 100:\n print(\n 'Congratulations, you have achieved the True Cowardice achievement, having won the game after running away from over 100 battles.'\n )\n\n def clean_up_inventory(self):\n \"\"\" Remove items with quantity of zero from the map inventory\"\"\"\n self.inventory = [i for i in self.inventory if i.quantity != 0]\n\n def phase_change(self, the_map):\n self.phase = 'day' if self.phase == 'night' else 'night'\n for k, square in the_map.items():\n if self.location != k:\n square.generate_items()\n for b in square.buildings:\n if b.ware_list:\n b.wares = drop_item(b.ware_list)\n while not b.wares:\n b.wares = drop_item(b.ware_list)\n if b.name not in ('a castle', 'a volcanic base'):\n jobs = {}\n buiding_dict = add_dicts_together(buildings[\n 'master'], buildings[square.square_type])\n for key, v in buiding_dict.items():\n if key == b.name and v.get('jobs'):\n for name, values in v['jobs'].items():\n jobs[name] = values\n b.jobs = b.drop_job(jobs)\n 
if self.phase == 'day':\n self.speed_bonus = False\n for mob in square.mobs:\n mob.health = 100\n mob.irritation_level = 0\n mob.quest = None if self.quest is None else mob.quest\n if not square.mobs:\n square.mobs = drop_mob(add_dicts_together(wild_mobs\n ['master'], wild_mobs[self.square.square_type]),\n self, limit=len(names), square=square)\n\n def formatted_inventory(self):\n formatted = []\n for item in self.inventory:\n if item.quantity > 1:\n formatted.append(f'{int_to_words(item.quantity)} {item.plural}'\n )\n else:\n formatted.append(item.name)\n if formatted:\n return comma_separated(formatted)\n else:\n return 'nothing'\n\n def pretty_inventory(self):\n w = self.equipped_weapon\n major = self.major_armor.defense if self.major_armor else 0\n minor = self.minor_armor.defense if self.minor_armor else 0\n armor_defense = (major + minor) * 5\n armors = [self.major_armor.name if self.major_armor else None, self\n .minor_armor.name if self.minor_armor else None]\n inventory = {'inventory_items':\n f'You have {self.formatted_inventory()} in your inventory.',\n 'weapon': \n f'You are wielding {int_to_words(w.quantity)} {remove_little_words(w.name) if w.quantity == 1 else w.plural}.'\n if w else None, 'armor': \n f\"You are wearing {' and '.join(x for x in armors if x)}, giving you a {armor_defense}% reduction in incoming damage.\"\n if self.minor_armor or self.major_armor else None}\n return '\\n'.join(v for v in inventory.values() if v)\n <mask token>\n <mask token>\n <mask token>\n\n def increase_skill(self, skill, increase):\n try:\n self.skills[skill] += increase\n except KeyError:\n self.skills[skill] = increase\n print(\n f'You have increased your mastery of {skill} by {increase}% for a total of {self.skills[skill]}%.'\n )\n\n\nclass Item:\n\n def __init__(self, name, quantity, plural, category=None, perishable=\n None, flammable=None, rarity=None, price=None, weapon_rating=None,\n defense=None):\n self.name = name\n self.quantity = quantity\n self.plural 
= plural\n self.category = category or None\n self.perishable = perishable or None\n self.flammable = flammable or None\n self.rarity = rarity or None\n self.price = price or None\n self.weapon_rating = weapon_rating or None\n self.defense = defense or None\n\n def copy(self):\n return Item(name=self.name, quantity=self.quantity, plural=self.\n plural, category=self.category, perishable=self.perishable,\n flammable=self.flammable, rarity=self.rarity, weapon_rating=\n self.weapon_rating, defense=self.defense)\n\n\nclass Building(object):\n\n def __init__(self, name, p, plural, category=None, rarity=None,\n ware_list=None, mobs=None, jobs=None):\n self.name = name\n self.p = p\n self.quantity = 1\n self.plural = plural\n self.category = category or None\n self.rarity = rarity or None\n self.ware_list = ware_list\n self.wares = self.drop_wares()\n self.mobs = drop_mob(mobs, p) if mobs else None\n self.jobs = self.drop_job(jobs) if jobs else None\n if self.name in ('a castle', 'a volcanic base'):\n self.boss_mobs_and_jobs()\n\n def drop_wares(self):\n if self.ware_list:\n wares = drop_item(self.ware_list)\n while not wares:\n wares = drop_item(self.ware_list)\n return wares\n else:\n return []\n\n def drop_job(self, jobs):\n drops_i = []\n for k, v in jobs.items():\n if odds(2):\n drops_i.append(Job(name=k, location=self.p.location, **v))\n return drops_i\n\n def boss_mobs_and_jobs(self):\n boss_major_armors = [Item('a coat of impervious dragon scales',\n plural='coats of dragon scales', quantity=1, category=\n 'major armor', rarity='super rare', defense=5), Item(\n 'an enchanted leather duster', plural=\n 'enchanted leather dusters', quantity=1, category='major armor',\n defense=5, rarity='super rare'), Item(\n 'a coat of actual live grizzly bears', plural=\n 'coats of actual live grizzly bears', quantity=1, category=\n 'major armor', defense=5, rarity='super rare')]\n boss_minor_armors = [Item('wings of an angel', plural=\n 'wings of angels', quantity=1, 
rarity='super rare', category=\n 'minor armor', defense=5), Item('an OSHA approved hard hat',\n plural='OSHA approved hard hats', quantity=1, rarity=\n 'super rare', category='minor armor', defense=5), Item(\n 'a pair boots that were made for walkin', plural=\n 'pairs of boots that were made for walkin', quantity=1, rarity=\n 'super rare', category='minor armor', defense=5)]\n boss_weapons = [Item('an apache helicopter', plural=\n 'apache helicopters', rarity='super rare', weapon_rating=6,\n quantity=1), Item('a trebuchet', plural='trebuchets',\n weapon_rating=6, quantity=1, rarity='super rare'), Item(\n 'an army of attacking wizards', plural=\n 'armies of attacking wizards', weapon_rating=6, quantity=1,\n rarity='super rare')]\n boss_names = ['the Terrifying Dragon of Soul Slaying',\n 'the Great Salamander of Darkness', 'the Squirrel of Destiny']\n random.shuffle(boss_names)\n random.shuffle(boss_weapons)\n random.shuffle(boss_major_armors)\n random.shuffle(boss_minor_armors)\n boss = Mob(boss_names[0], self.p, plural=boss_names[0], rarity=\n 'super rare')\n boss.health = 500\n boss.equipped_weapon = boss_weapons[0]\n boss.major_armor = boss_major_armors[0]\n boss.minor_armor = boss_minor_armors[0]\n boss.irritation_level = 10\n self.mobs = [boss]\n if self.name == 'a castle':\n self.jobs = [Job('king of the realm', location=self.p.location,\n salary=1100)]\n if self.name == 'a volcanic base':\n self.jobs = [Job('evil overlord', location=self.p.location,\n salary=1100)]\n\n\nclass Job:\n\n def __init__(self, name, location, skills_needed=None, salary=0,\n skills_learned=None, inventory_needed=None):\n self.name = name\n self.location = location\n self.skills_needed = skills_needed or None\n self.salary = salary or 0\n self.skills_learned = skills_learned or None\n self.inventory_needed = inventory_needed or None\n self.application_attempts = 0\n\n\nclass Mob:\n\n def __init__(self, name, p, plural, rarity, inventory=None):\n self.name = name\n self.p = p\n 
self.plural = plural\n self.quantity = 1\n self.rarity = rarity\n self.skills = self.skills()\n self.quest = None\n self.inventory = inventory or drop_item(add_dicts_together(items[\n 'master'], items[p.square.square_type]))\n self.health = 100\n self.equipped_weapon = self.equip()\n major = [x for x in self.inventory if x.category == 'major armor']\n minor = [x for x in self.inventory if x.category == 'minor armor']\n self.major_armor = major[0] if major else None\n self.minor_armor = minor[0] if minor else None\n self.irritation_level = 0\n\n def equip(self):\n nice_weapons = []\n for i in self.inventory:\n try:\n if i.weapon_rating:\n nice_weapons.append(i)\n except AttributeError:\n pass\n nice_weapons.sort(key=lambda x: x.weapon_rating, reverse=True)\n if nice_weapons:\n self.inventory.remove(nice_weapons[0])\n return nice_weapons[0]\n else:\n return None\n\n @staticmethod\n def skills():\n \"\"\" Pick the skills for a mob, these determine what a player can get from completing a quest \"\"\"\n all_skills = ['strength', 'patience', 'cleanliness', 'leadership',\n 'communication', 'science', 'math', 'engineering',\n 'intelligence', 'driving']\n random.shuffle(all_skills)\n return all_skills[0:2]\n\n def generate_quest(self):\n \"\"\"\n inventory based\n bring me x of an object to learn a skill\n \"\"\"\n if odds(3):\n quest_items = add_dicts_together(items['master'], items[self.p.\n square.square_type])\n quest_item = random.choice(list(quest_items.keys()))\n i = Item(quest_item, 0, **quest_items[quest_item])\n self.inventory.append(i)\n quantity = {'super rare': '1', 'rare': '2', 'uncommon': '3',\n 'common': '6', 'super common': '15'}\n q = quantity[i.rarity]\n self.quest = (i, int(q),\n f'{self.p.name}, if you bring me {q} {i.plural if int(q) > 1 else remove_little_words(i.name)}, I will teach you a valuable skill.'\n )\n return\n elif odds(5):\n mobs = []\n for biome, building in buildings.items():\n for b, attributes in building.items():\n if 
attributes.get('mobs'):\n for k in attributes['mobs'].keys():\n mobs.append(k)\n for biome, mob in wild_mobs.items():\n for k in mob.keys():\n mobs.append(k)\n target = (\n f'{mobs[random.randint(0, len(mobs) - 1)]} named {names[random.randint(0, len(names) - 1)]}'\n )\n print(\n f\"Well, we'll keep this off the record, but I can arrange for some money to find its way into your account if you make {colored(target, 'yellow')} disappear, if you know what I mean...\"\n )\n self.p.hit_list.append(target)\n return False\n else:\n return None\n",
"step-2": "<mask token>\n\n\nclass MapSquare:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def generate_buildings(self, p):\n self.buildings = drop_building(add_dicts_together(buildings[\n 'master'], buildings[self.square_type]), p)\n\n def generate_mobs(self, p):\n self.mobs = drop_mob(add_dicts_together(wild_mobs['master'],\n wild_mobs[self.square_type]), p)\n\n def clean_up_map(self):\n \"\"\" Remove items with quantity of zero from the map inventory\"\"\"\n self.items = [i for i in self.items if i.quantity != 0]\n\n @staticmethod\n def map_picture(the_map, p):\n \"\"\"With the player's location in the center, draw a 5 x 5 map with map square type\n and coordinates in each square\"\"\"\n xy = p.location[0] - 2, p.location[1] + 2\n map_coords = []\n for y in range(0, 5):\n row = [(xy[0] + x, xy[1] - y) for x in range(0, 5)]\n map_coords.append(row)\n pretty_map = []\n for r in map_coords:\n row = []\n for coordinates in r:\n if coordinates in the_map.keys():\n if p.quest and p.job and p.quest[1\n ] == coordinates and p.job.location == coordinates:\n star = '*$ '\n elif p.quest and p.quest[1] == coordinates:\n star = ' * '\n elif p.job and p.job.location == coordinates:\n star = ' $ '\n else:\n star = ' '\n row.append('|{!s:9}{}|'.format(the_map[coordinates].\n square_type, star))\n else:\n row.append('|{!s:12}|'.format(' '))\n pretty_map.append(row)\n for row in pretty_map:\n print(''.join(row))\n\n\nclass Player:\n\n def __init__(self, name, location):\n self.name = name\n self.location = location\n self.square = None\n self.money = 0\n self.quest = None\n self.job = None\n self.phase = 'day'\n self.equipped_weapon = None\n self.major_armor = None\n self.minor_armor = None\n self.building_local = None\n self.inventory = []\n self.skills = {}\n self.health = 100\n self.greeting_count = 0\n self.body_count = 0\n self.assassination_count = 0\n self.hit_list = []\n self.death_count = 0\n self.food_count = 0\n self.run_away_count = 
0\n self.speed_bonus = False\n self.game_won = False\n\n def game_over(self):\n if self.game_won is False:\n self.game_won = True\n print(colored('You have won the game!', 'green'))\n print(\n 'You may continue playing to earn more achievements if you wish.'\n )\n if self.run_away_count == 0:\n print(\n 'Congratulations, you have achieved the True Bravery achievement, having won the game without ever running away from a fight.'\n )\n if self.run_away_count > 100:\n print(\n 'Congratulations, you have achieved the True Cowardice achievement, having won the game after running away from over 100 battles.'\n )\n\n def clean_up_inventory(self):\n \"\"\" Remove items with quantity of zero from the map inventory\"\"\"\n self.inventory = [i for i in self.inventory if i.quantity != 0]\n\n def phase_change(self, the_map):\n self.phase = 'day' if self.phase == 'night' else 'night'\n for k, square in the_map.items():\n if self.location != k:\n square.generate_items()\n for b in square.buildings:\n if b.ware_list:\n b.wares = drop_item(b.ware_list)\n while not b.wares:\n b.wares = drop_item(b.ware_list)\n if b.name not in ('a castle', 'a volcanic base'):\n jobs = {}\n buiding_dict = add_dicts_together(buildings[\n 'master'], buildings[square.square_type])\n for key, v in buiding_dict.items():\n if key == b.name and v.get('jobs'):\n for name, values in v['jobs'].items():\n jobs[name] = values\n b.jobs = b.drop_job(jobs)\n if self.phase == 'day':\n self.speed_bonus = False\n for mob in square.mobs:\n mob.health = 100\n mob.irritation_level = 0\n mob.quest = None if self.quest is None else mob.quest\n if not square.mobs:\n square.mobs = drop_mob(add_dicts_together(wild_mobs\n ['master'], wild_mobs[self.square.square_type]),\n self, limit=len(names), square=square)\n\n def formatted_inventory(self):\n formatted = []\n for item in self.inventory:\n if item.quantity > 1:\n formatted.append(f'{int_to_words(item.quantity)} {item.plural}'\n )\n else:\n formatted.append(item.name)\n if 
formatted:\n return comma_separated(formatted)\n else:\n return 'nothing'\n\n def pretty_inventory(self):\n w = self.equipped_weapon\n major = self.major_armor.defense if self.major_armor else 0\n minor = self.minor_armor.defense if self.minor_armor else 0\n armor_defense = (major + minor) * 5\n armors = [self.major_armor.name if self.major_armor else None, self\n .minor_armor.name if self.minor_armor else None]\n inventory = {'inventory_items':\n f'You have {self.formatted_inventory()} in your inventory.',\n 'weapon': \n f'You are wielding {int_to_words(w.quantity)} {remove_little_words(w.name) if w.quantity == 1 else w.plural}.'\n if w else None, 'armor': \n f\"You are wearing {' and '.join(x for x in armors if x)}, giving you a {armor_defense}% reduction in incoming damage.\"\n if self.minor_armor or self.major_armor else None}\n return '\\n'.join(v for v in inventory.values() if v)\n\n def status(self):\n skills = [f'{k}: {v}%.' for k, v in self.skills.items()]\n job = f'You have a job as a {self.job.name}.' if self.job else None\n quest = 'You have a quest.' if self.quest else None\n if job and quest:\n job_string = '\\n'.join([job, quest])\n elif job or quest:\n job_string = job if job else quest\n else:\n job_string = (\n 'You do not have a job, and you are not contributing to society.'\n )\n status_string = {'health':\n f'Currently, you have {self.health} health.', 'location':\n f'You are located on map coordinates {self.location}, which is {self.square.square_type}.'\n , 'building_local': \n f'You are inside {self.building_local.name}.' 
if self.\n building_local else None, 'skills': '\\n'.join(skills) if skills\n else \"You don't have any skills.\", 'money':\n f'You have ${self.money} in your wallet.', 'job': job_string}\n return '\\n'.join(v for v in status_string.values() if v)\n\n def statistics(self):\n print(f'You have killed {self.body_count} mobs.')\n print(f'You have ran away from {self.run_away_count} battles.')\n print(f'You have eaten {self.food_count} items.')\n print(f'You have performed {self.assassination_count} assassinations.')\n print(f'You have talked to mobs {self.greeting_count} times.')\n\n def view_hit_list(self):\n if self.hit_list:\n print(\n f'If you ever run across these shady characters, be sure to take their names off your list: {comma_separated(self.hit_list)}'\n )\n else:\n print(\"Looks like you don't know of anyone who needs to be dead.\")\n\n def increase_skill(self, skill, increase):\n try:\n self.skills[skill] += increase\n except KeyError:\n self.skills[skill] = increase\n print(\n f'You have increased your mastery of {skill} by {increase}% for a total of {self.skills[skill]}%.'\n )\n\n\nclass Item:\n\n def __init__(self, name, quantity, plural, category=None, perishable=\n None, flammable=None, rarity=None, price=None, weapon_rating=None,\n defense=None):\n self.name = name\n self.quantity = quantity\n self.plural = plural\n self.category = category or None\n self.perishable = perishable or None\n self.flammable = flammable or None\n self.rarity = rarity or None\n self.price = price or None\n self.weapon_rating = weapon_rating or None\n self.defense = defense or None\n\n def copy(self):\n return Item(name=self.name, quantity=self.quantity, plural=self.\n plural, category=self.category, perishable=self.perishable,\n flammable=self.flammable, rarity=self.rarity, weapon_rating=\n self.weapon_rating, defense=self.defense)\n\n\nclass Building(object):\n\n def __init__(self, name, p, plural, category=None, rarity=None,\n ware_list=None, mobs=None, jobs=None):\n 
self.name = name\n self.p = p\n self.quantity = 1\n self.plural = plural\n self.category = category or None\n self.rarity = rarity or None\n self.ware_list = ware_list\n self.wares = self.drop_wares()\n self.mobs = drop_mob(mobs, p) if mobs else None\n self.jobs = self.drop_job(jobs) if jobs else None\n if self.name in ('a castle', 'a volcanic base'):\n self.boss_mobs_and_jobs()\n\n def drop_wares(self):\n if self.ware_list:\n wares = drop_item(self.ware_list)\n while not wares:\n wares = drop_item(self.ware_list)\n return wares\n else:\n return []\n\n def drop_job(self, jobs):\n drops_i = []\n for k, v in jobs.items():\n if odds(2):\n drops_i.append(Job(name=k, location=self.p.location, **v))\n return drops_i\n\n def boss_mobs_and_jobs(self):\n boss_major_armors = [Item('a coat of impervious dragon scales',\n plural='coats of dragon scales', quantity=1, category=\n 'major armor', rarity='super rare', defense=5), Item(\n 'an enchanted leather duster', plural=\n 'enchanted leather dusters', quantity=1, category='major armor',\n defense=5, rarity='super rare'), Item(\n 'a coat of actual live grizzly bears', plural=\n 'coats of actual live grizzly bears', quantity=1, category=\n 'major armor', defense=5, rarity='super rare')]\n boss_minor_armors = [Item('wings of an angel', plural=\n 'wings of angels', quantity=1, rarity='super rare', category=\n 'minor armor', defense=5), Item('an OSHA approved hard hat',\n plural='OSHA approved hard hats', quantity=1, rarity=\n 'super rare', category='minor armor', defense=5), Item(\n 'a pair boots that were made for walkin', plural=\n 'pairs of boots that were made for walkin', quantity=1, rarity=\n 'super rare', category='minor armor', defense=5)]\n boss_weapons = [Item('an apache helicopter', plural=\n 'apache helicopters', rarity='super rare', weapon_rating=6,\n quantity=1), Item('a trebuchet', plural='trebuchets',\n weapon_rating=6, quantity=1, rarity='super rare'), Item(\n 'an army of attacking wizards', plural=\n 'armies of 
attacking wizards', weapon_rating=6, quantity=1,\n rarity='super rare')]\n boss_names = ['the Terrifying Dragon of Soul Slaying',\n 'the Great Salamander of Darkness', 'the Squirrel of Destiny']\n random.shuffle(boss_names)\n random.shuffle(boss_weapons)\n random.shuffle(boss_major_armors)\n random.shuffle(boss_minor_armors)\n boss = Mob(boss_names[0], self.p, plural=boss_names[0], rarity=\n 'super rare')\n boss.health = 500\n boss.equipped_weapon = boss_weapons[0]\n boss.major_armor = boss_major_armors[0]\n boss.minor_armor = boss_minor_armors[0]\n boss.irritation_level = 10\n self.mobs = [boss]\n if self.name == 'a castle':\n self.jobs = [Job('king of the realm', location=self.p.location,\n salary=1100)]\n if self.name == 'a volcanic base':\n self.jobs = [Job('evil overlord', location=self.p.location,\n salary=1100)]\n\n\nclass Job:\n\n def __init__(self, name, location, skills_needed=None, salary=0,\n skills_learned=None, inventory_needed=None):\n self.name = name\n self.location = location\n self.skills_needed = skills_needed or None\n self.salary = salary or 0\n self.skills_learned = skills_learned or None\n self.inventory_needed = inventory_needed or None\n self.application_attempts = 0\n\n\nclass Mob:\n\n def __init__(self, name, p, plural, rarity, inventory=None):\n self.name = name\n self.p = p\n self.plural = plural\n self.quantity = 1\n self.rarity = rarity\n self.skills = self.skills()\n self.quest = None\n self.inventory = inventory or drop_item(add_dicts_together(items[\n 'master'], items[p.square.square_type]))\n self.health = 100\n self.equipped_weapon = self.equip()\n major = [x for x in self.inventory if x.category == 'major armor']\n minor = [x for x in self.inventory if x.category == 'minor armor']\n self.major_armor = major[0] if major else None\n self.minor_armor = minor[0] if minor else None\n self.irritation_level = 0\n\n def equip(self):\n nice_weapons = []\n for i in self.inventory:\n try:\n if i.weapon_rating:\n nice_weapons.append(i)\n 
except AttributeError:\n pass\n nice_weapons.sort(key=lambda x: x.weapon_rating, reverse=True)\n if nice_weapons:\n self.inventory.remove(nice_weapons[0])\n return nice_weapons[0]\n else:\n return None\n\n @staticmethod\n def skills():\n \"\"\" Pick the skills for a mob, these determine what a player can get from completing a quest \"\"\"\n all_skills = ['strength', 'patience', 'cleanliness', 'leadership',\n 'communication', 'science', 'math', 'engineering',\n 'intelligence', 'driving']\n random.shuffle(all_skills)\n return all_skills[0:2]\n\n def generate_quest(self):\n \"\"\"\n inventory based\n bring me x of an object to learn a skill\n \"\"\"\n if odds(3):\n quest_items = add_dicts_together(items['master'], items[self.p.\n square.square_type])\n quest_item = random.choice(list(quest_items.keys()))\n i = Item(quest_item, 0, **quest_items[quest_item])\n self.inventory.append(i)\n quantity = {'super rare': '1', 'rare': '2', 'uncommon': '3',\n 'common': '6', 'super common': '15'}\n q = quantity[i.rarity]\n self.quest = (i, int(q),\n f'{self.p.name}, if you bring me {q} {i.plural if int(q) > 1 else remove_little_words(i.name)}, I will teach you a valuable skill.'\n )\n return\n elif odds(5):\n mobs = []\n for biome, building in buildings.items():\n for b, attributes in building.items():\n if attributes.get('mobs'):\n for k in attributes['mobs'].keys():\n mobs.append(k)\n for biome, mob in wild_mobs.items():\n for k in mob.keys():\n mobs.append(k)\n target = (\n f'{mobs[random.randint(0, len(mobs) - 1)]} named {names[random.randint(0, len(names) - 1)]}'\n )\n print(\n f\"Well, we'll keep this off the record, but I can arrange for some money to find its way into your account if you make {colored(target, 'yellow')} disappear, if you know what I mean...\"\n )\n self.p.hit_list.append(target)\n return False\n else:\n return None\n",
"step-3": "<mask token>\n\n\nclass MapSquare:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def generate_items(self):\n self.items = drop_item(add_dicts_together(items['master'], items[\n self.square_type]))\n\n def generate_buildings(self, p):\n self.buildings = drop_building(add_dicts_together(buildings[\n 'master'], buildings[self.square_type]), p)\n\n def generate_mobs(self, p):\n self.mobs = drop_mob(add_dicts_together(wild_mobs['master'],\n wild_mobs[self.square_type]), p)\n\n def clean_up_map(self):\n \"\"\" Remove items with quantity of zero from the map inventory\"\"\"\n self.items = [i for i in self.items if i.quantity != 0]\n\n @staticmethod\n def map_picture(the_map, p):\n \"\"\"With the player's location in the center, draw a 5 x 5 map with map square type\n and coordinates in each square\"\"\"\n xy = p.location[0] - 2, p.location[1] + 2\n map_coords = []\n for y in range(0, 5):\n row = [(xy[0] + x, xy[1] - y) for x in range(0, 5)]\n map_coords.append(row)\n pretty_map = []\n for r in map_coords:\n row = []\n for coordinates in r:\n if coordinates in the_map.keys():\n if p.quest and p.job and p.quest[1\n ] == coordinates and p.job.location == coordinates:\n star = '*$ '\n elif p.quest and p.quest[1] == coordinates:\n star = ' * '\n elif p.job and p.job.location == coordinates:\n star = ' $ '\n else:\n star = ' '\n row.append('|{!s:9}{}|'.format(the_map[coordinates].\n square_type, star))\n else:\n row.append('|{!s:12}|'.format(' '))\n pretty_map.append(row)\n for row in pretty_map:\n print(''.join(row))\n\n\nclass Player:\n\n def __init__(self, name, location):\n self.name = name\n self.location = location\n self.square = None\n self.money = 0\n self.quest = None\n self.job = None\n self.phase = 'day'\n self.equipped_weapon = None\n self.major_armor = None\n self.minor_armor = None\n self.building_local = None\n self.inventory = []\n self.skills = {}\n self.health = 100\n self.greeting_count = 0\n self.body_count = 0\n 
self.assassination_count = 0\n self.hit_list = []\n self.death_count = 0\n self.food_count = 0\n self.run_away_count = 0\n self.speed_bonus = False\n self.game_won = False\n\n def game_over(self):\n if self.game_won is False:\n self.game_won = True\n print(colored('You have won the game!', 'green'))\n print(\n 'You may continue playing to earn more achievements if you wish.'\n )\n if self.run_away_count == 0:\n print(\n 'Congratulations, you have achieved the True Bravery achievement, having won the game without ever running away from a fight.'\n )\n if self.run_away_count > 100:\n print(\n 'Congratulations, you have achieved the True Cowardice achievement, having won the game after running away from over 100 battles.'\n )\n\n def clean_up_inventory(self):\n \"\"\" Remove items with quantity of zero from the map inventory\"\"\"\n self.inventory = [i for i in self.inventory if i.quantity != 0]\n\n def phase_change(self, the_map):\n self.phase = 'day' if self.phase == 'night' else 'night'\n for k, square in the_map.items():\n if self.location != k:\n square.generate_items()\n for b in square.buildings:\n if b.ware_list:\n b.wares = drop_item(b.ware_list)\n while not b.wares:\n b.wares = drop_item(b.ware_list)\n if b.name not in ('a castle', 'a volcanic base'):\n jobs = {}\n buiding_dict = add_dicts_together(buildings[\n 'master'], buildings[square.square_type])\n for key, v in buiding_dict.items():\n if key == b.name and v.get('jobs'):\n for name, values in v['jobs'].items():\n jobs[name] = values\n b.jobs = b.drop_job(jobs)\n if self.phase == 'day':\n self.speed_bonus = False\n for mob in square.mobs:\n mob.health = 100\n mob.irritation_level = 0\n mob.quest = None if self.quest is None else mob.quest\n if not square.mobs:\n square.mobs = drop_mob(add_dicts_together(wild_mobs\n ['master'], wild_mobs[self.square.square_type]),\n self, limit=len(names), square=square)\n\n def formatted_inventory(self):\n formatted = []\n for item in self.inventory:\n if item.quantity 
> 1:\n formatted.append(f'{int_to_words(item.quantity)} {item.plural}'\n )\n else:\n formatted.append(item.name)\n if formatted:\n return comma_separated(formatted)\n else:\n return 'nothing'\n\n def pretty_inventory(self):\n w = self.equipped_weapon\n major = self.major_armor.defense if self.major_armor else 0\n minor = self.minor_armor.defense if self.minor_armor else 0\n armor_defense = (major + minor) * 5\n armors = [self.major_armor.name if self.major_armor else None, self\n .minor_armor.name if self.minor_armor else None]\n inventory = {'inventory_items':\n f'You have {self.formatted_inventory()} in your inventory.',\n 'weapon': \n f'You are wielding {int_to_words(w.quantity)} {remove_little_words(w.name) if w.quantity == 1 else w.plural}.'\n if w else None, 'armor': \n f\"You are wearing {' and '.join(x for x in armors if x)}, giving you a {armor_defense}% reduction in incoming damage.\"\n if self.minor_armor or self.major_armor else None}\n return '\\n'.join(v for v in inventory.values() if v)\n\n def status(self):\n skills = [f'{k}: {v}%.' for k, v in self.skills.items()]\n job = f'You have a job as a {self.job.name}.' if self.job else None\n quest = 'You have a quest.' if self.quest else None\n if job and quest:\n job_string = '\\n'.join([job, quest])\n elif job or quest:\n job_string = job if job else quest\n else:\n job_string = (\n 'You do not have a job, and you are not contributing to society.'\n )\n status_string = {'health':\n f'Currently, you have {self.health} health.', 'location':\n f'You are located on map coordinates {self.location}, which is {self.square.square_type}.'\n , 'building_local': \n f'You are inside {self.building_local.name}.' 
if self.\n building_local else None, 'skills': '\\n'.join(skills) if skills\n else \"You don't have any skills.\", 'money':\n f'You have ${self.money} in your wallet.', 'job': job_string}\n return '\\n'.join(v for v in status_string.values() if v)\n\n def statistics(self):\n print(f'You have killed {self.body_count} mobs.')\n print(f'You have ran away from {self.run_away_count} battles.')\n print(f'You have eaten {self.food_count} items.')\n print(f'You have performed {self.assassination_count} assassinations.')\n print(f'You have talked to mobs {self.greeting_count} times.')\n\n def view_hit_list(self):\n if self.hit_list:\n print(\n f'If you ever run across these shady characters, be sure to take their names off your list: {comma_separated(self.hit_list)}'\n )\n else:\n print(\"Looks like you don't know of anyone who needs to be dead.\")\n\n def increase_skill(self, skill, increase):\n try:\n self.skills[skill] += increase\n except KeyError:\n self.skills[skill] = increase\n print(\n f'You have increased your mastery of {skill} by {increase}% for a total of {self.skills[skill]}%.'\n )\n\n\nclass Item:\n\n def __init__(self, name, quantity, plural, category=None, perishable=\n None, flammable=None, rarity=None, price=None, weapon_rating=None,\n defense=None):\n self.name = name\n self.quantity = quantity\n self.plural = plural\n self.category = category or None\n self.perishable = perishable or None\n self.flammable = flammable or None\n self.rarity = rarity or None\n self.price = price or None\n self.weapon_rating = weapon_rating or None\n self.defense = defense or None\n\n def copy(self):\n return Item(name=self.name, quantity=self.quantity, plural=self.\n plural, category=self.category, perishable=self.perishable,\n flammable=self.flammable, rarity=self.rarity, weapon_rating=\n self.weapon_rating, defense=self.defense)\n\n\nclass Building(object):\n\n def __init__(self, name, p, plural, category=None, rarity=None,\n ware_list=None, mobs=None, jobs=None):\n 
self.name = name\n self.p = p\n self.quantity = 1\n self.plural = plural\n self.category = category or None\n self.rarity = rarity or None\n self.ware_list = ware_list\n self.wares = self.drop_wares()\n self.mobs = drop_mob(mobs, p) if mobs else None\n self.jobs = self.drop_job(jobs) if jobs else None\n if self.name in ('a castle', 'a volcanic base'):\n self.boss_mobs_and_jobs()\n\n def drop_wares(self):\n if self.ware_list:\n wares = drop_item(self.ware_list)\n while not wares:\n wares = drop_item(self.ware_list)\n return wares\n else:\n return []\n\n def drop_job(self, jobs):\n drops_i = []\n for k, v in jobs.items():\n if odds(2):\n drops_i.append(Job(name=k, location=self.p.location, **v))\n return drops_i\n\n def boss_mobs_and_jobs(self):\n boss_major_armors = [Item('a coat of impervious dragon scales',\n plural='coats of dragon scales', quantity=1, category=\n 'major armor', rarity='super rare', defense=5), Item(\n 'an enchanted leather duster', plural=\n 'enchanted leather dusters', quantity=1, category='major armor',\n defense=5, rarity='super rare'), Item(\n 'a coat of actual live grizzly bears', plural=\n 'coats of actual live grizzly bears', quantity=1, category=\n 'major armor', defense=5, rarity='super rare')]\n boss_minor_armors = [Item('wings of an angel', plural=\n 'wings of angels', quantity=1, rarity='super rare', category=\n 'minor armor', defense=5), Item('an OSHA approved hard hat',\n plural='OSHA approved hard hats', quantity=1, rarity=\n 'super rare', category='minor armor', defense=5), Item(\n 'a pair boots that were made for walkin', plural=\n 'pairs of boots that were made for walkin', quantity=1, rarity=\n 'super rare', category='minor armor', defense=5)]\n boss_weapons = [Item('an apache helicopter', plural=\n 'apache helicopters', rarity='super rare', weapon_rating=6,\n quantity=1), Item('a trebuchet', plural='trebuchets',\n weapon_rating=6, quantity=1, rarity='super rare'), Item(\n 'an army of attacking wizards', plural=\n 'armies of 
attacking wizards', weapon_rating=6, quantity=1,\n rarity='super rare')]\n boss_names = ['the Terrifying Dragon of Soul Slaying',\n 'the Great Salamander of Darkness', 'the Squirrel of Destiny']\n random.shuffle(boss_names)\n random.shuffle(boss_weapons)\n random.shuffle(boss_major_armors)\n random.shuffle(boss_minor_armors)\n boss = Mob(boss_names[0], self.p, plural=boss_names[0], rarity=\n 'super rare')\n boss.health = 500\n boss.equipped_weapon = boss_weapons[0]\n boss.major_armor = boss_major_armors[0]\n boss.minor_armor = boss_minor_armors[0]\n boss.irritation_level = 10\n self.mobs = [boss]\n if self.name == 'a castle':\n self.jobs = [Job('king of the realm', location=self.p.location,\n salary=1100)]\n if self.name == 'a volcanic base':\n self.jobs = [Job('evil overlord', location=self.p.location,\n salary=1100)]\n\n\nclass Job:\n\n def __init__(self, name, location, skills_needed=None, salary=0,\n skills_learned=None, inventory_needed=None):\n self.name = name\n self.location = location\n self.skills_needed = skills_needed or None\n self.salary = salary or 0\n self.skills_learned = skills_learned or None\n self.inventory_needed = inventory_needed or None\n self.application_attempts = 0\n\n\nclass Mob:\n\n def __init__(self, name, p, plural, rarity, inventory=None):\n self.name = name\n self.p = p\n self.plural = plural\n self.quantity = 1\n self.rarity = rarity\n self.skills = self.skills()\n self.quest = None\n self.inventory = inventory or drop_item(add_dicts_together(items[\n 'master'], items[p.square.square_type]))\n self.health = 100\n self.equipped_weapon = self.equip()\n major = [x for x in self.inventory if x.category == 'major armor']\n minor = [x for x in self.inventory if x.category == 'minor armor']\n self.major_armor = major[0] if major else None\n self.minor_armor = minor[0] if minor else None\n self.irritation_level = 0\n\n def equip(self):\n nice_weapons = []\n for i in self.inventory:\n try:\n if i.weapon_rating:\n nice_weapons.append(i)\n 
except AttributeError:\n pass\n nice_weapons.sort(key=lambda x: x.weapon_rating, reverse=True)\n if nice_weapons:\n self.inventory.remove(nice_weapons[0])\n return nice_weapons[0]\n else:\n return None\n\n @staticmethod\n def skills():\n \"\"\" Pick the skills for a mob, these determine what a player can get from completing a quest \"\"\"\n all_skills = ['strength', 'patience', 'cleanliness', 'leadership',\n 'communication', 'science', 'math', 'engineering',\n 'intelligence', 'driving']\n random.shuffle(all_skills)\n return all_skills[0:2]\n\n def generate_quest(self):\n \"\"\"\n inventory based\n bring me x of an object to learn a skill\n \"\"\"\n if odds(3):\n quest_items = add_dicts_together(items['master'], items[self.p.\n square.square_type])\n quest_item = random.choice(list(quest_items.keys()))\n i = Item(quest_item, 0, **quest_items[quest_item])\n self.inventory.append(i)\n quantity = {'super rare': '1', 'rare': '2', 'uncommon': '3',\n 'common': '6', 'super common': '15'}\n q = quantity[i.rarity]\n self.quest = (i, int(q),\n f'{self.p.name}, if you bring me {q} {i.plural if int(q) > 1 else remove_little_words(i.name)}, I will teach you a valuable skill.'\n )\n return\n elif odds(5):\n mobs = []\n for biome, building in buildings.items():\n for b, attributes in building.items():\n if attributes.get('mobs'):\n for k in attributes['mobs'].keys():\n mobs.append(k)\n for biome, mob in wild_mobs.items():\n for k in mob.keys():\n mobs.append(k)\n target = (\n f'{mobs[random.randint(0, len(mobs) - 1)]} named {names[random.randint(0, len(names) - 1)]}'\n )\n print(\n f\"Well, we'll keep this off the record, but I can arrange for some money to find its way into your account if you make {colored(target, 'yellow')} disappear, if you know what I mean...\"\n )\n self.p.hit_list.append(target)\n return False\n else:\n return None\n",
"step-4": "<mask token>\n\n\ndef find_unique_names(quantity, name_list, taken_names):\n free_names = [x for x in name_list if x not in taken_names]\n random.shuffle(free_names)\n return free_names[:quantity]\n\n\ndef dropper(rarity):\n results = {'super rare': 100, 'rare': 50, 'uncommon': 25, 'common': 5,\n 'super common': 2}\n quantity = 0\n countdown = random.randint(0, 10)\n while countdown > 0:\n if random.randint(0, results[rarity]) == 1:\n quantity += 1\n countdown -= 1\n return quantity\n\n\ndef drop_building(dictionary, p, limit=None):\n limit = limit or len(adjectives)\n drops_i = []\n for k, v in dictionary.items():\n quantity = dropper(v['rarity'])\n quantity = quantity if quantity < limit else limit\n limit -= quantity\n if quantity:\n if quantity > 1 and v['category'] != 'residence':\n n = random.randint(0, quantity)\n unique_names = find_unique_names(quantity - n, names, p.\n square.unique_building_names)\n p.square.unique_building_names += unique_names\n for i in range(0, quantity - n):\n drops_i.append(Building(name=\n f\"{unique_names[i]}'s {remove_little_words(k).capitalize()}\"\n , p=p, **v))\n unique_adjectives = find_unique_names(n, adjectives, p.\n square.unique_building_names)\n p.square.unique_building_names += unique_adjectives\n for i in range(0, n):\n drops_i.append(Building(name=\n f'the {unique_adjectives[i]} {remove_little_words(k).capitalize()}'\n , p=p, **v))\n elif quantity > 1 and v['category'] == 'residence':\n unique_house_names = find_unique_names(quantity, names, p.\n square.unique_house_names)\n p.square.unique_house_names += unique_house_names\n for i in range(0, quantity):\n drops_i.append(Building(name=\n f\"{unique_house_names[i]}'s {remove_little_words(k)}\",\n p=p, **v))\n else:\n drops_i.append(Building(name=k, p=p, **v))\n return drops_i\n\n\n<mask token>\n\n\ndef drop_item(dictionary):\n \"\"\" Randomly generates objects based on rarity \"\"\"\n drops_i = []\n for k, v in dictionary.items():\n quantity = 
dropper(v['rarity'])\n if quantity:\n drops_i.append(Item(name=k, quantity=quantity, **v))\n return drops_i\n\n\nclass MapSquare:\n\n def __init__(self, name='', square_type=None):\n square_types = ['forest', 'mountains', 'desert', 'city', 'swamp',\n 'ocean']\n self.square_type = square_type or square_types[random.randint(0, \n len(square_types) - 1)]\n self.name = name\n self.unique_mob_names = []\n self.unique_building_names = []\n self.unique_house_names = []\n mobs = []\n items = []\n buildings = []\n\n def generate_items(self):\n self.items = drop_item(add_dicts_together(items['master'], items[\n self.square_type]))\n\n def generate_buildings(self, p):\n self.buildings = drop_building(add_dicts_together(buildings[\n 'master'], buildings[self.square_type]), p)\n\n def generate_mobs(self, p):\n self.mobs = drop_mob(add_dicts_together(wild_mobs['master'],\n wild_mobs[self.square_type]), p)\n\n def clean_up_map(self):\n \"\"\" Remove items with quantity of zero from the map inventory\"\"\"\n self.items = [i for i in self.items if i.quantity != 0]\n\n @staticmethod\n def map_picture(the_map, p):\n \"\"\"With the player's location in the center, draw a 5 x 5 map with map square type\n and coordinates in each square\"\"\"\n xy = p.location[0] - 2, p.location[1] + 2\n map_coords = []\n for y in range(0, 5):\n row = [(xy[0] + x, xy[1] - y) for x in range(0, 5)]\n map_coords.append(row)\n pretty_map = []\n for r in map_coords:\n row = []\n for coordinates in r:\n if coordinates in the_map.keys():\n if p.quest and p.job and p.quest[1\n ] == coordinates and p.job.location == coordinates:\n star = '*$ '\n elif p.quest and p.quest[1] == coordinates:\n star = ' * '\n elif p.job and p.job.location == coordinates:\n star = ' $ '\n else:\n star = ' '\n row.append('|{!s:9}{}|'.format(the_map[coordinates].\n square_type, star))\n else:\n row.append('|{!s:12}|'.format(' '))\n pretty_map.append(row)\n for row in pretty_map:\n print(''.join(row))\n\n\nclass Player:\n\n def 
__init__(self, name, location):\n self.name = name\n self.location = location\n self.square = None\n self.money = 0\n self.quest = None\n self.job = None\n self.phase = 'day'\n self.equipped_weapon = None\n self.major_armor = None\n self.minor_armor = None\n self.building_local = None\n self.inventory = []\n self.skills = {}\n self.health = 100\n self.greeting_count = 0\n self.body_count = 0\n self.assassination_count = 0\n self.hit_list = []\n self.death_count = 0\n self.food_count = 0\n self.run_away_count = 0\n self.speed_bonus = False\n self.game_won = False\n\n def game_over(self):\n if self.game_won is False:\n self.game_won = True\n print(colored('You have won the game!', 'green'))\n print(\n 'You may continue playing to earn more achievements if you wish.'\n )\n if self.run_away_count == 0:\n print(\n 'Congratulations, you have achieved the True Bravery achievement, having won the game without ever running away from a fight.'\n )\n if self.run_away_count > 100:\n print(\n 'Congratulations, you have achieved the True Cowardice achievement, having won the game after running away from over 100 battles.'\n )\n\n def clean_up_inventory(self):\n \"\"\" Remove items with quantity of zero from the map inventory\"\"\"\n self.inventory = [i for i in self.inventory if i.quantity != 0]\n\n def phase_change(self, the_map):\n self.phase = 'day' if self.phase == 'night' else 'night'\n for k, square in the_map.items():\n if self.location != k:\n square.generate_items()\n for b in square.buildings:\n if b.ware_list:\n b.wares = drop_item(b.ware_list)\n while not b.wares:\n b.wares = drop_item(b.ware_list)\n if b.name not in ('a castle', 'a volcanic base'):\n jobs = {}\n buiding_dict = add_dicts_together(buildings[\n 'master'], buildings[square.square_type])\n for key, v in buiding_dict.items():\n if key == b.name and v.get('jobs'):\n for name, values in v['jobs'].items():\n jobs[name] = values\n b.jobs = b.drop_job(jobs)\n if self.phase == 'day':\n self.speed_bonus = 
False\n for mob in square.mobs:\n mob.health = 100\n mob.irritation_level = 0\n mob.quest = None if self.quest is None else mob.quest\n if not square.mobs:\n square.mobs = drop_mob(add_dicts_together(wild_mobs\n ['master'], wild_mobs[self.square.square_type]),\n self, limit=len(names), square=square)\n\n def formatted_inventory(self):\n formatted = []\n for item in self.inventory:\n if item.quantity > 1:\n formatted.append(f'{int_to_words(item.quantity)} {item.plural}'\n )\n else:\n formatted.append(item.name)\n if formatted:\n return comma_separated(formatted)\n else:\n return 'nothing'\n\n def pretty_inventory(self):\n w = self.equipped_weapon\n major = self.major_armor.defense if self.major_armor else 0\n minor = self.minor_armor.defense if self.minor_armor else 0\n armor_defense = (major + minor) * 5\n armors = [self.major_armor.name if self.major_armor else None, self\n .minor_armor.name if self.minor_armor else None]\n inventory = {'inventory_items':\n f'You have {self.formatted_inventory()} in your inventory.',\n 'weapon': \n f'You are wielding {int_to_words(w.quantity)} {remove_little_words(w.name) if w.quantity == 1 else w.plural}.'\n if w else None, 'armor': \n f\"You are wearing {' and '.join(x for x in armors if x)}, giving you a {armor_defense}% reduction in incoming damage.\"\n if self.minor_armor or self.major_armor else None}\n return '\\n'.join(v for v in inventory.values() if v)\n\n def status(self):\n skills = [f'{k}: {v}%.' for k, v in self.skills.items()]\n job = f'You have a job as a {self.job.name}.' if self.job else None\n quest = 'You have a quest.' 
if self.quest else None\n if job and quest:\n job_string = '\\n'.join([job, quest])\n elif job or quest:\n job_string = job if job else quest\n else:\n job_string = (\n 'You do not have a job, and you are not contributing to society.'\n )\n status_string = {'health':\n f'Currently, you have {self.health} health.', 'location':\n f'You are located on map coordinates {self.location}, which is {self.square.square_type}.'\n , 'building_local': \n f'You are inside {self.building_local.name}.' if self.\n building_local else None, 'skills': '\\n'.join(skills) if skills\n else \"You don't have any skills.\", 'money':\n f'You have ${self.money} in your wallet.', 'job': job_string}\n return '\\n'.join(v for v in status_string.values() if v)\n\n def statistics(self):\n print(f'You have killed {self.body_count} mobs.')\n print(f'You have ran away from {self.run_away_count} battles.')\n print(f'You have eaten {self.food_count} items.')\n print(f'You have performed {self.assassination_count} assassinations.')\n print(f'You have talked to mobs {self.greeting_count} times.')\n\n def view_hit_list(self):\n if self.hit_list:\n print(\n f'If you ever run across these shady characters, be sure to take their names off your list: {comma_separated(self.hit_list)}'\n )\n else:\n print(\"Looks like you don't know of anyone who needs to be dead.\")\n\n def increase_skill(self, skill, increase):\n try:\n self.skills[skill] += increase\n except KeyError:\n self.skills[skill] = increase\n print(\n f'You have increased your mastery of {skill} by {increase}% for a total of {self.skills[skill]}%.'\n )\n\n\nclass Item:\n\n def __init__(self, name, quantity, plural, category=None, perishable=\n None, flammable=None, rarity=None, price=None, weapon_rating=None,\n defense=None):\n self.name = name\n self.quantity = quantity\n self.plural = plural\n self.category = category or None\n self.perishable = perishable or None\n self.flammable = flammable or None\n self.rarity = rarity or None\n self.price = 
price or None\n self.weapon_rating = weapon_rating or None\n self.defense = defense or None\n\n def copy(self):\n return Item(name=self.name, quantity=self.quantity, plural=self.\n plural, category=self.category, perishable=self.perishable,\n flammable=self.flammable, rarity=self.rarity, weapon_rating=\n self.weapon_rating, defense=self.defense)\n\n\nclass Building(object):\n\n def __init__(self, name, p, plural, category=None, rarity=None,\n ware_list=None, mobs=None, jobs=None):\n self.name = name\n self.p = p\n self.quantity = 1\n self.plural = plural\n self.category = category or None\n self.rarity = rarity or None\n self.ware_list = ware_list\n self.wares = self.drop_wares()\n self.mobs = drop_mob(mobs, p) if mobs else None\n self.jobs = self.drop_job(jobs) if jobs else None\n if self.name in ('a castle', 'a volcanic base'):\n self.boss_mobs_and_jobs()\n\n def drop_wares(self):\n if self.ware_list:\n wares = drop_item(self.ware_list)\n while not wares:\n wares = drop_item(self.ware_list)\n return wares\n else:\n return []\n\n def drop_job(self, jobs):\n drops_i = []\n for k, v in jobs.items():\n if odds(2):\n drops_i.append(Job(name=k, location=self.p.location, **v))\n return drops_i\n\n def boss_mobs_and_jobs(self):\n boss_major_armors = [Item('a coat of impervious dragon scales',\n plural='coats of dragon scales', quantity=1, category=\n 'major armor', rarity='super rare', defense=5), Item(\n 'an enchanted leather duster', plural=\n 'enchanted leather dusters', quantity=1, category='major armor',\n defense=5, rarity='super rare'), Item(\n 'a coat of actual live grizzly bears', plural=\n 'coats of actual live grizzly bears', quantity=1, category=\n 'major armor', defense=5, rarity='super rare')]\n boss_minor_armors = [Item('wings of an angel', plural=\n 'wings of angels', quantity=1, rarity='super rare', category=\n 'minor armor', defense=5), Item('an OSHA approved hard hat',\n plural='OSHA approved hard hats', quantity=1, rarity=\n 'super rare', 
category='minor armor', defense=5), Item(\n 'a pair boots that were made for walkin', plural=\n 'pairs of boots that were made for walkin', quantity=1, rarity=\n 'super rare', category='minor armor', defense=5)]\n boss_weapons = [Item('an apache helicopter', plural=\n 'apache helicopters', rarity='super rare', weapon_rating=6,\n quantity=1), Item('a trebuchet', plural='trebuchets',\n weapon_rating=6, quantity=1, rarity='super rare'), Item(\n 'an army of attacking wizards', plural=\n 'armies of attacking wizards', weapon_rating=6, quantity=1,\n rarity='super rare')]\n boss_names = ['the Terrifying Dragon of Soul Slaying',\n 'the Great Salamander of Darkness', 'the Squirrel of Destiny']\n random.shuffle(boss_names)\n random.shuffle(boss_weapons)\n random.shuffle(boss_major_armors)\n random.shuffle(boss_minor_armors)\n boss = Mob(boss_names[0], self.p, plural=boss_names[0], rarity=\n 'super rare')\n boss.health = 500\n boss.equipped_weapon = boss_weapons[0]\n boss.major_armor = boss_major_armors[0]\n boss.minor_armor = boss_minor_armors[0]\n boss.irritation_level = 10\n self.mobs = [boss]\n if self.name == 'a castle':\n self.jobs = [Job('king of the realm', location=self.p.location,\n salary=1100)]\n if self.name == 'a volcanic base':\n self.jobs = [Job('evil overlord', location=self.p.location,\n salary=1100)]\n\n\nclass Job:\n\n def __init__(self, name, location, skills_needed=None, salary=0,\n skills_learned=None, inventory_needed=None):\n self.name = name\n self.location = location\n self.skills_needed = skills_needed or None\n self.salary = salary or 0\n self.skills_learned = skills_learned or None\n self.inventory_needed = inventory_needed or None\n self.application_attempts = 0\n\n\nclass Mob:\n\n def __init__(self, name, p, plural, rarity, inventory=None):\n self.name = name\n self.p = p\n self.plural = plural\n self.quantity = 1\n self.rarity = rarity\n self.skills = self.skills()\n self.quest = None\n self.inventory = inventory or 
drop_item(add_dicts_together(items[\n 'master'], items[p.square.square_type]))\n self.health = 100\n self.equipped_weapon = self.equip()\n major = [x for x in self.inventory if x.category == 'major armor']\n minor = [x for x in self.inventory if x.category == 'minor armor']\n self.major_armor = major[0] if major else None\n self.minor_armor = minor[0] if minor else None\n self.irritation_level = 0\n\n def equip(self):\n nice_weapons = []\n for i in self.inventory:\n try:\n if i.weapon_rating:\n nice_weapons.append(i)\n except AttributeError:\n pass\n nice_weapons.sort(key=lambda x: x.weapon_rating, reverse=True)\n if nice_weapons:\n self.inventory.remove(nice_weapons[0])\n return nice_weapons[0]\n else:\n return None\n\n @staticmethod\n def skills():\n \"\"\" Pick the skills for a mob, these determine what a player can get from completing a quest \"\"\"\n all_skills = ['strength', 'patience', 'cleanliness', 'leadership',\n 'communication', 'science', 'math', 'engineering',\n 'intelligence', 'driving']\n random.shuffle(all_skills)\n return all_skills[0:2]\n\n def generate_quest(self):\n \"\"\"\n inventory based\n bring me x of an object to learn a skill\n \"\"\"\n if odds(3):\n quest_items = add_dicts_together(items['master'], items[self.p.\n square.square_type])\n quest_item = random.choice(list(quest_items.keys()))\n i = Item(quest_item, 0, **quest_items[quest_item])\n self.inventory.append(i)\n quantity = {'super rare': '1', 'rare': '2', 'uncommon': '3',\n 'common': '6', 'super common': '15'}\n q = quantity[i.rarity]\n self.quest = (i, int(q),\n f'{self.p.name}, if you bring me {q} {i.plural if int(q) > 1 else remove_little_words(i.name)}, I will teach you a valuable skill.'\n )\n return\n elif odds(5):\n mobs = []\n for biome, building in buildings.items():\n for b, attributes in building.items():\n if attributes.get('mobs'):\n for k in attributes['mobs'].keys():\n mobs.append(k)\n for biome, mob in wild_mobs.items():\n for k in mob.keys():\n mobs.append(k)\n 
target = (\n f'{mobs[random.randint(0, len(mobs) - 1)]} named {names[random.randint(0, len(names) - 1)]}'\n )\n print(\n f\"Well, we'll keep this off the record, but I can arrange for some money to find its way into your account if you make {colored(target, 'yellow')} disappear, if you know what I mean...\"\n )\n self.p.hit_list.append(target)\n return False\n else:\n return None\n",
"step-5": "import random\n\nimport colorama\nfrom termcolor import colored\nfrom reusables.string_manipulation import int_to_words\n\nfrom app.common_functions import comma_separated, add_dicts_together, remove_little_words, odds\nfrom app.load_data import items, buildings, wild_mobs, names, adjectives\n\n\ncolorama.init()\n\n\ndef find_unique_names(quantity, name_list, taken_names):\n free_names = [x for x in name_list if x not in taken_names]\n random.shuffle(free_names)\n return free_names[:quantity]\n\n\ndef dropper(rarity):\n results = {'super rare': 100,\n 'rare': 50,\n 'uncommon': 25,\n 'common': 5,\n 'super common': 2}\n quantity = 0\n countdown = random.randint(0, 10)\n while countdown > 0:\n if random.randint(0, results[rarity]) == 1:\n quantity += 1\n countdown -= 1\n return quantity\n\n\ndef drop_building(dictionary, p, limit=None):\n limit = limit or len(adjectives)\n drops_i = []\n\n for k, v in dictionary.items():\n quantity = dropper(v['rarity'])\n quantity = quantity if quantity < limit else limit\n limit -= quantity\n if quantity:\n if quantity > 1 and v['category'] != 'residence':\n n = random.randint(0, quantity)\n unique_names = find_unique_names(quantity - n, names, p.square.unique_building_names)\n p.square.unique_building_names += unique_names\n for i in range(0, quantity - n):\n drops_i.append(Building(name=f\"{unique_names[i]}'s {remove_little_words(k).capitalize()}\", p=p, **v))\n unique_adjectives = find_unique_names(n, adjectives, p.square.unique_building_names)\n p.square.unique_building_names += unique_adjectives\n for i in range(0, n):\n drops_i.append(Building(name=f\"the {unique_adjectives[i]} {remove_little_words(k).capitalize()}\", p=p, **v))\n\n elif quantity > 1 and v['category'] == 'residence':\n unique_house_names = find_unique_names(quantity, names, p.square.unique_house_names)\n p.square.unique_house_names += unique_house_names\n for i in range(0, quantity):\n drops_i.append(Building(name=f\"{unique_house_names[i]}'s 
{remove_little_words(k)}\", p=p, **v))\n else:\n drops_i.append(Building(name=k, p=p, **v))\n return drops_i\n\n\ndef drop_mob(dictionary, p, limit=None, square=None):\n square = square or p.square\n limit = limit or len(names) - len(square.unique_mob_names)\n drops_i = []\n\n for k, v in dictionary.items():\n quantity = dropper(v['rarity'])\n quantity = quantity if quantity < limit else limit\n limit -= quantity\n if quantity:\n if quantity > 1:\n unique_names = find_unique_names(quantity, names, square.unique_mob_names)\n p.square.unique_mob_names += unique_names\n for i in range(0, len(unique_names)):\n drops_i.append(Mob(name=f\"{k} named {unique_names[i]}\", p=p, **v))\n else:\n if k not in [n.name for n in p.square.mobs]:\n drops_i.append(Mob(name=k, p=p, **v))\n else:\n name = find_unique_names(1, names, square.unique_mob_names)[0]\n drops_i.append(Mob(name=f\"{k} named {name}\", p=p, **v))\n return drops_i\n\n\ndef drop_item(dictionary):\n \"\"\" Randomly generates objects based on rarity \"\"\"\n drops_i = []\n\n for k, v in dictionary.items():\n quantity = dropper(v['rarity'])\n if quantity:\n drops_i.append(Item(name=k, quantity=quantity, **v))\n\n return drops_i\n\n\nclass MapSquare:\n def __init__(self, name=\"\", square_type=None):\n square_types = [\"forest\", \"mountains\", \"desert\", \"city\", \"swamp\", \"ocean\"]\n self.square_type = square_type or square_types[random.randint(0, len(square_types) - 1)]\n self.name = name\n self.unique_mob_names = []\n self.unique_building_names = []\n self.unique_house_names = []\n\n mobs = []\n items = []\n buildings = []\n\n def generate_items(self):\n self.items = drop_item(add_dicts_together(items[\"master\"], items[self.square_type]))\n\n def generate_buildings(self, p):\n self.buildings = drop_building(add_dicts_together(buildings[\"master\"], buildings[self.square_type]), p)\n\n def generate_mobs(self, p):\n self.mobs = drop_mob(add_dicts_together(wild_mobs[\"master\"], wild_mobs[self.square_type]), 
p)\n\n def clean_up_map(self):\n \"\"\" Remove items with quantity of zero from the map inventory\"\"\"\n self.items = [i for i in self.items if i.quantity != 0]\n\n @staticmethod\n def map_picture(the_map, p):\n \"\"\"With the player's location in the center, draw a 5 x 5 map with map square type\n and coordinates in each square\"\"\"\n xy = (p.location[0] - 2, p.location[1] + 2)\n map_coords = []\n for y in range(0, 5):\n row = [(xy[0] + x, xy[1] - y) for x in range(0, 5)]\n map_coords.append(row)\n\n pretty_map = []\n for r in map_coords:\n row = []\n for coordinates in r:\n if coordinates in the_map.keys():\n if p.quest and p.job and p.quest[1] == coordinates and p.job.location == coordinates:\n star = '*$ '\n elif p.quest and p.quest[1] == coordinates:\n star = ' * '\n elif p.job and p.job.location == coordinates:\n star = ' $ '\n else:\n star = ' '\n row.append(\"|{!s:9}{}|\".format(the_map[coordinates].square_type, star))\n else:\n row.append(\"|{!s:12}|\".format(' '))\n pretty_map.append(row)\n for row in pretty_map:\n print(''.join(row))\n\n\nclass Player:\n def __init__(self, name, location):\n self.name = name\n self.location = location\n self.square = None\n self.money = 0\n self.quest = None\n self.job = None\n self.phase = \"day\"\n self.equipped_weapon = None\n self.major_armor = None\n self.minor_armor = None\n self.building_local = None\n self.inventory = []\n self.skills = {}\n self.health = 100\n self.greeting_count = 0\n self.body_count = 0\n self.assassination_count = 0\n self.hit_list = []\n self.death_count = 0\n # TODO increase insurance cost every death?\n self.food_count = 0\n self.run_away_count = 0\n self.speed_bonus = False\n self.game_won = False\n\n def game_over(self):\n if self.game_won is False:\n self.game_won = True\n print(colored(\"You have won the game!\", \"green\"))\n print(\"You may continue playing to earn more achievements if you wish.\")\n if self.run_away_count == 0:\n print(\"Congratulations, you have achieved the True 
Bravery achievement, having won the game without ever running away from a fight.\")\n if self.run_away_count > 100:\n print(\"Congratulations, you have achieved the True Cowardice achievement, having won the game after running away from over 100 battles.\")\n\n def clean_up_inventory(self):\n \"\"\" Remove items with quantity of zero from the map inventory\"\"\"\n self.inventory = [i for i in self.inventory if i.quantity != 0]\n\n def phase_change(self, the_map):\n self.phase = 'day' if self.phase == 'night' else 'night'\n for k, square in the_map.items():\n if self.location != k:\n square.generate_items()\n for b in square.buildings:\n if b.ware_list:\n b.wares = drop_item(b.ware_list)\n while not b.wares:\n b.wares = drop_item(b.ware_list)\n if b.name not in ('a castle', 'a volcanic base'):\n jobs = {}\n buiding_dict = add_dicts_together(buildings['master'], buildings[square.square_type])\n for key, v in buiding_dict.items():\n if key == b.name and v.get('jobs'):\n for name, values in v['jobs'].items():\n jobs[name] = values\n b.jobs = b.drop_job(jobs)\n if self.phase == 'day':\n self.speed_bonus = False\n for mob in square.mobs:\n mob.health = 100\n mob.irritation_level = 0\n mob.quest = None if self.quest is None else mob.quest\n if not square.mobs:\n square.mobs = drop_mob(add_dicts_together(wild_mobs[\"master\"], wild_mobs[self.square.square_type]),\n self, limit=len(names), square=square)\n\n def formatted_inventory(self):\n formatted = []\n for item in self.inventory:\n\n if item.quantity > 1:\n formatted.append(f\"{int_to_words(item.quantity)} {item.plural}\")\n else:\n formatted.append(item.name)\n if formatted:\n return comma_separated(formatted)\n else:\n return \"nothing\"\n\n def pretty_inventory(self):\n w = self.equipped_weapon\n major = self.major_armor.defense if self.major_armor else 0\n minor = self.minor_armor.defense if self.minor_armor else 0\n armor_defense = (major + minor) * 5\n\n armors = [self.major_armor.name if self.major_armor else 
None, self.minor_armor.name if self.minor_armor else None]\n\n inventory = {'inventory_items': f\"You have {self.formatted_inventory()} in your inventory.\",\n 'weapon': f\"You are wielding {int_to_words(w.quantity)} \"\n f\"{remove_little_words(w.name) if w.quantity == 1 else w.plural}.\" if w else None,\n 'armor': f\"You are wearing {' and '.join(x for x in armors if x)}, \"\n f\"giving you a {armor_defense}% reduction in incoming damage.\" if self.minor_armor or self.major_armor else None}\n return '\\n'.join(v for v in inventory.values() if v)\n\n def status(self):\n skills = [f\"{k}: {v}%.\" for k, v in self.skills.items()]\n\n job = f\"You have a job as a {self.job.name}.\" if self.job else None\n quest = \"You have a quest.\" if self.quest else None\n if job and quest:\n job_string = \"\\n\".join([job, quest])\n elif job or quest:\n job_string = job if job else quest\n else:\n job_string = \"You do not have a job, and you are not contributing to society.\"\n\n status_string = {\n 'health': f'Currently, you have {self.health} health.',\n 'location': f'You are located on map coordinates {self.location}, '\n f'which is {self.square.square_type}.',\n 'building_local': f'You are inside {self.building_local.name}.' 
if self.building_local else None,\n 'skills': '\\n'.join(skills) if skills else \"You don't have any skills.\",\n 'money': f\"You have ${self.money} in your wallet.\",\n 'job': job_string}\n\n return '\\n'.join(v for v in status_string.values() if v)\n\n def statistics(self):\n print(f\"You have killed {self.body_count} mobs.\")\n print(f\"You have ran away from {self.run_away_count} battles.\")\n print(f\"You have eaten {self.food_count} items.\")\n print(f\"You have performed {self.assassination_count} assassinations.\")\n print(f\"You have talked to mobs {self.greeting_count} times.\")\n\n def view_hit_list(self):\n if self.hit_list:\n print(f\"If you ever run across these shady characters, be sure to take their names off your list: {comma_separated(self.hit_list)}\")\n else:\n print(\"Looks like you don't know of anyone who needs to be dead.\")\n\n def increase_skill(self, skill, increase):\n try:\n self.skills[skill] += increase\n except KeyError:\n self.skills[skill] = increase\n print(f\"You have increased your mastery of {skill} by {increase}% for a total of {self.skills[skill]}%.\")\n\n\nclass Item:\n def __init__(self, name, quantity, plural, category=None, perishable=None,\n flammable=None, rarity=None, price=None, weapon_rating=None, defense=None):\n self.name = name\n self.quantity = quantity\n self.plural = plural\n self.category = category or None\n self.perishable = perishable or None\n self.flammable = flammable or None\n self.rarity = rarity or None\n self.price = price or None\n self.weapon_rating = weapon_rating or None\n self.defense = defense or None\n\n def copy(self):\n return Item(name=self.name, quantity=self.quantity, plural=self.plural, category=self.category,\n perishable=self.perishable, flammable=self.flammable, rarity=self.rarity,\n weapon_rating=self.weapon_rating, defense=self.defense)\n\n\nclass Building(object):\n def __init__(self, name, p, plural, category=None, rarity=None, ware_list=None, mobs=None, jobs=None):\n self.name = 
name\n self.p = p\n self.quantity = 1\n self.plural = plural\n self.category = category or None\n self.rarity = rarity or None\n self.ware_list = ware_list\n self.wares = self.drop_wares()\n self.mobs = drop_mob(mobs, p) if mobs else None\n self.jobs = self.drop_job(jobs) if jobs else None\n\n if self.name in ('a castle', 'a volcanic base'):\n self.boss_mobs_and_jobs()\n\n def drop_wares(self):\n if self.ware_list:\n wares = drop_item(self.ware_list)\n while not wares:\n wares = drop_item(self.ware_list)\n return wares\n else:\n return []\n\n def drop_job(self, jobs):\n drops_i = []\n for k, v in jobs.items():\n if odds(2):\n drops_i.append(Job(name=k, location=self.p.location, **v))\n return drops_i\n\n def boss_mobs_and_jobs(self):\n boss_major_armors = [Item('a coat of impervious dragon scales', plural='coats of dragon scales', quantity=1, category='major armor', rarity='super rare', defense=5),\n Item('an enchanted leather duster', plural='enchanted leather dusters', quantity=1, category='major armor', defense=5, rarity='super rare'),\n Item('a coat of actual live grizzly bears', plural='coats of actual live grizzly bears', quantity=1, category='major armor', defense=5, rarity='super rare')]\n boss_minor_armors = [Item('wings of an angel', plural='wings of angels', quantity=1, rarity='super rare', category='minor armor', defense=5),\n Item('an OSHA approved hard hat', plural='OSHA approved hard hats', quantity=1, rarity='super rare', category='minor armor', defense=5),\n Item('a pair boots that were made for walkin', plural='pairs of boots that were made for walkin', quantity=1, rarity='super rare', category='minor armor', defense=5)]\n boss_weapons = [Item('an apache helicopter', plural='apache helicopters', rarity='super rare', weapon_rating=6, quantity=1),\n Item('a trebuchet', plural='trebuchets', weapon_rating=6, quantity=1, rarity='super rare'),\n Item('an army of attacking wizards', plural='armies of attacking wizards', weapon_rating=6, quantity=1, 
rarity='super rare')]\n boss_names = [\"the Terrifying Dragon of Soul Slaying\", \"the Great Salamander of Darkness\", \"the Squirrel of Destiny\", ]\n random.shuffle(boss_names)\n random.shuffle(boss_weapons)\n random.shuffle(boss_major_armors)\n random.shuffle(boss_minor_armors)\n\n boss = Mob(boss_names[0], self.p, plural=boss_names[0], rarity='super rare')\n boss.health = 500\n boss.equipped_weapon = boss_weapons[0]\n boss.major_armor = boss_major_armors[0]\n boss.minor_armor = boss_minor_armors[0]\n boss.irritation_level = 10\n self.mobs = [boss]\n if self.name == 'a castle':\n self.jobs = [Job('king of the realm', location=self.p.location, salary=1100)]\n if self.name == 'a volcanic base':\n self.jobs = [Job('evil overlord', location=self.p.location, salary=1100)]\n\n\nclass Job:\n def __init__(self, name, location, skills_needed=None, salary=0, skills_learned=None, inventory_needed=None):\n self.name = name\n self.location = location\n self.skills_needed = skills_needed or None\n self.salary = salary or 0\n self.skills_learned = skills_learned or None\n self.inventory_needed = inventory_needed or None\n self.application_attempts = 0\n\n\nclass Mob:\n def __init__(self, name, p, plural, rarity, inventory=None):\n self.name = name\n self.p = p\n self.plural = plural\n self.quantity = 1\n self.rarity = rarity\n\n self.skills = self.skills()\n self.quest = None\n\n self.inventory = inventory or drop_item(add_dicts_together(items['master'], items[p.square.square_type]))\n self.health = 100\n self.equipped_weapon = self.equip()\n major = [x for x in self.inventory if x.category == 'major armor']\n minor = [x for x in self.inventory if x.category == 'minor armor']\n self.major_armor = major[0] if major else None\n self.minor_armor = minor[0] if minor else None\n self.irritation_level = 0\n\n def equip(self):\n nice_weapons = []\n for i in self.inventory:\n try:\n if i.weapon_rating:\n nice_weapons.append(i)\n except AttributeError:\n pass\n 
nice_weapons.sort(key=lambda x: x.weapon_rating, reverse=True)\n if nice_weapons:\n self.inventory.remove(nice_weapons[0])\n return nice_weapons[0]\n else:\n return None\n\n @staticmethod\n def skills():\n \"\"\" Pick the skills for a mob, these determine what a player can get from completing a quest \"\"\"\n all_skills = [\"strength\", \"patience\", \"cleanliness\", \"leadership\", \"communication\",\n \"science\", \"math\", \"engineering\", \"intelligence\", \"driving\"]\n\n random.shuffle(all_skills)\n return all_skills[0:2]\n\n def generate_quest(self):\n \"\"\"\n inventory based\n bring me x of an object to learn a skill\n \"\"\"\n\n if odds(3):\n\n quest_items = add_dicts_together(items[\"master\"], items[self.p.square.square_type])\n quest_item = random.choice(list(quest_items.keys()))\n\n i = Item(quest_item, 0, **quest_items[quest_item])\n self.inventory.append(i)\n\n quantity = {'super rare': '1',\n 'rare': '2',\n 'uncommon': '3',\n 'common': '6',\n 'super common': '15'}\n q = quantity[i.rarity]\n\n self.quest = i, int(q), f\"{self.p.name}, if you bring \" \\\n f\"me {q} {i.plural if int(q) > 1 else remove_little_words(i.name)}, \" \\\n f\"I will teach you a valuable skill.\"\n return\n elif odds(5):\n mobs = []\n for biome, building in buildings.items():\n for b, attributes in building.items():\n if attributes.get('mobs'):\n for k in attributes['mobs'].keys():\n mobs.append(k)\n for biome, mob in wild_mobs.items():\n for k in mob.keys():\n mobs.append(k)\n target = f\"{mobs[random.randint(0, len(mobs)-1)]} named {names[random.randint(0, len(names)-1)]}\"\n print(f\"Well, we'll keep this off the record, but I can arrange for some money to find its way \"\n f\"into your account if you make {colored(target, 'yellow')} disappear, if you know what I mean...\")\n self.p.hit_list.append(target)\n return False\n\n else:\n return None\n",
"step-ids": [
23,
31,
32,
38,
42
]
}
|
[
23,
31,
32,
38,
42
] |
__version__ = '3.13.7'
|
normal
|
{
"blob_id": "01852f6dbeb78df3098b14d2f0538ad9193ea511",
"index": 9873,
"step-1": "<mask token>\n",
"step-2": "__version__ = '3.13.7'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from ParseTree import ParseTree
from Node import Node
from NodeInfo import NodeInfo
from TreeAdjustor import TreeAdjustor
from model.SchemaGraph import SchemaGraph
class TreeAdjustorTest:
    """Manual test driver for the TreeAdjustor tree-rewriting routines.

    Builds small hand-wired parse trees and exercises
    ``TreeAdjustor.getAdjustedTrees()`` and ``TreeAdjustor.adjust()``,
    printing the highest-scoring adjusted trees and their SQL translation.
    """

    # Schema handed to translateToSQL(); assign a model.SchemaGraph
    # instance before running if SQL translation needs a real schema.
    schema = None

    def __init__(self):
        return

    @staticmethod
    def _build_tree(specs, edges):
        """Construct a ParseTree from node specs and parent/child edges.

        specs: iterable of (index, word, node_type, node_value) tuples.
        edges: iterable of (parent_index, child_index) tuples.
        Returns the assembled ParseTree (root is the node with index 0).
        """
        nodes = {}
        for idx, word, ntype, nvalue in specs:
            node = Node(index=idx, word=word, posTag='--')
            node.info = NodeInfo(type=ntype, value=nvalue)
            nodes[idx] = node
        for parent, child in edges:
            nodes[parent].children.append(nodes[child])
            nodes[child].parent = nodes[parent]
        tree = ParseTree()
        tree.root = nodes[0]
        return tree

    def getAdjustedTreesTest(self):
        """Run getAdjustedTrees() on a fixed 8-node query tree and print
        the highest-scoring adjusted trees plus their SQL queries."""
        specs = [
            (0, 'ROOT', 'ROOT', 'ROOT'),
            (1, 'return', 'SN', 'SELECT'),
            (2, 'conference', 'NN', 'Author'),
            (3, 'area', 'NN', 'Title'),
            (4, 'papers', 'NN', 'Author'),
            (5, 'citations', 'NN', 'Journal'),
            (6, 'most', 'FN', '>'),
            (7, 'total', 'FN', 'Year'),
        ]
        # ROOT -> return -> conference -> {area, papers},
        # papers -> citations -> {most, total}
        edges = [(0, 1), (1, 2), (2, 3), (2, 4), (4, 5), (5, 6), (5, 7)]
        T = self._build_tree(specs, edges)

        print("===========test for Running getAdjustedTrees() in TreeAdjustor===========")
        print("The original tree:")
        print(T.toString())
        print("Number of possible trees for choice:")
        result = TreeAdjustor.getAdjustedTrees(T)
        print(len(result))
        # Highest score first; replaces the original O(n^2) manual swap sort.
        result.sort(key=lambda tree: tree.getScore(), reverse=True)
        print("The three trees with highest scores look like:")
        # Slice instead of indexing result[0..4] so fewer than five
        # results no longer raises IndexError.
        for tree in result[:5]:
            print(tree)
        for tree in result:
            print(" treeList Result %s:%d" % (tree.getSentence(), tree.getScore()))
            tree.insertImplicitNodes()
            query = tree.translateToSQL(self.schema)
            print("qUERY: " + query.toString())

    def adjustTest(self):
        """Run adjust() on a fixed 9-node query tree (with a QN node) and
        print every adjusted tree produced."""
        specs = [
            (0, 'ROOT', 'ROOT', 'ROOT'),
            (1, 'return', 'SN', 'SELECT'),
            (2, 'conference', 'NN', 'Author'),
            (3, 'area', 'NN', 'Title'),
            (4, 'each', 'QN', '>'),
            (5, 'papers', 'NN', 'Author'),
            (6, 'citations', 'NN', 'Journal'),
            (7, 'most', 'FN', '>'),
            (8, 'total', 'FN', 'Year'),
        ]
        # ROOT -> return -> conference -> {area, papers},
        # area -> each, papers -> citations -> {most, total}
        edges = [(0, 1), (1, 2), (2, 3), (2, 5), (3, 4), (5, 6), (6, 7), (6, 8)]
        T = self._build_tree(specs, edges)

        print("===========test for Running adjust() in TreeAdjustor===========")
        treeList = TreeAdjustor.adjust(T)
        print("Output size: %d" % len(treeList))
        print("Output trees:")
        for ctr, tr in enumerate(treeList):
            print("Tree %d %s" % (ctr, tr.getSentence()))

    @staticmethod
    def cmpp(a, b):
        # Legacy cmp-style comparator (descending by score); kept for
        # backward compatibility even though sorting now uses a key.
        return a.getScore() > b.getScore()
if __name__ == "__main__":
    # Run the test driver only when executed as a script; previously the
    # test ran unconditionally at import time as a module side effect.
    obj = TreeAdjustorTest()
    obj.getAdjustedTreesTest()
    # obj.adjustTest()
|
normal
|
{
"blob_id": "1db397df2d030b2f622e701c46c15d653cb79e55",
"index": 5079,
"step-1": "<mask token>\n\n\nclass TreeAdjustorTest:\n <mask token>\n\n def __init__(self):\n return\n\n def getAdjustedTreesTest(self):\n T = ParseTree()\n nodes = [Node(index=-1, word='DEFAULT', posTag='DEFAULT') for i in\n range(0, 8)]\n nodes[0] = Node(index=0, word='ROOT', posTag='--')\n nodes[0].info = NodeInfo(type='ROOT', value='ROOT')\n nodes[1] = Node(index=1, word='return', posTag='--')\n nodes[1].info = NodeInfo(type='SN', value='SELECT')\n nodes[2] = Node(index=2, word='conference', posTag='--')\n nodes[2].info = NodeInfo(type='NN', value='Author')\n nodes[3] = Node(index=3, word='area', posTag='--')\n nodes[3].info = NodeInfo(type='NN', value='Title')\n nodes[4] = Node(index=4, word='papers', posTag='--')\n nodes[4].info = NodeInfo(type='NN', value='Author')\n nodes[5] = Node(index=5, word='citations', posTag='--')\n nodes[5].info = NodeInfo(type='NN', value='Journal')\n nodes[6] = Node(index=6, word='most', posTag='--')\n nodes[6].info = NodeInfo(type='FN', value='>')\n nodes[7] = Node(index=7, word='total', posTag='--')\n nodes[7].info = NodeInfo(type='FN', value='Year')\n T.root = nodes[0]\n nodes[0].children.append(nodes[1])\n nodes[1].parent = nodes[0]\n nodes[1].children.append(nodes[2])\n nodes[2].parent = nodes[1]\n nodes[2].children.append(nodes[3])\n nodes[3].parent = nodes[2]\n nodes[2].children.append(nodes[4])\n nodes[4].parent = nodes[2]\n nodes[4].children.append(nodes[5])\n nodes[5].parent = nodes[4]\n nodes[5].children.append(nodes[6])\n nodes[6].parent = nodes[5]\n nodes[5].children.append(nodes[7])\n nodes[7].parent = nodes[5]\n print(\n '===========test for Running getAdjustedTrees() in TreeAdjustor==========='\n )\n print('The original tree:')\n print(T.toString())\n print('Number of possible trees for choice:')\n obj = TreeAdjustor()\n result = TreeAdjustor.getAdjustedTrees(T)\n print(len(result))\n for i in range(0, len(result)):\n for j in range(i + 1, len(result)):\n if result[i].getScore() <= result[j].getScore():\n temp = 
result[i]\n result[i] = result[j]\n result[j] = temp\n print('The three trees with highest scores look like:')\n for i in range(0, 5):\n print(result[i])\n for tree in result:\n print(' treeList Result %s:%d' % (tree.getSentence(), tree.\n getScore()))\n tree.insertImplicitNodes()\n query = tree.translateToSQL(self.schema)\n print('qUERY: ' + query.toString())\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TreeAdjustorTest:\n <mask token>\n\n def __init__(self):\n return\n\n def getAdjustedTreesTest(self):\n T = ParseTree()\n nodes = [Node(index=-1, word='DEFAULT', posTag='DEFAULT') for i in\n range(0, 8)]\n nodes[0] = Node(index=0, word='ROOT', posTag='--')\n nodes[0].info = NodeInfo(type='ROOT', value='ROOT')\n nodes[1] = Node(index=1, word='return', posTag='--')\n nodes[1].info = NodeInfo(type='SN', value='SELECT')\n nodes[2] = Node(index=2, word='conference', posTag='--')\n nodes[2].info = NodeInfo(type='NN', value='Author')\n nodes[3] = Node(index=3, word='area', posTag='--')\n nodes[3].info = NodeInfo(type='NN', value='Title')\n nodes[4] = Node(index=4, word='papers', posTag='--')\n nodes[4].info = NodeInfo(type='NN', value='Author')\n nodes[5] = Node(index=5, word='citations', posTag='--')\n nodes[5].info = NodeInfo(type='NN', value='Journal')\n nodes[6] = Node(index=6, word='most', posTag='--')\n nodes[6].info = NodeInfo(type='FN', value='>')\n nodes[7] = Node(index=7, word='total', posTag='--')\n nodes[7].info = NodeInfo(type='FN', value='Year')\n T.root = nodes[0]\n nodes[0].children.append(nodes[1])\n nodes[1].parent = nodes[0]\n nodes[1].children.append(nodes[2])\n nodes[2].parent = nodes[1]\n nodes[2].children.append(nodes[3])\n nodes[3].parent = nodes[2]\n nodes[2].children.append(nodes[4])\n nodes[4].parent = nodes[2]\n nodes[4].children.append(nodes[5])\n nodes[5].parent = nodes[4]\n nodes[5].children.append(nodes[6])\n nodes[6].parent = nodes[5]\n nodes[5].children.append(nodes[7])\n nodes[7].parent = nodes[5]\n print(\n '===========test for Running getAdjustedTrees() in TreeAdjustor==========='\n )\n print('The original tree:')\n print(T.toString())\n print('Number of possible trees for choice:')\n obj = TreeAdjustor()\n result = TreeAdjustor.getAdjustedTrees(T)\n print(len(result))\n for i in range(0, len(result)):\n for j in range(i + 1, len(result)):\n if result[i].getScore() <= result[j].getScore():\n temp = 
result[i]\n result[i] = result[j]\n result[j] = temp\n print('The three trees with highest scores look like:')\n for i in range(0, 5):\n print(result[i])\n for tree in result:\n print(' treeList Result %s:%d' % (tree.getSentence(), tree.\n getScore()))\n tree.insertImplicitNodes()\n query = tree.translateToSQL(self.schema)\n print('qUERY: ' + query.toString())\n\n def adjustTest(self):\n T = ParseTree()\n nodes = [Node(index=-1, word='DEFAULT', posTag='DEFAULT') for i in\n range(0, 9)]\n nodes[0] = Node(index=0, word='ROOT', posTag='--')\n nodes[0].info = NodeInfo(type='ROOT', value='ROOT')\n nodes[1] = Node(index=1, word='return', posTag='--')\n nodes[1].info = NodeInfo(type='SN', value='SELECT')\n nodes[2] = Node(index=2, word='conference', posTag='--')\n nodes[2].info = NodeInfo(type='NN', value='Author')\n nodes[3] = Node(index=3, word='area', posTag='--')\n nodes[3].info = NodeInfo(type='NN', value='Title')\n nodes[4] = Node(index=4, word='each', posTag='--')\n nodes[4].info = NodeInfo(type='QN', value='>')\n nodes[5] = Node(index=5, word='papers', posTag='--')\n nodes[5].info = NodeInfo(type='NN', value='Author')\n nodes[6] = Node(index=6, word='citations', posTag='--')\n nodes[6].info = NodeInfo(type='NN', value='Journal')\n nodes[7] = Node(index=7, word='most', posTag='--')\n nodes[7].info = NodeInfo(type='FN', value='>')\n nodes[8] = Node(index=8, word='total', posTag='--')\n nodes[8].info = NodeInfo(type='FN', value='Year')\n T.root = nodes[0]\n nodes[0].children.append(nodes[1])\n nodes[1].parent = nodes[0]\n nodes[1].children.append(nodes[2])\n nodes[2].parent = nodes[1]\n nodes[2].children.append(nodes[3])\n nodes[3].parent = nodes[2]\n nodes[2].children.append(nodes[5])\n nodes[5].parent = nodes[2]\n nodes[3].children.append(nodes[4])\n nodes[4].parent = nodes[3]\n nodes[5].children.append(nodes[6])\n nodes[6].parent = nodes[5]\n nodes[6].children.append(nodes[7])\n nodes[7].parent = nodes[6]\n nodes[6].children.append(nodes[8])\n nodes[8].parent = 
nodes[6]\n print('===========test for Running adjust() in TreeAdjustor==========='\n )\n treeList = TreeAdjustor.adjust(T)\n print('Output size: %d' % len(treeList))\n print('Output trees:')\n ctr = 0\n for tr in treeList:\n print('Tree %d %s' % (ctr, tr.getSentence()))\n ctr += 1\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TreeAdjustorTest:\n schema = None\n\n def __init__(self):\n return\n\n def getAdjustedTreesTest(self):\n T = ParseTree()\n nodes = [Node(index=-1, word='DEFAULT', posTag='DEFAULT') for i in\n range(0, 8)]\n nodes[0] = Node(index=0, word='ROOT', posTag='--')\n nodes[0].info = NodeInfo(type='ROOT', value='ROOT')\n nodes[1] = Node(index=1, word='return', posTag='--')\n nodes[1].info = NodeInfo(type='SN', value='SELECT')\n nodes[2] = Node(index=2, word='conference', posTag='--')\n nodes[2].info = NodeInfo(type='NN', value='Author')\n nodes[3] = Node(index=3, word='area', posTag='--')\n nodes[3].info = NodeInfo(type='NN', value='Title')\n nodes[4] = Node(index=4, word='papers', posTag='--')\n nodes[4].info = NodeInfo(type='NN', value='Author')\n nodes[5] = Node(index=5, word='citations', posTag='--')\n nodes[5].info = NodeInfo(type='NN', value='Journal')\n nodes[6] = Node(index=6, word='most', posTag='--')\n nodes[6].info = NodeInfo(type='FN', value='>')\n nodes[7] = Node(index=7, word='total', posTag='--')\n nodes[7].info = NodeInfo(type='FN', value='Year')\n T.root = nodes[0]\n nodes[0].children.append(nodes[1])\n nodes[1].parent = nodes[0]\n nodes[1].children.append(nodes[2])\n nodes[2].parent = nodes[1]\n nodes[2].children.append(nodes[3])\n nodes[3].parent = nodes[2]\n nodes[2].children.append(nodes[4])\n nodes[4].parent = nodes[2]\n nodes[4].children.append(nodes[5])\n nodes[5].parent = nodes[4]\n nodes[5].children.append(nodes[6])\n nodes[6].parent = nodes[5]\n nodes[5].children.append(nodes[7])\n nodes[7].parent = nodes[5]\n print(\n '===========test for Running getAdjustedTrees() in TreeAdjustor==========='\n )\n print('The original tree:')\n print(T.toString())\n print('Number of possible trees for choice:')\n obj = TreeAdjustor()\n result = TreeAdjustor.getAdjustedTrees(T)\n print(len(result))\n for i in range(0, len(result)):\n for j in range(i + 1, len(result)):\n if result[i].getScore() <= result[j].getScore():\n temp = 
result[i]\n result[i] = result[j]\n result[j] = temp\n print('The three trees with highest scores look like:')\n for i in range(0, 5):\n print(result[i])\n for tree in result:\n print(' treeList Result %s:%d' % (tree.getSentence(), tree.\n getScore()))\n tree.insertImplicitNodes()\n query = tree.translateToSQL(self.schema)\n print('qUERY: ' + query.toString())\n\n def adjustTest(self):\n T = ParseTree()\n nodes = [Node(index=-1, word='DEFAULT', posTag='DEFAULT') for i in\n range(0, 9)]\n nodes[0] = Node(index=0, word='ROOT', posTag='--')\n nodes[0].info = NodeInfo(type='ROOT', value='ROOT')\n nodes[1] = Node(index=1, word='return', posTag='--')\n nodes[1].info = NodeInfo(type='SN', value='SELECT')\n nodes[2] = Node(index=2, word='conference', posTag='--')\n nodes[2].info = NodeInfo(type='NN', value='Author')\n nodes[3] = Node(index=3, word='area', posTag='--')\n nodes[3].info = NodeInfo(type='NN', value='Title')\n nodes[4] = Node(index=4, word='each', posTag='--')\n nodes[4].info = NodeInfo(type='QN', value='>')\n nodes[5] = Node(index=5, word='papers', posTag='--')\n nodes[5].info = NodeInfo(type='NN', value='Author')\n nodes[6] = Node(index=6, word='citations', posTag='--')\n nodes[6].info = NodeInfo(type='NN', value='Journal')\n nodes[7] = Node(index=7, word='most', posTag='--')\n nodes[7].info = NodeInfo(type='FN', value='>')\n nodes[8] = Node(index=8, word='total', posTag='--')\n nodes[8].info = NodeInfo(type='FN', value='Year')\n T.root = nodes[0]\n nodes[0].children.append(nodes[1])\n nodes[1].parent = nodes[0]\n nodes[1].children.append(nodes[2])\n nodes[2].parent = nodes[1]\n nodes[2].children.append(nodes[3])\n nodes[3].parent = nodes[2]\n nodes[2].children.append(nodes[5])\n nodes[5].parent = nodes[2]\n nodes[3].children.append(nodes[4])\n nodes[4].parent = nodes[3]\n nodes[5].children.append(nodes[6])\n nodes[6].parent = nodes[5]\n nodes[6].children.append(nodes[7])\n nodes[7].parent = nodes[6]\n nodes[6].children.append(nodes[8])\n nodes[8].parent = 
nodes[6]\n print('===========test for Running adjust() in TreeAdjustor==========='\n )\n treeList = TreeAdjustor.adjust(T)\n print('Output size: %d' % len(treeList))\n print('Output trees:')\n ctr = 0\n for tr in treeList:\n print('Tree %d %s' % (ctr, tr.getSentence()))\n ctr += 1\n\n @staticmethod\n def cmpp(a, b):\n return a.getScore() > b.getScore()\n\n\nobj = TreeAdjustorTest()\nobj.getAdjustedTreesTest()\n",
"step-4": "from ParseTree import ParseTree\nfrom Node import Node\nfrom NodeInfo import NodeInfo\nfrom TreeAdjustor import TreeAdjustor\nfrom model.SchemaGraph import SchemaGraph\n\n\nclass TreeAdjustorTest:\n schema = None\n\n def __init__(self):\n return\n\n def getAdjustedTreesTest(self):\n T = ParseTree()\n nodes = [Node(index=-1, word='DEFAULT', posTag='DEFAULT') for i in\n range(0, 8)]\n nodes[0] = Node(index=0, word='ROOT', posTag='--')\n nodes[0].info = NodeInfo(type='ROOT', value='ROOT')\n nodes[1] = Node(index=1, word='return', posTag='--')\n nodes[1].info = NodeInfo(type='SN', value='SELECT')\n nodes[2] = Node(index=2, word='conference', posTag='--')\n nodes[2].info = NodeInfo(type='NN', value='Author')\n nodes[3] = Node(index=3, word='area', posTag='--')\n nodes[3].info = NodeInfo(type='NN', value='Title')\n nodes[4] = Node(index=4, word='papers', posTag='--')\n nodes[4].info = NodeInfo(type='NN', value='Author')\n nodes[5] = Node(index=5, word='citations', posTag='--')\n nodes[5].info = NodeInfo(type='NN', value='Journal')\n nodes[6] = Node(index=6, word='most', posTag='--')\n nodes[6].info = NodeInfo(type='FN', value='>')\n nodes[7] = Node(index=7, word='total', posTag='--')\n nodes[7].info = NodeInfo(type='FN', value='Year')\n T.root = nodes[0]\n nodes[0].children.append(nodes[1])\n nodes[1].parent = nodes[0]\n nodes[1].children.append(nodes[2])\n nodes[2].parent = nodes[1]\n nodes[2].children.append(nodes[3])\n nodes[3].parent = nodes[2]\n nodes[2].children.append(nodes[4])\n nodes[4].parent = nodes[2]\n nodes[4].children.append(nodes[5])\n nodes[5].parent = nodes[4]\n nodes[5].children.append(nodes[6])\n nodes[6].parent = nodes[5]\n nodes[5].children.append(nodes[7])\n nodes[7].parent = nodes[5]\n print(\n '===========test for Running getAdjustedTrees() in TreeAdjustor==========='\n )\n print('The original tree:')\n print(T.toString())\n print('Number of possible trees for choice:')\n obj = TreeAdjustor()\n result = 
TreeAdjustor.getAdjustedTrees(T)\n print(len(result))\n for i in range(0, len(result)):\n for j in range(i + 1, len(result)):\n if result[i].getScore() <= result[j].getScore():\n temp = result[i]\n result[i] = result[j]\n result[j] = temp\n print('The three trees with highest scores look like:')\n for i in range(0, 5):\n print(result[i])\n for tree in result:\n print(' treeList Result %s:%d' % (tree.getSentence(), tree.\n getScore()))\n tree.insertImplicitNodes()\n query = tree.translateToSQL(self.schema)\n print('qUERY: ' + query.toString())\n\n def adjustTest(self):\n T = ParseTree()\n nodes = [Node(index=-1, word='DEFAULT', posTag='DEFAULT') for i in\n range(0, 9)]\n nodes[0] = Node(index=0, word='ROOT', posTag='--')\n nodes[0].info = NodeInfo(type='ROOT', value='ROOT')\n nodes[1] = Node(index=1, word='return', posTag='--')\n nodes[1].info = NodeInfo(type='SN', value='SELECT')\n nodes[2] = Node(index=2, word='conference', posTag='--')\n nodes[2].info = NodeInfo(type='NN', value='Author')\n nodes[3] = Node(index=3, word='area', posTag='--')\n nodes[3].info = NodeInfo(type='NN', value='Title')\n nodes[4] = Node(index=4, word='each', posTag='--')\n nodes[4].info = NodeInfo(type='QN', value='>')\n nodes[5] = Node(index=5, word='papers', posTag='--')\n nodes[5].info = NodeInfo(type='NN', value='Author')\n nodes[6] = Node(index=6, word='citations', posTag='--')\n nodes[6].info = NodeInfo(type='NN', value='Journal')\n nodes[7] = Node(index=7, word='most', posTag='--')\n nodes[7].info = NodeInfo(type='FN', value='>')\n nodes[8] = Node(index=8, word='total', posTag='--')\n nodes[8].info = NodeInfo(type='FN', value='Year')\n T.root = nodes[0]\n nodes[0].children.append(nodes[1])\n nodes[1].parent = nodes[0]\n nodes[1].children.append(nodes[2])\n nodes[2].parent = nodes[1]\n nodes[2].children.append(nodes[3])\n nodes[3].parent = nodes[2]\n nodes[2].children.append(nodes[5])\n nodes[5].parent = nodes[2]\n nodes[3].children.append(nodes[4])\n nodes[4].parent = nodes[3]\n 
nodes[5].children.append(nodes[6])\n nodes[6].parent = nodes[5]\n nodes[6].children.append(nodes[7])\n nodes[7].parent = nodes[6]\n nodes[6].children.append(nodes[8])\n nodes[8].parent = nodes[6]\n print('===========test for Running adjust() in TreeAdjustor==========='\n )\n treeList = TreeAdjustor.adjust(T)\n print('Output size: %d' % len(treeList))\n print('Output trees:')\n ctr = 0\n for tr in treeList:\n print('Tree %d %s' % (ctr, tr.getSentence()))\n ctr += 1\n\n @staticmethod\n def cmpp(a, b):\n return a.getScore() > b.getScore()\n\n\nobj = TreeAdjustorTest()\nobj.getAdjustedTreesTest()\n",
"step-5": "\nfrom ParseTree import ParseTree\nfrom Node import Node\nfrom NodeInfo import NodeInfo\nfrom TreeAdjustor import TreeAdjustor\nfrom model.SchemaGraph import SchemaGraph\n\n\nclass TreeAdjustorTest:\n\n schema = None\n def __init__(self):\n return\n\n def getAdjustedTreesTest(self):\n\n\n\n T = ParseTree()\n nodes = [Node(index=-1, word=\"DEFAULT\", posTag=\"DEFAULT\") for i in range(0, 8)]\n\n nodes[0] = Node(index=0, word=\"ROOT\", posTag=\"--\")\n nodes[0].info = NodeInfo(type=\"ROOT\", value=\"ROOT\")\n nodes[1] = Node(index=1, word=\"return\", posTag=\"--\")\n nodes[1].info = NodeInfo(type=\"SN\", value=\"SELECT\")\n nodes[2] = Node(index=2, word=\"conference\", posTag=\"--\")\n nodes[2].info = NodeInfo(type=\"NN\", value=\"Author\")\n nodes[3] = Node(index=3, word=\"area\", posTag=\"--\")\n nodes[3].info = NodeInfo(type=\"NN\", value=\"Title\")\n nodes[4] = Node(index=4, word=\"papers\", posTag=\"--\")\n nodes[4].info = NodeInfo(type=\"NN\", value=\"Author\")\n nodes[5] = Node(index=5, word=\"citations\", posTag=\"--\")\n nodes[5].info = NodeInfo(type=\"NN\", value=\"Journal\")\n nodes[6] = Node(index=6, word=\"most\", posTag=\"--\")\n nodes[6].info = NodeInfo(type=\"FN\", value=\">\")\n nodes[7] = Node(index=7, word=\"total\", posTag=\"--\")\n nodes[7].info = NodeInfo(type=\"FN\", value=\"Year\")\n\n T.root = nodes[0]\n nodes[0].children.append(nodes[1])\n nodes[1].parent = nodes[0]\n nodes[1].children.append(nodes[2])\n nodes[2].parent = nodes[1]\n nodes[2].children.append(nodes[3])\n nodes[3].parent = nodes[2]\n nodes[2].children.append(nodes[4])\n nodes[4].parent = nodes[2]\n nodes[4].children.append(nodes[5])\n nodes[5].parent = nodes[4]\n nodes[5].children.append(nodes[6])\n nodes[6].parent = nodes[5]\n nodes[5].children.append(nodes[7])\n nodes[7].parent = nodes[5]\n\n print (\"===========test for Running getAdjustedTrees() in TreeAdjustor===========\")\n print (\"The original tree:\")\n print (T.toString())\n print (\"Number of possible 
trees for choice:\")\n\n obj = TreeAdjustor()\n result = TreeAdjustor.getAdjustedTrees(T)\n # result = TreeAdjustor.adjust(T)\n\n print (len(result))\n # result = sorted(result,cmp=TreeAdjustorTest.cmpp)\n # l =sorted(m, cmp =TreeAdjustor.timeStampCompare)\n for i in range(0, len(result)):\n for j in range(i+1, len(result)):\n if(result[i].getScore() <= result[j].getScore()):\n temp = result[i]\n result[i] =result[j]\n result[j] = temp\n print (\"The three trees with highest scores look like:\")\n for i in range(0,5):\n print (result[i])\n\n for tree in result:\n print (\" treeList Result %s:%d\" % (tree.getSentence(), tree.getScore()))\n tree.insertImplicitNodes()\n query = tree.translateToSQL(self.schema)\n print (\"qUERY: \" + query.toString())\n \n\n def adjustTest(self):\n T = ParseTree()\n nodes = [Node(index=-1, word=\"DEFAULT\", posTag=\"DEFAULT\") for i in range(0, 9)]\n nodes[0] = Node(index=0, word=\"ROOT\",posTag= \"--\")\n nodes[0].info = NodeInfo(type=\"ROOT\", value=\"ROOT\")\n nodes[1] = Node(index=1, word=\"return\", posTag=\"--\")\n nodes[1].info = NodeInfo(type=\"SN\", value=\"SELECT\")\n nodes[2] = Node(index=2, word=\"conference\", posTag=\"--\")\n nodes[2].info = NodeInfo(type=\"NN\", value=\"Author\")\n nodes[3] = Node(index=3, word=\"area\", posTag=\"--\")\n nodes[3].info =NodeInfo(type=\"NN\", value=\"Title\")\n nodes[4] =Node(index=4, word=\"each\", posTag=\"--\")\n nodes[4].info = NodeInfo(type=\"QN\", value=\">\")\n nodes[5] = Node(index=5, word=\"papers\", posTag=\"--\")\n nodes[5].info = NodeInfo(type=\"NN\", value=\"Author\")\n nodes[6] = Node(index=6, word=\"citations\", posTag=\"--\")\n nodes[6].info = NodeInfo(type=\"NN\", value=\"Journal\")\n nodes[7] = Node(index=7, word=\"most\", posTag=\"--\")\n nodes[7].info = NodeInfo(type=\"FN\", value=\">\")\n nodes[8] = Node(index=8, word=\"total\", posTag=\"--\")\n nodes[8].info = NodeInfo(type=\"FN\", value=\"Year\")\n\n T.root = nodes[0]\n nodes[0].children.append(nodes[1])\n 
nodes[1].parent = nodes[0]\n nodes[1].children.append(nodes[2])\n nodes[2].parent = nodes[1]\n nodes[2].children.append(nodes[3])\n nodes[3].parent = nodes[2]\n nodes[2].children.append(nodes[5])\n nodes[5].parent = nodes[2]\n nodes[3].children.append(nodes[4])\n nodes[4].parent = nodes[3]\n nodes[5].children.append(nodes[6])\n nodes[6].parent = nodes[5]\n nodes[6].children.append(nodes[7])\n nodes[7].parent = nodes[6]\n nodes[6].children.append(nodes[8])\n nodes[8].parent = nodes[6]\n\n print (\"===========test for Running adjust() in TreeAdjustor===========\")\n\n treeList = TreeAdjustor.adjust(T)\n print (\"Output size: %d\"%len(treeList))\n\n print (\"Output trees:\")\n ctr=0\n for tr in treeList:\n print (\"Tree %d %s\"%(ctr, tr.getSentence()))\n ctr+=1\n @staticmethod\n def cmpp(a,b):\n\n return a.getScore() > b.getScore()\n\nobj = TreeAdjustorTest()\nobj.getAdjustedTreesTest()\n# obj.adjustTest()\n\n\n\n",
"step-ids": [
3,
4,
8,
9,
10
]
}
|
[
3,
4,
8,
9,
10
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.