code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
<|reserved_special_token_0|>
def main():
detectPeriod('我要去游泳一個小時')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def detectPeriod(data):
numWord = '[0-9,一二三四五六七八九十兩半]'
hourWord = '小時鐘頭'
minWord = '分鐘'
secWord = '秒鐘'
timePat = ('[' + numWord + ']+點?\\.?[' + numWord + ']*個?半?[' + hourWord +
']*半?又?[' + numWord + ']*[' + minWord + ']*又?[' + numWord + ']*[' +
secWord + ']*')
def main():
detectPeriod('我要去游泳一個小時')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def detectPeriod(data):
numWord = '[0-9,一二三四五六七八九十兩半]'
hourWord = '小時鐘頭'
minWord = '分鐘'
secWord = '秒鐘'
timePat = ('[' + numWord + ']+點?\\.?[' + numWord + ']*個?半?[' + hourWord +
']*半?又?[' + numWord + ']*[' + minWord + ']*又?[' + numWord + ']*[' +
secWord + ']*')
def main():
detectPeriod('我要去游泳一個小時')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import re
def detectPeriod(data):
numWord = '[0-9,一二三四五六七八九十兩半]'
hourWord = '小時鐘頭'
minWord = '分鐘'
secWord = '秒鐘'
timePat = ('[' + numWord + ']+點?\\.?[' + numWord + ']*個?半?[' + hourWord +
']*半?又?[' + numWord + ']*[' + minWord + ']*又?[' + numWord + ']*[' +
secWord + ']*')
def main():
detectPeriod('我要去游泳一個小時')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import re
def detectPeriod(data):
numWord = "[0-9,一二三四五六七八九十兩半]"
hourWord = "小時鐘頭"
minWord = "分鐘"
secWord = "秒鐘"
timePat = "["+numWord+"]+點?\.?["+numWord+"]*個?半?["+hourWord+"]*半?又?["+numWord+"]*["+minWord+"]*又?["+numWord+"]*["+secWord+"]*"
def main():
detectPeriod("我要去游泳一個小時")
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "397686964acbf640a5463a3a7095d85832545d9e",
"index": 6462,
"step-1": "<mask token>\n\n\ndef main():\n detectPeriod('我要去游泳一個小時')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef detectPeriod(data):\n numWord = '[0-9,一二三四五六七八九十兩半]'\n hourWord = '小時鐘頭'\n minWord = '分鐘'\n secWord = '秒鐘'\n timePat = ('[' + numWord + ']+點?\\\\.?[' + numWord + ']*個?半?[' + hourWord +\n ']*半?又?[' + numWord + ']*[' + minWord + ']*又?[' + numWord + ']*[' +\n secWord + ']*')\n\n\ndef main():\n detectPeriod('我要去游泳一個小時')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef detectPeriod(data):\n numWord = '[0-9,一二三四五六七八九十兩半]'\n hourWord = '小時鐘頭'\n minWord = '分鐘'\n secWord = '秒鐘'\n timePat = ('[' + numWord + ']+點?\\\\.?[' + numWord + ']*個?半?[' + hourWord +\n ']*半?又?[' + numWord + ']*[' + minWord + ']*又?[' + numWord + ']*[' +\n secWord + ']*')\n\n\ndef main():\n detectPeriod('我要去游泳一個小時')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import re\n\n\ndef detectPeriod(data):\n numWord = '[0-9,一二三四五六七八九十兩半]'\n hourWord = '小時鐘頭'\n minWord = '分鐘'\n secWord = '秒鐘'\n timePat = ('[' + numWord + ']+點?\\\\.?[' + numWord + ']*個?半?[' + hourWord +\n ']*半?又?[' + numWord + ']*[' + minWord + ']*又?[' + numWord + ']*[' +\n secWord + ']*')\n\n\ndef main():\n detectPeriod('我要去游泳一個小時')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import re\n\n\ndef detectPeriod(data):\n \n numWord = \"[0-9,一二三四五六七八九十兩半]\"\n hourWord = \"小時鐘頭\"\n minWord = \"分鐘\"\n secWord = \"秒鐘\"\n\n\n timePat = \"[\"+numWord+\"]+點?\\.?[\"+numWord+\"]*個?半?[\"+hourWord+\"]*半?又?[\"+numWord+\"]*[\"+minWord+\"]*又?[\"+numWord+\"]*[\"+secWord+\"]*\"\n\n\n\n\ndef main():\n detectPeriod(\"我要去游泳一個小時\")\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from draft import *
# create a train station
platform = Platform('platform 1')
train_station = TrainStation('Linz')
train_station.add_platform(platform)
# create a train
train_1 = ICE('ICE 1')
platform.accept_train(train_1)
train_section_1 = TrainSection('First section')
train_section_2 = TrainSection('Second section')
train_section_3 = TrainSection('Third section')
train_1.dock_section(train_section_1)
train_1.dock_section(train_section_2)
train_1.dock_section(train_section_3)
train_1.print_sections()
# Expected output: First section - Second section - Third section
# create persons
person_1 = Person('Franz', 'Mair')
person_2 = Person('Michael', 'Schuh')
person_3 = Person('Herbert', 'Sailer')
person_4 = Person('Michaela', 'Mader')
train_section_1.get_on_train(person_1)
# Expected output: Franz Mair is on the train now
train_section_1.get_on_train(person_2)
# Expected output: Michael Schuh is on the train now
train_section_2.get_on_train(person_3)
# Expected output: Herbert Sailer is on the train now
train_section_3.get_on_train(person_4)
# Expected output: Michaela Mader is on the train now
train_section_2.get_off_train(person_3)
# Expected output: Herbert Sailer has left the train
# query passengers
train_1.show_current_passengers()
# Expected output: Franz Mair, Michel Schuh, Michaela Mader
train_1.count_passengers()
# Expected output: 3
|
normal
|
{
"blob_id": "5900dc0acde45ac9a31dc9d489aa8dae304d626b",
"index": 1791,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntrain_station.add_platform(platform)\n<mask token>\nplatform.accept_train(train_1)\n<mask token>\ntrain_1.dock_section(train_section_1)\ntrain_1.dock_section(train_section_2)\ntrain_1.dock_section(train_section_3)\ntrain_1.print_sections()\n<mask token>\ntrain_section_1.get_on_train(person_1)\ntrain_section_1.get_on_train(person_2)\ntrain_section_2.get_on_train(person_3)\ntrain_section_3.get_on_train(person_4)\ntrain_section_2.get_off_train(person_3)\ntrain_1.show_current_passengers()\ntrain_1.count_passengers()\n",
"step-3": "<mask token>\nplatform = Platform('platform 1')\ntrain_station = TrainStation('Linz')\ntrain_station.add_platform(platform)\ntrain_1 = ICE('ICE 1')\nplatform.accept_train(train_1)\ntrain_section_1 = TrainSection('First section')\ntrain_section_2 = TrainSection('Second section')\ntrain_section_3 = TrainSection('Third section')\ntrain_1.dock_section(train_section_1)\ntrain_1.dock_section(train_section_2)\ntrain_1.dock_section(train_section_3)\ntrain_1.print_sections()\nperson_1 = Person('Franz', 'Mair')\nperson_2 = Person('Michael', 'Schuh')\nperson_3 = Person('Herbert', 'Sailer')\nperson_4 = Person('Michaela', 'Mader')\ntrain_section_1.get_on_train(person_1)\ntrain_section_1.get_on_train(person_2)\ntrain_section_2.get_on_train(person_3)\ntrain_section_3.get_on_train(person_4)\ntrain_section_2.get_off_train(person_3)\ntrain_1.show_current_passengers()\ntrain_1.count_passengers()\n",
"step-4": "from draft import *\nplatform = Platform('platform 1')\ntrain_station = TrainStation('Linz')\ntrain_station.add_platform(platform)\ntrain_1 = ICE('ICE 1')\nplatform.accept_train(train_1)\ntrain_section_1 = TrainSection('First section')\ntrain_section_2 = TrainSection('Second section')\ntrain_section_3 = TrainSection('Third section')\ntrain_1.dock_section(train_section_1)\ntrain_1.dock_section(train_section_2)\ntrain_1.dock_section(train_section_3)\ntrain_1.print_sections()\nperson_1 = Person('Franz', 'Mair')\nperson_2 = Person('Michael', 'Schuh')\nperson_3 = Person('Herbert', 'Sailer')\nperson_4 = Person('Michaela', 'Mader')\ntrain_section_1.get_on_train(person_1)\ntrain_section_1.get_on_train(person_2)\ntrain_section_2.get_on_train(person_3)\ntrain_section_3.get_on_train(person_4)\ntrain_section_2.get_off_train(person_3)\ntrain_1.show_current_passengers()\ntrain_1.count_passengers()\n",
"step-5": "from draft import *\n# create a train station\nplatform = Platform('platform 1')\ntrain_station = TrainStation('Linz')\ntrain_station.add_platform(platform)\n# create a train\ntrain_1 = ICE('ICE 1')\nplatform.accept_train(train_1)\ntrain_section_1 = TrainSection('First section')\ntrain_section_2 = TrainSection('Second section')\ntrain_section_3 = TrainSection('Third section')\ntrain_1.dock_section(train_section_1)\ntrain_1.dock_section(train_section_2)\ntrain_1.dock_section(train_section_3)\ntrain_1.print_sections()\n# Expected output: First section - Second section - Third section\n# create persons\nperson_1 = Person('Franz', 'Mair')\nperson_2 = Person('Michael', 'Schuh')\nperson_3 = Person('Herbert', 'Sailer')\nperson_4 = Person('Michaela', 'Mader')\ntrain_section_1.get_on_train(person_1)\n# Expected output: Franz Mair is on the train now\ntrain_section_1.get_on_train(person_2)\n# Expected output: Michael Schuh is on the train now\ntrain_section_2.get_on_train(person_3)\n# Expected output: Herbert Sailer is on the train now\ntrain_section_3.get_on_train(person_4)\n# Expected output: Michaela Mader is on the train now\ntrain_section_2.get_off_train(person_3)\n# Expected output: Herbert Sailer has left the train\n# query passengers\ntrain_1.show_current_passengers()\n# Expected output: Franz Mair, Michel Schuh, Michaela Mader\ntrain_1.count_passengers()\n# Expected output: 3\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class RwpInstaller:
<|reserved_special_token_0|>
def extract(self, target):
with zipfile.ZipFile(target) as z:
if z.testzip():
return self.output('Corrupt file {}\n'.format(target))
self.output('{} file valid\n\n'.format(target))
extracted = 0
to_be_extracted = len(z.infolist())
for file in z.infolist():
extracted_path = z.extract(file, self.railworks_path).replace(
self.railworks_path, '')
extracted += 1
percent_complete = extracted / to_be_extracted
self.output('[{}/{} {}] {}\r'.format(extracted,
to_be_extracted, (round(percent_complete * 10) * '*').
ljust(10), extracted_path[-55:]))
self.output('\n\n{} extracted successfully'.format(os.path.
basename(target)))
def get_railworks_path(self):
steam_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
'Software\\Valve\\Steam')
steam_path = winreg.QueryValueEx(steam_key, 'SteamPath')[0]
return os.path.join(steam_path, 'steamApps', 'common', 'railworks')
def output(self, out, wait=False):
if wait:
input(out)
else:
sys.stdout.write(out)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RwpInstaller:
railworks_path = None
def extract(self, target):
with zipfile.ZipFile(target) as z:
if z.testzip():
return self.output('Corrupt file {}\n'.format(target))
self.output('{} file valid\n\n'.format(target))
extracted = 0
to_be_extracted = len(z.infolist())
for file in z.infolist():
extracted_path = z.extract(file, self.railworks_path).replace(
self.railworks_path, '')
extracted += 1
percent_complete = extracted / to_be_extracted
self.output('[{}/{} {}] {}\r'.format(extracted,
to_be_extracted, (round(percent_complete * 10) * '*').
ljust(10), extracted_path[-55:]))
self.output('\n\n{} extracted successfully'.format(os.path.
basename(target)))
def get_railworks_path(self):
steam_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
'Software\\Valve\\Steam')
steam_path = winreg.QueryValueEx(steam_key, 'SteamPath')[0]
return os.path.join(steam_path, 'steamApps', 'common', 'railworks')
def output(self, out, wait=False):
if wait:
input(out)
else:
sys.stdout.write(out)
def main(self):
targets = sys.argv[1:]
if not targets:
return self.output('No RWP files passed.', wait=True)
self.railworks_path = self.get_railworks_path()
for target in targets:
self.extract(target)
self.output('\n\nAll done. Thanks for using RWP Installer.', wait=True)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RwpInstaller:
railworks_path = None
def extract(self, target):
with zipfile.ZipFile(target) as z:
if z.testzip():
return self.output('Corrupt file {}\n'.format(target))
self.output('{} file valid\n\n'.format(target))
extracted = 0
to_be_extracted = len(z.infolist())
for file in z.infolist():
extracted_path = z.extract(file, self.railworks_path).replace(
self.railworks_path, '')
extracted += 1
percent_complete = extracted / to_be_extracted
self.output('[{}/{} {}] {}\r'.format(extracted,
to_be_extracted, (round(percent_complete * 10) * '*').
ljust(10), extracted_path[-55:]))
self.output('\n\n{} extracted successfully'.format(os.path.
basename(target)))
def get_railworks_path(self):
steam_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
'Software\\Valve\\Steam')
steam_path = winreg.QueryValueEx(steam_key, 'SteamPath')[0]
return os.path.join(steam_path, 'steamApps', 'common', 'railworks')
def output(self, out, wait=False):
if wait:
input(out)
else:
sys.stdout.write(out)
def main(self):
targets = sys.argv[1:]
if not targets:
return self.output('No RWP files passed.', wait=True)
self.railworks_path = self.get_railworks_path()
for target in targets:
self.extract(target)
self.output('\n\nAll done. Thanks for using RWP Installer.', wait=True)
if __name__ == '__main__':
RwpInstaller().main()
<|reserved_special_token_1|>
import os
import sys
import winreg
import zipfile
class RwpInstaller:
railworks_path = None
def extract(self, target):
with zipfile.ZipFile(target) as z:
if z.testzip():
return self.output('Corrupt file {}\n'.format(target))
self.output('{} file valid\n\n'.format(target))
extracted = 0
to_be_extracted = len(z.infolist())
for file in z.infolist():
extracted_path = z.extract(file, self.railworks_path).replace(
self.railworks_path, '')
extracted += 1
percent_complete = extracted / to_be_extracted
self.output('[{}/{} {}] {}\r'.format(extracted,
to_be_extracted, (round(percent_complete * 10) * '*').
ljust(10), extracted_path[-55:]))
self.output('\n\n{} extracted successfully'.format(os.path.
basename(target)))
def get_railworks_path(self):
steam_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
'Software\\Valve\\Steam')
steam_path = winreg.QueryValueEx(steam_key, 'SteamPath')[0]
return os.path.join(steam_path, 'steamApps', 'common', 'railworks')
def output(self, out, wait=False):
if wait:
input(out)
else:
sys.stdout.write(out)
def main(self):
targets = sys.argv[1:]
if not targets:
return self.output('No RWP files passed.', wait=True)
self.railworks_path = self.get_railworks_path()
for target in targets:
self.extract(target)
self.output('\n\nAll done. Thanks for using RWP Installer.', wait=True)
if __name__ == '__main__':
RwpInstaller().main()
|
flexible
|
{
"blob_id": "9c751dece67ef33ba8e5cb8281f024d2143e0808",
"index": 8811,
"step-1": "<mask token>\n\n\nclass RwpInstaller:\n <mask token>\n\n def extract(self, target):\n with zipfile.ZipFile(target) as z:\n if z.testzip():\n return self.output('Corrupt file {}\\n'.format(target))\n self.output('{} file valid\\n\\n'.format(target))\n extracted = 0\n to_be_extracted = len(z.infolist())\n for file in z.infolist():\n extracted_path = z.extract(file, self.railworks_path).replace(\n self.railworks_path, '')\n extracted += 1\n percent_complete = extracted / to_be_extracted\n self.output('[{}/{} {}] {}\\r'.format(extracted,\n to_be_extracted, (round(percent_complete * 10) * '*').\n ljust(10), extracted_path[-55:]))\n self.output('\\n\\n{} extracted successfully'.format(os.path.\n basename(target)))\n\n def get_railworks_path(self):\n steam_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER,\n 'Software\\\\Valve\\\\Steam')\n steam_path = winreg.QueryValueEx(steam_key, 'SteamPath')[0]\n return os.path.join(steam_path, 'steamApps', 'common', 'railworks')\n\n def output(self, out, wait=False):\n if wait:\n input(out)\n else:\n sys.stdout.write(out)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RwpInstaller:\n railworks_path = None\n\n def extract(self, target):\n with zipfile.ZipFile(target) as z:\n if z.testzip():\n return self.output('Corrupt file {}\\n'.format(target))\n self.output('{} file valid\\n\\n'.format(target))\n extracted = 0\n to_be_extracted = len(z.infolist())\n for file in z.infolist():\n extracted_path = z.extract(file, self.railworks_path).replace(\n self.railworks_path, '')\n extracted += 1\n percent_complete = extracted / to_be_extracted\n self.output('[{}/{} {}] {}\\r'.format(extracted,\n to_be_extracted, (round(percent_complete * 10) * '*').\n ljust(10), extracted_path[-55:]))\n self.output('\\n\\n{} extracted successfully'.format(os.path.\n basename(target)))\n\n def get_railworks_path(self):\n steam_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER,\n 'Software\\\\Valve\\\\Steam')\n steam_path = winreg.QueryValueEx(steam_key, 'SteamPath')[0]\n return os.path.join(steam_path, 'steamApps', 'common', 'railworks')\n\n def output(self, out, wait=False):\n if wait:\n input(out)\n else:\n sys.stdout.write(out)\n\n def main(self):\n targets = sys.argv[1:]\n if not targets:\n return self.output('No RWP files passed.', wait=True)\n self.railworks_path = self.get_railworks_path()\n for target in targets:\n self.extract(target)\n self.output('\\n\\nAll done. Thanks for using RWP Installer.', wait=True)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass RwpInstaller:\n railworks_path = None\n\n def extract(self, target):\n with zipfile.ZipFile(target) as z:\n if z.testzip():\n return self.output('Corrupt file {}\\n'.format(target))\n self.output('{} file valid\\n\\n'.format(target))\n extracted = 0\n to_be_extracted = len(z.infolist())\n for file in z.infolist():\n extracted_path = z.extract(file, self.railworks_path).replace(\n self.railworks_path, '')\n extracted += 1\n percent_complete = extracted / to_be_extracted\n self.output('[{}/{} {}] {}\\r'.format(extracted,\n to_be_extracted, (round(percent_complete * 10) * '*').\n ljust(10), extracted_path[-55:]))\n self.output('\\n\\n{} extracted successfully'.format(os.path.\n basename(target)))\n\n def get_railworks_path(self):\n steam_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER,\n 'Software\\\\Valve\\\\Steam')\n steam_path = winreg.QueryValueEx(steam_key, 'SteamPath')[0]\n return os.path.join(steam_path, 'steamApps', 'common', 'railworks')\n\n def output(self, out, wait=False):\n if wait:\n input(out)\n else:\n sys.stdout.write(out)\n\n def main(self):\n targets = sys.argv[1:]\n if not targets:\n return self.output('No RWP files passed.', wait=True)\n self.railworks_path = self.get_railworks_path()\n for target in targets:\n self.extract(target)\n self.output('\\n\\nAll done. Thanks for using RWP Installer.', wait=True)\n\n\nif __name__ == '__main__':\n RwpInstaller().main()\n",
"step-4": "import os\nimport sys\nimport winreg\nimport zipfile\n\n\nclass RwpInstaller:\n railworks_path = None\n\n def extract(self, target):\n with zipfile.ZipFile(target) as z:\n if z.testzip():\n return self.output('Corrupt file {}\\n'.format(target))\n self.output('{} file valid\\n\\n'.format(target))\n extracted = 0\n to_be_extracted = len(z.infolist())\n for file in z.infolist():\n extracted_path = z.extract(file, self.railworks_path).replace(\n self.railworks_path, '')\n extracted += 1\n percent_complete = extracted / to_be_extracted\n self.output('[{}/{} {}] {}\\r'.format(extracted,\n to_be_extracted, (round(percent_complete * 10) * '*').\n ljust(10), extracted_path[-55:]))\n self.output('\\n\\n{} extracted successfully'.format(os.path.\n basename(target)))\n\n def get_railworks_path(self):\n steam_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER,\n 'Software\\\\Valve\\\\Steam')\n steam_path = winreg.QueryValueEx(steam_key, 'SteamPath')[0]\n return os.path.join(steam_path, 'steamApps', 'common', 'railworks')\n\n def output(self, out, wait=False):\n if wait:\n input(out)\n else:\n sys.stdout.write(out)\n\n def main(self):\n targets = sys.argv[1:]\n if not targets:\n return self.output('No RWP files passed.', wait=True)\n self.railworks_path = self.get_railworks_path()\n for target in targets:\n self.extract(target)\n self.output('\\n\\nAll done. Thanks for using RWP Installer.', wait=True)\n\n\nif __name__ == '__main__':\n RwpInstaller().main()\n",
"step-5": null,
"step-ids": [
4,
6,
7,
8
]
}
|
[
4,
6,
7,
8
] |
class Library(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def cache_key(self, key):
return self._backend.cache_key(key)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Library(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def cache_key(self, key):
return self._backend.cache_key(key)
def get_url(self, track):
raise NotImplementedError()
<|reserved_special_token_1|>
class Library(object):
def __init__(self, backend):
self._backend = backend
<|reserved_special_token_0|>
def cache_key(self, key):
return self._backend.cache_key(key)
def get_url(self, track):
raise NotImplementedError()
<|reserved_special_token_1|>
class Library(object):
def __init__(self, backend):
self._backend = backend
@property
def cache(self):
return self._backend.cache
def cache_key(self, key):
return self._backend.cache_key(key)
def get_url(self, track):
raise NotImplementedError()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
class Library(object):
def __init__(self, backend):
self._backend = backend
@property
def cache(self):
return self._backend.cache
def cache_key(self, key):
return self._backend.cache_key(key)
def get_url(self, track):
raise NotImplementedError()
|
flexible
|
{
"blob_id": "ccee0e3c47fd3809e0670be24aaa6fd0a9bad3bc",
"index": 888,
"step-1": "class Library(object):\n <mask token>\n <mask token>\n\n def cache_key(self, key):\n return self._backend.cache_key(key)\n <mask token>\n",
"step-2": "class Library(object):\n <mask token>\n <mask token>\n\n def cache_key(self, key):\n return self._backend.cache_key(key)\n\n def get_url(self, track):\n raise NotImplementedError()\n",
"step-3": "class Library(object):\n\n def __init__(self, backend):\n self._backend = backend\n <mask token>\n\n def cache_key(self, key):\n return self._backend.cache_key(key)\n\n def get_url(self, track):\n raise NotImplementedError()\n",
"step-4": "class Library(object):\n\n def __init__(self, backend):\n self._backend = backend\n\n @property\n def cache(self):\n return self._backend.cache\n\n def cache_key(self, key):\n return self._backend.cache_key(key)\n\n def get_url(self, track):\n raise NotImplementedError()\n",
"step-5": "# -*- coding: utf-8 -*-\n\n\nclass Library(object):\n\n def __init__(self, backend):\n self._backend = backend\n\n @property\n def cache(self):\n return self._backend.cache\n\n def cache_key(self, key):\n return self._backend.cache_key(key)\n\n def get_url(self, track):\n raise NotImplementedError()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import pandas as pd
df = pd.read_csv("search.csv")
df0 = df[df['re_0']<df['re_1']]
df1 = df[df['re_0']>df['re_1']].ix[:, ['re_1', 'im_1', 're_0', 'im_0']]
df1.columns = ['re_0', 'im_0', 're_1', 'im_1']
df = pd.concat([df0, df1]).sort_values(by=["re_0"])
eps = pow(10.0, -4.0)
first = True
res = []
val_old = None
for (k, val) in df.iterrows():
z0 = val['re_0']+1.0j*val['im_0']
z1 = val['re_1']+1.0j*val['im_1']
if (first):
res.append([z0, z1])
first = False
else:
z0_old = val_old['re_0']+1.0j*val_old['im_0']
z1_old = val_old['re_1']+1.0j*val_old['im_1']
print k, z0, z1, abs(z0_old-z0)+ abs(z1_old-z1)
if(abs(z0_old-z0) + abs(z1_old-z1) >eps):
res.append([z0, z1])
val_old = val
f = open('filtered.csv', 'w')
for [z0, z1] in res:
print >>f, "{0},{1},{2},{3}".format(z0.real, z0.imag, z1.real, z1.imag)
"""
for i in range(len(df)-1):
print i
z0 = df.ix[i,:]['re_0'] + 1.0j * df.ix[i,:]['im_0']
z1 = df.ix[i,:]['re_1'] + 1.0j * df.ix[i,:]['im_1']
z0p = df.ix[i+1,:]['re_0'] + 1.0j * df.ix[i+1,:]['im_0']
z1p = df.ix[i+1,:]['re_1'] + 1.0j * df.ix[i+1,:]['im_1']
if(abs(z0-z0p)>eps and abs(z1-z1p)>eps):
res.append([z0p, z1p])
print res
print len(df)
"""
|
normal
|
{
"blob_id": "709e54daea4fea112539af3da83b00a43a086399",
"index": 2629,
"step-1": "import pandas as pd\n\ndf = pd.read_csv(\"search.csv\")\n\n\ndf0 = df[df['re_0']<df['re_1']]\ndf1 = df[df['re_0']>df['re_1']].ix[:, ['re_1', 'im_1', 're_0', 'im_0']]\ndf1.columns = ['re_0', 'im_0', 're_1', 'im_1']\ndf = pd.concat([df0, df1]).sort_values(by=[\"re_0\"])\n\neps = pow(10.0, -4.0)\nfirst = True\nres = []\nval_old = None\nfor (k, val) in df.iterrows():\n z0 = val['re_0']+1.0j*val['im_0']\n z1 = val['re_1']+1.0j*val['im_1']\n\n if (first):\n res.append([z0, z1])\n first = False\n else:\n z0_old = val_old['re_0']+1.0j*val_old['im_0']\n z1_old = val_old['re_1']+1.0j*val_old['im_1']\n print k, z0, z1, abs(z0_old-z0)+ abs(z1_old-z1)\n if(abs(z0_old-z0) + abs(z1_old-z1) >eps):\n res.append([z0, z1])\n \n val_old = val\n\nf = open('filtered.csv', 'w')\nfor [z0, z1] in res:\n print >>f, \"{0},{1},{2},{3}\".format(z0.real, z0.imag, z1.real, z1.imag)\n \n\"\"\"\nfor i in range(len(df)-1):\n print i\n z0 = df.ix[i,:]['re_0'] + 1.0j * df.ix[i,:]['im_0']\n z1 = df.ix[i,:]['re_1'] + 1.0j * df.ix[i,:]['im_1']\n z0p = df.ix[i+1,:]['re_0'] + 1.0j * df.ix[i+1,:]['im_0']\n z1p = df.ix[i+1,:]['re_1'] + 1.0j * df.ix[i+1,:]['im_1']\n if(abs(z0-z0p)>eps and abs(z1-z1p)>eps):\n res.append([z0p, z1p])\n\nprint res\nprint len(df)\n\n\"\"\"\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from auction_type import AuctionType
from bid import Bid
class Auction(object):
def __init__(self, name, type, status, start_price, buy_now_price):
self.name = name
self.type = type
self.status = status
if AuctionType.BID == type:
self.start_price = start_price
self.bids = []
if AuctionType.BUY_NOW == type:
self.buy_now_price = buy_now_price
def add_bid(self, price):
self.bids.append(Bid(price))
|
normal
|
{
"blob_id": "9e05f883d80d7583c9f7e16b2fb5d3f67896388d",
"index": 5629,
"step-1": "<mask token>\n\n\nclass Auction(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Auction(object):\n\n def __init__(self, name, type, status, start_price, buy_now_price):\n self.name = name\n self.type = type\n self.status = status\n if AuctionType.BID == type:\n self.start_price = start_price\n self.bids = []\n if AuctionType.BUY_NOW == type:\n self.buy_now_price = buy_now_price\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Auction(object):\n\n def __init__(self, name, type, status, start_price, buy_now_price):\n self.name = name\n self.type = type\n self.status = status\n if AuctionType.BID == type:\n self.start_price = start_price\n self.bids = []\n if AuctionType.BUY_NOW == type:\n self.buy_now_price = buy_now_price\n\n def add_bid(self, price):\n self.bids.append(Bid(price))\n",
"step-4": "from auction_type import AuctionType\nfrom bid import Bid\n\n\nclass Auction(object):\n\n def __init__(self, name, type, status, start_price, buy_now_price):\n self.name = name\n self.type = type\n self.status = status\n if AuctionType.BID == type:\n self.start_price = start_price\n self.bids = []\n if AuctionType.BUY_NOW == type:\n self.buy_now_price = buy_now_price\n\n def add_bid(self, price):\n self.bids.append(Bid(price))\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
from metricsManager import MetricsManager
def TestDrawGraphs():
manager = MetricsManager()
manager.displayMetricsGraph()
return
def main():
TestDrawGraphs()
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "4e8a5b0ba13921fb88d5d6371d50e7120ab01265",
"index": 737,
"step-1": "<mask token>\n\n\ndef TestDrawGraphs():\n manager = MetricsManager()\n manager.displayMetricsGraph()\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef TestDrawGraphs():\n manager = MetricsManager()\n manager.displayMetricsGraph()\n return\n\n\ndef main():\n TestDrawGraphs()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef TestDrawGraphs():\n manager = MetricsManager()\n manager.displayMetricsGraph()\n return\n\n\ndef main():\n TestDrawGraphs()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from metricsManager import MetricsManager\n\n\ndef TestDrawGraphs():\n manager = MetricsManager()\n manager.displayMetricsGraph()\n return\n\n\ndef main():\n TestDrawGraphs()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from metricsManager import MetricsManager\n\n\ndef TestDrawGraphs():\n manager = MetricsManager()\n manager.displayMetricsGraph()\n return\n\n\ndef main():\n TestDrawGraphs()\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def get_df_4_model(user_id, n_recommendations=20000):
"""this function generates the latent dataframes used for the prediction model"""
print('Generating dataframe for recommendation model')
recipes_df_raw = pd.read_csv(
'data/preprocessed/recipe_pp_20201118_1206.csv')
reviews_df_raw = pd.read_csv(
'data/preprocessed/review_pp_20201118_1206.csv')
print(
f'{len(recipes_df_raw.ingredients)} recipes are being considered for recommendation'
)
user_rates = list(reviews_df_raw[reviews_df_raw.user_id == user_id].
recipe_id)
sample_df_no_user = recipes_df_raw[~recipes_df_raw.recipe_id.isin(
user_rates)].sample(n=n_recommendations, random_state=1)
recipe_df_w_user = recipes_df_raw[recipes_df_raw.recipe_id.isin(user_rates)
]
recipes_df_user = pd.concat([sample_df_no_user, recipe_df_w_user], axis=0)
merge_df = pd.merge(recipes_df_user[['recipe_id', 'metadata']],
reviews_df_raw, on='recipe_id', how='right').dropna()
recipes_df = merge_df[['recipe_id', 'metadata']].groupby(by='recipe_id'
).first().reset_index()
reviews_df = merge_df.drop(['metadata'], axis='columns').reset_index()
print(len(user_rates))
print(sample_df_no_user.shape)
count = CountVectorizer(stop_words='english')
count_matrix = count.fit_transform(recipes_df['metadata'])
count_df = pd.DataFrame(count_matrix.toarray(), index=recipes_df.
recipe_id.tolist())
n_red = 250
svd = TruncatedSVD(n_components=n_red)
latent_df = svd.fit_transform(count_df)
n = n_red
latent_df = pd.DataFrame(latent_df[:, 0:n], index=recipes_df.recipe_id.
tolist())
latent_df
ratings1 = pd.merge(recipes_df[['recipe_id']], reviews_df, on=
'recipe_id', how='right')
ratings = ratings1.pivot(index='recipe_id', columns='user_id', values=
'rating').fillna(0)
svd = TruncatedSVD(n_components=800)
latent_df_2 = svd.fit_transform(ratings)
index_list = reviews_df.groupby(by='recipe_id').mean().index.tolist()
latent_df_2 = pd.DataFrame(latent_df_2, index=index_list)
latent_df.to_csv(f'data/latents/latent_content.csv', index=True)
latent_df_2.to_csv(f'data/latents/latent_rating.csv', index=True)
return latent_df, latent_df_2, user_rates
def get_one_recommendation(recipe_id, latent_1, latent_2, n_recommendations):
v1 = np.array(latent_1.loc[recipe_id]).reshape(1, -1)
v2 = np.array(latent_2.loc[recipe_id]).reshape(1, -1)
sim1 = cosine_similarity(latent_1, v1).reshape(-1)
sim2 = cosine_similarity(latent_2, v2).reshape(-1)
hybrid = (sim1 + sim2) / 2.0
dictDf = {'content': sim1, 'collaborative': sim2, 'hybrid': hybrid}
recommendation_df = pd.DataFrame(dictDf, index=latent_1.index)
recommendation_df.sort_values('hybrid', ascending=False, inplace=True)
recommendation_df.head(10)
return recommendation_df.head(n_recommendations).reset_index().rename(
columns={'index': 'recipe_id'})
def get_user_recommendations(user_id, n_recommendations=500):
"""thi function gets the recommendations fo one user by taking all of its liked and disliked dishes,
getting the recommendation based on each recipe and then summing the scores"""
latent_1, latent_2, recipe_list = get_df_4_model(user_id)
recommendations = [get_one_recommendation(i, latent_1, latent_2,
n_recommendations) for i in recipe_list]
recommendations_df = pd.concat(recommendations)
grouped_recommendations = recommendations_df.groupby(by='recipe_id').sum(
).sort_values(by='hybrid', ascending=False)
return grouped_recommendations
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_df_4_model(user_id, n_recommendations=20000):
    """Build the latent matrices used by the hybrid recommender.

    Reads the preprocessed recipe and review CSVs, samples a pool of
    candidate recipes (always keeping every recipe the user has rated),
    then derives two latent spaces via truncated SVD:

    * a content space from a bag-of-words encoding of recipe metadata, and
    * a collaborative space from the recipe x user rating matrix.

    Both latent frames are also written to ``data/latents/`` as CSVs.

    Args:
        user_id: user whose rated recipes must survive the sampling.
        n_recommendations: size of the random sample of unrated recipes.

    Returns:
        Tuple ``(latent_df, latent_df_2, user_rates)``: the content latent
        frame, the collaborative latent frame (both indexed by recipe_id),
        and the list of recipe ids the user has rated.
    """
    print('Generating dataframe for recommendation model')
    recipes_df_raw = pd.read_csv(
        'data/preprocessed/recipe_pp_20201118_1206.csv')
    reviews_df_raw = pd.read_csv(
        'data/preprocessed/review_pp_20201118_1206.csv')
    print(
        f'{len(recipes_df_raw.ingredients)} recipes are being considered for recommendation'
    )
    # Recipes this user has rated: kept out of the random sample and
    # re-added explicitly so similarities can later be computed from them.
    user_rates = list(reviews_df_raw[reviews_df_raw.user_id == user_id].
        recipe_id)
    sample_df_no_user = recipes_df_raw[~recipes_df_raw.recipe_id.isin(
        user_rates)].sample(n=n_recommendations, random_state=1)
    recipe_df_w_user = recipes_df_raw[recipes_df_raw.recipe_id.isin(user_rates)]
    recipes_df_user = pd.concat([sample_df_no_user, recipe_df_w_user], axis=0)
    # Keep only recipes that actually have reviews, then split back into
    # per-recipe metadata and the review rows.
    merge_df = pd.merge(recipes_df_user[['recipe_id', 'metadata']],
        reviews_df_raw, on='recipe_id', how='right').dropna()
    recipes_df = merge_df[['recipe_id', 'metadata']].groupby(by='recipe_id'
        ).first().reset_index()
    reviews_df = merge_df.drop(['metadata'], axis='columns').reset_index()
    print(f'{len(user_rates)} recipes rated by user {user_id}')
    print(f'sampled pool shape: {sample_df_no_user.shape}')
    # --- content latent space: bag-of-words on metadata, reduced by SVD ---
    count = CountVectorizer(stop_words='english')
    count_matrix = count.fit_transform(recipes_df['metadata'])
    count_df = pd.DataFrame(count_matrix.toarray(), index=recipes_df.
        recipe_id.tolist())
    n_red = 250  # retained SVD components for the content space
    svd = TruncatedSVD(n_components=n_red)
    latent_df = svd.fit_transform(count_df)
    latent_df = pd.DataFrame(latent_df[:, 0:n_red], index=recipes_df.
        recipe_id.tolist())
    # --- collaborative latent space: recipe x user ratings, reduced by SVD ---
    ratings1 = pd.merge(recipes_df[['recipe_id']], reviews_df, on=
        'recipe_id', how='right')
    ratings = ratings1.pivot(index='recipe_id', columns='user_id', values=
        'rating').fillna(0)
    svd = TruncatedSVD(n_components=800)
    latent_df_2 = svd.fit_transform(ratings)
    index_list = reviews_df.groupby(by='recipe_id').mean().index.tolist()
    latent_df_2 = pd.DataFrame(latent_df_2, index=index_list)
    # Persist both latent spaces for reuse/inspection.
    latent_df.to_csv('data/latents/latent_content.csv', index=True)
    latent_df_2.to_csv('data/latents/latent_rating.csv', index=True)
    return latent_df, latent_df_2, user_rates
def get_one_recommendation(recipe_id, latent_1, latent_2, n_recommendations):
    """Score every recipe against ``recipe_id`` and return the top matches.

    Combines a content-based similarity (``latent_1``, built from recipe
    metadata) with a collaborative similarity (``latent_2``, built from user
    ratings) into an unweighted 50/50 hybrid score.

    Args:
        recipe_id: id of the anchor recipe; must be an index of both latents.
        latent_1: DataFrame of content latent vectors, indexed by recipe_id.
        latent_2: DataFrame of rating latent vectors, indexed by recipe_id.
        n_recommendations: number of top-scoring rows to return.

    Returns:
        DataFrame with columns ['recipe_id', 'content', 'collaborative',
        'hybrid'], sorted by hybrid score descending.
    """
    # Anchor vectors reshaped to (1, n_features) as cosine_similarity expects.
    v1 = np.array(latent_1.loc[recipe_id]).reshape(1, -1)
    v2 = np.array(latent_2.loc[recipe_id]).reshape(1, -1)
    # Similarity of the anchor against every recipe in each latent space.
    sim1 = cosine_similarity(latent_1, v1).reshape(-1)
    sim2 = cosine_similarity(latent_2, v2).reshape(-1)
    hybrid = (sim1 + sim2) / 2.0
    # NOTE(review): assumes latent_1 and latent_2 are row-aligned on the same
    # recipe ids — confirm upstream in get_df_4_model.
    recommendation_df = pd.DataFrame(
        {'content': sim1, 'collaborative': sim2, 'hybrid': hybrid},
        index=latent_1.index)
    recommendation_df.sort_values('hybrid', ascending=False, inplace=True)
    return recommendation_df.head(n_recommendations).reset_index().rename(
        columns={'index': 'recipe_id'})
def get_user_recommendations(user_id, n_recommendations=500):
    """Recommend recipes for one user.

    Builds a per-recipe candidate list for every recipe the user has rated
    and sums the similarity scores across those lists, so recipes similar
    to several rated dishes rise to the top.

    Args:
        user_id: id of the user to recommend for.
        n_recommendations: candidates contributed by each rated recipe.

    Returns:
        DataFrame indexed by recipe_id with summed 'content',
        'collaborative' and 'hybrid' scores, sorted by 'hybrid' descending.
    """
    # NOTE(review): n_recommendations is not forwarded to get_df_4_model and
    # ratings are not sign-weighted (dislikes count like likes) — confirm.
    latent_1, latent_2, recipe_list = get_df_4_model(user_id)
    recommendations = [get_one_recommendation(i, latent_1, latent_2,
        n_recommendations) for i in recipe_list]
    recommendations_df = pd.concat(recommendations)
    grouped_recommendations = recommendations_df.groupby(by='recipe_id').sum(
        ).sort_values(by='hybrid', ascending=False)
    return grouped_recommendations
def get_superuser_recommendation(n_recommendations=100, user_id=424680,
        max_seed_recipes=10, top_n=30):
    """Recommendation demo for a fixed "super user".

    Args:
        n_recommendations: sample size / per-recipe candidate count.
        user_id: user to recommend for (defaults to the demo super user).
        max_seed_recipes: cap on how many rated recipes seed the scoring.
        top_n: number of top-scoring rows returned.

    Returns:
        DataFrame of the ``top_n`` recipes with the highest summed hybrid
        scores, sorted descending.
    """
    latent_1, latent_2, recipe_list = get_df_4_model(user_id, n_recommendations)
    # Only the first few rated recipes are used to keep the demo fast.
    recipe_list = recipe_list[0:max_seed_recipes]
    recommendations = [get_one_recommendation(i, latent_1, latent_2,
        n_recommendations) for i in recipe_list]
    recommendations_df = pd.concat(recommendations)
    grouped_recommendations = recommendations_df.groupby(by='recipe_id').sum(
        ).sort_values(by='hybrid', ascending=False)
    print(
        f'The recommendation results are based on {len(recipe_list)} recipes the user liked or disliked'
    )
    return grouped_recommendations[0:top_n]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_df_4_model(user_id, n_recommendations=20000):
    """Build the latent matrices used by the hybrid recommender.

    Reads the preprocessed recipe and review CSVs, samples a pool of
    candidate recipes (always keeping every recipe the user has rated),
    then derives two latent spaces via truncated SVD:

    * a content space from a bag-of-words encoding of recipe metadata, and
    * a collaborative space from the recipe x user rating matrix.

    Both latent frames are also written to ``data/latents/`` as CSVs.

    Args:
        user_id: user whose rated recipes must survive the sampling.
        n_recommendations: size of the random sample of unrated recipes.

    Returns:
        Tuple ``(latent_df, latent_df_2, user_rates)``: the content latent
        frame, the collaborative latent frame (both indexed by recipe_id),
        and the list of recipe ids the user has rated.
    """
    print('Generating dataframe for recommendation model')
    recipes_df_raw = pd.read_csv(
        'data/preprocessed/recipe_pp_20201118_1206.csv')
    reviews_df_raw = pd.read_csv(
        'data/preprocessed/review_pp_20201118_1206.csv')
    print(
        f'{len(recipes_df_raw.ingredients)} recipes are being considered for recommendation'
    )
    # Recipes this user has rated: kept out of the random sample and
    # re-added explicitly so similarities can later be computed from them.
    user_rates = list(reviews_df_raw[reviews_df_raw.user_id == user_id].
        recipe_id)
    sample_df_no_user = recipes_df_raw[~recipes_df_raw.recipe_id.isin(
        user_rates)].sample(n=n_recommendations, random_state=1)
    recipe_df_w_user = recipes_df_raw[recipes_df_raw.recipe_id.isin(user_rates)]
    recipes_df_user = pd.concat([sample_df_no_user, recipe_df_w_user], axis=0)
    # Keep only recipes that actually have reviews, then split back into
    # per-recipe metadata and the review rows.
    merge_df = pd.merge(recipes_df_user[['recipe_id', 'metadata']],
        reviews_df_raw, on='recipe_id', how='right').dropna()
    recipes_df = merge_df[['recipe_id', 'metadata']].groupby(by='recipe_id'
        ).first().reset_index()
    reviews_df = merge_df.drop(['metadata'], axis='columns').reset_index()
    print(f'{len(user_rates)} recipes rated by user {user_id}')
    print(f'sampled pool shape: {sample_df_no_user.shape}')
    # --- content latent space: bag-of-words on metadata, reduced by SVD ---
    count = CountVectorizer(stop_words='english')
    count_matrix = count.fit_transform(recipes_df['metadata'])
    count_df = pd.DataFrame(count_matrix.toarray(), index=recipes_df.
        recipe_id.tolist())
    n_red = 250  # retained SVD components for the content space
    svd = TruncatedSVD(n_components=n_red)
    latent_df = svd.fit_transform(count_df)
    latent_df = pd.DataFrame(latent_df[:, 0:n_red], index=recipes_df.
        recipe_id.tolist())
    # --- collaborative latent space: recipe x user ratings, reduced by SVD ---
    ratings1 = pd.merge(recipes_df[['recipe_id']], reviews_df, on=
        'recipe_id', how='right')
    ratings = ratings1.pivot(index='recipe_id', columns='user_id', values=
        'rating').fillna(0)
    svd = TruncatedSVD(n_components=800)
    latent_df_2 = svd.fit_transform(ratings)
    index_list = reviews_df.groupby(by='recipe_id').mean().index.tolist()
    latent_df_2 = pd.DataFrame(latent_df_2, index=index_list)
    # Persist both latent spaces for reuse/inspection.
    latent_df.to_csv('data/latents/latent_content.csv', index=True)
    latent_df_2.to_csv('data/latents/latent_rating.csv', index=True)
    return latent_df, latent_df_2, user_rates
def get_one_recommendation(recipe_id, latent_1, latent_2, n_recommendations):
    """Score every recipe against ``recipe_id`` and return the top matches.

    Combines a content-based similarity (``latent_1``, built from recipe
    metadata) with a collaborative similarity (``latent_2``, built from user
    ratings) into an unweighted 50/50 hybrid score.

    Args:
        recipe_id: id of the anchor recipe; must be an index of both latents.
        latent_1: DataFrame of content latent vectors, indexed by recipe_id.
        latent_2: DataFrame of rating latent vectors, indexed by recipe_id.
        n_recommendations: number of top-scoring rows to return.

    Returns:
        DataFrame with columns ['recipe_id', 'content', 'collaborative',
        'hybrid'], sorted by hybrid score descending.
    """
    # Anchor vectors reshaped to (1, n_features) as cosine_similarity expects.
    v1 = np.array(latent_1.loc[recipe_id]).reshape(1, -1)
    v2 = np.array(latent_2.loc[recipe_id]).reshape(1, -1)
    # Similarity of the anchor against every recipe in each latent space.
    sim1 = cosine_similarity(latent_1, v1).reshape(-1)
    sim2 = cosine_similarity(latent_2, v2).reshape(-1)
    hybrid = (sim1 + sim2) / 2.0
    # NOTE(review): assumes latent_1 and latent_2 are row-aligned on the same
    # recipe ids — confirm upstream in get_df_4_model.
    recommendation_df = pd.DataFrame(
        {'content': sim1, 'collaborative': sim2, 'hybrid': hybrid},
        index=latent_1.index)
    recommendation_df.sort_values('hybrid', ascending=False, inplace=True)
    return recommendation_df.head(n_recommendations).reset_index().rename(
        columns={'index': 'recipe_id'})
def get_user_recommendations(user_id, n_recommendations=500):
    """Recommend recipes for one user.

    Builds a per-recipe candidate list for every recipe the user has rated
    and sums the similarity scores across those lists, so recipes similar
    to several rated dishes rise to the top.

    Args:
        user_id: id of the user to recommend for.
        n_recommendations: candidates contributed by each rated recipe.

    Returns:
        DataFrame indexed by recipe_id with summed 'content',
        'collaborative' and 'hybrid' scores, sorted by 'hybrid' descending.
    """
    # NOTE(review): n_recommendations is not forwarded to get_df_4_model and
    # ratings are not sign-weighted (dislikes count like likes) — confirm.
    latent_1, latent_2, recipe_list = get_df_4_model(user_id)
    recommendations = [get_one_recommendation(i, latent_1, latent_2,
        n_recommendations) for i in recipe_list]
    recommendations_df = pd.concat(recommendations)
    grouped_recommendations = recommendations_df.groupby(by='recipe_id').sum(
        ).sort_values(by='hybrid', ascending=False)
    return grouped_recommendations
def get_superuser_recommendation(n_recommendations=100, user_id=424680,
        max_seed_recipes=10, top_n=30):
    """Recommendation demo for a fixed "super user".

    Args:
        n_recommendations: sample size / per-recipe candidate count.
        user_id: user to recommend for (defaults to the demo super user).
        max_seed_recipes: cap on how many rated recipes seed the scoring.
        top_n: number of top-scoring rows returned.

    Returns:
        DataFrame of the ``top_n`` recipes with the highest summed hybrid
        scores, sorted descending.
    """
    latent_1, latent_2, recipe_list = get_df_4_model(user_id, n_recommendations)
    # Only the first few rated recipes are used to keep the demo fast.
    recipe_list = recipe_list[0:max_seed_recipes]
    recommendations = [get_one_recommendation(i, latent_1, latent_2,
        n_recommendations) for i in recipe_list]
    recommendations_df = pd.concat(recommendations)
    grouped_recommendations = recommendations_df.groupby(by='recipe_id').sum(
        ).sort_values(by='hybrid', ascending=False)
    print(
        f'The recommendation results are based on {len(recipe_list)} recipes the user liked or disliked'
    )
    return grouped_recommendations[0:top_n]
# Script entry point: run the super-user demo with a large candidate pool.
if __name__ == '__main__':
    result = get_superuser_recommendation(n_recommendations=4000)
    print('Here are the top results for the user:')
    print(result)
<|reserved_special_token_1|>
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics.pairwise import cosine_similarity
def get_df_4_model(user_id, n_recommendations=20000):
    """Build the latent matrices used by the hybrid recommender.

    Reads the preprocessed recipe and review CSVs, samples a pool of
    candidate recipes (always keeping every recipe the user has rated),
    then derives two latent spaces via truncated SVD:

    * a content space from a bag-of-words encoding of recipe metadata, and
    * a collaborative space from the recipe x user rating matrix.

    Both latent frames are also written to ``data/latents/`` as CSVs.

    Args:
        user_id: user whose rated recipes must survive the sampling.
        n_recommendations: size of the random sample of unrated recipes.

    Returns:
        Tuple ``(latent_df, latent_df_2, user_rates)``: the content latent
        frame, the collaborative latent frame (both indexed by recipe_id),
        and the list of recipe ids the user has rated.
    """
    print('Generating dataframe for recommendation model')
    recipes_df_raw = pd.read_csv(
        'data/preprocessed/recipe_pp_20201118_1206.csv')
    reviews_df_raw = pd.read_csv(
        'data/preprocessed/review_pp_20201118_1206.csv')
    print(
        f'{len(recipes_df_raw.ingredients)} recipes are being considered for recommendation'
    )
    # Recipes this user has rated: kept out of the random sample and
    # re-added explicitly so similarities can later be computed from them.
    user_rates = list(reviews_df_raw[reviews_df_raw.user_id == user_id].
        recipe_id)
    sample_df_no_user = recipes_df_raw[~recipes_df_raw.recipe_id.isin(
        user_rates)].sample(n=n_recommendations, random_state=1)
    recipe_df_w_user = recipes_df_raw[recipes_df_raw.recipe_id.isin(user_rates)]
    recipes_df_user = pd.concat([sample_df_no_user, recipe_df_w_user], axis=0)
    # Keep only recipes that actually have reviews, then split back into
    # per-recipe metadata and the review rows.
    merge_df = pd.merge(recipes_df_user[['recipe_id', 'metadata']],
        reviews_df_raw, on='recipe_id', how='right').dropna()
    recipes_df = merge_df[['recipe_id', 'metadata']].groupby(by='recipe_id'
        ).first().reset_index()
    reviews_df = merge_df.drop(['metadata'], axis='columns').reset_index()
    print(f'{len(user_rates)} recipes rated by user {user_id}')
    print(f'sampled pool shape: {sample_df_no_user.shape}')
    # --- content latent space: bag-of-words on metadata, reduced by SVD ---
    count = CountVectorizer(stop_words='english')
    count_matrix = count.fit_transform(recipes_df['metadata'])
    count_df = pd.DataFrame(count_matrix.toarray(), index=recipes_df.
        recipe_id.tolist())
    n_red = 250  # retained SVD components for the content space
    svd = TruncatedSVD(n_components=n_red)
    latent_df = svd.fit_transform(count_df)
    latent_df = pd.DataFrame(latent_df[:, 0:n_red], index=recipes_df.
        recipe_id.tolist())
    # --- collaborative latent space: recipe x user ratings, reduced by SVD ---
    ratings1 = pd.merge(recipes_df[['recipe_id']], reviews_df, on=
        'recipe_id', how='right')
    ratings = ratings1.pivot(index='recipe_id', columns='user_id', values=
        'rating').fillna(0)
    svd = TruncatedSVD(n_components=800)
    latent_df_2 = svd.fit_transform(ratings)
    index_list = reviews_df.groupby(by='recipe_id').mean().index.tolist()
    latent_df_2 = pd.DataFrame(latent_df_2, index=index_list)
    # Persist both latent spaces for reuse/inspection.
    latent_df.to_csv('data/latents/latent_content.csv', index=True)
    latent_df_2.to_csv('data/latents/latent_rating.csv', index=True)
    return latent_df, latent_df_2, user_rates
def get_one_recommendation(recipe_id, latent_1, latent_2, n_recommendations):
    """Score every recipe against ``recipe_id`` and return the top matches.

    Combines a content-based similarity (``latent_1``, built from recipe
    metadata) with a collaborative similarity (``latent_2``, built from user
    ratings) into an unweighted 50/50 hybrid score.

    Args:
        recipe_id: id of the anchor recipe; must be an index of both latents.
        latent_1: DataFrame of content latent vectors, indexed by recipe_id.
        latent_2: DataFrame of rating latent vectors, indexed by recipe_id.
        n_recommendations: number of top-scoring rows to return.

    Returns:
        DataFrame with columns ['recipe_id', 'content', 'collaborative',
        'hybrid'], sorted by hybrid score descending.
    """
    # Anchor vectors reshaped to (1, n_features) as cosine_similarity expects.
    v1 = np.array(latent_1.loc[recipe_id]).reshape(1, -1)
    v2 = np.array(latent_2.loc[recipe_id]).reshape(1, -1)
    # Similarity of the anchor against every recipe in each latent space.
    sim1 = cosine_similarity(latent_1, v1).reshape(-1)
    sim2 = cosine_similarity(latent_2, v2).reshape(-1)
    hybrid = (sim1 + sim2) / 2.0
    # NOTE(review): assumes latent_1 and latent_2 are row-aligned on the same
    # recipe ids — confirm upstream in get_df_4_model.
    recommendation_df = pd.DataFrame(
        {'content': sim1, 'collaborative': sim2, 'hybrid': hybrid},
        index=latent_1.index)
    recommendation_df.sort_values('hybrid', ascending=False, inplace=True)
    return recommendation_df.head(n_recommendations).reset_index().rename(
        columns={'index': 'recipe_id'})
def get_user_recommendations(user_id, n_recommendations=500):
    """Recommend recipes for one user.

    Builds a per-recipe candidate list for every recipe the user has rated
    and sums the similarity scores across those lists, so recipes similar
    to several rated dishes rise to the top.

    Args:
        user_id: id of the user to recommend for.
        n_recommendations: candidates contributed by each rated recipe.

    Returns:
        DataFrame indexed by recipe_id with summed 'content',
        'collaborative' and 'hybrid' scores, sorted by 'hybrid' descending.
    """
    # NOTE(review): n_recommendations is not forwarded to get_df_4_model and
    # ratings are not sign-weighted (dislikes count like likes) — confirm.
    latent_1, latent_2, recipe_list = get_df_4_model(user_id)
    recommendations = [get_one_recommendation(i, latent_1, latent_2,
        n_recommendations) for i in recipe_list]
    recommendations_df = pd.concat(recommendations)
    grouped_recommendations = recommendations_df.groupby(by='recipe_id').sum(
        ).sort_values(by='hybrid', ascending=False)
    return grouped_recommendations
def get_superuser_recommendation(n_recommendations=100, user_id=424680,
        max_seed_recipes=10, top_n=30):
    """Recommendation demo for a fixed "super user".

    Args:
        n_recommendations: sample size / per-recipe candidate count.
        user_id: user to recommend for (defaults to the demo super user).
        max_seed_recipes: cap on how many rated recipes seed the scoring.
        top_n: number of top-scoring rows returned.

    Returns:
        DataFrame of the ``top_n`` recipes with the highest summed hybrid
        scores, sorted descending.
    """
    latent_1, latent_2, recipe_list = get_df_4_model(user_id, n_recommendations)
    # Only the first few rated recipes are used to keep the demo fast.
    recipe_list = recipe_list[0:max_seed_recipes]
    recommendations = [get_one_recommendation(i, latent_1, latent_2,
        n_recommendations) for i in recipe_list]
    recommendations_df = pd.concat(recommendations)
    grouped_recommendations = recommendations_df.groupby(by='recipe_id').sum(
        ).sort_values(by='hybrid', ascending=False)
    print(
        f'The recommendation results are based on {len(recipe_list)} recipes the user liked or disliked'
    )
    return grouped_recommendations[0:top_n]
# Script entry point: run the super-user demo with a large candidate pool.
if __name__ == '__main__':
    result = get_superuser_recommendation(n_recommendations=4000)
    print('Here are the top results for the user:')
    print(result)
<|reserved_special_token_1|>
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics.pairwise import cosine_similarity
def get_df_4_model(user_id, n_recommendations=20000):
    """Build the latent matrices used by the hybrid recommender.

    Reads the preprocessed recipe and review CSVs, samples a pool of
    candidate recipes (always keeping every recipe the user has rated),
    then derives two latent spaces via truncated SVD:

    * a content space from a bag-of-words encoding of recipe metadata, and
    * a collaborative space from the recipe x user rating matrix.

    Both latent frames are also written to ``data/latents/`` as CSVs.

    Args:
        user_id: user whose rated recipes must survive the sampling.
        n_recommendations: size of the random sample of unrated recipes.

    Returns:
        Tuple ``(latent_df, latent_df_2, user_rates)``: the content latent
        frame, the collaborative latent frame (both indexed by recipe_id),
        and the list of recipe ids the user has rated.
    """
    print('Generating dataframe for recommendation model')
    recipes_df_raw = pd.read_csv(
        'data/preprocessed/recipe_pp_20201118_1206.csv')
    reviews_df_raw = pd.read_csv(
        'data/preprocessed/review_pp_20201118_1206.csv')
    print(
        f'{len(recipes_df_raw.ingredients)} recipes are being considered for recommendation'
    )
    # Recipes this user has rated: kept out of the random sample and
    # re-added explicitly so similarities can later be computed from them.
    user_rates = list(reviews_df_raw[reviews_df_raw.user_id == user_id].
        recipe_id)
    sample_df_no_user = recipes_df_raw[~recipes_df_raw.recipe_id.isin(
        user_rates)].sample(n=n_recommendations, random_state=1)
    recipe_df_w_user = recipes_df_raw[recipes_df_raw.recipe_id.isin(user_rates)]
    recipes_df_user = pd.concat([sample_df_no_user, recipe_df_w_user], axis=0)
    # Keep only recipes that actually have reviews, then split back into
    # per-recipe metadata and the review rows.
    merge_df = pd.merge(recipes_df_user[['recipe_id', 'metadata']],
        reviews_df_raw, on='recipe_id', how='right').dropna()
    recipes_df = merge_df[['recipe_id', 'metadata']].groupby(by='recipe_id'
        ).first().reset_index()
    reviews_df = merge_df.drop(['metadata'], axis='columns').reset_index()
    print(f'{len(user_rates)} recipes rated by user {user_id}')
    print(f'sampled pool shape: {sample_df_no_user.shape}')
    # --- content latent space: bag-of-words on metadata, reduced by SVD ---
    count = CountVectorizer(stop_words='english')
    count_matrix = count.fit_transform(recipes_df['metadata'])
    count_df = pd.DataFrame(count_matrix.toarray(), index=recipes_df.
        recipe_id.tolist())
    n_red = 250  # retained SVD components for the content space
    svd = TruncatedSVD(n_components=n_red)
    latent_df = svd.fit_transform(count_df)
    latent_df = pd.DataFrame(latent_df[:, 0:n_red], index=recipes_df.
        recipe_id.tolist())
    # --- collaborative latent space: recipe x user ratings, reduced by SVD ---
    ratings1 = pd.merge(recipes_df[['recipe_id']], reviews_df, on=
        'recipe_id', how='right')
    ratings = ratings1.pivot(index='recipe_id', columns='user_id', values=
        'rating').fillna(0)
    svd = TruncatedSVD(n_components=800)
    latent_df_2 = svd.fit_transform(ratings)
    index_list = reviews_df.groupby(by='recipe_id').mean().index.tolist()
    latent_df_2 = pd.DataFrame(latent_df_2, index=index_list)
    # Persist both latent spaces for reuse/inspection.
    latent_df.to_csv('data/latents/latent_content.csv', index=True)
    latent_df_2.to_csv('data/latents/latent_rating.csv', index=True)
    return latent_df, latent_df_2, user_rates
def get_one_recommendation(recipe_id, latent_1, latent_2, n_recommendations):
    """Score every recipe against ``recipe_id`` and return the top matches.

    Combines a content-based similarity (``latent_1``, built from recipe
    metadata) with a collaborative similarity (``latent_2``, built from user
    ratings) into an unweighted 50/50 hybrid score.

    Args:
        recipe_id: id of the anchor recipe; must be an index of both latents.
        latent_1: DataFrame of content latent vectors, indexed by recipe_id.
        latent_2: DataFrame of rating latent vectors, indexed by recipe_id.
        n_recommendations: number of top-scoring rows to return.

    Returns:
        DataFrame with columns ['recipe_id', 'content', 'collaborative',
        'hybrid'], sorted by hybrid score descending.
    """
    # Anchor vectors reshaped to (1, n_features) as cosine_similarity expects.
    v1 = np.array(latent_1.loc[recipe_id]).reshape(1, -1)
    v2 = np.array(latent_2.loc[recipe_id]).reshape(1, -1)
    # Similarity of the anchor against every recipe in each latent space.
    sim1 = cosine_similarity(latent_1, v1).reshape(-1)
    sim2 = cosine_similarity(latent_2, v2).reshape(-1)
    hybrid = (sim1 + sim2) / 2.0
    # NOTE(review): assumes latent_1 and latent_2 are row-aligned on the same
    # recipe ids — confirm upstream in get_df_4_model.
    recommendation_df = pd.DataFrame(
        {'content': sim1, 'collaborative': sim2, 'hybrid': hybrid},
        index=latent_1.index)
    recommendation_df.sort_values('hybrid', ascending=False, inplace=True)
    return recommendation_df.head(n_recommendations).reset_index().rename(
        columns={'index': 'recipe_id'})
def get_user_recommendations(user_id, n_recommendations=500):
    """Recommend recipes for one user.

    Builds a per-recipe candidate list for every recipe the user has rated
    and sums the similarity scores across those lists, so recipes similar
    to several rated dishes rise to the top.

    Args:
        user_id: id of the user to recommend for.
        n_recommendations: candidates contributed by each rated recipe.

    Returns:
        DataFrame indexed by recipe_id with summed 'content',
        'collaborative' and 'hybrid' scores, sorted by 'hybrid' descending.
    """
    # NOTE(review): n_recommendations is not forwarded to get_df_4_model and
    # ratings are not sign-weighted (dislikes count like likes) — confirm.
    latent_1, latent_2, recipe_list = get_df_4_model(user_id)
    recommendations = [get_one_recommendation(i, latent_1, latent_2,
        n_recommendations) for i in recipe_list]
    recommendations_df = pd.concat(recommendations)
    grouped_recommendations = recommendations_df.groupby(by='recipe_id').sum(
        ).sort_values(by='hybrid', ascending=False)
    return grouped_recommendations
def get_superuser_recommendation(n_recommendations=100, user_id=424680,
        max_seed_recipes=10, top_n=30):
    """Recommendation demo for a fixed "super user".

    Args:
        n_recommendations: sample size / per-recipe candidate count.
        user_id: user to recommend for (defaults to the demo super user).
        max_seed_recipes: cap on how many rated recipes seed the scoring.
        top_n: number of top-scoring rows returned.

    Returns:
        DataFrame of the ``top_n`` recipes with the highest summed hybrid
        scores, sorted descending.
    """
    latent_1, latent_2, recipe_list = get_df_4_model(user_id, n_recommendations)
    # Only the first few rated recipes are used to keep the demo fast.
    recipe_list = recipe_list[0:max_seed_recipes]
    recommendations = [get_one_recommendation(i, latent_1, latent_2,
        n_recommendations) for i in recipe_list]
    recommendations_df = pd.concat(recommendations)
    grouped_recommendations = recommendations_df.groupby(by='recipe_id').sum(
        ).sort_values(by='hybrid', ascending=False)
    print(
        f'The recommendation results are based on {len(recipe_list)} recipes the user liked or disliked'
    )
    return grouped_recommendations[0:top_n]
if __name__ == "__main__":
    # Smoke-run: build and print recommendations for the hard-coded super user.
    result = get_superuser_recommendation(n_recommendations=4000)
    print('Here are the top results for the user:')
    print(result)
|
flexible
|
{
"blob_id": "5c8de06176d06c5a2cf78ac138a5cb35e168d617",
"index": 5122,
"step-1": "<mask token>\n\n\ndef get_df_4_model(user_id, n_recommendations=20000):\n \"\"\"this function generates the latent dataframes used for the prediction model\"\"\"\n print('Generating dataframe for recommendation model')\n recipes_df_raw = pd.read_csv(\n 'data/preprocessed/recipe_pp_20201118_1206.csv')\n reviews_df_raw = pd.read_csv(\n 'data/preprocessed/review_pp_20201118_1206.csv')\n print(\n f'{len(recipes_df_raw.ingredients)} recipes are being considered for recommendation'\n )\n user_rates = list(reviews_df_raw[reviews_df_raw.user_id == user_id].\n recipe_id)\n sample_df_no_user = recipes_df_raw[~recipes_df_raw.recipe_id.isin(\n user_rates)].sample(n=n_recommendations, random_state=1)\n recipe_df_w_user = recipes_df_raw[recipes_df_raw.recipe_id.isin(user_rates)\n ]\n recipes_df_user = pd.concat([sample_df_no_user, recipe_df_w_user], axis=0)\n merge_df = pd.merge(recipes_df_user[['recipe_id', 'metadata']],\n reviews_df_raw, on='recipe_id', how='right').dropna()\n recipes_df = merge_df[['recipe_id', 'metadata']].groupby(by='recipe_id'\n ).first().reset_index()\n reviews_df = merge_df.drop(['metadata'], axis='columns').reset_index()\n print(len(user_rates))\n print(sample_df_no_user.shape)\n count = CountVectorizer(stop_words='english')\n count_matrix = count.fit_transform(recipes_df['metadata'])\n count_df = pd.DataFrame(count_matrix.toarray(), index=recipes_df.\n recipe_id.tolist())\n n_red = 250\n svd = TruncatedSVD(n_components=n_red)\n latent_df = svd.fit_transform(count_df)\n n = n_red\n latent_df = pd.DataFrame(latent_df[:, 0:n], index=recipes_df.recipe_id.\n tolist())\n latent_df\n ratings1 = pd.merge(recipes_df[['recipe_id']], reviews_df, on=\n 'recipe_id', how='right')\n ratings = ratings1.pivot(index='recipe_id', columns='user_id', values=\n 'rating').fillna(0)\n svd = TruncatedSVD(n_components=800)\n latent_df_2 = svd.fit_transform(ratings)\n index_list = reviews_df.groupby(by='recipe_id').mean().index.tolist()\n latent_df_2 = 
pd.DataFrame(latent_df_2, index=index_list)\n latent_df.to_csv(f'data/latents/latent_content.csv', index=True)\n latent_df_2.to_csv(f'data/latents/latent_rating.csv', index=True)\n return latent_df, latent_df_2, user_rates\n\n\ndef get_one_recommendation(recipe_id, latent_1, latent_2, n_recommendations):\n v1 = np.array(latent_1.loc[recipe_id]).reshape(1, -1)\n v2 = np.array(latent_2.loc[recipe_id]).reshape(1, -1)\n sim1 = cosine_similarity(latent_1, v1).reshape(-1)\n sim2 = cosine_similarity(latent_2, v2).reshape(-1)\n hybrid = (sim1 + sim2) / 2.0\n dictDf = {'content': sim1, 'collaborative': sim2, 'hybrid': hybrid}\n recommendation_df = pd.DataFrame(dictDf, index=latent_1.index)\n recommendation_df.sort_values('hybrid', ascending=False, inplace=True)\n recommendation_df.head(10)\n return recommendation_df.head(n_recommendations).reset_index().rename(\n columns={'index': 'recipe_id'})\n\n\ndef get_user_recommendations(user_id, n_recommendations=500):\n \"\"\"thi function gets the recommendations fo one user by taking all of its liked and disliked dishes,\n getting the recommendation based on each recipe and then summing the scores\"\"\"\n latent_1, latent_2, recipe_list = get_df_4_model(user_id)\n recommendations = [get_one_recommendation(i, latent_1, latent_2,\n n_recommendations) for i in recipe_list]\n recommendations_df = pd.concat(recommendations)\n grouped_recommendations = recommendations_df.groupby(by='recipe_id').sum(\n ).sort_values(by='hybrid', ascending=False)\n return grouped_recommendations\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_df_4_model(user_id, n_recommendations=20000):\n \"\"\"this function generates the latent dataframes used for the prediction model\"\"\"\n print('Generating dataframe for recommendation model')\n recipes_df_raw = pd.read_csv(\n 'data/preprocessed/recipe_pp_20201118_1206.csv')\n reviews_df_raw = pd.read_csv(\n 'data/preprocessed/review_pp_20201118_1206.csv')\n print(\n f'{len(recipes_df_raw.ingredients)} recipes are being considered for recommendation'\n )\n user_rates = list(reviews_df_raw[reviews_df_raw.user_id == user_id].\n recipe_id)\n sample_df_no_user = recipes_df_raw[~recipes_df_raw.recipe_id.isin(\n user_rates)].sample(n=n_recommendations, random_state=1)\n recipe_df_w_user = recipes_df_raw[recipes_df_raw.recipe_id.isin(user_rates)\n ]\n recipes_df_user = pd.concat([sample_df_no_user, recipe_df_w_user], axis=0)\n merge_df = pd.merge(recipes_df_user[['recipe_id', 'metadata']],\n reviews_df_raw, on='recipe_id', how='right').dropna()\n recipes_df = merge_df[['recipe_id', 'metadata']].groupby(by='recipe_id'\n ).first().reset_index()\n reviews_df = merge_df.drop(['metadata'], axis='columns').reset_index()\n print(len(user_rates))\n print(sample_df_no_user.shape)\n count = CountVectorizer(stop_words='english')\n count_matrix = count.fit_transform(recipes_df['metadata'])\n count_df = pd.DataFrame(count_matrix.toarray(), index=recipes_df.\n recipe_id.tolist())\n n_red = 250\n svd = TruncatedSVD(n_components=n_red)\n latent_df = svd.fit_transform(count_df)\n n = n_red\n latent_df = pd.DataFrame(latent_df[:, 0:n], index=recipes_df.recipe_id.\n tolist())\n latent_df\n ratings1 = pd.merge(recipes_df[['recipe_id']], reviews_df, on=\n 'recipe_id', how='right')\n ratings = ratings1.pivot(index='recipe_id', columns='user_id', values=\n 'rating').fillna(0)\n svd = TruncatedSVD(n_components=800)\n latent_df_2 = svd.fit_transform(ratings)\n index_list = reviews_df.groupby(by='recipe_id').mean().index.tolist()\n latent_df_2 = 
pd.DataFrame(latent_df_2, index=index_list)\n latent_df.to_csv(f'data/latents/latent_content.csv', index=True)\n latent_df_2.to_csv(f'data/latents/latent_rating.csv', index=True)\n return latent_df, latent_df_2, user_rates\n\n\ndef get_one_recommendation(recipe_id, latent_1, latent_2, n_recommendations):\n v1 = np.array(latent_1.loc[recipe_id]).reshape(1, -1)\n v2 = np.array(latent_2.loc[recipe_id]).reshape(1, -1)\n sim1 = cosine_similarity(latent_1, v1).reshape(-1)\n sim2 = cosine_similarity(latent_2, v2).reshape(-1)\n hybrid = (sim1 + sim2) / 2.0\n dictDf = {'content': sim1, 'collaborative': sim2, 'hybrid': hybrid}\n recommendation_df = pd.DataFrame(dictDf, index=latent_1.index)\n recommendation_df.sort_values('hybrid', ascending=False, inplace=True)\n recommendation_df.head(10)\n return recommendation_df.head(n_recommendations).reset_index().rename(\n columns={'index': 'recipe_id'})\n\n\ndef get_user_recommendations(user_id, n_recommendations=500):\n \"\"\"thi function gets the recommendations fo one user by taking all of its liked and disliked dishes,\n getting the recommendation based on each recipe and then summing the scores\"\"\"\n latent_1, latent_2, recipe_list = get_df_4_model(user_id)\n recommendations = [get_one_recommendation(i, latent_1, latent_2,\n n_recommendations) for i in recipe_list]\n recommendations_df = pd.concat(recommendations)\n grouped_recommendations = recommendations_df.groupby(by='recipe_id').sum(\n ).sort_values(by='hybrid', ascending=False)\n return grouped_recommendations\n\n\ndef get_superuser_recommendation(n_recommendations=100):\n user_id = 424680\n latent_1, latent_2, recipe_list = get_df_4_model(user_id, n_recommendations\n )\n recipe_list = recipe_list[0:10]\n recommendations = [get_one_recommendation(i, latent_1, latent_2,\n n_recommendations) for i in recipe_list]\n recommendations_df = pd.concat(recommendations)\n grouped_recommendations = recommendations_df.groupby(by='recipe_id').sum(\n ).sort_values(by='hybrid', 
ascending=False)\n print(\n f'The recommendation results are based on {len(recipe_list)} recipes the user liked or disliked'\n )\n return grouped_recommendations[0:30]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_df_4_model(user_id, n_recommendations=20000):\n \"\"\"this function generates the latent dataframes used for the prediction model\"\"\"\n print('Generating dataframe for recommendation model')\n recipes_df_raw = pd.read_csv(\n 'data/preprocessed/recipe_pp_20201118_1206.csv')\n reviews_df_raw = pd.read_csv(\n 'data/preprocessed/review_pp_20201118_1206.csv')\n print(\n f'{len(recipes_df_raw.ingredients)} recipes are being considered for recommendation'\n )\n user_rates = list(reviews_df_raw[reviews_df_raw.user_id == user_id].\n recipe_id)\n sample_df_no_user = recipes_df_raw[~recipes_df_raw.recipe_id.isin(\n user_rates)].sample(n=n_recommendations, random_state=1)\n recipe_df_w_user = recipes_df_raw[recipes_df_raw.recipe_id.isin(user_rates)\n ]\n recipes_df_user = pd.concat([sample_df_no_user, recipe_df_w_user], axis=0)\n merge_df = pd.merge(recipes_df_user[['recipe_id', 'metadata']],\n reviews_df_raw, on='recipe_id', how='right').dropna()\n recipes_df = merge_df[['recipe_id', 'metadata']].groupby(by='recipe_id'\n ).first().reset_index()\n reviews_df = merge_df.drop(['metadata'], axis='columns').reset_index()\n print(len(user_rates))\n print(sample_df_no_user.shape)\n count = CountVectorizer(stop_words='english')\n count_matrix = count.fit_transform(recipes_df['metadata'])\n count_df = pd.DataFrame(count_matrix.toarray(), index=recipes_df.\n recipe_id.tolist())\n n_red = 250\n svd = TruncatedSVD(n_components=n_red)\n latent_df = svd.fit_transform(count_df)\n n = n_red\n latent_df = pd.DataFrame(latent_df[:, 0:n], index=recipes_df.recipe_id.\n tolist())\n latent_df\n ratings1 = pd.merge(recipes_df[['recipe_id']], reviews_df, on=\n 'recipe_id', how='right')\n ratings = ratings1.pivot(index='recipe_id', columns='user_id', values=\n 'rating').fillna(0)\n svd = TruncatedSVD(n_components=800)\n latent_df_2 = svd.fit_transform(ratings)\n index_list = reviews_df.groupby(by='recipe_id').mean().index.tolist()\n latent_df_2 = 
pd.DataFrame(latent_df_2, index=index_list)\n latent_df.to_csv(f'data/latents/latent_content.csv', index=True)\n latent_df_2.to_csv(f'data/latents/latent_rating.csv', index=True)\n return latent_df, latent_df_2, user_rates\n\n\ndef get_one_recommendation(recipe_id, latent_1, latent_2, n_recommendations):\n v1 = np.array(latent_1.loc[recipe_id]).reshape(1, -1)\n v2 = np.array(latent_2.loc[recipe_id]).reshape(1, -1)\n sim1 = cosine_similarity(latent_1, v1).reshape(-1)\n sim2 = cosine_similarity(latent_2, v2).reshape(-1)\n hybrid = (sim1 + sim2) / 2.0\n dictDf = {'content': sim1, 'collaborative': sim2, 'hybrid': hybrid}\n recommendation_df = pd.DataFrame(dictDf, index=latent_1.index)\n recommendation_df.sort_values('hybrid', ascending=False, inplace=True)\n recommendation_df.head(10)\n return recommendation_df.head(n_recommendations).reset_index().rename(\n columns={'index': 'recipe_id'})\n\n\ndef get_user_recommendations(user_id, n_recommendations=500):\n \"\"\"thi function gets the recommendations fo one user by taking all of its liked and disliked dishes,\n getting the recommendation based on each recipe and then summing the scores\"\"\"\n latent_1, latent_2, recipe_list = get_df_4_model(user_id)\n recommendations = [get_one_recommendation(i, latent_1, latent_2,\n n_recommendations) for i in recipe_list]\n recommendations_df = pd.concat(recommendations)\n grouped_recommendations = recommendations_df.groupby(by='recipe_id').sum(\n ).sort_values(by='hybrid', ascending=False)\n return grouped_recommendations\n\n\ndef get_superuser_recommendation(n_recommendations=100):\n user_id = 424680\n latent_1, latent_2, recipe_list = get_df_4_model(user_id, n_recommendations\n )\n recipe_list = recipe_list[0:10]\n recommendations = [get_one_recommendation(i, latent_1, latent_2,\n n_recommendations) for i in recipe_list]\n recommendations_df = pd.concat(recommendations)\n grouped_recommendations = recommendations_df.groupby(by='recipe_id').sum(\n ).sort_values(by='hybrid', 
ascending=False)\n print(\n f'The recommendation results are based on {len(recipe_list)} recipes the user liked or disliked'\n )\n return grouped_recommendations[0:30]\n\n\nif __name__ == '__main__':\n result = get_superuser_recommendation(n_recommendations=4000)\n print('Here are the top results for the user:')\n print(result)\n",
"step-4": "import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n\ndef get_df_4_model(user_id, n_recommendations=20000):\n \"\"\"this function generates the latent dataframes used for the prediction model\"\"\"\n print('Generating dataframe for recommendation model')\n recipes_df_raw = pd.read_csv(\n 'data/preprocessed/recipe_pp_20201118_1206.csv')\n reviews_df_raw = pd.read_csv(\n 'data/preprocessed/review_pp_20201118_1206.csv')\n print(\n f'{len(recipes_df_raw.ingredients)} recipes are being considered for recommendation'\n )\n user_rates = list(reviews_df_raw[reviews_df_raw.user_id == user_id].\n recipe_id)\n sample_df_no_user = recipes_df_raw[~recipes_df_raw.recipe_id.isin(\n user_rates)].sample(n=n_recommendations, random_state=1)\n recipe_df_w_user = recipes_df_raw[recipes_df_raw.recipe_id.isin(user_rates)\n ]\n recipes_df_user = pd.concat([sample_df_no_user, recipe_df_w_user], axis=0)\n merge_df = pd.merge(recipes_df_user[['recipe_id', 'metadata']],\n reviews_df_raw, on='recipe_id', how='right').dropna()\n recipes_df = merge_df[['recipe_id', 'metadata']].groupby(by='recipe_id'\n ).first().reset_index()\n reviews_df = merge_df.drop(['metadata'], axis='columns').reset_index()\n print(len(user_rates))\n print(sample_df_no_user.shape)\n count = CountVectorizer(stop_words='english')\n count_matrix = count.fit_transform(recipes_df['metadata'])\n count_df = pd.DataFrame(count_matrix.toarray(), index=recipes_df.\n recipe_id.tolist())\n n_red = 250\n svd = TruncatedSVD(n_components=n_red)\n latent_df = svd.fit_transform(count_df)\n n = n_red\n latent_df = pd.DataFrame(latent_df[:, 0:n], index=recipes_df.recipe_id.\n tolist())\n latent_df\n ratings1 = pd.merge(recipes_df[['recipe_id']], reviews_df, on=\n 'recipe_id', how='right')\n ratings = 
ratings1.pivot(index='recipe_id', columns='user_id', values=\n 'rating').fillna(0)\n svd = TruncatedSVD(n_components=800)\n latent_df_2 = svd.fit_transform(ratings)\n index_list = reviews_df.groupby(by='recipe_id').mean().index.tolist()\n latent_df_2 = pd.DataFrame(latent_df_2, index=index_list)\n latent_df.to_csv(f'data/latents/latent_content.csv', index=True)\n latent_df_2.to_csv(f'data/latents/latent_rating.csv', index=True)\n return latent_df, latent_df_2, user_rates\n\n\ndef get_one_recommendation(recipe_id, latent_1, latent_2, n_recommendations):\n v1 = np.array(latent_1.loc[recipe_id]).reshape(1, -1)\n v2 = np.array(latent_2.loc[recipe_id]).reshape(1, -1)\n sim1 = cosine_similarity(latent_1, v1).reshape(-1)\n sim2 = cosine_similarity(latent_2, v2).reshape(-1)\n hybrid = (sim1 + sim2) / 2.0\n dictDf = {'content': sim1, 'collaborative': sim2, 'hybrid': hybrid}\n recommendation_df = pd.DataFrame(dictDf, index=latent_1.index)\n recommendation_df.sort_values('hybrid', ascending=False, inplace=True)\n recommendation_df.head(10)\n return recommendation_df.head(n_recommendations).reset_index().rename(\n columns={'index': 'recipe_id'})\n\n\ndef get_user_recommendations(user_id, n_recommendations=500):\n \"\"\"thi function gets the recommendations fo one user by taking all of its liked and disliked dishes,\n getting the recommendation based on each recipe and then summing the scores\"\"\"\n latent_1, latent_2, recipe_list = get_df_4_model(user_id)\n recommendations = [get_one_recommendation(i, latent_1, latent_2,\n n_recommendations) for i in recipe_list]\n recommendations_df = pd.concat(recommendations)\n grouped_recommendations = recommendations_df.groupby(by='recipe_id').sum(\n ).sort_values(by='hybrid', ascending=False)\n return grouped_recommendations\n\n\ndef get_superuser_recommendation(n_recommendations=100):\n user_id = 424680\n latent_1, latent_2, recipe_list = get_df_4_model(user_id, n_recommendations\n )\n recipe_list = recipe_list[0:10]\n recommendations 
= [get_one_recommendation(i, latent_1, latent_2,\n n_recommendations) for i in recipe_list]\n recommendations_df = pd.concat(recommendations)\n grouped_recommendations = recommendations_df.groupby(by='recipe_id').sum(\n ).sort_values(by='hybrid', ascending=False)\n print(\n f'The recommendation results are based on {len(recipe_list)} recipes the user liked or disliked'\n )\n return grouped_recommendations[0:30]\n\n\nif __name__ == '__main__':\n result = get_superuser_recommendation(n_recommendations=4000)\n print('Here are the top results for the user:')\n print(result)\n",
"step-5": "import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n\n\n\n\ndef get_df_4_model(user_id, n_recommendations = 20000):\n '''this function generates the latent dataframes used for the prediction model'''\n # First the data needs to be loaded\n print('Generating dataframe for recommendation model')\n recipes_df_raw = pd.read_csv(\"data/preprocessed/recipe_pp_20201118_1206.csv\")#.sample(n=n_recommendations, random_state=1)\n reviews_df_raw = pd.read_csv(\"data/preprocessed/review_pp_20201118_1206.csv\")\n print(f'{len(recipes_df_raw.ingredients)} recipes are being considered for recommendation')\n # !! currently the df is way to big, so we need to take a sample, but ensure that the recipes the user likes are used for finding similarities later\n # For this I will create a sample df without user recipes and concatenate the a df with only user liked recipes\n\n user_rates =list(reviews_df_raw[reviews_df_raw.user_id == user_id].recipe_id) # generate a list of user rated recipes\n\n sample_df_no_user = recipes_df_raw[~recipes_df_raw.recipe_id.isin(user_rates)].sample(n=n_recommendations, random_state=1)\n recipe_df_w_user = recipes_df_raw[recipes_df_raw.recipe_id.isin(user_rates)]\n\n recipes_df_user = pd.concat([sample_df_no_user, recipe_df_w_user], axis=0)\n merge_df = pd.merge(recipes_df_user[['recipe_id', 'metadata']], reviews_df_raw, on=\"recipe_id\", how=\"right\").dropna()\n recipes_df = merge_df[['recipe_id', 'metadata']].groupby(by=\"recipe_id\").first().reset_index()\n reviews_df = merge_df.drop(['metadata'], axis=\"columns\").reset_index()\n print(len(user_rates))\n print(sample_df_no_user.shape)\n #Using CountVectorizer to encode metadata into column\n count = CountVectorizer(stop_words='english')\n count_matrix = 
count.fit_transform(recipes_df['metadata'])\n #Create a new dataframe count_df with the vectors you get from this count transformation.\n count_df = pd.DataFrame(count_matrix.toarray(), index=recipes_df.recipe_id.tolist())\n #reduce dimensionality\n n_red = 250 # reduction factor\n svd = TruncatedSVD(n_components=n_red)\n latent_df = svd.fit_transform(count_df)\n\n n = n_red\n latent_df = pd.DataFrame(latent_df[:,0:n], index=recipes_df.recipe_id.tolist())\n latent_df\n\n # start recommendin similar recipes on the basis of user ratings (item-item collaborative filtering\n #### -> old: ratings = reviews_df.pivot(index = 'recipe_id', columns ='user_id', values = 'rating').fillna(0)\n #\n ratings1 = pd.merge(recipes_df[['recipe_id']], reviews_df, on=\"recipe_id\", how=\"right\")\n\n ratings = ratings1.pivot(index = 'recipe_id', columns ='user_id', values = 'rating').fillna(0)\n\n svd = TruncatedSVD(n_components=800)\n latent_df_2 = svd.fit_transform(ratings)\n\n index_list = reviews_df.groupby(by=\"recipe_id\").mean().index.tolist()\n latent_df_2 = pd.DataFrame(latent_df_2, index=index_list)\n\n latent_df.to_csv(f'data/latents/latent_content.csv', index=True)\n latent_df_2.to_csv(f'data/latents/latent_rating.csv', index=True)\n\n\n return latent_df, latent_df_2, user_rates\n\ndef get_one_recommendation(recipe_id, latent_1, latent_2, n_recommendations):\n # applying Cosine similarity\n # Get the latent vectors for recipe_id:\"45119\" from content and collaborative matrices\n v1 = np.array(latent_1.loc[recipe_id]).reshape(1, -1)\n v2 = np.array(latent_2.loc[recipe_id]).reshape(1, -1)\n\n# Compute the cosine similartity of this movie with the others in the list\n sim1 = cosine_similarity(latent_1, v1).reshape(-1)\n sim2 = cosine_similarity(latent_2, v2).reshape(-1)\n\n hybrid = ((sim1 + sim2)/2.0)\n\n dictDf = {'content': sim1 , 'collaborative': sim2, 'hybrid': hybrid}\n recommendation_df = pd.DataFrame(dictDf, index = latent_1.index)\n\n 
recommendation_df.sort_values('hybrid', ascending=False, inplace=True)\n recommendation_df.head(10)\n\n return recommendation_df.head(n_recommendations).reset_index().rename(columns={\"index\":\"recipe_id\"})\n\ndef get_user_recommendations(user_id, n_recommendations = 500):\n '''thi function gets the recommendations fo one user by taking all of its liked and disliked dishes,\n getting the recommendation based on each recipe and then summing the scores'''\n\n # !!!!!!!!!! this function still assumes the user ONLY liked recipes\n # !!!!!!!!!! No dislikes are considered so far!\n latent_1, latent_2, recipe_list = get_df_4_model(user_id)#, n_recommendations)\n\n recommendations = [get_one_recommendation(i, latent_1, latent_2, n_recommendations) for i in recipe_list]# actual_list]\n #concetenate the list to a big df\n recommendations_df=pd.concat(recommendations)\n # sum the scores using groupby\n grouped_recommendations= recommendations_df.groupby(by=\"recipe_id\").sum().sort_values(by=\"hybrid\", ascending=False)\n return grouped_recommendations\n #return recipe_list\n\ndef get_superuser_recommendation(n_recommendations=100):\n user_id = 424680\n\n latent_1, latent_2, recipe_list = get_df_4_model(user_id, n_recommendations)\n\n recipe_list = recipe_list[0:10]\n\n recommendations = [get_one_recommendation(i, latent_1, latent_2, n_recommendations) for i in recipe_list]# actual_list]\n #concetenate the list to a big df\n recommendations_df=pd.concat(recommendations)\n # sum the scores using groupby\n grouped_recommendations= recommendations_df.groupby(by=\"recipe_id\").sum().sort_values(by=\"hybrid\", ascending=False)\n\n print(f'The recommendation results are based on {len(recipe_list)} recipes the user liked or disliked')\n\n return grouped_recommendations[0:30]\n\n\nif __name__ == \"__main__\":\n\n result = get_superuser_recommendation(n_recommendations=4000)\n\n print('Here are the top results for the user:')\n print(result)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
root.mainloop()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
root = RootGUI()
root.mainloop()
<|reserved_special_token_1|>
from RootGUI import RootGUI
root = RootGUI()
root.mainloop()
<|reserved_special_token_1|>
#This file was created by Tate Hagan
from RootGUI import RootGUI
root = RootGUI()
root.mainloop()
|
flexible
|
{
"blob_id": "d17081ef94df1e14308128341d040559edb81805",
"index": 7100,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nroot.mainloop()\n",
"step-3": "<mask token>\nroot = RootGUI()\nroot.mainloop()\n",
"step-4": "from RootGUI import RootGUI\nroot = RootGUI()\nroot.mainloop()\n",
"step-5": "#This file was created by Tate Hagan\r\n\r\nfrom RootGUI import RootGUI\r\n\r\nroot = RootGUI()\r\nroot.mainloop()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from ocr_helpers import FilePathResolver, ProblemsWriter
from ocr_google_client import CfaProblemsBuilder
from ocr_google_client_2016 import ParserTwoThousandSixteenAnswers, ParserTwoThousandSixteenQuestions
def resolve_build_and_write(year, day_part, file_part, nb_blocks_footer=0, nb_words_footer=0, headers=None, skip_nb_page=0, parser=None, indentation_threshold=15):
    """Resolve the scanned pages of one exam, OCR them into problems, write the XML.

    Args:
        year: Exam year used to locate the source images.
        day_part: Exam session ("morning"/"afternoon" style identifier).
        file_part: Which part of the paper to process (e.g. "answer").
        nb_blocks_footer: Number of trailing OCR blocks to drop per page.
        nb_words_footer: Number of trailing words to drop per page.
        headers: Header strings to strip from each page, or None.
        skip_nb_page: Number of leading pages to skip (covers, instructions).
        parser: Optional year-specific parser instance.
        indentation_threshold: Pixel threshold used to detect indentation.
    """
    path_resolver = FilePathResolver(year, day_part, file_part)
    # Drop the leading cover/instruction pages before running OCR.
    pages = path_resolver.resolve_sorted_paths()[skip_nb_page:]
    problems = CfaProblemsBuilder(
        parser=parser,
        headers=headers,
        nb_blocks_footer=nb_blocks_footer,
        nb_words_footer=nb_words_footer,
        indentation_threshold=indentation_threshold,
    ).build_problems(pages)
    ProblemsWriter().write_problems(path_resolver.get_xml_result_file(), problems)
# 2014 afternoon
# headers = ["7476229133318632 March Mock Exam - PM March Mock Exam - PM 399388"]
# resolve_build_and_write('2014', 'afternoon', 'answer', nb_blocks_footer=1, headers=headers, indentation_threshold=25)
# 2014 morning
# base_header = '3172168919041893 March Mock Exam - AM 399388'
# headers = ["|" + base_header, base_header]
# resolve_build_and_write('2014', 'morning', 'answer', nb_blocks_footer=1, headers=headers)
# 2015 afternoon
# headers = ['2015 Level I Mock Exam PM Questions and Answers']
# resolve_build_and_write('2015', 'afternoon', 'answer', nb_blocks_footer=1, headers=headers)
# 2015 morning
# headers = ['2015 Level I Mock Exam AM Questions and Answers']
# resolve_build_and_write('2015', 'morning', 'answer', nb_blocks_footer=1, headers=headers)
# 2016 afternoon answer
# headers = ['CFA level1-Mock-114']
# parser = ParserTwoThousandSixteenAnswers(17)
# resolve_build_and_write('2016', 'afternoon_answer', '', skip_nb_page=1, headers=headers, nb_words_footer=3, parser=parser)
# 2016 afternoon questions
# headers = ['CFA level1-Mock-114', 'CFA levell-Mock-114']
# parser = ParserTwoThousandSixteenQuestions(17)
# resolve_build_and_write('2016', 'afternoon_question', '', skip_nb_page=1, headers=headers, nb_words_footer=3, parser=parser)
#
# 2016 morning answer
# headers = ['CFA level1-Mock-113']
# parser = ParserTwoThousandSixteenAnswers(17)
# resolve_build_and_write('2016', 'morning_answer', '', skip_nb_page=1, headers=headers, nb_words_footer=3, parser=parser)
# 2016 morning questions
# headers = ['CFA level1-Mock-113', 'CFA levell-Mock-113']
# parser = ParserTwoThousandSixteenQuestions(17)
# resolve_build_and_write('2016', 'morning_question', '', skip_nb_page=1, headers=headers, nb_words_footer=3, parser=parser)
# 2017 morning
#resolve_build_and_write('2017', 'morning', 'answer', skip_nb_page=1, nb_blocks_footer=2)
# 2017 afternoon
resolve_build_and_write('2017', 'afternoon', 'answer', skip_nb_page=1, nb_blocks_footer=2)
|
normal
|
{
"blob_id": "ab3d443c60ca8ee82f594ae04e9b485a53d53f36",
"index": 5665,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef resolve_build_and_write(year, day_part, file_part, nb_blocks_footer=0,\n nb_words_footer=0, headers=None, skip_nb_page=0, parser=None,\n indentation_threshold=15):\n resolver = FilePathResolver(year, day_part, file_part)\n jpeg_filepaths = resolver.resolve_sorted_paths()\n jpeg_filepaths = jpeg_filepaths[skip_nb_page:]\n builder = CfaProblemsBuilder(parser=parser, headers=headers,\n nb_blocks_footer=nb_blocks_footer, nb_words_footer=nb_words_footer,\n indentation_threshold=indentation_threshold)\n problems = builder.build_problems(jpeg_filepaths)\n writer = ProblemsWriter()\n writer.write_problems(resolver.get_xml_result_file(), problems)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef resolve_build_and_write(year, day_part, file_part, nb_blocks_footer=0,\n nb_words_footer=0, headers=None, skip_nb_page=0, parser=None,\n indentation_threshold=15):\n resolver = FilePathResolver(year, day_part, file_part)\n jpeg_filepaths = resolver.resolve_sorted_paths()\n jpeg_filepaths = jpeg_filepaths[skip_nb_page:]\n builder = CfaProblemsBuilder(parser=parser, headers=headers,\n nb_blocks_footer=nb_blocks_footer, nb_words_footer=nb_words_footer,\n indentation_threshold=indentation_threshold)\n problems = builder.build_problems(jpeg_filepaths)\n writer = ProblemsWriter()\n writer.write_problems(resolver.get_xml_result_file(), problems)\n\n\nresolve_build_and_write('2017', 'afternoon', 'answer', skip_nb_page=1,\n nb_blocks_footer=2)\n",
"step-4": "from ocr_helpers import FilePathResolver, ProblemsWriter\nfrom ocr_google_client import CfaProblemsBuilder\nfrom ocr_google_client_2016 import ParserTwoThousandSixteenAnswers, ParserTwoThousandSixteenQuestions\n\n\ndef resolve_build_and_write(year, day_part, file_part, nb_blocks_footer=0,\n nb_words_footer=0, headers=None, skip_nb_page=0, parser=None,\n indentation_threshold=15):\n resolver = FilePathResolver(year, day_part, file_part)\n jpeg_filepaths = resolver.resolve_sorted_paths()\n jpeg_filepaths = jpeg_filepaths[skip_nb_page:]\n builder = CfaProblemsBuilder(parser=parser, headers=headers,\n nb_blocks_footer=nb_blocks_footer, nb_words_footer=nb_words_footer,\n indentation_threshold=indentation_threshold)\n problems = builder.build_problems(jpeg_filepaths)\n writer = ProblemsWriter()\n writer.write_problems(resolver.get_xml_result_file(), problems)\n\n\nresolve_build_and_write('2017', 'afternoon', 'answer', skip_nb_page=1,\n nb_blocks_footer=2)\n",
"step-5": "from ocr_helpers import FilePathResolver, ProblemsWriter\nfrom ocr_google_client import CfaProblemsBuilder\nfrom ocr_google_client_2016 import ParserTwoThousandSixteenAnswers, ParserTwoThousandSixteenQuestions\n\n\ndef resolve_build_and_write(year, day_part, file_part, nb_blocks_footer=0, nb_words_footer=0, headers=None, skip_nb_page=0, parser=None, indentation_threshold=15):\n resolver = FilePathResolver(year, day_part, file_part)\n jpeg_filepaths = resolver.resolve_sorted_paths()\n jpeg_filepaths = jpeg_filepaths[skip_nb_page:]\n\n builder = CfaProblemsBuilder(parser=parser, headers=headers, nb_blocks_footer=nb_blocks_footer, nb_words_footer=nb_words_footer, indentation_threshold=indentation_threshold)\n problems = builder.build_problems(jpeg_filepaths)\n\n writer = ProblemsWriter()\n writer.write_problems(resolver.get_xml_result_file(), problems)\n\n\n# 2014 afternoon\n# headers = [\"7476229133318632 March Mock Exam - PM March Mock Exam - PM 399388\"]\n# resolve_build_and_write('2014', 'afternoon', 'answer', nb_blocks_footer=1, headers=headers, indentation_threshold=25)\n\n# 2014 morning\n# base_header = '3172168919041893 March Mock Exam - AM 399388'\n# headers = [\"|\" + base_header, base_header]\n# resolve_build_and_write('2014', 'morning', 'answer', nb_blocks_footer=1, headers=headers)\n\n# 2015 afternoon\n# headers = ['2015 Level I Mock Exam PM Questions and Answers']\n# resolve_build_and_write('2015', 'afternoon', 'answer', nb_blocks_footer=1, headers=headers)\n\n# 2015 morning\n# headers = ['2015 Level I Mock Exam AM Questions and Answers']\n# resolve_build_and_write('2015', 'morning', 'answer', nb_blocks_footer=1, headers=headers)\n\n# 2016 afternoon answer\n# headers = ['CFA level1-Mock-114']\n# parser = ParserTwoThousandSixteenAnswers(17)\n# resolve_build_and_write('2016', 'afternoon_answer', '', skip_nb_page=1, headers=headers, nb_words_footer=3, parser=parser)\n\n# 2016 afternoon questions\n# headers = ['CFA level1-Mock-114', 'CFA 
levell-Mock-114']\n# parser = ParserTwoThousandSixteenQuestions(17)\n# resolve_build_and_write('2016', 'afternoon_question', '', skip_nb_page=1, headers=headers, nb_words_footer=3, parser=parser)\n#\n# 2016 morning answer\n# headers = ['CFA level1-Mock-113']\n# parser = ParserTwoThousandSixteenAnswers(17)\n# resolve_build_and_write('2016', 'morning_answer', '', skip_nb_page=1, headers=headers, nb_words_footer=3, parser=parser)\n\n# 2016 afternoon questions\n# headers = ['CFA level1-Mock-113', 'CFA levell-Mock-113']\n# parser = ParserTwoThousandSixteenQuestions(17)\n# resolve_build_and_write('2016', 'morning_question', '', skip_nb_page=1, headers=headers, nb_words_footer=3, parser=parser)\n\n# 2017 afternoon\n#resolve_build_and_write('2017', 'morning', 'answer', skip_nb_page=1, nb_blocks_footer=2)\n\n# 2017 afternoon\nresolve_build_and_write('2017', 'afternoon', 'answer', skip_nb_page=1, nb_blocks_footer=2)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(test_id)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
client = pymongo.MongoClient('localhost', 27017)
db = client['zhihu']
collection = db['zhihu']
document_test = {'name': 'test', 'time': time.strftime('%Y-%m-%d %H:%M:%S',
time.localtime(time.time()))}
test_id = collection.insert(document_test)
print(test_id)
<|reserved_special_token_1|>
import pymongo
import time
client = pymongo.MongoClient('localhost', 27017)
db = client['zhihu']
collection = db['zhihu']
document_test = {'name': 'test', 'time': time.strftime('%Y-%m-%d %H:%M:%S',
time.localtime(time.time()))}
test_id = collection.insert(document_test)
print(test_id)
<|reserved_special_token_1|>
import pymongo
import time
client = pymongo.MongoClient('localhost', 27017);
db = client['zhihu']; # 类似dict,若不存在,则新建;
# client.drop_database('zhihu') # 删除db
collection = db['zhihu']; # 若不存在,则新建;
# db.drop_collection('zhihu') # 删除collection
document_test = {'name': 'test', 'time': time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))}
test_id = collection.insert(document_test);
# collection.find_one({'name': 'test'})
# collection.find({'name': 'test'}) 返回curser,可继续进行find,count等操作
# collection.update({'name': 'test'}, {'$set': {'name': 'test_update'}})
print(test_id)
|
flexible
|
{
"blob_id": "d1d293a5d2c394e69d93488605f27b5468220286",
"index": 6627,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(test_id)\n",
"step-3": "<mask token>\nclient = pymongo.MongoClient('localhost', 27017)\ndb = client['zhihu']\ncollection = db['zhihu']\ndocument_test = {'name': 'test', 'time': time.strftime('%Y-%m-%d %H:%M:%S',\n time.localtime(time.time()))}\ntest_id = collection.insert(document_test)\nprint(test_id)\n",
"step-4": "import pymongo\nimport time\nclient = pymongo.MongoClient('localhost', 27017)\ndb = client['zhihu']\ncollection = db['zhihu']\ndocument_test = {'name': 'test', 'time': time.strftime('%Y-%m-%d %H:%M:%S',\n time.localtime(time.time()))}\ntest_id = collection.insert(document_test)\nprint(test_id)\n",
"step-5": "import pymongo\nimport time \n\nclient = pymongo.MongoClient('localhost', 27017);\ndb = client['zhihu']; # 类似dict,若不存在,则新建;\n# client.drop_database('zhihu') # 删除db\n\ncollection = db['zhihu']; # 若不存在,则新建;\n# db.drop_collection('zhihu') # 删除collection\n\ndocument_test = {'name': 'test', 'time': time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))}\ntest_id = collection.insert(document_test);\n# collection.find_one({'name': 'test'})\n# collection.find({'name': 'test'}) 返回curser,可继续进行find,count等操作\n# collection.update({'name': 'test'}, {'$set': {'name': 'test_update'}})\nprint(test_id)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(count)
print(count['b'])
print(count.most_common(1))
print(count.items())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
list1 = ['a', 'b', 'b', 'c', 'd', 'e', 'a', 'b', 'e']
count = Counter(list1)
print(count)
print(count['b'])
print(count.most_common(1))
print(count.items())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from collections import Counter
list1 = ['a', 'b', 'b', 'c', 'd', 'e', 'a', 'b', 'e']
count = Counter(list1)
print(count)
print(count['b'])
print(count.most_common(1))
print(count.items())
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding:utf-8 _*-
"""
@author:tom_tao626
@license: Apache Licence
@file: 17.列表中的元素统计.py
@time: 2020/12/09
@contact: tp320670258@gmail.com
@site: xxxx.suizhu.net
@software: PyCharm
"""
# collections.Counter()
from collections import Counter
list1 = ['a', 'b', 'b', 'c', 'd', 'e', 'a', 'b', 'e']
count = Counter(list1)
print(count)
# Counter({'a': 2, 'b': 2, 'e': 2, 'c': 1, 'd': 1})
print(count['b'])
# 3
# 出现次数最多的元素
print(count.most_common(1))
# [('b', 3)]
print(count.items())
# dict_items([('a', 2), ('b', 3), ('c', 1), ('d', 1), ('e', 2)])
|
flexible
|
{
"blob_id": "f2c592a0ea38d800510323a1001c646cdbecefff",
"index": 3009,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(count)\nprint(count['b'])\nprint(count.most_common(1))\nprint(count.items())\n",
"step-3": "<mask token>\nlist1 = ['a', 'b', 'b', 'c', 'd', 'e', 'a', 'b', 'e']\ncount = Counter(list1)\nprint(count)\nprint(count['b'])\nprint(count.most_common(1))\nprint(count.items())\n",
"step-4": "<mask token>\nfrom collections import Counter\nlist1 = ['a', 'b', 'b', 'c', 'd', 'e', 'a', 'b', 'e']\ncount = Counter(list1)\nprint(count)\nprint(count['b'])\nprint(count.most_common(1))\nprint(count.items())\n",
"step-5": "#!/usr/bin/env python \n# -*- coding:utf-8 _*-\n\"\"\" \n@author:tom_tao626 \n@license: Apache Licence \n@file: 17.列表中的元素统计.py \n@time: 2020/12/09\n@contact: tp320670258@gmail.com\n@site: xxxx.suizhu.net\n@software: PyCharm \n\"\"\"\n\n# collections.Counter()\n\nfrom collections import Counter\nlist1 = ['a', 'b', 'b', 'c', 'd', 'e', 'a', 'b', 'e']\ncount = Counter(list1)\nprint(count)\n# Counter({'a': 2, 'b': 2, 'e': 2, 'c': 1, 'd': 1})\nprint(count['b'])\n# 3\n# 出现次数最多的元素\nprint(count.most_common(1))\n# [('b', 3)]\nprint(count.items())\n# dict_items([('a', 2), ('b', 3), ('c', 1), ('d', 1), ('e', 2)])\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from PyQt5.QtWidgets import QHeaderView, QWidget
from presenters.studyings_presenter import StudyingsPresenter
from view.q_objects_view import QObjectsView
class QStudyingsView(QObjectsView):
    """Table view listing study sessions: start time, count, and topics."""

    def __init__(self, parent):
        # Both bases are initialised explicitly, widget machinery first,
        # then the shared objects-view plumbing.
        QWidget.__init__(self, parent)
        QObjectsView.__init__(self, parent)
        self.set_presenter(StudyingsPresenter(view=self))

    def init_table(self):
        """Set up the three column headers and stretch them to fill the view."""
        header_labels = ['Время начала', 'Число', 'Темы']
        self.table.setColumnCount(len(header_labels))
        self.table.setHorizontalHeaderLabels(header_labels)
        self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
normal
|
{
"blob_id": "f7174bf4e7612921e730ac87141c85654a2f2411",
"index": 6194,
"step-1": "<mask token>\n\n\nclass QStudyingsView(QObjectsView):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass QStudyingsView(QObjectsView):\n <mask token>\n\n def init_table(self):\n self.table.setColumnCount(3)\n self.table.setHorizontalHeaderLabels(['Время начала', 'Число', 'Темы'])\n self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n",
"step-3": "<mask token>\n\n\nclass QStudyingsView(QObjectsView):\n\n def __init__(self, parent):\n QWidget.__init__(self, parent)\n QObjectsView.__init__(self, parent)\n self.set_presenter(StudyingsPresenter(view=self))\n\n def init_table(self):\n self.table.setColumnCount(3)\n self.table.setHorizontalHeaderLabels(['Время начала', 'Число', 'Темы'])\n self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n",
"step-4": "from PyQt5.QtWidgets import QHeaderView, QWidget\nfrom presenters.studyings_presenter import StudyingsPresenter\nfrom view.q_objects_view import QObjectsView\n\n\nclass QStudyingsView(QObjectsView):\n\n def __init__(self, parent):\n QWidget.__init__(self, parent)\n QObjectsView.__init__(self, parent)\n self.set_presenter(StudyingsPresenter(view=self))\n\n def init_table(self):\n self.table.setColumnCount(3)\n self.table.setHorizontalHeaderLabels(['Время начала', 'Число', 'Темы'])\n self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
import re
def match_regex(filename, regex):
    """Yield capture group 1 of *regex* for lines of *filename*, bottom-up.

    Coroutine-style generator: after each match the caller may ``.send()``
    a replacement regex, which is used for the remaining (earlier) lines.
    """
    with open(filename) as handle:
        all_lines = handle.readlines()
        for current in reversed(all_lines):
            found = re.match(regex, current)
            if found:
                regex = yield found.group(1)
def get_serials(filename):
    """Yield the serial number of each device that logged an XFS error.

    Drives the match_regex coroutine through a three-step protocol, scanning
    the log bottom-up: find an XFS error (device name), then the bus line for
    that device, then the SERIAL= line for that bus; repeat for earlier errors.
    The .send() order below is load-bearing — each send both delivers the next
    pattern and resumes the scan from where the previous match stopped.
    """
    ERROR_RE = 'XFS ERROR (\[sd[a-z]\])'

    # Start the scan with the error pattern; first yield is the device name.
    matcher = match_regex(filename, ERROR_RE)
    device = next(matcher)

    while True:
        # Bus-info line for this device; device is escaped since it contains [].
        bus_regex = '(sd \S+) {}.*'.format(re.escape(device))
        print('bus_regex:', bus_regex)


        # Resume the scan looking for the bus line; capture the bus id.
        bus = matcher.send(bus_regex)

        # Resume again looking for that bus's SERIAL= line.
        serial_regex = '{} \(SERIAL=([^)]*)\)'.format(bus)
        print('serial_regex:', serial_regex)
        serial = matcher.send(serial_regex)
        yield serial

        # Switch back to the error pattern to find the next (earlier) device.
        device = matcher.send(ERROR_RE)
def main():
    """Scan the sample log and print each recovered serial number."""
    log_path = 'iter2/log2.txt'
    print('List of serial no found: ')
    for serial in get_serials(filename=log_path):
        print(serial)


if __name__ == '__main__':
    main()
|
normal
|
{
"blob_id": "a36a553342cfe605a97ddc0f636bbb73b683f6a6",
"index": 1239,
"step-1": "<mask token>\n\n\ndef match_regex(filename, regex):\n with open(filename) as file:\n lines = file.readlines()\n for line in reversed(lines):\n match = re.match(regex, line)\n if match:\n regex = yield match.groups()[0]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef match_regex(filename, regex):\n with open(filename) as file:\n lines = file.readlines()\n for line in reversed(lines):\n match = re.match(regex, line)\n if match:\n regex = yield match.groups()[0]\n\n\ndef get_serials(filename):\n ERROR_RE = 'XFS ERROR (\\\\[sd[a-z]\\\\])'\n matcher = match_regex(filename, ERROR_RE)\n device = next(matcher)\n while True:\n bus_regex = '(sd \\\\S+) {}.*'.format(re.escape(device))\n print('bus_regex:', bus_regex)\n bus = matcher.send(bus_regex)\n serial_regex = '{} \\\\(SERIAL=([^)]*)\\\\)'.format(bus)\n print('serial_regex:', serial_regex)\n serial = matcher.send(serial_regex)\n yield serial\n device = matcher.send(ERROR_RE)\n\n\ndef main():\n filename = 'iter2/log2.txt'\n print('List of serial no found: ')\n for serial in get_serials(filename=filename):\n print(serial)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef match_regex(filename, regex):\n with open(filename) as file:\n lines = file.readlines()\n for line in reversed(lines):\n match = re.match(regex, line)\n if match:\n regex = yield match.groups()[0]\n\n\ndef get_serials(filename):\n ERROR_RE = 'XFS ERROR (\\\\[sd[a-z]\\\\])'\n matcher = match_regex(filename, ERROR_RE)\n device = next(matcher)\n while True:\n bus_regex = '(sd \\\\S+) {}.*'.format(re.escape(device))\n print('bus_regex:', bus_regex)\n bus = matcher.send(bus_regex)\n serial_regex = '{} \\\\(SERIAL=([^)]*)\\\\)'.format(bus)\n print('serial_regex:', serial_regex)\n serial = matcher.send(serial_regex)\n yield serial\n device = matcher.send(ERROR_RE)\n\n\ndef main():\n filename = 'iter2/log2.txt'\n print('List of serial no found: ')\n for serial in get_serials(filename=filename):\n print(serial)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import re\n\n\ndef match_regex(filename, regex):\n with open(filename) as file:\n lines = file.readlines()\n for line in reversed(lines):\n match = re.match(regex, line)\n if match:\n regex = yield match.groups()[0]\n\n\ndef get_serials(filename):\n ERROR_RE = 'XFS ERROR (\\\\[sd[a-z]\\\\])'\n matcher = match_regex(filename, ERROR_RE)\n device = next(matcher)\n while True:\n bus_regex = '(sd \\\\S+) {}.*'.format(re.escape(device))\n print('bus_regex:', bus_regex)\n bus = matcher.send(bus_regex)\n serial_regex = '{} \\\\(SERIAL=([^)]*)\\\\)'.format(bus)\n print('serial_regex:', serial_regex)\n serial = matcher.send(serial_regex)\n yield serial\n device = matcher.send(ERROR_RE)\n\n\ndef main():\n filename = 'iter2/log2.txt'\n print('List of serial no found: ')\n for serial in get_serials(filename=filename):\n print(serial)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import re\n\ndef match_regex(filename, regex):\n with open(filename) as file:\n lines = file.readlines()\n\n for line in reversed(lines):\n match = re.match(regex, line)\n if match:\n regex = yield match.groups()[0]\n\ndef get_serials(filename):\n ERROR_RE = 'XFS ERROR (\\[sd[a-z]\\])'\n\n # Create generator of XFS ERROR\n matcher = match_regex(filename, ERROR_RE)\n device = next(matcher)\n\n while True:\n # Create regex pattern for BUS INFO base on DEVICE got ERROR\n bus_regex = '(sd \\S+) {}.*'.format(re.escape(device))\n print('bus_regex:', bus_regex)\n\n\n # Send BUS regex to generator to get BUS info of ERROR\n bus = matcher.send(bus_regex)\n\n # Send SERIAL regex to generator to get SERIAL NO of DEVICE in ERROR\n serial_regex = '{} \\(SERIAL=([^)]*)\\)'.format(bus)\n print('serial_regex:', serial_regex)\n serial = matcher.send(serial_regex)\n yield serial\n\n # Send ERROR regex to generator to get next DEVICE in ERROR\n device = matcher.send(ERROR_RE)\n\ndef main():\n filename = 'iter2/log2.txt'\n print('List of serial no found: ')\n for serial in get_serials(filename=filename):\n print(serial)\n\nif __name__ == '__main__':\n main()",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
#train a neural network from input video feed
import numpy as np
import cv2
vid = cv2.VideoCapture('trackmania_test_vid.mp4')
# Target frame size: half of the 1280x720 source resolution.
w = 1280//2
h = 720//2

# Accumulator for resized frames, stacked along axis 0 (rows).
# NOTE(review): np.empty seeds this with one uninitialised 360x640x3 garbage
# frame — presumably unintended; confirm before relying on frame counts.
vid_data = np.empty((360, 640, 3))
#print(vid_data.shape)
def process_frame(img):
    """Resize *img* to (w, h), display it briefly, and stack it onto vid_data."""
    global vid_data
    resized = cv2.resize(img, (w, h))
    cv2.imshow('Frame', resized)
    cv2.waitKey(1)
    # np.append copies the whole accumulator on every call (quadratic overall),
    # but the resulting row-stacked layout is what the reshape below expects.
    vid_data = np.append(vid_data, resized, axis=0)
# Read frames until the video is exhausted; n counts processed frames.
n = 0
while vid.isOpened():
    # Capture frame-by-frame; ret is False once the stream ends.
    ret, frame = vid.read()
    if ret:
        #print("frame = {}".format(frame.shape))
        process_frame(frame)
        n = n + 1
        '''
        cv2.imshow('Frame', frame)
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break
        '''
    else:
        break

# Release the capture handle once every frame has been read.
vid.release()

# Close any OpenCV display windows opened by process_frame.
cv2.destroyAllWindows()
print(vid_data.shape)
# Flatten each row of pixels so the array is 2-D and savetxt-compatible.
vid_data = vid_data.reshape((vid_data.shape[0], -1))
print(vid_data.shape)
# n = 1340
#print('No. of frames = {}'.format(n))

np.savetxt("trackmania_vid_data2D_360x640.csv", vid_data, delimiter=",")

# Shape bookkeeping from earlier runs (stacked rows ---> frames,h,w,3):
#50580,320,3 ---> 281,180,320,3
#101160,640,3 ---> 281,360,640,3
|
normal
|
{
"blob_id": "eb81b0e41743e1785b82e88f6a618dc91eba73e5",
"index": 1389,
"step-1": "<mask token>\n\n\ndef process_frame(img):\n global vid_data\n img = cv2.resize(img, (w, h))\n cv2.imshow('Frame', img)\n cv2.waitKey(1)\n vid_data = np.append(vid_data, img, axis=0)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef process_frame(img):\n global vid_data\n img = cv2.resize(img, (w, h))\n cv2.imshow('Frame', img)\n cv2.waitKey(1)\n vid_data = np.append(vid_data, img, axis=0)\n\n\n<mask token>\nwhile vid.isOpened():\n ret, frame = vid.read()\n if ret:\n process_frame(frame)\n n = n + 1\n \"\"\"\n cv2.imshow('Frame', frame)\n if cv2.waitKey(25) & 0xFF == ord('q'):\n break\n \"\"\"\n else:\n break\nvid.release()\ncv2.destroyAllWindows()\nprint(vid_data.shape)\n<mask token>\nprint(vid_data.shape)\nnp.savetxt('trackmania_vid_data2D_360x640.csv', vid_data, delimiter=',')\n",
"step-3": "<mask token>\nvid = cv2.VideoCapture('trackmania_test_vid.mp4')\nw = 1280 // 2\nh = 720 // 2\nvid_data = np.empty((360, 640, 3))\n\n\ndef process_frame(img):\n global vid_data\n img = cv2.resize(img, (w, h))\n cv2.imshow('Frame', img)\n cv2.waitKey(1)\n vid_data = np.append(vid_data, img, axis=0)\n\n\nn = 0\nwhile vid.isOpened():\n ret, frame = vid.read()\n if ret:\n process_frame(frame)\n n = n + 1\n \"\"\"\n cv2.imshow('Frame', frame)\n if cv2.waitKey(25) & 0xFF == ord('q'):\n break\n \"\"\"\n else:\n break\nvid.release()\ncv2.destroyAllWindows()\nprint(vid_data.shape)\nvid_data = vid_data.reshape((vid_data.shape[0], -1))\nprint(vid_data.shape)\nnp.savetxt('trackmania_vid_data2D_360x640.csv', vid_data, delimiter=',')\n",
"step-4": "import numpy as np\nimport cv2\nvid = cv2.VideoCapture('trackmania_test_vid.mp4')\nw = 1280 // 2\nh = 720 // 2\nvid_data = np.empty((360, 640, 3))\n\n\ndef process_frame(img):\n global vid_data\n img = cv2.resize(img, (w, h))\n cv2.imshow('Frame', img)\n cv2.waitKey(1)\n vid_data = np.append(vid_data, img, axis=0)\n\n\nn = 0\nwhile vid.isOpened():\n ret, frame = vid.read()\n if ret:\n process_frame(frame)\n n = n + 1\n \"\"\"\n cv2.imshow('Frame', frame)\n if cv2.waitKey(25) & 0xFF == ord('q'):\n break\n \"\"\"\n else:\n break\nvid.release()\ncv2.destroyAllWindows()\nprint(vid_data.shape)\nvid_data = vid_data.reshape((vid_data.shape[0], -1))\nprint(vid_data.shape)\nnp.savetxt('trackmania_vid_data2D_360x640.csv', vid_data, delimiter=',')\n",
"step-5": "#train a neural network from input video feed\nimport numpy as np\nimport cv2\nvid = cv2.VideoCapture('trackmania_test_vid.mp4')\nw = 1280//2\nh = 720//2\n\nvid_data = np.empty((360, 640, 3))\n#print(vid_data.shape)\n\n\ndef process_frame(img):\n global vid_data\n img = cv2.resize(img, (w, h))\n cv2.imshow('Frame', img)\n cv2.waitKey(1)\n vid_data = np.append(vid_data, img, axis=0)\n #print(img.shape)\n\n\n# Read until video is completed\nn = 0\nwhile vid.isOpened():\n # Capture frame-by-frame\n ret, frame = vid.read()\n if ret:\n #print(\"frame = {}\".format(frame.shape))\n process_frame(frame)\n n = n + 1\n '''\n cv2.imshow('Frame', frame)\n if cv2.waitKey(25) & 0xFF == ord('q'):\n break\n '''\n else:\n break\n\n# When everything done, release the video capture object\nvid.release()\n\n# Closes all the frames\ncv2.destroyAllWindows()\nprint(vid_data.shape)\nvid_data = vid_data.reshape((vid_data.shape[0], -1))\nprint(vid_data.shape)\n# n = 1340\n#print('No. of frames = {}'.format(n))\n\nnp.savetxt(\"trackmania_vid_data2D_360x640.csv\", vid_data, delimiter=\",\")\n\n#50580,320,3 ---> 281,180,320,3\n#101160,640,3 ---> 281,360,640,3\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from .celery import app
from home.models import Banner
from settings.const import BANNER_COUNT
from home.serializers import BannerModelSerializer
from django.core.cache import cache
from django.conf import settings
@app.task
def update_banner_list():
    """Refresh the cached home-page banner list from the database.

    Celery task: re-reads the top visible, non-deleted banners, prefixes
    each image path with the backend base URL, and overwrites the
    'banner_list' cache key. Returns True on completion.
    """
    # Highest-ordered visible banners first, capped at the configured count.
    queryset = Banner.objects.filter(
        is_delete=False, is_show=True
    ).order_by('-orders')[:BANNER_COUNT]
    serialized = BannerModelSerializer(queryset, many=True).data
    # The serializer emits relative paths; make them absolute for the front end.
    for item in serialized:
        item['image'] = settings.END_BASE_URL + item['image']
    cache.set('banner_list', serialized)
    return True
|
normal
|
{
"blob_id": "8e85740123467889bdeb6b27d5eaa4b39df280ed",
"index": 438,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@app.task\ndef update_banner_list():\n banner_query = Banner.objects.filter(is_delete=False, is_show=True\n ).order_by('-orders')[:BANNER_COUNT]\n banner_data = BannerModelSerializer(banner_query, many=True).data\n for banner in banner_data:\n banner['image'] = settings.END_BASE_URL + banner['image']\n cache.set('banner_list', banner_data)\n return True\n",
"step-3": "from .celery import app\nfrom home.models import Banner\nfrom settings.const import BANNER_COUNT\nfrom home.serializers import BannerModelSerializer\nfrom django.core.cache import cache\nfrom django.conf import settings\n\n\n@app.task\ndef update_banner_list():\n banner_query = Banner.objects.filter(is_delete=False, is_show=True\n ).order_by('-orders')[:BANNER_COUNT]\n banner_data = BannerModelSerializer(banner_query, many=True).data\n for banner in banner_data:\n banner['image'] = settings.END_BASE_URL + banner['image']\n cache.set('banner_list', banner_data)\n return True\n",
"step-4": "from .celery import app\n\nfrom home.models import Banner\nfrom settings.const import BANNER_COUNT\nfrom home.serializers import BannerModelSerializer\nfrom django.core.cache import cache\nfrom django.conf import settings\n@app.task\ndef update_banner_list():\n # 获取最新内容\n banner_query = Banner.objects.filter(is_delete=False, is_show=True).order_by('-orders')[:BANNER_COUNT]\n # 序列化\n banner_data = BannerModelSerializer(banner_query, many=True).data\n for banner in banner_data:\n banner['image'] = settings.END_BASE_URL + banner['image']\n # 更新缓存\n cache.set('banner_list', banner_data)\n return True\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class MyMainWindow(QMainWindow):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def initConnect(self):
self.dataFileChooseButton.clicked.connect(self.chooseData)
self.dataFileChooseButtonT.clicked.connect(self.chooseData)
self.dataLossSimulateSettingButton.clicked.connect(self.
setLossParameter)
self.dataLossSimulateSettingButtonT.clicked.connect(self.
setLossParameter)
self.dataShowButton.clicked.connect(self.showData)
self.dataShowButtonT.clicked.connect(self.showData)
self.dataPreProcessButtonT.clicked.connect(self.preProcess)
self.setModelParametersButton.clicked.connect(self.setModelParameters)
self.setModelParametersButtonT.clicked.connect(self.setModelParameters)
self.trainingButton.clicked.connect(self.training)
self.trainingButtonT.clicked.connect(self.training)
self.saveModelButton.clicked.connect(self.saveModel)
self.saveModelButtonT.clicked.connect(self.saveModel)
self.loadModelButton.clicked.connect(self.loadModel)
self.loadModelButtonT.clicked.connect(self.loadModel)
self.showResultButton.clicked.connect(self.showResult)
self.showResultButtonT.clicked.connect(self.showResult)
self.judgeResultButton.clicked.connect(self.showJudge)
self.judgeResultButtonT.clicked.connect(self.showJudge)
def chooseData(self):
if self.sender() is self.dataFileChooseButton:
self.fname['New'], ok = QFileDialog.getOpenFileName(self,
'Open file', '..', 'Text files (*.txt)')
if ok:
self.loadData()
elif self.sender() is self.dataFileChooseButtonT:
self.fname['Tra'], ok = QFileDialog.getOpenFileName(self,
'Open file', '..', 'Text files (*.txt)')
if ok:
self.loadData()
return
def loadData(self):
if self.sender() is self.dataFileChooseButton:
try:
self.dataFor['New'] = myLoadData.loadData(self.fname['New'],
self.dataLossRate['New'], self.dataSetLossValue['New'])
except FileNotFoundError as e:
reply = QMessageBox.information(self, 'Message',
'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)
return
except Exception:
reply = QMessageBox.information(self, 'Message',
'Data file format error', QMessageBox.Yes, QMessageBox.Yes)
return
dataname = self.fname['New'].split('/')[-1].split('.')[0]
self.presentDataName.setText(dataname)
self.presentDataName.resize(self.presentDataName.sizeHint())
elif self.sender() is self.dataFileChooseButtonT:
try:
self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'],
self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])
except FileNotFoundError as e:
reply = QMessageBox.information(self, 'Message',
'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)
return
except Exception:
reply = QMessageBox.information(self, 'Message',
'Data file format error', QMessageBox.Yes, QMessageBox.Yes)
return
dataname = self.fname['Tra'].split('/')[-1].split('.')[0]
self.presentDataNameT.setText(dataname)
self.presentDataNameT.resize(self.presentDataNameT.sizeHint())
return
def setLossParameter(self):
if self.sender() is self.dataLossSimulateSettingButton:
self.setLPDialog = setLossParameterDialog.setLossParameterDialog(
'combine-CNN设置缺失参数', self, 'New')
elif self.sender() is self.dataLossSimulateSettingButtonT:
self.setLPDialog = setLossParameterDialog.setLossParameterDialog(
'traditional NN设置缺失参数', self, 'Tra')
return
<|reserved_special_token_0|>
def preProcess(self):
if self.dataFor['Tra'] is None:
reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',
QMessageBox.Yes, QMessageBox.Yes)
else:
self.dataFor['Tra'].MeanPreProcess()
reply = QMessageBox.information(self, 'Message',
'PreProcess succeed!', QMessageBox.Yes, QMessageBox.Yes)
return
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def saveModel(self):
if self.sender() is self.saveModelButton:
if self.mcbcnn is None:
reply = QMessageBox.information(self, '模型错误', '模型不存在',
QMessageBox.Yes, QMessageBox.Yes)
return
else:
fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',
'..\\myCombineCNN.cbcnn.json',
'Combine-CNN json files (*.cbcnn.json)')
if ok:
succeed = self.mcbcnn.saveModel(fname)
if succeed:
reply = QMessageBox.information(self, '保存结果',
'模型保存成功', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果',
'模型保存失败', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果', '模型保存失败',
QMessageBox.Yes, QMessageBox.Yes)
elif self.sender() is self.saveModelButtonT:
if self.trann is None:
reply = QMessageBox.information(self, '模型错误', '模型不存在',
QMessageBox.Yes, QMessageBox.Yes)
return
else:
fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',
'..\\traditionalNN.trann.json',
'Traditional NN json files (*.trann.json)')
if ok:
succeed = self.trann.saveModel(fname)
if succeed:
reply = QMessageBox.information(self, '保存结果',
'模型保存成功', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果',
'模型保存失败', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果', '模型保存失败',
QMessageBox.Yes, QMessageBox.Yes)
<|reserved_special_token_0|>
def showResult(self):
if self.sender() is self.showResultButton:
if self.traingWidgetOnFlag['New']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.showResultW = showResultWidget.ShowResultWidget(
'combine-CNN预测结果展示', self, 'New')
elif self.sender() is self.showResultButtonT:
if self.traingWidgetOnFlag['Tra']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.showResultW = showResultWidget.ShowResultWidget(
'traditional NN预测结果展示', self, 'Tra')
return
def showJudge(self):
if self.sender() is self.judgeResultButton:
if self.traingWidgetOnFlag['New']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.chooseJDWin = (chooseJudgeDataSetWidget.
chooseJudgeDataSetWidget(
'Choose Judgement-based-on Data Set', self, 'New'))
elif self.sender() is self.judgeResultButtonT:
if self.traingWidgetOnFlag['Tra']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.chooseJDWin = (chooseJudgeDataSetWidget.
chooseJudgeDataSetWidget(
'Choose Judgement-based-on Data Set', self, 'Tra'))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MyMainWindow(QMainWindow):
<|reserved_special_token_0|>
    def initUI(self):
        """Build the whole main-window UI.

        The window is split into two horizontal panels separated by a thin
        black line: the top panel drives the combine-CNN model, the bottom
        panel drives the traditional NN.  Each panel has three columns:
        data handling, model training, and result display.  Widgets for the
        traditional NN carry a trailing ``T`` in their attribute names.
        """
        self.statusBar().showMessage('Ready')
        # --- combine-CNN panel: data column ---------------------------------
        dataModule = QVBoxLayout()
        self.dataFileChooseButton = QPushButton('选择数据')
        self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))
        self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')
        self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))
        self.dataShowButton = QPushButton('展示数据')
        self.dataShowButton.setFont(QFont('微软雅黑', 16))
        label = QLabel('Present Data:')
        label.setFont(QFont('微软雅黑', 16))
        # Shows the currently loaded data set's name ('None' until loaded).
        self.presentDataName = QLabel('None')
        self.presentDataName.setFont(QFont('微软雅黑', 16))
        labelbox = QVBoxLayout()
        labelbox.addWidget(label)
        labelbox.addWidget(self.presentDataName)
        dataModule.addStretch(1)
        dataModule.addLayout(labelbox)
        dataModule.addStretch(1)
        dataModule.addWidget(self.dataFileChooseButton)
        dataModule.addStretch(1)
        dataModule.addWidget(self.dataLossSimulateSettingButton)
        dataModule.addStretch(1)
        dataModule.addWidget(self.dataShowButton)
        dataModule.addStretch(1)
        # --- combine-CNN panel: training column -----------------------------
        trainingModule = QVBoxLayout()
        self.setModelParametersButton = QPushButton('Model Parameters')
        self.setModelParametersButton.setFont(QFont('微软雅黑', 16))
        self.trainingButton = QPushButton('Training')
        self.trainingButton.setFont(QFont('微软雅黑', 16))
        self.saveModelButton = QPushButton('Save Model')
        self.saveModelButton.setFont(QFont('微软雅黑', 16))
        self.loadModelButton = QPushButton('Load Model')
        self.loadModelButton.setFont(QFont('微软雅黑', 16))
        label = QLabel('Present Model:')
        label.setFont(QFont('微软雅黑', 16))
        # Shows the currently loaded/trained model's name.
        self.presentModelName = QLabel('None')
        self.presentModelName.setFont(QFont('微软雅黑', 16))
        labelbox = QVBoxLayout()
        labelbox.addWidget(label)
        labelbox.addWidget(self.presentModelName)
        trainingModule.addStretch(1)
        trainingModule.addLayout(labelbox)
        trainingModule.addStretch(1)
        trainingModule.addWidget(self.setModelParametersButton)
        trainingModule.addStretch(1)
        trainingModule.addWidget(self.trainingButton)
        trainingModule.addStretch(1)
        trainingModule.addWidget(self.saveModelButton)
        trainingModule.addStretch(1)
        trainingModule.addWidget(self.loadModelButton)
        trainingModule.addStretch(1)
        # --- combine-CNN panel: result column -------------------------------
        resultShowModule = QVBoxLayout()
        self.showResultButton = QPushButton('分类结果展示')
        self.showResultButton.setFont(QFont('微软雅黑', 16))
        self.judgeResultButton = QPushButton('分类结果评估')
        self.judgeResultButton.setFont(QFont('微软雅黑', 16))
        resultShowModule.addWidget(self.showResultButton)
        resultShowModule.addWidget(self.judgeResultButton)
        # Assemble the top (combine-CNN) row.
        hboxTop = QHBoxLayout()
        hboxTop.addStretch(1)
        mcnnLabel = QLabel('Combine-CNN:')
        mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))
        hboxTop.addWidget(mcnnLabel)
        hboxTop.addStretch(1)
        hboxTop.addLayout(dataModule)
        hboxTop.addStretch(1)
        hboxTop.addLayout(trainingModule)
        hboxTop.addStretch(1)
        hboxTop.addLayout(resultShowModule)
        hboxTop.addStretch(1)
        # --- traditional-NN panel: data column (extra pre-process button) ---
        dataModuleT = QVBoxLayout()
        self.dataFileChooseButtonT = QPushButton('选择数据')
        self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))
        self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')
        self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))
        self.dataPreProcessButtonT = QPushButton('数据预处理')
        self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))
        self.dataShowButtonT = QPushButton('展示数据')
        self.dataShowButtonT.setFont(QFont('微软雅黑', 16))
        label = QLabel('Present Data:')
        label.setFont(QFont('微软雅黑', 16))
        self.presentDataNameT = QLabel('None')
        self.presentDataNameT.setFont(QFont('微软雅黑', 16))
        labelbox = QVBoxLayout()
        labelbox.addWidget(label)
        labelbox.addWidget(self.presentDataNameT)
        dataModuleT.addStretch(1)
        dataModuleT.addLayout(labelbox)
        dataModuleT.addStretch(1)
        dataModuleT.addWidget(self.dataFileChooseButtonT)
        dataModuleT.addStretch(1)
        dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)
        dataModuleT.addStretch(1)
        dataModuleT.addWidget(self.dataPreProcessButtonT)
        dataModuleT.addStretch(1)
        dataModuleT.addWidget(self.dataShowButtonT)
        dataModuleT.addStretch(1)
        # --- traditional-NN panel: training column --------------------------
        trainingModuleT = QVBoxLayout()
        self.setModelParametersButtonT = QPushButton('Model Parameters')
        self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))
        self.trainingButtonT = QPushButton('Training')
        self.trainingButtonT.setFont(QFont('微软雅黑', 16))
        self.saveModelButtonT = QPushButton('Save Model')
        self.saveModelButtonT.setFont(QFont('微软雅黑', 16))
        self.loadModelButtonT = QPushButton('Load Model')
        self.loadModelButtonT.setFont(QFont('微软雅黑', 16))
        label = QLabel('Present Model:')
        label.setFont(QFont('微软雅黑', 16))
        self.presentModelNameT = QLabel('None')
        self.presentModelNameT.setFont(QFont('微软雅黑', 16))
        labelbox = QVBoxLayout()
        labelbox.addWidget(label)
        labelbox.addWidget(self.presentModelNameT)
        trainingModuleT.addStretch(1)
        trainingModuleT.addLayout(labelbox)
        trainingModuleT.addStretch(1)
        trainingModuleT.addWidget(self.setModelParametersButtonT)
        trainingModuleT.addStretch(1)
        trainingModuleT.addWidget(self.trainingButtonT)
        trainingModuleT.addStretch(1)
        trainingModuleT.addWidget(self.saveModelButtonT)
        trainingModuleT.addStretch(1)
        trainingModuleT.addWidget(self.loadModelButtonT)
        trainingModuleT.addStretch(1)
        # --- traditional-NN panel: result column ----------------------------
        resultShowModuleT = QVBoxLayout()
        self.showResultButtonT = QPushButton('分类结果展示')
        self.showResultButtonT.setFont(QFont('微软雅黑', 16))
        self.judgeResultButtonT = QPushButton('分类结果评估')
        self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))
        resultShowModuleT.addWidget(self.showResultButtonT)
        resultShowModuleT.addWidget(self.judgeResultButtonT)
        # Assemble the bottom (traditional NN) row.
        hboxBottom = QHBoxLayout(self)
        hboxBottom.addStretch(1)
        traditionNNLabel = QLabel('Traditional NN:')
        traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))
        hboxBottom.addWidget(traditionNNLabel)
        hboxBottom.addStretch(1)
        hboxBottom.addLayout(dataModuleT)
        hboxBottom.addStretch(1)
        hboxBottom.addLayout(trainingModuleT)
        hboxBottom.addStretch(1)
        hboxBottom.addLayout(resultShowModuleT)
        hboxBottom.addStretch(1)
        # Thin black separator rendered as a 1-pt QLabel with a black
        # background stylesheet.
        splitterLine = QLabel(self)
        splitterLine.setFont(QFont('Times', 1))
        col = QColor(0, 0, 0)
        splitterLine.setStyleSheet('QWidget { background-color: %s }' % col
            .name())
        splitterLine.resize(splitterLine.sizeHint())
        # Stack: top row / separator / bottom row, as the central widget.
        vbox = QVBoxLayout()
        vbox.addLayout(hboxTop)
        vbox.addWidget(splitterLine)
        vbox.addLayout(hboxBottom)
        mainWidget = QWidget()
        mainWidget.setLayout(vbox)
        self.setCentralWidget(mainWidget)
        self.setGeometry(350, 100, self.windowLength, self.windowHigh)
        self.setWindowTitle('适用于有缺失值数据集的神经网络系统')
        self.show()
def initConnect(self):
self.dataFileChooseButton.clicked.connect(self.chooseData)
self.dataFileChooseButtonT.clicked.connect(self.chooseData)
self.dataLossSimulateSettingButton.clicked.connect(self.
setLossParameter)
self.dataLossSimulateSettingButtonT.clicked.connect(self.
setLossParameter)
self.dataShowButton.clicked.connect(self.showData)
self.dataShowButtonT.clicked.connect(self.showData)
self.dataPreProcessButtonT.clicked.connect(self.preProcess)
self.setModelParametersButton.clicked.connect(self.setModelParameters)
self.setModelParametersButtonT.clicked.connect(self.setModelParameters)
self.trainingButton.clicked.connect(self.training)
self.trainingButtonT.clicked.connect(self.training)
self.saveModelButton.clicked.connect(self.saveModel)
self.saveModelButtonT.clicked.connect(self.saveModel)
self.loadModelButton.clicked.connect(self.loadModel)
self.loadModelButtonT.clicked.connect(self.loadModel)
self.showResultButton.clicked.connect(self.showResult)
self.showResultButtonT.clicked.connect(self.showResult)
self.judgeResultButton.clicked.connect(self.showJudge)
self.judgeResultButtonT.clicked.connect(self.showJudge)
def chooseData(self):
if self.sender() is self.dataFileChooseButton:
self.fname['New'], ok = QFileDialog.getOpenFileName(self,
'Open file', '..', 'Text files (*.txt)')
if ok:
self.loadData()
elif self.sender() is self.dataFileChooseButtonT:
self.fname['Tra'], ok = QFileDialog.getOpenFileName(self,
'Open file', '..', 'Text files (*.txt)')
if ok:
self.loadData()
return
def loadData(self):
if self.sender() is self.dataFileChooseButton:
try:
self.dataFor['New'] = myLoadData.loadData(self.fname['New'],
self.dataLossRate['New'], self.dataSetLossValue['New'])
except FileNotFoundError as e:
reply = QMessageBox.information(self, 'Message',
'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)
return
except Exception:
reply = QMessageBox.information(self, 'Message',
'Data file format error', QMessageBox.Yes, QMessageBox.Yes)
return
dataname = self.fname['New'].split('/')[-1].split('.')[0]
self.presentDataName.setText(dataname)
self.presentDataName.resize(self.presentDataName.sizeHint())
elif self.sender() is self.dataFileChooseButtonT:
try:
self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'],
self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])
except FileNotFoundError as e:
reply = QMessageBox.information(self, 'Message',
'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)
return
except Exception:
reply = QMessageBox.information(self, 'Message',
'Data file format error', QMessageBox.Yes, QMessageBox.Yes)
return
dataname = self.fname['Tra'].split('/')[-1].split('.')[0]
self.presentDataNameT.setText(dataname)
self.presentDataNameT.resize(self.presentDataNameT.sizeHint())
return
def setLossParameter(self):
if self.sender() is self.dataLossSimulateSettingButton:
self.setLPDialog = setLossParameterDialog.setLossParameterDialog(
'combine-CNN设置缺失参数', self, 'New')
elif self.sender() is self.dataLossSimulateSettingButtonT:
self.setLPDialog = setLossParameterDialog.setLossParameterDialog(
'traditional NN设置缺失参数', self, 'Tra')
return
def showData(self):
if self.sender() is self.dataShowButton:
self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示',
self, 'New')
elif self.sender() is self.dataShowButtonT:
self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示'
, self, 'Tra')
return
def preProcess(self):
if self.dataFor['Tra'] is None:
reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',
QMessageBox.Yes, QMessageBox.Yes)
else:
self.dataFor['Tra'].MeanPreProcess()
reply = QMessageBox.information(self, 'Message',
'PreProcess succeed!', QMessageBox.Yes, QMessageBox.Yes)
return
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def saveModel(self):
if self.sender() is self.saveModelButton:
if self.mcbcnn is None:
reply = QMessageBox.information(self, '模型错误', '模型不存在',
QMessageBox.Yes, QMessageBox.Yes)
return
else:
fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',
'..\\myCombineCNN.cbcnn.json',
'Combine-CNN json files (*.cbcnn.json)')
if ok:
succeed = self.mcbcnn.saveModel(fname)
if succeed:
reply = QMessageBox.information(self, '保存结果',
'模型保存成功', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果',
'模型保存失败', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果', '模型保存失败',
QMessageBox.Yes, QMessageBox.Yes)
elif self.sender() is self.saveModelButtonT:
if self.trann is None:
reply = QMessageBox.information(self, '模型错误', '模型不存在',
QMessageBox.Yes, QMessageBox.Yes)
return
else:
fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',
'..\\traditionalNN.trann.json',
'Traditional NN json files (*.trann.json)')
if ok:
succeed = self.trann.saveModel(fname)
if succeed:
reply = QMessageBox.information(self, '保存结果',
'模型保存成功', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果',
'模型保存失败', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果', '模型保存失败',
QMessageBox.Yes, QMessageBox.Yes)
<|reserved_special_token_0|>
def showResult(self):
if self.sender() is self.showResultButton:
if self.traingWidgetOnFlag['New']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.showResultW = showResultWidget.ShowResultWidget(
'combine-CNN预测结果展示', self, 'New')
elif self.sender() is self.showResultButtonT:
if self.traingWidgetOnFlag['Tra']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.showResultW = showResultWidget.ShowResultWidget(
'traditional NN预测结果展示', self, 'Tra')
return
def showJudge(self):
if self.sender() is self.judgeResultButton:
if self.traingWidgetOnFlag['New']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.chooseJDWin = (chooseJudgeDataSetWidget.
chooseJudgeDataSetWidget(
'Choose Judgement-based-on Data Set', self, 'New'))
elif self.sender() is self.judgeResultButtonT:
if self.traingWidgetOnFlag['Tra']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.chooseJDWin = (chooseJudgeDataSetWidget.
chooseJudgeDataSetWidget(
'Choose Judgement-based-on Data Set', self, 'Tra'))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MyMainWindow(QMainWindow):
<|reserved_special_token_0|>
def initUI(self):
self.statusBar().showMessage('Ready')
dataModule = QVBoxLayout()
self.dataFileChooseButton = QPushButton('选择数据')
self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))
self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')
self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))
self.dataShowButton = QPushButton('展示数据')
self.dataShowButton.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Data:')
label.setFont(QFont('微软雅黑', 16))
self.presentDataName = QLabel('None')
self.presentDataName.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentDataName)
dataModule.addStretch(1)
dataModule.addLayout(labelbox)
dataModule.addStretch(1)
dataModule.addWidget(self.dataFileChooseButton)
dataModule.addStretch(1)
dataModule.addWidget(self.dataLossSimulateSettingButton)
dataModule.addStretch(1)
dataModule.addWidget(self.dataShowButton)
dataModule.addStretch(1)
trainingModule = QVBoxLayout()
self.setModelParametersButton = QPushButton('Model Parameters')
self.setModelParametersButton.setFont(QFont('微软雅黑', 16))
self.trainingButton = QPushButton('Training')
self.trainingButton.setFont(QFont('微软雅黑', 16))
self.saveModelButton = QPushButton('Save Model')
self.saveModelButton.setFont(QFont('微软雅黑', 16))
self.loadModelButton = QPushButton('Load Model')
self.loadModelButton.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Model:')
label.setFont(QFont('微软雅黑', 16))
self.presentModelName = QLabel('None')
self.presentModelName.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentModelName)
trainingModule.addStretch(1)
trainingModule.addLayout(labelbox)
trainingModule.addStretch(1)
trainingModule.addWidget(self.setModelParametersButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.trainingButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.saveModelButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.loadModelButton)
trainingModule.addStretch(1)
resultShowModule = QVBoxLayout()
self.showResultButton = QPushButton('分类结果展示')
self.showResultButton.setFont(QFont('微软雅黑', 16))
self.judgeResultButton = QPushButton('分类结果评估')
self.judgeResultButton.setFont(QFont('微软雅黑', 16))
resultShowModule.addWidget(self.showResultButton)
resultShowModule.addWidget(self.judgeResultButton)
hboxTop = QHBoxLayout()
hboxTop.addStretch(1)
mcnnLabel = QLabel('Combine-CNN:')
mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))
hboxTop.addWidget(mcnnLabel)
hboxTop.addStretch(1)
hboxTop.addLayout(dataModule)
hboxTop.addStretch(1)
hboxTop.addLayout(trainingModule)
hboxTop.addStretch(1)
hboxTop.addLayout(resultShowModule)
hboxTop.addStretch(1)
dataModuleT = QVBoxLayout()
self.dataFileChooseButtonT = QPushButton('选择数据')
self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))
self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')
self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))
self.dataPreProcessButtonT = QPushButton('数据预处理')
self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))
self.dataShowButtonT = QPushButton('展示数据')
self.dataShowButtonT.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Data:')
label.setFont(QFont('微软雅黑', 16))
self.presentDataNameT = QLabel('None')
self.presentDataNameT.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentDataNameT)
dataModuleT.addStretch(1)
dataModuleT.addLayout(labelbox)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataFileChooseButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataPreProcessButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataShowButtonT)
dataModuleT.addStretch(1)
trainingModuleT = QVBoxLayout()
self.setModelParametersButtonT = QPushButton('Model Parameters')
self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))
self.trainingButtonT = QPushButton('Training')
self.trainingButtonT.setFont(QFont('微软雅黑', 16))
self.saveModelButtonT = QPushButton('Save Model')
self.saveModelButtonT.setFont(QFont('微软雅黑', 16))
self.loadModelButtonT = QPushButton('Load Model')
self.loadModelButtonT.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Model:')
label.setFont(QFont('微软雅黑', 16))
self.presentModelNameT = QLabel('None')
self.presentModelNameT.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentModelNameT)
trainingModuleT.addStretch(1)
trainingModuleT.addLayout(labelbox)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.setModelParametersButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.trainingButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.saveModelButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.loadModelButtonT)
trainingModuleT.addStretch(1)
resultShowModuleT = QVBoxLayout()
self.showResultButtonT = QPushButton('分类结果展示')
self.showResultButtonT.setFont(QFont('微软雅黑', 16))
self.judgeResultButtonT = QPushButton('分类结果评估')
self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))
resultShowModuleT.addWidget(self.showResultButtonT)
resultShowModuleT.addWidget(self.judgeResultButtonT)
hboxBottom = QHBoxLayout(self)
hboxBottom.addStretch(1)
traditionNNLabel = QLabel('Traditional NN:')
traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))
hboxBottom.addWidget(traditionNNLabel)
hboxBottom.addStretch(1)
hboxBottom.addLayout(dataModuleT)
hboxBottom.addStretch(1)
hboxBottom.addLayout(trainingModuleT)
hboxBottom.addStretch(1)
hboxBottom.addLayout(resultShowModuleT)
hboxBottom.addStretch(1)
splitterLine = QLabel(self)
splitterLine.setFont(QFont('Times', 1))
col = QColor(0, 0, 0)
splitterLine.setStyleSheet('QWidget { background-color: %s }' % col
.name())
splitterLine.resize(splitterLine.sizeHint())
vbox = QVBoxLayout()
vbox.addLayout(hboxTop)
vbox.addWidget(splitterLine)
vbox.addLayout(hboxBottom)
mainWidget = QWidget()
mainWidget.setLayout(vbox)
self.setCentralWidget(mainWidget)
self.setGeometry(350, 100, self.windowLength, self.windowHigh)
self.setWindowTitle('适用于有缺失值数据集的神经网络系统')
self.show()
def initConnect(self):
self.dataFileChooseButton.clicked.connect(self.chooseData)
self.dataFileChooseButtonT.clicked.connect(self.chooseData)
self.dataLossSimulateSettingButton.clicked.connect(self.
setLossParameter)
self.dataLossSimulateSettingButtonT.clicked.connect(self.
setLossParameter)
self.dataShowButton.clicked.connect(self.showData)
self.dataShowButtonT.clicked.connect(self.showData)
self.dataPreProcessButtonT.clicked.connect(self.preProcess)
self.setModelParametersButton.clicked.connect(self.setModelParameters)
self.setModelParametersButtonT.clicked.connect(self.setModelParameters)
self.trainingButton.clicked.connect(self.training)
self.trainingButtonT.clicked.connect(self.training)
self.saveModelButton.clicked.connect(self.saveModel)
self.saveModelButtonT.clicked.connect(self.saveModel)
self.loadModelButton.clicked.connect(self.loadModel)
self.loadModelButtonT.clicked.connect(self.loadModel)
self.showResultButton.clicked.connect(self.showResult)
self.showResultButtonT.clicked.connect(self.showResult)
self.judgeResultButton.clicked.connect(self.showJudge)
self.judgeResultButtonT.clicked.connect(self.showJudge)
def chooseData(self):
if self.sender() is self.dataFileChooseButton:
self.fname['New'], ok = QFileDialog.getOpenFileName(self,
'Open file', '..', 'Text files (*.txt)')
if ok:
self.loadData()
elif self.sender() is self.dataFileChooseButtonT:
self.fname['Tra'], ok = QFileDialog.getOpenFileName(self,
'Open file', '..', 'Text files (*.txt)')
if ok:
self.loadData()
return
def loadData(self):
if self.sender() is self.dataFileChooseButton:
try:
self.dataFor['New'] = myLoadData.loadData(self.fname['New'],
self.dataLossRate['New'], self.dataSetLossValue['New'])
except FileNotFoundError as e:
reply = QMessageBox.information(self, 'Message',
'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)
return
except Exception:
reply = QMessageBox.information(self, 'Message',
'Data file format error', QMessageBox.Yes, QMessageBox.Yes)
return
dataname = self.fname['New'].split('/')[-1].split('.')[0]
self.presentDataName.setText(dataname)
self.presentDataName.resize(self.presentDataName.sizeHint())
elif self.sender() is self.dataFileChooseButtonT:
try:
self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'],
self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])
except FileNotFoundError as e:
reply = QMessageBox.information(self, 'Message',
'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)
return
except Exception:
reply = QMessageBox.information(self, 'Message',
'Data file format error', QMessageBox.Yes, QMessageBox.Yes)
return
dataname = self.fname['Tra'].split('/')[-1].split('.')[0]
self.presentDataNameT.setText(dataname)
self.presentDataNameT.resize(self.presentDataNameT.sizeHint())
return
def setLossParameter(self):
if self.sender() is self.dataLossSimulateSettingButton:
self.setLPDialog = setLossParameterDialog.setLossParameterDialog(
'combine-CNN设置缺失参数', self, 'New')
elif self.sender() is self.dataLossSimulateSettingButtonT:
self.setLPDialog = setLossParameterDialog.setLossParameterDialog(
'traditional NN设置缺失参数', self, 'Tra')
return
def showData(self):
if self.sender() is self.dataShowButton:
self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示',
self, 'New')
elif self.sender() is self.dataShowButtonT:
self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示'
, self, 'Tra')
return
def preProcess(self):
if self.dataFor['Tra'] is None:
reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',
QMessageBox.Yes, QMessageBox.Yes)
else:
self.dataFor['Tra'].MeanPreProcess()
reply = QMessageBox.information(self, 'Message',
'PreProcess succeed!', QMessageBox.Yes, QMessageBox.Yes)
return
<|reserved_special_token_0|>
def training(self):
if self.sender() is self.trainingButton:
if self.trainingW is not None:
self.trainingW.hide()
self.trainingW.show()
return
senderName = 'New'
elif self.sender() is self.trainingButtonT:
if self.trainingWT is not None:
self.trainingWT.hide()
self.trainingWT.show()
senderName = 'Tra'
if self.dataFor[senderName] is None:
reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法训练',
QMessageBox.Yes, QMessageBox.Yes)
return
elif senderName == 'New':
if self.dataFor[senderName].DataTrainX.shape[1
] < self.combineNumConv:
reply = QMessageBox.information(self, '参数错误',
'卷积层组合(卷积核)大小大于数据集特征数量', QMessageBox.Yes, QMessageBox.Yes)
return
if combineNumCalculate.combineNumCal(self.dataFor[senderName].
DataTrainX.shape[1], self.combineNumConv
) < self.combineNumPooling:
reply = QMessageBox.information(self, '参数错误',
'池化层组合(池化核)大小大于卷积层输出特征向量维度', QMessageBox.Yes,
QMessageBox.Yes)
return
if self.trainingWT is not None:
reply = QMessageBox.information(self, '提示',
'traditional NN训练正在进行,请等待其结束', QMessageBox.Yes,
QMessageBox.Yes)
return
self.trainingW = TrainingWidget.trainningWidget('combine-CNN训练',
self, senderName)
self.traingWidgetOnFlag[senderName] = False
elif senderName == 'Tra':
if self.trainingW is not None:
reply = QMessageBox.information(self, '提示',
'combine-CNN训练正在进行,请等待其结束', QMessageBox.Yes,
QMessageBox.Yes)
return
self.trainingWT = TrainingWidget.trainningWidget('traditional NN训练'
, self, senderName)
self.traingWidgetOnFlag[senderName] = False
return
def saveModel(self):
if self.sender() is self.saveModelButton:
if self.mcbcnn is None:
reply = QMessageBox.information(self, '模型错误', '模型不存在',
QMessageBox.Yes, QMessageBox.Yes)
return
else:
fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',
'..\\myCombineCNN.cbcnn.json',
'Combine-CNN json files (*.cbcnn.json)')
if ok:
succeed = self.mcbcnn.saveModel(fname)
if succeed:
reply = QMessageBox.information(self, '保存结果',
'模型保存成功', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果',
'模型保存失败', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果', '模型保存失败',
QMessageBox.Yes, QMessageBox.Yes)
elif self.sender() is self.saveModelButtonT:
if self.trann is None:
reply = QMessageBox.information(self, '模型错误', '模型不存在',
QMessageBox.Yes, QMessageBox.Yes)
return
else:
fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',
'..\\traditionalNN.trann.json',
'Traditional NN json files (*.trann.json)')
if ok:
succeed = self.trann.saveModel(fname)
if succeed:
reply = QMessageBox.information(self, '保存结果',
'模型保存成功', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果',
'模型保存失败', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果', '模型保存失败',
QMessageBox.Yes, QMessageBox.Yes)
<|reserved_special_token_0|>
def showResult(self):
if self.sender() is self.showResultButton:
if self.traingWidgetOnFlag['New']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.showResultW = showResultWidget.ShowResultWidget(
'combine-CNN预测结果展示', self, 'New')
elif self.sender() is self.showResultButtonT:
if self.traingWidgetOnFlag['Tra']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.showResultW = showResultWidget.ShowResultWidget(
'traditional NN预测结果展示', self, 'Tra')
return
def showJudge(self):
if self.sender() is self.judgeResultButton:
if self.traingWidgetOnFlag['New']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.chooseJDWin = (chooseJudgeDataSetWidget.
chooseJudgeDataSetWidget(
'Choose Judgement-based-on Data Set', self, 'New'))
elif self.sender() is self.judgeResultButtonT:
if self.traingWidgetOnFlag['Tra']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.chooseJDWin = (chooseJudgeDataSetWidget.
chooseJudgeDataSetWidget(
'Choose Judgement-based-on Data Set', self, 'Tra'))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MyMainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.windowLength = 1250
self.windowHigh = 900
self.fname = dict()
self.fname['New'] = None
self.fname['Tra'] = None
self.dataLossRate = dict()
self.dataSetLossValue = dict()
self.dataFor = dict()
self.dataFor['New'] = None
self.dataLossRate['New'] = 0.0
self.dataSetLossValue['New'] = 0.0
self.dataFor['Tra'] = None
self.dataLossRate['Tra'] = 0.0
self.dataSetLossValue['Tra'] = 0.0
self.traingWidgetOnFlag = dict()
self.traingWidgetOnFlag['New'] = False
self.traingWidgetOnFlag['Tra'] = False
self.combineNumConv = 2
self.convCoreNum = 5
self.combineNumPooling = 4
self.fullConnectOutInRate = 0.5
self.mcbcnn = None
self.trann = None
self.trainingW = None
self.trainingWT = None
self.initUI()
self.initConnect()
def initUI(self):
self.statusBar().showMessage('Ready')
dataModule = QVBoxLayout()
self.dataFileChooseButton = QPushButton('选择数据')
self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))
self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')
self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))
self.dataShowButton = QPushButton('展示数据')
self.dataShowButton.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Data:')
label.setFont(QFont('微软雅黑', 16))
self.presentDataName = QLabel('None')
self.presentDataName.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentDataName)
dataModule.addStretch(1)
dataModule.addLayout(labelbox)
dataModule.addStretch(1)
dataModule.addWidget(self.dataFileChooseButton)
dataModule.addStretch(1)
dataModule.addWidget(self.dataLossSimulateSettingButton)
dataModule.addStretch(1)
dataModule.addWidget(self.dataShowButton)
dataModule.addStretch(1)
trainingModule = QVBoxLayout()
self.setModelParametersButton = QPushButton('Model Parameters')
self.setModelParametersButton.setFont(QFont('微软雅黑', 16))
self.trainingButton = QPushButton('Training')
self.trainingButton.setFont(QFont('微软雅黑', 16))
self.saveModelButton = QPushButton('Save Model')
self.saveModelButton.setFont(QFont('微软雅黑', 16))
self.loadModelButton = QPushButton('Load Model')
self.loadModelButton.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Model:')
label.setFont(QFont('微软雅黑', 16))
self.presentModelName = QLabel('None')
self.presentModelName.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentModelName)
trainingModule.addStretch(1)
trainingModule.addLayout(labelbox)
trainingModule.addStretch(1)
trainingModule.addWidget(self.setModelParametersButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.trainingButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.saveModelButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.loadModelButton)
trainingModule.addStretch(1)
resultShowModule = QVBoxLayout()
self.showResultButton = QPushButton('分类结果展示')
self.showResultButton.setFont(QFont('微软雅黑', 16))
self.judgeResultButton = QPushButton('分类结果评估')
self.judgeResultButton.setFont(QFont('微软雅黑', 16))
resultShowModule.addWidget(self.showResultButton)
resultShowModule.addWidget(self.judgeResultButton)
hboxTop = QHBoxLayout()
hboxTop.addStretch(1)
mcnnLabel = QLabel('Combine-CNN:')
mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))
hboxTop.addWidget(mcnnLabel)
hboxTop.addStretch(1)
hboxTop.addLayout(dataModule)
hboxTop.addStretch(1)
hboxTop.addLayout(trainingModule)
hboxTop.addStretch(1)
hboxTop.addLayout(resultShowModule)
hboxTop.addStretch(1)
dataModuleT = QVBoxLayout()
self.dataFileChooseButtonT = QPushButton('选择数据')
self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))
self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')
self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))
self.dataPreProcessButtonT = QPushButton('数据预处理')
self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))
self.dataShowButtonT = QPushButton('展示数据')
self.dataShowButtonT.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Data:')
label.setFont(QFont('微软雅黑', 16))
self.presentDataNameT = QLabel('None')
self.presentDataNameT.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentDataNameT)
dataModuleT.addStretch(1)
dataModuleT.addLayout(labelbox)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataFileChooseButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataPreProcessButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataShowButtonT)
dataModuleT.addStretch(1)
trainingModuleT = QVBoxLayout()
self.setModelParametersButtonT = QPushButton('Model Parameters')
self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))
self.trainingButtonT = QPushButton('Training')
self.trainingButtonT.setFont(QFont('微软雅黑', 16))
self.saveModelButtonT = QPushButton('Save Model')
self.saveModelButtonT.setFont(QFont('微软雅黑', 16))
self.loadModelButtonT = QPushButton('Load Model')
self.loadModelButtonT.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Model:')
label.setFont(QFont('微软雅黑', 16))
self.presentModelNameT = QLabel('None')
self.presentModelNameT.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentModelNameT)
trainingModuleT.addStretch(1)
trainingModuleT.addLayout(labelbox)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.setModelParametersButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.trainingButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.saveModelButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.loadModelButtonT)
trainingModuleT.addStretch(1)
resultShowModuleT = QVBoxLayout()
self.showResultButtonT = QPushButton('分类结果展示')
self.showResultButtonT.setFont(QFont('微软雅黑', 16))
self.judgeResultButtonT = QPushButton('分类结果评估')
self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))
resultShowModuleT.addWidget(self.showResultButtonT)
resultShowModuleT.addWidget(self.judgeResultButtonT)
hboxBottom = QHBoxLayout(self)
hboxBottom.addStretch(1)
traditionNNLabel = QLabel('Traditional NN:')
traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))
hboxBottom.addWidget(traditionNNLabel)
hboxBottom.addStretch(1)
hboxBottom.addLayout(dataModuleT)
hboxBottom.addStretch(1)
hboxBottom.addLayout(trainingModuleT)
hboxBottom.addStretch(1)
hboxBottom.addLayout(resultShowModuleT)
hboxBottom.addStretch(1)
splitterLine = QLabel(self)
splitterLine.setFont(QFont('Times', 1))
col = QColor(0, 0, 0)
splitterLine.setStyleSheet('QWidget { background-color: %s }' % col
.name())
splitterLine.resize(splitterLine.sizeHint())
vbox = QVBoxLayout()
vbox.addLayout(hboxTop)
vbox.addWidget(splitterLine)
vbox.addLayout(hboxBottom)
mainWidget = QWidget()
mainWidget.setLayout(vbox)
self.setCentralWidget(mainWidget)
self.setGeometry(350, 100, self.windowLength, self.windowHigh)
self.setWindowTitle('适用于有缺失值数据集的神经网络系统')
self.show()
def initConnect(self):
self.dataFileChooseButton.clicked.connect(self.chooseData)
self.dataFileChooseButtonT.clicked.connect(self.chooseData)
self.dataLossSimulateSettingButton.clicked.connect(self.
setLossParameter)
self.dataLossSimulateSettingButtonT.clicked.connect(self.
setLossParameter)
self.dataShowButton.clicked.connect(self.showData)
self.dataShowButtonT.clicked.connect(self.showData)
self.dataPreProcessButtonT.clicked.connect(self.preProcess)
self.setModelParametersButton.clicked.connect(self.setModelParameters)
self.setModelParametersButtonT.clicked.connect(self.setModelParameters)
self.trainingButton.clicked.connect(self.training)
self.trainingButtonT.clicked.connect(self.training)
self.saveModelButton.clicked.connect(self.saveModel)
self.saveModelButtonT.clicked.connect(self.saveModel)
self.loadModelButton.clicked.connect(self.loadModel)
self.loadModelButtonT.clicked.connect(self.loadModel)
self.showResultButton.clicked.connect(self.showResult)
self.showResultButtonT.clicked.connect(self.showResult)
self.judgeResultButton.clicked.connect(self.showJudge)
self.judgeResultButtonT.clicked.connect(self.showJudge)
def chooseData(self):
if self.sender() is self.dataFileChooseButton:
self.fname['New'], ok = QFileDialog.getOpenFileName(self,
'Open file', '..', 'Text files (*.txt)')
if ok:
self.loadData()
elif self.sender() is self.dataFileChooseButtonT:
self.fname['Tra'], ok = QFileDialog.getOpenFileName(self,
'Open file', '..', 'Text files (*.txt)')
if ok:
self.loadData()
return
def loadData(self):
if self.sender() is self.dataFileChooseButton:
try:
self.dataFor['New'] = myLoadData.loadData(self.fname['New'],
self.dataLossRate['New'], self.dataSetLossValue['New'])
except FileNotFoundError as e:
reply = QMessageBox.information(self, 'Message',
'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)
return
except Exception:
reply = QMessageBox.information(self, 'Message',
'Data file format error', QMessageBox.Yes, QMessageBox.Yes)
return
dataname = self.fname['New'].split('/')[-1].split('.')[0]
self.presentDataName.setText(dataname)
self.presentDataName.resize(self.presentDataName.sizeHint())
elif self.sender() is self.dataFileChooseButtonT:
try:
self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'],
self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])
except FileNotFoundError as e:
reply = QMessageBox.information(self, 'Message',
'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)
return
except Exception:
reply = QMessageBox.information(self, 'Message',
'Data file format error', QMessageBox.Yes, QMessageBox.Yes)
return
dataname = self.fname['Tra'].split('/')[-1].split('.')[0]
self.presentDataNameT.setText(dataname)
self.presentDataNameT.resize(self.presentDataNameT.sizeHint())
return
def setLossParameter(self):
if self.sender() is self.dataLossSimulateSettingButton:
self.setLPDialog = setLossParameterDialog.setLossParameterDialog(
'combine-CNN设置缺失参数', self, 'New')
elif self.sender() is self.dataLossSimulateSettingButtonT:
self.setLPDialog = setLossParameterDialog.setLossParameterDialog(
'traditional NN设置缺失参数', self, 'Tra')
return
def showData(self):
if self.sender() is self.dataShowButton:
self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示',
self, 'New')
elif self.sender() is self.dataShowButtonT:
self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示'
, self, 'Tra')
return
def preProcess(self):
if self.dataFor['Tra'] is None:
reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',
QMessageBox.Yes, QMessageBox.Yes)
else:
self.dataFor['Tra'].MeanPreProcess()
reply = QMessageBox.information(self, 'Message',
'PreProcess succeed!', QMessageBox.Yes, QMessageBox.Yes)
return
def setModelParameters(self):
if self.sender() is self.setModelParametersButton:
self.setModelParaW = (setModelParametersDialog.
setLossParameterDialog('combine-CNN模型参数设置', self, 'New'))
elif self.sender() is self.setModelParametersButtonT:
self.setModelParaW = (setModelParametersDialog.
setLossParameterDialog('traditional NN模型参数设置', self, 'Tra'))
def training(self):
if self.sender() is self.trainingButton:
if self.trainingW is not None:
self.trainingW.hide()
self.trainingW.show()
return
senderName = 'New'
elif self.sender() is self.trainingButtonT:
if self.trainingWT is not None:
self.trainingWT.hide()
self.trainingWT.show()
senderName = 'Tra'
if self.dataFor[senderName] is None:
reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法训练',
QMessageBox.Yes, QMessageBox.Yes)
return
elif senderName == 'New':
if self.dataFor[senderName].DataTrainX.shape[1
] < self.combineNumConv:
reply = QMessageBox.information(self, '参数错误',
'卷积层组合(卷积核)大小大于数据集特征数量', QMessageBox.Yes, QMessageBox.Yes)
return
if combineNumCalculate.combineNumCal(self.dataFor[senderName].
DataTrainX.shape[1], self.combineNumConv
) < self.combineNumPooling:
reply = QMessageBox.information(self, '参数错误',
'池化层组合(池化核)大小大于卷积层输出特征向量维度', QMessageBox.Yes,
QMessageBox.Yes)
return
if self.trainingWT is not None:
reply = QMessageBox.information(self, '提示',
'traditional NN训练正在进行,请等待其结束', QMessageBox.Yes,
QMessageBox.Yes)
return
self.trainingW = TrainingWidget.trainningWidget('combine-CNN训练',
self, senderName)
self.traingWidgetOnFlag[senderName] = False
elif senderName == 'Tra':
if self.trainingW is not None:
reply = QMessageBox.information(self, '提示',
'combine-CNN训练正在进行,请等待其结束', QMessageBox.Yes,
QMessageBox.Yes)
return
self.trainingWT = TrainingWidget.trainningWidget('traditional NN训练'
, self, senderName)
self.traingWidgetOnFlag[senderName] = False
return
def saveModel(self):
if self.sender() is self.saveModelButton:
if self.mcbcnn is None:
reply = QMessageBox.information(self, '模型错误', '模型不存在',
QMessageBox.Yes, QMessageBox.Yes)
return
else:
fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',
'..\\myCombineCNN.cbcnn.json',
'Combine-CNN json files (*.cbcnn.json)')
if ok:
succeed = self.mcbcnn.saveModel(fname)
if succeed:
reply = QMessageBox.information(self, '保存结果',
'模型保存成功', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果',
'模型保存失败', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果', '模型保存失败',
QMessageBox.Yes, QMessageBox.Yes)
elif self.sender() is self.saveModelButtonT:
if self.trann is None:
reply = QMessageBox.information(self, '模型错误', '模型不存在',
QMessageBox.Yes, QMessageBox.Yes)
return
else:
fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',
'..\\traditionalNN.trann.json',
'Traditional NN json files (*.trann.json)')
if ok:
succeed = self.trann.saveModel(fname)
if succeed:
reply = QMessageBox.information(self, '保存结果',
'模型保存成功', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果',
'模型保存失败', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果', '模型保存失败',
QMessageBox.Yes, QMessageBox.Yes)
def loadModel(self):
if self.sender() is self.loadModelButton:
fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',
'..', 'Combine-CNN json files (*.cbcnn.json)')
if ok:
if self.mcbcnn is None:
self.mcbcnn = myCombineCNN.myCombineCNN(None, self.
combineNumConv, self.convCoreNum, self.
combineNumPooling)
succeed = self.mcbcnn.setModel(fname)
if succeed:
modelName = fname.split('/')[-1].split('.')[0]
self.presentModelName.setText(modelName)
reply = QMessageBox.information(self, '设置结果', '模型设置成功',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '设置结果', '模型设置失败',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '设置结果', '模型设置失败',
QMessageBox.Yes, QMessageBox.Yes)
elif self.sender() is self.loadModelButtonT:
fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',
'..', 'Traditional NN json files (*.trann.json)')
if ok:
if self.trann is None:
self.trann = traditionalNN.traditionalNN(None)
succeed = self.trann.setModel(fname)
if succeed:
modelName = fname.split('/')[-1].split('.')[0]
self.presentModelNameT.setText(modelName)
reply = QMessageBox.information(self, '设置结果', '模型设置成功',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '设置结果', '模型设置失败',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '设置结果', '模型设置失败',
QMessageBox.Yes, QMessageBox.Yes)
return
def showResult(self):
if self.sender() is self.showResultButton:
if self.traingWidgetOnFlag['New']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.showResultW = showResultWidget.ShowResultWidget(
'combine-CNN预测结果展示', self, 'New')
elif self.sender() is self.showResultButtonT:
if self.traingWidgetOnFlag['Tra']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.showResultW = showResultWidget.ShowResultWidget(
'traditional NN预测结果展示', self, 'Tra')
return
def showJudge(self):
if self.sender() is self.judgeResultButton:
if self.traingWidgetOnFlag['New']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.chooseJDWin = (chooseJudgeDataSetWidget.
chooseJudgeDataSetWidget(
'Choose Judgement-based-on Data Set', self, 'New'))
elif self.sender() is self.judgeResultButtonT:
if self.traingWidgetOnFlag['Tra']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.chooseJDWin = (chooseJudgeDataSetWidget.
chooseJudgeDataSetWidget(
'Choose Judgement-based-on Data Set', self, 'Tra'))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import sys
from PyQt5.QtWidgets import (QMainWindow, QWidget, QHBoxLayout, QVBoxLayout, QFrame,
QSplitter, QStyleFactory, QApplication, QPushButton, QTextEdit, QLabel, QFileDialog, QMessageBox)
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFont, QColor
import myLoadData
from UIPack import setLossParameterDialog, showDataWidget, setModelParametersDialog, TrainingWidget, showResultWidget,\
showJudgeWidgets, chooseJudgeDataSetWidget
from MyCombCNNPack import combineNumCalculate, myCombineCNN, traditionalNN, Judgement
class MyMainWindow(QMainWindow):
    def __init__(self):
        """Build the main window: default state, widgets, and signal wiring."""
        super().__init__()
        # Overall window size in pixels (used by initUI's setGeometry call).
        self.windowLength = 1250
        self.windowHigh = 900
        # Per-panel state, keyed by 'New' (combine-CNN panel) and
        # 'Tra' (traditional-NN panel).
        self.fname = dict()  # chosen data-file path per panel
        self.fname['New'] = None
        self.fname['Tra'] = None
        # Loss-simulation parameters passed straight to myLoadData.loadData —
        # presumably the fraction of values to drop and the placeholder value
        # for dropped entries (TODO confirm against myLoadData).
        self.dataLossRate = dict()
        self.dataSetLossValue = dict()
        self.dataFor = dict()  # loaded myLoadData.loadData object per panel
        self.dataFor['New'] = None
        self.dataLossRate['New'] = 0.
        self.dataSetLossValue['New'] = 0.
        self.dataFor['Tra'] = None
        self.dataLossRate['Tra'] = 0.
        self.dataSetLossValue['Tra'] = 0.
        # True while a training window is busy (sic: 'traing' is the name
        # used throughout the file, so it cannot be renamed here alone).
        self.traingWidgetOnFlag = dict()
        self.traingWidgetOnFlag['New'] = False
        self.traingWidgetOnFlag['Tra'] = False
        # combine-CNN hyper-parameters forwarded to myCombineCNN.
        self.combineNumConv = 2
        self.convCoreNum = 5
        self.combineNumPooling = 4
        self.fullConnectOutInRate = 0.5
        # Model and child-window handles; all created lazily on demand.
        self.mcbcnn = None  # combine-CNN model
        self.trann = None  # traditional-NN model
        self.trainingW = None  # combine-CNN training window
        self.trainingWT = None  # traditional-NN training window
        self.initUI()
        self.initConnect()
def initUI(self):
self.statusBar().showMessage('Ready')
####### data module #######
dataModule = QVBoxLayout()
self.dataFileChooseButton = QPushButton('选择数据')
self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))
self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')
self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))
self.dataShowButton = QPushButton('展示数据')
self.dataShowButton.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Data:')
label.setFont(QFont('微软雅黑', 16))
self.presentDataName = QLabel('None')
self.presentDataName.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentDataName)
dataModule.addStretch(1)
dataModule.addLayout(labelbox)
dataModule.addStretch(1)
dataModule.addWidget(self.dataFileChooseButton)
dataModule.addStretch(1)
dataModule.addWidget(self.dataLossSimulateSettingButton)
dataModule.addStretch(1)
dataModule.addWidget(self.dataShowButton)
dataModule.addStretch(1)
###### training module ########
trainingModule = QVBoxLayout()
self.setModelParametersButton = QPushButton('Model Parameters')
self.setModelParametersButton.setFont(QFont('微软雅黑', 16))
# self.setTrainingParametersButton = QPushButton('Trainning Parameters')
# self.setTrainingParametersButton.setFont(QFont('微软雅黑', 16))
self.trainingButton = QPushButton('Training')
self.trainingButton.setFont(QFont('微软雅黑', 16))
self.saveModelButton = QPushButton('Save Model')
self.saveModelButton.setFont(QFont('微软雅黑', 16))
self.loadModelButton = QPushButton('Load Model')
self.loadModelButton.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Model:')
label.setFont(QFont('微软雅黑', 16))
self.presentModelName = QLabel('None')
self.presentModelName.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentModelName)
trainingModule.addStretch(1)
trainingModule.addLayout(labelbox)
trainingModule.addStretch(1)
trainingModule.addWidget(self.setModelParametersButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.trainingButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.saveModelButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.loadModelButton)
trainingModule.addStretch(1)
############## new cnn result show ######
resultShowModule = QVBoxLayout()
self.showResultButton = QPushButton('分类结果展示')
self.showResultButton.setFont(QFont('微软雅黑', 16))
self.judgeResultButton = QPushButton('分类结果评估')
self.judgeResultButton.setFont(QFont('微软雅黑', 16))
resultShowModule.addWidget(self.showResultButton)
resultShowModule.addWidget(self.judgeResultButton)
################# new algorithm ui ##########
hboxTop = QHBoxLayout()
hboxTop.addStretch(1)
mcnnLabel = QLabel('Combine-CNN:')
mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))
hboxTop.addWidget(mcnnLabel)
hboxTop.addStretch(1)
hboxTop.addLayout(dataModule)
hboxTop.addStretch(1)
hboxTop.addLayout(trainingModule)
hboxTop.addStretch(1)
hboxTop.addLayout(resultShowModule)
hboxTop.addStretch(1)
#########traditional data module##########
dataModuleT = QVBoxLayout()
self.dataFileChooseButtonT = QPushButton('选择数据')
self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))
self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')
self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))
self.dataPreProcessButtonT = QPushButton('数据预处理')
self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))
self.dataShowButtonT = QPushButton('展示数据')
self.dataShowButtonT.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Data:')
label.setFont(QFont('微软雅黑', 16))
self.presentDataNameT = QLabel('None')
self.presentDataNameT.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentDataNameT)
dataModuleT.addStretch(1)
dataModuleT.addLayout(labelbox)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataFileChooseButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataPreProcessButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataShowButtonT)
dataModuleT.addStretch(1)
###### training module ########
trainingModuleT = QVBoxLayout()
self.setModelParametersButtonT = QPushButton('Model Parameters')
self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))
self.trainingButtonT = QPushButton('Training')
self.trainingButtonT.setFont(QFont('微软雅黑', 16))
self.saveModelButtonT = QPushButton('Save Model')
self.saveModelButtonT.setFont(QFont('微软雅黑', 16))
self.loadModelButtonT = QPushButton('Load Model')
self.loadModelButtonT.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Model:')
label.setFont(QFont('微软雅黑', 16))
self.presentModelNameT = QLabel('None')
self.presentModelNameT.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentModelNameT)
trainingModuleT.addStretch(1)
trainingModuleT.addLayout(labelbox)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.setModelParametersButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.trainingButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.saveModelButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.loadModelButtonT)
trainingModuleT.addStretch(1)
############## traditional nn result show ######
resultShowModuleT = QVBoxLayout()
self.showResultButtonT = QPushButton('分类结果展示')
self.showResultButtonT.setFont(QFont('微软雅黑', 16))
self.judgeResultButtonT = QPushButton('分类结果评估')
self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))
resultShowModuleT.addWidget(self.showResultButtonT)
resultShowModuleT.addWidget(self.judgeResultButtonT)
####### traditional algorithm #########
hboxBottom = QHBoxLayout(self)
hboxBottom.addStretch(1)
traditionNNLabel = QLabel('Traditional NN:')
traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))
hboxBottom.addWidget(traditionNNLabel)
hboxBottom.addStretch(1)
hboxBottom.addLayout(dataModuleT)
hboxBottom.addStretch(1)
hboxBottom.addLayout(trainingModuleT)
hboxBottom.addStretch(1)
hboxBottom.addLayout(resultShowModuleT)
hboxBottom.addStretch(1)
########## whole frame layout ########
splitterLine = QLabel(self)
splitterLine.setFont(QFont('Times', 1))
col = QColor(0, 0, 0)
splitterLine.setStyleSheet("QWidget { background-color: %s }" % col.name())
splitterLine.resize(splitterLine.sizeHint())
vbox = QVBoxLayout()
vbox.addLayout(hboxTop)
# vbox.addWidget(QLabel(str('_'*int(self.width()/3))))
vbox.addWidget(splitterLine)
vbox.addLayout(hboxBottom)
mainWidget = QWidget()
mainWidget.setLayout(vbox)
self.setCentralWidget(mainWidget)
self.setGeometry(350, 100, self.windowLength, self.windowHigh)
self.setWindowTitle('适用于有缺失值数据集的神经网络系统')
self.show()
def initConnect(self):
self.dataFileChooseButton.clicked.connect(self.chooseData)
self.dataFileChooseButtonT.clicked.connect(self.chooseData)
self.dataLossSimulateSettingButton.clicked.connect(self.setLossParameter)
self.dataLossSimulateSettingButtonT.clicked.connect(self.setLossParameter)
self.dataShowButton.clicked.connect(self.showData)
self.dataShowButtonT.clicked.connect(self.showData)
self.dataPreProcessButtonT.clicked.connect(self.preProcess)
self.setModelParametersButton.clicked.connect(self.setModelParameters)
self.setModelParametersButtonT.clicked.connect(self.setModelParameters)
self.trainingButton.clicked.connect(self.training)
self.trainingButtonT.clicked.connect(self.training)
self.saveModelButton.clicked.connect(self.saveModel)
self.saveModelButtonT.clicked.connect(self.saveModel)
self.loadModelButton.clicked.connect(self.loadModel)
self.loadModelButtonT.clicked.connect(self.loadModel)
self.showResultButton.clicked.connect(self.showResult)
self.showResultButtonT.clicked.connect(self.showResult)
self.judgeResultButton.clicked.connect(self.showJudge)
self.judgeResultButtonT.clicked.connect(self.showJudge)
############ data load module #####################
def chooseData(self):
if self.sender() is self.dataFileChooseButton:
self.fname['New'], ok = QFileDialog.getOpenFileName(self, 'Open file', '..', 'Text files (*.txt)')
if ok:
# dataname = self.fname['New'].split('/')[-1].split('.')[0]
# # print(dataname)
# self.presentDataName.setText(dataname)
# self.presentDataName.resize(self.presentDataName.sizeHint())
self.loadData()
elif self.sender() is self.dataFileChooseButtonT:
self.fname['Tra'], ok = QFileDialog.getOpenFileName(self, 'Open file', '..', 'Text files (*.txt)')
if ok:
# dataname = self.fname['Tra'].split('/')[-1].split('.')[0]
# # print(dataname)
# self.presentDataNameT.setText(dataname)
# self.presentDataNameT.resize(self.presentDataNameT.sizeHint())
self.loadData()
return
def loadData(self):
if self.sender() is self.dataFileChooseButton:
try:
self.dataFor['New'] = myLoadData.loadData(self.fname['New'], self.dataLossRate['New'], self.dataSetLossValue['New'])
# print(self.dataFor['New'].DataTrainX, '\n', self.dataFor['New'].DataTrainY)
except FileNotFoundError as e:
reply = QMessageBox.information(self, 'Message', "Data file not exist",
QMessageBox.Yes, QMessageBox.Yes)
return
except Exception:
reply = QMessageBox.information(self, 'Message', "Data file format error",
QMessageBox.Yes, QMessageBox.Yes)
return
dataname = self.fname['New'].split('/')[-1].split('.')[0]
# print(dataname)
self.presentDataName.setText(dataname)
self.presentDataName.resize(self.presentDataName.sizeHint())
elif self.sender() is self.dataFileChooseButtonT:
try:
self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'], self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])
# print(self.dataFor['Tra'].DataTrainX, '\n', self.dataFor['Tra'].DataTrainY)
except FileNotFoundError as e:
reply = QMessageBox.information(self, 'Message', "Data file not exist",
QMessageBox.Yes, QMessageBox.Yes)
return
except Exception:
reply = QMessageBox.information(self, 'Message', "Data file format error",
QMessageBox.Yes, QMessageBox.Yes)
return
dataname = self.fname['Tra'].split('/')[-1].split('.')[0]
# print(dataname)
self.presentDataNameT.setText(dataname)
self.presentDataNameT.resize(self.presentDataNameT.sizeHint())
return
def setLossParameter(self):
if self.sender() is self.dataLossSimulateSettingButton:
self.setLPDialog = setLossParameterDialog.setLossParameterDialog('combine-CNN设置缺失参数', self, 'New')
elif self.sender() is self.dataLossSimulateSettingButtonT:
self.setLPDialog = setLossParameterDialog.setLossParameterDialog('traditional NN设置缺失参数', self, 'Tra')
# print(self.dataLossRate)
# print(self.dataSetLossValue)
return
def showData(self):
if self.sender() is self.dataShowButton:
# print(1)
self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示', self, 'New')
elif self.sender() is self.dataShowButtonT:
# print(1)
self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示', self, 'Tra')
return
def preProcess(self):
if self.dataFor['Tra'] is None:
reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',
QMessageBox.Yes, QMessageBox.Yes)
else:
self.dataFor['Tra'].MeanPreProcess()
reply = QMessageBox.information(self, 'Message', 'PreProcess succeed!',
QMessageBox.Yes, QMessageBox.Yes)
return
############## training module #################
def setModelParameters(self):
if self.sender() is self.setModelParametersButton:
# print(1)
self.setModelParaW = setModelParametersDialog.setLossParameterDialog('combine-CNN模型参数设置', self, 'New')
elif self.sender() is self.setModelParametersButtonT:
self.setModelParaW = setModelParametersDialog.setLossParameterDialog('traditional NN模型参数设置', self, 'Tra')
def training(self):
if self.sender() is self.trainingButton:
if self.trainingW is not None:
self.trainingW.hide()
# print(self.trainingW)
self.trainingW.show()
return
senderName = 'New'
elif self.sender() is self.trainingButtonT:
if self.trainingWT is not None:
self.trainingWT.hide()
self.trainingWT.show()
senderName = 'Tra'
if self.dataFor[senderName] is None:
reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法训练',
QMessageBox.Yes, QMessageBox.Yes)
return
elif senderName == 'New':
if self.dataFor[senderName].DataTrainX.shape[1] < self.combineNumConv:
reply = QMessageBox.information(self, '参数错误', '卷积层组合(卷积核)大小大于数据集特征数量',
QMessageBox.Yes, QMessageBox.Yes)
return
if combineNumCalculate.combineNumCal(self.dataFor[senderName].DataTrainX.shape[1], self.combineNumConv)\
< self.combineNumPooling:
reply = QMessageBox.information(self, '参数错误', '池化层组合(池化核)大小大于卷积层输出特征向量维度',
QMessageBox.Yes, QMessageBox.Yes)
return
# print(self.trainingW)
if self.trainingWT is not None:
reply = QMessageBox.information(self, '提示', 'traditional NN训练正在进行,请等待其结束',
QMessageBox.Yes, QMessageBox.Yes)
return
self.trainingW = TrainingWidget.trainningWidget('combine-CNN训练', self, senderName)
self.traingWidgetOnFlag[senderName] = False
elif senderName == 'Tra':
if self.trainingW is not None:
reply = QMessageBox.information(self, '提示', 'combine-CNN训练正在进行,请等待其结束',
QMessageBox.Yes, QMessageBox.Yes)
return
self.trainingWT = TrainingWidget.trainningWidget('traditional NN训练', self, senderName)
self.traingWidgetOnFlag[senderName] = False
return
def saveModel(self):
if self.sender() is self.saveModelButton:
if self.mcbcnn is None:
reply = QMessageBox.information(self, '模型错误', '模型不存在',
QMessageBox.Yes, QMessageBox.Yes)
return
else:
fname, ok = QFileDialog.getSaveFileName(self, 'Save Model', '..\\myCombineCNN.cbcnn.json',
'Combine-CNN json files (*.cbcnn.json)')
if ok:
succeed = self.mcbcnn.saveModel(fname)
if succeed:
reply = QMessageBox.information(self, '保存结果', '模型保存成功',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果', '模型保存失败',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果', '模型保存失败',
QMessageBox.Yes, QMessageBox.Yes)
elif self.sender() is self.saveModelButtonT:
if self.trann is None:
reply = QMessageBox.information(self, '模型错误', '模型不存在',
QMessageBox.Yes, QMessageBox.Yes)
return
else:
fname, ok = QFileDialog.getSaveFileName(self, 'Save Model', '..\\traditionalNN.trann.json',
'Traditional NN json files (*.trann.json)')
if ok:
succeed = self.trann.saveModel(fname)
if succeed:
reply = QMessageBox.information(self, '保存结果', '模型保存成功',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果', '模型保存失败',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果', '模型保存失败',
QMessageBox.Yes, QMessageBox.Yes)
def loadModel(self):
if self.sender() is self.loadModelButton:
fname, ok = QFileDialog.getOpenFileName(self, 'Load Model', '..',
'Combine-CNN json files (*.cbcnn.json)')
if ok:
if self.mcbcnn is None:
self.mcbcnn = myCombineCNN.myCombineCNN(None, self.combineNumConv, self.convCoreNum, self.combineNumPooling)
succeed = self.mcbcnn.setModel(fname)
if succeed:
modelName = fname.split('/')[-1].split('.')[0]
self.presentModelName.setText(modelName)
reply = QMessageBox.information(self, '设置结果', '模型设置成功',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '设置结果', '模型设置失败',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '设置结果', '模型设置失败',
QMessageBox.Yes, QMessageBox.Yes)
elif self.sender() is self.loadModelButtonT:
fname, ok = QFileDialog.getOpenFileName(self, 'Load Model', '..',
'Traditional NN json files (*.trann.json)')
if ok:
if self.trann is None:
self.trann = traditionalNN.traditionalNN(None)
succeed = self.trann.setModel(fname)
if succeed:
modelName = fname.split('/')[-1].split('.')[0]
self.presentModelNameT.setText(modelName)
reply = QMessageBox.information(self, '设置结果', '模型设置成功',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '设置结果', '模型设置失败',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '设置结果', '模型设置失败',
QMessageBox.Yes, QMessageBox.Yes)
return
def showResult(self):
if self.sender() is self.showResultButton:
if self.traingWidgetOnFlag['New']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.showResultW = showResultWidget.ShowResultWidget('combine-CNN预测结果展示', self, 'New')
elif self.sender() is self.showResultButtonT:
if self.traingWidgetOnFlag['Tra']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.showResultW = showResultWidget.ShowResultWidget('traditional NN预测结果展示', self, 'Tra')
return
def showJudge(self):
if self.sender() is self.judgeResultButton:
if self.traingWidgetOnFlag['New']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.chooseJDWin = chooseJudgeDataSetWidget.chooseJudgeDataSetWidget('Choose Judgement-based-on Data Set',
self, 'New')
elif self.sender() is self.judgeResultButtonT:
if self.traingWidgetOnFlag['Tra']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.chooseJDWin = chooseJudgeDataSetWidget.chooseJudgeDataSetWidget('Choose Judgement-based-on Data Set',
self, 'Tra')
# self.testw = showJudgeWidgets.judgeWidget('test', self, 'New', 'Train')
# self.mcbcnn.runCNN('Test', self.dataFor['New'])
# drawCM = Judgement.myJudge(self.mcbcnn.data.yClassDic, self.mcbcnn.getAccuratePredictResult().argmax(1), self.mcbcnn.data.DataTestY.argmax(1))
# drawCM.plotConfuseMatrix()
if __name__ == '__main__':
    # Create the Qt application and main window, then enter the event loop.
    # The window must stay bound to a name so it is not garbage-collected
    # while the loop runs.
    app = QApplication(sys.argv)
    main_window = MyMainWindow()
    sys.exit(app.exec_())
|
flexible
|
{
"blob_id": "302605d8bb45b1529742bf9441d476f0276085b9",
"index": 9,
"step-1": "<mask token>\n\n\nclass MyMainWindow(QMainWindow):\n <mask token>\n <mask token>\n\n def initConnect(self):\n self.dataFileChooseButton.clicked.connect(self.chooseData)\n self.dataFileChooseButtonT.clicked.connect(self.chooseData)\n self.dataLossSimulateSettingButton.clicked.connect(self.\n setLossParameter)\n self.dataLossSimulateSettingButtonT.clicked.connect(self.\n setLossParameter)\n self.dataShowButton.clicked.connect(self.showData)\n self.dataShowButtonT.clicked.connect(self.showData)\n self.dataPreProcessButtonT.clicked.connect(self.preProcess)\n self.setModelParametersButton.clicked.connect(self.setModelParameters)\n self.setModelParametersButtonT.clicked.connect(self.setModelParameters)\n self.trainingButton.clicked.connect(self.training)\n self.trainingButtonT.clicked.connect(self.training)\n self.saveModelButton.clicked.connect(self.saveModel)\n self.saveModelButtonT.clicked.connect(self.saveModel)\n self.loadModelButton.clicked.connect(self.loadModel)\n self.loadModelButtonT.clicked.connect(self.loadModel)\n self.showResultButton.clicked.connect(self.showResult)\n self.showResultButtonT.clicked.connect(self.showResult)\n self.judgeResultButton.clicked.connect(self.showJudge)\n self.judgeResultButtonT.clicked.connect(self.showJudge)\n\n def chooseData(self):\n if self.sender() is self.dataFileChooseButton:\n self.fname['New'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n elif self.sender() is self.dataFileChooseButtonT:\n self.fname['Tra'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n return\n\n def loadData(self):\n if self.sender() is self.dataFileChooseButton:\n try:\n self.dataFor['New'] = myLoadData.loadData(self.fname['New'],\n self.dataLossRate['New'], self.dataSetLossValue['New'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, 
QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['New'].split('/')[-1].split('.')[0]\n self.presentDataName.setText(dataname)\n self.presentDataName.resize(self.presentDataName.sizeHint())\n elif self.sender() is self.dataFileChooseButtonT:\n try:\n self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'],\n self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['Tra'].split('/')[-1].split('.')[0]\n self.presentDataNameT.setText(dataname)\n self.presentDataNameT.resize(self.presentDataNameT.sizeHint())\n return\n\n def setLossParameter(self):\n if self.sender() is self.dataLossSimulateSettingButton:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'combine-CNN设置缺失参数', self, 'New')\n elif self.sender() is self.dataLossSimulateSettingButtonT:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'traditional NN设置缺失参数', self, 'Tra')\n return\n <mask token>\n\n def preProcess(self):\n if self.dataFor['Tra'] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n self.dataFor['Tra'].MeanPreProcess()\n reply = QMessageBox.information(self, 'Message',\n 'PreProcess succeed!', QMessageBox.Yes, QMessageBox.Yes)\n return\n <mask token>\n <mask token>\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n 
'..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n <mask token>\n\n def showResult(self):\n if self.sender() is self.showResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'combine-CNN预测结果展示', self, 'New')\n elif self.sender() is self.showResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'traditional NN预测结果展示', self, 'Tra')\n return\n\n def showJudge(self):\n if self.sender() is self.judgeResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n 
chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'New'))\n elif self.sender() is self.judgeResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'Tra'))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyMainWindow(QMainWindow):\n <mask token>\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n dataModule = QVBoxLayout()\n self.dataFileChooseButton = QPushButton('选择数据')\n self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))\n self.dataShowButton = QPushButton('展示数据')\n self.dataShowButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataName = QLabel('None')\n self.presentDataName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataName)\n dataModule.addStretch(1)\n dataModule.addLayout(labelbox)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataFileChooseButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataLossSimulateSettingButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataShowButton)\n dataModule.addStretch(1)\n trainingModule = QVBoxLayout()\n self.setModelParametersButton = QPushButton('Model Parameters')\n self.setModelParametersButton.setFont(QFont('微软雅黑', 16))\n self.trainingButton = QPushButton('Training')\n self.trainingButton.setFont(QFont('微软雅黑', 16))\n self.saveModelButton = QPushButton('Save Model')\n self.saveModelButton.setFont(QFont('微软雅黑', 16))\n self.loadModelButton = QPushButton('Load Model')\n self.loadModelButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelName = QLabel('None')\n self.presentModelName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelName)\n trainingModule.addStretch(1)\n trainingModule.addLayout(labelbox)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.setModelParametersButton)\n trainingModule.addStretch(1)\n 
trainingModule.addWidget(self.trainingButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.saveModelButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.loadModelButton)\n trainingModule.addStretch(1)\n resultShowModule = QVBoxLayout()\n self.showResultButton = QPushButton('分类结果展示')\n self.showResultButton.setFont(QFont('微软雅黑', 16))\n self.judgeResultButton = QPushButton('分类结果评估')\n self.judgeResultButton.setFont(QFont('微软雅黑', 16))\n resultShowModule.addWidget(self.showResultButton)\n resultShowModule.addWidget(self.judgeResultButton)\n hboxTop = QHBoxLayout()\n hboxTop.addStretch(1)\n mcnnLabel = QLabel('Combine-CNN:')\n mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxTop.addWidget(mcnnLabel)\n hboxTop.addStretch(1)\n hboxTop.addLayout(dataModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(trainingModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(resultShowModule)\n hboxTop.addStretch(1)\n dataModuleT = QVBoxLayout()\n self.dataFileChooseButtonT = QPushButton('选择数据')\n self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))\n self.dataPreProcessButtonT = QPushButton('数据预处理')\n self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))\n self.dataShowButtonT = QPushButton('展示数据')\n self.dataShowButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataNameT = QLabel('None')\n self.presentDataNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataNameT)\n dataModuleT.addStretch(1)\n dataModuleT.addLayout(labelbox)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataFileChooseButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataPreProcessButtonT)\n 
dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataShowButtonT)\n dataModuleT.addStretch(1)\n trainingModuleT = QVBoxLayout()\n self.setModelParametersButtonT = QPushButton('Model Parameters')\n self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))\n self.trainingButtonT = QPushButton('Training')\n self.trainingButtonT.setFont(QFont('微软雅黑', 16))\n self.saveModelButtonT = QPushButton('Save Model')\n self.saveModelButtonT.setFont(QFont('微软雅黑', 16))\n self.loadModelButtonT = QPushButton('Load Model')\n self.loadModelButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelNameT = QLabel('None')\n self.presentModelNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelNameT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addLayout(labelbox)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.setModelParametersButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.trainingButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.saveModelButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.loadModelButtonT)\n trainingModuleT.addStretch(1)\n resultShowModuleT = QVBoxLayout()\n self.showResultButtonT = QPushButton('分类结果展示')\n self.showResultButtonT.setFont(QFont('微软雅黑', 16))\n self.judgeResultButtonT = QPushButton('分类结果评估')\n self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))\n resultShowModuleT.addWidget(self.showResultButtonT)\n resultShowModuleT.addWidget(self.judgeResultButtonT)\n hboxBottom = QHBoxLayout(self)\n hboxBottom.addStretch(1)\n traditionNNLabel = QLabel('Traditional NN:')\n traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxBottom.addWidget(traditionNNLabel)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(dataModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(trainingModuleT)\n hboxBottom.addStretch(1)\n 
hboxBottom.addLayout(resultShowModuleT)\n hboxBottom.addStretch(1)\n splitterLine = QLabel(self)\n splitterLine.setFont(QFont('Times', 1))\n col = QColor(0, 0, 0)\n splitterLine.setStyleSheet('QWidget { background-color: %s }' % col\n .name())\n splitterLine.resize(splitterLine.sizeHint())\n vbox = QVBoxLayout()\n vbox.addLayout(hboxTop)\n vbox.addWidget(splitterLine)\n vbox.addLayout(hboxBottom)\n mainWidget = QWidget()\n mainWidget.setLayout(vbox)\n self.setCentralWidget(mainWidget)\n self.setGeometry(350, 100, self.windowLength, self.windowHigh)\n self.setWindowTitle('适用于有缺失值数据集的神经网络系统')\n self.show()\n\n def initConnect(self):\n self.dataFileChooseButton.clicked.connect(self.chooseData)\n self.dataFileChooseButtonT.clicked.connect(self.chooseData)\n self.dataLossSimulateSettingButton.clicked.connect(self.\n setLossParameter)\n self.dataLossSimulateSettingButtonT.clicked.connect(self.\n setLossParameter)\n self.dataShowButton.clicked.connect(self.showData)\n self.dataShowButtonT.clicked.connect(self.showData)\n self.dataPreProcessButtonT.clicked.connect(self.preProcess)\n self.setModelParametersButton.clicked.connect(self.setModelParameters)\n self.setModelParametersButtonT.clicked.connect(self.setModelParameters)\n self.trainingButton.clicked.connect(self.training)\n self.trainingButtonT.clicked.connect(self.training)\n self.saveModelButton.clicked.connect(self.saveModel)\n self.saveModelButtonT.clicked.connect(self.saveModel)\n self.loadModelButton.clicked.connect(self.loadModel)\n self.loadModelButtonT.clicked.connect(self.loadModel)\n self.showResultButton.clicked.connect(self.showResult)\n self.showResultButtonT.clicked.connect(self.showResult)\n self.judgeResultButton.clicked.connect(self.showJudge)\n self.judgeResultButtonT.clicked.connect(self.showJudge)\n\n def chooseData(self):\n if self.sender() is self.dataFileChooseButton:\n self.fname['New'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n 
self.loadData()\n elif self.sender() is self.dataFileChooseButtonT:\n self.fname['Tra'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n return\n\n def loadData(self):\n if self.sender() is self.dataFileChooseButton:\n try:\n self.dataFor['New'] = myLoadData.loadData(self.fname['New'],\n self.dataLossRate['New'], self.dataSetLossValue['New'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['New'].split('/')[-1].split('.')[0]\n self.presentDataName.setText(dataname)\n self.presentDataName.resize(self.presentDataName.sizeHint())\n elif self.sender() is self.dataFileChooseButtonT:\n try:\n self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'],\n self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['Tra'].split('/')[-1].split('.')[0]\n self.presentDataNameT.setText(dataname)\n self.presentDataNameT.resize(self.presentDataNameT.sizeHint())\n return\n\n def setLossParameter(self):\n if self.sender() is self.dataLossSimulateSettingButton:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'combine-CNN设置缺失参数', self, 'New')\n elif self.sender() is self.dataLossSimulateSettingButtonT:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'traditional NN设置缺失参数', self, 'Tra')\n return\n\n def showData(self):\n if self.sender() is self.dataShowButton:\n self.showDataW = 
showDataWidget.ShowDataWidget('combine-CNN数据展示',\n self, 'New')\n elif self.sender() is self.dataShowButtonT:\n self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示'\n , self, 'Tra')\n return\n\n def preProcess(self):\n if self.dataFor['Tra'] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n self.dataFor['Tra'].MeanPreProcess()\n reply = QMessageBox.information(self, 'Message',\n 'PreProcess succeed!', QMessageBox.Yes, QMessageBox.Yes)\n return\n <mask token>\n <mask token>\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n <mask token>\n\n def showResult(self):\n 
if self.sender() is self.showResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'combine-CNN预测结果展示', self, 'New')\n elif self.sender() is self.showResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'traditional NN预测结果展示', self, 'Tra')\n return\n\n def showJudge(self):\n if self.sender() is self.judgeResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'New'))\n elif self.sender() is self.judgeResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'Tra'))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MyMainWindow(QMainWindow):\n <mask token>\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n dataModule = QVBoxLayout()\n self.dataFileChooseButton = QPushButton('选择数据')\n self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))\n self.dataShowButton = QPushButton('展示数据')\n self.dataShowButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataName = QLabel('None')\n self.presentDataName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataName)\n dataModule.addStretch(1)\n dataModule.addLayout(labelbox)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataFileChooseButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataLossSimulateSettingButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataShowButton)\n dataModule.addStretch(1)\n trainingModule = QVBoxLayout()\n self.setModelParametersButton = QPushButton('Model Parameters')\n self.setModelParametersButton.setFont(QFont('微软雅黑', 16))\n self.trainingButton = QPushButton('Training')\n self.trainingButton.setFont(QFont('微软雅黑', 16))\n self.saveModelButton = QPushButton('Save Model')\n self.saveModelButton.setFont(QFont('微软雅黑', 16))\n self.loadModelButton = QPushButton('Load Model')\n self.loadModelButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelName = QLabel('None')\n self.presentModelName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelName)\n trainingModule.addStretch(1)\n trainingModule.addLayout(labelbox)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.setModelParametersButton)\n trainingModule.addStretch(1)\n 
trainingModule.addWidget(self.trainingButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.saveModelButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.loadModelButton)\n trainingModule.addStretch(1)\n resultShowModule = QVBoxLayout()\n self.showResultButton = QPushButton('分类结果展示')\n self.showResultButton.setFont(QFont('微软雅黑', 16))\n self.judgeResultButton = QPushButton('分类结果评估')\n self.judgeResultButton.setFont(QFont('微软雅黑', 16))\n resultShowModule.addWidget(self.showResultButton)\n resultShowModule.addWidget(self.judgeResultButton)\n hboxTop = QHBoxLayout()\n hboxTop.addStretch(1)\n mcnnLabel = QLabel('Combine-CNN:')\n mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxTop.addWidget(mcnnLabel)\n hboxTop.addStretch(1)\n hboxTop.addLayout(dataModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(trainingModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(resultShowModule)\n hboxTop.addStretch(1)\n dataModuleT = QVBoxLayout()\n self.dataFileChooseButtonT = QPushButton('选择数据')\n self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))\n self.dataPreProcessButtonT = QPushButton('数据预处理')\n self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))\n self.dataShowButtonT = QPushButton('展示数据')\n self.dataShowButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataNameT = QLabel('None')\n self.presentDataNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataNameT)\n dataModuleT.addStretch(1)\n dataModuleT.addLayout(labelbox)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataFileChooseButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataPreProcessButtonT)\n 
dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataShowButtonT)\n dataModuleT.addStretch(1)\n trainingModuleT = QVBoxLayout()\n self.setModelParametersButtonT = QPushButton('Model Parameters')\n self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))\n self.trainingButtonT = QPushButton('Training')\n self.trainingButtonT.setFont(QFont('微软雅黑', 16))\n self.saveModelButtonT = QPushButton('Save Model')\n self.saveModelButtonT.setFont(QFont('微软雅黑', 16))\n self.loadModelButtonT = QPushButton('Load Model')\n self.loadModelButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelNameT = QLabel('None')\n self.presentModelNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelNameT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addLayout(labelbox)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.setModelParametersButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.trainingButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.saveModelButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.loadModelButtonT)\n trainingModuleT.addStretch(1)\n resultShowModuleT = QVBoxLayout()\n self.showResultButtonT = QPushButton('分类结果展示')\n self.showResultButtonT.setFont(QFont('微软雅黑', 16))\n self.judgeResultButtonT = QPushButton('分类结果评估')\n self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))\n resultShowModuleT.addWidget(self.showResultButtonT)\n resultShowModuleT.addWidget(self.judgeResultButtonT)\n hboxBottom = QHBoxLayout(self)\n hboxBottom.addStretch(1)\n traditionNNLabel = QLabel('Traditional NN:')\n traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxBottom.addWidget(traditionNNLabel)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(dataModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(trainingModuleT)\n hboxBottom.addStretch(1)\n 
hboxBottom.addLayout(resultShowModuleT)\n hboxBottom.addStretch(1)\n splitterLine = QLabel(self)\n splitterLine.setFont(QFont('Times', 1))\n col = QColor(0, 0, 0)\n splitterLine.setStyleSheet('QWidget { background-color: %s }' % col\n .name())\n splitterLine.resize(splitterLine.sizeHint())\n vbox = QVBoxLayout()\n vbox.addLayout(hboxTop)\n vbox.addWidget(splitterLine)\n vbox.addLayout(hboxBottom)\n mainWidget = QWidget()\n mainWidget.setLayout(vbox)\n self.setCentralWidget(mainWidget)\n self.setGeometry(350, 100, self.windowLength, self.windowHigh)\n self.setWindowTitle('适用于有缺失值数据集的神经网络系统')\n self.show()\n\n def initConnect(self):\n self.dataFileChooseButton.clicked.connect(self.chooseData)\n self.dataFileChooseButtonT.clicked.connect(self.chooseData)\n self.dataLossSimulateSettingButton.clicked.connect(self.\n setLossParameter)\n self.dataLossSimulateSettingButtonT.clicked.connect(self.\n setLossParameter)\n self.dataShowButton.clicked.connect(self.showData)\n self.dataShowButtonT.clicked.connect(self.showData)\n self.dataPreProcessButtonT.clicked.connect(self.preProcess)\n self.setModelParametersButton.clicked.connect(self.setModelParameters)\n self.setModelParametersButtonT.clicked.connect(self.setModelParameters)\n self.trainingButton.clicked.connect(self.training)\n self.trainingButtonT.clicked.connect(self.training)\n self.saveModelButton.clicked.connect(self.saveModel)\n self.saveModelButtonT.clicked.connect(self.saveModel)\n self.loadModelButton.clicked.connect(self.loadModel)\n self.loadModelButtonT.clicked.connect(self.loadModel)\n self.showResultButton.clicked.connect(self.showResult)\n self.showResultButtonT.clicked.connect(self.showResult)\n self.judgeResultButton.clicked.connect(self.showJudge)\n self.judgeResultButtonT.clicked.connect(self.showJudge)\n\n def chooseData(self):\n if self.sender() is self.dataFileChooseButton:\n self.fname['New'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n 
self.loadData()\n elif self.sender() is self.dataFileChooseButtonT:\n self.fname['Tra'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n return\n\n def loadData(self):\n if self.sender() is self.dataFileChooseButton:\n try:\n self.dataFor['New'] = myLoadData.loadData(self.fname['New'],\n self.dataLossRate['New'], self.dataSetLossValue['New'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['New'].split('/')[-1].split('.')[0]\n self.presentDataName.setText(dataname)\n self.presentDataName.resize(self.presentDataName.sizeHint())\n elif self.sender() is self.dataFileChooseButtonT:\n try:\n self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'],\n self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['Tra'].split('/')[-1].split('.')[0]\n self.presentDataNameT.setText(dataname)\n self.presentDataNameT.resize(self.presentDataNameT.sizeHint())\n return\n\n def setLossParameter(self):\n if self.sender() is self.dataLossSimulateSettingButton:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'combine-CNN设置缺失参数', self, 'New')\n elif self.sender() is self.dataLossSimulateSettingButtonT:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'traditional NN设置缺失参数', self, 'Tra')\n return\n\n def showData(self):\n if self.sender() is self.dataShowButton:\n self.showDataW = 
showDataWidget.ShowDataWidget('combine-CNN数据展示',\n self, 'New')\n elif self.sender() is self.dataShowButtonT:\n self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示'\n , self, 'Tra')\n return\n\n def preProcess(self):\n if self.dataFor['Tra'] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n self.dataFor['Tra'].MeanPreProcess()\n reply = QMessageBox.information(self, 'Message',\n 'PreProcess succeed!', QMessageBox.Yes, QMessageBox.Yes)\n return\n <mask token>\n\n def training(self):\n if self.sender() is self.trainingButton:\n if self.trainingW is not None:\n self.trainingW.hide()\n self.trainingW.show()\n return\n senderName = 'New'\n elif self.sender() is self.trainingButtonT:\n if self.trainingWT is not None:\n self.trainingWT.hide()\n self.trainingWT.show()\n senderName = 'Tra'\n if self.dataFor[senderName] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法训练',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n elif senderName == 'New':\n if self.dataFor[senderName].DataTrainX.shape[1\n ] < self.combineNumConv:\n reply = QMessageBox.information(self, '参数错误',\n '卷积层组合(卷积核)大小大于数据集特征数量', QMessageBox.Yes, QMessageBox.Yes)\n return\n if combineNumCalculate.combineNumCal(self.dataFor[senderName].\n DataTrainX.shape[1], self.combineNumConv\n ) < self.combineNumPooling:\n reply = QMessageBox.information(self, '参数错误',\n '池化层组合(池化核)大小大于卷积层输出特征向量维度', QMessageBox.Yes,\n QMessageBox.Yes)\n return\n if self.trainingWT is not None:\n reply = QMessageBox.information(self, '提示',\n 'traditional NN训练正在进行,请等待其结束', QMessageBox.Yes,\n QMessageBox.Yes)\n return\n self.trainingW = TrainingWidget.trainningWidget('combine-CNN训练',\n self, senderName)\n self.traingWidgetOnFlag[senderName] = False\n elif senderName == 'Tra':\n if self.trainingW is not None:\n reply = QMessageBox.information(self, '提示',\n 'combine-CNN训练正在进行,请等待其结束', QMessageBox.Yes,\n QMessageBox.Yes)\n return\n self.trainingWT = 
TrainingWidget.trainningWidget('traditional NN训练'\n , self, senderName)\n self.traingWidgetOnFlag[senderName] = False\n return\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n <mask token>\n\n def showResult(self):\n if self.sender() is self.showResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'combine-CNN预测结果展示', self, 'New')\n elif self.sender() is self.showResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, 
QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'traditional NN预测结果展示', self, 'Tra')\n return\n\n def showJudge(self):\n if self.sender() is self.judgeResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'New'))\n elif self.sender() is self.judgeResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'Tra'))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass MyMainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.windowLength = 1250\n self.windowHigh = 900\n self.fname = dict()\n self.fname['New'] = None\n self.fname['Tra'] = None\n self.dataLossRate = dict()\n self.dataSetLossValue = dict()\n self.dataFor = dict()\n self.dataFor['New'] = None\n self.dataLossRate['New'] = 0.0\n self.dataSetLossValue['New'] = 0.0\n self.dataFor['Tra'] = None\n self.dataLossRate['Tra'] = 0.0\n self.dataSetLossValue['Tra'] = 0.0\n self.traingWidgetOnFlag = dict()\n self.traingWidgetOnFlag['New'] = False\n self.traingWidgetOnFlag['Tra'] = False\n self.combineNumConv = 2\n self.convCoreNum = 5\n self.combineNumPooling = 4\n self.fullConnectOutInRate = 0.5\n self.mcbcnn = None\n self.trann = None\n self.trainingW = None\n self.trainingWT = None\n self.initUI()\n self.initConnect()\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n dataModule = QVBoxLayout()\n self.dataFileChooseButton = QPushButton('选择数据')\n self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))\n self.dataShowButton = QPushButton('展示数据')\n self.dataShowButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataName = QLabel('None')\n self.presentDataName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataName)\n dataModule.addStretch(1)\n dataModule.addLayout(labelbox)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataFileChooseButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataLossSimulateSettingButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataShowButton)\n dataModule.addStretch(1)\n trainingModule = QVBoxLayout()\n self.setModelParametersButton = QPushButton('Model Parameters')\n 
self.setModelParametersButton.setFont(QFont('微软雅黑', 16))\n self.trainingButton = QPushButton('Training')\n self.trainingButton.setFont(QFont('微软雅黑', 16))\n self.saveModelButton = QPushButton('Save Model')\n self.saveModelButton.setFont(QFont('微软雅黑', 16))\n self.loadModelButton = QPushButton('Load Model')\n self.loadModelButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelName = QLabel('None')\n self.presentModelName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelName)\n trainingModule.addStretch(1)\n trainingModule.addLayout(labelbox)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.setModelParametersButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.trainingButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.saveModelButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.loadModelButton)\n trainingModule.addStretch(1)\n resultShowModule = QVBoxLayout()\n self.showResultButton = QPushButton('分类结果展示')\n self.showResultButton.setFont(QFont('微软雅黑', 16))\n self.judgeResultButton = QPushButton('分类结果评估')\n self.judgeResultButton.setFont(QFont('微软雅黑', 16))\n resultShowModule.addWidget(self.showResultButton)\n resultShowModule.addWidget(self.judgeResultButton)\n hboxTop = QHBoxLayout()\n hboxTop.addStretch(1)\n mcnnLabel = QLabel('Combine-CNN:')\n mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxTop.addWidget(mcnnLabel)\n hboxTop.addStretch(1)\n hboxTop.addLayout(dataModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(trainingModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(resultShowModule)\n hboxTop.addStretch(1)\n dataModuleT = QVBoxLayout()\n self.dataFileChooseButtonT = QPushButton('选择数据')\n self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')\n 
self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))\n self.dataPreProcessButtonT = QPushButton('数据预处理')\n self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))\n self.dataShowButtonT = QPushButton('展示数据')\n self.dataShowButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataNameT = QLabel('None')\n self.presentDataNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataNameT)\n dataModuleT.addStretch(1)\n dataModuleT.addLayout(labelbox)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataFileChooseButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataPreProcessButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataShowButtonT)\n dataModuleT.addStretch(1)\n trainingModuleT = QVBoxLayout()\n self.setModelParametersButtonT = QPushButton('Model Parameters')\n self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))\n self.trainingButtonT = QPushButton('Training')\n self.trainingButtonT.setFont(QFont('微软雅黑', 16))\n self.saveModelButtonT = QPushButton('Save Model')\n self.saveModelButtonT.setFont(QFont('微软雅黑', 16))\n self.loadModelButtonT = QPushButton('Load Model')\n self.loadModelButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelNameT = QLabel('None')\n self.presentModelNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelNameT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addLayout(labelbox)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.setModelParametersButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.trainingButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.saveModelButtonT)\n 
trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.loadModelButtonT)\n trainingModuleT.addStretch(1)\n resultShowModuleT = QVBoxLayout()\n self.showResultButtonT = QPushButton('分类结果展示')\n self.showResultButtonT.setFont(QFont('微软雅黑', 16))\n self.judgeResultButtonT = QPushButton('分类结果评估')\n self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))\n resultShowModuleT.addWidget(self.showResultButtonT)\n resultShowModuleT.addWidget(self.judgeResultButtonT)\n hboxBottom = QHBoxLayout(self)\n hboxBottom.addStretch(1)\n traditionNNLabel = QLabel('Traditional NN:')\n traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxBottom.addWidget(traditionNNLabel)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(dataModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(trainingModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(resultShowModuleT)\n hboxBottom.addStretch(1)\n splitterLine = QLabel(self)\n splitterLine.setFont(QFont('Times', 1))\n col = QColor(0, 0, 0)\n splitterLine.setStyleSheet('QWidget { background-color: %s }' % col\n .name())\n splitterLine.resize(splitterLine.sizeHint())\n vbox = QVBoxLayout()\n vbox.addLayout(hboxTop)\n vbox.addWidget(splitterLine)\n vbox.addLayout(hboxBottom)\n mainWidget = QWidget()\n mainWidget.setLayout(vbox)\n self.setCentralWidget(mainWidget)\n self.setGeometry(350, 100, self.windowLength, self.windowHigh)\n self.setWindowTitle('适用于有缺失值数据集的神经网络系统')\n self.show()\n\n def initConnect(self):\n self.dataFileChooseButton.clicked.connect(self.chooseData)\n self.dataFileChooseButtonT.clicked.connect(self.chooseData)\n self.dataLossSimulateSettingButton.clicked.connect(self.\n setLossParameter)\n self.dataLossSimulateSettingButtonT.clicked.connect(self.\n setLossParameter)\n self.dataShowButton.clicked.connect(self.showData)\n self.dataShowButtonT.clicked.connect(self.showData)\n self.dataPreProcessButtonT.clicked.connect(self.preProcess)\n self.setModelParametersButton.clicked.connect(self.setModelParameters)\n 
self.setModelParametersButtonT.clicked.connect(self.setModelParameters)\n self.trainingButton.clicked.connect(self.training)\n self.trainingButtonT.clicked.connect(self.training)\n self.saveModelButton.clicked.connect(self.saveModel)\n self.saveModelButtonT.clicked.connect(self.saveModel)\n self.loadModelButton.clicked.connect(self.loadModel)\n self.loadModelButtonT.clicked.connect(self.loadModel)\n self.showResultButton.clicked.connect(self.showResult)\n self.showResultButtonT.clicked.connect(self.showResult)\n self.judgeResultButton.clicked.connect(self.showJudge)\n self.judgeResultButtonT.clicked.connect(self.showJudge)\n\n def chooseData(self):\n if self.sender() is self.dataFileChooseButton:\n self.fname['New'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n elif self.sender() is self.dataFileChooseButtonT:\n self.fname['Tra'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n return\n\n def loadData(self):\n if self.sender() is self.dataFileChooseButton:\n try:\n self.dataFor['New'] = myLoadData.loadData(self.fname['New'],\n self.dataLossRate['New'], self.dataSetLossValue['New'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['New'].split('/')[-1].split('.')[0]\n self.presentDataName.setText(dataname)\n self.presentDataName.resize(self.presentDataName.sizeHint())\n elif self.sender() is self.dataFileChooseButtonT:\n try:\n self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'],\n self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, 
QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['Tra'].split('/')[-1].split('.')[0]\n self.presentDataNameT.setText(dataname)\n self.presentDataNameT.resize(self.presentDataNameT.sizeHint())\n return\n\n def setLossParameter(self):\n if self.sender() is self.dataLossSimulateSettingButton:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'combine-CNN设置缺失参数', self, 'New')\n elif self.sender() is self.dataLossSimulateSettingButtonT:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'traditional NN设置缺失参数', self, 'Tra')\n return\n\n def showData(self):\n if self.sender() is self.dataShowButton:\n self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示',\n self, 'New')\n elif self.sender() is self.dataShowButtonT:\n self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示'\n , self, 'Tra')\n return\n\n def preProcess(self):\n if self.dataFor['Tra'] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n self.dataFor['Tra'].MeanPreProcess()\n reply = QMessageBox.information(self, 'Message',\n 'PreProcess succeed!', QMessageBox.Yes, QMessageBox.Yes)\n return\n\n def setModelParameters(self):\n if self.sender() is self.setModelParametersButton:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('combine-CNN模型参数设置', self, 'New'))\n elif self.sender() is self.setModelParametersButtonT:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('traditional NN模型参数设置', self, 'Tra'))\n\n def training(self):\n if self.sender() is self.trainingButton:\n if self.trainingW is not None:\n self.trainingW.hide()\n self.trainingW.show()\n return\n senderName = 'New'\n elif self.sender() is self.trainingButtonT:\n if self.trainingWT is not None:\n self.trainingWT.hide()\n 
self.trainingWT.show()\n senderName = 'Tra'\n if self.dataFor[senderName] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法训练',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n elif senderName == 'New':\n if self.dataFor[senderName].DataTrainX.shape[1\n ] < self.combineNumConv:\n reply = QMessageBox.information(self, '参数错误',\n '卷积层组合(卷积核)大小大于数据集特征数量', QMessageBox.Yes, QMessageBox.Yes)\n return\n if combineNumCalculate.combineNumCal(self.dataFor[senderName].\n DataTrainX.shape[1], self.combineNumConv\n ) < self.combineNumPooling:\n reply = QMessageBox.information(self, '参数错误',\n '池化层组合(池化核)大小大于卷积层输出特征向量维度', QMessageBox.Yes,\n QMessageBox.Yes)\n return\n if self.trainingWT is not None:\n reply = QMessageBox.information(self, '提示',\n 'traditional NN训练正在进行,请等待其结束', QMessageBox.Yes,\n QMessageBox.Yes)\n return\n self.trainingW = TrainingWidget.trainningWidget('combine-CNN训练',\n self, senderName)\n self.traingWidgetOnFlag[senderName] = False\n elif senderName == 'Tra':\n if self.trainingW is not None:\n reply = QMessageBox.information(self, '提示',\n 'combine-CNN训练正在进行,请等待其结束', QMessageBox.Yes,\n QMessageBox.Yes)\n return\n self.trainingWT = TrainingWidget.trainningWidget('traditional NN训练'\n , self, senderName)\n self.traingWidgetOnFlag[senderName] = False\n return\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, 
QMessageBox.Yes)\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n def loadModel(self):\n if self.sender() is self.loadModelButton:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n if self.mcbcnn is None:\n self.mcbcnn = myCombineCNN.myCombineCNN(None, self.\n combineNumConv, self.convCoreNum, self.\n combineNumPooling)\n succeed = self.mcbcnn.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelName.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.loadModelButtonT:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Traditional NN json files (*.trann.json)')\n if ok:\n if self.trann is None:\n self.trann = traditionalNN.traditionalNN(None)\n succeed = self.trann.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelNameT.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = 
QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n def showResult(self):\n if self.sender() is self.showResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'combine-CNN预测结果展示', self, 'New')\n elif self.sender() is self.showResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'traditional NN预测结果展示', self, 'Tra')\n return\n\n def showJudge(self):\n if self.sender() is self.judgeResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'New'))\n elif self.sender() is self.judgeResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'Tra'))\n\n\n<mask token>\n",
"step-5": "import sys\nfrom PyQt5.QtWidgets import (QMainWindow, QWidget, QHBoxLayout, QVBoxLayout, QFrame,\n QSplitter, QStyleFactory, QApplication, QPushButton, QTextEdit, QLabel, QFileDialog, QMessageBox)\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QFont, QColor\nimport myLoadData\nfrom UIPack import setLossParameterDialog, showDataWidget, setModelParametersDialog, TrainingWidget, showResultWidget,\\\n showJudgeWidgets, chooseJudgeDataSetWidget\nfrom MyCombCNNPack import combineNumCalculate, myCombineCNN, traditionalNN, Judgement\n\nclass MyMainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n\n self.windowLength = 1250\n self.windowHigh = 900\n\n self.fname = dict()\n self.fname['New'] = None\n self.fname['Tra'] = None\n\n self.dataLossRate = dict()\n self.dataSetLossValue = dict()\n self.dataFor = dict()\n\n self.dataFor['New'] = None\n self.dataLossRate['New'] = 0.\n self.dataSetLossValue['New'] = 0.\n\n self.dataFor['Tra'] = None\n self.dataLossRate['Tra'] = 0.\n self.dataSetLossValue['Tra'] = 0.\n\n self.traingWidgetOnFlag = dict()\n self.traingWidgetOnFlag['New'] = False\n self.traingWidgetOnFlag['Tra'] = False\n\n self.combineNumConv = 2\n self.convCoreNum = 5\n self.combineNumPooling = 4\n\n self.fullConnectOutInRate = 0.5\n\n self.mcbcnn = None\n self.trann = None\n\n self.trainingW = None\n self.trainingWT = None\n\n self.initUI()\n self.initConnect()\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n\n ####### data module #######\n dataModule = QVBoxLayout()\n\n self.dataFileChooseButton = QPushButton('选择数据')\n self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))\n self.dataShowButton = QPushButton('展示数据')\n self.dataShowButton.setFont(QFont('微软雅黑', 16))\n\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataName = QLabel('None')\n 
self.presentDataName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataName)\n\n dataModule.addStretch(1)\n dataModule.addLayout(labelbox)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataFileChooseButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataLossSimulateSettingButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataShowButton)\n dataModule.addStretch(1)\n\n\n ###### training module ########\n trainingModule = QVBoxLayout()\n\n self.setModelParametersButton = QPushButton('Model Parameters')\n self.setModelParametersButton.setFont(QFont('微软雅黑', 16))\n # self.setTrainingParametersButton = QPushButton('Trainning Parameters')\n # self.setTrainingParametersButton.setFont(QFont('微软雅黑', 16))\n self.trainingButton = QPushButton('Training')\n self.trainingButton.setFont(QFont('微软雅黑', 16))\n self.saveModelButton = QPushButton('Save Model')\n self.saveModelButton.setFont(QFont('微软雅黑', 16))\n self.loadModelButton = QPushButton('Load Model')\n self.loadModelButton.setFont(QFont('微软雅黑', 16))\n\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelName = QLabel('None')\n self.presentModelName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelName)\n\n trainingModule.addStretch(1)\n trainingModule.addLayout(labelbox)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.setModelParametersButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.trainingButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.saveModelButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.loadModelButton)\n trainingModule.addStretch(1)\n\n ############## new cnn result show ######\n resultShowModule = QVBoxLayout()\n\n self.showResultButton = QPushButton('分类结果展示')\n self.showResultButton.setFont(QFont('微软雅黑', 16))\n 
self.judgeResultButton = QPushButton('分类结果评估')\n self.judgeResultButton.setFont(QFont('微软雅黑', 16))\n\n resultShowModule.addWidget(self.showResultButton)\n resultShowModule.addWidget(self.judgeResultButton)\n\n ################# new algorithm ui ##########\n hboxTop = QHBoxLayout()\n hboxTop.addStretch(1)\n\n mcnnLabel = QLabel('Combine-CNN:')\n mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxTop.addWidget(mcnnLabel)\n\n hboxTop.addStretch(1)\n\n hboxTop.addLayout(dataModule)\n\n hboxTop.addStretch(1)\n\n hboxTop.addLayout(trainingModule)\n\n hboxTop.addStretch(1)\n\n hboxTop.addLayout(resultShowModule)\n\n hboxTop.addStretch(1)\n\n #########traditional data module##########\n dataModuleT = QVBoxLayout()\n\n self.dataFileChooseButtonT = QPushButton('选择数据')\n self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))\n self.dataPreProcessButtonT = QPushButton('数据预处理')\n self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))\n self.dataShowButtonT = QPushButton('展示数据')\n self.dataShowButtonT.setFont(QFont('微软雅黑', 16))\n\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataNameT = QLabel('None')\n self.presentDataNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataNameT)\n\n dataModuleT.addStretch(1)\n dataModuleT.addLayout(labelbox)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataFileChooseButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataPreProcessButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataShowButtonT)\n dataModuleT.addStretch(1)\n\n ###### training module ########\n trainingModuleT = QVBoxLayout()\n\n self.setModelParametersButtonT = QPushButton('Model Parameters')\n 
self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))\n self.trainingButtonT = QPushButton('Training')\n self.trainingButtonT.setFont(QFont('微软雅黑', 16))\n self.saveModelButtonT = QPushButton('Save Model')\n self.saveModelButtonT.setFont(QFont('微软雅黑', 16))\n self.loadModelButtonT = QPushButton('Load Model')\n self.loadModelButtonT.setFont(QFont('微软雅黑', 16))\n\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelNameT = QLabel('None')\n self.presentModelNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelNameT)\n\n trainingModuleT.addStretch(1)\n trainingModuleT.addLayout(labelbox)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.setModelParametersButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.trainingButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.saveModelButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.loadModelButtonT)\n trainingModuleT.addStretch(1)\n\n ############## traditional nn result show ######\n resultShowModuleT = QVBoxLayout()\n\n self.showResultButtonT = QPushButton('分类结果展示')\n self.showResultButtonT.setFont(QFont('微软雅黑', 16))\n self.judgeResultButtonT = QPushButton('分类结果评估')\n self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))\n\n resultShowModuleT.addWidget(self.showResultButtonT)\n resultShowModuleT.addWidget(self.judgeResultButtonT)\n\n ####### traditional algorithm #########\n hboxBottom = QHBoxLayout(self)\n hboxBottom.addStretch(1)\n\n traditionNNLabel = QLabel('Traditional NN:')\n traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxBottom.addWidget(traditionNNLabel)\n\n hboxBottom.addStretch(1)\n\n hboxBottom.addLayout(dataModuleT)\n\n hboxBottom.addStretch(1)\n\n hboxBottom.addLayout(trainingModuleT)\n\n hboxBottom.addStretch(1)\n\n hboxBottom.addLayout(resultShowModuleT)\n\n hboxBottom.addStretch(1)\n\n ########## whole frame 
layout ########\n splitterLine = QLabel(self)\n splitterLine.setFont(QFont('Times', 1))\n col = QColor(0, 0, 0)\n splitterLine.setStyleSheet(\"QWidget { background-color: %s }\" % col.name())\n splitterLine.resize(splitterLine.sizeHint())\n\n vbox = QVBoxLayout()\n vbox.addLayout(hboxTop)\n # vbox.addWidget(QLabel(str('_'*int(self.width()/3))))\n vbox.addWidget(splitterLine)\n vbox.addLayout(hboxBottom)\n\n mainWidget = QWidget()\n mainWidget.setLayout(vbox)\n\n self.setCentralWidget(mainWidget)\n\n self.setGeometry(350, 100, self.windowLength, self.windowHigh)\n self.setWindowTitle('适用于有缺失值数据集的神经网络系统')\n self.show()\n\n def initConnect(self):\n\n self.dataFileChooseButton.clicked.connect(self.chooseData)\n self.dataFileChooseButtonT.clicked.connect(self.chooseData)\n self.dataLossSimulateSettingButton.clicked.connect(self.setLossParameter)\n self.dataLossSimulateSettingButtonT.clicked.connect(self.setLossParameter)\n self.dataShowButton.clicked.connect(self.showData)\n self.dataShowButtonT.clicked.connect(self.showData)\n self.dataPreProcessButtonT.clicked.connect(self.preProcess)\n\n self.setModelParametersButton.clicked.connect(self.setModelParameters)\n self.setModelParametersButtonT.clicked.connect(self.setModelParameters)\n self.trainingButton.clicked.connect(self.training)\n self.trainingButtonT.clicked.connect(self.training)\n self.saveModelButton.clicked.connect(self.saveModel)\n self.saveModelButtonT.clicked.connect(self.saveModel)\n self.loadModelButton.clicked.connect(self.loadModel)\n self.loadModelButtonT.clicked.connect(self.loadModel)\n\n self.showResultButton.clicked.connect(self.showResult)\n self.showResultButtonT.clicked.connect(self.showResult)\n self.judgeResultButton.clicked.connect(self.showJudge)\n self.judgeResultButtonT.clicked.connect(self.showJudge)\n\n\n############ data load module #####################\n def chooseData(self):\n if self.sender() is self.dataFileChooseButton:\n self.fname['New'], ok = QFileDialog.getOpenFileName(self, 
'Open file', '..', 'Text files (*.txt)')\n if ok:\n # dataname = self.fname['New'].split('/')[-1].split('.')[0]\n # # print(dataname)\n # self.presentDataName.setText(dataname)\n # self.presentDataName.resize(self.presentDataName.sizeHint())\n self.loadData()\n\n elif self.sender() is self.dataFileChooseButtonT:\n self.fname['Tra'], ok = QFileDialog.getOpenFileName(self, 'Open file', '..', 'Text files (*.txt)')\n if ok:\n # dataname = self.fname['Tra'].split('/')[-1].split('.')[0]\n # # print(dataname)\n # self.presentDataNameT.setText(dataname)\n # self.presentDataNameT.resize(self.presentDataNameT.sizeHint())\n self.loadData()\n\n return\n\n\n def loadData(self):\n if self.sender() is self.dataFileChooseButton:\n try:\n self.dataFor['New'] = myLoadData.loadData(self.fname['New'], self.dataLossRate['New'], self.dataSetLossValue['New'])\n # print(self.dataFor['New'].DataTrainX, '\\n', self.dataFor['New'].DataTrainY)\n\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message', \"Data file not exist\",\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n except Exception:\n reply = QMessageBox.information(self, 'Message', \"Data file format error\",\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n dataname = self.fname['New'].split('/')[-1].split('.')[0]\n # print(dataname)\n self.presentDataName.setText(dataname)\n self.presentDataName.resize(self.presentDataName.sizeHint())\n\n elif self.sender() is self.dataFileChooseButtonT:\n try:\n self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'], self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])\n # print(self.dataFor['Tra'].DataTrainX, '\\n', self.dataFor['Tra'].DataTrainY)\n\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message', \"Data file not exist\",\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n except Exception:\n reply = QMessageBox.information(self, 'Message', \"Data file format error\",\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n dataname = 
self.fname['Tra'].split('/')[-1].split('.')[0]\n # print(dataname)\n self.presentDataNameT.setText(dataname)\n self.presentDataNameT.resize(self.presentDataNameT.sizeHint())\n\n return\n\n def setLossParameter(self):\n if self.sender() is self.dataLossSimulateSettingButton:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog('combine-CNN设置缺失参数', self, 'New')\n\n elif self.sender() is self.dataLossSimulateSettingButtonT:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog('traditional NN设置缺失参数', self, 'Tra')\n\n # print(self.dataLossRate)\n # print(self.dataSetLossValue)\n return\n\n def showData(self):\n if self.sender() is self.dataShowButton:\n # print(1)\n self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示', self, 'New')\n\n elif self.sender() is self.dataShowButtonT:\n # print(1)\n self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示', self, 'Tra')\n return\n\n def preProcess(self):\n if self.dataFor['Tra'] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n self.dataFor['Tra'].MeanPreProcess()\n reply = QMessageBox.information(self, 'Message', 'PreProcess succeed!',\n QMessageBox.Yes, QMessageBox.Yes)\n\n return\n\n ############## training module #################\n def setModelParameters(self):\n if self.sender() is self.setModelParametersButton:\n # print(1)\n self.setModelParaW = setModelParametersDialog.setLossParameterDialog('combine-CNN模型参数设置', self, 'New')\n\n elif self.sender() is self.setModelParametersButtonT:\n self.setModelParaW = setModelParametersDialog.setLossParameterDialog('traditional NN模型参数设置', self, 'Tra')\n\n def training(self):\n if self.sender() is self.trainingButton:\n if self.trainingW is not None:\n self.trainingW.hide()\n # print(self.trainingW)\n self.trainingW.show()\n return\n senderName = 'New'\n\n elif self.sender() is self.trainingButtonT:\n if self.trainingWT is not None:\n self.trainingWT.hide()\n 
self.trainingWT.show()\n\n senderName = 'Tra'\n\n if self.dataFor[senderName] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法训练',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n elif senderName == 'New':\n if self.dataFor[senderName].DataTrainX.shape[1] < self.combineNumConv:\n reply = QMessageBox.information(self, '参数错误', '卷积层组合(卷积核)大小大于数据集特征数量',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n if combineNumCalculate.combineNumCal(self.dataFor[senderName].DataTrainX.shape[1], self.combineNumConv)\\\n < self.combineNumPooling:\n reply = QMessageBox.information(self, '参数错误', '池化层组合(池化核)大小大于卷积层输出特征向量维度',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n # print(self.trainingW)\n if self.trainingWT is not None:\n reply = QMessageBox.information(self, '提示', 'traditional NN训练正在进行,请等待其结束',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n self.trainingW = TrainingWidget.trainningWidget('combine-CNN训练', self, senderName)\n self.traingWidgetOnFlag[senderName] = False\n\n elif senderName == 'Tra':\n if self.trainingW is not None:\n reply = QMessageBox.information(self, '提示', 'combine-CNN训练正在进行,请等待其结束',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n self.trainingWT = TrainingWidget.trainningWidget('traditional NN训练', self, senderName)\n self.traingWidgetOnFlag[senderName] = False\n\n return\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model', '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果', '模型保存成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n 
QMessageBox.Yes, QMessageBox.Yes)\n\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model', '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果', '模型保存成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n\n def loadModel(self):\n if self.sender() is self.loadModelButton:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model', '..',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n if self.mcbcnn is None:\n self.mcbcnn = myCombineCNN.myCombineCNN(None, self.combineNumConv, self.convCoreNum, self.combineNumPooling)\n\n succeed = self.mcbcnn.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelName.setText(modelName)\n\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n elif self.sender() is self.loadModelButtonT:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model', '..',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n if self.trann is None:\n self.trann = traditionalNN.traditionalNN(None)\n\n succeed = self.trann.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelNameT.setText(modelName)\n\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n\n 
else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n return\n\n def showResult(self):\n\n if self.sender() is self.showResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n self.showResultW = showResultWidget.ShowResultWidget('combine-CNN预测结果展示', self, 'New')\n\n elif self.sender() is self.showResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n self.showResultW = showResultWidget.ShowResultWidget('traditional NN预测结果展示', self, 'Tra')\n\n return\n\n def showJudge(self):\n if self.sender() is self.judgeResultButton:\n\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n self.chooseJDWin = chooseJudgeDataSetWidget.chooseJudgeDataSetWidget('Choose Judgement-based-on Data Set',\n self, 'New')\n\n elif self.sender() is self.judgeResultButtonT:\n\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n self.chooseJDWin = chooseJudgeDataSetWidget.chooseJudgeDataSetWidget('Choose Judgement-based-on Data Set',\n self, 'Tra')\n # self.testw = showJudgeWidgets.judgeWidget('test', self, 'New', 'Train')\n # self.mcbcnn.runCNN('Test', self.dataFor['New'])\n # drawCM = Judgement.myJudge(self.mcbcnn.data.yClassDic, self.mcbcnn.getAccuratePredictResult().argmax(1), self.mcbcnn.data.DataTestY.argmax(1))\n # drawCM.plotConfuseMatrix()\n\n\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n myMainWindow = MyMainWindow()\n sys.exit(app.exec_())",
"step-ids": [
9,
11,
12,
15,
18
]
}
|
[
9,
11,
12,
15,
18
] |
from scipy.stats import itemfreq
from sklearn.model_selection import StratifiedKFold
from keras_utils.keras_utils import *
from keras.utils.np_utils import to_categorical
from keras.layers import Input, Embedding, Dense, GlobalAveragePooling1D, Flatten
from keras.layers import add, multiply, LSTM, Bidirectional, BatchNormalization, LeakyReLU, concatenate, Lambda
from keras.models import Model
from keras import backend as K
def f1(y_true, y_pred):
    """Batch-wise F1 score for use as a Keras metric.

    Computes batch-level precision and recall directly from the rounded,
    clipped predictions and folds them into their harmonic mean. Because it
    is evaluated per batch, it only approximates the epoch-level F1.
    """
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    pred_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    poss_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    # epsilon keeps the divisions finite when a batch has no positives
    prec = true_pos / (pred_pos + K.epsilon())
    rec = true_pos / (poss_pos + K.epsilon())
    return 2 * ((prec * rec) / (prec + rec + K.epsilon()))
def precision(y_true, y_pred):
    """Batch-wise precision: fraction of predicted positives that are correct."""
    hits = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    flagged = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return hits / (flagged + K.epsilon())
def recall(y_true, y_pred):
    """Batch-wise recall: fraction of actual positives that were retrieved."""
    hits = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual = K.sum(K.round(K.clip(y_true, 0, 1)))
    return hits / (actual + K.epsilon())
class MaskedGlobalAveragePooling1D(GlobalAveragePooling1D):
    """GlobalAveragePooling1D that accepts an incoming Keras mask.

    Identical to the stock layer except that ``supports_masking`` is
    switched on so it can sit downstream of ``mask_zero=True`` embeddings.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.supports_masking = True
class MaskableFlatten(Flatten):
    """Flatten layer that tolerates an incoming Keras mask.

    Same behavior as ``Flatten``; only ``supports_masking`` is enabled so
    masked upstream layers do not raise when connected to it.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.supports_masking = True
# --- File locations and early config --------------------------------------
# train data path
DATA1_TRAIN_PATH = '../data/data_1_train.csv'
DATA2_TRAIN_PATH = '../data/data_2_train.csv'
# GLoVe pre-trained word vectors path
EMBEDDING_DIR = '../embeddings/'
EMBEDDING_TYPE = 'glove.6B.300d.txt'  # glove.6B.300d.txt
# Pickled caches: embedding index, lookup failures, and aspect vectors.
EMBEDDING_PICKLE_DIR = 'embeddings_index.p'
EMBEDDING_ERROR_DIR = 'embeddings_error.p'
ASPECT_EMBEDDING_DIR = 'aspect_embeddings.p'
# tokenizer path
TOKENIZER_DIR = 'embeddings/tokenizer.p'
# NOTE(review): the three limits below appear superseded by
# MAX_SENTENCE_LENGTH / MAX_NUM_WORDS and a second EMBEDDING_DIM declared
# further down this file — confirm which set keras_utils actually reads.
MAX_SEQ_LENGTH = 60
MAX_NB_WORDS = 95000
EMBEDDING_DIM = 300
# aspect dictionary
# NOTE(review): starts empty but is read as len(aspect_dict) + 1 when sizing
# the learned aspect Embedding — presumably populated by the keras_utils
# helpers before model construction; verify.
aspect_dict = {}
"""
What this model does:
2 ip - 1 op model : 2 ip = sentence and aspect sentence
Shared embedding layer = reduce # of params and chance to overfit.
sentence embedding = sentence passed through embedding layer (keep for later)
aspect embedding = aspect sentence passed through embedding layer
On this aspect embedding, use attention mechanism to jointly learn what is the "best" augmentation to the sentence embedding
- Dense layer that maps 1 : 1 between the aspect embedding and the aspect attention
- Softmax forces it to choose the "parts" of the sentence that help the most in training
- No bias needed for attention
- Next is to actually augment the aspect embeddings with this learned attention
- The element-wise multiplication forces many embeddings to become close to zero
- Only a few will remain "strong" after this multiplication. These are the "important" words in the aspect sentence
Finally, augment the original sentence embeddings with the attended aspect embeddings
- This will "add" some strength to the embeddings of the "important" words
- Remaining words will not be impacted at all (since they are added with near zero values)
Benefits of this model
- Choose if you want to send a unique aspect sentence for the corresponding sentence
- By this I mean, you have a choice
- 1) Use the original sentence as aspect input.
In doing so, it is basically like saying learn on your own what the aspect word is
It may not give much benefit, as the attended vector has the chance of being all equal (no attention)
- 2) Use a true aspect encoding as the aspect input.
Since you are sharing the embedding now, you cannot use random / own assigned aspects anymore.
The aspect ids that you pass will now be from the original embedding matrix using the word_index
dict that Keras gives you.
In this case, an aspect sentence would be of the form :
[0 0 ... 32506 66049 5968 0 0 ...]
Here 32506 = "Apple", 66049 = "Macbook" 5968 = "Pro" (say)
"""
NUM_CLASSES = 3  # 0 = neg, 1 = neutral, 2 = pos
MAX_SENTENCE_LENGTH = 60
MAX_NUM_WORDS = 20000  # this will be number of unique "words" (n-grams etc) there are
MAX_NUM_ASPECT_WORDS = 300  # this will be the number of unique aspect "words" (uni-grams only)
EMBEDDING_DIM = 300  # re-declares the same value assigned earlier in this file
# NOTE(review): EMBEDDING_WEIGHTS stays None here, so the Embedding layers
# below initialise randomly unless this global is reassigned elsewhere
# (e.g. by a keras_utils helper) before a model is built — confirm.
EMBEDDING_WEIGHTS = None
MASK_ZEROS = True  # this can be true ONLY for RNN models. If even 1 CNN is there, it will crash
#
# embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM, mask_zero=MASK_ZEROS,
# weights=EMBEDDING_WEIGHTS, trainable=False)
#
# sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')
# aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')
#
# sentence_embedding = embedding(sentence_ip) # Note: these are same embedding layer
# aspect_embedding = embedding(aspect_ip) # Note: these are same embedding layer
#
# # Create the attention vector for the aspect embeddings
# aspect_attention = Dense(EMBEDDING_DIM, activation='softmax', use_bias=False,
# name='aspect_attention')(aspect_embedding)
#
# # dampen the aspect embeddings according to the attention with an element-wise multiplication
# aspect_embedding = multiply([aspect_embedding, aspect_attention])
#
# # augment the sample embedding with information from the attended aspect embedding
# sentence_embedding = add([sentence_embedding, aspect_embedding])
#
# # now you can continue with whatever layer other than CNNs
#
# x = LSTM(100)(sentence_embedding)
# x = Dense(NUM_CLASSES, activation='softmax')(x)
#
# model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)
#
# model.summary()
#
#
# from keras.utils.vis_utils import plot_model
# plot_model(model, to_file='shared_embedding.png', show_shapes=False, show_layer_names=True)
#
"""
What this model does:
2 ip - 1 op model : 2 ip = sentence and aspect sentence
    Disjoint embedding layers = more params and a higher chance to overfit.
sentence embedding = sentence passed through embedding layer (keep for later ; not learned)
aspect embedding = aspect sentence passed through embedding layer (learned)
Benefits of this model
- Use a true aspect encoding as the aspect input.
Since you are learning the embedding now, you can use own assigned aspects.
In this case, an aspect sentence would be of the form :
[0 0 ... 2 2 2 0 0 ...]
Here 2 = "Apple", 2 = "Macbook" 2 = "Pro" (say)
Therefore, the id is given by you, and is shared over all of the aspect words for a given aspect term.
"""
def output_shape(input_shape):
    """Return *input_shape* with its final dimension halved.

    Used as the ``output_shape`` callback for the Lambda layer (see the
    commented-out attention experiment in ``model_2``) that keeps only the
    first half of the last axis.

    :param input_shape: shape tuple, e.g. ``(batch, timesteps, features)``;
        leading entries may be ``None``.
    :return: the same shape as a tuple with the last entry halved.
    """
    shape = list(input_shape)
    # Bug fix: '/' produced a float dimension under Python 3, which is not a
    # valid Keras dimension; floor division keeps it an int. The debug print
    # was dropped as well.
    shape[-1] //= 2
    return tuple(shape)
def model_2():
    """Train the 2-input aspect model with a *learned* aspect embedding.

    Pipeline: load the food reviews, build padded sentence / aspect id
    sequences, shuffle them with one shared permutation, then fit a model
    where a trainable aspect embedding is gated by a sigmoid attention
    vector and concatenated onto the frozen sentence embedding before an
    LSTM + softmax classifier over NUM_CLASSES sentiment classes.
    """
    # Drop any graph/session state left over from a previous model build.
    K.clear_session()
    tech_reviews, food_reviews = load_and_clean()
    embedding_matrix, aspect_sequences, padded_sequences, labels = load_embedding_matrix(food_reviews)
    # labels = [x+1 for x in labels]
    # NOTE(review): unlike model_2_CV/model_3, labels are NOT shifted by +1
    # here; if load_embedding_matrix can yield -1 labels, to_categorical
    # below would mis-encode them — confirm against keras_utils.
    print(itemfreq(labels))
    # One shared permutation shuffles sentences, labels and aspects together.
    indices = np.arange(0, padded_sequences.shape[0], step=1, dtype=int)
    np.random.shuffle(indices)
    padded_sequences = padded_sequences[indices]
    labels = to_categorical(labels, num_classes=NUM_CLASSES)
    labels = labels[indices]
    aspect_sequences = aspect_sequences[indices]
    # Frozen sentence embedding (pre-trained weights, not updated).
    sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM, mask_zero=MASK_ZEROS,
                                   weights=EMBEDDING_WEIGHTS, trainable=False)
    # aspect_embedding = Embedding(MAX_NUM_ASPECT_WORDS, EMBEDDING_DIM, mask_zero=MASK_ZEROS, trainable=True)
    # this needs to be True
    # Learned aspect embedding: one row per aspect id assigned in aspect_dict.
    aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM, mask_zero=MASK_ZEROS, trainable=True)
    sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')
    aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')
    sentence_embedding = sentence_embedding(sentence_ip)  # Note: these are two different embeddings
    aspect_embedding = aspect_embedding(aspect_ip)  # Note: these are two different embeddings
    # Create the attention vector for the aspect embeddings
    aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid', use_bias=False,
                             name='aspect_attention')(aspect_embedding)
    # dampen the aspect embeddings according to the attention with an element-wise multiplication
    aspect_embedding = multiply([aspect_embedding, aspect_attention])
    # augment the sample embedding with information from the attended aspect embedding
    sentence_embedding = concatenate([sentence_embedding, aspect_embedding])
    # now you can continue with whatever layer other than CNNs
    # x = MaskedGlobalAveragePooling1D()(sentence_embedding)
    # x = MaskableFlatten()(sentence_embedding)
    x = LSTM(256)(sentence_embedding)
    # y = Lambda(lambda z: z[:, :, :NUM_CELLS//2], output_shape=output_shape)(x)
    # x = Dense(NUM_CELLS//2, activation='softmax', use_bias=False)(x)
    # x = multiply([x, y])
    # x = MaskedGlobalAveragePooling1D()(x)
    # x = Dense(256, activation='linear', kernel_initializer='he_normal')(x)
    # x = BatchNormalization()(x)
    # x = LeakyReLU()(x)
    x = Dense(3, activation='softmax')(x)
    model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
    print(model.summary())
    # Hold out the last 20% (post-shuffle) as the validation split.
    model.fit([padded_sequences, aspect_sequences], labels, epochs=10, verbose=1, validation_split=0.2)
    # from keras.utils.vis_utils import plot_model
    # plot_model(model, to_file='learned_embedding.png', show_shapes=False, show_layer_names=True)
def model_2_CV():
    """Cross-validated variant of model_2, run on the tech reviews.

    Performs 3-fold stratified CV: each fold rebuilds the 2-input
    attention model from scratch, trains for 5 epochs, evaluates on the
    held-out fold and records the fold's fbeta score; the mean score is
    printed at the end.
    """
    K.clear_session()
    tech_reviews, food_reviews = load_and_clean()
    embedding_matrix, aspect_sequences, padded_sequences, labels = load_embedding_matrix(tech_reviews)
    # Shift labels from {-1, 0, 1} to {0, 1, 2} for categorical encoding.
    labels = np.array([x + 1 for x in labels])
    print(itemfreq(labels))
    # Random shuffling of padded, aspect sequences and labels
    # indices = np.arange(0, padded_sequences.shape[0], step=1, dtype=int)
    # np.random.shuffle(indices)
    # padded_sequences = padded_sequences[indices]
    # labels = to_categorical(labels, num_classes=NUM_CLASSES)
    # labels = labels[indices]
    # aspect_sequences = aspect_sequences[indices]
    print(labels.shape)
    N_FOLDS = 3
    fbeta_scores = []
    # Fixed random_state keeps the fold assignment reproducible across runs.
    skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)
    for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences, labels)):
        print('Fold %d' % (j + 1))
        sentence_train, aspect_train, y_train = padded_sequences[train_idx], aspect_sequences[train_idx], \
                                                labels[train_idx]
        sentence_test, aspect_test, y_test = padded_sequences[test_idx], aspect_sequences[test_idx], labels[test_idx]
        y_train = to_categorical(y_train, 3)
        y_test = to_categorical(y_test, 3)
        # Frozen sentence embedding + trainable aspect embedding (see model_2).
        sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM, mask_zero=MASK_ZEROS,
                                       weights=EMBEDDING_WEIGHTS, trainable=False)
        aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM, mask_zero=MASK_ZEROS, trainable=True)
        sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')
        aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')
        sentence_embedding = sentence_embedding(sentence_ip)  # Note: these are two different embeddings
        aspect_embedding = aspect_embedding(aspect_ip)  # Note: these are two different embeddings
        # Create the attention vector for the aspect embeddings
        aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid', use_bias=False,
                                 name='aspect_attention')(aspect_embedding)
        # dampen the aspect embeddings according to the attention with an element-wise multiplication
        aspect_embedding = multiply([aspect_embedding, aspect_attention])
        # augment the sample embedding with information from the attended aspect embedding
        sentence_embedding = concatenate([sentence_embedding, aspect_embedding])
        x = LSTM(256)(sentence_embedding)
        x = Dense(3, activation='softmax')(x)
        model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)
        # NOTE(review): fbeta_score is not defined in this file; presumably it
        # comes from the keras_utils star import — confirm.
        model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc', fbeta_score])
        print(model.summary())
        model.fit([sentence_train, aspect_train], y_train, epochs=5, verbose=1,
                  validation_data=([sentence_test, aspect_test], y_test))
        scores = model.evaluate([sentence_test, aspect_test], y_test)
        fbeta_scores.append(scores[-1])
    print("Average fbeta score : ", sum(fbeta_scores) / len(fbeta_scores))
def model_3():
    """Train and evaluate a plain (no-aspect) LSTM sentiment classifier with 10-fold CV.

    Loads the food-review dataset, shifts the labels from {-1, 0, 1} to
    {0, 1, 2}, and for each stratified fold trains a frozen-embedding
    LSTM -> softmax model for 10 epochs. The per-epoch accuracy, F1,
    precision and recall histories of every fold are appended to
    'history.txt'.

    Side effects: clears the Keras session, prints model summaries and
    training progress, and (over)writes 'history.txt'.
    """
    K.clear_session()
    tech_reviews, food_reviews = load_and_clean()
    embedding_matrix, aspect_sequences, padded_sequences, labels = load_embedding_matrix(food_reviews)
    # Shift labels from {-1, 0, 1} to {0, 1, 2} so to_categorical can one-hot them.
    labels = np.array([x + 1 for x in labels])
    print(itemfreq(labels))
    N_FOLDS = 10
    skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)
    # Context manager guarantees the history file is closed and flushed even if
    # a fold raises mid-training; the original left the handle open forever.
    with open('history.txt', 'w+') as f:
        for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences, labels)):
            print('Fold %d' % (j + 1))
            sentence_train, y_train = padded_sequences[train_idx], labels[train_idx]
            sentence_test, y_test = padded_sequences[test_idx], labels[test_idx]
            y_train = to_categorical(y_train, 3)
            y_test = to_categorical(y_test, 3)
            # Frozen pre-trained word embeddings; MASK_ZEROS lets the LSTM skip padding.
            sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM, mask_zero=MASK_ZEROS,
                                           weights=EMBEDDING_WEIGHTS, trainable=False)
            sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')
            sentence_embedding = sentence_embedding(sentence_ip)
            x = LSTM(256, dropout=0.2, recurrent_dropout=0.2)(sentence_embedding)
            x = Dense(3, activation='softmax')(x)
            model = Model(inputs=sentence_ip, outputs=x)
            model.compile(optimizer='adam', loss='categorical_crossentropy',
                          metrics=['acc', f1, precision, recall])
            print(model.summary())
            history = model.fit(sentence_train, y_train, epochs=10, verbose=1,
                                validation_data=(sentence_test, y_test))
            # Record this fold's per-epoch metric curves.
            f.write('\nFold %d\n' % (j + 1))
            f.write(str(history.history['acc']))
            f.write(str(history.history['val_acc']))
            f.write(str(history.history['f1']))
            f.write(str(history.history['precision']))
            f.write(str(history.history['recall']))
# Script entry point: run the cross-validated plain-LSTM experiment only when
# this file is executed directly (not on import).
if __name__ == '__main__':
    model_3()
|
normal
|
{
"blob_id": "0b125e7e9e763d4fd71e381ca823f9e9aa8ea606",
"index": 8198,
"step-1": "<mask token>\n\n\nclass MaskedGlobalAveragePooling1D(GlobalAveragePooling1D):\n\n def __init__(self, **kwargs):\n super(MaskedGlobalAveragePooling1D, self).__init__(**kwargs)\n self.supports_masking = True\n\n\nclass MaskableFlatten(Flatten):\n\n def __init__(self, **kwargs):\n super(MaskableFlatten, self).__init__(**kwargs)\n self.supports_masking = True\n\n\n<mask token>\n\n\ndef model_2():\n K.clear_session()\n tech_reviews, food_reviews = load_and_clean()\n embedding_matrix, aspect_sequences, padded_sequences, labels = (\n load_embedding_matrix(food_reviews))\n print(itemfreq(labels))\n indices = np.arange(0, padded_sequences.shape[0], step=1, dtype=int)\n np.random.shuffle(indices)\n padded_sequences = padded_sequences[indices]\n labels = to_categorical(labels, num_classes=NUM_CLASSES)\n labels = labels[indices]\n aspect_sequences = aspect_sequences[indices]\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM,\n mask_zero=MASK_ZEROS, weights=EMBEDDING_WEIGHTS, trainable=False)\n aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM,\n mask_zero=MASK_ZEROS, trainable=True)\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n sentence_embedding = sentence_embedding(sentence_ip)\n aspect_embedding = aspect_embedding(aspect_ip)\n aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid', use_bias=\n False, name='aspect_attention')(aspect_embedding)\n aspect_embedding = multiply([aspect_embedding, aspect_attention])\n sentence_embedding = concatenate([sentence_embedding, aspect_embedding])\n x = LSTM(256)(sentence_embedding)\n x = Dense(3, activation='softmax')(x)\n model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['acc'])\n print(model.summary())\n model.fit([padded_sequences, aspect_sequences], labels, epochs=10,\n verbose=1, 
validation_split=0.2)\n\n\ndef model_2_CV():\n K.clear_session()\n tech_reviews, food_reviews = load_and_clean()\n embedding_matrix, aspect_sequences, padded_sequences, labels = (\n load_embedding_matrix(tech_reviews))\n labels = np.array([(x + 1) for x in labels])\n print(itemfreq(labels))\n print(labels.shape)\n N_FOLDS = 3\n fbeta_scores = []\n skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)\n for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences,\n labels)):\n print('Fold %d' % (j + 1))\n sentence_train, aspect_train, y_train = padded_sequences[train_idx\n ], aspect_sequences[train_idx], labels[train_idx]\n sentence_test, aspect_test, y_test = padded_sequences[test_idx\n ], aspect_sequences[test_idx], labels[test_idx]\n y_train = to_categorical(y_train, 3)\n y_test = to_categorical(y_test, 3)\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=\n EMBEDDING_DIM, mask_zero=MASK_ZEROS, weights=EMBEDDING_WEIGHTS,\n trainable=False)\n aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM,\n mask_zero=MASK_ZEROS, trainable=True)\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n sentence_embedding = sentence_embedding(sentence_ip)\n aspect_embedding = aspect_embedding(aspect_ip)\n aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid',\n use_bias=False, name='aspect_attention')(aspect_embedding)\n aspect_embedding = multiply([aspect_embedding, aspect_attention])\n sentence_embedding = concatenate([sentence_embedding, aspect_embedding]\n )\n x = LSTM(256)(sentence_embedding)\n x = Dense(3, activation='softmax')(x)\n model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['acc', fbeta_score])\n print(model.summary())\n model.fit([sentence_train, aspect_train], y_train, epochs=5,\n verbose=1, validation_data=([sentence_test, aspect_test], y_test))\n scores 
= model.evaluate([sentence_test, aspect_test], y_test)\n fbeta_scores.append(scores[-1])\n print('Average fbeta score : ', sum(fbeta_scores) / len(fbeta_scores))\n\n\ndef model_3():\n K.clear_session()\n tech_reviews, food_reviews = load_and_clean()\n embedding_matrix, aspect_sequences, padded_sequences, labels = (\n load_embedding_matrix(food_reviews))\n labels = np.array([(x + 1) for x in labels])\n print(itemfreq(labels))\n N_FOLDS = 10\n skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)\n f = open('history.txt', 'w+')\n for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences,\n labels)):\n print('Fold %d' % (j + 1))\n sentence_train, y_train = padded_sequences[train_idx], labels[train_idx\n ]\n sentence_test, y_test = padded_sequences[test_idx], labels[test_idx]\n y_train = to_categorical(y_train, 3)\n y_test = to_categorical(y_test, 3)\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=\n EMBEDDING_DIM, mask_zero=MASK_ZEROS, weights=EMBEDDING_WEIGHTS,\n trainable=False)\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n sentence_embedding = sentence_embedding(sentence_ip)\n x = LSTM(256, dropout=0.2, recurrent_dropout=0.2)(sentence_embedding)\n x = Dense(3, activation='softmax')(x)\n model = Model(inputs=sentence_ip, outputs=x)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['acc', f1, precision, recall])\n print(model.summary())\n history = model.fit(sentence_train, y_train, epochs=10, verbose=1,\n validation_data=(sentence_test, y_test))\n f.write('\\nFold %d\\n' % (j + 1))\n f.write(str(history.history['acc']))\n f.write(str(history.history['val_acc']))\n f.write(str(history.history['f1']))\n f.write(str(history.history['precision']))\n f.write(str(history.history['recall']))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef f1(y_true, y_pred):\n\n def recall(y_true, y_pred):\n \"\"\"Recall metric.\n\n Only computes a batch-wise average of recall.\n\n Computes the recall, a metric for multi-label classification of\n how many relevant items are selected.\n \"\"\"\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall\n\n def precision(y_true, y_pred):\n \"\"\"Precision metric.\n\n Only computes a batch-wise average of precision.\n\n Computes the precision, a metric for multi-label classification of\n how many selected items are relevant.\n \"\"\"\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision\n precision = precision(y_true, y_pred)\n recall = recall(y_true, y_pred)\n return 2 * (precision * recall / (precision + recall + K.epsilon()))\n\n\ndef precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision\n\n\ndef recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall\n\n\nclass MaskedGlobalAveragePooling1D(GlobalAveragePooling1D):\n\n def __init__(self, **kwargs):\n super(MaskedGlobalAveragePooling1D, self).__init__(**kwargs)\n self.supports_masking = True\n\n\nclass MaskableFlatten(Flatten):\n\n def __init__(self, **kwargs):\n super(MaskableFlatten, self).__init__(**kwargs)\n self.supports_masking = True\n\n\n<mask token>\n\n\ndef output_shape(input_shape):\n shape = list(input_shape)\n shape[-1] /= 
2\n print(shape)\n return tuple(shape)\n\n\ndef model_2():\n K.clear_session()\n tech_reviews, food_reviews = load_and_clean()\n embedding_matrix, aspect_sequences, padded_sequences, labels = (\n load_embedding_matrix(food_reviews))\n print(itemfreq(labels))\n indices = np.arange(0, padded_sequences.shape[0], step=1, dtype=int)\n np.random.shuffle(indices)\n padded_sequences = padded_sequences[indices]\n labels = to_categorical(labels, num_classes=NUM_CLASSES)\n labels = labels[indices]\n aspect_sequences = aspect_sequences[indices]\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM,\n mask_zero=MASK_ZEROS, weights=EMBEDDING_WEIGHTS, trainable=False)\n aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM,\n mask_zero=MASK_ZEROS, trainable=True)\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n sentence_embedding = sentence_embedding(sentence_ip)\n aspect_embedding = aspect_embedding(aspect_ip)\n aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid', use_bias=\n False, name='aspect_attention')(aspect_embedding)\n aspect_embedding = multiply([aspect_embedding, aspect_attention])\n sentence_embedding = concatenate([sentence_embedding, aspect_embedding])\n x = LSTM(256)(sentence_embedding)\n x = Dense(3, activation='softmax')(x)\n model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['acc'])\n print(model.summary())\n model.fit([padded_sequences, aspect_sequences], labels, epochs=10,\n verbose=1, validation_split=0.2)\n\n\ndef model_2_CV():\n K.clear_session()\n tech_reviews, food_reviews = load_and_clean()\n embedding_matrix, aspect_sequences, padded_sequences, labels = (\n load_embedding_matrix(tech_reviews))\n labels = np.array([(x + 1) for x in labels])\n print(itemfreq(labels))\n print(labels.shape)\n N_FOLDS = 3\n fbeta_scores = []\n skf = 
StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)\n for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences,\n labels)):\n print('Fold %d' % (j + 1))\n sentence_train, aspect_train, y_train = padded_sequences[train_idx\n ], aspect_sequences[train_idx], labels[train_idx]\n sentence_test, aspect_test, y_test = padded_sequences[test_idx\n ], aspect_sequences[test_idx], labels[test_idx]\n y_train = to_categorical(y_train, 3)\n y_test = to_categorical(y_test, 3)\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=\n EMBEDDING_DIM, mask_zero=MASK_ZEROS, weights=EMBEDDING_WEIGHTS,\n trainable=False)\n aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM,\n mask_zero=MASK_ZEROS, trainable=True)\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n sentence_embedding = sentence_embedding(sentence_ip)\n aspect_embedding = aspect_embedding(aspect_ip)\n aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid',\n use_bias=False, name='aspect_attention')(aspect_embedding)\n aspect_embedding = multiply([aspect_embedding, aspect_attention])\n sentence_embedding = concatenate([sentence_embedding, aspect_embedding]\n )\n x = LSTM(256)(sentence_embedding)\n x = Dense(3, activation='softmax')(x)\n model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['acc', fbeta_score])\n print(model.summary())\n model.fit([sentence_train, aspect_train], y_train, epochs=5,\n verbose=1, validation_data=([sentence_test, aspect_test], y_test))\n scores = model.evaluate([sentence_test, aspect_test], y_test)\n fbeta_scores.append(scores[-1])\n print('Average fbeta score : ', sum(fbeta_scores) / len(fbeta_scores))\n\n\ndef model_3():\n K.clear_session()\n tech_reviews, food_reviews = load_and_clean()\n embedding_matrix, aspect_sequences, padded_sequences, labels = (\n 
load_embedding_matrix(food_reviews))\n labels = np.array([(x + 1) for x in labels])\n print(itemfreq(labels))\n N_FOLDS = 10\n skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)\n f = open('history.txt', 'w+')\n for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences,\n labels)):\n print('Fold %d' % (j + 1))\n sentence_train, y_train = padded_sequences[train_idx], labels[train_idx\n ]\n sentence_test, y_test = padded_sequences[test_idx], labels[test_idx]\n y_train = to_categorical(y_train, 3)\n y_test = to_categorical(y_test, 3)\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=\n EMBEDDING_DIM, mask_zero=MASK_ZEROS, weights=EMBEDDING_WEIGHTS,\n trainable=False)\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n sentence_embedding = sentence_embedding(sentence_ip)\n x = LSTM(256, dropout=0.2, recurrent_dropout=0.2)(sentence_embedding)\n x = Dense(3, activation='softmax')(x)\n model = Model(inputs=sentence_ip, outputs=x)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['acc', f1, precision, recall])\n print(model.summary())\n history = model.fit(sentence_train, y_train, epochs=10, verbose=1,\n validation_data=(sentence_test, y_test))\n f.write('\\nFold %d\\n' % (j + 1))\n f.write(str(history.history['acc']))\n f.write(str(history.history['val_acc']))\n f.write(str(history.history['f1']))\n f.write(str(history.history['precision']))\n f.write(str(history.history['recall']))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef f1(y_true, y_pred):\n\n def recall(y_true, y_pred):\n \"\"\"Recall metric.\n\n Only computes a batch-wise average of recall.\n\n Computes the recall, a metric for multi-label classification of\n how many relevant items are selected.\n \"\"\"\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall\n\n def precision(y_true, y_pred):\n \"\"\"Precision metric.\n\n Only computes a batch-wise average of precision.\n\n Computes the precision, a metric for multi-label classification of\n how many selected items are relevant.\n \"\"\"\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision\n precision = precision(y_true, y_pred)\n recall = recall(y_true, y_pred)\n return 2 * (precision * recall / (precision + recall + K.epsilon()))\n\n\ndef precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision\n\n\ndef recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall\n\n\nclass MaskedGlobalAveragePooling1D(GlobalAveragePooling1D):\n\n def __init__(self, **kwargs):\n super(MaskedGlobalAveragePooling1D, self).__init__(**kwargs)\n self.supports_masking = True\n\n\nclass MaskableFlatten(Flatten):\n\n def __init__(self, **kwargs):\n super(MaskableFlatten, self).__init__(**kwargs)\n self.supports_masking = True\n\n\nDATA1_TRAIN_PATH = '../data/data_1_train.csv'\nDATA2_TRAIN_PATH = 
'../data/data_2_train.csv'\nEMBEDDING_DIR = '../embeddings/'\nEMBEDDING_TYPE = 'glove.6B.300d.txt'\nEMBEDDING_PICKLE_DIR = 'embeddings_index.p'\nEMBEDDING_ERROR_DIR = 'embeddings_error.p'\nASPECT_EMBEDDING_DIR = 'aspect_embeddings.p'\nTOKENIZER_DIR = 'embeddings/tokenizer.p'\nMAX_SEQ_LENGTH = 60\nMAX_NB_WORDS = 95000\nEMBEDDING_DIM = 300\naspect_dict = {}\n<mask token>\nNUM_CLASSES = 3\nMAX_SENTENCE_LENGTH = 60\nMAX_NUM_WORDS = 20000\nMAX_NUM_ASPECT_WORDS = 300\nEMBEDDING_DIM = 300\nEMBEDDING_WEIGHTS = None\nMASK_ZEROS = True\n<mask token>\n\n\ndef output_shape(input_shape):\n shape = list(input_shape)\n shape[-1] /= 2\n print(shape)\n return tuple(shape)\n\n\ndef model_2():\n K.clear_session()\n tech_reviews, food_reviews = load_and_clean()\n embedding_matrix, aspect_sequences, padded_sequences, labels = (\n load_embedding_matrix(food_reviews))\n print(itemfreq(labels))\n indices = np.arange(0, padded_sequences.shape[0], step=1, dtype=int)\n np.random.shuffle(indices)\n padded_sequences = padded_sequences[indices]\n labels = to_categorical(labels, num_classes=NUM_CLASSES)\n labels = labels[indices]\n aspect_sequences = aspect_sequences[indices]\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM,\n mask_zero=MASK_ZEROS, weights=EMBEDDING_WEIGHTS, trainable=False)\n aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM,\n mask_zero=MASK_ZEROS, trainable=True)\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n sentence_embedding = sentence_embedding(sentence_ip)\n aspect_embedding = aspect_embedding(aspect_ip)\n aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid', use_bias=\n False, name='aspect_attention')(aspect_embedding)\n aspect_embedding = multiply([aspect_embedding, aspect_attention])\n sentence_embedding = concatenate([sentence_embedding, aspect_embedding])\n x = LSTM(256)(sentence_embedding)\n x = Dense(3, activation='softmax')(x)\n 
model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['acc'])\n print(model.summary())\n model.fit([padded_sequences, aspect_sequences], labels, epochs=10,\n verbose=1, validation_split=0.2)\n\n\ndef model_2_CV():\n K.clear_session()\n tech_reviews, food_reviews = load_and_clean()\n embedding_matrix, aspect_sequences, padded_sequences, labels = (\n load_embedding_matrix(tech_reviews))\n labels = np.array([(x + 1) for x in labels])\n print(itemfreq(labels))\n print(labels.shape)\n N_FOLDS = 3\n fbeta_scores = []\n skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)\n for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences,\n labels)):\n print('Fold %d' % (j + 1))\n sentence_train, aspect_train, y_train = padded_sequences[train_idx\n ], aspect_sequences[train_idx], labels[train_idx]\n sentence_test, aspect_test, y_test = padded_sequences[test_idx\n ], aspect_sequences[test_idx], labels[test_idx]\n y_train = to_categorical(y_train, 3)\n y_test = to_categorical(y_test, 3)\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=\n EMBEDDING_DIM, mask_zero=MASK_ZEROS, weights=EMBEDDING_WEIGHTS,\n trainable=False)\n aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM,\n mask_zero=MASK_ZEROS, trainable=True)\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n sentence_embedding = sentence_embedding(sentence_ip)\n aspect_embedding = aspect_embedding(aspect_ip)\n aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid',\n use_bias=False, name='aspect_attention')(aspect_embedding)\n aspect_embedding = multiply([aspect_embedding, aspect_attention])\n sentence_embedding = concatenate([sentence_embedding, aspect_embedding]\n )\n x = LSTM(256)(sentence_embedding)\n x = Dense(3, activation='softmax')(x)\n model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)\n 
model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['acc', fbeta_score])\n print(model.summary())\n model.fit([sentence_train, aspect_train], y_train, epochs=5,\n verbose=1, validation_data=([sentence_test, aspect_test], y_test))\n scores = model.evaluate([sentence_test, aspect_test], y_test)\n fbeta_scores.append(scores[-1])\n print('Average fbeta score : ', sum(fbeta_scores) / len(fbeta_scores))\n\n\ndef model_3():\n K.clear_session()\n tech_reviews, food_reviews = load_and_clean()\n embedding_matrix, aspect_sequences, padded_sequences, labels = (\n load_embedding_matrix(food_reviews))\n labels = np.array([(x + 1) for x in labels])\n print(itemfreq(labels))\n N_FOLDS = 10\n skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)\n f = open('history.txt', 'w+')\n for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences,\n labels)):\n print('Fold %d' % (j + 1))\n sentence_train, y_train = padded_sequences[train_idx], labels[train_idx\n ]\n sentence_test, y_test = padded_sequences[test_idx], labels[test_idx]\n y_train = to_categorical(y_train, 3)\n y_test = to_categorical(y_test, 3)\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=\n EMBEDDING_DIM, mask_zero=MASK_ZEROS, weights=EMBEDDING_WEIGHTS,\n trainable=False)\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n sentence_embedding = sentence_embedding(sentence_ip)\n x = LSTM(256, dropout=0.2, recurrent_dropout=0.2)(sentence_embedding)\n x = Dense(3, activation='softmax')(x)\n model = Model(inputs=sentence_ip, outputs=x)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['acc', f1, precision, recall])\n print(model.summary())\n history = model.fit(sentence_train, y_train, epochs=10, verbose=1,\n validation_data=(sentence_test, y_test))\n f.write('\\nFold %d\\n' % (j + 1))\n f.write(str(history.history['acc']))\n f.write(str(history.history['val_acc']))\n f.write(str(history.history['f1']))\n 
f.write(str(history.history['precision']))\n f.write(str(history.history['recall']))\n\n\nif __name__ == '__main__':\n model_3()\n",
"step-4": "from scipy.stats import itemfreq\nfrom sklearn.model_selection import StratifiedKFold\nfrom keras_utils.keras_utils import *\nfrom keras.utils.np_utils import to_categorical\nfrom keras.layers import Input, Embedding, Dense, GlobalAveragePooling1D, Flatten\nfrom keras.layers import add, multiply, LSTM, Bidirectional, BatchNormalization, LeakyReLU, concatenate, Lambda\nfrom keras.models import Model\nfrom keras import backend as K\n\n\ndef f1(y_true, y_pred):\n\n def recall(y_true, y_pred):\n \"\"\"Recall metric.\n\n Only computes a batch-wise average of recall.\n\n Computes the recall, a metric for multi-label classification of\n how many relevant items are selected.\n \"\"\"\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall\n\n def precision(y_true, y_pred):\n \"\"\"Precision metric.\n\n Only computes a batch-wise average of precision.\n\n Computes the precision, a metric for multi-label classification of\n how many selected items are relevant.\n \"\"\"\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision\n precision = precision(y_true, y_pred)\n recall = recall(y_true, y_pred)\n return 2 * (precision * recall / (precision + recall + K.epsilon()))\n\n\ndef precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision\n\n\ndef recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall\n\n\nclass 
MaskedGlobalAveragePooling1D(GlobalAveragePooling1D):\n\n def __init__(self, **kwargs):\n super(MaskedGlobalAveragePooling1D, self).__init__(**kwargs)\n self.supports_masking = True\n\n\nclass MaskableFlatten(Flatten):\n\n def __init__(self, **kwargs):\n super(MaskableFlatten, self).__init__(**kwargs)\n self.supports_masking = True\n\n\nDATA1_TRAIN_PATH = '../data/data_1_train.csv'\nDATA2_TRAIN_PATH = '../data/data_2_train.csv'\nEMBEDDING_DIR = '../embeddings/'\nEMBEDDING_TYPE = 'glove.6B.300d.txt'\nEMBEDDING_PICKLE_DIR = 'embeddings_index.p'\nEMBEDDING_ERROR_DIR = 'embeddings_error.p'\nASPECT_EMBEDDING_DIR = 'aspect_embeddings.p'\nTOKENIZER_DIR = 'embeddings/tokenizer.p'\nMAX_SEQ_LENGTH = 60\nMAX_NB_WORDS = 95000\nEMBEDDING_DIM = 300\naspect_dict = {}\n<mask token>\nNUM_CLASSES = 3\nMAX_SENTENCE_LENGTH = 60\nMAX_NUM_WORDS = 20000\nMAX_NUM_ASPECT_WORDS = 300\nEMBEDDING_DIM = 300\nEMBEDDING_WEIGHTS = None\nMASK_ZEROS = True\n<mask token>\n\n\ndef output_shape(input_shape):\n shape = list(input_shape)\n shape[-1] /= 2\n print(shape)\n return tuple(shape)\n\n\ndef model_2():\n K.clear_session()\n tech_reviews, food_reviews = load_and_clean()\n embedding_matrix, aspect_sequences, padded_sequences, labels = (\n load_embedding_matrix(food_reviews))\n print(itemfreq(labels))\n indices = np.arange(0, padded_sequences.shape[0], step=1, dtype=int)\n np.random.shuffle(indices)\n padded_sequences = padded_sequences[indices]\n labels = to_categorical(labels, num_classes=NUM_CLASSES)\n labels = labels[indices]\n aspect_sequences = aspect_sequences[indices]\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM,\n mask_zero=MASK_ZEROS, weights=EMBEDDING_WEIGHTS, trainable=False)\n aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM,\n mask_zero=MASK_ZEROS, trainable=True)\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n sentence_embedding = 
sentence_embedding(sentence_ip)\n aspect_embedding = aspect_embedding(aspect_ip)\n aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid', use_bias=\n False, name='aspect_attention')(aspect_embedding)\n aspect_embedding = multiply([aspect_embedding, aspect_attention])\n sentence_embedding = concatenate([sentence_embedding, aspect_embedding])\n x = LSTM(256)(sentence_embedding)\n x = Dense(3, activation='softmax')(x)\n model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['acc'])\n print(model.summary())\n model.fit([padded_sequences, aspect_sequences], labels, epochs=10,\n verbose=1, validation_split=0.2)\n\n\ndef model_2_CV():\n K.clear_session()\n tech_reviews, food_reviews = load_and_clean()\n embedding_matrix, aspect_sequences, padded_sequences, labels = (\n load_embedding_matrix(tech_reviews))\n labels = np.array([(x + 1) for x in labels])\n print(itemfreq(labels))\n print(labels.shape)\n N_FOLDS = 3\n fbeta_scores = []\n skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)\n for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences,\n labels)):\n print('Fold %d' % (j + 1))\n sentence_train, aspect_train, y_train = padded_sequences[train_idx\n ], aspect_sequences[train_idx], labels[train_idx]\n sentence_test, aspect_test, y_test = padded_sequences[test_idx\n ], aspect_sequences[test_idx], labels[test_idx]\n y_train = to_categorical(y_train, 3)\n y_test = to_categorical(y_test, 3)\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=\n EMBEDDING_DIM, mask_zero=MASK_ZEROS, weights=EMBEDDING_WEIGHTS,\n trainable=False)\n aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM,\n mask_zero=MASK_ZEROS, trainable=True)\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n sentence_embedding = sentence_embedding(sentence_ip)\n aspect_embedding = 
aspect_embedding(aspect_ip)\n aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid',\n use_bias=False, name='aspect_attention')(aspect_embedding)\n aspect_embedding = multiply([aspect_embedding, aspect_attention])\n sentence_embedding = concatenate([sentence_embedding, aspect_embedding]\n )\n x = LSTM(256)(sentence_embedding)\n x = Dense(3, activation='softmax')(x)\n model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['acc', fbeta_score])\n print(model.summary())\n model.fit([sentence_train, aspect_train], y_train, epochs=5,\n verbose=1, validation_data=([sentence_test, aspect_test], y_test))\n scores = model.evaluate([sentence_test, aspect_test], y_test)\n fbeta_scores.append(scores[-1])\n print('Average fbeta score : ', sum(fbeta_scores) / len(fbeta_scores))\n\n\ndef model_3():\n K.clear_session()\n tech_reviews, food_reviews = load_and_clean()\n embedding_matrix, aspect_sequences, padded_sequences, labels = (\n load_embedding_matrix(food_reviews))\n labels = np.array([(x + 1) for x in labels])\n print(itemfreq(labels))\n N_FOLDS = 10\n skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)\n f = open('history.txt', 'w+')\n for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences,\n labels)):\n print('Fold %d' % (j + 1))\n sentence_train, y_train = padded_sequences[train_idx], labels[train_idx\n ]\n sentence_test, y_test = padded_sequences[test_idx], labels[test_idx]\n y_train = to_categorical(y_train, 3)\n y_test = to_categorical(y_test, 3)\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=\n EMBEDDING_DIM, mask_zero=MASK_ZEROS, weights=EMBEDDING_WEIGHTS,\n trainable=False)\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n sentence_embedding = sentence_embedding(sentence_ip)\n x = LSTM(256, dropout=0.2, recurrent_dropout=0.2)(sentence_embedding)\n x = Dense(3, activation='softmax')(x)\n model = Model(inputs=sentence_ip, 
outputs=x)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['acc', f1, precision, recall])\n print(model.summary())\n history = model.fit(sentence_train, y_train, epochs=10, verbose=1,\n validation_data=(sentence_test, y_test))\n f.write('\\nFold %d\\n' % (j + 1))\n f.write(str(history.history['acc']))\n f.write(str(history.history['val_acc']))\n f.write(str(history.history['f1']))\n f.write(str(history.history['precision']))\n f.write(str(history.history['recall']))\n\n\nif __name__ == '__main__':\n model_3()\n",
"step-5": "from scipy.stats import itemfreq\r\nfrom sklearn.model_selection import StratifiedKFold\r\n\r\nfrom keras_utils.keras_utils import *\r\n\r\nfrom keras.utils.np_utils import to_categorical\r\nfrom keras.layers import Input, Embedding, Dense, GlobalAveragePooling1D, Flatten\r\nfrom keras.layers import add, multiply, LSTM, Bidirectional, BatchNormalization, LeakyReLU, concatenate, Lambda\r\nfrom keras.models import Model\r\nfrom keras import backend as K\r\n\r\n\r\ndef f1(y_true, y_pred):\r\n def recall(y_true, y_pred):\r\n \"\"\"Recall metric.\r\n\r\n Only computes a batch-wise average of recall.\r\n\r\n Computes the recall, a metric for multi-label classification of\r\n how many relevant items are selected.\r\n \"\"\"\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\r\n recall = true_positives / (possible_positives + K.epsilon())\r\n return recall\r\n\r\n def precision(y_true, y_pred):\r\n \"\"\"Precision metric.\r\n\r\n Only computes a batch-wise average of precision.\r\n\r\n Computes the precision, a metric for multi-label classification of\r\n how many selected items are relevant.\r\n \"\"\"\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision\r\n\r\n precision = precision(y_true, y_pred)\r\n recall = recall(y_true, y_pred)\r\n return 2 * ((precision * recall) / (precision + recall + K.epsilon()))\r\n\r\n\r\ndef precision(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision\r\n\r\n\r\ndef recall(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n possible_positives = 
K.sum(K.round(K.clip(y_true, 0, 1)))\r\n recall = true_positives / (possible_positives + K.epsilon())\r\n return recall\r\n\r\n\r\nclass MaskedGlobalAveragePooling1D(GlobalAveragePooling1D):\r\n\r\n def __init__(self, **kwargs):\r\n super(MaskedGlobalAveragePooling1D, self).__init__(**kwargs)\r\n self.supports_masking = True\r\n\r\n\r\nclass MaskableFlatten(Flatten):\r\n\r\n def __init__(self, **kwargs):\r\n super(MaskableFlatten, self).__init__(**kwargs)\r\n self.supports_masking = True\r\n\r\n\r\n# train data path\r\nDATA1_TRAIN_PATH = '../data/data_1_train.csv'\r\nDATA2_TRAIN_PATH = '../data/data_2_train.csv'\r\n\r\n# GLoVe pre-trained word vectors path\r\nEMBEDDING_DIR = '../embeddings/'\r\nEMBEDDING_TYPE = 'glove.6B.300d.txt' # glove.6B.300d.txt\r\nEMBEDDING_PICKLE_DIR = 'embeddings_index.p'\r\nEMBEDDING_ERROR_DIR = 'embeddings_error.p'\r\nASPECT_EMBEDDING_DIR = 'aspect_embeddings.p'\r\n\r\n# tokenizer path\r\nTOKENIZER_DIR = 'embeddings/tokenizer.p'\r\n\r\nMAX_SEQ_LENGTH = 60\r\nMAX_NB_WORDS = 95000\r\nEMBEDDING_DIM = 300\r\n\r\n# aspect dictionary\r\naspect_dict = {}\r\n\r\n\"\"\"\r\nWhat this model does:\r\n\r\n2 ip - 1 op model : 2 ip = sentence and aspect sentence\r\n\r\nShared embedding layer = reduce # of params and chance to overfit.\r\nsentence embedding = sentence passed through embedding layer (keep for later)\r\naspect embedding = aspect sentence passed through embedding layer \r\n\r\nOn this aspect embedding, use attention mechanism to jointly learn what is the \"best\" augmentation to the sentence embedding\r\n- Dense layer that maps 1 : 1 between the aspect embedding and the aspect attention\r\n - Softmax forces it to choose the \"parts\" of the sentence that help the most in training\r\n - No bias needed for attention\r\n\r\n- Next is to actually augment the aspect embeddings with this learned attention\r\n - The element-wise multiplication forces many embeddings to become close to zero\r\n - Only a few will remain \"strong\" after this 
multiplication. These are the \"important\" words in the aspect sentence\r\n\r\nFinally, augment the original sentence embeddings with the attended aspect embeddings\r\n- This will \"add\" some strength to the embeddings of the \"important\" words\r\n- Remaining words will not be impacted at all (since they are added with near zero values)\r\n\r\nBenefits of this model\r\n- Choose if you want to send a unique aspect sentence for the corresponding sentence\r\n - By this I mean, you have a choice\r\n - 1) Use the original sentence as aspect input.\r\n In doing so, it is basically like saying learn on your own what the aspect word is\r\n It may not give much benefit, as the attended vector has the chance of being all equal (no attention)\r\n - 2) Use a true aspect encoding as the aspect input.\r\n Since you are sharing the embedding now, you cannot use random / own assigned aspects anymore.\r\n The aspect ids that you pass will now be from the original embedding matrix using the word_index\r\n dict that Keras gives you.\r\n\r\n In this case, an aspect sentence would be of the form : \r\n [0 0 ... 32506 66049 5968 0 0 ...] \r\n Here 32506 = \"Apple\", 66049 = \"Macbook\" 5968 = \"Pro\" (say)\r\n\r\n\"\"\"\r\n\r\nNUM_CLASSES = 3 # 0 = neg, 1 = neutral, 2 = pos\r\n\r\nMAX_SENTENCE_LENGTH = 60\r\nMAX_NUM_WORDS = 20000 # this will be number of unique \"words\" (n-grams etc) there are\r\nMAX_NUM_ASPECT_WORDS = 300 # this will be the number of unique aspect \"words\" (uni-grams only)\r\n\r\nEMBEDDING_DIM = 300\r\nEMBEDDING_WEIGHTS = None\r\n\r\nMASK_ZEROS = True # this can be true ONLY for RNN models. 
If even 1 CNN is there, it will crash\r\n\r\n#\r\n# embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM, mask_zero=MASK_ZEROS,\r\n# weights=EMBEDDING_WEIGHTS, trainable=False)\r\n#\r\n# sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\r\n# aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\r\n#\r\n# sentence_embedding = embedding(sentence_ip) # Note: these are same embedding layer\r\n# aspect_embedding = embedding(aspect_ip) # Note: these are same embedding layer\r\n#\r\n# # Create the attention vector for the aspect embeddings\r\n# aspect_attention = Dense(EMBEDDING_DIM, activation='softmax', use_bias=False,\r\n# name='aspect_attention')(aspect_embedding)\r\n#\r\n# # dampen the aspect embeddings according to the attention with an element-wise multiplication\r\n# aspect_embedding = multiply([aspect_embedding, aspect_attention])\r\n#\r\n# # augment the sample embedding with information from the attended aspect embedding\r\n# sentence_embedding = add([sentence_embedding, aspect_embedding])\r\n#\r\n# # now you can continue with whatever layer other than CNNs\r\n#\r\n# x = LSTM(100)(sentence_embedding)\r\n# x = Dense(NUM_CLASSES, activation='softmax')(x)\r\n#\r\n# model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)\r\n#\r\n# model.summary()\r\n#\r\n#\r\n# from keras.utils.vis_utils import plot_model\r\n# plot_model(model, to_file='shared_embedding.png', show_shapes=False, show_layer_names=True)\r\n#\r\n\r\n\"\"\"\r\nWhat this model does:\r\n\r\n2 ip - 1 op model : 2 ip = sentence and aspect sentence\r\n\r\nDisjoing embedding layer = more # of params and chance to overfit.\r\nsentence embedding = sentence passed through embedding layer (keep for later ; not learned)\r\naspect embedding = aspect sentence passed through embedding layer (learned)\r\n\r\nBenefits of this model\r\n- Use a true aspect encoding as the aspect input.\r\n Since you are learning the embedding now, you can use own assigned aspects.\r\n \r\n In this case, 
an aspect sentence would be of the form : \r\n [0 0 ... 2 2 2 0 0 ...] \r\n Here 2 = \"Apple\", 2 = \"Macbook\" 2 = \"Pro\" (say)\r\n Therefore, the id is given by you, and is shared over all of the aspect words for a given aspect term.\r\n\r\n\"\"\"\r\n\r\n\r\ndef output_shape(input_shape):\r\n shape = list(input_shape)\r\n shape[-1] /= 2\r\n print(shape)\r\n return tuple(shape)\r\n\r\n\r\ndef model_2():\r\n K.clear_session()\r\n tech_reviews, food_reviews = load_and_clean()\r\n embedding_matrix, aspect_sequences, padded_sequences, labels = load_embedding_matrix(food_reviews)\r\n # labels = [x+1 for x in labels]\r\n print(itemfreq(labels))\r\n\r\n indices = np.arange(0, padded_sequences.shape[0], step=1, dtype=int)\r\n np.random.shuffle(indices)\r\n padded_sequences = padded_sequences[indices]\r\n labels = to_categorical(labels, num_classes=NUM_CLASSES)\r\n labels = labels[indices]\r\n aspect_sequences = aspect_sequences[indices]\r\n\r\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM, mask_zero=MASK_ZEROS,\r\n weights=EMBEDDING_WEIGHTS, trainable=False)\r\n\r\n # aspect_embedding = Embedding(MAX_NUM_ASPECT_WORDS, EMBEDDING_DIM, mask_zero=MASK_ZEROS, trainable=True)\r\n # this needs to be True\r\n aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM, mask_zero=MASK_ZEROS, trainable=True)\r\n\r\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\r\n aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\r\n\r\n sentence_embedding = sentence_embedding(sentence_ip) # Note: these are two different embeddings\r\n aspect_embedding = aspect_embedding(aspect_ip) # Note: these are two different embeddings\r\n\r\n # Create the attention vector for the aspect embeddings\r\n aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid', use_bias=False,\r\n name='aspect_attention')(aspect_embedding)\r\n\r\n # dampen the aspect embeddings according to the attention with an element-wise multiplication\r\n aspect_embedding 
= multiply([aspect_embedding, aspect_attention])\r\n # augment the sample embedding with information from the attended aspect embedding\r\n sentence_embedding = concatenate([sentence_embedding, aspect_embedding])\r\n\r\n # now you can continue with whatever layer other than CNNs\r\n\r\n # x = MaskedGlobalAveragePooling1D()(sentence_embedding)\r\n # x = MaskableFlatten()(sentence_embedding)\r\n x = LSTM(256)(sentence_embedding)\r\n # y = Lambda(lambda z: z[:, :, :NUM_CELLS//2], output_shape=output_shape)(x)\r\n # x = Dense(NUM_CELLS//2, activation='softmax', use_bias=False)(x)\r\n\r\n # x = multiply([x, y])\r\n # x = MaskedGlobalAveragePooling1D()(x)\r\n # x = Dense(256, activation='linear', kernel_initializer='he_normal')(x)\r\n # x = BatchNormalization()(x)\r\n # x = LeakyReLU()(x)\r\n x = Dense(3, activation='softmax')(x)\r\n model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)\r\n\r\n model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])\r\n\r\n print(model.summary())\r\n\r\n model.fit([padded_sequences, aspect_sequences], labels, epochs=10, verbose=1, validation_split=0.2)\r\n\r\n # from keras.utils.vis_utils import plot_model\r\n # plot_model(model, to_file='learned_embedding.png', show_shapes=False, show_layer_names=True)\r\n\r\n\r\ndef model_2_CV():\r\n K.clear_session()\r\n tech_reviews, food_reviews = load_and_clean()\r\n embedding_matrix, aspect_sequences, padded_sequences, labels = load_embedding_matrix(tech_reviews)\r\n labels = np.array([x + 1 for x in labels])\r\n print(itemfreq(labels))\r\n\r\n # Random shuffling of padded, aspect sequences and labels\r\n # indices = np.arange(0, padded_sequences.shape[0], step=1, dtype=int)\r\n # np.random.shuffle(indices)\r\n # padded_sequences = padded_sequences[indices]\r\n # labels = to_categorical(labels, num_classes=NUM_CLASSES)\r\n # labels = labels[indices]\r\n # aspect_sequences = aspect_sequences[indices]\r\n print(labels.shape)\r\n\r\n N_FOLDS = 3\r\n fbeta_scores = []\r\n 
skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)\r\n for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences, labels)):\r\n print('Fold %d' % (j + 1))\r\n sentence_train, aspect_train, y_train = padded_sequences[train_idx], aspect_sequences[train_idx], \\\r\n labels[train_idx]\r\n sentence_test, aspect_test, y_test = padded_sequences[test_idx], aspect_sequences[test_idx], labels[test_idx]\r\n\r\n y_train = to_categorical(y_train, 3)\r\n y_test = to_categorical(y_test, 3)\r\n\r\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM, mask_zero=MASK_ZEROS,\r\n weights=EMBEDDING_WEIGHTS, trainable=False)\r\n aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM, mask_zero=MASK_ZEROS, trainable=True)\r\n\r\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\r\n aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\r\n\r\n sentence_embedding = sentence_embedding(sentence_ip) # Note: these are two different embeddings\r\n aspect_embedding = aspect_embedding(aspect_ip) # Note: these are two different embeddings\r\n\r\n # Create the attention vector for the aspect embeddings\r\n aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid', use_bias=False,\r\n name='aspect_attention')(aspect_embedding)\r\n # dampen the aspect embeddings according to the attention with an element-wise multiplication\r\n aspect_embedding = multiply([aspect_embedding, aspect_attention])\r\n # augment the sample embedding with information from the attended aspect embedding\r\n sentence_embedding = concatenate([sentence_embedding, aspect_embedding])\r\n x = LSTM(256)(sentence_embedding)\r\n x = Dense(3, activation='softmax')(x)\r\n model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)\r\n\r\n model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc', fbeta_score])\r\n\r\n print(model.summary())\r\n\r\n model.fit([sentence_train, aspect_train], y_train, epochs=5, verbose=1,\r\n 
validation_data=([sentence_test, aspect_test], y_test))\r\n\r\n scores = model.evaluate([sentence_test, aspect_test], y_test)\r\n fbeta_scores.append(scores[-1])\r\n\r\n print(\"Average fbeta score : \", sum(fbeta_scores) / len(fbeta_scores))\r\n\r\n\r\ndef model_3():\r\n K.clear_session()\r\n tech_reviews, food_reviews = load_and_clean()\r\n embedding_matrix, aspect_sequences, padded_sequences, labels = load_embedding_matrix(food_reviews)\r\n labels = np.array([x + 1 for x in labels])\r\n print(itemfreq(labels))\r\n\r\n N_FOLDS = 10\r\n skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)\r\n f = open('history.txt', 'w+')\r\n for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences, labels)):\r\n print('Fold %d' % (j + 1))\r\n sentence_train, y_train = padded_sequences[train_idx], labels[train_idx]\r\n sentence_test, y_test = padded_sequences[test_idx], labels[test_idx]\r\n\r\n y_train = to_categorical(y_train, 3)\r\n y_test = to_categorical(y_test, 3)\r\n\r\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM, mask_zero=MASK_ZEROS,\r\n weights=EMBEDDING_WEIGHTS, trainable=False)\r\n # labels = to_categorical(labels, 3)\r\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\r\n sentence_embedding = sentence_embedding(sentence_ip) # Note: these are two different embeddings\r\n x = LSTM(256, dropout=0.2, recurrent_dropout=0.2)(sentence_embedding)\r\n x = Dense(3, activation='softmax')(x)\r\n model = Model(inputs=sentence_ip, outputs=x)\r\n model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc', f1, precision, recall])\r\n print(model.summary())\r\n history = model.fit(sentence_train, y_train, epochs=10, verbose=1, validation_data=(sentence_test, y_test))\r\n f.write('\\nFold %d\\n' % (j + 1))\r\n f.write(str(history.history['acc']))\r\n f.write(str(history.history['val_acc']))\r\n f.write(str(history.history['f1']))\r\n f.write(str(history.history['precision']))\r\n 
f.write(str(history.history['recall']))\r\n\r\n\r\nif __name__ == '__main__':\r\n model_3()\r\n",
"step-ids": [
7,
11,
13,
14,
15
]
}
|
[
7,
11,
13,
14,
15
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print(
"""Οδηγίες: Το πρόγραμμα καταχωρει αριθμους σε μια λίστα! Τρέχει σε άπειρο βρόχο, έως ότου πληκτρολογήσεις 'q'.
Αν θελήσεις να βγάλεις το πρώτο στοιχείο της λίστας, πληκτρολόγησε '0r' ενώ,
αν θέλεις να βγάλεις το τελευταιο, πληκτρολόγησε 'r'
"""
)
<|reserved_special_token_0|>
while check == True:
newNumber = input('Δώσε μου τη καταχώρηση σου: ')
if newNumber != 'q' and newNumber != 'r' and newNumber != '0r':
if newNumber[0] != '0':
alist.append(float(newNumber))
check = True
else:
numberToList = list(newNumber)
numberToList.pop(0)
listToNumber = ''.join(numberToList)
alist.insert(0, float(listToNumber))
check = True
print(alist)
elif newNumber == 'r':
print('\n*****Από τη λίστα βγήκε το τελευταίο στοιχειο*****', alist
[len(alist) - 1])
alist.pop(len(alist) - 1)
print(alist)
check = True
elif newNumber == '0r':
print('\n*****Από τη λίστα βγήκε το πρώτο στοιχειο*****', alist[0])
alist.pop(0)
print(alist)
check = True
else:
print('\nΤέλος εφαρμογής!')
check = False
<|reserved_special_token_1|>
print(
"""Οδηγίες: Το πρόγραμμα καταχωρει αριθμους σε μια λίστα! Τρέχει σε άπειρο βρόχο, έως ότου πληκτρολογήσεις 'q'.
Αν θελήσεις να βγάλεις το πρώτο στοιχείο της λίστας, πληκτρολόγησε '0r' ενώ,
αν θέλεις να βγάλεις το τελευταιο, πληκτρολόγησε 'r'
"""
)
newNumber = input('Για να ξεκινήσεις, πάτησε Enter \n')
alist = []
check = True
while check == True:
newNumber = input('Δώσε μου τη καταχώρηση σου: ')
if newNumber != 'q' and newNumber != 'r' and newNumber != '0r':
if newNumber[0] != '0':
alist.append(float(newNumber))
check = True
else:
numberToList = list(newNumber)
numberToList.pop(0)
listToNumber = ''.join(numberToList)
alist.insert(0, float(listToNumber))
check = True
print(alist)
elif newNumber == 'r':
print('\n*****Από τη λίστα βγήκε το τελευταίο στοιχειο*****', alist
[len(alist) - 1])
alist.pop(len(alist) - 1)
print(alist)
check = True
elif newNumber == '0r':
print('\n*****Από τη λίστα βγήκε το πρώτο στοιχειο*****', alist[0])
alist.pop(0)
print(alist)
check = True
else:
print('\nΤέλος εφαρμογής!')
check = False
<|reserved_special_token_1|>
#Άσκηση 3.2: Ουρά δύο άκρων
print("Οδηγίες: Το πρόγραμμα καταχωρει αριθμους σε μια λίστα! Τρέχει σε άπειρο βρόχο, έως ότου πληκτρολογήσεις 'q'. \nΑν θελήσεις να βγάλεις το πρώτο στοιχείο της λίστας, πληκτρολόγησε '0r' ενώ,\nαν θέλεις να βγάλεις το τελευταιο, πληκτρολόγησε 'r'\n ")
newNumber = input("Για να ξεκινήσεις, πάτησε Enter \n")
alist = []
check = True
while check == True :
newNumber = input("Δώσε μου τη καταχώρηση σου: ")
if newNumber != 'q' and newNumber != 'r' and newNumber != '0r' :
if newNumber[0] != '0' :
alist.append(float(newNumber))
check = True
else :
numberToList = list(newNumber)
numberToList.pop(0)
listToNumber = ''.join(numberToList)
alist.insert(0, float(listToNumber))
check = True
print(alist)
elif newNumber == 'r':
print("\n*****Από τη λίστα βγήκε το τελευταίο στοιχειο*****", alist[(len(alist) - 1)])
alist.pop((len(alist))-1)
print(alist)
check = True
elif newNumber == '0r' :
print("\n*****Από τη λίστα βγήκε το πρώτο στοιχειο*****", alist[0])
alist.pop(0)
print(alist)
check = True
else:
print("\nΤέλος εφαρμογής!")
check = False
#παρατηρήσεις :
#1) Στο πρόγραμμα δεν έχει μπει κάποιος έλεγχος για την εισοδο του χρήστη κι έτσι αν πληκτρολογήσει κάτι εκτος από αριθμό ή 'q' / 'r' / '0r' το πρόγραμμα σκάει
#2) Ο έλεγχος με το 'r', '0r' έγινε εκτός της πρώτης εισόδου για να συμπεριλάβουμε τη περίπτωση που η λίστα ειναι κενή. Αντίστοιχα η εκτέλεση του προγραμματος
#θα βγάλει σφάλμα αν παω να αφαιρέσω και το τελευταιο στοιχειο της λίστας και πατήσω 'r' ή '0r'
|
flexible
|
{
"blob_id": "87bcf53d1c93645a08b10ba0d02edf0d5b0a4906",
"index": 5664,
"step-1": "<mask token>\n",
"step-2": "print(\n \"\"\"Οδηγίες: Το πρόγραμμα καταχωρει αριθμους σε μια λίστα! Τρέχει σε άπειρο βρόχο, έως ότου πληκτρολογήσεις 'q'. \nΑν θελήσεις να βγάλεις το πρώτο στοιχείο της λίστας, πληκτρολόγησε '0r' ενώ,\nαν θέλεις να βγάλεις το τελευταιο, πληκτρολόγησε 'r'\n \"\"\"\n )\n<mask token>\nwhile check == True:\n newNumber = input('Δώσε μου τη καταχώρηση σου: ')\n if newNumber != 'q' and newNumber != 'r' and newNumber != '0r':\n if newNumber[0] != '0':\n alist.append(float(newNumber))\n check = True\n else:\n numberToList = list(newNumber)\n numberToList.pop(0)\n listToNumber = ''.join(numberToList)\n alist.insert(0, float(listToNumber))\n check = True\n print(alist)\n elif newNumber == 'r':\n print('\\n*****Από τη λίστα βγήκε το τελευταίο στοιχειο*****', alist\n [len(alist) - 1])\n alist.pop(len(alist) - 1)\n print(alist)\n check = True\n elif newNumber == '0r':\n print('\\n*****Από τη λίστα βγήκε το πρώτο στοιχειο*****', alist[0])\n alist.pop(0)\n print(alist)\n check = True\n else:\n print('\\nΤέλος εφαρμογής!')\n check = False\n",
"step-3": "print(\n \"\"\"Οδηγίες: Το πρόγραμμα καταχωρει αριθμους σε μια λίστα! Τρέχει σε άπειρο βρόχο, έως ότου πληκτρολογήσεις 'q'. \nΑν θελήσεις να βγάλεις το πρώτο στοιχείο της λίστας, πληκτρολόγησε '0r' ενώ,\nαν θέλεις να βγάλεις το τελευταιο, πληκτρολόγησε 'r'\n \"\"\"\n )\nnewNumber = input('Για να ξεκινήσεις, πάτησε Enter \\n')\nalist = []\ncheck = True\nwhile check == True:\n newNumber = input('Δώσε μου τη καταχώρηση σου: ')\n if newNumber != 'q' and newNumber != 'r' and newNumber != '0r':\n if newNumber[0] != '0':\n alist.append(float(newNumber))\n check = True\n else:\n numberToList = list(newNumber)\n numberToList.pop(0)\n listToNumber = ''.join(numberToList)\n alist.insert(0, float(listToNumber))\n check = True\n print(alist)\n elif newNumber == 'r':\n print('\\n*****Από τη λίστα βγήκε το τελευταίο στοιχειο*****', alist\n [len(alist) - 1])\n alist.pop(len(alist) - 1)\n print(alist)\n check = True\n elif newNumber == '0r':\n print('\\n*****Από τη λίστα βγήκε το πρώτο στοιχειο*****', alist[0])\n alist.pop(0)\n print(alist)\n check = True\n else:\n print('\\nΤέλος εφαρμογής!')\n check = False\n",
"step-4": "#Άσκηση 3.2: Ουρά δύο άκρων\r\n\r\nprint(\"Οδηγίες: Το πρόγραμμα καταχωρει αριθμους σε μια λίστα! Τρέχει σε άπειρο βρόχο, έως ότου πληκτρολογήσεις 'q'. \\nΑν θελήσεις να βγάλεις το πρώτο στοιχείο της λίστας, πληκτρολόγησε '0r' ενώ,\\nαν θέλεις να βγάλεις το τελευταιο, πληκτρολόγησε 'r'\\n \")\r\n\r\nnewNumber = input(\"Για να ξεκινήσεις, πάτησε Enter \\n\")\r\nalist = []\r\ncheck = True\r\n\r\nwhile check == True :\r\n \r\n newNumber = input(\"Δώσε μου τη καταχώρηση σου: \")\r\n if newNumber != 'q' and newNumber != 'r' and newNumber != '0r' :\r\n if newNumber[0] != '0' :\r\n alist.append(float(newNumber))\r\n check = True \r\n else :\r\n numberToList = list(newNumber)\r\n numberToList.pop(0)\r\n listToNumber = ''.join(numberToList)\r\n alist.insert(0, float(listToNumber))\r\n check = True\r\n print(alist)\r\n\r\n \r\n elif newNumber == 'r':\r\n print(\"\\n*****Από τη λίστα βγήκε το τελευταίο στοιχειο*****\", alist[(len(alist) - 1)])\r\n alist.pop((len(alist))-1)\r\n print(alist)\r\n check = True\r\n elif newNumber == '0r' :\r\n print(\"\\n*****Από τη λίστα βγήκε το πρώτο στοιχειο*****\", alist[0])\r\n alist.pop(0)\r\n print(alist)\r\n check = True\r\n \r\n else:\r\n print(\"\\nΤέλος εφαρμογής!\")\r\n check = False\r\n\r\n \r\n#παρατηρήσεις :\r\n#1) Στο πρόγραμμα δεν έχει μπει κάποιος έλεγχος για την εισοδο του χρήστη κι έτσι αν πληκτρολογήσει κάτι εκτος από αριθμό ή 'q' / 'r' / '0r' το πρόγραμμα σκάει\r\n#2) Ο έλεγχος με το 'r', '0r' έγινε εκτός της πρώτης εισόδου για να συμπεριλάβουμε τη περίπτωση που η λίστα ειναι κενή. Αντίστοιχα η εκτέλεση του προγραμματος\r\n #θα βγάλει σφάλμα αν παω να αφαιρέσω και το τελευταιο στοιχειο της λίστας και πατήσω 'r' ή '0r'\r\n\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class A_Swerve(Scene):
def construct(self):
chassis = Square(side_length=2, stroke_width=0, fill_color=GRAY,
fill_opacity=1).shift(2 * RIGHT)
fr = Dot().shift(UP + 3 * RIGHT)
fl = Dot().shift(UP + RIGHT)
rl = Dot().shift(DOWN + RIGHT)
rr = Dot().shift(DOWN + 3 * RIGHT)
x_tracker = ValueTracker(0)
y_tracker = ValueTracker(0.001)
rot_tracker = ValueTracker(0)
def updateFRArrow(arrow):
vector = calculateVectors(y_tracker.get_value(), x_tracker.
get_value(), rot_tracker.get_value(), 0)[0]
arrow.put_start_and_end_on(UP + 3 * RIGHT, np.array(UP + 3 *
RIGHT + vector[0] * np.cos(np.radians(vector[1])) * UP +
vector[0] * np.sin(np.radians(vector[1])) * RIGHT))
def updateFLArrow(arrow):
vector = calculateVectors(y_tracker.get_value(), x_tracker.
get_value(), rot_tracker.get_value(), 0)[1]
arrow.put_start_and_end_on(UP + RIGHT, np.array(UP + RIGHT +
vector[0] * np.cos(np.radians(vector[1])) * UP + vector[0] *
np.sin(np.radians(vector[1])) * RIGHT))
def updateRLArrow(arrow):
vector = calculateVectors(y_tracker.get_value(), x_tracker.
get_value(), rot_tracker.get_value(), 0)[2]
arrow.put_start_and_end_on(DOWN + RIGHT, np.array(DOWN + RIGHT +
vector[0] * np.cos(np.radians(vector[1])) * UP + vector[0] *
np.sin(np.radians(vector[1])) * RIGHT))
def updateRRArrow(arrow):
vector = calculateVectors(y_tracker.get_value(), x_tracker.
get_value(), rot_tracker.get_value(), 0)[3]
arrow.put_start_and_end_on(DOWN + 3 * RIGHT, np.array(DOWN + 3 *
RIGHT + vector[0] * np.cos(np.radians(vector[1])) * UP +
vector[0] * np.sin(np.radians(vector[1])) * RIGHT))
fr_vector = Arrow()
fr_vector.add_updater(updateFRArrow)
fl_vector = Arrow()
fl_vector.add_updater(updateFLArrow)
rl_vector = Arrow()
rl_vector.add_updater(updateRLArrow)
rr_vector = Arrow()
rr_vector.add_updater(updateRRArrow)
left_pad = Circle(radius=0.5).move_to(3 * LEFT)
left_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1
).move_to(3 * LEFT)
left_stick.add_updater(lambda x: x.move_to(3 * LEFT + 0.4 *
x_tracker.get_value() * RIGHT + 0.4 * y_tracker.get_value() * UP))
right_pad = Circle(radius=0.5).move_to(1 * LEFT)
right_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1
).move_to(1 * LEFT)
right_stick.add_updater(lambda x: x.move_to(1 * LEFT + 0.4 *
rot_tracker.get_value() * RIGHT))
self.play(FadeIn(chassis), ShowCreation(fr), ShowCreation(fl),
ShowCreation(rl), ShowCreation(rr))
self.play(ShowCreation(left_pad), ShowCreation(left_stick),
ShowCreation(right_pad), ShowCreation(right_stick))
self.play(ShowCreation(fr_vector), ShowCreation(fl_vector),
ShowCreation(rl_vector), ShowCreation(rr_vector))
self.wait(1)
self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func
=smooth))
self.play(ApplyMethod(x_tracker.set_value, -1, run_time=2,
rate_func=there_and_back), ApplyMethod(y_tracker.set_value, -1,
run_time=2, rate_func=smooth))
self.play(ApplyMethod(x_tracker.set_value, 1, run_time=2, rate_func
=there_and_back), ApplyMethod(y_tracker.set_value, 1, run_time=
2, rate_func=smooth))
self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1,
rate_func=smooth))
self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,
rate_func=smooth))
self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2,
rate_func=smooth))
self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,
rate_func=smooth))
self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func
=smooth))
self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,
rate_func=smooth))
self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2,
rate_func=smooth))
self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,
rate_func=smooth))
self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1,
rate_func=smooth))
self.wait(1)
self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,
rate_func=smooth))
fr_vector.remove_updater(updateFRArrow)
self.play(ApplyMethod(fr.shift, 0.3 * DOWN), ApplyMethod(fr_vector.
shift, 0.3 * DOWN))
self.play(ApplyMethod(fr.set_color, RED), ApplyMethod(fr_vector.
set_color, RED))
self.wait(1)
self.play(ApplyMethod(fr.set_color, WHITE), ApplyMethod(fr_vector.
set_color, WHITE))
self.play(ApplyMethod(fr.shift, 0.3 * UP), ApplyMethod(fr_vector.
shift, 0.3 * UP))
fr_vector.add_updater(updateFRArrow)
self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,
rate_func=smooth))
self.wait(1)
self.play(FadeOut(fr), FadeOut(fl), FadeOut(rl), FadeOut(rr),
FadeOut(chassis), FadeOut(left_pad), FadeOut(left_stick),
FadeOut(right_pad), FadeOut(right_stick), FadeOut(fr_vector),
FadeOut(fl_vector), FadeOut(rl_vector), FadeOut(rr_vector))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class A_Swerve(Scene):
def construct(self):
chassis = Square(side_length=2, stroke_width=0, fill_color=GRAY,
fill_opacity=1).shift(2 * RIGHT)
fr = Dot().shift(UP + 3 * RIGHT)
fl = Dot().shift(UP + RIGHT)
rl = Dot().shift(DOWN + RIGHT)
rr = Dot().shift(DOWN + 3 * RIGHT)
x_tracker = ValueTracker(0)
y_tracker = ValueTracker(0.001)
rot_tracker = ValueTracker(0)
def updateFRArrow(arrow):
vector = calculateVectors(y_tracker.get_value(), x_tracker.
get_value(), rot_tracker.get_value(), 0)[0]
arrow.put_start_and_end_on(UP + 3 * RIGHT, np.array(UP + 3 *
RIGHT + vector[0] * np.cos(np.radians(vector[1])) * UP +
vector[0] * np.sin(np.radians(vector[1])) * RIGHT))
def updateFLArrow(arrow):
vector = calculateVectors(y_tracker.get_value(), x_tracker.
get_value(), rot_tracker.get_value(), 0)[1]
arrow.put_start_and_end_on(UP + RIGHT, np.array(UP + RIGHT +
vector[0] * np.cos(np.radians(vector[1])) * UP + vector[0] *
np.sin(np.radians(vector[1])) * RIGHT))
def updateRLArrow(arrow):
vector = calculateVectors(y_tracker.get_value(), x_tracker.
get_value(), rot_tracker.get_value(), 0)[2]
arrow.put_start_and_end_on(DOWN + RIGHT, np.array(DOWN + RIGHT +
vector[0] * np.cos(np.radians(vector[1])) * UP + vector[0] *
np.sin(np.radians(vector[1])) * RIGHT))
def updateRRArrow(arrow):
vector = calculateVectors(y_tracker.get_value(), x_tracker.
get_value(), rot_tracker.get_value(), 0)[3]
arrow.put_start_and_end_on(DOWN + 3 * RIGHT, np.array(DOWN + 3 *
RIGHT + vector[0] * np.cos(np.radians(vector[1])) * UP +
vector[0] * np.sin(np.radians(vector[1])) * RIGHT))
fr_vector = Arrow()
fr_vector.add_updater(updateFRArrow)
fl_vector = Arrow()
fl_vector.add_updater(updateFLArrow)
rl_vector = Arrow()
rl_vector.add_updater(updateRLArrow)
rr_vector = Arrow()
rr_vector.add_updater(updateRRArrow)
left_pad = Circle(radius=0.5).move_to(3 * LEFT)
left_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1
).move_to(3 * LEFT)
left_stick.add_updater(lambda x: x.move_to(3 * LEFT + 0.4 *
x_tracker.get_value() * RIGHT + 0.4 * y_tracker.get_value() * UP))
right_pad = Circle(radius=0.5).move_to(1 * LEFT)
right_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1
).move_to(1 * LEFT)
right_stick.add_updater(lambda x: x.move_to(1 * LEFT + 0.4 *
rot_tracker.get_value() * RIGHT))
self.play(FadeIn(chassis), ShowCreation(fr), ShowCreation(fl),
ShowCreation(rl), ShowCreation(rr))
self.play(ShowCreation(left_pad), ShowCreation(left_stick),
ShowCreation(right_pad), ShowCreation(right_stick))
self.play(ShowCreation(fr_vector), ShowCreation(fl_vector),
ShowCreation(rl_vector), ShowCreation(rr_vector))
self.wait(1)
self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func
=smooth))
self.play(ApplyMethod(x_tracker.set_value, -1, run_time=2,
rate_func=there_and_back), ApplyMethod(y_tracker.set_value, -1,
run_time=2, rate_func=smooth))
self.play(ApplyMethod(x_tracker.set_value, 1, run_time=2, rate_func
=there_and_back), ApplyMethod(y_tracker.set_value, 1, run_time=
2, rate_func=smooth))
self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1,
rate_func=smooth))
self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,
rate_func=smooth))
self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2,
rate_func=smooth))
self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,
rate_func=smooth))
self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func
=smooth))
self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,
rate_func=smooth))
self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2,
rate_func=smooth))
self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,
rate_func=smooth))
self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1,
rate_func=smooth))
self.wait(1)
self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,
rate_func=smooth))
fr_vector.remove_updater(updateFRArrow)
self.play(ApplyMethod(fr.shift, 0.3 * DOWN), ApplyMethod(fr_vector.
shift, 0.3 * DOWN))
self.play(ApplyMethod(fr.set_color, RED), ApplyMethod(fr_vector.
set_color, RED))
self.wait(1)
self.play(ApplyMethod(fr.set_color, WHITE), ApplyMethod(fr_vector.
set_color, WHITE))
self.play(ApplyMethod(fr.shift, 0.3 * UP), ApplyMethod(fr_vector.
shift, 0.3 * UP))
fr_vector.add_updater(updateFRArrow)
self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,
rate_func=smooth))
self.wait(1)
self.play(FadeOut(fr), FadeOut(fl), FadeOut(rl), FadeOut(rr),
FadeOut(chassis), FadeOut(left_pad), FadeOut(left_stick),
FadeOut(right_pad), FadeOut(right_stick), FadeOut(fr_vector),
FadeOut(fl_vector), FadeOut(rl_vector), FadeOut(rr_vector))
<|reserved_special_token_0|>
def calculateVectors(FWD, STR, RCW, gyroAngle):
temp = FWD * math.cos(gyroAngle) + STR * math.sin(gyroAngle)
STR = -FWD * math.sin(gyroAngle) + STR * math.cos(gyroAngle)
FWD = temp
R = math.hypot(wheelBase, trackWidth)
A = STR - RCW * (wheelBase / R)
B = STR + RCW * (wheelBase / R)
C = FWD - RCW * (trackWidth / R)
D = FWD + RCW * (trackWidth / R)
fr_ws = math.hypot(B, C)
fl_ws = math.hypot(B, D)
bl_ws = math.hypot(A, D)
br_ws = math.hypot(A, C)
fr_wa = math.atan2(B, C) * 180 / math.pi
fl_wa = math.atan2(B, D) * 180 / math.pi
bl_wa = math.atan2(A, D) * 180 / math.pi
br_wa = math.atan2(A, C) * 180 / math.pi
max = fr_ws
if fl_ws > max:
max = fl_ws
if bl_ws > max:
max = bl_ws
if br_ws > max:
max = br_ws
if max > 1:
fr_ws /= max
fl_ws /= max
bl_ws /= max
br_ws /= max
return np.array([[fr_ws, fr_wa], [fl_ws, fl_wa], [bl_ws, bl_wa], [br_ws,
br_wa]])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class A_Swerve(Scene):
    """Animated explainer of swerve-drive kinematics (manim scene).

    Draws a four-wheel chassis with a dot at each wheel, one velocity
    arrow per wheel, and two virtual gamepad sticks (left stick drives
    translation, right stick drives rotation).  A scripted sequence of
    stick inputs then plays while updaters keep every wheel arrow in
    sync with the vectors returned by the module-level
    calculateVectors().
    """

    def construct(self):
        # Chassis body and the four wheel dots: front-right, front-left,
        # rear-left and rear-right corners of the square.
        chassis = Square(side_length=2, stroke_width=0, fill_color=GRAY,
            fill_opacity=1).shift(2 * RIGHT)
        fr = Dot().shift(UP + 3 * RIGHT)
        fl = Dot().shift(UP + RIGHT)
        rl = Dot().shift(DOWN + RIGHT)
        rr = Dot().shift(DOWN + 3 * RIGHT)
        # Virtual stick state: x/y mirror the left stick (strafe/forward),
        # rot mirrors the right stick.  y starts at a tiny nonzero value —
        # presumably to avoid degenerate zero-length arrows; confirm.
        x_tracker = ValueTracker(0)
        y_tracker = ValueTracker(0.001)
        rot_tracker = ValueTracker(0)
        # One updater per wheel: re-read the sticks, take this wheel's
        # [speed, angle_degrees] row from calculateVectors() (gyro fixed
        # at 0, i.e. robot-centric), and re-anchor the arrow at the wheel
        # with components speed*cos(angle) up and speed*sin(angle) right.
        def updateFRArrow(arrow):
            vector = calculateVectors(y_tracker.get_value(), x_tracker.
                get_value(), rot_tracker.get_value(), 0)[0]
            arrow.put_start_and_end_on(UP + 3 * RIGHT, np.array(UP + 3 *
                RIGHT + vector[0] * np.cos(np.radians(vector[1])) * UP +
                vector[0] * np.sin(np.radians(vector[1])) * RIGHT))
        def updateFLArrow(arrow):
            vector = calculateVectors(y_tracker.get_value(), x_tracker.
                get_value(), rot_tracker.get_value(), 0)[1]
            arrow.put_start_and_end_on(UP + RIGHT, np.array(UP + RIGHT +
                vector[0] * np.cos(np.radians(vector[1])) * UP + vector[0] *
                np.sin(np.radians(vector[1])) * RIGHT))
        def updateRLArrow(arrow):
            vector = calculateVectors(y_tracker.get_value(), x_tracker.
                get_value(), rot_tracker.get_value(), 0)[2]
            arrow.put_start_and_end_on(DOWN + RIGHT, np.array(DOWN + RIGHT +
                vector[0] * np.cos(np.radians(vector[1])) * UP + vector[0] *
                np.sin(np.radians(vector[1])) * RIGHT))
        def updateRRArrow(arrow):
            vector = calculateVectors(y_tracker.get_value(), x_tracker.
                get_value(), rot_tracker.get_value(), 0)[3]
            arrow.put_start_and_end_on(DOWN + 3 * RIGHT, np.array(DOWN + 3 *
                RIGHT + vector[0] * np.cos(np.radians(vector[1])) * UP +
                vector[0] * np.sin(np.radians(vector[1])) * RIGHT))
        # Arrows start as defaults; the updaters position them each frame.
        fr_vector = Arrow()
        fr_vector.add_updater(updateFRArrow)
        fl_vector = Arrow()
        fl_vector.add_updater(updateFLArrow)
        rl_vector = Arrow()
        rl_vector.add_updater(updateRLArrow)
        rr_vector = Arrow()
        rr_vector.add_updater(updateRRArrow)
        # Gamepad visuals: pads are static outlines, sticks are filled
        # circles that follow the trackers (0.4 scene units at full stick).
        left_pad = Circle(radius=0.5).move_to(3 * LEFT)
        left_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1
            ).move_to(3 * LEFT)
        left_stick.add_updater(lambda x: x.move_to(3 * LEFT + 0.4 *
            x_tracker.get_value() * RIGHT + 0.4 * y_tracker.get_value() * UP))
        right_pad = Circle(radius=0.5).move_to(1 * LEFT)
        right_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1
            ).move_to(1 * LEFT)
        right_stick.add_updater(lambda x: x.move_to(1 * LEFT + 0.4 *
            rot_tracker.get_value() * RIGHT))
        # Build the picture.
        self.play(FadeIn(chassis), ShowCreation(fr), ShowCreation(fl),
            ShowCreation(rl), ShowCreation(rr))
        self.play(ShowCreation(left_pad), ShowCreation(left_stick),
            ShowCreation(right_pad), ShowCreation(right_stick))
        self.play(ShowCreation(fr_vector), ShowCreation(fl_vector),
            ShowCreation(rl_vector), ShowCreation(rr_vector))
        self.wait(1)
        # Full forward, then two half-circle sweeps of the left stick.
        self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func
            =smooth))
        self.play(ApplyMethod(x_tracker.set_value, -1, run_time=2,
            rate_func=there_and_back), ApplyMethod(y_tracker.set_value, -1,
            run_time=2, rate_func=smooth))
        self.play(ApplyMethod(x_tracker.set_value, 1, run_time=2, rate_func
            =there_and_back), ApplyMethod(y_tracker.set_value, 1, run_time=
            2, rate_func=smooth))
        # Back to neutral, then pure rotation left/right and back.
        self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1,
            rate_func=smooth))
        self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,
            rate_func=smooth))
        self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2,
            rate_func=smooth))
        self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,
            rate_func=smooth))
        # Full forward combined with rotation, then neutral again.
        self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func
            =smooth))
        self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,
            rate_func=smooth))
        self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2,
            rate_func=smooth))
        self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,
            rate_func=smooth))
        self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1,
            rate_func=smooth))
        self.wait(1)
        # Detach the front-right arrow while rotating: shift and tint it
        # red (presumably illustrating a wheel that stops tracking its
        # command), then restore it and re-attach the updater.
        self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,
            rate_func=smooth))
        fr_vector.remove_updater(updateFRArrow)
        self.play(ApplyMethod(fr.shift, 0.3 * DOWN), ApplyMethod(fr_vector.
            shift, 0.3 * DOWN))
        self.play(ApplyMethod(fr.set_color, RED), ApplyMethod(fr_vector.
            set_color, RED))
        self.wait(1)
        self.play(ApplyMethod(fr.set_color, WHITE), ApplyMethod(fr_vector.
            set_color, WHITE))
        self.play(ApplyMethod(fr.shift, 0.3 * UP), ApplyMethod(fr_vector.
            shift, 0.3 * UP))
        fr_vector.add_updater(updateFRArrow)
        self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,
            rate_func=smooth))
        self.wait(1)
        # Tear everything down.
        self.play(FadeOut(fr), FadeOut(fl), FadeOut(rl), FadeOut(rr),
            FadeOut(chassis), FadeOut(left_pad), FadeOut(left_stick),
            FadeOut(right_pad), FadeOut(right_stick), FadeOut(fr_vector),
            FadeOut(fl_vector), FadeOut(rl_vector), FadeOut(rr_vector))
# Chassis geometry used by calculateVectors().  Only the
# wheelBase:trackWidth ratio matters to the kinematics (each appears
# divided by their hypotenuse R), so the units are arbitrary.
wheelBase = 10
trackWidth = 10
def calculateVectors(FWD, STR, RCW, gyroAngle):
    """Compute swerve-drive wheel [speed, angle] pairs via inverse kinematics.

    Args:
        FWD: forward command, typically in [-1, 1].
        STR: strafe (sideways) command, typically in [-1, 1].
        RCW: rotation command (clockwise), typically in [-1, 1].
        gyroAngle: robot heading in radians (fed to math.cos/math.sin);
            used to rotate the (FWD, STR) command into the robot frame
            for field-centric control.  Pass 0 for robot-centric.

    Returns:
        np.ndarray of shape (4, 2): rows are [speed, angle_degrees] for
        the front-right, front-left, back-left and back-right wheels.
        Speeds are normalized so the largest magnitude never exceeds 1.

    Note:
        Reads the module-level constants ``wheelBase`` and ``trackWidth``.
    """
    # Rotate the translation command by the gyro heading (field-centric).
    temp = FWD * math.cos(gyroAngle) + STR * math.sin(gyroAngle)
    STR = -FWD * math.sin(gyroAngle) + STR * math.cos(gyroAngle)
    FWD = temp
    # Inverse kinematics: combine the translation and rotation
    # contributions at each corner of the wheelBase x trackWidth chassis.
    R = math.hypot(wheelBase, trackWidth)
    A = STR - RCW * (wheelBase / R)
    B = STR + RCW * (wheelBase / R)
    C = FWD - RCW * (trackWidth / R)
    D = FWD + RCW * (trackWidth / R)
    fr_ws = math.hypot(B, C)
    fl_ws = math.hypot(B, D)
    bl_ws = math.hypot(A, D)
    br_ws = math.hypot(A, C)
    fr_wa = math.atan2(B, C) * 180 / math.pi
    fl_wa = math.atan2(B, D) * 180 / math.pi
    bl_wa = math.atan2(A, D) * 180 / math.pi
    br_wa = math.atan2(A, C) * 180 / math.pi
    # Normalize wheel speeds so none exceeds full throttle.  Use the
    # builtin max() rather than shadowing it with a local variable.
    top_speed = max(fr_ws, fl_ws, bl_ws, br_ws)
    if top_speed > 1:
        fr_ws /= top_speed
        fl_ws /= top_speed
        bl_ws /= top_speed
        br_ws /= top_speed
    return np.array([[fr_ws, fr_wa],
                     [fl_ws, fl_wa],
                     [bl_ws, bl_wa],
                     [br_ws, br_wa]])
<|reserved_special_token_1|>
from manimlib.imports import *
import math
class A_Swerve(Scene):
    """Animated explainer of swerve-drive kinematics (manim scene).

    Draws a four-wheel chassis with a dot at each wheel, one velocity
    arrow per wheel, and two virtual gamepad sticks (left stick drives
    translation, right stick drives rotation).  A scripted sequence of
    stick inputs then plays while updaters keep every wheel arrow in
    sync with the vectors returned by the module-level
    calculateVectors().
    """

    def construct(self):
        # Chassis body and the four wheel dots: front-right, front-left,
        # rear-left and rear-right corners of the square.
        chassis = Square(side_length=2, stroke_width=0, fill_color=GRAY,
            fill_opacity=1).shift(2 * RIGHT)
        fr = Dot().shift(UP + 3 * RIGHT)
        fl = Dot().shift(UP + RIGHT)
        rl = Dot().shift(DOWN + RIGHT)
        rr = Dot().shift(DOWN + 3 * RIGHT)
        # Virtual stick state: x/y mirror the left stick (strafe/forward),
        # rot mirrors the right stick.  y starts at a tiny nonzero value —
        # presumably to avoid degenerate zero-length arrows; confirm.
        x_tracker = ValueTracker(0)
        y_tracker = ValueTracker(0.001)
        rot_tracker = ValueTracker(0)
        # One updater per wheel: re-read the sticks, take this wheel's
        # [speed, angle_degrees] row from calculateVectors() (gyro fixed
        # at 0, i.e. robot-centric), and re-anchor the arrow at the wheel
        # with components speed*cos(angle) up and speed*sin(angle) right.
        def updateFRArrow(arrow):
            vector = calculateVectors(y_tracker.get_value(), x_tracker.
                get_value(), rot_tracker.get_value(), 0)[0]
            arrow.put_start_and_end_on(UP + 3 * RIGHT, np.array(UP + 3 *
                RIGHT + vector[0] * np.cos(np.radians(vector[1])) * UP +
                vector[0] * np.sin(np.radians(vector[1])) * RIGHT))
        def updateFLArrow(arrow):
            vector = calculateVectors(y_tracker.get_value(), x_tracker.
                get_value(), rot_tracker.get_value(), 0)[1]
            arrow.put_start_and_end_on(UP + RIGHT, np.array(UP + RIGHT +
                vector[0] * np.cos(np.radians(vector[1])) * UP + vector[0] *
                np.sin(np.radians(vector[1])) * RIGHT))
        def updateRLArrow(arrow):
            vector = calculateVectors(y_tracker.get_value(), x_tracker.
                get_value(), rot_tracker.get_value(), 0)[2]
            arrow.put_start_and_end_on(DOWN + RIGHT, np.array(DOWN + RIGHT +
                vector[0] * np.cos(np.radians(vector[1])) * UP + vector[0] *
                np.sin(np.radians(vector[1])) * RIGHT))
        def updateRRArrow(arrow):
            vector = calculateVectors(y_tracker.get_value(), x_tracker.
                get_value(), rot_tracker.get_value(), 0)[3]
            arrow.put_start_and_end_on(DOWN + 3 * RIGHT, np.array(DOWN + 3 *
                RIGHT + vector[0] * np.cos(np.radians(vector[1])) * UP +
                vector[0] * np.sin(np.radians(vector[1])) * RIGHT))
        # Arrows start as defaults; the updaters position them each frame.
        fr_vector = Arrow()
        fr_vector.add_updater(updateFRArrow)
        fl_vector = Arrow()
        fl_vector.add_updater(updateFLArrow)
        rl_vector = Arrow()
        rl_vector.add_updater(updateRLArrow)
        rr_vector = Arrow()
        rr_vector.add_updater(updateRRArrow)
        # Gamepad visuals: pads are static outlines, sticks are filled
        # circles that follow the trackers (0.4 scene units at full stick).
        left_pad = Circle(radius=0.5).move_to(3 * LEFT)
        left_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1
            ).move_to(3 * LEFT)
        left_stick.add_updater(lambda x: x.move_to(3 * LEFT + 0.4 *
            x_tracker.get_value() * RIGHT + 0.4 * y_tracker.get_value() * UP))
        right_pad = Circle(radius=0.5).move_to(1 * LEFT)
        right_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1
            ).move_to(1 * LEFT)
        right_stick.add_updater(lambda x: x.move_to(1 * LEFT + 0.4 *
            rot_tracker.get_value() * RIGHT))
        # Build the picture.
        self.play(FadeIn(chassis), ShowCreation(fr), ShowCreation(fl),
            ShowCreation(rl), ShowCreation(rr))
        self.play(ShowCreation(left_pad), ShowCreation(left_stick),
            ShowCreation(right_pad), ShowCreation(right_stick))
        self.play(ShowCreation(fr_vector), ShowCreation(fl_vector),
            ShowCreation(rl_vector), ShowCreation(rr_vector))
        self.wait(1)
        # Full forward, then two half-circle sweeps of the left stick.
        self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func
            =smooth))
        self.play(ApplyMethod(x_tracker.set_value, -1, run_time=2,
            rate_func=there_and_back), ApplyMethod(y_tracker.set_value, -1,
            run_time=2, rate_func=smooth))
        self.play(ApplyMethod(x_tracker.set_value, 1, run_time=2, rate_func
            =there_and_back), ApplyMethod(y_tracker.set_value, 1, run_time=
            2, rate_func=smooth))
        # Back to neutral, then pure rotation left/right and back.
        self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1,
            rate_func=smooth))
        self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,
            rate_func=smooth))
        self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2,
            rate_func=smooth))
        self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,
            rate_func=smooth))
        # Full forward combined with rotation, then neutral again.
        self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func
            =smooth))
        self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,
            rate_func=smooth))
        self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2,
            rate_func=smooth))
        self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,
            rate_func=smooth))
        self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1,
            rate_func=smooth))
        self.wait(1)
        # Detach the front-right arrow while rotating: shift and tint it
        # red (presumably illustrating a wheel that stops tracking its
        # command), then restore it and re-attach the updater.
        self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,
            rate_func=smooth))
        fr_vector.remove_updater(updateFRArrow)
        self.play(ApplyMethod(fr.shift, 0.3 * DOWN), ApplyMethod(fr_vector.
            shift, 0.3 * DOWN))
        self.play(ApplyMethod(fr.set_color, RED), ApplyMethod(fr_vector.
            set_color, RED))
        self.wait(1)
        self.play(ApplyMethod(fr.set_color, WHITE), ApplyMethod(fr_vector.
            set_color, WHITE))
        self.play(ApplyMethod(fr.shift, 0.3 * UP), ApplyMethod(fr_vector.
            shift, 0.3 * UP))
        fr_vector.add_updater(updateFRArrow)
        self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,
            rate_func=smooth))
        self.wait(1)
        # Tear everything down.
        self.play(FadeOut(fr), FadeOut(fl), FadeOut(rl), FadeOut(rr),
            FadeOut(chassis), FadeOut(left_pad), FadeOut(left_stick),
            FadeOut(right_pad), FadeOut(right_stick), FadeOut(fr_vector),
            FadeOut(fl_vector), FadeOut(rl_vector), FadeOut(rr_vector))
# Chassis geometry used by calculateVectors().  Only the
# wheelBase:trackWidth ratio matters to the kinematics (each appears
# divided by their hypotenuse R), so the units are arbitrary.
wheelBase = 10
trackWidth = 10
def calculateVectors(FWD, STR, RCW, gyroAngle):
    """Compute swerve-drive wheel [speed, angle] pairs via inverse kinematics.

    Args:
        FWD: forward command, typically in [-1, 1].
        STR: strafe (sideways) command, typically in [-1, 1].
        RCW: rotation command (clockwise), typically in [-1, 1].
        gyroAngle: robot heading in radians (fed to math.cos/math.sin);
            used to rotate the (FWD, STR) command into the robot frame
            for field-centric control.  Pass 0 for robot-centric.

    Returns:
        np.ndarray of shape (4, 2): rows are [speed, angle_degrees] for
        the front-right, front-left, back-left and back-right wheels.
        Speeds are normalized so the largest magnitude never exceeds 1.

    Note:
        Reads the module-level constants ``wheelBase`` and ``trackWidth``.
    """
    # Rotate the translation command by the gyro heading (field-centric).
    temp = FWD * math.cos(gyroAngle) + STR * math.sin(gyroAngle)
    STR = -FWD * math.sin(gyroAngle) + STR * math.cos(gyroAngle)
    FWD = temp
    # Inverse kinematics: combine the translation and rotation
    # contributions at each corner of the wheelBase x trackWidth chassis.
    R = math.hypot(wheelBase, trackWidth)
    A = STR - RCW * (wheelBase / R)
    B = STR + RCW * (wheelBase / R)
    C = FWD - RCW * (trackWidth / R)
    D = FWD + RCW * (trackWidth / R)
    fr_ws = math.hypot(B, C)
    fl_ws = math.hypot(B, D)
    bl_ws = math.hypot(A, D)
    br_ws = math.hypot(A, C)
    fr_wa = math.atan2(B, C) * 180 / math.pi
    fl_wa = math.atan2(B, D) * 180 / math.pi
    bl_wa = math.atan2(A, D) * 180 / math.pi
    br_wa = math.atan2(A, C) * 180 / math.pi
    # Normalize wheel speeds so none exceeds full throttle.  Use the
    # builtin max() rather than shadowing it with a local variable.
    top_speed = max(fr_ws, fl_ws, bl_ws, br_ws)
    if top_speed > 1:
        fr_ws /= top_speed
        fl_ws /= top_speed
        bl_ws /= top_speed
        br_ws /= top_speed
    return np.array([[fr_ws, fr_wa],
                     [fl_ws, fl_wa],
                     [bl_ws, bl_wa],
                     [br_ws, br_wa]])
<|reserved_special_token_1|>
from manimlib.imports import *
import math
class A_Swerve(Scene):
    """Animated explainer of swerve-drive kinematics (manim scene).

    Draws a four-wheel chassis with a dot at each wheel, one velocity
    arrow per wheel, and two virtual gamepad sticks (left stick drives
    translation, right stick drives rotation).  A scripted sequence of
    stick inputs then plays while updaters keep every wheel arrow in
    sync with the vectors returned by the module-level
    calculateVectors().
    """

    def construct(self):
        # Chassis body and the four wheel dots (fr/fl/rl/rr corners).
        chassis = Square(side_length=2, stroke_width=0, fill_color=GRAY, fill_opacity=1).shift(2*RIGHT)
        fr = Dot().shift(UP+3*RIGHT)
        fl = Dot().shift(UP+RIGHT)
        rl = Dot().shift(DOWN+RIGHT)
        rr = Dot().shift(DOWN+3*RIGHT)
        # Virtual stick state: x/y mirror the left stick (strafe/forward),
        # rot mirrors the right stick.  y starts at a tiny nonzero value —
        # presumably to avoid degenerate zero-length arrows; confirm.
        x_tracker = ValueTracker(0)
        y_tracker = ValueTracker(0.001)
        rot_tracker = ValueTracker(0)
        # One updater per wheel: re-read the sticks, take this wheel's
        # [speed, angle_degrees] row from calculateVectors() (gyro fixed
        # at 0, i.e. robot-centric), and re-anchor the arrow at the wheel
        # with components speed*cos(angle) up and speed*sin(angle) right.
        def updateFRArrow(arrow):
            vector = calculateVectors(y_tracker.get_value(), x_tracker.get_value(), rot_tracker.get_value(), 0)[0]
            arrow.put_start_and_end_on(UP+3*RIGHT, np.array(UP+3*RIGHT+vector[0]*np.cos(np.radians(vector[1]))*UP+(vector[0]*np.sin(np.radians(vector[1]))*RIGHT)))
        def updateFLArrow(arrow):
            vector = calculateVectors(y_tracker.get_value(), x_tracker.get_value(), rot_tracker.get_value(), 0)[1]
            arrow.put_start_and_end_on(UP+RIGHT, np.array(UP+RIGHT+vector[0]*np.cos(np.radians(vector[1]))*UP+(vector[0]*np.sin(np.radians(vector[1]))*RIGHT)))
        def updateRLArrow(arrow):
            vector = calculateVectors(y_tracker.get_value(), x_tracker.get_value(), rot_tracker.get_value(), 0)[2]
            arrow.put_start_and_end_on(DOWN+RIGHT, np.array(DOWN+RIGHT+vector[0]*np.cos(np.radians(vector[1]))*UP+(vector[0]*np.sin(np.radians(vector[1]))*RIGHT)))
        def updateRRArrow(arrow):
            vector = calculateVectors(y_tracker.get_value(), x_tracker.get_value(), rot_tracker.get_value(), 0)[3]
            arrow.put_start_and_end_on(DOWN+3*RIGHT, np.array(DOWN+3*RIGHT+vector[0]*np.cos(np.radians(vector[1]))*UP+(vector[0]*np.sin(np.radians(vector[1]))*RIGHT)))
        # Arrows start as defaults; the updaters position them each frame.
        fr_vector = Arrow()
        fr_vector.add_updater(updateFRArrow)
        fl_vector = Arrow()
        fl_vector.add_updater(updateFLArrow)
        rl_vector = Arrow()
        rl_vector.add_updater(updateRLArrow)
        rr_vector = Arrow()
        rr_vector.add_updater(updateRRArrow)
        # Gamepad visuals: pads are static outlines, sticks are filled
        # circles that follow the trackers (0.4 scene units at full stick).
        left_pad = Circle(radius=0.5).move_to(3*LEFT)
        left_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1).move_to(3*LEFT)
        left_stick.add_updater(lambda x: x.move_to(3*LEFT+0.4*x_tracker.get_value()*RIGHT+0.4*y_tracker.get_value()*UP))
        right_pad = Circle(radius=0.5).move_to(1*LEFT)
        right_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1).move_to(1*LEFT)
        right_stick.add_updater(lambda x: x.move_to(1*LEFT+0.4*rot_tracker.get_value()*RIGHT))
        # Build the picture.
        self.play(FadeIn(chassis), ShowCreation(fr), ShowCreation(fl), ShowCreation(rl), ShowCreation(rr))
        self.play(ShowCreation(left_pad), ShowCreation(left_stick), ShowCreation(right_pad), ShowCreation(right_stick))
        self.play(ShowCreation(fr_vector), ShowCreation(fl_vector), ShowCreation(rl_vector), ShowCreation(rr_vector))
        self.wait(1)
        # Full forward
        self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func=smooth))
        # Semi circle sweep of the left stick (x out-and-back, y to -1)
        self.play(ApplyMethod(x_tracker.set_value, -1, run_time=2, rate_func=there_and_back),
                  ApplyMethod(y_tracker.set_value, -1, run_time=2, rate_func=smooth))
        # Semi circle back the other way
        self.play(ApplyMethod(x_tracker.set_value, 1, run_time=2, rate_func=there_and_back),
                  ApplyMethod(y_tracker.set_value, 1, run_time=2, rate_func=smooth))
        # Neutral
        self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1, rate_func=smooth))
        # Pure rotation: right stick left, right, back to center
        self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1, rate_func=smooth))
        self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2, rate_func=smooth))
        self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1, rate_func=smooth))
        # Full forward plus rotation
        self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func=smooth))
        self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1, rate_func=smooth))
        self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2, rate_func=smooth))
        self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1, rate_func=smooth))
        # Neutral
        self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1, rate_func=smooth))
        # Move FR: detach the front-right arrow while rotating, shift and
        # tint it red (presumably illustrating a wheel that stops tracking
        # its command), then restore it and re-attach the updater.
        self.wait(1)
        self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1, rate_func=smooth))
        fr_vector.remove_updater(updateFRArrow)
        self.play(ApplyMethod(fr.shift, 0.3*DOWN), ApplyMethod(fr_vector.shift, 0.3*DOWN))
        self.play(ApplyMethod(fr.set_color, RED), ApplyMethod(fr_vector.set_color, RED))
        self.wait(1)
        self.play(ApplyMethod(fr.set_color, WHITE), ApplyMethod(fr_vector.set_color, WHITE))
        self.play(ApplyMethod(fr.shift, 0.3*UP), ApplyMethod(fr_vector.shift, 0.3*UP))
        fr_vector.add_updater(updateFRArrow)
        # Neutral
        self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1, rate_func=smooth))
        # Fade out everything
        self.wait(1)
        self.play(FadeOut(fr), FadeOut(fl), FadeOut(rl), FadeOut(rr), FadeOut(chassis),
                  FadeOut(left_pad), FadeOut(left_stick), FadeOut(right_pad), FadeOut(right_stick),
                  FadeOut(fr_vector), FadeOut(fl_vector), FadeOut(rl_vector), FadeOut(rr_vector))
# Chassis geometry used by calculateVectors().  Only the
# wheelBase:trackWidth ratio matters to the kinematics (each appears
# divided by their hypotenuse R), so the units are arbitrary.
wheelBase = 10
trackWidth = 10
def calculateVectors(FWD, STR, RCW, gyroAngle):
    """Compute swerve-drive wheel [speed, angle] pairs via inverse kinematics.

    Args:
        FWD: forward command, typically in [-1, 1].
        STR: strafe (sideways) command, typically in [-1, 1].
        RCW: rotation command (clockwise), typically in [-1, 1].
        gyroAngle: robot heading in radians (fed to math.cos/math.sin);
            used to rotate the (FWD, STR) command into the robot frame
            for field-centric control.  Pass 0 for robot-centric.

    Returns:
        np.ndarray of shape (4, 2): rows are [speed, angle_degrees] for
        the front-right, front-left, back-left and back-right wheels.
        Speeds are normalized so the largest magnitude never exceeds 1.

    Note:
        Reads the module-level constants ``wheelBase`` and ``trackWidth``.
    """
    # Makes the command field-centric: rotate (FWD, STR) by the heading.
    temp = FWD * math.cos(gyroAngle) + STR * math.sin(gyroAngle)
    STR = -FWD * math.sin(gyroAngle) + STR * math.cos(gyroAngle)
    FWD = temp
    # Uses inverse kinematics to derive wheel speeds and angles: combine
    # translation and rotation contributions at each chassis corner.
    R = math.hypot(wheelBase, trackWidth)
    A = STR - RCW * (wheelBase / R)
    B = STR + RCW * (wheelBase / R)
    C = FWD - RCW * (trackWidth / R)
    D = FWD + RCW * (trackWidth / R)
    fr_ws = math.hypot(B, C)
    fl_ws = math.hypot(B, D)
    bl_ws = math.hypot(A, D)
    br_ws = math.hypot(A, C)
    fr_wa = math.atan2(B, C) * 180 / math.pi
    fl_wa = math.atan2(B, D) * 180 / math.pi
    bl_wa = math.atan2(A, D) * 180 / math.pi
    br_wa = math.atan2(A, C) * 180 / math.pi
    # Normalize wheel speeds so none exceeds full throttle.  Use the
    # builtin max() rather than shadowing it with a local variable.
    top_speed = max(fr_ws, fl_ws, bl_ws, br_ws)
    if top_speed > 1:
        fr_ws /= top_speed
        fl_ws /= top_speed
        bl_ws /= top_speed
        br_ws /= top_speed
    return np.array([[fr_ws, fr_wa],
                     [fl_ws, fl_wa],
                     [bl_ws, bl_wa],
                     [br_ws, br_wa]])
|
flexible
|
{
"blob_id": "bdde3a3725510d4a83b09421e4b8538a38e29584",
"index": 8196,
"step-1": "<mask token>\n\n\nclass A_Swerve(Scene):\n\n def construct(self):\n chassis = Square(side_length=2, stroke_width=0, fill_color=GRAY,\n fill_opacity=1).shift(2 * RIGHT)\n fr = Dot().shift(UP + 3 * RIGHT)\n fl = Dot().shift(UP + RIGHT)\n rl = Dot().shift(DOWN + RIGHT)\n rr = Dot().shift(DOWN + 3 * RIGHT)\n x_tracker = ValueTracker(0)\n y_tracker = ValueTracker(0.001)\n rot_tracker = ValueTracker(0)\n\n def updateFRArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[0]\n arrow.put_start_and_end_on(UP + 3 * RIGHT, np.array(UP + 3 *\n RIGHT + vector[0] * np.cos(np.radians(vector[1])) * UP + \n vector[0] * np.sin(np.radians(vector[1])) * RIGHT))\n\n def updateFLArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[1]\n arrow.put_start_and_end_on(UP + RIGHT, np.array(UP + RIGHT + \n vector[0] * np.cos(np.radians(vector[1])) * UP + vector[0] *\n np.sin(np.radians(vector[1])) * RIGHT))\n\n def updateRLArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[2]\n arrow.put_start_and_end_on(DOWN + RIGHT, np.array(DOWN + RIGHT +\n vector[0] * np.cos(np.radians(vector[1])) * UP + vector[0] *\n np.sin(np.radians(vector[1])) * RIGHT))\n\n def updateRRArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[3]\n arrow.put_start_and_end_on(DOWN + 3 * RIGHT, np.array(DOWN + 3 *\n RIGHT + vector[0] * np.cos(np.radians(vector[1])) * UP + \n vector[0] * np.sin(np.radians(vector[1])) * RIGHT))\n fr_vector = Arrow()\n fr_vector.add_updater(updateFRArrow)\n fl_vector = Arrow()\n fl_vector.add_updater(updateFLArrow)\n rl_vector = Arrow()\n rl_vector.add_updater(updateRLArrow)\n rr_vector = Arrow()\n rr_vector.add_updater(updateRRArrow)\n left_pad = Circle(radius=0.5).move_to(3 * LEFT)\n left_stick = Circle(radius=0.25, 
fill_color=WHITE, fill_opacity=1\n ).move_to(3 * LEFT)\n left_stick.add_updater(lambda x: x.move_to(3 * LEFT + 0.4 *\n x_tracker.get_value() * RIGHT + 0.4 * y_tracker.get_value() * UP))\n right_pad = Circle(radius=0.5).move_to(1 * LEFT)\n right_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1\n ).move_to(1 * LEFT)\n right_stick.add_updater(lambda x: x.move_to(1 * LEFT + 0.4 *\n rot_tracker.get_value() * RIGHT))\n self.play(FadeIn(chassis), ShowCreation(fr), ShowCreation(fl),\n ShowCreation(rl), ShowCreation(rr))\n self.play(ShowCreation(left_pad), ShowCreation(left_stick),\n ShowCreation(right_pad), ShowCreation(right_stick))\n self.play(ShowCreation(fr_vector), ShowCreation(fl_vector),\n ShowCreation(rl_vector), ShowCreation(rr_vector))\n self.wait(1)\n self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func\n =smooth))\n self.play(ApplyMethod(x_tracker.set_value, -1, run_time=2,\n rate_func=there_and_back), ApplyMethod(y_tracker.set_value, -1,\n run_time=2, rate_func=smooth))\n self.play(ApplyMethod(x_tracker.set_value, 1, run_time=2, rate_func\n =there_and_back), ApplyMethod(y_tracker.set_value, 1, run_time=\n 2, rate_func=smooth))\n self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func\n =smooth))\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1,\n rate_func=smooth))\n self.wait(1)\n self.play(ApplyMethod(rot_tracker.set_value, -1, 
run_time=1,\n rate_func=smooth))\n fr_vector.remove_updater(updateFRArrow)\n self.play(ApplyMethod(fr.shift, 0.3 * DOWN), ApplyMethod(fr_vector.\n shift, 0.3 * DOWN))\n self.play(ApplyMethod(fr.set_color, RED), ApplyMethod(fr_vector.\n set_color, RED))\n self.wait(1)\n self.play(ApplyMethod(fr.set_color, WHITE), ApplyMethod(fr_vector.\n set_color, WHITE))\n self.play(ApplyMethod(fr.shift, 0.3 * UP), ApplyMethod(fr_vector.\n shift, 0.3 * UP))\n fr_vector.add_updater(updateFRArrow)\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,\n rate_func=smooth))\n self.wait(1)\n self.play(FadeOut(fr), FadeOut(fl), FadeOut(rl), FadeOut(rr),\n FadeOut(chassis), FadeOut(left_pad), FadeOut(left_stick),\n FadeOut(right_pad), FadeOut(right_stick), FadeOut(fr_vector),\n FadeOut(fl_vector), FadeOut(rl_vector), FadeOut(rr_vector))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass A_Swerve(Scene):\n\n def construct(self):\n chassis = Square(side_length=2, stroke_width=0, fill_color=GRAY,\n fill_opacity=1).shift(2 * RIGHT)\n fr = Dot().shift(UP + 3 * RIGHT)\n fl = Dot().shift(UP + RIGHT)\n rl = Dot().shift(DOWN + RIGHT)\n rr = Dot().shift(DOWN + 3 * RIGHT)\n x_tracker = ValueTracker(0)\n y_tracker = ValueTracker(0.001)\n rot_tracker = ValueTracker(0)\n\n def updateFRArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[0]\n arrow.put_start_and_end_on(UP + 3 * RIGHT, np.array(UP + 3 *\n RIGHT + vector[0] * np.cos(np.radians(vector[1])) * UP + \n vector[0] * np.sin(np.radians(vector[1])) * RIGHT))\n\n def updateFLArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[1]\n arrow.put_start_and_end_on(UP + RIGHT, np.array(UP + RIGHT + \n vector[0] * np.cos(np.radians(vector[1])) * UP + vector[0] *\n np.sin(np.radians(vector[1])) * RIGHT))\n\n def updateRLArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[2]\n arrow.put_start_and_end_on(DOWN + RIGHT, np.array(DOWN + RIGHT +\n vector[0] * np.cos(np.radians(vector[1])) * UP + vector[0] *\n np.sin(np.radians(vector[1])) * RIGHT))\n\n def updateRRArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[3]\n arrow.put_start_and_end_on(DOWN + 3 * RIGHT, np.array(DOWN + 3 *\n RIGHT + vector[0] * np.cos(np.radians(vector[1])) * UP + \n vector[0] * np.sin(np.radians(vector[1])) * RIGHT))\n fr_vector = Arrow()\n fr_vector.add_updater(updateFRArrow)\n fl_vector = Arrow()\n fl_vector.add_updater(updateFLArrow)\n rl_vector = Arrow()\n rl_vector.add_updater(updateRLArrow)\n rr_vector = Arrow()\n rr_vector.add_updater(updateRRArrow)\n left_pad = Circle(radius=0.5).move_to(3 * LEFT)\n left_stick = Circle(radius=0.25, 
fill_color=WHITE, fill_opacity=1\n ).move_to(3 * LEFT)\n left_stick.add_updater(lambda x: x.move_to(3 * LEFT + 0.4 *\n x_tracker.get_value() * RIGHT + 0.4 * y_tracker.get_value() * UP))\n right_pad = Circle(radius=0.5).move_to(1 * LEFT)\n right_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1\n ).move_to(1 * LEFT)\n right_stick.add_updater(lambda x: x.move_to(1 * LEFT + 0.4 *\n rot_tracker.get_value() * RIGHT))\n self.play(FadeIn(chassis), ShowCreation(fr), ShowCreation(fl),\n ShowCreation(rl), ShowCreation(rr))\n self.play(ShowCreation(left_pad), ShowCreation(left_stick),\n ShowCreation(right_pad), ShowCreation(right_stick))\n self.play(ShowCreation(fr_vector), ShowCreation(fl_vector),\n ShowCreation(rl_vector), ShowCreation(rr_vector))\n self.wait(1)\n self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func\n =smooth))\n self.play(ApplyMethod(x_tracker.set_value, -1, run_time=2,\n rate_func=there_and_back), ApplyMethod(y_tracker.set_value, -1,\n run_time=2, rate_func=smooth))\n self.play(ApplyMethod(x_tracker.set_value, 1, run_time=2, rate_func\n =there_and_back), ApplyMethod(y_tracker.set_value, 1, run_time=\n 2, rate_func=smooth))\n self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func\n =smooth))\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1,\n rate_func=smooth))\n self.wait(1)\n self.play(ApplyMethod(rot_tracker.set_value, -1, 
run_time=1,\n rate_func=smooth))\n fr_vector.remove_updater(updateFRArrow)\n self.play(ApplyMethod(fr.shift, 0.3 * DOWN), ApplyMethod(fr_vector.\n shift, 0.3 * DOWN))\n self.play(ApplyMethod(fr.set_color, RED), ApplyMethod(fr_vector.\n set_color, RED))\n self.wait(1)\n self.play(ApplyMethod(fr.set_color, WHITE), ApplyMethod(fr_vector.\n set_color, WHITE))\n self.play(ApplyMethod(fr.shift, 0.3 * UP), ApplyMethod(fr_vector.\n shift, 0.3 * UP))\n fr_vector.add_updater(updateFRArrow)\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,\n rate_func=smooth))\n self.wait(1)\n self.play(FadeOut(fr), FadeOut(fl), FadeOut(rl), FadeOut(rr),\n FadeOut(chassis), FadeOut(left_pad), FadeOut(left_stick),\n FadeOut(right_pad), FadeOut(right_stick), FadeOut(fr_vector),\n FadeOut(fl_vector), FadeOut(rl_vector), FadeOut(rr_vector))\n\n\n<mask token>\n\n\ndef calculateVectors(FWD, STR, RCW, gyroAngle):\n temp = FWD * math.cos(gyroAngle) + STR * math.sin(gyroAngle)\n STR = -FWD * math.sin(gyroAngle) + STR * math.cos(gyroAngle)\n FWD = temp\n R = math.hypot(wheelBase, trackWidth)\n A = STR - RCW * (wheelBase / R)\n B = STR + RCW * (wheelBase / R)\n C = FWD - RCW * (trackWidth / R)\n D = FWD + RCW * (trackWidth / R)\n fr_ws = math.hypot(B, C)\n fl_ws = math.hypot(B, D)\n bl_ws = math.hypot(A, D)\n br_ws = math.hypot(A, C)\n fr_wa = math.atan2(B, C) * 180 / math.pi\n fl_wa = math.atan2(B, D) * 180 / math.pi\n bl_wa = math.atan2(A, D) * 180 / math.pi\n br_wa = math.atan2(A, C) * 180 / math.pi\n max = fr_ws\n if fl_ws > max:\n max = fl_ws\n if bl_ws > max:\n max = bl_ws\n if br_ws > max:\n max = br_ws\n if max > 1:\n fr_ws /= max\n fl_ws /= max\n bl_ws /= max\n br_ws /= max\n return np.array([[fr_ws, fr_wa], [fl_ws, fl_wa], [bl_ws, bl_wa], [br_ws,\n br_wa]])\n",
"step-3": "<mask token>\n\n\nclass A_Swerve(Scene):\n\n def construct(self):\n chassis = Square(side_length=2, stroke_width=0, fill_color=GRAY,\n fill_opacity=1).shift(2 * RIGHT)\n fr = Dot().shift(UP + 3 * RIGHT)\n fl = Dot().shift(UP + RIGHT)\n rl = Dot().shift(DOWN + RIGHT)\n rr = Dot().shift(DOWN + 3 * RIGHT)\n x_tracker = ValueTracker(0)\n y_tracker = ValueTracker(0.001)\n rot_tracker = ValueTracker(0)\n\n def updateFRArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[0]\n arrow.put_start_and_end_on(UP + 3 * RIGHT, np.array(UP + 3 *\n RIGHT + vector[0] * np.cos(np.radians(vector[1])) * UP + \n vector[0] * np.sin(np.radians(vector[1])) * RIGHT))\n\n def updateFLArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[1]\n arrow.put_start_and_end_on(UP + RIGHT, np.array(UP + RIGHT + \n vector[0] * np.cos(np.radians(vector[1])) * UP + vector[0] *\n np.sin(np.radians(vector[1])) * RIGHT))\n\n def updateRLArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[2]\n arrow.put_start_and_end_on(DOWN + RIGHT, np.array(DOWN + RIGHT +\n vector[0] * np.cos(np.radians(vector[1])) * UP + vector[0] *\n np.sin(np.radians(vector[1])) * RIGHT))\n\n def updateRRArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[3]\n arrow.put_start_and_end_on(DOWN + 3 * RIGHT, np.array(DOWN + 3 *\n RIGHT + vector[0] * np.cos(np.radians(vector[1])) * UP + \n vector[0] * np.sin(np.radians(vector[1])) * RIGHT))\n fr_vector = Arrow()\n fr_vector.add_updater(updateFRArrow)\n fl_vector = Arrow()\n fl_vector.add_updater(updateFLArrow)\n rl_vector = Arrow()\n rl_vector.add_updater(updateRLArrow)\n rr_vector = Arrow()\n rr_vector.add_updater(updateRRArrow)\n left_pad = Circle(radius=0.5).move_to(3 * LEFT)\n left_stick = Circle(radius=0.25, 
fill_color=WHITE, fill_opacity=1\n ).move_to(3 * LEFT)\n left_stick.add_updater(lambda x: x.move_to(3 * LEFT + 0.4 *\n x_tracker.get_value() * RIGHT + 0.4 * y_tracker.get_value() * UP))\n right_pad = Circle(radius=0.5).move_to(1 * LEFT)\n right_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1\n ).move_to(1 * LEFT)\n right_stick.add_updater(lambda x: x.move_to(1 * LEFT + 0.4 *\n rot_tracker.get_value() * RIGHT))\n self.play(FadeIn(chassis), ShowCreation(fr), ShowCreation(fl),\n ShowCreation(rl), ShowCreation(rr))\n self.play(ShowCreation(left_pad), ShowCreation(left_stick),\n ShowCreation(right_pad), ShowCreation(right_stick))\n self.play(ShowCreation(fr_vector), ShowCreation(fl_vector),\n ShowCreation(rl_vector), ShowCreation(rr_vector))\n self.wait(1)\n self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func\n =smooth))\n self.play(ApplyMethod(x_tracker.set_value, -1, run_time=2,\n rate_func=there_and_back), ApplyMethod(y_tracker.set_value, -1,\n run_time=2, rate_func=smooth))\n self.play(ApplyMethod(x_tracker.set_value, 1, run_time=2, rate_func\n =there_and_back), ApplyMethod(y_tracker.set_value, 1, run_time=\n 2, rate_func=smooth))\n self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func\n =smooth))\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1,\n rate_func=smooth))\n self.wait(1)\n self.play(ApplyMethod(rot_tracker.set_value, -1, 
run_time=1,\n rate_func=smooth))\n fr_vector.remove_updater(updateFRArrow)\n self.play(ApplyMethod(fr.shift, 0.3 * DOWN), ApplyMethod(fr_vector.\n shift, 0.3 * DOWN))\n self.play(ApplyMethod(fr.set_color, RED), ApplyMethod(fr_vector.\n set_color, RED))\n self.wait(1)\n self.play(ApplyMethod(fr.set_color, WHITE), ApplyMethod(fr_vector.\n set_color, WHITE))\n self.play(ApplyMethod(fr.shift, 0.3 * UP), ApplyMethod(fr_vector.\n shift, 0.3 * UP))\n fr_vector.add_updater(updateFRArrow)\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,\n rate_func=smooth))\n self.wait(1)\n self.play(FadeOut(fr), FadeOut(fl), FadeOut(rl), FadeOut(rr),\n FadeOut(chassis), FadeOut(left_pad), FadeOut(left_stick),\n FadeOut(right_pad), FadeOut(right_stick), FadeOut(fr_vector),\n FadeOut(fl_vector), FadeOut(rl_vector), FadeOut(rr_vector))\n\n\nwheelBase = 10\ntrackWidth = 10\n\n\ndef calculateVectors(FWD, STR, RCW, gyroAngle):\n temp = FWD * math.cos(gyroAngle) + STR * math.sin(gyroAngle)\n STR = -FWD * math.sin(gyroAngle) + STR * math.cos(gyroAngle)\n FWD = temp\n R = math.hypot(wheelBase, trackWidth)\n A = STR - RCW * (wheelBase / R)\n B = STR + RCW * (wheelBase / R)\n C = FWD - RCW * (trackWidth / R)\n D = FWD + RCW * (trackWidth / R)\n fr_ws = math.hypot(B, C)\n fl_ws = math.hypot(B, D)\n bl_ws = math.hypot(A, D)\n br_ws = math.hypot(A, C)\n fr_wa = math.atan2(B, C) * 180 / math.pi\n fl_wa = math.atan2(B, D) * 180 / math.pi\n bl_wa = math.atan2(A, D) * 180 / math.pi\n br_wa = math.atan2(A, C) * 180 / math.pi\n max = fr_ws\n if fl_ws > max:\n max = fl_ws\n if bl_ws > max:\n max = bl_ws\n if br_ws > max:\n max = br_ws\n if max > 1:\n fr_ws /= max\n fl_ws /= max\n bl_ws /= max\n br_ws /= max\n return np.array([[fr_ws, fr_wa], [fl_ws, fl_wa], [bl_ws, bl_wa], [br_ws,\n br_wa]])\n",
"step-4": "from manimlib.imports import *\nimport math\n\n\nclass A_Swerve(Scene):\n\n def construct(self):\n chassis = Square(side_length=2, stroke_width=0, fill_color=GRAY,\n fill_opacity=1).shift(2 * RIGHT)\n fr = Dot().shift(UP + 3 * RIGHT)\n fl = Dot().shift(UP + RIGHT)\n rl = Dot().shift(DOWN + RIGHT)\n rr = Dot().shift(DOWN + 3 * RIGHT)\n x_tracker = ValueTracker(0)\n y_tracker = ValueTracker(0.001)\n rot_tracker = ValueTracker(0)\n\n def updateFRArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[0]\n arrow.put_start_and_end_on(UP + 3 * RIGHT, np.array(UP + 3 *\n RIGHT + vector[0] * np.cos(np.radians(vector[1])) * UP + \n vector[0] * np.sin(np.radians(vector[1])) * RIGHT))\n\n def updateFLArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[1]\n arrow.put_start_and_end_on(UP + RIGHT, np.array(UP + RIGHT + \n vector[0] * np.cos(np.radians(vector[1])) * UP + vector[0] *\n np.sin(np.radians(vector[1])) * RIGHT))\n\n def updateRLArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[2]\n arrow.put_start_and_end_on(DOWN + RIGHT, np.array(DOWN + RIGHT +\n vector[0] * np.cos(np.radians(vector[1])) * UP + vector[0] *\n np.sin(np.radians(vector[1])) * RIGHT))\n\n def updateRRArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[3]\n arrow.put_start_and_end_on(DOWN + 3 * RIGHT, np.array(DOWN + 3 *\n RIGHT + vector[0] * np.cos(np.radians(vector[1])) * UP + \n vector[0] * np.sin(np.radians(vector[1])) * RIGHT))\n fr_vector = Arrow()\n fr_vector.add_updater(updateFRArrow)\n fl_vector = Arrow()\n fl_vector.add_updater(updateFLArrow)\n rl_vector = Arrow()\n rl_vector.add_updater(updateRLArrow)\n rr_vector = Arrow()\n rr_vector.add_updater(updateRRArrow)\n left_pad = Circle(radius=0.5).move_to(3 * LEFT)\n left_stick 
= Circle(radius=0.25, fill_color=WHITE, fill_opacity=1\n ).move_to(3 * LEFT)\n left_stick.add_updater(lambda x: x.move_to(3 * LEFT + 0.4 *\n x_tracker.get_value() * RIGHT + 0.4 * y_tracker.get_value() * UP))\n right_pad = Circle(radius=0.5).move_to(1 * LEFT)\n right_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1\n ).move_to(1 * LEFT)\n right_stick.add_updater(lambda x: x.move_to(1 * LEFT + 0.4 *\n rot_tracker.get_value() * RIGHT))\n self.play(FadeIn(chassis), ShowCreation(fr), ShowCreation(fl),\n ShowCreation(rl), ShowCreation(rr))\n self.play(ShowCreation(left_pad), ShowCreation(left_stick),\n ShowCreation(right_pad), ShowCreation(right_stick))\n self.play(ShowCreation(fr_vector), ShowCreation(fl_vector),\n ShowCreation(rl_vector), ShowCreation(rr_vector))\n self.wait(1)\n self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func\n =smooth))\n self.play(ApplyMethod(x_tracker.set_value, -1, run_time=2,\n rate_func=there_and_back), ApplyMethod(y_tracker.set_value, -1,\n run_time=2, rate_func=smooth))\n self.play(ApplyMethod(x_tracker.set_value, 1, run_time=2, rate_func\n =there_and_back), ApplyMethod(y_tracker.set_value, 1, run_time=\n 2, rate_func=smooth))\n self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func\n =smooth))\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1,\n rate_func=smooth))\n self.wait(1)\n 
self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,\n rate_func=smooth))\n fr_vector.remove_updater(updateFRArrow)\n self.play(ApplyMethod(fr.shift, 0.3 * DOWN), ApplyMethod(fr_vector.\n shift, 0.3 * DOWN))\n self.play(ApplyMethod(fr.set_color, RED), ApplyMethod(fr_vector.\n set_color, RED))\n self.wait(1)\n self.play(ApplyMethod(fr.set_color, WHITE), ApplyMethod(fr_vector.\n set_color, WHITE))\n self.play(ApplyMethod(fr.shift, 0.3 * UP), ApplyMethod(fr_vector.\n shift, 0.3 * UP))\n fr_vector.add_updater(updateFRArrow)\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,\n rate_func=smooth))\n self.wait(1)\n self.play(FadeOut(fr), FadeOut(fl), FadeOut(rl), FadeOut(rr),\n FadeOut(chassis), FadeOut(left_pad), FadeOut(left_stick),\n FadeOut(right_pad), FadeOut(right_stick), FadeOut(fr_vector),\n FadeOut(fl_vector), FadeOut(rl_vector), FadeOut(rr_vector))\n\n\nwheelBase = 10\ntrackWidth = 10\n\n\ndef calculateVectors(FWD, STR, RCW, gyroAngle):\n temp = FWD * math.cos(gyroAngle) + STR * math.sin(gyroAngle)\n STR = -FWD * math.sin(gyroAngle) + STR * math.cos(gyroAngle)\n FWD = temp\n R = math.hypot(wheelBase, trackWidth)\n A = STR - RCW * (wheelBase / R)\n B = STR + RCW * (wheelBase / R)\n C = FWD - RCW * (trackWidth / R)\n D = FWD + RCW * (trackWidth / R)\n fr_ws = math.hypot(B, C)\n fl_ws = math.hypot(B, D)\n bl_ws = math.hypot(A, D)\n br_ws = math.hypot(A, C)\n fr_wa = math.atan2(B, C) * 180 / math.pi\n fl_wa = math.atan2(B, D) * 180 / math.pi\n bl_wa = math.atan2(A, D) * 180 / math.pi\n br_wa = math.atan2(A, C) * 180 / math.pi\n max = fr_ws\n if fl_ws > max:\n max = fl_ws\n if bl_ws > max:\n max = bl_ws\n if br_ws > max:\n max = br_ws\n if max > 1:\n fr_ws /= max\n fl_ws /= max\n bl_ws /= max\n br_ws /= max\n return np.array([[fr_ws, fr_wa], [fl_ws, fl_wa], [bl_ws, bl_wa], [br_ws,\n br_wa]])\n",
"step-5": "from manimlib.imports import *\nimport math\n\nclass A_Swerve(Scene):\n def construct(self):\n chassis = Square(side_length=2, stroke_width=0, fill_color=GRAY, fill_opacity=1).shift(2*RIGHT)\n\n fr = Dot().shift(UP+3*RIGHT)\n fl = Dot().shift(UP+RIGHT)\n rl = Dot().shift(DOWN+RIGHT)\n rr = Dot().shift(DOWN+3*RIGHT)\n\n x_tracker = ValueTracker(0)\n y_tracker = ValueTracker(0.001)\n rot_tracker = ValueTracker(0)\n\n def updateFRArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.get_value(), rot_tracker.get_value(), 0)[0]\n arrow.put_start_and_end_on(UP+3*RIGHT, np.array(UP+3*RIGHT+vector[0]*np.cos(np.radians(vector[1]))*UP+(vector[0]*np.sin(np.radians(vector[1]))*RIGHT)))\n \n def updateFLArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.get_value(), rot_tracker.get_value(), 0)[1]\n arrow.put_start_and_end_on(UP+RIGHT, np.array(UP+RIGHT+vector[0]*np.cos(np.radians(vector[1]))*UP+(vector[0]*np.sin(np.radians(vector[1]))*RIGHT)))\n\n def updateRLArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.get_value(), rot_tracker.get_value(), 0)[2]\n arrow.put_start_and_end_on(DOWN+RIGHT, np.array(DOWN+RIGHT+vector[0]*np.cos(np.radians(vector[1]))*UP+(vector[0]*np.sin(np.radians(vector[1]))*RIGHT)))\n\n def updateRRArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.get_value(), rot_tracker.get_value(), 0)[3]\n arrow.put_start_and_end_on(DOWN+3*RIGHT, np.array(DOWN+3*RIGHT+vector[0]*np.cos(np.radians(vector[1]))*UP+(vector[0]*np.sin(np.radians(vector[1]))*RIGHT)))\n\n fr_vector = Arrow()\n fr_vector.add_updater(updateFRArrow)\n fl_vector = Arrow()\n fl_vector.add_updater(updateFLArrow)\n rl_vector = Arrow()\n rl_vector.add_updater(updateRLArrow)\n rr_vector = Arrow()\n rr_vector.add_updater(updateRRArrow)\n\n left_pad = Circle(radius=0.5).move_to(3*LEFT)\n left_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1).move_to(3*LEFT)\n left_stick.add_updater(lambda x: 
x.move_to(3*LEFT+0.4*x_tracker.get_value()*RIGHT+0.4*y_tracker.get_value()*UP))\n\n right_pad = Circle(radius=0.5).move_to(1*LEFT)\n right_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1).move_to(1*LEFT)\n right_stick.add_updater(lambda x: x.move_to(1*LEFT+0.4*rot_tracker.get_value()*RIGHT))\n\n self.play(FadeIn(chassis), ShowCreation(fr), ShowCreation(fl), ShowCreation(rl), ShowCreation(rr))\n self.play(ShowCreation(left_pad), ShowCreation(left_stick), ShowCreation(right_pad), ShowCreation(right_stick))\n self.play(ShowCreation(fr_vector), ShowCreation(fl_vector), ShowCreation(rl_vector), ShowCreation(rr_vector))\n self.wait(1)\n # Full forward\n self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func=smooth))\n # Semi circle\n self.play(ApplyMethod(x_tracker.set_value, -1, run_time=2, rate_func=there_and_back), \n ApplyMethod(y_tracker.set_value, -1, run_time=2, rate_func=smooth))\n # Semi circle\n self.play(ApplyMethod(x_tracker.set_value, 1, run_time=2, rate_func=there_and_back), \n ApplyMethod(y_tracker.set_value, 1, run_time=2, rate_func=smooth))\n # Neutral\n self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1, rate_func=smooth))\n # Pure rotation\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1, rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2, rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1, rate_func=smooth))\n # Full forward plus rotation\n self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1, rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2, rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1, rate_func=smooth))\n # Neutral\n self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1, rate_func=smooth))\n # Move FR\n self.wait(1)\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1, 
rate_func=smooth))\n fr_vector.remove_updater(updateFRArrow)\n self.play(ApplyMethod(fr.shift, 0.3*DOWN), ApplyMethod(fr_vector.shift, 0.3*DOWN))\n self.play(ApplyMethod(fr.set_color, RED), ApplyMethod(fr_vector.set_color, RED))\n self.wait(1)\n self.play(ApplyMethod(fr.set_color, WHITE), ApplyMethod(fr_vector.set_color, WHITE))\n self.play(ApplyMethod(fr.shift, 0.3*UP), ApplyMethod(fr_vector.shift, 0.3*UP))\n fr_vector.add_updater(updateFRArrow)\n # Neutral\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1, rate_func=smooth))\n # Fade out\n self.wait(1)\n self.play(FadeOut(fr), FadeOut(fl), FadeOut(rl), FadeOut(rr), FadeOut(chassis),\n FadeOut(left_pad), FadeOut(left_stick), FadeOut(right_pad), FadeOut(right_stick),\n FadeOut(fr_vector), FadeOut(fl_vector), FadeOut(rl_vector), FadeOut(rr_vector))\n\nwheelBase = 10\ntrackWidth = 10\n\ndef calculateVectors(FWD, STR, RCW, gyroAngle):\n\n # Makes the command field-centric.\n temp = FWD * math.cos(gyroAngle) + STR * math.sin(gyroAngle)\n STR = -FWD * math.sin(gyroAngle) + STR * math.cos(gyroAngle)\n FWD = temp\n\n # Uses inverse kinematics to derive wheel speeds and angles.\n R = math.hypot(wheelBase, trackWidth)\n\n A = STR - RCW * (wheelBase / R)\n B = STR + RCW * (wheelBase / R)\n C = FWD - RCW * (trackWidth / R)\n D = FWD + RCW * (trackWidth / R)\n\n fr_ws = math.hypot(B, C)\n fl_ws = math.hypot(B, D)\n bl_ws = math.hypot(A, D)\n br_ws = math.hypot(A, C)\n\n fr_wa = math.atan2(B, C) * 180 / math.pi\n fl_wa = math.atan2(B, D) * 180 / math.pi\n bl_wa = math.atan2(A, D) * 180 / math.pi\n br_wa = math.atan2(A, C) * 180 / math.pi\n\n # Normalize wheel speeds.\n max = fr_ws\n if fl_ws > max:\n max = fl_ws\n if bl_ws > max:\n max = bl_ws\n if br_ws > max:\n max = br_ws\n\n if max > 1:\n fr_ws /= max\n fl_ws /= max\n bl_ws /= max\n br_ws /= max\n\n return np.array([[fr_ws, fr_wa], \n [fl_ws, fl_wa], \n [bl_ws, bl_wa], \n [br_ws, br_wa]])\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_markdown_file(name, lang='en'):
"""
Get the contents of a markdown file.
"""
filename_temp = '{0}_{1}.markdown'
md_dir = os.path.join(current_app.config['APP_PATH'], 'markdown')
filepath = os.path.join(md_dir, filename_temp.format(name, lang))
if not os.path.isfile(filepath) and lang == 'fr':
filepath = os.path.join(md_dir, filename_temp.format(name, 'en'))
if not os.path.isfile(filepath):
return None
with codecs.open(filepath, mode='r', encoding='utf-8') as f:
return markdown.markdown(f.read())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_json_file(filename, lang='en'):
"""
Get the contents of a JSON file.
"""
filepath = os.path.join(current_app.config['APP_PATH'], 'data', filename)
with open(filepath, 'r') as f:
return json.loads(f.read())
def get_markdown_file(name, lang='en'):
"""
Get the contents of a markdown file.
"""
filename_temp = '{0}_{1}.markdown'
md_dir = os.path.join(current_app.config['APP_PATH'], 'markdown')
filepath = os.path.join(md_dir, filename_temp.format(name, lang))
if not os.path.isfile(filepath) and lang == 'fr':
filepath = os.path.join(md_dir, filename_temp.format(name, 'en'))
if not os.path.isfile(filepath):
return None
with codecs.open(filepath, mode='r', encoding='utf-8') as f:
return markdown.markdown(f.read())
<|reserved_special_token_1|>
import os
import json
import codecs
import markdown
from flask import current_app
def get_json_file(filename, lang='en'):
"""
Get the contents of a JSON file.
"""
filepath = os.path.join(current_app.config['APP_PATH'], 'data', filename)
with open(filepath, 'r') as f:
return json.loads(f.read())
def get_markdown_file(name, lang='en'):
"""
Get the contents of a markdown file.
"""
filename_temp = '{0}_{1}.markdown'
md_dir = os.path.join(current_app.config['APP_PATH'], 'markdown')
filepath = os.path.join(md_dir, filename_temp.format(name, lang))
if not os.path.isfile(filepath) and lang == 'fr':
filepath = os.path.join(md_dir, filename_temp.format(name, 'en'))
if not os.path.isfile(filepath):
return None
with codecs.open(filepath, mode='r', encoding='utf-8') as f:
return markdown.markdown(f.read())
<|reserved_special_token_1|>
import os
import json
import codecs
import markdown
from flask import current_app
def get_json_file(filename, lang='en'):
"""
Get the contents of a JSON file.
"""
filepath = os.path.join(current_app.config['APP_PATH'], 'data', filename)
with open(filepath, 'r') as f:
return json.loads(f.read())
def get_markdown_file(name, lang='en'):
"""
Get the contents of a markdown file.
"""
filename_temp = "{0}_{1}.markdown"
md_dir = os.path.join(current_app.config['APP_PATH'], 'markdown')
filepath = os.path.join(md_dir, filename_temp.format(name, lang))
if not os.path.isfile(filepath) and lang == 'fr':
filepath = os.path.join(md_dir, filename_temp.format(name, 'en'))
if not os.path.isfile(filepath):
return None
with codecs.open(filepath, mode='r', encoding="utf-8") as f:
return markdown.markdown(f.read())
|
flexible
|
{
"blob_id": "213ab22a269abc8180524462a8966e5d929ef7d1",
"index": 322,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_markdown_file(name, lang='en'):\n \"\"\"\n Get the contents of a markdown file.\n \"\"\"\n filename_temp = '{0}_{1}.markdown'\n md_dir = os.path.join(current_app.config['APP_PATH'], 'markdown')\n filepath = os.path.join(md_dir, filename_temp.format(name, lang))\n if not os.path.isfile(filepath) and lang == 'fr':\n filepath = os.path.join(md_dir, filename_temp.format(name, 'en'))\n if not os.path.isfile(filepath):\n return None\n with codecs.open(filepath, mode='r', encoding='utf-8') as f:\n return markdown.markdown(f.read())\n",
"step-3": "<mask token>\n\n\ndef get_json_file(filename, lang='en'):\n \"\"\"\n Get the contents of a JSON file.\n \"\"\"\n filepath = os.path.join(current_app.config['APP_PATH'], 'data', filename)\n with open(filepath, 'r') as f:\n return json.loads(f.read())\n\n\ndef get_markdown_file(name, lang='en'):\n \"\"\"\n Get the contents of a markdown file.\n \"\"\"\n filename_temp = '{0}_{1}.markdown'\n md_dir = os.path.join(current_app.config['APP_PATH'], 'markdown')\n filepath = os.path.join(md_dir, filename_temp.format(name, lang))\n if not os.path.isfile(filepath) and lang == 'fr':\n filepath = os.path.join(md_dir, filename_temp.format(name, 'en'))\n if not os.path.isfile(filepath):\n return None\n with codecs.open(filepath, mode='r', encoding='utf-8') as f:\n return markdown.markdown(f.read())\n",
"step-4": "import os\nimport json\nimport codecs\nimport markdown\nfrom flask import current_app\n\n\ndef get_json_file(filename, lang='en'):\n \"\"\"\n Get the contents of a JSON file.\n \"\"\"\n filepath = os.path.join(current_app.config['APP_PATH'], 'data', filename)\n with open(filepath, 'r') as f:\n return json.loads(f.read())\n\n\ndef get_markdown_file(name, lang='en'):\n \"\"\"\n Get the contents of a markdown file.\n \"\"\"\n filename_temp = '{0}_{1}.markdown'\n md_dir = os.path.join(current_app.config['APP_PATH'], 'markdown')\n filepath = os.path.join(md_dir, filename_temp.format(name, lang))\n if not os.path.isfile(filepath) and lang == 'fr':\n filepath = os.path.join(md_dir, filename_temp.format(name, 'en'))\n if not os.path.isfile(filepath):\n return None\n with codecs.open(filepath, mode='r', encoding='utf-8') as f:\n return markdown.markdown(f.read())\n",
"step-5": "import os\nimport json\nimport codecs\n\nimport markdown\n\nfrom flask import current_app\n\n\ndef get_json_file(filename, lang='en'):\n \"\"\"\n Get the contents of a JSON file.\n \"\"\"\n\n filepath = os.path.join(current_app.config['APP_PATH'], 'data', filename)\n\n with open(filepath, 'r') as f:\n return json.loads(f.read())\n\n\ndef get_markdown_file(name, lang='en'):\n \"\"\"\n Get the contents of a markdown file.\n \"\"\"\n\n filename_temp = \"{0}_{1}.markdown\"\n\n md_dir = os.path.join(current_app.config['APP_PATH'], 'markdown')\n\n filepath = os.path.join(md_dir, filename_temp.format(name, lang))\n\n if not os.path.isfile(filepath) and lang == 'fr':\n filepath = os.path.join(md_dir, filename_temp.format(name, 'en'))\n\n if not os.path.isfile(filepath):\n return None\n\n with codecs.open(filepath, mode='r', encoding=\"utf-8\") as f:\n return markdown.markdown(f.read())\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Generated by Django 3.1.1 on 2020-10-14 16:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Store', '0004_remove_product_mcat'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='main_cat',
),
migrations.AddField(
model_name='category',
name='main_cat',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='Store.maincategory'),
),
]
|
normal
|
{
"blob_id": "ec39dae7217ddc48b1ab5163d234542cb36c1d48",
"index": 5351,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('Store', '0004_remove_product_mcat')]\n operations = [migrations.RemoveField(model_name='category', name=\n 'main_cat'), migrations.AddField(model_name='category', name=\n 'main_cat', field=models.ForeignKey(blank=True, null=True,\n on_delete=django.db.models.deletion.SET_NULL, to='Store.maincategory'))\n ]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('Store', '0004_remove_product_mcat')]\n operations = [migrations.RemoveField(model_name='category', name=\n 'main_cat'), migrations.AddField(model_name='category', name=\n 'main_cat', field=models.ForeignKey(blank=True, null=True,\n on_delete=django.db.models.deletion.SET_NULL, to='Store.maincategory'))\n ]\n",
"step-5": "# Generated by Django 3.1.1 on 2020-10-14 16:26\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Store', '0004_remove_product_mcat'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='category',\n name='main_cat',\n ),\n migrations.AddField(\n model_name='category',\n name='main_cat',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='Store.maincategory'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import math
import numpy as np
import matplotlib.pyplot as plt
def test_func(x):
# x is vector; here of length 1
x = x[0]
return math.cos(x) * x**2 + x
def run_smac(max_fun=30):
from smac.facade.func_facade import fmin_smac
x, cost, smac = fmin_smac(func=test_func,
x0=[-0], # default values
bounds=[(-5, 5)], # bounds of each x
maxfun=max_fun, # maximal number of function evaluations
rng=1234 # random seed
)
runhistory = smac.get_runhistory()
# extract x value and corresponding y value
x_smac = []
y_smac = []
for entry in runhistory.data: # iterate over data because it is an OrderedDict
config_id = entry.config_id # look up config id
config = runhistory.ids_config[config_id] # look up config
y_ = runhistory.get_cost(config) # get cost
x_ = config["x1"] # there is only one entry in our example
x_smac.append(x_)
y_smac.append(y_)
x_smac = np.array(x_smac)
y_smac = np.array(y_smac)
return smac, x_smac, y_smac
def plot_state(smac, model, x_points, y_points, x_smac, y_smac, step=None):
"""
plot function with all evaluated points,
EI acquisition function
Predictions with uncertainties
"""
from smac.optimizer.acquisition import EI
# cost all points for x
step = step or len(x_smac)
x_smac_ = np.array([[x] for x in x_smac[:step]])
y_smac_ = np.array([[y] for y in y_smac[:step]])
# as an alternative, we could extract the points from the runhistory again
# but these points will be scaled to a unit-hypercube
# X, Y = smac.solver.rh2EPM.transform(runhistory)
model.train(x_smac_, y_smac_)
acq_func = EI(model=model)
acq_func.update(model=model, eta=np.min(y_smac))
x_points_ = np.array([[x] for x in x_points])
acq_values = acq_func._compute(X=x_points_)[:, 0]
# plot acquisition function
y_mean, y_var = model.predict(x_points_)
y_mean = y_mean[:, 0]
y_std = np.sqrt(y_var)[:, 0]
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.plot(x_points, acq_values)
plt.title("Aquisition Function")
plt.savefig('fig%da.pdf' % step)
# plot uncertainties
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.plot(x_points, y_mean)
ax1.fill_between(x_points, y_mean - y_std,
y_mean + y_std, alpha=0.5)
ax1.plot(x_smac[:step], y_smac[:step], 'bo')
ax1.plot(x_smac[:step], y_smac[:step], 'ro')
ax1.plot(x_points, y_points, '--')
plt.title("Uncertainty Predictions")
plt.savefig('fig%db.pdf' % step)
def clean_smac_shit():
import os
import shutil
for f in os.listdir('.'):
if f.startswith('smac3-output_'):
shutil.rmtree(f)
if __name__ == '__main__':
from smac.epm.rf_with_instances import RandomForestWithInstances
x_points = np.linspace(start=-5, stop=5, num=100)
y_points = list(map(test_func, map(lambda x: [x], x_points)))
smac, x_smac, y_smac = run_smac()
types, bounds = np.array([0]), np.array([[0.0, 1.0]])
model = RandomForestWithInstances(types=types,
bounds=bounds,
instance_features=None,
seed=12345,
pca_components=12345,
ratio_features=1,
num_trees=1000,
min_samples_split=1,
min_samples_leaf=1,
max_depth=100000,
do_bootstrapping=False,
n_points_per_tree=-1,
eps_purity=0
)
for i in range(10):
plot_state(smac, model, x_points, y_points, x_smac, y_smac, i+1)
clean_smac_shit()
|
normal
|
{
"blob_id": "90218168841dc76febab67d1e992dfc993730ea4",
"index": 2455,
"step-1": "<mask token>\n\n\ndef run_smac(max_fun=30):\n from smac.facade.func_facade import fmin_smac\n x, cost, smac = fmin_smac(func=test_func, x0=[-0], bounds=[(-5, 5)],\n maxfun=max_fun, rng=1234)\n runhistory = smac.get_runhistory()\n x_smac = []\n y_smac = []\n for entry in runhistory.data:\n config_id = entry.config_id\n config = runhistory.ids_config[config_id]\n y_ = runhistory.get_cost(config)\n x_ = config['x1']\n x_smac.append(x_)\n y_smac.append(y_)\n x_smac = np.array(x_smac)\n y_smac = np.array(y_smac)\n return smac, x_smac, y_smac\n\n\ndef plot_state(smac, model, x_points, y_points, x_smac, y_smac, step=None):\n \"\"\"\n plot function with all evaluated points,\n EI acquisition function\n Predictions with uncertainties\n \"\"\"\n from smac.optimizer.acquisition import EI\n step = step or len(x_smac)\n x_smac_ = np.array([[x] for x in x_smac[:step]])\n y_smac_ = np.array([[y] for y in y_smac[:step]])\n model.train(x_smac_, y_smac_)\n acq_func = EI(model=model)\n acq_func.update(model=model, eta=np.min(y_smac))\n x_points_ = np.array([[x] for x in x_points])\n acq_values = acq_func._compute(X=x_points_)[:, 0]\n y_mean, y_var = model.predict(x_points_)\n y_mean = y_mean[:, 0]\n y_std = np.sqrt(y_var)[:, 0]\n fig1 = plt.figure()\n ax1 = fig1.add_subplot(111)\n ax1.plot(x_points, acq_values)\n plt.title('Aquisition Function')\n plt.savefig('fig%da.pdf' % step)\n fig1 = plt.figure()\n ax1 = fig1.add_subplot(111)\n ax1.plot(x_points, y_mean)\n ax1.fill_between(x_points, y_mean - y_std, y_mean + y_std, alpha=0.5)\n ax1.plot(x_smac[:step], y_smac[:step], 'bo')\n ax1.plot(x_smac[:step], y_smac[:step], 'ro')\n ax1.plot(x_points, y_points, '--')\n plt.title('Uncertainty Predictions')\n plt.savefig('fig%db.pdf' % step)\n\n\ndef clean_smac_shit():\n import os\n import shutil\n for f in os.listdir('.'):\n if f.startswith('smac3-output_'):\n shutil.rmtree(f)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_func(x):\n x = x[0]\n return math.cos(x) * x ** 2 + x\n\n\ndef run_smac(max_fun=30):\n from smac.facade.func_facade import fmin_smac\n x, cost, smac = fmin_smac(func=test_func, x0=[-0], bounds=[(-5, 5)],\n maxfun=max_fun, rng=1234)\n runhistory = smac.get_runhistory()\n x_smac = []\n y_smac = []\n for entry in runhistory.data:\n config_id = entry.config_id\n config = runhistory.ids_config[config_id]\n y_ = runhistory.get_cost(config)\n x_ = config['x1']\n x_smac.append(x_)\n y_smac.append(y_)\n x_smac = np.array(x_smac)\n y_smac = np.array(y_smac)\n return smac, x_smac, y_smac\n\n\ndef plot_state(smac, model, x_points, y_points, x_smac, y_smac, step=None):\n \"\"\"\n plot function with all evaluated points,\n EI acquisition function\n Predictions with uncertainties\n \"\"\"\n from smac.optimizer.acquisition import EI\n step = step or len(x_smac)\n x_smac_ = np.array([[x] for x in x_smac[:step]])\n y_smac_ = np.array([[y] for y in y_smac[:step]])\n model.train(x_smac_, y_smac_)\n acq_func = EI(model=model)\n acq_func.update(model=model, eta=np.min(y_smac))\n x_points_ = np.array([[x] for x in x_points])\n acq_values = acq_func._compute(X=x_points_)[:, 0]\n y_mean, y_var = model.predict(x_points_)\n y_mean = y_mean[:, 0]\n y_std = np.sqrt(y_var)[:, 0]\n fig1 = plt.figure()\n ax1 = fig1.add_subplot(111)\n ax1.plot(x_points, acq_values)\n plt.title('Aquisition Function')\n plt.savefig('fig%da.pdf' % step)\n fig1 = plt.figure()\n ax1 = fig1.add_subplot(111)\n ax1.plot(x_points, y_mean)\n ax1.fill_between(x_points, y_mean - y_std, y_mean + y_std, alpha=0.5)\n ax1.plot(x_smac[:step], y_smac[:step], 'bo')\n ax1.plot(x_smac[:step], y_smac[:step], 'ro')\n ax1.plot(x_points, y_points, '--')\n plt.title('Uncertainty Predictions')\n plt.savefig('fig%db.pdf' % step)\n\n\ndef clean_smac_shit():\n import os\n import shutil\n for f in os.listdir('.'):\n if f.startswith('smac3-output_'):\n shutil.rmtree(f)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_func(x):\n x = x[0]\n return math.cos(x) * x ** 2 + x\n\n\ndef run_smac(max_fun=30):\n from smac.facade.func_facade import fmin_smac\n x, cost, smac = fmin_smac(func=test_func, x0=[-0], bounds=[(-5, 5)],\n maxfun=max_fun, rng=1234)\n runhistory = smac.get_runhistory()\n x_smac = []\n y_smac = []\n for entry in runhistory.data:\n config_id = entry.config_id\n config = runhistory.ids_config[config_id]\n y_ = runhistory.get_cost(config)\n x_ = config['x1']\n x_smac.append(x_)\n y_smac.append(y_)\n x_smac = np.array(x_smac)\n y_smac = np.array(y_smac)\n return smac, x_smac, y_smac\n\n\ndef plot_state(smac, model, x_points, y_points, x_smac, y_smac, step=None):\n \"\"\"\n plot function with all evaluated points,\n EI acquisition function\n Predictions with uncertainties\n \"\"\"\n from smac.optimizer.acquisition import EI\n step = step or len(x_smac)\n x_smac_ = np.array([[x] for x in x_smac[:step]])\n y_smac_ = np.array([[y] for y in y_smac[:step]])\n model.train(x_smac_, y_smac_)\n acq_func = EI(model=model)\n acq_func.update(model=model, eta=np.min(y_smac))\n x_points_ = np.array([[x] for x in x_points])\n acq_values = acq_func._compute(X=x_points_)[:, 0]\n y_mean, y_var = model.predict(x_points_)\n y_mean = y_mean[:, 0]\n y_std = np.sqrt(y_var)[:, 0]\n fig1 = plt.figure()\n ax1 = fig1.add_subplot(111)\n ax1.plot(x_points, acq_values)\n plt.title('Aquisition Function')\n plt.savefig('fig%da.pdf' % step)\n fig1 = plt.figure()\n ax1 = fig1.add_subplot(111)\n ax1.plot(x_points, y_mean)\n ax1.fill_between(x_points, y_mean - y_std, y_mean + y_std, alpha=0.5)\n ax1.plot(x_smac[:step], y_smac[:step], 'bo')\n ax1.plot(x_smac[:step], y_smac[:step], 'ro')\n ax1.plot(x_points, y_points, '--')\n plt.title('Uncertainty Predictions')\n plt.savefig('fig%db.pdf' % step)\n\n\ndef clean_smac_shit():\n import os\n import shutil\n for f in os.listdir('.'):\n if f.startswith('smac3-output_'):\n shutil.rmtree(f)\n\n\nif __name__ == '__main__':\n from 
smac.epm.rf_with_instances import RandomForestWithInstances\n x_points = np.linspace(start=-5, stop=5, num=100)\n y_points = list(map(test_func, map(lambda x: [x], x_points)))\n smac, x_smac, y_smac = run_smac()\n types, bounds = np.array([0]), np.array([[0.0, 1.0]])\n model = RandomForestWithInstances(types=types, bounds=bounds,\n instance_features=None, seed=12345, pca_components=12345,\n ratio_features=1, num_trees=1000, min_samples_split=1,\n min_samples_leaf=1, max_depth=100000, do_bootstrapping=False,\n n_points_per_tree=-1, eps_purity=0)\n for i in range(10):\n plot_state(smac, model, x_points, y_points, x_smac, y_smac, i + 1)\n clean_smac_shit()\n",
"step-4": "import math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef test_func(x):\n x = x[0]\n return math.cos(x) * x ** 2 + x\n\n\ndef run_smac(max_fun=30):\n from smac.facade.func_facade import fmin_smac\n x, cost, smac = fmin_smac(func=test_func, x0=[-0], bounds=[(-5, 5)],\n maxfun=max_fun, rng=1234)\n runhistory = smac.get_runhistory()\n x_smac = []\n y_smac = []\n for entry in runhistory.data:\n config_id = entry.config_id\n config = runhistory.ids_config[config_id]\n y_ = runhistory.get_cost(config)\n x_ = config['x1']\n x_smac.append(x_)\n y_smac.append(y_)\n x_smac = np.array(x_smac)\n y_smac = np.array(y_smac)\n return smac, x_smac, y_smac\n\n\ndef plot_state(smac, model, x_points, y_points, x_smac, y_smac, step=None):\n \"\"\"\n plot function with all evaluated points,\n EI acquisition function\n Predictions with uncertainties\n \"\"\"\n from smac.optimizer.acquisition import EI\n step = step or len(x_smac)\n x_smac_ = np.array([[x] for x in x_smac[:step]])\n y_smac_ = np.array([[y] for y in y_smac[:step]])\n model.train(x_smac_, y_smac_)\n acq_func = EI(model=model)\n acq_func.update(model=model, eta=np.min(y_smac))\n x_points_ = np.array([[x] for x in x_points])\n acq_values = acq_func._compute(X=x_points_)[:, 0]\n y_mean, y_var = model.predict(x_points_)\n y_mean = y_mean[:, 0]\n y_std = np.sqrt(y_var)[:, 0]\n fig1 = plt.figure()\n ax1 = fig1.add_subplot(111)\n ax1.plot(x_points, acq_values)\n plt.title('Aquisition Function')\n plt.savefig('fig%da.pdf' % step)\n fig1 = plt.figure()\n ax1 = fig1.add_subplot(111)\n ax1.plot(x_points, y_mean)\n ax1.fill_between(x_points, y_mean - y_std, y_mean + y_std, alpha=0.5)\n ax1.plot(x_smac[:step], y_smac[:step], 'bo')\n ax1.plot(x_smac[:step], y_smac[:step], 'ro')\n ax1.plot(x_points, y_points, '--')\n plt.title('Uncertainty Predictions')\n plt.savefig('fig%db.pdf' % step)\n\n\ndef clean_smac_shit():\n import os\n import shutil\n for f in os.listdir('.'):\n if f.startswith('smac3-output_'):\n 
shutil.rmtree(f)\n\n\nif __name__ == '__main__':\n from smac.epm.rf_with_instances import RandomForestWithInstances\n x_points = np.linspace(start=-5, stop=5, num=100)\n y_points = list(map(test_func, map(lambda x: [x], x_points)))\n smac, x_smac, y_smac = run_smac()\n types, bounds = np.array([0]), np.array([[0.0, 1.0]])\n model = RandomForestWithInstances(types=types, bounds=bounds,\n instance_features=None, seed=12345, pca_components=12345,\n ratio_features=1, num_trees=1000, min_samples_split=1,\n min_samples_leaf=1, max_depth=100000, do_bootstrapping=False,\n n_points_per_tree=-1, eps_purity=0)\n for i in range(10):\n plot_state(smac, model, x_points, y_points, x_smac, y_smac, i + 1)\n clean_smac_shit()\n",
"step-5": "import math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef test_func(x):\n # x is vector; here of length 1\n x = x[0]\n return math.cos(x) * x**2 + x\n\n\ndef run_smac(max_fun=30):\n from smac.facade.func_facade import fmin_smac\n\n x, cost, smac = fmin_smac(func=test_func,\n x0=[-0], # default values\n bounds=[(-5, 5)], # bounds of each x\n maxfun=max_fun, # maximal number of function evaluations\n rng=1234 # random seed\n )\n\n runhistory = smac.get_runhistory()\n\n # extract x value and corresponding y value\n x_smac = []\n y_smac = []\n for entry in runhistory.data: # iterate over data because it is an OrderedDict\n config_id = entry.config_id # look up config id\n config = runhistory.ids_config[config_id] # look up config\n y_ = runhistory.get_cost(config) # get cost\n x_ = config[\"x1\"] # there is only one entry in our example\n x_smac.append(x_)\n y_smac.append(y_)\n x_smac = np.array(x_smac)\n y_smac = np.array(y_smac)\n\n return smac, x_smac, y_smac\n\n\ndef plot_state(smac, model, x_points, y_points, x_smac, y_smac, step=None):\n \"\"\"\n plot function with all evaluated points,\n EI acquisition function\n Predictions with uncertainties\n \"\"\"\n from smac.optimizer.acquisition import EI\n\n # cost all points for x\n step = step or len(x_smac)\n x_smac_ = np.array([[x] for x in x_smac[:step]])\n y_smac_ = np.array([[y] for y in y_smac[:step]])\n # as an alternative, we could extract the points from the runhistory again\n # but these points will be scaled to a unit-hypercube\n # X, Y = smac.solver.rh2EPM.transform(runhistory)\n\n model.train(x_smac_, y_smac_)\n\n acq_func = EI(model=model)\n acq_func.update(model=model, eta=np.min(y_smac))\n\n x_points_ = np.array([[x] for x in x_points])\n acq_values = acq_func._compute(X=x_points_)[:, 0]\n\n # plot acquisition function\n y_mean, y_var = model.predict(x_points_)\n y_mean = y_mean[:, 0]\n y_std = np.sqrt(y_var)[:, 0]\n\n fig1 = plt.figure()\n ax1 = fig1.add_subplot(111)\n 
ax1.plot(x_points, acq_values)\n plt.title(\"Aquisition Function\")\n\n plt.savefig('fig%da.pdf' % step)\n\n # plot uncertainties\n fig1 = plt.figure()\n ax1 = fig1.add_subplot(111)\n ax1.plot(x_points, y_mean)\n ax1.fill_between(x_points, y_mean - y_std,\n y_mean + y_std, alpha=0.5)\n ax1.plot(x_smac[:step], y_smac[:step], 'bo')\n ax1.plot(x_smac[:step], y_smac[:step], 'ro')\n ax1.plot(x_points, y_points, '--')\n plt.title(\"Uncertainty Predictions\")\n\n plt.savefig('fig%db.pdf' % step)\n\n\ndef clean_smac_shit():\n import os\n import shutil\n for f in os.listdir('.'):\n if f.startswith('smac3-output_'):\n shutil.rmtree(f)\n\n\nif __name__ == '__main__':\n from smac.epm.rf_with_instances import RandomForestWithInstances\n\n x_points = np.linspace(start=-5, stop=5, num=100)\n y_points = list(map(test_func, map(lambda x: [x], x_points)))\n\n smac, x_smac, y_smac = run_smac()\n\n types, bounds = np.array([0]), np.array([[0.0, 1.0]])\n model = RandomForestWithInstances(types=types,\n bounds=bounds,\n instance_features=None,\n seed=12345,\n pca_components=12345,\n ratio_features=1,\n num_trees=1000,\n min_samples_split=1,\n min_samples_leaf=1,\n max_depth=100000,\n do_bootstrapping=False,\n n_points_per_tree=-1,\n eps_purity=0\n )\n\n for i in range(10):\n plot_state(smac, model, x_points, y_points, x_smac, y_smac, i+1)\n\n clean_smac_shit()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class SpriteMoveTo(SpriteLayer):
<|reserved_special_token_0|>
class FontLayer(Layer):
def __init__(self, title='Sprite Exmaple #', subtitle='Goto()'):
super(FontLayer, self).__init__()
self.title = title
self.subtitle = subtitle
self.batch = pyglet.graphics.Batch()
self.text_title = pyglet.text.Label(self.title, font_size=32, x=5,
y=director.get_window_size()[1], anchor_x='left', anchor_y=
'top', batch=self.batch)
self.text_subtitle = pyglet.text.Label(self.subtitle, multiline=
True, width=600, font_size=16, x=5, y=director.get_window_size(
)[1] - 80, anchor_x='left', anchor_y='top', batch=self.batch)
self.text_help = pyglet.text.Label(
'Press LEFT / RIGHT for prev/next test, ENTER to restart test',
font_size=16, x=director.get_window_size()[0] // 2, y=20,
anchor_x='center', anchor_y='center', batch=self.batch)
def draw(self):
super(FontLayer, self).draw()
self.batch.draw()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SpriteLayer(Layer):
is_event_handler = True
def __init__(self, index=1):
super(SpriteLayer, self).__init__()
self.index = index
self.image = pyglet.resource.image('flat-black-l.png')
self.image.anchor_x = self.image.width
self.image.anchor_y = self.image.height
def on_key_release(self, keys, mod):
max_steps = 8
if keys == key.LEFT:
self.index -= 1
if self.index < 0:
self.index = max_steps - 1
elif keys == key.RIGHT:
self.index += 1
if self.index >= 8:
self.index = 0
if keys in (key.LEFT, key.RIGHT, key.ENTER):
director.replace(get_steps(self.index))
return True
class SpriteMoveTo(SpriteLayer):
def on_enter(self):
super(SpriteMoveTo, self).on_enter()
sprite3 = Sprite(self.image)
self.add(sprite3)
x, y = divmod(self.index, 3)
sprite3.position = x * 100 + 100, y * 100 + 100
class FontLayer(Layer):
def __init__(self, title='Sprite Exmaple #', subtitle='Goto()'):
super(FontLayer, self).__init__()
self.title = title
self.subtitle = subtitle
self.batch = pyglet.graphics.Batch()
self.text_title = pyglet.text.Label(self.title, font_size=32, x=5,
y=director.get_window_size()[1], anchor_x='left', anchor_y=
'top', batch=self.batch)
self.text_subtitle = pyglet.text.Label(self.subtitle, multiline=
True, width=600, font_size=16, x=5, y=director.get_window_size(
)[1] - 80, anchor_x='left', anchor_y='top', batch=self.batch)
self.text_help = pyglet.text.Label(
'Press LEFT / RIGHT for prev/next test, ENTER to restart test',
font_size=16, x=director.get_window_size()[0] // 2, y=20,
anchor_x='center', anchor_y='center', batch=self.batch)
def draw(self):
super(FontLayer, self).draw()
self.batch.draw()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_steps(index):
return Scene(FontLayer(title='', subtitle='\n'.join(generate_haiku())),
SpriteMoveTo(index))
class SpriteLayer(Layer):
is_event_handler = True
def __init__(self, index=1):
super(SpriteLayer, self).__init__()
self.index = index
self.image = pyglet.resource.image('flat-black-l.png')
self.image.anchor_x = self.image.width
self.image.anchor_y = self.image.height
def on_key_release(self, keys, mod):
max_steps = 8
if keys == key.LEFT:
self.index -= 1
if self.index < 0:
self.index = max_steps - 1
elif keys == key.RIGHT:
self.index += 1
if self.index >= 8:
self.index = 0
if keys in (key.LEFT, key.RIGHT, key.ENTER):
director.replace(get_steps(self.index))
return True
class SpriteMoveTo(SpriteLayer):
def on_enter(self):
super(SpriteMoveTo, self).on_enter()
sprite3 = Sprite(self.image)
self.add(sprite3)
x, y = divmod(self.index, 3)
sprite3.position = x * 100 + 100, y * 100 + 100
class FontLayer(Layer):
def __init__(self, title='Sprite Exmaple #', subtitle='Goto()'):
super(FontLayer, self).__init__()
self.title = title
self.subtitle = subtitle
self.batch = pyglet.graphics.Batch()
self.text_title = pyglet.text.Label(self.title, font_size=32, x=5,
y=director.get_window_size()[1], anchor_x='left', anchor_y=
'top', batch=self.batch)
self.text_subtitle = pyglet.text.Label(self.subtitle, multiline=
True, width=600, font_size=16, x=5, y=director.get_window_size(
)[1] - 80, anchor_x='left', anchor_y='top', batch=self.batch)
self.text_help = pyglet.text.Label(
'Press LEFT / RIGHT for prev/next test, ENTER to restart test',
font_size=16, x=director.get_window_size()[0] // 2, y=20,
anchor_x='center', anchor_y='center', batch=self.batch)
def draw(self):
super(FontLayer, self).draw()
self.batch.draw()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
<|reserved_special_token_0|>
def get_steps(index):
return Scene(FontLayer(title='', subtitle='\n'.join(generate_haiku())),
SpriteMoveTo(index))
class SpriteLayer(Layer):
is_event_handler = True
def __init__(self, index=1):
super(SpriteLayer, self).__init__()
self.index = index
self.image = pyglet.resource.image('flat-black-l.png')
self.image.anchor_x = self.image.width
self.image.anchor_y = self.image.height
def on_key_release(self, keys, mod):
max_steps = 8
if keys == key.LEFT:
self.index -= 1
if self.index < 0:
self.index = max_steps - 1
elif keys == key.RIGHT:
self.index += 1
if self.index >= 8:
self.index = 0
if keys in (key.LEFT, key.RIGHT, key.ENTER):
director.replace(get_steps(self.index))
return True
class SpriteMoveTo(SpriteLayer):
def on_enter(self):
super(SpriteMoveTo, self).on_enter()
sprite3 = Sprite(self.image)
self.add(sprite3)
x, y = divmod(self.index, 3)
sprite3.position = x * 100 + 100, y * 100 + 100
class FontLayer(Layer):
def __init__(self, title='Sprite Exmaple #', subtitle='Goto()'):
super(FontLayer, self).__init__()
self.title = title
self.subtitle = subtitle
self.batch = pyglet.graphics.Batch()
self.text_title = pyglet.text.Label(self.title, font_size=32, x=5,
y=director.get_window_size()[1], anchor_x='left', anchor_y=
'top', batch=self.batch)
self.text_subtitle = pyglet.text.Label(self.subtitle, multiline=
True, width=600, font_size=16, x=5, y=director.get_window_size(
)[1] - 80, anchor_x='left', anchor_y='top', batch=self.batch)
self.text_help = pyglet.text.Label(
'Press LEFT / RIGHT for prev/next test, ENTER to restart test',
font_size=16, x=director.get_window_size()[0] // 2, y=20,
anchor_x='center', anchor_y='center', batch=self.batch)
def draw(self):
super(FontLayer, self).draw()
self.batch.draw()
if __name__ == '__main__':
director.init(resizable=True, caption='SuperStepper')
director.run(get_steps(1))
<|reserved_special_token_1|>
from __future__ import division, print_function, unicode_literals
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from pyglet.gl import *
from pyglet.window import key
from cocos.actions import *
from cocos.director import director
from cocos.layer import Layer
from cocos.scene import Scene
from cocos.sprite import Sprite
from haiku import generate_haiku
from time import time
def get_steps(index):
    """Build the scene for step *index*: a haiku caption plus the sprite layer."""
    caption = '\n'.join(generate_haiku())
    text_layer = FontLayer(title="", subtitle=caption)
    return Scene(text_layer, SpriteMoveTo(index))
class SpriteLayer(Layer):
    """Base layer that owns the sprite image and handles LEFT/RIGHT/ENTER navigation."""

    is_event_handler = True  #: enable pyglet's events

    def __init__(self, index=1):
        super(SpriteLayer, self).__init__()
        self.index = index

        # Anchor at the top-right corner of the image so positions refer to
        # that corner instead of pyglet's default (bottom-left).
        self.image = pyglet.resource.image('flat-black-l.png')
        self.image.anchor_x = self.image.width
        self.image.anchor_y = self.image.height

    def on_key_release(self, keys, mod):
        # LEFT: previous step, RIGHT: next step, ENTER: restart current step.
        max_steps = 8

        # Wrap around with modulo; the original hardcoded ``8`` in the RIGHT
        # branch instead of using ``max_steps``.
        if keys == key.LEFT:
            self.index = (self.index - 1) % max_steps
        elif keys == key.RIGHT:
            self.index = (self.index + 1) % max_steps

        if keys in (key.LEFT, key.RIGHT, key.ENTER):
            director.replace(get_steps(self.index))
            return True
class SpriteMoveTo(SpriteLayer):
    """Places one sprite on a 3-row grid cell chosen by the current step index."""

    def on_enter(self):
        super(SpriteMoveTo, self).on_enter()

        sprite = Sprite(self.image)
        self.add(sprite)

        # Map the step index onto a column/row pair (three rows per column).
        col, row = divmod(self.index, 3)
        sprite.position = (col * 100 + 100, row * 100 + 100)
class FontLayer(Layer):
    """Layer rendering a title, a multiline subtitle and a fixed help line.

    All three labels share one graphics batch so they are drawn together.
    """

    def __init__(self, title="Sprite Exmaple #", subtitle="Goto()"):
        super(FontLayer, self).__init__()

        self.title = title
        self.subtitle = subtitle
        self.batch = pyglet.graphics.Batch()

        win_w, win_h = director.get_window_size()

        # Title pinned to the top-left corner of the window.
        self.text_title = pyglet.text.Label(
            self.title,
            font_size=32,
            x=5, y=win_h,
            anchor_x='left', anchor_y='top',
            batch=self.batch)

        # Subtitle sits just below the title and wraps at 600 px.
        self.text_subtitle = pyglet.text.Label(
            self.subtitle,
            multiline=True, width=600,
            font_size=16,
            x=5, y=win_h - 80,
            anchor_x='left', anchor_y='top',
            batch=self.batch)

        # Help line centered near the bottom of the window.
        self.text_help = pyglet.text.Label(
            "Press LEFT / RIGHT for prev/next test, ENTER to restart test",
            font_size=16,
            x=win_w // 2, y=20,
            anchor_x='center', anchor_y='center',
            batch=self.batch)

    def draw(self):
        super(FontLayer, self).draw()
        self.batch.draw()
if __name__ == "__main__":
    # Entry point: open a resizable window and start the demo at step 1.
    director.init(resizable=True, caption='SuperStepper')
    director.run(get_steps(1))
|
flexible
|
{
"blob_id": "2678aac08104a580e866984bc4cf4adf8cb8ac5c",
"index": 5930,
"step-1": "<mask token>\n\n\nclass SpriteMoveTo(SpriteLayer):\n <mask token>\n\n\nclass FontLayer(Layer):\n\n def __init__(self, title='Sprite Exmaple #', subtitle='Goto()'):\n super(FontLayer, self).__init__()\n self.title = title\n self.subtitle = subtitle\n self.batch = pyglet.graphics.Batch()\n self.text_title = pyglet.text.Label(self.title, font_size=32, x=5,\n y=director.get_window_size()[1], anchor_x='left', anchor_y=\n 'top', batch=self.batch)\n self.text_subtitle = pyglet.text.Label(self.subtitle, multiline=\n True, width=600, font_size=16, x=5, y=director.get_window_size(\n )[1] - 80, anchor_x='left', anchor_y='top', batch=self.batch)\n self.text_help = pyglet.text.Label(\n 'Press LEFT / RIGHT for prev/next test, ENTER to restart test',\n font_size=16, x=director.get_window_size()[0] // 2, y=20,\n anchor_x='center', anchor_y='center', batch=self.batch)\n\n def draw(self):\n super(FontLayer, self).draw()\n self.batch.draw()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SpriteLayer(Layer):\n is_event_handler = True\n\n def __init__(self, index=1):\n super(SpriteLayer, self).__init__()\n self.index = index\n self.image = pyglet.resource.image('flat-black-l.png')\n self.image.anchor_x = self.image.width\n self.image.anchor_y = self.image.height\n\n def on_key_release(self, keys, mod):\n max_steps = 8\n if keys == key.LEFT:\n self.index -= 1\n if self.index < 0:\n self.index = max_steps - 1\n elif keys == key.RIGHT:\n self.index += 1\n if self.index >= 8:\n self.index = 0\n if keys in (key.LEFT, key.RIGHT, key.ENTER):\n director.replace(get_steps(self.index))\n return True\n\n\nclass SpriteMoveTo(SpriteLayer):\n\n def on_enter(self):\n super(SpriteMoveTo, self).on_enter()\n sprite3 = Sprite(self.image)\n self.add(sprite3)\n x, y = divmod(self.index, 3)\n sprite3.position = x * 100 + 100, y * 100 + 100\n\n\nclass FontLayer(Layer):\n\n def __init__(self, title='Sprite Exmaple #', subtitle='Goto()'):\n super(FontLayer, self).__init__()\n self.title = title\n self.subtitle = subtitle\n self.batch = pyglet.graphics.Batch()\n self.text_title = pyglet.text.Label(self.title, font_size=32, x=5,\n y=director.get_window_size()[1], anchor_x='left', anchor_y=\n 'top', batch=self.batch)\n self.text_subtitle = pyglet.text.Label(self.subtitle, multiline=\n True, width=600, font_size=16, x=5, y=director.get_window_size(\n )[1] - 80, anchor_x='left', anchor_y='top', batch=self.batch)\n self.text_help = pyglet.text.Label(\n 'Press LEFT / RIGHT for prev/next test, ENTER to restart test',\n font_size=16, x=director.get_window_size()[0] // 2, y=20,\n anchor_x='center', anchor_y='center', batch=self.batch)\n\n def draw(self):\n super(FontLayer, self).draw()\n self.batch.draw()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_steps(index):\n return Scene(FontLayer(title='', subtitle='\\n'.join(generate_haiku())),\n SpriteMoveTo(index))\n\n\nclass SpriteLayer(Layer):\n is_event_handler = True\n\n def __init__(self, index=1):\n super(SpriteLayer, self).__init__()\n self.index = index\n self.image = pyglet.resource.image('flat-black-l.png')\n self.image.anchor_x = self.image.width\n self.image.anchor_y = self.image.height\n\n def on_key_release(self, keys, mod):\n max_steps = 8\n if keys == key.LEFT:\n self.index -= 1\n if self.index < 0:\n self.index = max_steps - 1\n elif keys == key.RIGHT:\n self.index += 1\n if self.index >= 8:\n self.index = 0\n if keys in (key.LEFT, key.RIGHT, key.ENTER):\n director.replace(get_steps(self.index))\n return True\n\n\nclass SpriteMoveTo(SpriteLayer):\n\n def on_enter(self):\n super(SpriteMoveTo, self).on_enter()\n sprite3 = Sprite(self.image)\n self.add(sprite3)\n x, y = divmod(self.index, 3)\n sprite3.position = x * 100 + 100, y * 100 + 100\n\n\nclass FontLayer(Layer):\n\n def __init__(self, title='Sprite Exmaple #', subtitle='Goto()'):\n super(FontLayer, self).__init__()\n self.title = title\n self.subtitle = subtitle\n self.batch = pyglet.graphics.Batch()\n self.text_title = pyglet.text.Label(self.title, font_size=32, x=5,\n y=director.get_window_size()[1], anchor_x='left', anchor_y=\n 'top', batch=self.batch)\n self.text_subtitle = pyglet.text.Label(self.subtitle, multiline=\n True, width=600, font_size=16, x=5, y=director.get_window_size(\n )[1] - 80, anchor_x='left', anchor_y='top', batch=self.batch)\n self.text_help = pyglet.text.Label(\n 'Press LEFT / RIGHT for prev/next test, ENTER to restart test',\n font_size=16, x=director.get_window_size()[0] // 2, y=20,\n anchor_x='center', anchor_y='center', batch=self.batch)\n\n def draw(self):\n super(FontLayer, self).draw()\n self.batch.draw()\n\n\n<mask token>\n",
"step-4": "<mask token>\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))\n<mask token>\n\n\ndef get_steps(index):\n return Scene(FontLayer(title='', subtitle='\\n'.join(generate_haiku())),\n SpriteMoveTo(index))\n\n\nclass SpriteLayer(Layer):\n is_event_handler = True\n\n def __init__(self, index=1):\n super(SpriteLayer, self).__init__()\n self.index = index\n self.image = pyglet.resource.image('flat-black-l.png')\n self.image.anchor_x = self.image.width\n self.image.anchor_y = self.image.height\n\n def on_key_release(self, keys, mod):\n max_steps = 8\n if keys == key.LEFT:\n self.index -= 1\n if self.index < 0:\n self.index = max_steps - 1\n elif keys == key.RIGHT:\n self.index += 1\n if self.index >= 8:\n self.index = 0\n if keys in (key.LEFT, key.RIGHT, key.ENTER):\n director.replace(get_steps(self.index))\n return True\n\n\nclass SpriteMoveTo(SpriteLayer):\n\n def on_enter(self):\n super(SpriteMoveTo, self).on_enter()\n sprite3 = Sprite(self.image)\n self.add(sprite3)\n x, y = divmod(self.index, 3)\n sprite3.position = x * 100 + 100, y * 100 + 100\n\n\nclass FontLayer(Layer):\n\n def __init__(self, title='Sprite Exmaple #', subtitle='Goto()'):\n super(FontLayer, self).__init__()\n self.title = title\n self.subtitle = subtitle\n self.batch = pyglet.graphics.Batch()\n self.text_title = pyglet.text.Label(self.title, font_size=32, x=5,\n y=director.get_window_size()[1], anchor_x='left', anchor_y=\n 'top', batch=self.batch)\n self.text_subtitle = pyglet.text.Label(self.subtitle, multiline=\n True, width=600, font_size=16, x=5, y=director.get_window_size(\n )[1] - 80, anchor_x='left', anchor_y='top', batch=self.batch)\n self.text_help = pyglet.text.Label(\n 'Press LEFT / RIGHT for prev/next test, ENTER to restart test',\n font_size=16, x=director.get_window_size()[0] // 2, y=20,\n anchor_x='center', anchor_y='center', batch=self.batch)\n\n def draw(self):\n super(FontLayer, self).draw()\n self.batch.draw()\n\n\nif __name__ == '__main__':\n 
director.init(resizable=True, caption='SuperStepper')\n director.run(get_steps(1))\n",
"step-5": "from __future__ import division, print_function, unicode_literals\n\nimport sys\nimport os\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))\n\nfrom pyglet.gl import *\nfrom pyglet.window import key\n\nfrom cocos.actions import *\nfrom cocos.director import director\nfrom cocos.layer import Layer\nfrom cocos.scene import Scene\nfrom cocos.sprite import Sprite\nfrom haiku import generate_haiku\n\nfrom time import time\n\ndef get_steps(index):\n \n return Scene(FontLayer(title=\"\", subtitle='\\n'.join(generate_haiku())), SpriteMoveTo(index))\n\nclass SpriteLayer(Layer):\n\n is_event_handler = True #: enable pyglet's events\n\n def __init__(self, index=1):\n super(SpriteLayer, self).__init__()\n self.index = index\n\n self.image = pyglet.resource.image('flat-black-l.png')\n self.image.anchor_x = self.image.width\n self.image.anchor_y = self.image.height\n\n def on_key_release(self, keys, mod):\n # LEFT: go to previous scene\n # RIGTH: go to next scene\n # ENTER: restart scene\n max_steps = 8\n\n if keys == key.LEFT:\n self.index -= 1\n if self.index < 0:\n self.index = max_steps - 1\n elif keys == key.RIGHT:\n self.index += 1\n if self.index >= 8:\n self.index = 0\n\n if keys in (key.LEFT, key.RIGHT, key.ENTER):\n director.replace(get_steps(self.index))\n return True\n\n # def on_exit( self ):\n # for o in self.objects:\n # o.stop()\n\nclass SpriteMoveTo(SpriteLayer):\n\n def on_enter(self):\n super(SpriteMoveTo, self).on_enter()\n\n sprite3 = Sprite(self.image)\n self.add(sprite3)\n x, y = divmod(self.index, 3)\n\n sprite3.position = x * 100 +100 , y * 100 + 100\n # sprite3.do(MoveTo((620, 300), 1))\n\n\nclass FontLayer(Layer):\n\n def __init__(self, title=\"Sprite Exmaple #\", subtitle=\"Goto()\"):\n super(FontLayer, self).__init__()\n\n self.title = title\n self.subtitle = subtitle\n\n self.batch = pyglet.graphics.Batch()\n\n self.text_title = pyglet.text.Label(self.title,\n font_size=32,\n x=5,\n y=director.get_window_size()[1],\n 
anchor_x='left',\n anchor_y='top',\n batch=self.batch)\n\n self.text_subtitle = pyglet.text.Label(self.subtitle,\n multiline=True,\n width=600,\n font_size=16,\n x=5,\n y=director.get_window_size()[1] - 80,\n anchor_x='left',\n anchor_y='top',\n batch=self.batch)\n\n self.text_help = pyglet.text.Label(\"Press LEFT / RIGHT for prev/next test, \"\n \"ENTER to restart test\",\n font_size=16,\n x=director.get_window_size()[0] // 2,\n y=20,\n anchor_x='center',\n anchor_y='center',\n batch=self.batch)\n\n def draw(self):\n super(FontLayer, self).draw()\n self.batch.draw()\n\n\n\nif __name__ == \"__main__\":\n director.init(resizable=True, caption='SuperStepper')\n director.run(get_steps(1))",
"step-ids": [
4,
9,
10,
11,
13
]
}
|
[
4,
9,
10,
11,
13
] |
import sys
import unittest
import random
from k_order_statistic import k_order_statistic
# Each tuple is (input_list, k, expected_k_th_smallest_element) — k is 0-based.
test_case_find = [([0], 0, 0), ([-1, -1, -1, -1], 3, -1), ([-1, -1, -1, -1],
    1, -1), ([-1, 0, 3, -10], 3, 3), ([-1, -2, -3, -4, -5], 0, -5), ([1, 2,
    3, 4, 5], 1, 2), ([True, False, True], 2, True), ([sys.maxsize], 0, sys
    .maxsize), ([True, 10], 1, 10)]
# Inputs expected to be rejected: empty list or mixed/non-comparable elements.
test_case_value = [[], [1, 'a', None, True], ['asd', True]]
class TestKOrderStatistic(unittest.TestCase):
    """Unit tests for k_order_statistic (k-th smallest element, 0-based k)."""

    def test_find(self):
        """Valid inputs return the expected k-th smallest element."""
        for a, k, ans in test_case_find:
            self.assertEqual(k_order_statistic(a, k), ans)

    def test_values(self):
        """Invalid inputs and out-of-range k raise TypeError."""
        for a in test_case_value:
            # Pass the arguments separately: assertRaises(exc, callable, *args).
            # The original passed a single (a, k) tuple, so the function was
            # invoked with one tuple argument and k was never supplied.
            self.assertRaises(TypeError, k_order_statistic, a,
                              random.randint(0, 10))
        for a, k, ans in test_case_find:
            # k + len(a) is always >= len(a), i.e. out of range.
            # NOTE(review): assumes k_order_statistic signals out-of-range k
            # with TypeError — confirm against its implementation.
            self.assertRaises(TypeError, k_order_statistic, a, k + len(a))
|
normal
|
{
"blob_id": "b93cd5ad957da37b1a4cca1d465a67723110e926",
"index": 2813,
"step-1": "<mask token>\n\n\nclass TestKOrderStatistic(unittest.TestCase):\n\n def test_find(self):\n for a, k, ans in test_case_find:\n self.assertEqual(k_order_statistic(a, k), ans)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestKOrderStatistic(unittest.TestCase):\n\n def test_find(self):\n for a, k, ans in test_case_find:\n self.assertEqual(k_order_statistic(a, k), ans)\n\n def test_values(self):\n for a in test_case_value:\n self.assertRaises(TypeError, k_order_statistic, (a, random.\n randint(0, 10)))\n for a, k, ans in test_case_find:\n self.assertRaises(TypeError, k_order_statistic, (a, k + len(a)))\n",
"step-3": "<mask token>\ntest_case_find = [([0], 0, 0), ([-1, -1, -1, -1], 3, -1), ([-1, -1, -1, -1],\n 1, -1), ([-1, 0, 3, -10], 3, 3), ([-1, -2, -3, -4, -5], 0, -5), ([1, 2,\n 3, 4, 5], 1, 2), ([True, False, True], 2, True), ([sys.maxsize], 0, sys\n .maxsize), ([True, 10], 1, 10)]\ntest_case_value = [[], [1, 'a', None, True], ['asd', True]]\n\n\nclass TestKOrderStatistic(unittest.TestCase):\n\n def test_find(self):\n for a, k, ans in test_case_find:\n self.assertEqual(k_order_statistic(a, k), ans)\n\n def test_values(self):\n for a in test_case_value:\n self.assertRaises(TypeError, k_order_statistic, (a, random.\n randint(0, 10)))\n for a, k, ans in test_case_find:\n self.assertRaises(TypeError, k_order_statistic, (a, k + len(a)))\n",
"step-4": "import sys\nimport unittest\nimport random\nfrom k_order_statistic import k_order_statistic\ntest_case_find = [([0], 0, 0), ([-1, -1, -1, -1], 3, -1), ([-1, -1, -1, -1],\n 1, -1), ([-1, 0, 3, -10], 3, 3), ([-1, -2, -3, -4, -5], 0, -5), ([1, 2,\n 3, 4, 5], 1, 2), ([True, False, True], 2, True), ([sys.maxsize], 0, sys\n .maxsize), ([True, 10], 1, 10)]\ntest_case_value = [[], [1, 'a', None, True], ['asd', True]]\n\n\nclass TestKOrderStatistic(unittest.TestCase):\n\n def test_find(self):\n for a, k, ans in test_case_find:\n self.assertEqual(k_order_statistic(a, k), ans)\n\n def test_values(self):\n for a in test_case_value:\n self.assertRaises(TypeError, k_order_statistic, (a, random.\n randint(0, 10)))\n for a, k, ans in test_case_find:\n self.assertRaises(TypeError, k_order_statistic, (a, k + len(a)))\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import render, HttpResponseRedirect, Http404
from django.contrib.auth import authenticate, login, logout
from accounts.forms import RegistrationForm, LoginForm, StudentDetailsForm, companyDetailsForm, SocietyDetailsForm
from accounts.models import MyUser, studentData, CompanyData, SoietyData
from accounts.helper_functions import password_check, email_check
# Create your views here.
def login_page(request):
    """Authenticate a user and route them by registration completeness.

    Already-authenticated users are redirected to "/".  Authenticated users
    with an existing studentData profile go to /home; users without one are
    sent to the completion page for their account type.  On failure the
    login form is re-rendered.
    """
    if request.user.is_authenticated():
        return HttpResponseRedirect("/")

    form = LoginForm(request.POST or None)
    if form.is_valid():
        username = form.cleaned_data['email']
        password = form.cleaned_data['password']
        # SECURITY: never print/log credentials — the previous debug print
        # wrote the plaintext password to the console and was removed.
        user = authenticate(username=username, password=password)
        if user is not None:
            try:
                studentData.objects.get(id=user.id)
                login(request, user)
                return HttpResponseRedirect('/home')
            except ObjectDoesNotExist:
                # Profile details missing: finish registration first.
                account = MyUser.objects.get(id=user.id)
                account_type = account.get_account_tyoe()
                return HttpResponseRedirect(
                    "complete_registration/" + account_type + "/" + str(user.id))

    context = {
        "form": form,
    }
    return render(request, "generalPages/loginpage.html", context)
def register_page(request):
    """Render the registration landing page (account-type selection).

    The old inline RegistrationForm handling was superseded by the dedicated
    per-account-type views (student_reg, company_reg, society_reg); the dead
    commented-out version has been removed.
    """
    return render(request, "generalPages/register.html")
def student_reg(request):
    """Render the student registration-completion page.

    The old inline account-creation flow (create MyUser, then redirect to
    /complete_registration/student/<id>) was dead commented-out code and has
    been removed.
    """
    return render(request, "student/CompleteStudentRegistration.html")
def company_reg(request):
    """Render the company registration-completion page.

    The old inline account-creation flow (create MyUser, then redirect to
    /complete_registration/company/<id>) was dead commented-out code and has
    been removed.
    """
    return render(request, "company/completeCompanyregistration.html")
def society_reg(request):
    """Render the society registration-completion page.

    The old inline account-creation flow (create MyUser, then redirect to
    /complete_registration/society/<id>) was dead commented-out code and has
    been removed.
    """
    return render(request, "society/completeSocietyRegistration.html")
def complete_student_registration(request):
    """Accept the student-details submission and redirect to the landing page.

    TODO(review): the submitted form data is currently discarded — no
    studentData record is created and the user is not logged in.  Confirm
    whether this endpoint is still a work in progress (a fuller version
    existed only as commented-out code and has been removed).
    """
    # SECURITY: the previous debug print of request.POST wrote submitted
    # form data (potentially credentials) to the console and was removed.
    return HttpResponseRedirect("/")
def complete_company_registration(request, id):
    """Second registration step for company accounts: collect profile details.

    Looks up the user for ``id``.  If company profile data already exists
    the user is logged in and sent to the company home page; otherwise the
    details form is rendered / processed.
    """
    if request.user.is_authenticated():
        # Already logged in - nothing left to complete.
        return HttpResponseRedirect("/")

    try:
        user = MyUser.objects.get(id=id)
    except ObjectDoesNotExist:
        # Unknown account id: restart the registration flow.
        return HttpResponseRedirect("/register")
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        return HttpResponseRedirect("/login")

    try:
        # Existence check only: profile already filled in, just log in.
        CompanyData.objects.get(id=id)
        login(request, user)
        return HttpResponseRedirect('/company_home')
    except ObjectDoesNotExist:
        if user.user_type != 'company':
            # Wrong account type for this completion page.
            return HttpResponseRedirect('/login')
        form = companyDetailsForm(request.POST or None)
        if form.is_valid():
            CompanyData.objects.create(
                id=user,
                Company_name=form.cleaned_data["company_name"],
                company_website=form.cleaned_data["company_website"],
                HQ_city=form.cleaned_data["HQ_city"],
                description=None,  # description is collected later, if at all
                industry=form.cleaned_data["industry"],
            )
            login(request, user)
            return HttpResponseRedirect("/company_home")
        # Invalid (or absent) submission: show the form again.
        context = {
            "form": companyDetailsForm(),
        }
        return render(request, "company/completeCompanyregistration.html", context)
    except Exception:
        # Narrowed from a bare ``except:``; any other failure in the lookup
        # or login above lands on the error page.
        return HttpResponseRedirect("/404")
def complete_society_registration(request, id):
    """Second registration step for society accounts: collect society details.

    Looks up the user for ``id``.  If society data already exists the user
    is logged in and redirected home; otherwise the details form is
    rendered / processed.  Debug prints from the original were removed.
    """
    if request.user.is_authenticated():
        # Already logged in - nothing left to complete.
        return HttpResponseRedirect("/")

    try:
        user = MyUser.objects.get(id=id)
    except ObjectDoesNotExist:
        # Unknown account id: restart the registration flow.
        return HttpResponseRedirect("/register")
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        return HttpResponseRedirect("/login")

    try:
        # Existence check only: details already submitted, just log in.
        SoietyData.objects.get(id=id)
        login(request, user)
        return HttpResponseRedirect('/home')
    except ObjectDoesNotExist:
        if user.user_type != 'society':
            # Wrong account type for this completion page.
            return HttpResponseRedirect('/login')
        form = SocietyDetailsForm(request.POST or None)
        if form.is_valid():
            SoietyData.objects.create(
                id=user,
                society_name=form.cleaned_data['society_name'],
                society_university=form.cleaned_data['society_university'],
                society_facebook=form.cleaned_data['society_FB'],
                society_website=form.cleaned_data['society_website'],
            )
            login(request, user)
            return HttpResponseRedirect("/society_home")
        # Invalid (or absent) submission: show the form again.
        context = {
            "form": SocietyDetailsForm(),
        }
        return render(request, "society/completeSocietyRegistration.html", context)
    except Exception:
        # Was the debug placeholder "/thisisaknownerror"; use the same error
        # page as the company completion view for consistency.
        return HttpResponseRedirect("/404")
def logout_call(request):
    """Log the current user out and redirect to the landing page."""
    logout(request)
    return HttpResponseRedirect('/')
|
normal
|
{
"blob_id": "7f21fcc1265be8b3263971a4e76470616459f433",
"index": 6061,
"step-1": "from django.core.exceptions import ObjectDoesNotExist\nfrom django.shortcuts import render, HttpResponseRedirect, Http404\nfrom django.contrib.auth import authenticate, login, logout\n\nfrom accounts.forms import RegistrationForm, LoginForm, StudentDetailsForm, companyDetailsForm, SocietyDetailsForm\nfrom accounts.models import MyUser, studentData, CompanyData, SoietyData\nfrom accounts.helper_functions import password_check, email_check\n\n# Create your views here.\n\ndef login_page(request):\n if request.user.is_authenticated():\n return HttpResponseRedirect(\"/\")\n else:\n\n form = LoginForm(request.POST or None)\n next_url = request.GET.get('next')\n\n if form.is_valid():\n username = form.cleaned_data['email']\n password = form.cleaned_data['password']\n print username, password\n\n user = authenticate(username=username, password=password)\n\n if user is not None:\n\n\n try:\n user_details = studentData.objects.get(id=user.id)\n login(request, user)\n return HttpResponseRedirect('/home')\n except ObjectDoesNotExist:\n account = MyUser.objects.get(id=user.id)\n account_type = account.get_account_tyoe()\n return HttpResponseRedirect(\"complete_registration/\" + account_type +\"/\"+str(user.id))\n context = {\n \"form\": form\n }\n return render(request, \"generalPages/loginpage.html\", context)\n\n\ndef register_page(request):\n\n\n # if request.user.is_authenticated():\n # return HttpResponseRedirect(\"/\")\n # else:\n # form = RegistrationForm(request.POST or None)\n # context = {\n # \"form\": RegistrationForm(),\n # \"action_value_society\": \"register/society\",\n # \"action_value_student\": \"register/student\",\n # \"action_value_company\": \"register/company\",\n # \"submit_btn_value\": \"Register\"\n #\n # }\n # return render(request, \"generalPages/register.html\", context)\n\n return render(request, \"generalPages/register.html\")\n\n\ndef student_reg(request):\n # if request.user.is_authenticated():\n # return 
HttpResponseRedirect(\"/\")\n # else:\n # form = RegistrationForm(request.POST or None)\n # print form\n #\n # if form.is_valid():\n # email = form.cleaned_data[\"email\"]\n # password = form.cleaned_data[\"password2\"]\n #\n # print email + password\n #\n # user = MyUser.objects.create_user(email=email, password=password, userType=\"student\")\n # #todo: send out confirmation email\n #\n #\n # # get the ID so i can pass it in the URL to the complete registration page\n # user_id = user.id\n # return HttpResponseRedirect(\"/complete_registration/student/\" + str(user_id))\n #\n # else:\n # #todo: change this that it raises username already in use error\n # print \"form is invalid\"\n # # todo: add a parameter that tells them, the username or password was incorrect\n # return HttpResponseRedirect(\"/register\")\n return render(request, \"student/CompleteStudentRegistration.html\")\n\n\n\ndef company_reg(request):\n # if request.user.is_authenticated():\n # return HttpResponseRedirect(\"/\")\n # else:\n # form = RegistrationForm(request.POST or None)\n # print form\n #\n # if form.is_valid():\n # email = form.cleaned_data[\"email\"]\n # password = form.cleaned_data[\"password2\"]\n #\n # print email + password\n #\n # user = MyUser.objects.create_user(email=email, password=password, userType=\"company\")\n # # todo: send out confirmation email\n #\n # # get the ID so i can pass it in the URL to the complete registration page\n # user_id = user.id\n # return HttpResponseRedirect(\"/complete_registration/company/\" + str(user_id))\n #\n # else:\n # print \"form is invalid\"\n # # todo: add a parameter that tells them, the username or password was incorrect\n # return HttpResponseRedirect(\"/register\")\n return render(request, \"company/completeCompanyregistration.html\")\n\n\ndef society_reg(request):\n # if request.user.is_authenticated():\n # return HttpResponseRedirect(\"/\")\n # else:\n # form = RegistrationForm(request.POST or None)\n # print form\n #\n # if 
form.is_valid():\n # email = form.cleaned_data[\"email\"]\n # password = form.cleaned_data[\"password2\"]\n #\n # print email + password\n #\n # user = MyUser.objects.create_user(email=email, password=password, userType=\"society\")\n # # todo: send out confirmation email\n #\n # # get the ID so i can pass it in the URL to the complete registration page\n # user_id = user.id\n # return HttpResponseRedirect(\"/complete_registration/society/\" + str(user_id))\n #\n # else:\n # print \"form is invalid\"\n # # todo: add a parameter that tells them, the username or password was incorrect\n # return HttpResponseRedirect(\"/register\")\n return render(request, \"society/completeSocietyRegistration.html\")\n\n\ndef complete_student_registration(request):\n\n print request.POST\n\n return HttpResponseRedirect(\"/\")\n\n\n # # check if the id is the one that matchest to their email:\n #\n #\n # # print \"in their\"\n # # print request\n # #\n # # return HttpResponseRedirect(\"/\")\n # if request.user.is_authenticated():\n # return HttpResponseRedirect(\"/\")\n # else:\n # try:\n # user = MyUser.objects.get(id=id)\n #\n # except ObjectDoesNotExist:\n # return HttpResponseRedirect(\"/register\")\n # except:\n # return HttpResponseRedirect(\"/login\")\n #\n # try:\n # user_details = studentData.objects.get(id=id)\n # login(request, user)\n # return HttpResponseRedirect('/home')\n # except ObjectDoesNotExist:\n #\n # if user.user_type == 'student':\n # form = StudentDetailsForm(request.POST or None)\n #\n # if form.is_valid():\n # f_name = form.cleaned_data[\"first_name\"]\n # s_name= form.cleaned_data[\"surname\"]\n # studyCunt = form.cleaned_data[\"countryOfStudy\"]\n # course= form.cleaned_data['course']\n # university = form.cleaned_data['university']\n #\n # studentData.objects.create(id=user, first_name=f_name, surname=s_name,\n # countryOfStudy=studyCunt, course=course, university=university)\n # login(request, user)\n # return HttpResponseRedirect(\"/home\")\n # # 
else:\n # # print \"form is invalid\"\n # context = {\n # \"form\": StudentDetailsForm(),\n #\n # }\n # return render(request, \"student/CompleteStudentRegistration.html\", context)\n #\n # pass\n # else:\n # return HttpResponseRedirect('/login')\n # except:\n # return HttpResponseRedirect(\"/404\")\n\n\n\ndef complete_company_registration(request, id):\n # check if the id is the one that matchest to their email:\n\n\n # print \"in their\"\n # print request\n #\n # return HttpResponseRedirect(\"/\")\n if request.user.is_authenticated():\n return HttpResponseRedirect(\"/\")\n else:\n try:\n user = MyUser.objects.get(id=id)\n\n except ObjectDoesNotExist:\n return HttpResponseRedirect(\"/register\")\n except:\n return HttpResponseRedirect(\"/login\")\n\n try:\n user_details = CompanyData.objects.get(id=id)\n login(request, user)\n return HttpResponseRedirect('/company_home')\n except ObjectDoesNotExist:\n\n if user.user_type == 'company':\n\n form = companyDetailsForm(request.POST or None)\n\n if form.is_valid():\n print \"there\"\n company_name = form.cleaned_data[\"company_name\"]\n website = form.cleaned_data[\"company_website\"]\n city = form.cleaned_data[\"HQ_city\"]\n industry = form.cleaned_data[\"industry\"]\n\n CompanyData.objects.create(id=user, Company_name=company_name, company_website=website,\n HQ_city=city, description=None, industry=industry)\n login(request, user)\n return HttpResponseRedirect(\"/company_home\")\n # else:\n # print \"form is invalid\"\n context = {\n \"form\": companyDetailsForm(),\n\n }\n return render(request, \"company/completeCompanyregistration.html\", context)\n\n pass\n else:\n return HttpResponseRedirect('/login')\n except:\n return HttpResponseRedirect(\"/404\")\n\n\ndef complete_society_registration(request, id):\n print \"hey\"\n if request.user.is_authenticated():\n return HttpResponseRedirect(\"/\")\n else:\n print \"ho\"\n try:\n user = MyUser.objects.get(id=id)\n\n except ObjectDoesNotExist:\n return 
HttpResponseRedirect(\"/register\")\n except:\n return HttpResponseRedirect(\"/login\")\n\n try:\n user_details = SoietyData.objects.get(id=id)\n login(request, user)\n return HttpResponseRedirect('/home')\n except ObjectDoesNotExist:\n print \"lets \"\n if user.user_type == 'society':\n form = SocietyDetailsForm(request.POST or None)\n\n if form.is_valid():\n name = form.cleaned_data['society_name']\n university = form.cleaned_data['society_university']\n fb = form.cleaned_data['society_FB']\n website = form.cleaned_data['society_website']\n\n SoietyData.objects.create(id=user, society_name=name, society_university=university,\n society_facebook=fb, society_website=website)\n login(request, user)\n return HttpResponseRedirect(\"/society_home\")\n # else:\n # print \"form is invalid\"\n context = {\n \"form\": SocietyDetailsForm(),\n\n }\n print \"go\"\n return render(request, \"society/completeSocietyRegistration.html\", context)\n else:\n return HttpResponseRedirect('/login')\n except:\n return HttpResponseRedirect(\"/thisisaknownerror\")\n\n\n\n\ndef logout_call(request):\n logout(request)\n return HttpResponseRedirect('/')\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""Command line interface to the OSF
These functions implement the functionality of the command-line interface.
"""
from __future__ import print_function
from functools import wraps
import getpass
import os
import sys
from six.moves import configparser
from six.moves import input
from tqdm import tqdm
from .api import OSF
from .exceptions import UnauthorizedException
from .utils import norm_remote_path, split_storage, makedirs, checksum
def config_from_file():
    """Read the local ``.osfcli.config`` file into a plain dict.

    Returns an empty dict when no config file exists in the current
    working directory.
    """
    if not os.path.exists(".osfcli.config"):
        return {}
    parser = configparser.ConfigParser()
    parser.read(".osfcli.config")
    # plain dict() for python2 compatibility
    return dict(parser.items('osf'))
def config_from_env(config):
    """Overlay ``OSF_USERNAME`` / ``OSF_PROJECT`` environment variables
    onto *config* and return it.

    Unset variables leave the corresponding config entries untouched.
    """
    for key, variable in (('username', 'OSF_USERNAME'),
                          ('project', 'OSF_PROJECT')):
        value = os.getenv(variable)
        if value is not None:
            config[key] = value
    return config
def _get_username(args, config):
    """Pick the username: the command-line value wins, otherwise fall back
    to the config file (None when neither is set)."""
    if args.username is not None:
        return args.username
    return config.get('username')
def _setup_osf(args):
    """Build an :class:`OSF` client from CLI args, env vars and config file.

    Precedence: command-line options > environment variables > config file.
    Exits the process when no project ID can be determined from any source.
    """
    config = config_from_env(config_from_file())
    username = _get_username(args, config)

    if args.project is None:
        args.project = config.get('project')
    # still None? We are in trouble
    if args.project is None:
        sys.exit('You have to specify a project ID via the command line,'
                 ' configuration file or environment variable.')

    password = None
    if username is not None:
        password = os.getenv("OSF_PASSWORD")
        # Prompt user when password is not set
        if password is None:
            password = getpass.getpass('Please input your password: ')

    return OSF(username=username, password=password)
def might_need_auth(f):
    """Decorate a CLI function that might require authentication.

    Catches any UnauthorizedException raised, prints a helpful message and
    then exits.
    """
    @wraps(f)
    def wrapper(cli_args):
        try:
            # sys.exit() below raises SystemExit, so returning directly here
            # is equivalent to the old assign-then-return dance.
            return f(cli_args)
        except UnauthorizedException:
            config = config_from_env(config_from_file())
            username = _get_username(cli_args, config)
            if username is None:
                sys.exit("Please set a username (run `osf -h` for details).")
            else:
                sys.exit("You are not authorized to access this project.")

    return wrapper
def init(args):
    """Initialize or edit an existing .osfcli.config file.

    Interactively prompts for a username and a project ID, pre-filling the
    prompts with any values found in an existing config file, then writes
    the result back to ``.osfcli.config``.

    ``args`` is unused; presumably kept so all CLI sub-commands share the
    same signature - TODO confirm against the dispatcher.
    """
    # reading existing config file, convert to configparser object
    config = config_from_file()
    config_ = configparser.ConfigParser()
    config_.add_section('osf')
    # Pre-seed with existing values (empty string when absent) so the
    # prompts below can display the current setting.
    config_.set('osf', 'username', config.get('username', ''))
    config_.set('osf', 'project', config.get('project', ''))

    # now we can start asking for new values; an empty answer keeps the
    # existing value.
    print('Provide a username for the config file [current username: {}]:'.format(
          config_.get('osf', 'username')))
    username = input()
    if username:
        config_.set('osf', 'username', username)

    print('Provide a project for the config file [current project: {}]:'.format(
          config_.get('osf', 'project')))
    project = input()
    if project:
        config_.set('osf', 'project', project)

    # ``with`` guarantees the file is closed even if write() raises
    # (the original opened and closed the handle manually).
    with open(".osfcli.config", "w") as cfgfile:
        config_.write(cfgfile)
@might_need_auth
def clone(args):
    """Copy all files from all storages of a project.
    The output directory defaults to the current directory.
    If the project is private you need to specify a username.
    If args.update is True, overwrite any existing local files only if local and
    remote files differ.
    """
    osf = _setup_osf(args)
    project = osf.project(args.project)
    # Default the output directory to the project ID.
    output_dir = args.project
    if args.output is not None:
        output_dir = args.output
    with tqdm(unit='files') as pbar:
        for store in project.storages:
            # Files of each storage provider land in their own subdirectory.
            prefix = os.path.join(output_dir, store.name)
            for file_ in store.files:
                path = file_.path
                # Remote paths are absolute; strip the leading slash before
                # joining so os.path.join does not discard the prefix.
                if path.startswith('/'):
                    path = path[1:]
                path = os.path.join(prefix, path)
                if os.path.exists(path) and args.update:
                    # Skip downloads whose local copy already matches the
                    # remote MD5 checksum.
                    if checksum(path) == file_.hashes.get('md5'):
                        continue
                directory, _ = os.path.split(path)
                makedirs(directory, exist_ok=True)
                with open(path, "wb") as f:
                    file_.write_to(f)
                # NOTE: skipped (up-to-date) files do not advance the bar.
                pbar.update()
@might_need_auth
def fetch(args):
    """Fetch an individual file from a project.
    The first part of the remote path is interpreted as the name of the
    storage provider. If there is no match the default (osfstorage) is
    used.
    The local path defaults to the name of the remote file.
    If the project is private you need to specify a username.
    If args.force is True, write local file even if that file already exists.
    If args.force is False but args.update is True, overwrite an existing local
    file only if local and remote files differ.
    """
    storage, remote_path = split_storage(args.remote)
    local_path = args.local
    if local_path is None:
        # Default to the remote file's basename in the current directory.
        _, local_path = os.path.split(remote_path)
    local_path_exists = os.path.exists(local_path)
    # Refuse to clobber an existing file unless --force or --update was given.
    if local_path_exists and not args.force and not args.update:
        sys.exit("Local file %s already exists, not overwriting." % local_path)
    directory, _ = os.path.split(local_path)
    if directory:
        makedirs(directory, exist_ok=True)
    osf = _setup_osf(args)
    project = osf.project(args.project)
    store = project.storage(storage)
    # NOTE(review): if no remote file matches, this loop falls through and
    # the command silently does nothing - confirm whether that is intended.
    for file_ in store.files:
        if norm_remote_path(file_.path) == remote_path:
            # --update without --force: keep the local file when checksums
            # already agree.
            if local_path_exists and not args.force and args.update:
                if file_.hashes.get('md5') == checksum(local_path):
                    print("Local file %s already matches remote." % local_path)
                    break
            with open(local_path, 'wb') as fp:
                file_.write_to(fp)
            # only fetching one file so we are done
            break
@might_need_auth
def list_(args):
    """Print every file, from every storage provider, of the project.

    If the project is private you need to specify a username.
    """
    osf = _setup_osf(args)
    project = osf.project(args.project)
    for store in project.storages:
        for file_ in store.files:
            path = file_.path
            # Remote paths are absolute; drop the single leading slash so the
            # storage name becomes the first path component.
            path = path[1:] if path.startswith('/') else path
            print(os.path.join(store.name, path))
@might_need_auth
def upload(args):
    """Upload a new file to an existing project.
    The first part of the remote path is interpreted as the name of the
    storage provider. If there is no match the default (osfstorage) is
    used.
    If the project is private you need to specify a username.
    To upload a whole directory (and all its sub-directories) use the `-r`
    command-line option. If your source directory name ends in a / then
    files will be created directly in the remote directory. If it does not
    end in a slash an extra sub-directory with the name of the local directory
    will be created.
    To place contents of local directory `foo` in remote directory `bar/foo`:
    $ osf upload -r foo bar
    To place contents of local directory `foo` in remote directory `bar`:
    $ osf upload -r foo/ bar
    """
    osf = _setup_osf(args)
    # Uploading mutates the project, so anonymous access is not enough.
    if osf.username is None or osf.password is None:
        sys.exit('To upload a file you need to provide a username and'
                 ' password.')
    project = osf.project(args.project)
    storage, remote_path = split_storage(args.destination)
    if remote_path == '':
        # No remote name given: reuse the source's basename.
        remote_path = os.path.split(args.source)[-1]
    store = project.storage(storage)
    if args.recursive:
        if not os.path.isdir(args.source):
            raise RuntimeError("Expected source ({}) to be a directory when "
                               "using recursive mode.".format(args.source))
        # local name of the directory that is being uploaded; empty string
        # when the source ends in a slash, which uploads the contents
        # directly (os.path.join ignores empty components).
        _, dir_name = os.path.split(args.source)
        for root, _, files in os.walk(args.source):
            # Path of the current directory relative to the upload root.
            subdir_path = os.path.relpath(root, args.source)
            for fname in files:
                local_path = os.path.join(root, fname)
                with open(local_path, 'rb') as fp:
                    # build the remote path + fname
                    name = os.path.join(remote_path, dir_name, subdir_path,
                                        fname)
                    store.create_file(name, fp, force=args.force,
                                      update=args.update)
    else:
        with open(args.source, 'rb') as fp:
            store.create_file(remote_path, fp, force=args.force,
                              update=args.update)
@might_need_auth
def remove(args):
    """Remove a file from the project's storage.

    The first part of the remote path is interpreted as the name of the
    storage provider. If there is no match the default (osfstorage) is
    used.
    """
    osf = _setup_osf(args)
    # Deleting mutates the project, so anonymous access is not enough.
    if osf.username is None or osf.password is None:
        sys.exit('To remove a file you need to provide a username and'
                 ' password.')
    project = osf.project(args.project)
    storage, remote_path = split_storage(args.target)
    store = project.storage(storage)
    for f in store.files:
        if norm_remote_path(f.path) == remote_path:
            f.remove()
            # Stop after the first hit: the original kept iterating the
            # (potentially large) file listing after the delete.
            break
|
normal
|
{
"blob_id": "ca551d8e55ebb15a03077af5695782c6d72ff2fd",
"index": 8091,
"step-1": "<mask token>\n\n\ndef config_from_env(config):\n username = os.getenv('OSF_USERNAME')\n if username is not None:\n config['username'] = username\n project = os.getenv('OSF_PROJECT')\n if project is not None:\n config['project'] = project\n return config\n\n\ndef _get_username(args, config):\n if args.username is None:\n username = config.get('username')\n else:\n username = args.username\n return username\n\n\ndef _setup_osf(args):\n config = config_from_env(config_from_file())\n username = _get_username(args, config)\n project = config.get('project')\n if args.project is None:\n args.project = project\n if args.project is None:\n sys.exit(\n 'You have to specify a project ID via the command line, configuration file or environment variable.'\n )\n password = None\n if username is not None:\n password = os.getenv('OSF_PASSWORD')\n if password is None:\n password = getpass.getpass('Please input your password: ')\n return OSF(username=username, password=password)\n\n\ndef might_need_auth(f):\n \"\"\"Decorate a CLI function that might require authentication.\n\n Catches any UnauthorizedException raised, prints a helpful message and\n then exits.\n \"\"\"\n\n @wraps(f)\n def wrapper(cli_args):\n try:\n return_value = f(cli_args)\n except UnauthorizedException as e:\n config = config_from_env(config_from_file())\n username = _get_username(cli_args, config)\n if username is None:\n sys.exit('Please set a username (run `osf -h` for details).')\n else:\n sys.exit('You are not authorized to access this project.')\n return return_value\n return wrapper\n\n\n<mask token>\n\n\n@might_need_auth\ndef clone(args):\n \"\"\"Copy all files from all storages of a project.\n\n The output directory defaults to the current directory.\n\n If the project is private you need to specify a username.\n\n If args.update is True, overwrite any existing local files only if local and\n remote files differ.\n \"\"\"\n osf = _setup_osf(args)\n project = osf.project(args.project)\n 
output_dir = args.project\n if args.output is not None:\n output_dir = args.output\n with tqdm(unit='files') as pbar:\n for store in project.storages:\n prefix = os.path.join(output_dir, store.name)\n for file_ in store.files:\n path = file_.path\n if path.startswith('/'):\n path = path[1:]\n path = os.path.join(prefix, path)\n if os.path.exists(path) and args.update:\n if checksum(path) == file_.hashes.get('md5'):\n continue\n directory, _ = os.path.split(path)\n makedirs(directory, exist_ok=True)\n with open(path, 'wb') as f:\n file_.write_to(f)\n pbar.update()\n\n\n@might_need_auth\ndef fetch(args):\n \"\"\"Fetch an individual file from a project.\n\n The first part of the remote path is interpreted as the name of the\n storage provider. If there is no match the default (osfstorage) is\n used.\n\n The local path defaults to the name of the remote file.\n\n If the project is private you need to specify a username.\n\n If args.force is True, write local file even if that file already exists.\n If args.force is False but args.update is True, overwrite an existing local\n file only if local and remote files differ.\n \"\"\"\n storage, remote_path = split_storage(args.remote)\n local_path = args.local\n if local_path is None:\n _, local_path = os.path.split(remote_path)\n local_path_exists = os.path.exists(local_path)\n if local_path_exists and not args.force and not args.update:\n sys.exit('Local file %s already exists, not overwriting.' % local_path)\n directory, _ = os.path.split(local_path)\n if directory:\n makedirs(directory, exist_ok=True)\n osf = _setup_osf(args)\n project = osf.project(args.project)\n store = project.storage(storage)\n for file_ in store.files:\n if norm_remote_path(file_.path) == remote_path:\n if local_path_exists and not args.force and args.update:\n if file_.hashes.get('md5') == checksum(local_path):\n print('Local file %s already matches remote.' 
% local_path)\n break\n with open(local_path, 'wb') as fp:\n file_.write_to(fp)\n break\n\n\n@might_need_auth\ndef list_(args):\n \"\"\"List all files from all storages for project.\n\n If the project is private you need to specify a username.\n \"\"\"\n osf = _setup_osf(args)\n project = osf.project(args.project)\n for store in project.storages:\n prefix = store.name\n for file_ in store.files:\n path = file_.path\n if path.startswith('/'):\n path = path[1:]\n print(os.path.join(prefix, path))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef config_from_env(config):\n username = os.getenv('OSF_USERNAME')\n if username is not None:\n config['username'] = username\n project = os.getenv('OSF_PROJECT')\n if project is not None:\n config['project'] = project\n return config\n\n\ndef _get_username(args, config):\n if args.username is None:\n username = config.get('username')\n else:\n username = args.username\n return username\n\n\ndef _setup_osf(args):\n config = config_from_env(config_from_file())\n username = _get_username(args, config)\n project = config.get('project')\n if args.project is None:\n args.project = project\n if args.project is None:\n sys.exit(\n 'You have to specify a project ID via the command line, configuration file or environment variable.'\n )\n password = None\n if username is not None:\n password = os.getenv('OSF_PASSWORD')\n if password is None:\n password = getpass.getpass('Please input your password: ')\n return OSF(username=username, password=password)\n\n\ndef might_need_auth(f):\n \"\"\"Decorate a CLI function that might require authentication.\n\n Catches any UnauthorizedException raised, prints a helpful message and\n then exits.\n \"\"\"\n\n @wraps(f)\n def wrapper(cli_args):\n try:\n return_value = f(cli_args)\n except UnauthorizedException as e:\n config = config_from_env(config_from_file())\n username = _get_username(cli_args, config)\n if username is None:\n sys.exit('Please set a username (run `osf -h` for details).')\n else:\n sys.exit('You are not authorized to access this project.')\n return return_value\n return wrapper\n\n\ndef init(args):\n \"\"\"Initialize or edit an existing .osfcli.config file.\"\"\"\n config = config_from_file()\n config_ = configparser.ConfigParser()\n config_.add_section('osf')\n if 'username' not in config.keys():\n config_.set('osf', 'username', '')\n else:\n config_.set('osf', 'username', config['username'])\n if 'project' not in config.keys():\n config_.set('osf', 'project', '')\n else:\n 
config_.set('osf', 'project', config['project'])\n print('Provide a username for the config file [current username: {}]:'.\n format(config_.get('osf', 'username')))\n username = input()\n if username:\n config_.set('osf', 'username', username)\n print('Provide a project for the config file [current project: {}]:'.\n format(config_.get('osf', 'project')))\n project = input()\n if project:\n config_.set('osf', 'project', project)\n cfgfile = open('.osfcli.config', 'w')\n config_.write(cfgfile)\n cfgfile.close()\n\n\n@might_need_auth\ndef clone(args):\n \"\"\"Copy all files from all storages of a project.\n\n The output directory defaults to the current directory.\n\n If the project is private you need to specify a username.\n\n If args.update is True, overwrite any existing local files only if local and\n remote files differ.\n \"\"\"\n osf = _setup_osf(args)\n project = osf.project(args.project)\n output_dir = args.project\n if args.output is not None:\n output_dir = args.output\n with tqdm(unit='files') as pbar:\n for store in project.storages:\n prefix = os.path.join(output_dir, store.name)\n for file_ in store.files:\n path = file_.path\n if path.startswith('/'):\n path = path[1:]\n path = os.path.join(prefix, path)\n if os.path.exists(path) and args.update:\n if checksum(path) == file_.hashes.get('md5'):\n continue\n directory, _ = os.path.split(path)\n makedirs(directory, exist_ok=True)\n with open(path, 'wb') as f:\n file_.write_to(f)\n pbar.update()\n\n\n@might_need_auth\ndef fetch(args):\n \"\"\"Fetch an individual file from a project.\n\n The first part of the remote path is interpreted as the name of the\n storage provider. 
If there is no match the default (osfstorage) is\n used.\n\n The local path defaults to the name of the remote file.\n\n If the project is private you need to specify a username.\n\n If args.force is True, write local file even if that file already exists.\n If args.force is False but args.update is True, overwrite an existing local\n file only if local and remote files differ.\n \"\"\"\n storage, remote_path = split_storage(args.remote)\n local_path = args.local\n if local_path is None:\n _, local_path = os.path.split(remote_path)\n local_path_exists = os.path.exists(local_path)\n if local_path_exists and not args.force and not args.update:\n sys.exit('Local file %s already exists, not overwriting.' % local_path)\n directory, _ = os.path.split(local_path)\n if directory:\n makedirs(directory, exist_ok=True)\n osf = _setup_osf(args)\n project = osf.project(args.project)\n store = project.storage(storage)\n for file_ in store.files:\n if norm_remote_path(file_.path) == remote_path:\n if local_path_exists and not args.force and args.update:\n if file_.hashes.get('md5') == checksum(local_path):\n print('Local file %s already matches remote.' % local_path)\n break\n with open(local_path, 'wb') as fp:\n file_.write_to(fp)\n break\n\n\n@might_need_auth\ndef list_(args):\n \"\"\"List all files from all storages for project.\n\n If the project is private you need to specify a username.\n \"\"\"\n osf = _setup_osf(args)\n project = osf.project(args.project)\n for store in project.storages:\n prefix = store.name\n for file_ in store.files:\n path = file_.path\n if path.startswith('/'):\n path = path[1:]\n print(os.path.join(prefix, path))\n\n\n@might_need_auth\ndef upload(args):\n \"\"\"Upload a new file to an existing project.\n\n The first part of the remote path is interpreted as the name of the\n storage provider. 
If there is no match the default (osfstorage) is\n used.\n\n If the project is private you need to specify a username.\n\n To upload a whole directory (and all its sub-directories) use the `-r`\n command-line option. If your source directory name ends in a / then\n files will be created directly in the remote directory. If it does not\n end in a slash an extra sub-directory with the name of the local directory\n will be created.\n\n To place contents of local directory `foo` in remote directory `bar/foo`:\n $ osf upload -r foo bar\n To place contents of local directory `foo` in remote directory `bar`:\n $ osf upload -r foo/ bar\n \"\"\"\n osf = _setup_osf(args)\n if osf.username is None or osf.password is None:\n sys.exit(\n 'To upload a file you need to provide a username and password.')\n project = osf.project(args.project)\n storage, remote_path = split_storage(args.destination)\n if remote_path == '':\n remote_path = os.path.split(args.source)[-1]\n store = project.storage(storage)\n if args.recursive:\n if not os.path.isdir(args.source):\n raise RuntimeError(\n 'Expected source ({}) to be a directory when using recursive mode.'\n .format(args.source))\n _, dir_name = os.path.split(args.source)\n for root, _, files in os.walk(args.source):\n subdir_path = os.path.relpath(root, args.source)\n for fname in files:\n local_path = os.path.join(root, fname)\n with open(local_path, 'rb') as fp:\n name = os.path.join(remote_path, dir_name, subdir_path,\n fname)\n store.create_file(name, fp, force=args.force, update=\n args.update)\n else:\n with open(args.source, 'rb') as fp:\n store.create_file(remote_path, fp, force=args.force, update=\n args.update)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef config_from_env(config):\n username = os.getenv('OSF_USERNAME')\n if username is not None:\n config['username'] = username\n project = os.getenv('OSF_PROJECT')\n if project is not None:\n config['project'] = project\n return config\n\n\ndef _get_username(args, config):\n if args.username is None:\n username = config.get('username')\n else:\n username = args.username\n return username\n\n\ndef _setup_osf(args):\n config = config_from_env(config_from_file())\n username = _get_username(args, config)\n project = config.get('project')\n if args.project is None:\n args.project = project\n if args.project is None:\n sys.exit(\n 'You have to specify a project ID via the command line, configuration file or environment variable.'\n )\n password = None\n if username is not None:\n password = os.getenv('OSF_PASSWORD')\n if password is None:\n password = getpass.getpass('Please input your password: ')\n return OSF(username=username, password=password)\n\n\ndef might_need_auth(f):\n \"\"\"Decorate a CLI function that might require authentication.\n\n Catches any UnauthorizedException raised, prints a helpful message and\n then exits.\n \"\"\"\n\n @wraps(f)\n def wrapper(cli_args):\n try:\n return_value = f(cli_args)\n except UnauthorizedException as e:\n config = config_from_env(config_from_file())\n username = _get_username(cli_args, config)\n if username is None:\n sys.exit('Please set a username (run `osf -h` for details).')\n else:\n sys.exit('You are not authorized to access this project.')\n return return_value\n return wrapper\n\n\ndef init(args):\n \"\"\"Initialize or edit an existing .osfcli.config file.\"\"\"\n config = config_from_file()\n config_ = configparser.ConfigParser()\n config_.add_section('osf')\n if 'username' not in config.keys():\n config_.set('osf', 'username', '')\n else:\n config_.set('osf', 'username', config['username'])\n if 'project' not in config.keys():\n config_.set('osf', 'project', '')\n else:\n 
config_.set('osf', 'project', config['project'])\n print('Provide a username for the config file [current username: {}]:'.\n format(config_.get('osf', 'username')))\n username = input()\n if username:\n config_.set('osf', 'username', username)\n print('Provide a project for the config file [current project: {}]:'.\n format(config_.get('osf', 'project')))\n project = input()\n if project:\n config_.set('osf', 'project', project)\n cfgfile = open('.osfcli.config', 'w')\n config_.write(cfgfile)\n cfgfile.close()\n\n\n@might_need_auth\ndef clone(args):\n \"\"\"Copy all files from all storages of a project.\n\n The output directory defaults to the current directory.\n\n If the project is private you need to specify a username.\n\n If args.update is True, overwrite any existing local files only if local and\n remote files differ.\n \"\"\"\n osf = _setup_osf(args)\n project = osf.project(args.project)\n output_dir = args.project\n if args.output is not None:\n output_dir = args.output\n with tqdm(unit='files') as pbar:\n for store in project.storages:\n prefix = os.path.join(output_dir, store.name)\n for file_ in store.files:\n path = file_.path\n if path.startswith('/'):\n path = path[1:]\n path = os.path.join(prefix, path)\n if os.path.exists(path) and args.update:\n if checksum(path) == file_.hashes.get('md5'):\n continue\n directory, _ = os.path.split(path)\n makedirs(directory, exist_ok=True)\n with open(path, 'wb') as f:\n file_.write_to(f)\n pbar.update()\n\n\n@might_need_auth\ndef fetch(args):\n \"\"\"Fetch an individual file from a project.\n\n The first part of the remote path is interpreted as the name of the\n storage provider. 
If there is no match the default (osfstorage) is\n used.\n\n The local path defaults to the name of the remote file.\n\n If the project is private you need to specify a username.\n\n If args.force is True, write local file even if that file already exists.\n If args.force is False but args.update is True, overwrite an existing local\n file only if local and remote files differ.\n \"\"\"\n storage, remote_path = split_storage(args.remote)\n local_path = args.local\n if local_path is None:\n _, local_path = os.path.split(remote_path)\n local_path_exists = os.path.exists(local_path)\n if local_path_exists and not args.force and not args.update:\n sys.exit('Local file %s already exists, not overwriting.' % local_path)\n directory, _ = os.path.split(local_path)\n if directory:\n makedirs(directory, exist_ok=True)\n osf = _setup_osf(args)\n project = osf.project(args.project)\n store = project.storage(storage)\n for file_ in store.files:\n if norm_remote_path(file_.path) == remote_path:\n if local_path_exists and not args.force and args.update:\n if file_.hashes.get('md5') == checksum(local_path):\n print('Local file %s already matches remote.' % local_path)\n break\n with open(local_path, 'wb') as fp:\n file_.write_to(fp)\n break\n\n\n@might_need_auth\ndef list_(args):\n \"\"\"List all files from all storages for project.\n\n If the project is private you need to specify a username.\n \"\"\"\n osf = _setup_osf(args)\n project = osf.project(args.project)\n for store in project.storages:\n prefix = store.name\n for file_ in store.files:\n path = file_.path\n if path.startswith('/'):\n path = path[1:]\n print(os.path.join(prefix, path))\n\n\n@might_need_auth\ndef upload(args):\n \"\"\"Upload a new file to an existing project.\n\n The first part of the remote path is interpreted as the name of the\n storage provider. 
If there is no match the default (osfstorage) is\n used.\n\n If the project is private you need to specify a username.\n\n To upload a whole directory (and all its sub-directories) use the `-r`\n command-line option. If your source directory name ends in a / then\n files will be created directly in the remote directory. If it does not\n end in a slash an extra sub-directory with the name of the local directory\n will be created.\n\n To place contents of local directory `foo` in remote directory `bar/foo`:\n $ osf upload -r foo bar\n To place contents of local directory `foo` in remote directory `bar`:\n $ osf upload -r foo/ bar\n \"\"\"\n osf = _setup_osf(args)\n if osf.username is None or osf.password is None:\n sys.exit(\n 'To upload a file you need to provide a username and password.')\n project = osf.project(args.project)\n storage, remote_path = split_storage(args.destination)\n if remote_path == '':\n remote_path = os.path.split(args.source)[-1]\n store = project.storage(storage)\n if args.recursive:\n if not os.path.isdir(args.source):\n raise RuntimeError(\n 'Expected source ({}) to be a directory when using recursive mode.'\n .format(args.source))\n _, dir_name = os.path.split(args.source)\n for root, _, files in os.walk(args.source):\n subdir_path = os.path.relpath(root, args.source)\n for fname in files:\n local_path = os.path.join(root, fname)\n with open(local_path, 'rb') as fp:\n name = os.path.join(remote_path, dir_name, subdir_path,\n fname)\n store.create_file(name, fp, force=args.force, update=\n args.update)\n else:\n with open(args.source, 'rb') as fp:\n store.create_file(remote_path, fp, force=args.force, update=\n args.update)\n\n\n@might_need_auth\ndef remove(args):\n \"\"\"Remove a file from the project's storage.\n\n The first part of the remote path is interpreted as the name of the\n storage provider. 
If there is no match the default (osfstorage) is\n used.\n \"\"\"\n osf = _setup_osf(args)\n if osf.username is None or osf.password is None:\n sys.exit(\n 'To remove a file you need to provide a username and password.')\n project = osf.project(args.project)\n storage, remote_path = split_storage(args.target)\n store = project.storage(storage)\n for f in store.files:\n if norm_remote_path(f.path) == remote_path:\n f.remove()\n",
"step-4": "<mask token>\nfrom __future__ import print_function\nfrom functools import wraps\nimport getpass\nimport os\nimport sys\nfrom six.moves import configparser\nfrom six.moves import input\nfrom tqdm import tqdm\nfrom .api import OSF\nfrom .exceptions import UnauthorizedException\nfrom .utils import norm_remote_path, split_storage, makedirs, checksum\n\n\ndef config_from_file():\n if os.path.exists('.osfcli.config'):\n config_ = configparser.ConfigParser()\n config_.read('.osfcli.config')\n config = dict(config_.items('osf'))\n else:\n config = {}\n return config\n\n\ndef config_from_env(config):\n username = os.getenv('OSF_USERNAME')\n if username is not None:\n config['username'] = username\n project = os.getenv('OSF_PROJECT')\n if project is not None:\n config['project'] = project\n return config\n\n\ndef _get_username(args, config):\n if args.username is None:\n username = config.get('username')\n else:\n username = args.username\n return username\n\n\ndef _setup_osf(args):\n config = config_from_env(config_from_file())\n username = _get_username(args, config)\n project = config.get('project')\n if args.project is None:\n args.project = project\n if args.project is None:\n sys.exit(\n 'You have to specify a project ID via the command line, configuration file or environment variable.'\n )\n password = None\n if username is not None:\n password = os.getenv('OSF_PASSWORD')\n if password is None:\n password = getpass.getpass('Please input your password: ')\n return OSF(username=username, password=password)\n\n\ndef might_need_auth(f):\n \"\"\"Decorate a CLI function that might require authentication.\n\n Catches any UnauthorizedException raised, prints a helpful message and\n then exits.\n \"\"\"\n\n @wraps(f)\n def wrapper(cli_args):\n try:\n return_value = f(cli_args)\n except UnauthorizedException as e:\n config = config_from_env(config_from_file())\n username = _get_username(cli_args, config)\n if username is None:\n sys.exit('Please set a username (run 
`osf -h` for details).')\n else:\n sys.exit('You are not authorized to access this project.')\n return return_value\n return wrapper\n\n\ndef init(args):\n \"\"\"Initialize or edit an existing .osfcli.config file.\"\"\"\n config = config_from_file()\n config_ = configparser.ConfigParser()\n config_.add_section('osf')\n if 'username' not in config.keys():\n config_.set('osf', 'username', '')\n else:\n config_.set('osf', 'username', config['username'])\n if 'project' not in config.keys():\n config_.set('osf', 'project', '')\n else:\n config_.set('osf', 'project', config['project'])\n print('Provide a username for the config file [current username: {}]:'.\n format(config_.get('osf', 'username')))\n username = input()\n if username:\n config_.set('osf', 'username', username)\n print('Provide a project for the config file [current project: {}]:'.\n format(config_.get('osf', 'project')))\n project = input()\n if project:\n config_.set('osf', 'project', project)\n cfgfile = open('.osfcli.config', 'w')\n config_.write(cfgfile)\n cfgfile.close()\n\n\n@might_need_auth\ndef clone(args):\n \"\"\"Copy all files from all storages of a project.\n\n The output directory defaults to the current directory.\n\n If the project is private you need to specify a username.\n\n If args.update is True, overwrite any existing local files only if local and\n remote files differ.\n \"\"\"\n osf = _setup_osf(args)\n project = osf.project(args.project)\n output_dir = args.project\n if args.output is not None:\n output_dir = args.output\n with tqdm(unit='files') as pbar:\n for store in project.storages:\n prefix = os.path.join(output_dir, store.name)\n for file_ in store.files:\n path = file_.path\n if path.startswith('/'):\n path = path[1:]\n path = os.path.join(prefix, path)\n if os.path.exists(path) and args.update:\n if checksum(path) == file_.hashes.get('md5'):\n continue\n directory, _ = os.path.split(path)\n makedirs(directory, exist_ok=True)\n with open(path, 'wb') as f:\n 
file_.write_to(f)\n pbar.update()\n\n\n@might_need_auth\ndef fetch(args):\n \"\"\"Fetch an individual file from a project.\n\n The first part of the remote path is interpreted as the name of the\n storage provider. If there is no match the default (osfstorage) is\n used.\n\n The local path defaults to the name of the remote file.\n\n If the project is private you need to specify a username.\n\n If args.force is True, write local file even if that file already exists.\n If args.force is False but args.update is True, overwrite an existing local\n file only if local and remote files differ.\n \"\"\"\n storage, remote_path = split_storage(args.remote)\n local_path = args.local\n if local_path is None:\n _, local_path = os.path.split(remote_path)\n local_path_exists = os.path.exists(local_path)\n if local_path_exists and not args.force and not args.update:\n sys.exit('Local file %s already exists, not overwriting.' % local_path)\n directory, _ = os.path.split(local_path)\n if directory:\n makedirs(directory, exist_ok=True)\n osf = _setup_osf(args)\n project = osf.project(args.project)\n store = project.storage(storage)\n for file_ in store.files:\n if norm_remote_path(file_.path) == remote_path:\n if local_path_exists and not args.force and args.update:\n if file_.hashes.get('md5') == checksum(local_path):\n print('Local file %s already matches remote.' 
% local_path)\n break\n with open(local_path, 'wb') as fp:\n file_.write_to(fp)\n break\n\n\n@might_need_auth\ndef list_(args):\n \"\"\"List all files from all storages for project.\n\n If the project is private you need to specify a username.\n \"\"\"\n osf = _setup_osf(args)\n project = osf.project(args.project)\n for store in project.storages:\n prefix = store.name\n for file_ in store.files:\n path = file_.path\n if path.startswith('/'):\n path = path[1:]\n print(os.path.join(prefix, path))\n\n\n@might_need_auth\ndef upload(args):\n \"\"\"Upload a new file to an existing project.\n\n The first part of the remote path is interpreted as the name of the\n storage provider. If there is no match the default (osfstorage) is\n used.\n\n If the project is private you need to specify a username.\n\n To upload a whole directory (and all its sub-directories) use the `-r`\n command-line option. If your source directory name ends in a / then\n files will be created directly in the remote directory. 
If it does not\n end in a slash an extra sub-directory with the name of the local directory\n will be created.\n\n To place contents of local directory `foo` in remote directory `bar/foo`:\n $ osf upload -r foo bar\n To place contents of local directory `foo` in remote directory `bar`:\n $ osf upload -r foo/ bar\n \"\"\"\n osf = _setup_osf(args)\n if osf.username is None or osf.password is None:\n sys.exit(\n 'To upload a file you need to provide a username and password.')\n project = osf.project(args.project)\n storage, remote_path = split_storage(args.destination)\n if remote_path == '':\n remote_path = os.path.split(args.source)[-1]\n store = project.storage(storage)\n if args.recursive:\n if not os.path.isdir(args.source):\n raise RuntimeError(\n 'Expected source ({}) to be a directory when using recursive mode.'\n .format(args.source))\n _, dir_name = os.path.split(args.source)\n for root, _, files in os.walk(args.source):\n subdir_path = os.path.relpath(root, args.source)\n for fname in files:\n local_path = os.path.join(root, fname)\n with open(local_path, 'rb') as fp:\n name = os.path.join(remote_path, dir_name, subdir_path,\n fname)\n store.create_file(name, fp, force=args.force, update=\n args.update)\n else:\n with open(args.source, 'rb') as fp:\n store.create_file(remote_path, fp, force=args.force, update=\n args.update)\n\n\n@might_need_auth\ndef remove(args):\n \"\"\"Remove a file from the project's storage.\n\n The first part of the remote path is interpreted as the name of the\n storage provider. If there is no match the default (osfstorage) is\n used.\n \"\"\"\n osf = _setup_osf(args)\n if osf.username is None or osf.password is None:\n sys.exit(\n 'To remove a file you need to provide a username and password.')\n project = osf.project(args.project)\n storage, remote_path = split_storage(args.target)\n store = project.storage(storage)\n for f in store.files:\n if norm_remote_path(f.path) == remote_path:\n f.remove()\n",
"step-5": "\"\"\"Command line interface to the OSF\n\nThese functions implement the functionality of the command-line interface.\n\"\"\"\nfrom __future__ import print_function\n\nfrom functools import wraps\nimport getpass\nimport os\nimport sys\n\nfrom six.moves import configparser\nfrom six.moves import input\n\nfrom tqdm import tqdm\n\nfrom .api import OSF\nfrom .exceptions import UnauthorizedException\nfrom .utils import norm_remote_path, split_storage, makedirs, checksum\n\n\ndef config_from_file():\n if os.path.exists(\".osfcli.config\"):\n config_ = configparser.ConfigParser()\n config_.read(\".osfcli.config\")\n\n # for python2 compatibility\n config = dict(config_.items('osf'))\n\n else:\n config = {}\n\n return config\n\n\ndef config_from_env(config):\n username = os.getenv(\"OSF_USERNAME\")\n if username is not None:\n config['username'] = username\n\n project = os.getenv(\"OSF_PROJECT\")\n if project is not None:\n config['project'] = project\n\n return config\n\n\ndef _get_username(args, config):\n if args.username is None:\n username = config.get('username')\n else:\n username = args.username\n return username\n\n\ndef _setup_osf(args):\n # Command line options have precedence over environment variables,\n # which have precedence over the config file.\n config = config_from_env(config_from_file())\n\n username = _get_username(args, config)\n\n project = config.get('project')\n if args.project is None:\n args.project = project\n # still None? 
We are in trouble\n if args.project is None:\n sys.exit('You have to specify a project ID via the command line,'\n ' configuration file or environment variable.')\n\n password = None\n if username is not None:\n password = os.getenv(\"OSF_PASSWORD\")\n\n # Prompt user when password is not set\n if password is None:\n password = getpass.getpass('Please input your password: ')\n\n return OSF(username=username, password=password)\n\n\ndef might_need_auth(f):\n \"\"\"Decorate a CLI function that might require authentication.\n\n Catches any UnauthorizedException raised, prints a helpful message and\n then exits.\n \"\"\"\n @wraps(f)\n def wrapper(cli_args):\n try:\n return_value = f(cli_args)\n except UnauthorizedException as e:\n config = config_from_env(config_from_file())\n username = _get_username(cli_args, config)\n\n if username is None:\n sys.exit(\"Please set a username (run `osf -h` for details).\")\n else:\n sys.exit(\"You are not authorized to access this project.\")\n\n return return_value\n\n return wrapper\n\n\ndef init(args):\n \"\"\"Initialize or edit an existing .osfcli.config file.\"\"\"\n # reading existing config file, convert to configparser object\n config = config_from_file()\n config_ = configparser.ConfigParser()\n config_.add_section('osf')\n if 'username' not in config.keys():\n config_.set('osf', 'username', '')\n else:\n config_.set('osf', 'username', config['username'])\n if 'project' not in config.keys():\n config_.set('osf', 'project', '')\n else:\n config_.set('osf', 'project', config['project'])\n\n # now we can start asking for new values\n print('Provide a username for the config file [current username: {}]:'.format(\n config_.get('osf', 'username')))\n username = input()\n if username:\n config_.set('osf', 'username', username)\n\n print('Provide a project for the config file [current project: {}]:'.format(\n config_.get('osf', 'project')))\n project = input()\n if project:\n config_.set('osf', 'project', project)\n\n cfgfile = 
open(\".osfcli.config\", \"w\")\n config_.write(cfgfile)\n cfgfile.close()\n\n\n@might_need_auth\ndef clone(args):\n \"\"\"Copy all files from all storages of a project.\n\n The output directory defaults to the current directory.\n\n If the project is private you need to specify a username.\n\n If args.update is True, overwrite any existing local files only if local and\n remote files differ.\n \"\"\"\n osf = _setup_osf(args)\n project = osf.project(args.project)\n output_dir = args.project\n if args.output is not None:\n output_dir = args.output\n\n with tqdm(unit='files') as pbar:\n for store in project.storages:\n prefix = os.path.join(output_dir, store.name)\n\n for file_ in store.files:\n path = file_.path\n if path.startswith('/'):\n path = path[1:]\n\n path = os.path.join(prefix, path)\n if os.path.exists(path) and args.update:\n if checksum(path) == file_.hashes.get('md5'):\n continue\n directory, _ = os.path.split(path)\n makedirs(directory, exist_ok=True)\n\n with open(path, \"wb\") as f:\n file_.write_to(f)\n\n pbar.update()\n\n\n@might_need_auth\ndef fetch(args):\n \"\"\"Fetch an individual file from a project.\n\n The first part of the remote path is interpreted as the name of the\n storage provider. 
If there is no match the default (osfstorage) is\n used.\n\n The local path defaults to the name of the remote file.\n\n If the project is private you need to specify a username.\n\n If args.force is True, write local file even if that file already exists.\n If args.force is False but args.update is True, overwrite an existing local\n file only if local and remote files differ.\n \"\"\"\n storage, remote_path = split_storage(args.remote)\n\n local_path = args.local\n if local_path is None:\n _, local_path = os.path.split(remote_path)\n\n local_path_exists = os.path.exists(local_path)\n if local_path_exists and not args.force and not args.update:\n sys.exit(\"Local file %s already exists, not overwriting.\" % local_path)\n\n directory, _ = os.path.split(local_path)\n if directory:\n makedirs(directory, exist_ok=True)\n\n osf = _setup_osf(args)\n project = osf.project(args.project)\n\n store = project.storage(storage)\n for file_ in store.files:\n if norm_remote_path(file_.path) == remote_path:\n if local_path_exists and not args.force and args.update:\n if file_.hashes.get('md5') == checksum(local_path):\n print(\"Local file %s already matches remote.\" % local_path)\n break\n with open(local_path, 'wb') as fp:\n file_.write_to(fp)\n\n # only fetching one file so we are done\n break\n\n\n@might_need_auth\ndef list_(args):\n \"\"\"List all files from all storages for project.\n\n If the project is private you need to specify a username.\n \"\"\"\n osf = _setup_osf(args)\n\n project = osf.project(args.project)\n\n for store in project.storages:\n prefix = store.name\n for file_ in store.files:\n path = file_.path\n if path.startswith('/'):\n path = path[1:]\n\n print(os.path.join(prefix, path))\n\n\n@might_need_auth\ndef upload(args):\n \"\"\"Upload a new file to an existing project.\n\n The first part of the remote path is interpreted as the name of the\n storage provider. 
If there is no match the default (osfstorage) is\n used.\n\n If the project is private you need to specify a username.\n\n To upload a whole directory (and all its sub-directories) use the `-r`\n command-line option. If your source directory name ends in a / then\n files will be created directly in the remote directory. If it does not\n end in a slash an extra sub-directory with the name of the local directory\n will be created.\n\n To place contents of local directory `foo` in remote directory `bar/foo`:\n $ osf upload -r foo bar\n To place contents of local directory `foo` in remote directory `bar`:\n $ osf upload -r foo/ bar\n \"\"\"\n osf = _setup_osf(args)\n if osf.username is None or osf.password is None:\n sys.exit('To upload a file you need to provide a username and'\n ' password.')\n\n project = osf.project(args.project)\n storage, remote_path = split_storage(args.destination)\n if remote_path == '':\n remote_path = os.path.split(args.source)[-1]\n\n store = project.storage(storage)\n if args.recursive:\n if not os.path.isdir(args.source):\n raise RuntimeError(\"Expected source ({}) to be a directory when \"\n \"using recursive mode.\".format(args.source))\n\n # local name of the directory that is being uploaded\n _, dir_name = os.path.split(args.source)\n\n for root, _, files in os.walk(args.source):\n subdir_path = os.path.relpath(root, args.source)\n for fname in files:\n local_path = os.path.join(root, fname)\n with open(local_path, 'rb') as fp:\n # build the remote path + fname\n name = os.path.join(remote_path, dir_name, subdir_path,\n fname)\n store.create_file(name, fp, force=args.force,\n update=args.update)\n\n else:\n with open(args.source, 'rb') as fp:\n store.create_file(remote_path, fp, force=args.force,\n update=args.update)\n\n\n@might_need_auth\ndef remove(args):\n \"\"\"Remove a file from the project's storage.\n\n The first part of the remote path is interpreted as the name of the\n storage provider. 
If there is no match the default (osfstorage) is\n used.\n \"\"\"\n osf = _setup_osf(args)\n if osf.username is None or osf.password is None:\n sys.exit('To remove a file you need to provide a username and'\n ' password.')\n\n project = osf.project(args.project)\n\n storage, remote_path = split_storage(args.target)\n\n store = project.storage(storage)\n for f in store.files:\n if norm_remote_path(f.path) == remote_path:\n f.remove()\n",
"step-ids": [
7,
9,
10,
12,
13
]
}
|
[
7,
9,
10,
12,
13
] |
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from orders.models import Setting
def search(request):
    """Render the public search page (no print URL in the context)."""
    template_name = 'ui/search.html'
    return render(request, template_name)
def search_printed(request):
    """Render the search page with the public printer URL, if configured.

    The URL is read from the ``Setting`` row named ``printer``; when that
    row is absent the template receives an empty string.
    """
    # filter().first() returns None when no row matches, so guard with
    # ``is not None`` (PEP 8: comparisons to None use is/is not, never !=).
    setting = Setting.objects.filter(name='printer').first()
    print_url = setting.value if setting is not None else ''
    return render(request, 'ui/search.html', {'print_url': print_url})
@login_required
def queue(request):
    """Render the staff order queue (login required), footer shown.

    The print URL is read from the ``Setting`` row named ``printer_admin``;
    when that row is absent the template receives an empty string.
    """
    # ``is not None`` instead of ``!= None`` (PEP 8 singleton comparison).
    setting = Setting.objects.filter(name='printer_admin').first()
    print_url = setting.value if setting is not None else ''
    return render(request, 'ui/queue.html', {'print_url': print_url,
        'footer': True})
def queue_tablet(request):
    """Render the order queue for the tablet display (no footer).

    Same ``printer_admin`` setting lookup as :func:`queue`, but this view
    is unauthenticated and hides the footer.
    """
    # ``is not None`` instead of ``!= None`` (PEP 8 singleton comparison).
    setting = Setting.objects.filter(name='printer_admin').first()
    print_url = setting.value if setting is not None else ''
    return render(request, 'ui/queue.html', {'print_url': print_url,
        'footer': False})
|
normal
|
{
"blob_id": "f16d43d9dfb3e9b9589fa92eb82aaa4c73fe48cd",
"index": 1264,
"step-1": "<mask token>\n\n\ndef search(request):\n return render(request, 'ui/search.html')\n\n\ndef search_printed(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/search.html', {'print_url': print_url})\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef search(request):\n return render(request, 'ui/search.html')\n\n\ndef search_printed(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/search.html', {'print_url': print_url})\n\n\n<mask token>\n\n\ndef queue_tablet(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer_admin').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/queue.html', {'print_url': print_url,\n 'footer': False})\n",
"step-3": "<mask token>\n\n\ndef search(request):\n return render(request, 'ui/search.html')\n\n\ndef search_printed(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/search.html', {'print_url': print_url})\n\n\n@login_required\ndef queue(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer_admin').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/queue.html', {'print_url': print_url,\n 'footer': True})\n\n\ndef queue_tablet(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer_admin').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/queue.html', {'print_url': print_url,\n 'footer': False})\n",
"step-4": "from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\nfrom orders.models import Setting\n\n\ndef search(request):\n return render(request, 'ui/search.html')\n\n\ndef search_printed(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/search.html', {'print_url': print_url})\n\n\n@login_required\ndef queue(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer_admin').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/queue.html', {'print_url': print_url,\n 'footer': True})\n\n\ndef queue_tablet(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer_admin').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/queue.html', {'print_url': print_url,\n 'footer': False})\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
from numpy import *
import KNN_1
import KNN_3
import KNN_suanfa as clf
def datingClassTest(holdout_ratio=0.1):
    """Evaluate the kNN classifier on a held-out slice of the dating data.

    The first ``holdout_ratio`` fraction of the (normalised) samples is
    classified against the remaining samples, using the labels loaded from
    ``datingTestSet2.txt``.

    Args:
        holdout_ratio: fraction of samples to hold out for testing
            (defaults to 0.1, matching the original behaviour).

    Returns:
        float: fraction of misclassified held-out samples.
    """
    data, datalabels = KNN_1.filel2matrix("datingTestSet2.txt")
    # NOTE(review): autoNorm is assumed here to return only the normalised
    # matrix -- confirm against KNN_3's actual signature.
    normMat = KNN_3.autoNorm(data)
    m = normMat.shape[0]
    numTestset = int(m * holdout_ratio)
    predictions = clf.classify0(normMat[0:numTestset, :],
                                normMat[numTestset:m, :], 3,
                                datalabels[numTestset:m])
    # Count predictions that disagree with the true label of each test row.
    errorcount = sum(1 for i in range(len(predictions))
                     if predictions[i] != datalabels[i])
    # Bug fix: the original divided by a hard-coded 100, which is only the
    # correct denominator when exactly 100 samples happen to be held out.
    return errorcount / numTestset if numTestset else 0.0
def predictperson():
    """Interactively classify one person from three prompted features.

    Reads three numeric answers from stdin, runs kNN (k=3) against the
    dating data set and prints the predicted "like" level.
    """
    # Labels in the data set are 1-based, hence the a[0]-1 lookup below.
    level = ['not at all','in small does','in large does']
    percenttats = float(input("percentage of time spent playing video games?"))
    ffmiles = float(input("frequent flier miles earned per year?"))
    icecream = float(input("liters of ice cream consumed per year?"))
    data, datalabels = KNN_1.filel2matrix("datingTestSet2.txt")
    # NOTE(review): normMat is computed but never used -- the classifier is
    # fed the raw (unnormalised) data below.  Presumably both the training
    # data and test_dataset should be normalised with the same ranges;
    # confirm against KNN_3.autoNorm before changing.
    normMat = KNN_3.autoNorm(data)
    test_dataset = array([[percenttats,ffmiles,icecream]])
    a = clf.classify0(test_dataset,data,3,datalabels)
    print(level[a[0]-1])
predictperson()
|
normal
|
{
"blob_id": "3086f62d4057812fc7fb4e21a18bc7d0ba786865",
"index": 2526,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef datingClassTest():\n horatio = 0.1\n data, datalabels = KNN_1.filel2matrix('datingTestSet2.txt')\n normMat = KNN_3.autoNorm(data)\n ml = normMat.shape[0]\n numTestset = int(ml * horatio)\n errorcount = 0\n a = clf.classify0(normMat[0:numTestset, :], normMat[numTestset:ml, :], \n 3, datalabels[numTestset:ml])\n for i in range(len(a)):\n if a[i] != datalabels[i]:\n errorcount += 1\n c = errorcount / 100\n return c\n\n\ndef predictperson():\n level = ['not at all', 'in small does', 'in large does']\n percenttats = float(input('percentage of time spent playing video games?'))\n ffmiles = float(input('frequent flier miles earned per year?'))\n icecream = float(input('liters of ice cream consumed per year?'))\n data, datalabels = KNN_1.filel2matrix('datingTestSet2.txt')\n normMat = KNN_3.autoNorm(data)\n test_dataset = array([[percenttats, ffmiles, icecream]])\n a = clf.classify0(test_dataset, data, 3, datalabels)\n print(level[a[0] - 1])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef datingClassTest():\n horatio = 0.1\n data, datalabels = KNN_1.filel2matrix('datingTestSet2.txt')\n normMat = KNN_3.autoNorm(data)\n ml = normMat.shape[0]\n numTestset = int(ml * horatio)\n errorcount = 0\n a = clf.classify0(normMat[0:numTestset, :], normMat[numTestset:ml, :], \n 3, datalabels[numTestset:ml])\n for i in range(len(a)):\n if a[i] != datalabels[i]:\n errorcount += 1\n c = errorcount / 100\n return c\n\n\ndef predictperson():\n level = ['not at all', 'in small does', 'in large does']\n percenttats = float(input('percentage of time spent playing video games?'))\n ffmiles = float(input('frequent flier miles earned per year?'))\n icecream = float(input('liters of ice cream consumed per year?'))\n data, datalabels = KNN_1.filel2matrix('datingTestSet2.txt')\n normMat = KNN_3.autoNorm(data)\n test_dataset = array([[percenttats, ffmiles, icecream]])\n a = clf.classify0(test_dataset, data, 3, datalabels)\n print(level[a[0] - 1])\n\n\npredictperson()\n",
"step-4": "from numpy import *\nimport KNN_1\nimport KNN_3\nimport KNN_suanfa as clf\n\n\ndef datingClassTest():\n horatio = 0.1\n data, datalabels = KNN_1.filel2matrix('datingTestSet2.txt')\n normMat = KNN_3.autoNorm(data)\n ml = normMat.shape[0]\n numTestset = int(ml * horatio)\n errorcount = 0\n a = clf.classify0(normMat[0:numTestset, :], normMat[numTestset:ml, :], \n 3, datalabels[numTestset:ml])\n for i in range(len(a)):\n if a[i] != datalabels[i]:\n errorcount += 1\n c = errorcount / 100\n return c\n\n\ndef predictperson():\n level = ['not at all', 'in small does', 'in large does']\n percenttats = float(input('percentage of time spent playing video games?'))\n ffmiles = float(input('frequent flier miles earned per year?'))\n icecream = float(input('liters of ice cream consumed per year?'))\n data, datalabels = KNN_1.filel2matrix('datingTestSet2.txt')\n normMat = KNN_3.autoNorm(data)\n test_dataset = array([[percenttats, ffmiles, icecream]])\n a = clf.classify0(test_dataset, data, 3, datalabels)\n print(level[a[0] - 1])\n\n\npredictperson()\n",
"step-5": "from numpy import *\nimport KNN_1\nimport KNN_3\nimport KNN_suanfa as clf\ndef datingClassTest():\n horatio = 0.1\n data, datalabels = KNN_1.filel2matrix(\"datingTestSet2.txt\")\n normMat = KNN_3.autoNorm(data)\n ml = normMat.shape[0]\n numTestset = int(ml*horatio)\n errorcount = 0\n a=clf.classify0(normMat[0:numTestset,:],normMat[numTestset:ml,:],3,datalabels[numTestset:ml])\n for i in range(len(a)):\n if a[i] != datalabels[i]:\n errorcount += 1\n c = errorcount/100\n return c\n\ndef predictperson():\n level = ['not at all','in small does','in large does']\n percenttats = float(input(\"percentage of time spent playing video games?\"))\n ffmiles = float(input(\"frequent flier miles earned per year?\"))\n icecream = float(input(\"liters of ice cream consumed per year?\"))\n data, datalabels = KNN_1.filel2matrix(\"datingTestSet2.txt\")\n normMat = KNN_3.autoNorm(data)\n test_dataset = array([[percenttats,ffmiles,icecream]])\n a = clf.classify0(test_dataset,data,3,datalabels)\n print(level[a[0]-1])\npredictperson()\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
#!/usr/bin/env python
import mincemeat
import sys
from mapinput import FileShardsMapInput
from mapinput import JsonFileMapInput
def mapfn(k, v):
    """Emit a ``(word, 1)`` pair for every whitespace-separated word in v."""
    yield from ((word, 1) for word in v.split())
def reducefn(k, vs):
    """Sum the per-word counts emitted by mapfn.

    Args:
        k: the word (unused; present to satisfy the reduce signature).
        vs: iterable of integer counts.

    Returns:
        int: the total count for word ``k`` (0 for an empty iterable).
    """
    # Builtin sum() replaces the hand-rolled accumulation loop.
    return sum(vs)
# Configure and run the map-reduce word-count server.  Module-level names
# (s, results) are left as-is; statement order matters (all configuration
# must precede run_server).
s = mincemeat.Server()
# Shard the input across every JSON file matching the glob pattern.
s.map_input = FileShardsMapInput("./wordcount_shard*.json", JsonFileMapInput)
s.mapfn = mapfn
s.reducefn = reducefn
# Reduce output is written as sharded JSON files named per the pattern.
s.reduce_output_format = "json"
s.reduce_shard_pattern = "wordcount_output_%s.json"
# Blocks until workers finish; workers authenticate with an empty password.
results = s.run_server(password="")
s.dump_results()
|
normal
|
{
"blob_id": "09c6dd0f32b8d71dacdd8b10d995ea1575f91f6f",
"index": 2887,
"step-1": "<mask token>\n\n\ndef mapfn(k, v):\n for w in v.split():\n yield w, 1\n\n\ndef reducefn(k, vs):\n result = 0\n for v in vs:\n result += v\n return result\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef mapfn(k, v):\n for w in v.split():\n yield w, 1\n\n\ndef reducefn(k, vs):\n result = 0\n for v in vs:\n result += v\n return result\n\n\n<mask token>\ns.dump_results()\n",
"step-3": "<mask token>\n\n\ndef mapfn(k, v):\n for w in v.split():\n yield w, 1\n\n\ndef reducefn(k, vs):\n result = 0\n for v in vs:\n result += v\n return result\n\n\ns = mincemeat.Server()\ns.map_input = FileShardsMapInput('./wordcount_shard*.json', JsonFileMapInput)\ns.mapfn = mapfn\ns.reducefn = reducefn\ns.reduce_output_format = 'json'\ns.reduce_shard_pattern = 'wordcount_output_%s.json'\nresults = s.run_server(password='')\ns.dump_results()\n",
"step-4": "import mincemeat\nimport sys\nfrom mapinput import FileShardsMapInput\nfrom mapinput import JsonFileMapInput\n\n\ndef mapfn(k, v):\n for w in v.split():\n yield w, 1\n\n\ndef reducefn(k, vs):\n result = 0\n for v in vs:\n result += v\n return result\n\n\ns = mincemeat.Server()\ns.map_input = FileShardsMapInput('./wordcount_shard*.json', JsonFileMapInput)\ns.mapfn = mapfn\ns.reducefn = reducefn\ns.reduce_output_format = 'json'\ns.reduce_shard_pattern = 'wordcount_output_%s.json'\nresults = s.run_server(password='')\ns.dump_results()\n",
"step-5": "#!/usr/bin/env python\nimport mincemeat\nimport sys\n\nfrom mapinput import FileShardsMapInput\nfrom mapinput import JsonFileMapInput\n\ndef mapfn(k, v):\n for w in v.split():\n yield w, 1\n\ndef reducefn(k, vs):\n result = 0\n for v in vs:\n result += v\n return result\n\ns = mincemeat.Server()\n\ns.map_input = FileShardsMapInput(\"./wordcount_shard*.json\", JsonFileMapInput)\ns.mapfn = mapfn\ns.reducefn = reducefn\ns.reduce_output_format = \"json\"\ns.reduce_shard_pattern = \"wordcount_output_%s.json\"\nresults = s.run_server(password=\"\")\ns.dump_results()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
"""Tools for working with Scores."""
from typing import List, Optional
from citrine._serialization import properties
from citrine._serialization.polymorphic_serializable import PolymorphicSerializable
from citrine._serialization.serializable import Serializable
from citrine._session import Session
from citrine.informatics.constraints import Constraint
from citrine.informatics.objectives import Objective
__all__ = ['Score', 'LIScore', 'EIScore', 'EVScore']
class Score(PolymorphicSerializable['Score']):
    """[ALPHA] A Citrine Score is used to rank materials according to objectives and constraints.

    Abstract type that returns the proper type given a serialized dict.

    """

    @classmethod
    def get_type(cls, data):
        """Return the concrete Score subclass named by ``data['type']``."""
        subtype_by_tag = {'MLI': LIScore, 'MEI': EIScore, 'MEV': EVScore}
        return subtype_by_tag[data['type']]
class LIScore(Serializable['LIScore'], Score):
    """[ALPHA] Evaluates the likelihood of scoring better than some baselines for given objectives.

    Parameters
    ----------
    name: str
        the name of the score
    description: str
        the description of the score
    objectives: list[Objective]
        objectives (e.g., maximize, minimize, tune, etc.)
    baselines: list[float]
        best-so-far values for the various objectives (there must be one for each objective)
    constraints: list[Constraint]
        constraints limiting the allowed values that material instances can have
    session: Session, optional
        Citrine session used when communicating with the platform

    """

    # Serialization schema: each property maps an attribute to its
    # serialized key; 'type' tags this score as MLI.
    name = properties.String('name')
    description = properties.String('description')
    baselines = properties.List(properties.Float, 'baselines')
    objectives = properties.List(properties.Object(Objective), 'objectives')
    constraints = properties.List(properties.Object(Constraint), 'constraints')
    typ = properties.String('type', default='MLI')

    def __init__(self, name: str, description: str,
                 objectives: List[Objective], baselines: List[float],
                 constraints: Optional[List[Constraint]] = None,
                 session: Optional[Session] = None):
        self.name: str = name
        self.description: str = description
        self.baselines: List[float] = baselines
        self.objectives: List[Objective] = objectives
        # An omitted/empty constraint list is stored as an empty list.
        self.constraints: List[Constraint] = constraints or []
        self.session: Optional[Session] = session

    def __str__(self):
        return "<LIScore {!r}>".format(self.name)
class EIScore(Serializable['EIScore'], Score):
    """
    [ALPHA] Evaluates the expected magnitude of improvement beyond baselines for given objectives.

    Parameters
    ----------
    name: str
        the name of the score
    description: str
        the description of the score
    objectives: list[Objective]
        objectives (e.g., maximize, minimize, tune, etc.)
    baselines: list[float]
        best-so-far values for the various objectives (there must be one for each objective)
    constraints: list[Constraint]
        constraints limiting the allowed values that material instances can have
    session: Session, optional
        Citrine session used when communicating with the platform

    """

    # Serialization schema; the 'type' tag MEI identifies this score kind.
    name = properties.String('name')
    description = properties.String('description')
    baselines = properties.List(properties.Float, 'baselines')
    objectives = properties.List(properties.Object(Objective), 'objectives')
    constraints = properties.List(properties.Object(Constraint), 'constraints')
    typ = properties.String('type', default='MEI')

    def __init__(self, name: str, description: str,
                 objectives: List[Objective], baselines: List[float],
                 constraints: Optional[List[Constraint]] = None,
                 session: Optional[Session] = None):
        self.session: Optional[Session] = session
        self.name: str = name
        self.description: str = description
        self.objectives: List[Objective] = objectives
        self.baselines: List[float] = baselines
        # Missing constraints default to an empty list.
        self.constraints: List[Constraint] = constraints or []

    def __str__(self):
        return "<EIScore {!r}>".format(self.name)
class EVScore(Serializable['EVScore'], Score):
    """
    [ALPHA] Evaluates the expected value for given objectives.

    Parameters
    ----------
    name: str
        the name of the score
    description: str
        the description of the score
    objectives: list[Objective]
        objectives (e.g., maximize, minimize, tune, etc.)
    constraints: list[Constraint]
        constraints limiting the allowed values that material instances can have
    session: Session, optional
        Citrine session used when communicating with the platform

    """

    # Serialization schema; note: unlike LIScore/EIScore there are no
    # baselines for an expected-value score.  'type' tag is MEV.
    name = properties.String('name')
    description = properties.String('description')
    objectives = properties.List(properties.Object(Objective), 'objectives')
    constraints = properties.List(properties.Object(Constraint), 'constraints')
    typ = properties.String('type', default='MEV')

    def __init__(self, name: str, description: str,
                 objectives: List[Objective],
                 constraints: Optional[List[Constraint]] = None,
                 session: Optional[Session] = None):
        self.name: str = name
        self.description: str = description
        # Missing constraints default to an empty list.
        self.constraints: List[Constraint] = constraints or []
        self.objectives: List[Objective] = objectives
        self.session: Optional[Session] = session

    def __str__(self):
        return "<EVScore {!r}>".format(self.name)
|
normal
|
{
"blob_id": "a0086a9d27a091776378cd8bde31c59899fc07ac",
"index": 3122,
"step-1": "<mask token>\n\n\nclass LIScore(Serializable['LIScore'], Score):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return '<LIScore {!r}>'.format(self.name)\n\n\nclass EIScore(Serializable['EIScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected magnitude of improvement beyond baselines for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n baselines: list[float]\n best-so-far values for the various objectives (there must be one for each objective)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEI')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], baselines: List[float], constraints: Optional[List[\n Constraint]]=None, session: Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EIScore {!r}>'.format(self.name)\n\n\nclass EVScore(Serializable['EVScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected value for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., 
maximize, minimize, tune, etc.)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEV')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], constraints: Optional[List[Constraint]]=None, session:\n Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EVScore {!r}>'.format(self.name)\n",
"step-2": "<mask token>\n\n\nclass LIScore(Serializable['LIScore'], Score):\n <mask token>\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MLI')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], baselines: List[float], constraints: Optional[List[\n Constraint]]=None, session: Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<LIScore {!r}>'.format(self.name)\n\n\nclass EIScore(Serializable['EIScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected magnitude of improvement beyond baselines for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n baselines: list[float]\n best-so-far values for the various objectives (there must be one for each objective)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEI')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], baselines: List[float], 
constraints: Optional[List[\n Constraint]]=None, session: Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EIScore {!r}>'.format(self.name)\n\n\nclass EVScore(Serializable['EVScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected value for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEV')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], constraints: Optional[List[Constraint]]=None, session:\n Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EVScore {!r}>'.format(self.name)\n",
"step-3": "<mask token>\n\n\nclass Score(PolymorphicSerializable['Score']):\n <mask token>\n <mask token>\n\n\nclass LIScore(Serializable['LIScore'], Score):\n \"\"\"[ALPHA] Evaluates the likelihood of scoring better than some baselines for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n baselines: list[float]\n best-so-far values for the various objectives (there must be one for each objective)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MLI')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], baselines: List[float], constraints: Optional[List[\n Constraint]]=None, session: Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<LIScore {!r}>'.format(self.name)\n\n\nclass EIScore(Serializable['EIScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected magnitude of improvement beyond baselines for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n baselines: list[float]\n best-so-far values for the various objectives (there must be one for each 
objective)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEI')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], baselines: List[float], constraints: Optional[List[\n Constraint]]=None, session: Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EIScore {!r}>'.format(self.name)\n\n\nclass EVScore(Serializable['EVScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected value for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEV')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], constraints: Optional[List[Constraint]]=None, session:\n Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n 
self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EVScore {!r}>'.format(self.name)\n",
"step-4": "<mask token>\nfrom typing import List, Optional\nfrom citrine._serialization import properties\nfrom citrine._serialization.polymorphic_serializable import PolymorphicSerializable\nfrom citrine._serialization.serializable import Serializable\nfrom citrine._session import Session\nfrom citrine.informatics.constraints import Constraint\nfrom citrine.informatics.objectives import Objective\n__all__ = ['Score', 'LIScore', 'EIScore', 'EVScore']\n\n\nclass Score(PolymorphicSerializable['Score']):\n \"\"\"[ALPHA] A Citrine Score is used to rank materials according to objectives and constraints.\n\n Abstract type that returns the proper type given a serialized dict.\n\n\n \"\"\"\n\n @classmethod\n def get_type(cls, data):\n \"\"\"Return the subtype.\"\"\"\n return {'MLI': LIScore, 'MEI': EIScore, 'MEV': EVScore}[data['type']]\n\n\nclass LIScore(Serializable['LIScore'], Score):\n \"\"\"[ALPHA] Evaluates the likelihood of scoring better than some baselines for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n baselines: list[float]\n best-so-far values for the various objectives (there must be one for each objective)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MLI')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], baselines: List[float], constraints: Optional[List[\n Constraint]]=None, session: Optional[Session]=None):\n self.name: str = name\n 
self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<LIScore {!r}>'.format(self.name)\n\n\nclass EIScore(Serializable['EIScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected magnitude of improvement beyond baselines for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n baselines: list[float]\n best-so-far values for the various objectives (there must be one for each objective)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEI')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], baselines: List[float], constraints: Optional[List[\n Constraint]]=None, session: Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EIScore {!r}>'.format(self.name)\n\n\nclass EVScore(Serializable['EVScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected value for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: 
list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEV')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], constraints: Optional[List[Constraint]]=None, session:\n Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EVScore {!r}>'.format(self.name)\n",
"step-5": "\"\"\"Tools for working with Scores.\"\"\"\nfrom typing import List, Optional\n\nfrom citrine._serialization import properties\nfrom citrine._serialization.polymorphic_serializable import PolymorphicSerializable\nfrom citrine._serialization.serializable import Serializable\nfrom citrine._session import Session\nfrom citrine.informatics.constraints import Constraint\nfrom citrine.informatics.objectives import Objective\n\n__all__ = ['Score', 'LIScore', 'EIScore', 'EVScore']\n\n\nclass Score(PolymorphicSerializable['Score']):\n \"\"\"[ALPHA] A Citrine Score is used to rank materials according to objectives and constraints.\n\n Abstract type that returns the proper type given a serialized dict.\n\n\n \"\"\"\n\n @classmethod\n def get_type(cls, data):\n \"\"\"Return the subtype.\"\"\"\n return {\n 'MLI': LIScore,\n 'MEI': EIScore,\n 'MEV': EVScore\n }[data['type']]\n\n\nclass LIScore(Serializable['LIScore'], Score):\n \"\"\"[ALPHA] Evaluates the likelihood of scoring better than some baselines for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n baselines: list[float]\n best-so-far values for the various objectives (there must be one for each objective)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MLI')\n\n def __init__(self,\n name: str,\n description: str,\n objectives: List[Objective],\n baselines: List[float],\n constraints: Optional[List[Constraint]] = None,\n session: 
Optional[Session] = None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<LIScore {!r}>'.format(self.name)\n\n\nclass EIScore(Serializable['EIScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected magnitude of improvement beyond baselines for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n baselines: list[float]\n best-so-far values for the various objectives (there must be one for each objective)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEI')\n\n def __init__(self,\n name: str,\n description: str,\n objectives: List[Objective],\n baselines: List[float],\n constraints: Optional[List[Constraint]] = None,\n session: Optional[Session] = None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EIScore {!r}>'.format(self.name)\n\n\nclass EVScore(Serializable['EVScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected value for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n 
description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n\n name = properties.String('name')\n description = properties.String('description')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEV')\n\n def __init__(self,\n name: str,\n description: str,\n objectives: List[Objective],\n constraints: Optional[List[Constraint]] = None,\n session: Optional[Session] = None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EVScore {!r}>'.format(self.name)\n",
"step-ids": [
12,
14,
16,
20,
21
]
}
|
[
12,
14,
16,
20,
21
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for sheet in sheets:
sh = wb[sheet]
i = 3
while True:
tmp = sh.cell(row=1, column=i).value
if tmp:
days.append(tmp)
else:
break
i += 1
print(days)
days.pop()
i = 2
while True:
tmp = sh.cell(row=i, column=2).value
if tmp:
names.append(tmp)
else:
break
i += 1
W = len(days)
H = len(names)
for y in range(2, 2 + H):
for x in range(3, 3 + W):
tmp = sh.cell(row=y, column=x).value
dict[names[y - 2]][days[x - 3]] = tmp
<|reserved_special_token_0|>
for d in days:
for t in times:
tmpl = [d, t]
for n in names:
if dict[n][d] and t in dict[n][d]:
tmpl.append(1)
else:
tmpl.append(0)
ans.append(tmpl)
for a in ans:
print(a)
<|reserved_special_token_0|>
def write_list_2d(sheet, l_2d, start_row, start_col):
for y, row in enumerate(l_2d):
for x, cell in enumerate(row):
sheet.cell(row=start_row + y, column=start_col + x, value=l_2d[
y][x])
write_list_2d(sheet, ans, 1, 1)
wb.save(Output)
print(sheets[0])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
filename = 'kaito7.xlsx'
Output = 'output7.xlsx'
wb = openpyxl.load_workbook(filename)
sheets = wb.sheetnames
days = []
names = []
dict = defaultdict(dict)
for sheet in sheets:
sh = wb[sheet]
i = 3
while True:
tmp = sh.cell(row=1, column=i).value
if tmp:
days.append(tmp)
else:
break
i += 1
print(days)
days.pop()
i = 2
while True:
tmp = sh.cell(row=i, column=2).value
if tmp:
names.append(tmp)
else:
break
i += 1
W = len(days)
H = len(names)
for y in range(2, 2 + H):
for x in range(3, 3 + W):
tmp = sh.cell(row=y, column=x).value
dict[names[y - 2]][days[x - 3]] = tmp
times = dict['しまむら']['7/10(水)'].split(', ')
ans = [[' ', ' '] + names]
for d in days:
for t in times:
tmpl = [d, t]
for n in names:
if dict[n][d] and t in dict[n][d]:
tmpl.append(1)
else:
tmpl.append(0)
ans.append(tmpl)
for a in ans:
print(a)
wb = openpyxl.load_workbook(Output)
sheets = wb.sheetnames
sheet = wb[sheets[0]]
def write_list_2d(sheet, l_2d, start_row, start_col):
for y, row in enumerate(l_2d):
for x, cell in enumerate(row):
sheet.cell(row=start_row + y, column=start_col + x, value=l_2d[
y][x])
write_list_2d(sheet, ans, 1, 1)
wb.save(Output)
print(sheets[0])
<|reserved_special_token_1|>
import pandas as pd
import os
import openpyxl
from collections import defaultdict, deque

# NOTE(review): `pd`, `os` and `deque` are imported but never used below;
# left untouched here since this pass only adds documentation.

# Input/output workbook file names.
filename = 'kaito7.xlsx'
Output = 'output7.xlsx'

# Load the answer workbook and collect the schedule grid from its sheets.
wb = openpyxl.load_workbook(filename)
sheets = wb.sheetnames
days = []
names = []
# Maps respondent name -> {day label -> raw availability cell value}.
# NOTE(review): the variable name `dict` shadows the builtin `dict` type.
dict = defaultdict(dict)
for sheet in sheets:
    sh = wb[sheet]
    # Day labels are read from row 1 starting at column 3, until the first
    # empty cell.
    i = 3
    while True:
        tmp = sh.cell(row=1, column=i).value
        if tmp:
            days.append(tmp)
        else:
            break
        i += 1
    print(days)
    # Drop the last header cell — presumably a trailing non-day column
    # (notes/total) in the sheet; TODO confirm against the spreadsheet.
    days.pop()
    # Respondent names are read from column 2 starting at row 2, until the
    # first empty cell.
    i = 2
    while True:
        tmp = sh.cell(row=i, column=2).value
        if tmp:
            names.append(tmp)
        else:
            break
        i += 1
    W = len(days)
    H = len(names)
    # Copy the H x W availability grid (rows 2.., columns 3..) into the
    # nested mapping.
    # NOTE(review): `days`/`names` accumulate across sheets, so the index
    # arithmetic below would mix data if the workbook had more than one
    # sheet — confirm a single-sheet workbook is expected.
    for y in range(2, 2 + H):
        for x in range(3, 3 + W):
            tmp = sh.cell(row=y, column=x).value
            dict[names[y - 2]][days[x - 3]] = tmp

# Derive the list of time-slot labels from one known respondent/day cell;
# cells hold a ', '-separated string of slot labels.
times = dict['しまむら']['7/10(水)'].split(', ')

# Build the output matrix: a header row of names, then one row per
# (day, time) pair carrying 1/0 availability flags for each name.
ans = [[' ', ' '] + names]
for d in days:
    for t in times:
        tmpl = [d, t]
        for n in names:
            # Substring test: the slot label must occur in the raw cell
            # value recorded for this name/day.
            if dict[n][d] and t in dict[n][d]:
                tmpl.append(1)
            else:
                tmpl.append(0)
        ans.append(tmpl)
for a in ans:
    print(a)

# Open the output workbook and target its first sheet.
wb = openpyxl.load_workbook(Output)
sheets = wb.sheetnames
sheet = wb[sheets[0]]
def write_list_2d(sheet, l_2d, start_row, start_col):
    """Write the 2-D list ``l_2d`` into ``sheet``.

    The element ``l_2d[0][0]`` lands at cell (start_row, start_col); each
    subsequent row/column is offset by one. Cells are written row by row,
    left to right.
    """
    for row_offset, row_values in enumerate(l_2d):
        for col_offset, value in enumerate(row_values):
            sheet.cell(row=start_row + row_offset,
                       column=start_col + col_offset,
                       value=value)
# Write the availability matrix into the first sheet of the output
# workbook, starting at cell (1, 1), then persist and report the sheet name.
write_list_2d(sheet, ans, 1, 1)
wb.save(Output)
print(sheets[0])
<|reserved_special_token_1|>
import pandas as pd
import os
import openpyxl
from collections import defaultdict,deque

# Tunable parameters
filename = 'kaito7.xlsx' # input file name
Output = 'output7.xlsx' # output workbook file name


wb = openpyxl.load_workbook(filename)
sheets = wb.sheetnames

days = []
names = []
# Maps respondent name -> {day label -> raw availability cell value}.
# NOTE(review): the variable name `dict` shadows the builtin `dict` type.
dict = defaultdict(dict)
for sheet in sheets:
    sh = wb[sheet]
    # Day labels: row 1, from column 3 until the first empty cell.
    i = 3
    while True:
        tmp = sh.cell(row=1,column=i).value
        if tmp:
            days.append(tmp)
        else:
            break
        i += 1
    print(days)
    # Drop the trailing header cell (assumed not to be a day label —
    # TODO confirm against the spreadsheet layout).
    days.pop()

    # Respondent names: column 2, from row 2 until the first empty cell.
    i = 2
    while True:
        tmp = sh.cell(row=i,column=2).value
        if tmp:
            names.append(tmp)
        else:
            break
        i += 1

    W = len(days)
    H = len(names)
    # Copy the H x W grid of availability strings into the nested mapping.
    for y in range(2,2+H):
        for x in range(3,3+W):
            tmp = sh.cell(row=y,column=x).value
            dict[names[y-2]][days[x-3]] = tmp

# Time-slot labels, derived from one known respondent/day cell; cells hold
# a ', '-separated list of slots.
times = dict['しまむら']['7/10(水)'].split(', ')

# Output matrix: a header row of names, then one row per (day, time) pair
# with 1/0 availability flags for each name.
ans = [[' ', ' '] + names]
for d in days:
    for t in times:
        tmpl = [d,t]
        for n in names:
            # Substring test against the raw availability string.
            if dict[n][d] and t in dict[n][d]:
                tmpl.append(1)
            else:
                tmpl.append(0)
        ans.append(tmpl)

for a in ans:
    print(a)


wb = openpyxl.load_workbook(Output)
sheets = wb.sheetnames
sheet = wb[sheets[0]]


def write_list_2d(sheet, l_2d, start_row, start_col):
    """Write the 2-D list `l_2d` into `sheet`, top-left at (start_row, start_col)."""
    for y, row in enumerate(l_2d):
        for x, cell in enumerate(row):
            sheet.cell(row=start_row + y,column=start_col + x,value=l_2d[y][x])

write_list_2d(sheet,ans,1,1)

wb.save(Output)

print(sheets[0])
|
flexible
|
{
"blob_id": "37d5696c402737bfafe21b20b90a49e2753fdc4f",
"index": 7287,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor sheet in sheets:\n sh = wb[sheet]\n i = 3\n while True:\n tmp = sh.cell(row=1, column=i).value\n if tmp:\n days.append(tmp)\n else:\n break\n i += 1\n print(days)\n days.pop()\n i = 2\n while True:\n tmp = sh.cell(row=i, column=2).value\n if tmp:\n names.append(tmp)\n else:\n break\n i += 1\n W = len(days)\n H = len(names)\n for y in range(2, 2 + H):\n for x in range(3, 3 + W):\n tmp = sh.cell(row=y, column=x).value\n dict[names[y - 2]][days[x - 3]] = tmp\n<mask token>\nfor d in days:\n for t in times:\n tmpl = [d, t]\n for n in names:\n if dict[n][d] and t in dict[n][d]:\n tmpl.append(1)\n else:\n tmpl.append(0)\n ans.append(tmpl)\nfor a in ans:\n print(a)\n<mask token>\n\n\ndef write_list_2d(sheet, l_2d, start_row, start_col):\n for y, row in enumerate(l_2d):\n for x, cell in enumerate(row):\n sheet.cell(row=start_row + y, column=start_col + x, value=l_2d[\n y][x])\n\n\nwrite_list_2d(sheet, ans, 1, 1)\nwb.save(Output)\nprint(sheets[0])\n",
"step-3": "<mask token>\nfilename = 'kaito7.xlsx'\nOutput = 'output7.xlsx'\nwb = openpyxl.load_workbook(filename)\nsheets = wb.sheetnames\ndays = []\nnames = []\ndict = defaultdict(dict)\nfor sheet in sheets:\n sh = wb[sheet]\n i = 3\n while True:\n tmp = sh.cell(row=1, column=i).value\n if tmp:\n days.append(tmp)\n else:\n break\n i += 1\n print(days)\n days.pop()\n i = 2\n while True:\n tmp = sh.cell(row=i, column=2).value\n if tmp:\n names.append(tmp)\n else:\n break\n i += 1\n W = len(days)\n H = len(names)\n for y in range(2, 2 + H):\n for x in range(3, 3 + W):\n tmp = sh.cell(row=y, column=x).value\n dict[names[y - 2]][days[x - 3]] = tmp\ntimes = dict['しまむら']['7/10(水)'].split(', ')\nans = [[' ', ' '] + names]\nfor d in days:\n for t in times:\n tmpl = [d, t]\n for n in names:\n if dict[n][d] and t in dict[n][d]:\n tmpl.append(1)\n else:\n tmpl.append(0)\n ans.append(tmpl)\nfor a in ans:\n print(a)\nwb = openpyxl.load_workbook(Output)\nsheets = wb.sheetnames\nsheet = wb[sheets[0]]\n\n\ndef write_list_2d(sheet, l_2d, start_row, start_col):\n for y, row in enumerate(l_2d):\n for x, cell in enumerate(row):\n sheet.cell(row=start_row + y, column=start_col + x, value=l_2d[\n y][x])\n\n\nwrite_list_2d(sheet, ans, 1, 1)\nwb.save(Output)\nprint(sheets[0])\n",
"step-4": "import pandas as pd\nimport os\nimport openpyxl\nfrom collections import defaultdict, deque\nfilename = 'kaito7.xlsx'\nOutput = 'output7.xlsx'\nwb = openpyxl.load_workbook(filename)\nsheets = wb.sheetnames\ndays = []\nnames = []\ndict = defaultdict(dict)\nfor sheet in sheets:\n sh = wb[sheet]\n i = 3\n while True:\n tmp = sh.cell(row=1, column=i).value\n if tmp:\n days.append(tmp)\n else:\n break\n i += 1\n print(days)\n days.pop()\n i = 2\n while True:\n tmp = sh.cell(row=i, column=2).value\n if tmp:\n names.append(tmp)\n else:\n break\n i += 1\n W = len(days)\n H = len(names)\n for y in range(2, 2 + H):\n for x in range(3, 3 + W):\n tmp = sh.cell(row=y, column=x).value\n dict[names[y - 2]][days[x - 3]] = tmp\ntimes = dict['しまむら']['7/10(水)'].split(', ')\nans = [[' ', ' '] + names]\nfor d in days:\n for t in times:\n tmpl = [d, t]\n for n in names:\n if dict[n][d] and t in dict[n][d]:\n tmpl.append(1)\n else:\n tmpl.append(0)\n ans.append(tmpl)\nfor a in ans:\n print(a)\nwb = openpyxl.load_workbook(Output)\nsheets = wb.sheetnames\nsheet = wb[sheets[0]]\n\n\ndef write_list_2d(sheet, l_2d, start_row, start_col):\n for y, row in enumerate(l_2d):\n for x, cell in enumerate(row):\n sheet.cell(row=start_row + y, column=start_col + x, value=l_2d[\n y][x])\n\n\nwrite_list_2d(sheet, ans, 1, 1)\nwb.save(Output)\nprint(sheets[0])\n",
"step-5": "import pandas as pd\nimport os\nimport openpyxl\nfrom collections import defaultdict,deque\n\n# 調節用パラメータ\nfilename = 'kaito7.xlsx' # 入力ファイル名\nOutput = 'output7.xlsx' # 出力ディレクトリ\n\n\nwb = openpyxl.load_workbook(filename)\nsheets = wb.sheetnames\n\ndays = []\nnames = []\ndict = defaultdict(dict)\nfor sheet in sheets:\n sh = wb[sheet]\n i = 3\n while True:\n tmp = sh.cell(row=1,column=i).value\n if tmp:\n days.append(tmp)\n else:\n break\n i += 1\n print(days)\n days.pop()\n\n i = 2\n while True:\n tmp = sh.cell(row=i,column=2).value\n if tmp:\n names.append(tmp)\n else:\n break\n i += 1\n\n W = len(days)\n H = len(names)\n for y in range(2,2+H):\n for x in range(3,3+W):\n tmp = sh.cell(row=y,column=x).value\n dict[names[y-2]][days[x-3]] = tmp\n\ntimes = dict['しまむら']['7/10(水)'].split(', ')\n\nans = [[' ', ' '] + names]\nfor d in days:\n for t in times:\n tmpl = [d,t]\n for n in names:\n if dict[n][d] and t in dict[n][d]:\n tmpl.append(1)\n else:\n tmpl.append(0)\n ans.append(tmpl)\n\nfor a in ans:\n print(a)\n\n\nwb = openpyxl.load_workbook(Output)\nsheets = wb.sheetnames\nsheet = wb[sheets[0]]\n\n\ndef write_list_2d(sheet, l_2d, start_row, start_col):\n for y, row in enumerate(l_2d):\n for x, cell in enumerate(row):\n #print(l_2d[y][x])\n sheet.cell(row=start_row + y,column=start_col + x,value=l_2d[y][x])\n #print(sheet.cell(row=start_row + y,column=start_col + x).value)\n\nwrite_list_2d(sheet,ans,1,1)\n\nwb.save(Output)\n\nprint(sheets[0])\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
from django.contrib import admin

from .models import (
    Brands,
    BrandsAdmin,
    Company,
    CompanyAdmin,
    Contactus,
    ContactusAdmin,
    Products,
    ProductsAdmin,
)

# Register each model with its ModelAdmin so it appears in the Django admin
# site. The original lines each ended with a trailing comma, which turned
# every call into a discarded 1-tuple expression; the commas served no
# purpose and are removed. Registration order is preserved.
admin.site.register(Contactus, ContactusAdmin)
admin.site.register(Company, CompanyAdmin)
admin.site.register(Products, ProductsAdmin)
admin.site.register(Brands, BrandsAdmin)
|
normal
|
{
"blob_id": "9586dc118be4388491770d823a38e8068e3b91cb",
"index": 5960,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(Contactus, ContactusAdmin),\nadmin.site.register(Company, CompanyAdmin),\nadmin.site.register(Products, ProductsAdmin),\nadmin.site.register(Brands, BrandsAdmin),\n",
"step-3": "from django.contrib import admin\nfrom .models import Contactus, ContactusAdmin, Company, CompanyAdmin, Products, ProductsAdmin, Brands, BrandsAdmin\nadmin.site.register(Contactus, ContactusAdmin),\nadmin.site.register(Company, CompanyAdmin),\nadmin.site.register(Products, ProductsAdmin),\nadmin.site.register(Brands, BrandsAdmin),\n",
"step-4": "from django.contrib import admin\nfrom .models import Contactus,ContactusAdmin,Company,CompanyAdmin,Products,ProductsAdmin,Brands,BrandsAdmin\n# Register your models here.\n\nadmin.site.register(Contactus,ContactusAdmin),\nadmin.site.register(Company,CompanyAdmin),\nadmin.site.register(Products,ProductsAdmin),\nadmin.site.register(Brands,BrandsAdmin),",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
s = input().strip()
s = s.replace('BC', 'X')
ans = 0
for ax in re.split('[BC]+', s):
inds = []
for i in range(len(ax)):
if ax[i] == 'A':
inds.append(i)
ans += sum([(len(ax) - 1 - ind) for ind in inds]) - sum(range(len(
inds)))
print(ans)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
s = input().strip()
s = s.replace('BC', 'X')
ans = 0
for ax in re.split('[BC]+', s):
inds = []
for i in range(len(ax)):
if ax[i] == 'A':
inds.append(i)
ans += sum([(len(ax) - 1 - ind) for ind in inds]) - sum(range(len(
inds)))
print(ans)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import re
def main():
s = input().strip()
s = s.replace('BC', 'X')
ans = 0
for ax in re.split('[BC]+', s):
inds = []
for i in range(len(ax)):
if ax[i] == 'A':
inds.append(i)
ans += sum([(len(ax) - 1 - ind) for ind in inds]) - sum(range(len(
inds)))
print(ans)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/python3
# -*- coding:utf-8 -*-
import re
def main():
s = input().strip()
s = s.replace('BC', 'X')
ans = 0
for ax in re.split(r'[BC]+', s):
inds = []
for i in range(len(ax)):
if ax[i] == 'A':
inds.append(i)
ans += sum([len(ax) - 1 - ind for ind in inds]) - sum(range(len(inds)))
print(ans)
if __name__=='__main__':
main()
|
flexible
|
{
"blob_id": "4100415b0df52e8e14b00dd66c7c53cd46c0ea6e",
"index": 2378,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n s = input().strip()\n s = s.replace('BC', 'X')\n ans = 0\n for ax in re.split('[BC]+', s):\n inds = []\n for i in range(len(ax)):\n if ax[i] == 'A':\n inds.append(i)\n ans += sum([(len(ax) - 1 - ind) for ind in inds]) - sum(range(len(\n inds)))\n print(ans)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n s = input().strip()\n s = s.replace('BC', 'X')\n ans = 0\n for ax in re.split('[BC]+', s):\n inds = []\n for i in range(len(ax)):\n if ax[i] == 'A':\n inds.append(i)\n ans += sum([(len(ax) - 1 - ind) for ind in inds]) - sum(range(len(\n inds)))\n print(ans)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import re\n\n\ndef main():\n s = input().strip()\n s = s.replace('BC', 'X')\n ans = 0\n for ax in re.split('[BC]+', s):\n inds = []\n for i in range(len(ax)):\n if ax[i] == 'A':\n inds.append(i)\n ans += sum([(len(ax) - 1 - ind) for ind in inds]) - sum(range(len(\n inds)))\n print(ans)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n\nimport re\n\ndef main():\n s = input().strip()\n s = s.replace('BC', 'X')\n ans = 0\n for ax in re.split(r'[BC]+', s):\n inds = []\n for i in range(len(ax)):\n if ax[i] == 'A':\n inds.append(i)\n ans += sum([len(ax) - 1 - ind for ind in inds]) - sum(range(len(inds)))\n print(ans)\n\nif __name__=='__main__':\n main()\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import re
from captcha.fields import CaptchaField
from django import forms
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from news.models import News, Comment, Profile
class UserRegisterForm(UserCreationForm):
"""Форма регистрации"""
username = forms.CharField(label='Имя пользоватьеля', help_text='Максимум 150 символов',
widget=forms.TextInput(attrs={"class": "form-control"}))
password1 = forms.CharField(label='Пароль', widget=forms.PasswordInput(attrs={"class": "form-control"}))
password2 = forms.CharField(label='Подтверждение пароля',
widget=forms.PasswordInput(attrs={"class": "form-control"}))
email = forms.EmailField(label='Адрес электронной почты', widget=forms.EmailInput(attrs={"class": "form-control"}))
class Meta:
model = User
fields = ('username', 'email', 'password1', 'password2')
# widgets = {
# 'username': forms.TextInput(attrs={"class": "form-control"}),
# 'email': forms.EmailInput(attrs={"class": "form-control"}),
# 'password1': forms.PasswordInput(attrs={"class": "form-control"}),
# 'password2': forms.PasswordInput(attrs={"class": "form-control"}),
# }
class UserLoginForm(AuthenticationForm):
"""Форма входа в систему"""
username = forms.CharField(label='Имя пользоватьеля',
widget=forms.TextInput(attrs={"class": "form-control"}))
password = forms.CharField(label='Пароль', widget=forms.PasswordInput(attrs={"class": "form-control"}))
class NewsForm(forms.ModelForm):
"""Форма создания новости"""
class Meta:
model = News
fields = ['title', 'slug', 'content', 'photo', 'category']
widgets = {
'title': forms.TextInput(attrs={"class": "form-control"}),
'content': forms.Textarea(attrs={"class": "form-control", "rows": 5}),
'category': forms.Select(attrs={"class": "form-control"}),
}
"""Напишем собственный валидатор для title"""
def clean_title(self):
"""Получим очищеный title"""
title = self.cleaned_data['title']
if re.match(r'\d', title):
raise ValidationError('Название не должно начинаться с цифры')
return title
class ContactForm(forms.Form):
"""Форма обратной связи"""
subject = forms.CharField(label='Тема',
widget=forms.TextInput(attrs={"class": "form-control"}))
content = forms.CharField(label='Текст', widget=forms.Textarea(attrs={"class": "form-control",
'rows': 5}))
captcha = CaptchaField()
class CommentForm(forms.ModelForm):
"""Форма комментариев"""
class Meta:
model = Comment
fields = ['text', ]
widgets = {
'text': forms.Textarea(attrs={"class": "form-control", "rows": 5}),
}
class UserForm(forms.ModelForm):
class Meta:
model = User
fields = ('first_name', 'last_name', 'email')
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = ['location', 'birth_date', ]
|
normal
|
{
"blob_id": "1b4a012f5b491c39c0abd139dd54f2095ea9d221",
"index": 3016,
"step-1": "<mask token>\n\n\nclass ContactForm(forms.Form):\n \"\"\"Форма обратной связи\"\"\"\n subject = forms.CharField(label='Тема', widget=forms.TextInput(attrs={\n 'class': 'form-control'}))\n content = forms.CharField(label='Текст', widget=forms.Textarea(attrs={\n 'class': 'form-control', 'rows': 5}))\n captcha = CaptchaField()\n\n\nclass CommentForm(forms.ModelForm):\n \"\"\"Форма комментариев\"\"\"\n\n\n class Meta:\n model = Comment\n fields = ['text']\n widgets = {'text': forms.Textarea(attrs={'class': 'form-control',\n 'rows': 5})}\n\n\nclass UserForm(forms.ModelForm):\n\n\n class Meta:\n model = User\n fields = 'first_name', 'last_name', 'email'\n\n\nclass ProfileForm(forms.ModelForm):\n\n\n class Meta:\n model = Profile\n fields = ['location', 'birth_date']\n",
"step-2": "<mask token>\n\n\nclass UserLoginForm(AuthenticationForm):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass NewsForm(forms.ModelForm):\n \"\"\"Форма создания новости\"\"\"\n\n\n class Meta:\n model = News\n fields = ['title', 'slug', 'content', 'photo', 'category']\n widgets = {'title': forms.TextInput(attrs={'class': 'form-control'}\n ), 'content': forms.Textarea(attrs={'class': 'form-control',\n 'rows': 5}), 'category': forms.Select(attrs={'class':\n 'form-control'})}\n \"\"\"Напишем собственный валидатор для title\"\"\"\n\n def clean_title(self):\n \"\"\"Получим очищеный title\"\"\"\n title = self.cleaned_data['title']\n if re.match('\\\\d', title):\n raise ValidationError('Название не должно начинаться с цифры')\n return title\n\n\nclass ContactForm(forms.Form):\n \"\"\"Форма обратной связи\"\"\"\n subject = forms.CharField(label='Тема', widget=forms.TextInput(attrs={\n 'class': 'form-control'}))\n content = forms.CharField(label='Текст', widget=forms.Textarea(attrs={\n 'class': 'form-control', 'rows': 5}))\n captcha = CaptchaField()\n\n\nclass CommentForm(forms.ModelForm):\n \"\"\"Форма комментариев\"\"\"\n\n\n class Meta:\n model = Comment\n fields = ['text']\n widgets = {'text': forms.Textarea(attrs={'class': 'form-control',\n 'rows': 5})}\n\n\nclass UserForm(forms.ModelForm):\n\n\n class Meta:\n model = User\n fields = 'first_name', 'last_name', 'email'\n\n\nclass ProfileForm(forms.ModelForm):\n\n\n class Meta:\n model = Profile\n fields = ['location', 'birth_date']\n",
"step-3": "<mask token>\n\n\nclass UserRegisterForm(UserCreationForm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = User\n fields = 'username', 'email', 'password1', 'password2'\n\n\nclass UserLoginForm(AuthenticationForm):\n \"\"\"Форма входа в систему\"\"\"\n username = forms.CharField(label='Имя пользоватьеля', widget=forms.\n TextInput(attrs={'class': 'form-control'}))\n password = forms.CharField(label='Пароль', widget=forms.PasswordInput(\n attrs={'class': 'form-control'}))\n\n\nclass NewsForm(forms.ModelForm):\n \"\"\"Форма создания новости\"\"\"\n\n\n class Meta:\n model = News\n fields = ['title', 'slug', 'content', 'photo', 'category']\n widgets = {'title': forms.TextInput(attrs={'class': 'form-control'}\n ), 'content': forms.Textarea(attrs={'class': 'form-control',\n 'rows': 5}), 'category': forms.Select(attrs={'class':\n 'form-control'})}\n \"\"\"Напишем собственный валидатор для title\"\"\"\n\n def clean_title(self):\n \"\"\"Получим очищеный title\"\"\"\n title = self.cleaned_data['title']\n if re.match('\\\\d', title):\n raise ValidationError('Название не должно начинаться с цифры')\n return title\n\n\nclass ContactForm(forms.Form):\n \"\"\"Форма обратной связи\"\"\"\n subject = forms.CharField(label='Тема', widget=forms.TextInput(attrs={\n 'class': 'form-control'}))\n content = forms.CharField(label='Текст', widget=forms.Textarea(attrs={\n 'class': 'form-control', 'rows': 5}))\n captcha = CaptchaField()\n\n\nclass CommentForm(forms.ModelForm):\n \"\"\"Форма комментариев\"\"\"\n\n\n class Meta:\n model = Comment\n fields = ['text']\n widgets = {'text': forms.Textarea(attrs={'class': 'form-control',\n 'rows': 5})}\n\n\nclass UserForm(forms.ModelForm):\n\n\n class Meta:\n model = User\n fields = 'first_name', 'last_name', 'email'\n\n\nclass ProfileForm(forms.ModelForm):\n\n\n class Meta:\n model = Profile\n fields = ['location', 'birth_date']\n",
"step-4": "<mask token>\n\n\nclass UserRegisterForm(UserCreationForm):\n \"\"\"Форма регистрации\"\"\"\n username = forms.CharField(label='Имя пользоватьеля', help_text=\n 'Максимум 150 символов', widget=forms.TextInput(attrs={'class':\n 'form-control'}))\n password1 = forms.CharField(label='Пароль', widget=forms.PasswordInput(\n attrs={'class': 'form-control'}))\n password2 = forms.CharField(label='Подтверждение пароля', widget=forms.\n PasswordInput(attrs={'class': 'form-control'}))\n email = forms.EmailField(label='Адрес электронной почты', widget=forms.\n EmailInput(attrs={'class': 'form-control'}))\n\n\n class Meta:\n model = User\n fields = 'username', 'email', 'password1', 'password2'\n\n\nclass UserLoginForm(AuthenticationForm):\n \"\"\"Форма входа в систему\"\"\"\n username = forms.CharField(label='Имя пользоватьеля', widget=forms.\n TextInput(attrs={'class': 'form-control'}))\n password = forms.CharField(label='Пароль', widget=forms.PasswordInput(\n attrs={'class': 'form-control'}))\n\n\nclass NewsForm(forms.ModelForm):\n \"\"\"Форма создания новости\"\"\"\n\n\n class Meta:\n model = News\n fields = ['title', 'slug', 'content', 'photo', 'category']\n widgets = {'title': forms.TextInput(attrs={'class': 'form-control'}\n ), 'content': forms.Textarea(attrs={'class': 'form-control',\n 'rows': 5}), 'category': forms.Select(attrs={'class':\n 'form-control'})}\n \"\"\"Напишем собственный валидатор для title\"\"\"\n\n def clean_title(self):\n \"\"\"Получим очищеный title\"\"\"\n title = self.cleaned_data['title']\n if re.match('\\\\d', title):\n raise ValidationError('Название не должно начинаться с цифры')\n return title\n\n\nclass ContactForm(forms.Form):\n \"\"\"Форма обратной связи\"\"\"\n subject = forms.CharField(label='Тема', widget=forms.TextInput(attrs={\n 'class': 'form-control'}))\n content = forms.CharField(label='Текст', widget=forms.Textarea(attrs={\n 'class': 'form-control', 'rows': 5}))\n captcha = CaptchaField()\n\n\nclass 
CommentForm(forms.ModelForm):\n \"\"\"Форма комментариев\"\"\"\n\n\n class Meta:\n model = Comment\n fields = ['text']\n widgets = {'text': forms.Textarea(attrs={'class': 'form-control',\n 'rows': 5})}\n\n\nclass UserForm(forms.ModelForm):\n\n\n class Meta:\n model = User\n fields = 'first_name', 'last_name', 'email'\n\n\nclass ProfileForm(forms.ModelForm):\n\n\n class Meta:\n model = Profile\n fields = ['location', 'birth_date']\n",
"step-5": "import re\nfrom captcha.fields import CaptchaField\nfrom django import forms\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\nfrom news.models import News, Comment, Profile\n\n\nclass UserRegisterForm(UserCreationForm):\n \"\"\"Форма регистрации\"\"\"\n username = forms.CharField(label='Имя пользоватьеля', help_text='Максимум 150 символов',\n widget=forms.TextInput(attrs={\"class\": \"form-control\"}))\n password1 = forms.CharField(label='Пароль', widget=forms.PasswordInput(attrs={\"class\": \"form-control\"}))\n password2 = forms.CharField(label='Подтверждение пароля',\n widget=forms.PasswordInput(attrs={\"class\": \"form-control\"}))\n email = forms.EmailField(label='Адрес электронной почты', widget=forms.EmailInput(attrs={\"class\": \"form-control\"}))\n\n class Meta:\n model = User\n fields = ('username', 'email', 'password1', 'password2')\n # widgets = {\n # 'username': forms.TextInput(attrs={\"class\": \"form-control\"}),\n # 'email': forms.EmailInput(attrs={\"class\": \"form-control\"}),\n # 'password1': forms.PasswordInput(attrs={\"class\": \"form-control\"}),\n # 'password2': forms.PasswordInput(attrs={\"class\": \"form-control\"}),\n # }\n\n\nclass UserLoginForm(AuthenticationForm):\n \"\"\"Форма входа в систему\"\"\"\n username = forms.CharField(label='Имя пользоватьеля',\n widget=forms.TextInput(attrs={\"class\": \"form-control\"}))\n password = forms.CharField(label='Пароль', widget=forms.PasswordInput(attrs={\"class\": \"form-control\"}))\n\n\nclass NewsForm(forms.ModelForm):\n \"\"\"Форма создания новости\"\"\"\n\n class Meta:\n model = News\n fields = ['title', 'slug', 'content', 'photo', 'category']\n widgets = {\n 'title': forms.TextInput(attrs={\"class\": \"form-control\"}),\n 'content': forms.Textarea(attrs={\"class\": \"form-control\", \"rows\": 5}),\n 'category': forms.Select(attrs={\"class\": 
\"form-control\"}),\n }\n\n \"\"\"Напишем собственный валидатор для title\"\"\"\n\n def clean_title(self):\n \"\"\"Получим очищеный title\"\"\"\n title = self.cleaned_data['title']\n if re.match(r'\\d', title):\n raise ValidationError('Название не должно начинаться с цифры')\n return title\n\n\nclass ContactForm(forms.Form):\n \"\"\"Форма обратной связи\"\"\"\n subject = forms.CharField(label='Тема',\n widget=forms.TextInput(attrs={\"class\": \"form-control\"}))\n content = forms.CharField(label='Текст', widget=forms.Textarea(attrs={\"class\": \"form-control\",\n 'rows': 5}))\n captcha = CaptchaField()\n\n\nclass CommentForm(forms.ModelForm):\n \"\"\"Форма комментариев\"\"\"\n\n class Meta:\n model = Comment\n fields = ['text', ]\n widgets = {\n 'text': forms.Textarea(attrs={\"class\": \"form-control\", \"rows\": 5}),\n }\n\n\nclass UserForm(forms.ModelForm):\n class Meta:\n model = User\n fields = ('first_name', 'last_name', 'email')\n\n\nclass ProfileForm(forms.ModelForm):\n class Meta:\n model = Profile\n fields = ['location', 'birth_date', ]\n\n\n",
"step-ids": [
7,
11,
14,
16,
18
]
}
|
[
7,
11,
14,
16,
18
] |
<|reserved_special_token_0|>
class SegMetric(Metric):
def __init__(self, iou_thr, prob_thr, img_size, dist_sync_on_step=False):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.iou_thr = iou_thr
self.prob_thr = prob_thr
self.img_size = img_size
self.use_ddp = dist_sync_on_step
self.add_state('csv_files', default=[], dist_reduce_fx='cat')
<|reserved_special_token_0|>
def update_each(self, preds: torch.Tensor, target: torch.Tensor):
self.update(preds, target)
def compute(self):
gt = 0
tp = 0
fp = 0
pos = 0
neg = 0
for csv in self.csv_files:
gt += getattr(self, f'{csv}_gt').item()
tp += getattr(self, f'{csv}_tp').item()
fp += getattr(self, f'{csv}_fp').item()
pos += getattr(self, f'{csv}_pos').item()
neg += getattr(self, f'{csv}_neg').item()
pre = tp / (tp + fp * (pos / (neg + 1e-05)) + 1e-05)
rec = tp / (gt + 1e-05)
f1 = 2 * (pre * rec) / (pre + rec + 1e-05)
myf1 = (pre + rec) / 2.0
lesion_metric_dict = {'pre': pre, 'rec': rec, 'f1': f1, 'myf1': myf1}
return lesion_metric_dict
def compute_each(self):
metric_dict_each_csv = {}
for csv in self.csv_files:
gt = getattr(self, f'{csv}_gt').item()
tp = getattr(self, f'{csv}_tp').item()
fp = getattr(self, f'{csv}_fp').item()
pos = getattr(self, f'{csv}_pos').item()
neg = getattr(self, f'{csv}_neg').item()
pre = tp / (tp + fp * (pos / (neg + 1e-05)) + 1e-05)
rec = tp / (gt + 1e-05)
f1 = 2 * (pre * rec) / (pre + rec + 1e-05)
fppi = fp / (pos + neg + 1e-05)
lesion_metric_dict = {'gt': gt, 'pos': pos, 'neg': neg, 'pre':
pre, 'rec': rec, 'f1': f1, 'fppi': fppi}
metric_dict_each_csv[csv] = lesion_metric_dict
return metric_dict_each_csv
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SegMetric(Metric):
def __init__(self, iou_thr, prob_thr, img_size, dist_sync_on_step=False):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.iou_thr = iou_thr
self.prob_thr = prob_thr
self.img_size = img_size
self.use_ddp = dist_sync_on_step
self.add_state('csv_files', default=[], dist_reduce_fx='cat')
def update(self, preds: torch.Tensor, target: torch.Tensor):
logit_seg, _ = preds
_, mask, mask_cls, _, img_path, _ = target
assert logit_seg.shape == mask.shape
pred_seg = torch.sigmoid(logit_seg).detach().cpu().numpy()
gt_seg = mask.detach().cpu().numpy()
gt_cls = mask_cls.detach().cpu().numpy()[:, 0].tolist()
pred_seg = pred_seg.astype('float32')
for idx, file_path in enumerate(img_path):
pred = cv2.resize(pred_seg[idx][0], (self.img_size, self.img_size))
pred = np.expand_dims(pred, 0)
gt = cv2.resize(gt_seg[idx][0], (self.img_size, self.img_size),
interpolation=cv2.INTER_NEAREST)
gt = np.expand_dims(gt, 0)
gt_c = gt_cls[idx]
is_p = int(gt_c == 1.0)
is_n = 1 - is_p
gt_nums_, pred_nums_, tp_nums_, fp_nums_ = evaluation(pred, gt,
iou_th=self.iou_thr, prob_ths=[self.prob_thr])
csv = file_path.split('png_1024/')[1].split('/')[0]
if not hasattr(self, f'{csv}_gt'):
self.csv_files += [csv]
self.add_state(f'{csv}_gt', default=Tensor(0),
dist_reduce_fx='sum')
self.add_state(f'{csv}_pred', default=Tensor(0),
dist_reduce_fx='sum')
self.add_state(f'{csv}_tp', default=Tensor(0),
dist_reduce_fx='sum')
self.add_state(f'{csv}_fp', default=Tensor(0),
dist_reduce_fx='sum')
self.add_state(f'{csv}_pos', default=Tensor(0),
dist_reduce_fx='sum')
self.add_state(f'{csv}_neg', default=torch.tensor(0),
dist_reduce_fx='sum')
setattr(self, f'{csv}_gt', getattr(self, f'{csv}_gt') + gt_nums_[0]
)
setattr(self, f'{csv}_pred', getattr(self, f'{csv}_pred') +
pred_nums_[0, 0])
setattr(self, f'{csv}_tp', getattr(self, f'{csv}_tp') +
tp_nums_[0, 0])
setattr(self, f'{csv}_fp', getattr(self, f'{csv}_fp') +
fp_nums_[0, 0])
setattr(self, f'{csv}_pos', getattr(self, f'{csv}_pos') + is_p)
setattr(self, f'{csv}_neg', getattr(self, f'{csv}_neg') + is_n)
def update_each(self, preds: torch.Tensor, target: torch.Tensor):
self.update(preds, target)
def compute(self):
gt = 0
tp = 0
fp = 0
pos = 0
neg = 0
for csv in self.csv_files:
gt += getattr(self, f'{csv}_gt').item()
tp += getattr(self, f'{csv}_tp').item()
fp += getattr(self, f'{csv}_fp').item()
pos += getattr(self, f'{csv}_pos').item()
neg += getattr(self, f'{csv}_neg').item()
pre = tp / (tp + fp * (pos / (neg + 1e-05)) + 1e-05)
rec = tp / (gt + 1e-05)
f1 = 2 * (pre * rec) / (pre + rec + 1e-05)
myf1 = (pre + rec) / 2.0
lesion_metric_dict = {'pre': pre, 'rec': rec, 'f1': f1, 'myf1': myf1}
return lesion_metric_dict
def compute_each(self):
metric_dict_each_csv = {}
for csv in self.csv_files:
gt = getattr(self, f'{csv}_gt').item()
tp = getattr(self, f'{csv}_tp').item()
fp = getattr(self, f'{csv}_fp').item()
pos = getattr(self, f'{csv}_pos').item()
neg = getattr(self, f'{csv}_neg').item()
pre = tp / (tp + fp * (pos / (neg + 1e-05)) + 1e-05)
rec = tp / (gt + 1e-05)
f1 = 2 * (pre * rec) / (pre + rec + 1e-05)
fppi = fp / (pos + neg + 1e-05)
lesion_metric_dict = {'gt': gt, 'pos': pos, 'neg': neg, 'pre':
pre, 'rec': rec, 'f1': f1, 'fppi': fppi}
metric_dict_each_csv[csv] = lesion_metric_dict
return metric_dict_each_csv
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SegMetric(Metric):
def __init__(self, iou_thr, prob_thr, img_size, dist_sync_on_step=False):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.iou_thr = iou_thr
self.prob_thr = prob_thr
self.img_size = img_size
self.use_ddp = dist_sync_on_step
self.add_state('csv_files', default=[], dist_reduce_fx='cat')
def update(self, preds: torch.Tensor, target: torch.Tensor):
logit_seg, _ = preds
_, mask, mask_cls, _, img_path, _ = target
assert logit_seg.shape == mask.shape
pred_seg = torch.sigmoid(logit_seg).detach().cpu().numpy()
gt_seg = mask.detach().cpu().numpy()
gt_cls = mask_cls.detach().cpu().numpy()[:, 0].tolist()
pred_seg = pred_seg.astype('float32')
for idx, file_path in enumerate(img_path):
pred = cv2.resize(pred_seg[idx][0], (self.img_size, self.img_size))
pred = np.expand_dims(pred, 0)
gt = cv2.resize(gt_seg[idx][0], (self.img_size, self.img_size),
interpolation=cv2.INTER_NEAREST)
gt = np.expand_dims(gt, 0)
gt_c = gt_cls[idx]
is_p = int(gt_c == 1.0)
is_n = 1 - is_p
gt_nums_, pred_nums_, tp_nums_, fp_nums_ = evaluation(pred, gt,
iou_th=self.iou_thr, prob_ths=[self.prob_thr])
csv = file_path.split('png_1024/')[1].split('/')[0]
if not hasattr(self, f'{csv}_gt'):
self.csv_files += [csv]
self.add_state(f'{csv}_gt', default=Tensor(0),
dist_reduce_fx='sum')
self.add_state(f'{csv}_pred', default=Tensor(0),
dist_reduce_fx='sum')
self.add_state(f'{csv}_tp', default=Tensor(0),
dist_reduce_fx='sum')
self.add_state(f'{csv}_fp', default=Tensor(0),
dist_reduce_fx='sum')
self.add_state(f'{csv}_pos', default=Tensor(0),
dist_reduce_fx='sum')
self.add_state(f'{csv}_neg', default=torch.tensor(0),
dist_reduce_fx='sum')
setattr(self, f'{csv}_gt', getattr(self, f'{csv}_gt') + gt_nums_[0]
)
setattr(self, f'{csv}_pred', getattr(self, f'{csv}_pred') +
pred_nums_[0, 0])
setattr(self, f'{csv}_tp', getattr(self, f'{csv}_tp') +
tp_nums_[0, 0])
setattr(self, f'{csv}_fp', getattr(self, f'{csv}_fp') +
fp_nums_[0, 0])
setattr(self, f'{csv}_pos', getattr(self, f'{csv}_pos') + is_p)
setattr(self, f'{csv}_neg', getattr(self, f'{csv}_neg') + is_n)
def update_each(self, preds: torch.Tensor, target: torch.Tensor):
self.update(preds, target)
def compute(self):
gt = 0
tp = 0
fp = 0
pos = 0
neg = 0
for csv in self.csv_files:
gt += getattr(self, f'{csv}_gt').item()
tp += getattr(self, f'{csv}_tp').item()
fp += getattr(self, f'{csv}_fp').item()
pos += getattr(self, f'{csv}_pos').item()
neg += getattr(self, f'{csv}_neg').item()
pre = tp / (tp + fp * (pos / (neg + 1e-05)) + 1e-05)
rec = tp / (gt + 1e-05)
f1 = 2 * (pre * rec) / (pre + rec + 1e-05)
myf1 = (pre + rec) / 2.0
lesion_metric_dict = {'pre': pre, 'rec': rec, 'f1': f1, 'myf1': myf1}
return lesion_metric_dict
def compute_each(self):
metric_dict_each_csv = {}
for csv in self.csv_files:
gt = getattr(self, f'{csv}_gt').item()
tp = getattr(self, f'{csv}_tp').item()
fp = getattr(self, f'{csv}_fp').item()
pos = getattr(self, f'{csv}_pos').item()
neg = getattr(self, f'{csv}_neg').item()
pre = tp / (tp + fp * (pos / (neg + 1e-05)) + 1e-05)
rec = tp / (gt + 1e-05)
f1 = 2 * (pre * rec) / (pre + rec + 1e-05)
fppi = fp / (pos + neg + 1e-05)
lesion_metric_dict = {'gt': gt, 'pos': pos, 'neg': neg, 'pre':
pre, 'rec': rec, 'f1': f1, 'fppi': fppi}
metric_dict_each_csv[csv] = lesion_metric_dict
return metric_dict_each_csv
def calc_iou(bbox_a, bbox_b):
"""
:param a: bbox list [min_y, min_x, max_y, max_x]
:param b: bbox list [min_y, min_x, max_y, max_x]
:return:
"""
size_a = (bbox_a[2] - bbox_a[0]) * (bbox_a[3] - bbox_a[1])
size_b = (bbox_b[2] - bbox_b[0]) * (bbox_b[3] - bbox_b[1])
min_ab_y = max(bbox_a[0], bbox_b[0])
min_ab_x = max(bbox_a[1], bbox_b[1])
max_ab_y = min(bbox_a[2], bbox_b[2])
max_ab_x = min(bbox_a[3], bbox_b[3])
inter_ab = max(0, max_ab_y - min_ab_y) * max(0, max_ab_x - min_ab_x)
return inter_ab / (size_a + size_b - inter_ab)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SegMetric(Metric):
def __init__(self, iou_thr, prob_thr, img_size, dist_sync_on_step=False):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.iou_thr = iou_thr
self.prob_thr = prob_thr
self.img_size = img_size
self.use_ddp = dist_sync_on_step
self.add_state('csv_files', default=[], dist_reduce_fx='cat')
def update(self, preds: torch.Tensor, target: torch.Tensor):
logit_seg, _ = preds
_, mask, mask_cls, _, img_path, _ = target
assert logit_seg.shape == mask.shape
pred_seg = torch.sigmoid(logit_seg).detach().cpu().numpy()
gt_seg = mask.detach().cpu().numpy()
gt_cls = mask_cls.detach().cpu().numpy()[:, 0].tolist()
pred_seg = pred_seg.astype('float32')
for idx, file_path in enumerate(img_path):
pred = cv2.resize(pred_seg[idx][0], (self.img_size, self.img_size))
pred = np.expand_dims(pred, 0)
gt = cv2.resize(gt_seg[idx][0], (self.img_size, self.img_size),
interpolation=cv2.INTER_NEAREST)
gt = np.expand_dims(gt, 0)
gt_c = gt_cls[idx]
is_p = int(gt_c == 1.0)
is_n = 1 - is_p
gt_nums_, pred_nums_, tp_nums_, fp_nums_ = evaluation(pred, gt,
iou_th=self.iou_thr, prob_ths=[self.prob_thr])
csv = file_path.split('png_1024/')[1].split('/')[0]
if not hasattr(self, f'{csv}_gt'):
self.csv_files += [csv]
self.add_state(f'{csv}_gt', default=Tensor(0),
dist_reduce_fx='sum')
self.add_state(f'{csv}_pred', default=Tensor(0),
dist_reduce_fx='sum')
self.add_state(f'{csv}_tp', default=Tensor(0),
dist_reduce_fx='sum')
self.add_state(f'{csv}_fp', default=Tensor(0),
dist_reduce_fx='sum')
self.add_state(f'{csv}_pos', default=Tensor(0),
dist_reduce_fx='sum')
self.add_state(f'{csv}_neg', default=torch.tensor(0),
dist_reduce_fx='sum')
setattr(self, f'{csv}_gt', getattr(self, f'{csv}_gt') + gt_nums_[0]
)
setattr(self, f'{csv}_pred', getattr(self, f'{csv}_pred') +
pred_nums_[0, 0])
setattr(self, f'{csv}_tp', getattr(self, f'{csv}_tp') +
tp_nums_[0, 0])
setattr(self, f'{csv}_fp', getattr(self, f'{csv}_fp') +
fp_nums_[0, 0])
setattr(self, f'{csv}_pos', getattr(self, f'{csv}_pos') + is_p)
setattr(self, f'{csv}_neg', getattr(self, f'{csv}_neg') + is_n)
def update_each(self, preds: torch.Tensor, target: torch.Tensor):
self.update(preds, target)
def compute(self):
gt = 0
tp = 0
fp = 0
pos = 0
neg = 0
for csv in self.csv_files:
gt += getattr(self, f'{csv}_gt').item()
tp += getattr(self, f'{csv}_tp').item()
fp += getattr(self, f'{csv}_fp').item()
pos += getattr(self, f'{csv}_pos').item()
neg += getattr(self, f'{csv}_neg').item()
pre = tp / (tp + fp * (pos / (neg + 1e-05)) + 1e-05)
rec = tp / (gt + 1e-05)
f1 = 2 * (pre * rec) / (pre + rec + 1e-05)
myf1 = (pre + rec) / 2.0
lesion_metric_dict = {'pre': pre, 'rec': rec, 'f1': f1, 'myf1': myf1}
return lesion_metric_dict
def compute_each(self):
metric_dict_each_csv = {}
for csv in self.csv_files:
gt = getattr(self, f'{csv}_gt').item()
tp = getattr(self, f'{csv}_tp').item()
fp = getattr(self, f'{csv}_fp').item()
pos = getattr(self, f'{csv}_pos').item()
neg = getattr(self, f'{csv}_neg').item()
pre = tp / (tp + fp * (pos / (neg + 1e-05)) + 1e-05)
rec = tp / (gt + 1e-05)
f1 = 2 * (pre * rec) / (pre + rec + 1e-05)
fppi = fp / (pos + neg + 1e-05)
lesion_metric_dict = {'gt': gt, 'pos': pos, 'neg': neg, 'pre':
pre, 'rec': rec, 'f1': f1, 'fppi': fppi}
metric_dict_each_csv[csv] = lesion_metric_dict
return metric_dict_each_csv
def calc_iou(bbox_a, bbox_b):
"""
:param a: bbox list [min_y, min_x, max_y, max_x]
:param b: bbox list [min_y, min_x, max_y, max_x]
:return:
"""
size_a = (bbox_a[2] - bbox_a[0]) * (bbox_a[3] - bbox_a[1])
size_b = (bbox_b[2] - bbox_b[0]) * (bbox_b[3] - bbox_b[1])
min_ab_y = max(bbox_a[0], bbox_b[0])
min_ab_x = max(bbox_a[1], bbox_b[1])
max_ab_y = min(bbox_a[2], bbox_b[2])
max_ab_x = min(bbox_a[3], bbox_b[3])
inter_ab = max(0, max_ab_y - min_ab_y) * max(0, max_ab_x - min_ab_x)
return inter_ab / (size_a + size_b - inter_ab)
def evaluation(pred, gt, iou_th=0.15, prob_ths=[0.5]):
"""
:param pred: Prediction Seg Map, shape = (1, num_classes, height, width)
:param gt: Ground-truth Seg Map, shape = (1, num_classes, height, width)
:param iou_th: Threshold for prediction and gt matching
:return:
gt_nums: Ground-truth region numbers
pred_nums: Prediction region numbers
tp_nums: True Positive region numbers
fp_nums: False Positive region numbers
# 필수 가정: batch_size=1 (regionprops 함수가 2차원 행렬에만 적용 가능함)
# Region을 고려에서 제외하는 경우(2048x2048 이미지 기반, pixel spacing=0.2mm)
# i) Region bbox 크기 < 400 pixels
# ii) (현재 사용x) Region bbox 장축<4mm(20pixels), 단축<2mm(10 pixels)
# issue: # 3. 영상사이즈는 디텍터 크기에 따라 달라질 수 있습니다. 완벽히 하기 위해선 pixel spacing 정보를 받아야 합니다.
# # 따라서 영상 크기에 대해 기준이 변경되는 것은 현단계에서는 적용할 필요가 없어 보입니다.
"""
if len(pred.shape) > 3:
pred = pred[0]
gt = gt[0]
num_classes = pred.shape[0]
image_size = gt.shape[2]
gt_regions = [skimage.measure.regionprops(skimage.measure.label(gt[c, :,
:])) for c in range(num_classes)]
for c in range(num_classes):
gt_regions[c] = [r for r in gt_regions[c] if r.area > (20 * (
image_size / 2048)) ** 2]
pred_regions = [[skimage.measure.regionprops(skimage.measure.label(pred
[c, :, :] > th)) for c in range(num_classes)] for th in prob_ths]
gt_nums = np.array([len(gt_regions[c]) for c in range(num_classes)])
pred_nums = np.array([[len(pred_regions[thi][c]) for c in range(
num_classes)] for thi in range(len(prob_ths))])
tp_nums = np.zeros((len(prob_ths), num_classes))
fp_nums = pred_nums.copy()
for c in range(num_classes):
for thi in range(len(prob_ths)):
if gt_nums[c] == 0 or pred_nums[thi][c] == 0:
continue
iou_matrix = np.zeros((gt_nums[c], pred_nums[thi][c]))
for gi, gr in enumerate(gt_regions[c]):
for pi, pr in enumerate(pred_regions[thi][c]):
iou_matrix[gi, pi] = calc_iou(gr.bbox, pr.bbox)
tp_nums[thi][c] = np.sum(np.any(iou_matrix >= iou_th, axis=1))
fp_nums[thi][c] -= np.sum(np.any(iou_matrix > iou_th, axis=0))
return gt_nums, pred_nums, tp_nums, fp_nums
<|reserved_special_token_1|>
# For better usage on ddp
import torch
from pytorch_lightning.metrics import Metric
import cv2
import numpy as np
import skimage
import torch.tensor as Tensor
class SegMetric(Metric):
    """Region-level segmentation metric, accumulated separately per source CSV.

    Each prediction map is thresholded, split into connected regions and
    matched against ground-truth regions by bounding-box IoU (see
    ``evaluation``).  The per-CSV counters (gt / pred / tp / fp region counts
    and positive / negative case counts) are registered as metric states so
    they reduce correctly across DDP processes.
    """

    def __init__(self, iou_thr, prob_thr, img_size, dist_sync_on_step=False):
        """
        :param iou_thr: bbox-IoU threshold for matching predictions to gt regions
        :param prob_thr: probability threshold used to binarize predictions
        :param img_size: side length the masks are resized to before evaluation
        :param dist_sync_on_step: synchronize metric state across processes each step
        """
        super().__init__(dist_sync_on_step=dist_sync_on_step)
        # call `self.add_state`for every internal state that is needed for the metrics computations
        # dist_reduce_fx indicates the function that should be used to reduce
        # state from multiple processes
        self.iou_thr = iou_thr
        self.prob_thr = prob_thr
        self.img_size = img_size
        self.use_ddp = dist_sync_on_step
        # Names of the CSVs seen so far; the matching per-CSV counter states
        # (<csv>_gt, <csv>_tp, ...) are registered lazily in `update`.
        self.add_state("csv_files", default=[], dist_reduce_fx="cat")

    def update(self, preds: torch.Tensor, target: torch.Tensor):
        """Accumulate region-level statistics for one batch.

        NOTE(review): despite the ``torch.Tensor`` annotations, both arguments
        are unpacked as tuples below — confirm the intended interface.
        """
        logit_seg, _ = preds
        _, mask, mask_cls, _, img_path, _ = target

        assert logit_seg.shape == mask.shape

        pred_seg = torch.sigmoid(logit_seg).detach().cpu().numpy()
        gt_seg = mask.detach().cpu().numpy()
        gt_cls = mask_cls.detach().cpu().numpy()[:, 0].tolist()

        pred_seg = pred_seg.astype("float32")
        for idx, file_path in enumerate(img_path):
            # Resize both maps to the evaluation resolution; nearest-neighbour
            # interpolation for the gt mask so its labels stay binary.
            pred = cv2.resize(pred_seg[idx][0], (self.img_size, self.img_size))
            pred = np.expand_dims(pred, 0)
            gt = cv2.resize(
                gt_seg[idx][0],
                (self.img_size, self.img_size),
                interpolation=cv2.INTER_NEAREST,
            )
            gt = np.expand_dims(gt, 0)

            # Case-level positive/negative flags for this sample
            # (gt_cls == 1.0 marks a positive case).
            gt_c = gt_cls[idx]
            is_p = int(gt_c == 1.0)
            is_n = 1 - is_p

            gt_nums_, pred_nums_, tp_nums_, fp_nums_ = evaluation(
                pred, gt, iou_th=self.iou_thr, prob_ths=[self.prob_thr]
            )

            # The CSV (dataset) name is recovered from the image path.
            # csv = file_path.split("/")[5]
            csv = file_path.split("png_1024/")[1].split("/")[0]
            if not hasattr(self, f"{csv}_gt"):
                # First sample from this CSV: register its counter states so
                # they participate in DDP reduction.
                self.csv_files += [csv]
                self.add_state(f"{csv}_gt", default=Tensor(0), dist_reduce_fx="sum")
                self.add_state(f"{csv}_pred", default=Tensor(0), dist_reduce_fx="sum")
                self.add_state(f"{csv}_tp", default=Tensor(0), dist_reduce_fx="sum")
                self.add_state(f"{csv}_fp", default=Tensor(0), dist_reduce_fx="sum")
                self.add_state(f"{csv}_pos", default=Tensor(0), dist_reduce_fx="sum")
                self.add_state(
                    f"{csv}_neg", default=torch.tensor(0), dist_reduce_fx="sum"
                )

            # TODO: Need to be change if num_class > 1
            # FIXME: awkward format..
            setattr(self, f"{csv}_gt", getattr(self, f"{csv}_gt") + gt_nums_[0])
            setattr(
                self, f"{csv}_pred", getattr(self, f"{csv}_pred") + pred_nums_[0, 0]
            )
            setattr(self, f"{csv}_tp", getattr(self, f"{csv}_tp") + tp_nums_[0, 0])
            setattr(self, f"{csv}_fp", getattr(self, f"{csv}_fp") + fp_nums_[0, 0])
            setattr(self, f"{csv}_pos", getattr(self, f"{csv}_pos") + is_p)
            setattr(self, f"{csv}_neg", getattr(self, f"{csv}_neg") + is_n)

    def update_each(self, preds: torch.Tensor, target: torch.Tensor):
        """Alias of ``update``."""
        self.update(preds, target)

    def compute(self):
        """Aggregate the counters of all CSVs into overall precision/recall/F1.

        Precision scales the false positives by the positive:negative case
        ratio; ``myf1`` is the arithmetic mean of precision and recall.
        """
        gt = 0
        tp = 0
        fp = 0
        pos = 0
        neg = 0
        for csv in self.csv_files:
            gt += getattr(self, f"{csv}_gt").item()
            tp += getattr(self, f"{csv}_tp").item()
            fp += getattr(self, f"{csv}_fp").item()
            pos += getattr(self, f"{csv}_pos").item()
            neg += getattr(self, f"{csv}_neg").item()

        # 1e-5 terms guard against division by zero when counters are empty.
        pre = tp / (tp + fp * (pos / (neg + 1e-5)) + 1e-5)
        rec = tp / (gt + 1e-5)
        f1 = 2 * (pre * rec) / (pre + rec + 1e-5)
        myf1 = (pre + rec) / 2.0

        lesion_metric_dict = {
            "pre": pre,
            "rec": rec,
            "f1": f1,
            "myf1": myf1,
        }

        # FIXME: DDP Error: https://github.com/PyTorchLightning/pytorch-lightning/discussions/2529
        # Tensors must be CUDA and dense
        # if self.use_ddp:
        #     lesion_metric_dict = torch.FloatTensor([myf1], device=self.device)

        return lesion_metric_dict

    def compute_each(self):
        """Return {csv_name: metric dict} with per-CSV counts, precision,
        recall, F1 and FPPI (false positives per image)."""
        metric_dict_each_csv = {}
        for csv in self.csv_files:
            gt = getattr(self, f"{csv}_gt").item()
            tp = getattr(self, f"{csv}_tp").item()
            fp = getattr(self, f"{csv}_fp").item()
            pos = getattr(self, f"{csv}_pos").item()
            neg = getattr(self, f"{csv}_neg").item()

            # Same formulas as `compute`, restricted to a single CSV.
            pre = tp / (tp + fp * (pos / (neg + 1e-5)) + 1e-5)
            rec = tp / (gt + 1e-5)
            f1 = 2 * (pre * rec) / (pre + rec + 1e-5)
            fppi = fp / (pos + neg + 1e-5)
            # myf1 = (pre + rec) / 2.0

            lesion_metric_dict = {
                "gt": gt,
                "pos": pos,
                "neg": neg,
                "pre": pre,
                "rec": rec,
                "f1": f1,
                "fppi": fppi
                # "myf1": myf1,
            }

            metric_dict_each_csv[csv] = lesion_metric_dict

        return metric_dict_each_csv
# Helper functions
def calc_iou(bbox_a, bbox_b):
    """Return the intersection-over-union of two axis-aligned boxes.

    :param bbox_a: bbox list [min_y, min_x, max_y, max_x]
    :param bbox_b: bbox list [min_y, min_x, max_y, max_x]
    :return: IoU in [0, 1]; 0.0 when the union is empty (both boxes degenerate)
    """
    size_a = (bbox_a[2] - bbox_a[0]) * (bbox_a[3] - bbox_a[1])
    size_b = (bbox_b[2] - bbox_b[0]) * (bbox_b[3] - bbox_b[1])

    # Intersection rectangle, clamped to zero extent when the boxes
    # do not overlap.
    min_ab_y = max(bbox_a[0], bbox_b[0])
    min_ab_x = max(bbox_a[1], bbox_b[1])
    max_ab_y = min(bbox_a[2], bbox_b[2])
    max_ab_x = min(bbox_a[3], bbox_b[3])

    inter_ab = max(0, max_ab_y - min_ab_y) * max(0, max_ab_x - min_ab_x)

    union = size_a + size_b - inter_ab
    if union == 0:
        # Both boxes have zero area; avoid ZeroDivisionError.
        return 0.0
    return inter_ab / union
def evaluation(pred, gt, iou_th=0.15, prob_ths=[0.5]):
    """Count region-level detection statistics for one sample.

    :param pred: prediction seg map, shape (1, num_classes, H, W) or
        (num_classes, H, W)
    :param gt: ground-truth seg map, same shape convention as ``pred``
    :param iou_th: bbox-IoU threshold for matching a prediction to a gt region
    :param prob_ths: probability thresholds used to binarize ``pred``
    :return: tuple ``(gt_nums, pred_nums, tp_nums, fp_nums)`` where
        ``gt_nums`` has shape (num_classes,) and the other three have shape
        (len(prob_ths), num_classes)

    Assumes batch_size == 1 (``skimage.measure.regionprops`` only accepts a
    2-D label image).  Ground-truth regions smaller than a 20x20-pixel bbox
    on a 2048x2048 image (i.e. 400 pixels, scaled with the actual image
    size) are excluded from evaluation.
    NOTE(review): the exclusion rule assumes ~0.2 mm pixel spacing; the true
    spacing depends on the detector — confirm against the image metadata.
    """
    if len(pred.shape) > 3:
        # Strip the batch dimension.
        pred = pred[0]
        gt = gt[0]

    num_classes = pred.shape[0]
    image_size = gt.shape[2]

    # Connected components of the ground truth, per class.
    gt_regions = [
        skimage.measure.regionprops(skimage.measure.label(gt[c, :, :]))
        for c in range(num_classes)
    ]
    # Drop tiny gt regions (see size rule in the docstring).
    for c in range(num_classes):
        gt_regions[c] = [
            r for r in gt_regions[c] if r.area > (20 * (image_size / 2048)) ** 2
        ]

    # Connected components of the thresholded prediction,
    # indexed as pred_regions[threshold_index][class].
    pred_regions = [
        [
            skimage.measure.regionprops(skimage.measure.label(pred[c, :, :] > th))
            for c in range(num_classes)
        ]
        for th in prob_ths
    ]

    # Initialize counters.
    gt_nums = np.array([len(gt_regions[c]) for c in range(num_classes)])
    pred_nums = np.array(
        [
            [len(pred_regions[thi][c]) for c in range(num_classes)]
            for thi in range(len(prob_ths))
        ]
    )
    tp_nums = np.zeros((len(prob_ths), num_classes))
    fp_nums = pred_nums.copy()  # .copy() so fp_nums does not alias pred_nums

    # Pairwise gt/pred bbox-IoU matrix per (class, threshold).
    for c in range(num_classes):
        for thi in range(len(prob_ths)):
            if (gt_nums[c] == 0) or (pred_nums[thi][c] == 0):
                continue

            iou_matrix = np.zeros((gt_nums[c], pred_nums[thi][c]))
            for gi, gr in enumerate(gt_regions[c]):
                for pi, pr in enumerate(pred_regions[thi][c]):
                    iou_matrix[gi, pi] = calc_iou(gr.bbox, pr.bbox)

            # A gt region is a TP when at least one prediction reaches the
            # IoU threshold; every prediction matched to some gt region is
            # removed from the FP count.  Both tests use >= so that a match
            # at exactly iou_th is counted consistently (the original used
            # ">" for the FP test, leaving threshold-exact matches counted
            # as both TP-matched and FP).
            tp_nums[thi][c] = np.sum(np.any((iou_matrix >= iou_th), axis=1))
            fp_nums[thi][c] -= np.sum(np.any((iou_matrix >= iou_th), axis=0))

    return gt_nums, pred_nums, tp_nums, fp_nums
|
flexible
|
{
"blob_id": "8d3f8872a3d5c4351551dc2d46839763d28ebd70",
"index": 3586,
"step-1": "<mask token>\n\n\nclass SegMetric(Metric):\n\n def __init__(self, iou_thr, prob_thr, img_size, dist_sync_on_step=False):\n super().__init__(dist_sync_on_step=dist_sync_on_step)\n self.iou_thr = iou_thr\n self.prob_thr = prob_thr\n self.img_size = img_size\n self.use_ddp = dist_sync_on_step\n self.add_state('csv_files', default=[], dist_reduce_fx='cat')\n <mask token>\n\n def update_each(self, preds: torch.Tensor, target: torch.Tensor):\n self.update(preds, target)\n\n def compute(self):\n gt = 0\n tp = 0\n fp = 0\n pos = 0\n neg = 0\n for csv in self.csv_files:\n gt += getattr(self, f'{csv}_gt').item()\n tp += getattr(self, f'{csv}_tp').item()\n fp += getattr(self, f'{csv}_fp').item()\n pos += getattr(self, f'{csv}_pos').item()\n neg += getattr(self, f'{csv}_neg').item()\n pre = tp / (tp + fp * (pos / (neg + 1e-05)) + 1e-05)\n rec = tp / (gt + 1e-05)\n f1 = 2 * (pre * rec) / (pre + rec + 1e-05)\n myf1 = (pre + rec) / 2.0\n lesion_metric_dict = {'pre': pre, 'rec': rec, 'f1': f1, 'myf1': myf1}\n return lesion_metric_dict\n\n def compute_each(self):\n metric_dict_each_csv = {}\n for csv in self.csv_files:\n gt = getattr(self, f'{csv}_gt').item()\n tp = getattr(self, f'{csv}_tp').item()\n fp = getattr(self, f'{csv}_fp').item()\n pos = getattr(self, f'{csv}_pos').item()\n neg = getattr(self, f'{csv}_neg').item()\n pre = tp / (tp + fp * (pos / (neg + 1e-05)) + 1e-05)\n rec = tp / (gt + 1e-05)\n f1 = 2 * (pre * rec) / (pre + rec + 1e-05)\n fppi = fp / (pos + neg + 1e-05)\n lesion_metric_dict = {'gt': gt, 'pos': pos, 'neg': neg, 'pre':\n pre, 'rec': rec, 'f1': f1, 'fppi': fppi}\n metric_dict_each_csv[csv] = lesion_metric_dict\n return metric_dict_each_csv\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SegMetric(Metric):\n\n def __init__(self, iou_thr, prob_thr, img_size, dist_sync_on_step=False):\n super().__init__(dist_sync_on_step=dist_sync_on_step)\n self.iou_thr = iou_thr\n self.prob_thr = prob_thr\n self.img_size = img_size\n self.use_ddp = dist_sync_on_step\n self.add_state('csv_files', default=[], dist_reduce_fx='cat')\n\n def update(self, preds: torch.Tensor, target: torch.Tensor):\n logit_seg, _ = preds\n _, mask, mask_cls, _, img_path, _ = target\n assert logit_seg.shape == mask.shape\n pred_seg = torch.sigmoid(logit_seg).detach().cpu().numpy()\n gt_seg = mask.detach().cpu().numpy()\n gt_cls = mask_cls.detach().cpu().numpy()[:, 0].tolist()\n pred_seg = pred_seg.astype('float32')\n for idx, file_path in enumerate(img_path):\n pred = cv2.resize(pred_seg[idx][0], (self.img_size, self.img_size))\n pred = np.expand_dims(pred, 0)\n gt = cv2.resize(gt_seg[idx][0], (self.img_size, self.img_size),\n interpolation=cv2.INTER_NEAREST)\n gt = np.expand_dims(gt, 0)\n gt_c = gt_cls[idx]\n is_p = int(gt_c == 1.0)\n is_n = 1 - is_p\n gt_nums_, pred_nums_, tp_nums_, fp_nums_ = evaluation(pred, gt,\n iou_th=self.iou_thr, prob_ths=[self.prob_thr])\n csv = file_path.split('png_1024/')[1].split('/')[0]\n if not hasattr(self, f'{csv}_gt'):\n self.csv_files += [csv]\n self.add_state(f'{csv}_gt', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_pred', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_tp', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_fp', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_pos', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_neg', default=torch.tensor(0),\n dist_reduce_fx='sum')\n setattr(self, f'{csv}_gt', getattr(self, f'{csv}_gt') + gt_nums_[0]\n )\n setattr(self, f'{csv}_pred', getattr(self, f'{csv}_pred') +\n pred_nums_[0, 0])\n setattr(self, f'{csv}_tp', getattr(self, f'{csv}_tp') +\n tp_nums_[0, 0])\n 
setattr(self, f'{csv}_fp', getattr(self, f'{csv}_fp') +\n fp_nums_[0, 0])\n setattr(self, f'{csv}_pos', getattr(self, f'{csv}_pos') + is_p)\n setattr(self, f'{csv}_neg', getattr(self, f'{csv}_neg') + is_n)\n\n def update_each(self, preds: torch.Tensor, target: torch.Tensor):\n self.update(preds, target)\n\n def compute(self):\n gt = 0\n tp = 0\n fp = 0\n pos = 0\n neg = 0\n for csv in self.csv_files:\n gt += getattr(self, f'{csv}_gt').item()\n tp += getattr(self, f'{csv}_tp').item()\n fp += getattr(self, f'{csv}_fp').item()\n pos += getattr(self, f'{csv}_pos').item()\n neg += getattr(self, f'{csv}_neg').item()\n pre = tp / (tp + fp * (pos / (neg + 1e-05)) + 1e-05)\n rec = tp / (gt + 1e-05)\n f1 = 2 * (pre * rec) / (pre + rec + 1e-05)\n myf1 = (pre + rec) / 2.0\n lesion_metric_dict = {'pre': pre, 'rec': rec, 'f1': f1, 'myf1': myf1}\n return lesion_metric_dict\n\n def compute_each(self):\n metric_dict_each_csv = {}\n for csv in self.csv_files:\n gt = getattr(self, f'{csv}_gt').item()\n tp = getattr(self, f'{csv}_tp').item()\n fp = getattr(self, f'{csv}_fp').item()\n pos = getattr(self, f'{csv}_pos').item()\n neg = getattr(self, f'{csv}_neg').item()\n pre = tp / (tp + fp * (pos / (neg + 1e-05)) + 1e-05)\n rec = tp / (gt + 1e-05)\n f1 = 2 * (pre * rec) / (pre + rec + 1e-05)\n fppi = fp / (pos + neg + 1e-05)\n lesion_metric_dict = {'gt': gt, 'pos': pos, 'neg': neg, 'pre':\n pre, 'rec': rec, 'f1': f1, 'fppi': fppi}\n metric_dict_each_csv[csv] = lesion_metric_dict\n return metric_dict_each_csv\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SegMetric(Metric):\n\n def __init__(self, iou_thr, prob_thr, img_size, dist_sync_on_step=False):\n super().__init__(dist_sync_on_step=dist_sync_on_step)\n self.iou_thr = iou_thr\n self.prob_thr = prob_thr\n self.img_size = img_size\n self.use_ddp = dist_sync_on_step\n self.add_state('csv_files', default=[], dist_reduce_fx='cat')\n\n def update(self, preds: torch.Tensor, target: torch.Tensor):\n logit_seg, _ = preds\n _, mask, mask_cls, _, img_path, _ = target\n assert logit_seg.shape == mask.shape\n pred_seg = torch.sigmoid(logit_seg).detach().cpu().numpy()\n gt_seg = mask.detach().cpu().numpy()\n gt_cls = mask_cls.detach().cpu().numpy()[:, 0].tolist()\n pred_seg = pred_seg.astype('float32')\n for idx, file_path in enumerate(img_path):\n pred = cv2.resize(pred_seg[idx][0], (self.img_size, self.img_size))\n pred = np.expand_dims(pred, 0)\n gt = cv2.resize(gt_seg[idx][0], (self.img_size, self.img_size),\n interpolation=cv2.INTER_NEAREST)\n gt = np.expand_dims(gt, 0)\n gt_c = gt_cls[idx]\n is_p = int(gt_c == 1.0)\n is_n = 1 - is_p\n gt_nums_, pred_nums_, tp_nums_, fp_nums_ = evaluation(pred, gt,\n iou_th=self.iou_thr, prob_ths=[self.prob_thr])\n csv = file_path.split('png_1024/')[1].split('/')[0]\n if not hasattr(self, f'{csv}_gt'):\n self.csv_files += [csv]\n self.add_state(f'{csv}_gt', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_pred', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_tp', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_fp', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_pos', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_neg', default=torch.tensor(0),\n dist_reduce_fx='sum')\n setattr(self, f'{csv}_gt', getattr(self, f'{csv}_gt') + gt_nums_[0]\n )\n setattr(self, f'{csv}_pred', getattr(self, f'{csv}_pred') +\n pred_nums_[0, 0])\n setattr(self, f'{csv}_tp', getattr(self, f'{csv}_tp') +\n tp_nums_[0, 0])\n 
setattr(self, f'{csv}_fp', getattr(self, f'{csv}_fp') +\n fp_nums_[0, 0])\n setattr(self, f'{csv}_pos', getattr(self, f'{csv}_pos') + is_p)\n setattr(self, f'{csv}_neg', getattr(self, f'{csv}_neg') + is_n)\n\n def update_each(self, preds: torch.Tensor, target: torch.Tensor):\n self.update(preds, target)\n\n def compute(self):\n gt = 0\n tp = 0\n fp = 0\n pos = 0\n neg = 0\n for csv in self.csv_files:\n gt += getattr(self, f'{csv}_gt').item()\n tp += getattr(self, f'{csv}_tp').item()\n fp += getattr(self, f'{csv}_fp').item()\n pos += getattr(self, f'{csv}_pos').item()\n neg += getattr(self, f'{csv}_neg').item()\n pre = tp / (tp + fp * (pos / (neg + 1e-05)) + 1e-05)\n rec = tp / (gt + 1e-05)\n f1 = 2 * (pre * rec) / (pre + rec + 1e-05)\n myf1 = (pre + rec) / 2.0\n lesion_metric_dict = {'pre': pre, 'rec': rec, 'f1': f1, 'myf1': myf1}\n return lesion_metric_dict\n\n def compute_each(self):\n metric_dict_each_csv = {}\n for csv in self.csv_files:\n gt = getattr(self, f'{csv}_gt').item()\n tp = getattr(self, f'{csv}_tp').item()\n fp = getattr(self, f'{csv}_fp').item()\n pos = getattr(self, f'{csv}_pos').item()\n neg = getattr(self, f'{csv}_neg').item()\n pre = tp / (tp + fp * (pos / (neg + 1e-05)) + 1e-05)\n rec = tp / (gt + 1e-05)\n f1 = 2 * (pre * rec) / (pre + rec + 1e-05)\n fppi = fp / (pos + neg + 1e-05)\n lesion_metric_dict = {'gt': gt, 'pos': pos, 'neg': neg, 'pre':\n pre, 'rec': rec, 'f1': f1, 'fppi': fppi}\n metric_dict_each_csv[csv] = lesion_metric_dict\n return metric_dict_each_csv\n\n\ndef calc_iou(bbox_a, bbox_b):\n \"\"\"\n :param a: bbox list [min_y, min_x, max_y, max_x]\n :param b: bbox list [min_y, min_x, max_y, max_x]\n :return:\n \"\"\"\n size_a = (bbox_a[2] - bbox_a[0]) * (bbox_a[3] - bbox_a[1])\n size_b = (bbox_b[2] - bbox_b[0]) * (bbox_b[3] - bbox_b[1])\n min_ab_y = max(bbox_a[0], bbox_b[0])\n min_ab_x = max(bbox_a[1], bbox_b[1])\n max_ab_y = min(bbox_a[2], bbox_b[2])\n max_ab_x = min(bbox_a[3], bbox_b[3])\n inter_ab = max(0, max_ab_y - min_ab_y) * 
max(0, max_ab_x - min_ab_x)\n return inter_ab / (size_a + size_b - inter_ab)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass SegMetric(Metric):\n\n def __init__(self, iou_thr, prob_thr, img_size, dist_sync_on_step=False):\n super().__init__(dist_sync_on_step=dist_sync_on_step)\n self.iou_thr = iou_thr\n self.prob_thr = prob_thr\n self.img_size = img_size\n self.use_ddp = dist_sync_on_step\n self.add_state('csv_files', default=[], dist_reduce_fx='cat')\n\n def update(self, preds: torch.Tensor, target: torch.Tensor):\n logit_seg, _ = preds\n _, mask, mask_cls, _, img_path, _ = target\n assert logit_seg.shape == mask.shape\n pred_seg = torch.sigmoid(logit_seg).detach().cpu().numpy()\n gt_seg = mask.detach().cpu().numpy()\n gt_cls = mask_cls.detach().cpu().numpy()[:, 0].tolist()\n pred_seg = pred_seg.astype('float32')\n for idx, file_path in enumerate(img_path):\n pred = cv2.resize(pred_seg[idx][0], (self.img_size, self.img_size))\n pred = np.expand_dims(pred, 0)\n gt = cv2.resize(gt_seg[idx][0], (self.img_size, self.img_size),\n interpolation=cv2.INTER_NEAREST)\n gt = np.expand_dims(gt, 0)\n gt_c = gt_cls[idx]\n is_p = int(gt_c == 1.0)\n is_n = 1 - is_p\n gt_nums_, pred_nums_, tp_nums_, fp_nums_ = evaluation(pred, gt,\n iou_th=self.iou_thr, prob_ths=[self.prob_thr])\n csv = file_path.split('png_1024/')[1].split('/')[0]\n if not hasattr(self, f'{csv}_gt'):\n self.csv_files += [csv]\n self.add_state(f'{csv}_gt', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_pred', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_tp', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_fp', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_pos', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_neg', default=torch.tensor(0),\n dist_reduce_fx='sum')\n setattr(self, f'{csv}_gt', getattr(self, f'{csv}_gt') + gt_nums_[0]\n )\n setattr(self, f'{csv}_pred', getattr(self, f'{csv}_pred') +\n pred_nums_[0, 0])\n setattr(self, f'{csv}_tp', getattr(self, f'{csv}_tp') +\n tp_nums_[0, 0])\n 
setattr(self, f'{csv}_fp', getattr(self, f'{csv}_fp') +\n fp_nums_[0, 0])\n setattr(self, f'{csv}_pos', getattr(self, f'{csv}_pos') + is_p)\n setattr(self, f'{csv}_neg', getattr(self, f'{csv}_neg') + is_n)\n\n def update_each(self, preds: torch.Tensor, target: torch.Tensor):\n self.update(preds, target)\n\n def compute(self):\n gt = 0\n tp = 0\n fp = 0\n pos = 0\n neg = 0\n for csv in self.csv_files:\n gt += getattr(self, f'{csv}_gt').item()\n tp += getattr(self, f'{csv}_tp').item()\n fp += getattr(self, f'{csv}_fp').item()\n pos += getattr(self, f'{csv}_pos').item()\n neg += getattr(self, f'{csv}_neg').item()\n pre = tp / (tp + fp * (pos / (neg + 1e-05)) + 1e-05)\n rec = tp / (gt + 1e-05)\n f1 = 2 * (pre * rec) / (pre + rec + 1e-05)\n myf1 = (pre + rec) / 2.0\n lesion_metric_dict = {'pre': pre, 'rec': rec, 'f1': f1, 'myf1': myf1}\n return lesion_metric_dict\n\n def compute_each(self):\n metric_dict_each_csv = {}\n for csv in self.csv_files:\n gt = getattr(self, f'{csv}_gt').item()\n tp = getattr(self, f'{csv}_tp').item()\n fp = getattr(self, f'{csv}_fp').item()\n pos = getattr(self, f'{csv}_pos').item()\n neg = getattr(self, f'{csv}_neg').item()\n pre = tp / (tp + fp * (pos / (neg + 1e-05)) + 1e-05)\n rec = tp / (gt + 1e-05)\n f1 = 2 * (pre * rec) / (pre + rec + 1e-05)\n fppi = fp / (pos + neg + 1e-05)\n lesion_metric_dict = {'gt': gt, 'pos': pos, 'neg': neg, 'pre':\n pre, 'rec': rec, 'f1': f1, 'fppi': fppi}\n metric_dict_each_csv[csv] = lesion_metric_dict\n return metric_dict_each_csv\n\n\ndef calc_iou(bbox_a, bbox_b):\n \"\"\"\n :param a: bbox list [min_y, min_x, max_y, max_x]\n :param b: bbox list [min_y, min_x, max_y, max_x]\n :return:\n \"\"\"\n size_a = (bbox_a[2] - bbox_a[0]) * (bbox_a[3] - bbox_a[1])\n size_b = (bbox_b[2] - bbox_b[0]) * (bbox_b[3] - bbox_b[1])\n min_ab_y = max(bbox_a[0], bbox_b[0])\n min_ab_x = max(bbox_a[1], bbox_b[1])\n max_ab_y = min(bbox_a[2], bbox_b[2])\n max_ab_x = min(bbox_a[3], bbox_b[3])\n inter_ab = max(0, max_ab_y - min_ab_y) * 
max(0, max_ab_x - min_ab_x)\n return inter_ab / (size_a + size_b - inter_ab)\n\n\ndef evaluation(pred, gt, iou_th=0.15, prob_ths=[0.5]):\n \"\"\"\n :param pred: Prediction Seg Map, shape = (1, num_classes, height, width)\n :param gt: Ground-truth Seg Map, shape = (1, num_classes, height, width)\n :param iou_th: Threshold for prediction and gt matching\n :return:\n gt_nums: Ground-truth region numbers\n pred_nums: Prediction region numbers\n tp_nums: True Positive region numbers\n fp_nums: False Positive region numbers\n # 필수 가정: batch_size=1 (regionprops 함수가 2차원 행렬에만 적용 가능함)\n # Region을 고려에서 제외하는 경우(2048x2048 이미지 기반, pixel spacing=0.2mm)\n # i) Region bbox 크기 < 400 pixels\n # ii) (현재 사용x) Region bbox 장축<4mm(20pixels), 단축<2mm(10 pixels)\n # issue: # 3. 영상사이즈는 디텍터 크기에 따라 달라질 수 있습니다. 완벽히 하기 위해선 pixel spacing 정보를 받아야 합니다.\n # # 따라서 영상 크기에 대해 기준이 변경되는 것은 현단계에서는 적용할 필요가 없어 보입니다.\n \"\"\"\n if len(pred.shape) > 3:\n pred = pred[0]\n gt = gt[0]\n num_classes = pred.shape[0]\n image_size = gt.shape[2]\n gt_regions = [skimage.measure.regionprops(skimage.measure.label(gt[c, :,\n :])) for c in range(num_classes)]\n for c in range(num_classes):\n gt_regions[c] = [r for r in gt_regions[c] if r.area > (20 * (\n image_size / 2048)) ** 2]\n pred_regions = [[skimage.measure.regionprops(skimage.measure.label(pred\n [c, :, :] > th)) for c in range(num_classes)] for th in prob_ths]\n gt_nums = np.array([len(gt_regions[c]) for c in range(num_classes)])\n pred_nums = np.array([[len(pred_regions[thi][c]) for c in range(\n num_classes)] for thi in range(len(prob_ths))])\n tp_nums = np.zeros((len(prob_ths), num_classes))\n fp_nums = pred_nums.copy()\n for c in range(num_classes):\n for thi in range(len(prob_ths)):\n if gt_nums[c] == 0 or pred_nums[thi][c] == 0:\n continue\n iou_matrix = np.zeros((gt_nums[c], pred_nums[thi][c]))\n for gi, gr in enumerate(gt_regions[c]):\n for pi, pr in enumerate(pred_regions[thi][c]):\n iou_matrix[gi, pi] = calc_iou(gr.bbox, pr.bbox)\n tp_nums[thi][c] = 
np.sum(np.any(iou_matrix >= iou_th, axis=1))\n fp_nums[thi][c] -= np.sum(np.any(iou_matrix > iou_th, axis=0))\n return gt_nums, pred_nums, tp_nums, fp_nums\n",
"step-5": "# For better usage on ddp\n\nimport torch\nfrom pytorch_lightning.metrics import Metric\nimport cv2\nimport numpy as np\nimport skimage\nimport torch.tensor as Tensor\n\n\nclass SegMetric(Metric):\n def __init__(self, iou_thr, prob_thr, img_size, dist_sync_on_step=False):\n super().__init__(dist_sync_on_step=dist_sync_on_step)\n # call `self.add_state`for every internal state that is needed for the metrics computations\n # dist_reduce_fx indicates the function that should be used to reduce\n # state from multiple processes\n self.iou_thr = iou_thr\n self.prob_thr = prob_thr\n self.img_size = img_size\n self.use_ddp = dist_sync_on_step\n self.add_state(\"csv_files\", default=[], dist_reduce_fx=\"cat\")\n\n def update(self, preds: torch.Tensor, target: torch.Tensor):\n logit_seg, _ = preds\n _, mask, mask_cls, _, img_path, _ = target\n\n assert logit_seg.shape == mask.shape\n\n pred_seg = torch.sigmoid(logit_seg).detach().cpu().numpy()\n gt_seg = mask.detach().cpu().numpy()\n gt_cls = mask_cls.detach().cpu().numpy()[:, 0].tolist()\n\n pred_seg = pred_seg.astype(\"float32\")\n for idx, file_path in enumerate(img_path):\n pred = cv2.resize(pred_seg[idx][0], (self.img_size, self.img_size))\n pred = np.expand_dims(pred, 0)\n gt = cv2.resize(\n gt_seg[idx][0],\n (self.img_size, self.img_size),\n interpolation=cv2.INTER_NEAREST,\n )\n gt = np.expand_dims(gt, 0)\n\n gt_c = gt_cls[idx]\n is_p = int(gt_c == 1.0)\n is_n = 1 - is_p\n\n gt_nums_, pred_nums_, tp_nums_, fp_nums_ = evaluation(\n pred, gt, iou_th=self.iou_thr, prob_ths=[self.prob_thr]\n )\n\n # csv = file_path.split(\"/\")[5]\n csv = file_path.split(\"png_1024/\")[1].split(\"/\")[0]\n if not hasattr(self, f\"{csv}_gt\"):\n self.csv_files += [csv]\n self.add_state(f\"{csv}_gt\", default=Tensor(0), dist_reduce_fx=\"sum\")\n self.add_state(f\"{csv}_pred\", default=Tensor(0), dist_reduce_fx=\"sum\")\n self.add_state(f\"{csv}_tp\", default=Tensor(0), dist_reduce_fx=\"sum\")\n self.add_state(f\"{csv}_fp\", 
default=Tensor(0), dist_reduce_fx=\"sum\")\n self.add_state(f\"{csv}_pos\", default=Tensor(0), dist_reduce_fx=\"sum\")\n self.add_state(\n f\"{csv}_neg\", default=torch.tensor(0), dist_reduce_fx=\"sum\"\n )\n\n # TODO: Need to be change if num_class > 1\n # FIXME: 몬 생긴 포맷..\n setattr(self, f\"{csv}_gt\", getattr(self, f\"{csv}_gt\") + gt_nums_[0])\n setattr(\n self, f\"{csv}_pred\", getattr(self, f\"{csv}_pred\") + pred_nums_[0, 0]\n )\n setattr(self, f\"{csv}_tp\", getattr(self, f\"{csv}_tp\") + tp_nums_[0, 0])\n setattr(self, f\"{csv}_fp\", getattr(self, f\"{csv}_fp\") + fp_nums_[0, 0])\n setattr(self, f\"{csv}_pos\", getattr(self, f\"{csv}_pos\") + is_p)\n setattr(self, f\"{csv}_neg\", getattr(self, f\"{csv}_neg\") + is_n)\n\n def update_each(self, preds: torch.Tensor, target: torch.Tensor):\n self.update(preds, target)\n\n def compute(self):\n gt = 0\n tp = 0\n fp = 0\n pos = 0\n neg = 0\n for csv in self.csv_files:\n gt += getattr(self, f\"{csv}_gt\").item()\n tp += getattr(self, f\"{csv}_tp\").item()\n fp += getattr(self, f\"{csv}_fp\").item()\n pos += getattr(self, f\"{csv}_pos\").item()\n neg += getattr(self, f\"{csv}_neg\").item()\n\n pre = tp / (tp + fp * (pos / (neg + 1e-5)) + 1e-5)\n rec = tp / (gt + 1e-5)\n f1 = 2 * (pre * rec) / (pre + rec + 1e-5)\n myf1 = (pre + rec) / 2.0\n\n lesion_metric_dict = {\n \"pre\": pre,\n \"rec\": rec,\n \"f1\": f1,\n \"myf1\": myf1,\n }\n\n # FIXME: DDP Error: https://github.com/PyTorchLightning/pytorch-lightning/discussions/2529\n # Tensors must be CUDA and dense\n # if self.use_ddp:\n # lesion_metric_dict = torch.FloatTensor([myf1], device=self.device)\n\n return lesion_metric_dict\n\n def compute_each(self):\n metric_dict_each_csv = {}\n for csv in self.csv_files:\n gt = getattr(self, f\"{csv}_gt\").item()\n tp = getattr(self, f\"{csv}_tp\").item()\n fp = getattr(self, f\"{csv}_fp\").item()\n pos = getattr(self, f\"{csv}_pos\").item()\n neg = getattr(self, f\"{csv}_neg\").item()\n\n pre = tp / (tp + fp * (pos / (neg + 
1e-5)) + 1e-5)\n rec = tp / (gt + 1e-5)\n f1 = 2 * (pre * rec) / (pre + rec + 1e-5)\n fppi = fp / (pos + neg + 1e-5)\n # myf1 = (pre + rec) / 2.0\n\n lesion_metric_dict = {\n \"gt\": gt,\n \"pos\": pos,\n \"neg\": neg,\n \"pre\": pre,\n \"rec\": rec,\n \"f1\": f1,\n \"fppi\": fppi\n # \"myf1\": myf1,\n }\n\n metric_dict_each_csv[csv] = lesion_metric_dict\n\n return metric_dict_each_csv\n\n\n# Helper functions\ndef calc_iou(bbox_a, bbox_b):\n \"\"\"\n :param a: bbox list [min_y, min_x, max_y, max_x]\n :param b: bbox list [min_y, min_x, max_y, max_x]\n :return:\n \"\"\"\n size_a = (bbox_a[2] - bbox_a[0]) * (bbox_a[3] - bbox_a[1])\n size_b = (bbox_b[2] - bbox_b[0]) * (bbox_b[3] - bbox_b[1])\n\n min_ab_y = max(bbox_a[0], bbox_b[0])\n min_ab_x = max(bbox_a[1], bbox_b[1])\n max_ab_y = min(bbox_a[2], bbox_b[2])\n max_ab_x = min(bbox_a[3], bbox_b[3])\n\n inter_ab = max(0, max_ab_y - min_ab_y) * max(0, max_ab_x - min_ab_x)\n\n return inter_ab / (size_a + size_b - inter_ab)\n\n\ndef evaluation(pred, gt, iou_th=0.15, prob_ths=[0.5]):\n \"\"\"\n :param pred: Prediction Seg Map, shape = (1, num_classes, height, width)\n :param gt: Ground-truth Seg Map, shape = (1, num_classes, height, width)\n :param iou_th: Threshold for prediction and gt matching\n :return:\n gt_nums: Ground-truth region numbers\n pred_nums: Prediction region numbers\n tp_nums: True Positive region numbers\n fp_nums: False Positive region numbers\n # 필수 가정: batch_size=1 (regionprops 함수가 2차원 행렬에만 적용 가능함)\n # Region을 고려에서 제외하는 경우(2048x2048 이미지 기반, pixel spacing=0.2mm)\n # i) Region bbox 크기 < 400 pixels\n # ii) (현재 사용x) Region bbox 장축<4mm(20pixels), 단축<2mm(10 pixels)\n # issue: # 3. 영상사이즈는 디텍터 크기에 따라 달라질 수 있습니다. 
완벽히 하기 위해선 pixel spacing 정보를 받아야 합니다.\n # # 따라서 영상 크기에 대해 기준이 변경되는 것은 현단계에서는 적용할 필요가 없어 보입니다.\n \"\"\"\n\n if len(pred.shape) > 3:\n pred = pred[0]\n gt = gt[0]\n\n num_classes = pred.shape[0]\n image_size = gt.shape[2]\n\n gt_regions = [\n skimage.measure.regionprops(skimage.measure.label(gt[c, :, :]))\n for c in range(num_classes)\n ]\n for c in range(num_classes):\n gt_regions[c] = [\n r for r in gt_regions[c] if r.area > (20 * (image_size / 2048)) ** 2\n ]\n\n pred_regions = [\n [\n skimage.measure.regionprops(skimage.measure.label(pred[c, :, :] > th))\n for c in range(num_classes)\n ]\n for th in prob_ths\n ] # shape - len(prob_th), num_classes\n\n # 초기화\n gt_nums = np.array([len(gt_regions[c]) for c in range(num_classes)])\n pred_nums = np.array(\n [\n [len(pred_regions[thi][c]) for c in range(num_classes)]\n for thi in range(len(prob_ths))\n ]\n )\n tp_nums = np.zeros((len(prob_ths), num_classes))\n fp_nums = pred_nums.copy() # .copy() 없으면 포인터가 같아짐\n\n # Gt-Pred Bbox Iou Matrix\n for c in range(num_classes):\n for thi in range(len(prob_ths)):\n if (gt_nums[c] == 0) or (pred_nums[thi][c] == 0): # np array 이상함;\n continue\n\n iou_matrix = np.zeros((gt_nums[c], pred_nums[thi][c]))\n for gi, gr in enumerate(gt_regions[c]):\n for pi, pr in enumerate(pred_regions[thi][c]):\n iou_matrix[gi, pi] = calc_iou(gr.bbox, pr.bbox)\n\n tp_nums[thi][c] = np.sum(np.any((iou_matrix >= iou_th), axis=1))\n fp_nums[thi][c] -= np.sum(np.any((iou_matrix > iou_th), axis=0))\n\n return gt_nums, pred_nums, tp_nums, fp_nums",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print('Hello World!')
print('2nd Test')
<|reserved_special_token_0|>
print(d)
print(d['a'])
<|reserved_special_token_0|>
random.seed(30)
<|reserved_special_token_0|>
print(r)
<|reserved_special_token_0|>
np.random.seed
for i in range(20):
newArray = list(set(np.random.random_integers(0, 10, size=6)))[:3]
print(newArray)
<|reserved_special_token_1|>
print('Hello World!')
print('2nd Test')
d = dict()
d['a'] = dict()
d['a']['b'] = 5
d['a']['c'] = 6
d['x'] = dict()
d['x']['y'] = 10
print(d)
print(d['a'])
<|reserved_special_token_0|>
random.seed(30)
r = random.randrange(0, 5)
print(r)
<|reserved_special_token_0|>
np.random.seed
for i in range(20):
newArray = list(set(np.random.random_integers(0, 10, size=6)))[:3]
print(newArray)
<|reserved_special_token_1|>
print('Hello World!')
print('2nd Test')
d = dict()
d['a'] = dict()
d['a']['b'] = 5
d['a']['c'] = 6
d['x'] = dict()
d['x']['y'] = 10
print(d)
print(d['a'])
import random
random.seed(30)
r = random.randrange(0, 5)
print(r)
import numpy as np
np.random.seed
for i in range(20):
newArray = list(set(np.random.random_integers(0, 10, size=6)))[:3]
print(newArray)
<|reserved_special_token_1|>
print('Hello World!')
print('2nd Test')


# Build a small nested dict and show whole-dict vs. sub-dict lookups.
d = dict()
d['a'] = dict()
d['a']['b'] = 5
d['a']['c'] = 6
d['x'] = dict()
d['x']['y'] = 10
print(d)

print(d['a'])


import random

# Seed the stdlib RNG so the draw below is reproducible.
random.seed(30)

r = random.randrange(0, 5)
print(r)


import numpy as np

# BUG FIX: `np.random.seed` was a bare attribute access (a no-op);
# actually call it so the NumPy draws below are reproducible too.
np.random.seed(30)
for i in range(20):
    # `np.random.random_integers(0, 10)` is deprecated and removed in modern
    # NumPy; `randint(0, 11)` draws the same inclusive range [0, 10].
    newArray = list(set(np.random.randint(0, 11, size=6)))[:3]
    print(newArray)
|
flexible
|
{
"blob_id": "e4a60008ca7d61d825b59e6202b40c6be02841cd",
"index": 2024,
"step-1": "<mask token>\n",
"step-2": "print('Hello World!')\nprint('2nd Test')\n<mask token>\nprint(d)\nprint(d['a'])\n<mask token>\nrandom.seed(30)\n<mask token>\nprint(r)\n<mask token>\nnp.random.seed\nfor i in range(20):\n newArray = list(set(np.random.random_integers(0, 10, size=6)))[:3]\n print(newArray)\n",
"step-3": "print('Hello World!')\nprint('2nd Test')\nd = dict()\nd['a'] = dict()\nd['a']['b'] = 5\nd['a']['c'] = 6\nd['x'] = dict()\nd['x']['y'] = 10\nprint(d)\nprint(d['a'])\n<mask token>\nrandom.seed(30)\nr = random.randrange(0, 5)\nprint(r)\n<mask token>\nnp.random.seed\nfor i in range(20):\n newArray = list(set(np.random.random_integers(0, 10, size=6)))[:3]\n print(newArray)\n",
"step-4": "print('Hello World!')\nprint('2nd Test')\nd = dict()\nd['a'] = dict()\nd['a']['b'] = 5\nd['a']['c'] = 6\nd['x'] = dict()\nd['x']['y'] = 10\nprint(d)\nprint(d['a'])\nimport random\nrandom.seed(30)\nr = random.randrange(0, 5)\nprint(r)\nimport numpy as np\nnp.random.seed\nfor i in range(20):\n newArray = list(set(np.random.random_integers(0, 10, size=6)))[:3]\n print(newArray)\n",
"step-5": "print('Hello World!')\nprint('2nd Test')\n\n\n\nd = dict()\nd['a'] = dict()\nd['a']['b'] = 5\nd['a']['c'] = 6\nd['x'] = dict()\nd['x']['y'] = 10\nprint(d)\n\nprint(d['a'])\n\n\nimport random\nrandom.seed(30)\n\nr = random.randrange(0,5)\nprint(r)\n\n\nimport numpy as np\nnp.random.seed\nfor i in range(20):\n newArray = list(set(np.random.random_integers(0, 10, size=(6))))[:3]\n print(newArray)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def func(n):
name = n
print(name)
def func1():
nonlocal name
name = 'xiaohong'
print(name)
func1()
print(name)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def func(n):
name = n
print(name)
def func1():
nonlocal name
name = 'xiaohong'
print(name)
func1()
print(name)
func('lisi')
<|reserved_special_token_1|>
name = ['zhangsan']
def func(n):
name = n
print(name)
def func1():
nonlocal name
name = 'xiaohong'
print(name)
func1()
print(name)
func('lisi')
|
flexible
|
{
"blob_id": "b04aef64dc0485d9112a40e00d178042833a9ddd",
"index": 4294,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef func(n):\n name = n\n print(name)\n\n def func1():\n nonlocal name\n name = 'xiaohong'\n print(name)\n func1()\n print(name)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef func(n):\n name = n\n print(name)\n\n def func1():\n nonlocal name\n name = 'xiaohong'\n print(name)\n func1()\n print(name)\n\n\nfunc('lisi')\n",
"step-4": "name = ['zhangsan']\n\n\ndef func(n):\n name = n\n print(name)\n\n def func1():\n nonlocal name\n name = 'xiaohong'\n print(name)\n func1()\n print(name)\n\n\nfunc('lisi')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from ethereum.common import mk_transaction_sha, mk_receipt_sha
from ethereum.exceptions import InsufficientBalance, BlockGasLimitReached, \
InsufficientStartGas, InvalidNonce, UnsignedTransaction
from ethereum.messages import apply_transaction
from ethereum.slogging import get_logger
from ethereum.utils import encode_hex
from sharding.receipt_consuming_tx_utils import apply_shard_transaction
from sharding.collation import Collation, CollationHeader
# Module-level logger; output is controlled by ethereum.slogging configuration.
log = get_logger('sharding.shard_state_transition')
def mk_collation_from_prevstate(shard_chain, state, coinbase):
    """Build an empty collation on top of the given shard state.

    Mirrors ethereum.common.mk_block_from_prevstate: the new collation
    records the shard id, the pre-state trie root and the coinbase, and
    starts with an empty transaction list.
    """
    collation = Collation(CollationHeader())
    header = collation.header
    header.shard_id = shard_chain.shard_id
    header.prev_state_root = state.trie.root_hash
    header.coinbase = coinbase
    collation.transactions = []
    return collation
def add_transactions(shard_state, collation, txqueue, shard_id, min_gasprice=0, mainchain_state=None):
    """Drain the transaction queue into *collation*.

    Mirrors ethereum.common.add_transactions: transactions are popped
    while they fit into the remaining gas budget; ones that fail to
    apply are logged and dropped rather than aborting the build.
    """
    if not txqueue:
        return
    already_included = len(collation.transactions)
    log.info('Adding transactions, %d in txqueue, %d dunkles' %
             (len(txqueue.txs), already_included))
    while True:
        tx = txqueue.pop_transaction(
            max_gas=shard_state.gas_limit - shard_state.gas_used,
            min_gasprice=min_gasprice)
        if tx is None:
            break
        try:
            apply_shard_transaction(mainchain_state, shard_state, shard_id, tx)
        except (InsufficientBalance, BlockGasLimitReached, InsufficientStartGas,
                InvalidNonce, UnsignedTransaction) as exc:
            # Best-effort: a transaction that cannot be applied is skipped.
            log.info(str(exc))
        else:
            collation.transactions.append(tx)
    log.info('Added %d transactions' % (len(collation.transactions) - already_included))
def update_collation_env_variables(state, collation):
    """Copy collation-scoped environment values onto *state*.

    (refer to ethereum.common.update_block_env_variables)
    Currently only the coinbase is propagated.
    """
    state.block_coinbase = collation.header.coinbase
def set_execution_results(state, collation):
    """Seal the execution results into the collation header.

    Mirrors ethereum.pow.common.set_execution_results: records the
    receipt root, the transaction-list root and the post-state root.
    """
    header = collation.header
    header.receipts_root = mk_receipt_sha(state.receipts)
    header.tx_list_root = mk_transaction_sha(collation.transactions)
    # The trie root is only valid after pending writes are committed.
    state.commit()
    header.post_state_root = state.trie.root_hash
    # TODO: gas_used / bloom are not tracked in basic sharding yet.
    log.info('Collation pre-sealed, %d gas used' % state.gas_used)
def validate_transaction_tree(collation):
    """Check the header's tx_list_root against the transaction list.

    Mirrors ethereum.common.validate_transaction_tree; raises ValueError
    on a mismatch and returns True otherwise.
    """
    computed_root = mk_transaction_sha(collation.transactions)
    if collation.header.tx_list_root == computed_root:
        return True
    raise ValueError("Transaction root mismatch: header %s computed %s, %d transactions" %
                     (encode_hex(collation.header.tx_list_root),
                      encode_hex(computed_root),
                      len(collation.transactions)))
def verify_execution_results(state, collation):
    """Re-check the roots recorded in the collation header against *state*.

    Mirrors ethereum.common.verify_execution_results; raises ValueError
    on any mismatch and returns True when everything agrees.
    """
    # Commit first so the trie root reflects all pending writes.
    state.commit()
    validate_transaction_tree(collation)
    if collation.header.post_state_root != state.trie.root_hash:
        raise ValueError('State root mismatch: header %s computed %s' %
                         (encode_hex(collation.header.post_state_root),
                          encode_hex(state.trie.root_hash)))
    computed_receipts_root = mk_receipt_sha(state.receipts)
    if collation.header.receipts_root != computed_receipts_root:
        raise ValueError('Receipt root mismatch: header %s computed %s, computed %d, %d receipts' %
                         (encode_hex(collation.header.receipts_root),
                          encode_hex(computed_receipts_root),
                          state.gas_used, len(state.receipts)))
    return True
def finalize(state, coinbase):
    """Credit the collator reward to *coinbase*.

    Mirrors ethereum.pow.consensus.finalize; the reward amount comes
    from the COLLATOR_REWARD entry of the state's config.
    """
    reward = int(state.config['COLLATOR_REWARD'])
    state.delta_balance(coinbase, reward)
|
normal
|
{
"blob_id": "e364ba45513167966fe50e31a01f552ccedec452",
"index": 6552,
"step-1": "<mask token>\n\n\ndef add_transactions(shard_state, collation, txqueue, shard_id,\n min_gasprice=0, mainchain_state=None):\n \"\"\"Add transactions to a collation\n (refer to ethereum.common.add_transactions)\n \"\"\"\n if not txqueue:\n return\n pre_txs = len(collation.transactions)\n log.info('Adding transactions, %d in txqueue, %d dunkles' % (len(\n txqueue.txs), pre_txs))\n while 1:\n tx = txqueue.pop_transaction(max_gas=shard_state.gas_limit -\n shard_state.gas_used, min_gasprice=min_gasprice)\n if tx is None:\n break\n try:\n apply_shard_transaction(mainchain_state, shard_state, shard_id, tx)\n collation.transactions.append(tx)\n except (InsufficientBalance, BlockGasLimitReached,\n InsufficientStartGas, InvalidNonce, UnsignedTransaction) as e:\n log.info(str(e))\n pass\n log.info('Added %d transactions' % (len(collation.transactions) - pre_txs))\n\n\ndef update_collation_env_variables(state, collation):\n \"\"\"Update collation variables into the state\n (refer to ethereum.common.update_block_env_variables)\n \"\"\"\n state.block_coinbase = collation.header.coinbase\n\n\ndef set_execution_results(state, collation):\n \"\"\"Set state root, receipt root, etc\n (ethereum.pow.common.set_execution_results)\n \"\"\"\n collation.header.receipts_root = mk_receipt_sha(state.receipts)\n collation.header.tx_list_root = mk_transaction_sha(collation.transactions)\n state.commit()\n collation.header.post_state_root = state.trie.root_hash\n log.info('Collation pre-sealed, %d gas used' % state.gas_used)\n\n\n<mask token>\n\n\ndef finalize(state, coinbase):\n \"\"\"Apply rewards and commit\n (refer to ethereum.pow.consensus.finalize)\n \"\"\"\n delta = int(state.config['COLLATOR_REWARD'])\n state.delta_balance(coinbase, delta)\n",
"step-2": "<mask token>\n\n\ndef add_transactions(shard_state, collation, txqueue, shard_id,\n min_gasprice=0, mainchain_state=None):\n \"\"\"Add transactions to a collation\n (refer to ethereum.common.add_transactions)\n \"\"\"\n if not txqueue:\n return\n pre_txs = len(collation.transactions)\n log.info('Adding transactions, %d in txqueue, %d dunkles' % (len(\n txqueue.txs), pre_txs))\n while 1:\n tx = txqueue.pop_transaction(max_gas=shard_state.gas_limit -\n shard_state.gas_used, min_gasprice=min_gasprice)\n if tx is None:\n break\n try:\n apply_shard_transaction(mainchain_state, shard_state, shard_id, tx)\n collation.transactions.append(tx)\n except (InsufficientBalance, BlockGasLimitReached,\n InsufficientStartGas, InvalidNonce, UnsignedTransaction) as e:\n log.info(str(e))\n pass\n log.info('Added %d transactions' % (len(collation.transactions) - pre_txs))\n\n\ndef update_collation_env_variables(state, collation):\n \"\"\"Update collation variables into the state\n (refer to ethereum.common.update_block_env_variables)\n \"\"\"\n state.block_coinbase = collation.header.coinbase\n\n\ndef set_execution_results(state, collation):\n \"\"\"Set state root, receipt root, etc\n (ethereum.pow.common.set_execution_results)\n \"\"\"\n collation.header.receipts_root = mk_receipt_sha(state.receipts)\n collation.header.tx_list_root = mk_transaction_sha(collation.transactions)\n state.commit()\n collation.header.post_state_root = state.trie.root_hash\n log.info('Collation pre-sealed, %d gas used' % state.gas_used)\n\n\ndef validate_transaction_tree(collation):\n \"\"\"Validate that the transaction list root is correct\n (refer to ethereum.common.validate_transaction_tree)\n \"\"\"\n if collation.header.tx_list_root != mk_transaction_sha(collation.\n transactions):\n raise ValueError(\n 'Transaction root mismatch: header %s computed %s, %d transactions'\n % (encode_hex(collation.header.tx_list_root), encode_hex(\n mk_transaction_sha(collation.transactions)), len(collation.\n 
transactions)))\n return True\n\n\n<mask token>\n\n\ndef finalize(state, coinbase):\n \"\"\"Apply rewards and commit\n (refer to ethereum.pow.consensus.finalize)\n \"\"\"\n delta = int(state.config['COLLATOR_REWARD'])\n state.delta_balance(coinbase, delta)\n",
"step-3": "<mask token>\n\n\ndef mk_collation_from_prevstate(shard_chain, state, coinbase):\n \"\"\"Make collation from previous state\n (refer to ethereum.common.mk_block_from_prevstate)\n \"\"\"\n collation = Collation(CollationHeader())\n collation.header.shard_id = shard_chain.shard_id\n collation.header.prev_state_root = state.trie.root_hash\n collation.header.coinbase = coinbase\n collation.transactions = []\n return collation\n\n\ndef add_transactions(shard_state, collation, txqueue, shard_id,\n min_gasprice=0, mainchain_state=None):\n \"\"\"Add transactions to a collation\n (refer to ethereum.common.add_transactions)\n \"\"\"\n if not txqueue:\n return\n pre_txs = len(collation.transactions)\n log.info('Adding transactions, %d in txqueue, %d dunkles' % (len(\n txqueue.txs), pre_txs))\n while 1:\n tx = txqueue.pop_transaction(max_gas=shard_state.gas_limit -\n shard_state.gas_used, min_gasprice=min_gasprice)\n if tx is None:\n break\n try:\n apply_shard_transaction(mainchain_state, shard_state, shard_id, tx)\n collation.transactions.append(tx)\n except (InsufficientBalance, BlockGasLimitReached,\n InsufficientStartGas, InvalidNonce, UnsignedTransaction) as e:\n log.info(str(e))\n pass\n log.info('Added %d transactions' % (len(collation.transactions) - pre_txs))\n\n\ndef update_collation_env_variables(state, collation):\n \"\"\"Update collation variables into the state\n (refer to ethereum.common.update_block_env_variables)\n \"\"\"\n state.block_coinbase = collation.header.coinbase\n\n\ndef set_execution_results(state, collation):\n \"\"\"Set state root, receipt root, etc\n (ethereum.pow.common.set_execution_results)\n \"\"\"\n collation.header.receipts_root = mk_receipt_sha(state.receipts)\n collation.header.tx_list_root = mk_transaction_sha(collation.transactions)\n state.commit()\n collation.header.post_state_root = state.trie.root_hash\n log.info('Collation pre-sealed, %d gas used' % state.gas_used)\n\n\ndef validate_transaction_tree(collation):\n 
\"\"\"Validate that the transaction list root is correct\n (refer to ethereum.common.validate_transaction_tree)\n \"\"\"\n if collation.header.tx_list_root != mk_transaction_sha(collation.\n transactions):\n raise ValueError(\n 'Transaction root mismatch: header %s computed %s, %d transactions'\n % (encode_hex(collation.header.tx_list_root), encode_hex(\n mk_transaction_sha(collation.transactions)), len(collation.\n transactions)))\n return True\n\n\n<mask token>\n\n\ndef finalize(state, coinbase):\n \"\"\"Apply rewards and commit\n (refer to ethereum.pow.consensus.finalize)\n \"\"\"\n delta = int(state.config['COLLATOR_REWARD'])\n state.delta_balance(coinbase, delta)\n",
"step-4": "<mask token>\n\n\ndef mk_collation_from_prevstate(shard_chain, state, coinbase):\n \"\"\"Make collation from previous state\n (refer to ethereum.common.mk_block_from_prevstate)\n \"\"\"\n collation = Collation(CollationHeader())\n collation.header.shard_id = shard_chain.shard_id\n collation.header.prev_state_root = state.trie.root_hash\n collation.header.coinbase = coinbase\n collation.transactions = []\n return collation\n\n\ndef add_transactions(shard_state, collation, txqueue, shard_id,\n min_gasprice=0, mainchain_state=None):\n \"\"\"Add transactions to a collation\n (refer to ethereum.common.add_transactions)\n \"\"\"\n if not txqueue:\n return\n pre_txs = len(collation.transactions)\n log.info('Adding transactions, %d in txqueue, %d dunkles' % (len(\n txqueue.txs), pre_txs))\n while 1:\n tx = txqueue.pop_transaction(max_gas=shard_state.gas_limit -\n shard_state.gas_used, min_gasprice=min_gasprice)\n if tx is None:\n break\n try:\n apply_shard_transaction(mainchain_state, shard_state, shard_id, tx)\n collation.transactions.append(tx)\n except (InsufficientBalance, BlockGasLimitReached,\n InsufficientStartGas, InvalidNonce, UnsignedTransaction) as e:\n log.info(str(e))\n pass\n log.info('Added %d transactions' % (len(collation.transactions) - pre_txs))\n\n\ndef update_collation_env_variables(state, collation):\n \"\"\"Update collation variables into the state\n (refer to ethereum.common.update_block_env_variables)\n \"\"\"\n state.block_coinbase = collation.header.coinbase\n\n\ndef set_execution_results(state, collation):\n \"\"\"Set state root, receipt root, etc\n (ethereum.pow.common.set_execution_results)\n \"\"\"\n collation.header.receipts_root = mk_receipt_sha(state.receipts)\n collation.header.tx_list_root = mk_transaction_sha(collation.transactions)\n state.commit()\n collation.header.post_state_root = state.trie.root_hash\n log.info('Collation pre-sealed, %d gas used' % state.gas_used)\n\n\ndef validate_transaction_tree(collation):\n 
\"\"\"Validate that the transaction list root is correct\n (refer to ethereum.common.validate_transaction_tree)\n \"\"\"\n if collation.header.tx_list_root != mk_transaction_sha(collation.\n transactions):\n raise ValueError(\n 'Transaction root mismatch: header %s computed %s, %d transactions'\n % (encode_hex(collation.header.tx_list_root), encode_hex(\n mk_transaction_sha(collation.transactions)), len(collation.\n transactions)))\n return True\n\n\ndef verify_execution_results(state, collation):\n \"\"\"Verify the results by Merkle Proof\n (refer to ethereum.common.verify_execution_results)\n \"\"\"\n state.commit()\n validate_transaction_tree(collation)\n if collation.header.post_state_root != state.trie.root_hash:\n raise ValueError('State root mismatch: header %s computed %s' % (\n encode_hex(collation.header.post_state_root), encode_hex(state.\n trie.root_hash)))\n if collation.header.receipts_root != mk_receipt_sha(state.receipts):\n raise ValueError(\n 'Receipt root mismatch: header %s computed %s, computed %d, %d receipts'\n % (encode_hex(collation.header.receipts_root), encode_hex(\n mk_receipt_sha(state.receipts)), state.gas_used, len(state.\n receipts)))\n return True\n\n\ndef finalize(state, coinbase):\n \"\"\"Apply rewards and commit\n (refer to ethereum.pow.consensus.finalize)\n \"\"\"\n delta = int(state.config['COLLATOR_REWARD'])\n state.delta_balance(coinbase, delta)\n",
"step-5": "from ethereum.common import mk_transaction_sha, mk_receipt_sha\nfrom ethereum.exceptions import InsufficientBalance, BlockGasLimitReached, \\\n InsufficientStartGas, InvalidNonce, UnsignedTransaction\nfrom ethereum.messages import apply_transaction\nfrom ethereum.slogging import get_logger\nfrom ethereum.utils import encode_hex\n\nfrom sharding.receipt_consuming_tx_utils import apply_shard_transaction\nfrom sharding.collation import Collation, CollationHeader\n\nlog = get_logger('sharding.shard_state_transition')\n\n\ndef mk_collation_from_prevstate(shard_chain, state, coinbase):\n \"\"\"Make collation from previous state\n (refer to ethereum.common.mk_block_from_prevstate)\n \"\"\"\n # state = state or shard_chain.state\n collation = Collation(CollationHeader())\n collation.header.shard_id = shard_chain.shard_id\n collation.header.prev_state_root = state.trie.root_hash\n collation.header.coinbase = coinbase\n collation.transactions = []\n return collation\n\n\ndef add_transactions(shard_state, collation, txqueue, shard_id, min_gasprice=0, mainchain_state=None):\n \"\"\"Add transactions to a collation\n (refer to ethereum.common.add_transactions)\n \"\"\"\n if not txqueue:\n return\n pre_txs = len(collation.transactions)\n log.info('Adding transactions, %d in txqueue, %d dunkles' % (len(txqueue.txs), pre_txs))\n while 1:\n tx = txqueue.pop_transaction(\n max_gas=shard_state.gas_limit - shard_state.gas_used,\n min_gasprice=min_gasprice\n )\n if tx is None:\n break\n try:\n apply_shard_transaction(mainchain_state, shard_state, shard_id, tx)\n collation.transactions.append(tx)\n except (InsufficientBalance, BlockGasLimitReached, InsufficientStartGas,\n InvalidNonce, UnsignedTransaction) as e:\n log.info(str(e))\n pass\n log.info('Added %d transactions' % (len(collation.transactions) - pre_txs))\n\n\ndef update_collation_env_variables(state, collation):\n \"\"\"Update collation variables into the state\n (refer to 
ethereum.common.update_block_env_variables)\n \"\"\"\n state.block_coinbase = collation.header.coinbase\n\n\ndef set_execution_results(state, collation):\n \"\"\"Set state root, receipt root, etc\n (ethereum.pow.common.set_execution_results)\n \"\"\"\n collation.header.receipts_root = mk_receipt_sha(state.receipts)\n collation.header.tx_list_root = mk_transaction_sha(collation.transactions)\n\n # Notice: commit state before assigning\n state.commit()\n collation.header.post_state_root = state.trie.root_hash\n\n # TODO: Don't handle in basic sharding currently\n # block.header.gas_used = state.gas_used\n # block.header.bloom = state.bloom\n\n log.info('Collation pre-sealed, %d gas used' % state.gas_used)\n\n\ndef validate_transaction_tree(collation):\n \"\"\"Validate that the transaction list root is correct\n (refer to ethereum.common.validate_transaction_tree)\n \"\"\"\n if collation.header.tx_list_root != mk_transaction_sha(collation.transactions):\n raise ValueError(\"Transaction root mismatch: header %s computed %s, %d transactions\" %\n (encode_hex(collation.header.tx_list_root), encode_hex(mk_transaction_sha(collation.transactions)),\n len(collation.transactions)))\n return True\n\n\ndef verify_execution_results(state, collation):\n \"\"\"Verify the results by Merkle Proof\n (refer to ethereum.common.verify_execution_results)\n \"\"\"\n state.commit()\n\n validate_transaction_tree(collation)\n\n if collation.header.post_state_root != state.trie.root_hash:\n raise ValueError('State root mismatch: header %s computed %s' %\n (encode_hex(collation.header.post_state_root), encode_hex(state.trie.root_hash)))\n if collation.header.receipts_root != mk_receipt_sha(state.receipts):\n raise ValueError('Receipt root mismatch: header %s computed %s, computed %d, %d receipts' %\n (encode_hex(collation.header.receipts_root), encode_hex(mk_receipt_sha(state.receipts)),\n state.gas_used, len(state.receipts)))\n\n return True\n\n\ndef finalize(state, coinbase):\n \"\"\"Apply 
rewards and commit\n (refer to ethereum.pow.consensus.finalize)\n \"\"\"\n delta = int(state.config['COLLATOR_REWARD'])\n state.delta_balance(coinbase, delta)\n",
"step-ids": [
4,
5,
6,
7,
10
]
}
|
[
4,
5,
6,
7,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if settings.DEBUG:
import debug_toolbar
urlpatterns += path('__debug__/', include(debug_toolbar.urls)),
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [path('', TemplateView.as_view(template_name=
'mainapp/index.html'), name='index'), path('code/<int:pk>',
TemplateView.as_view(template_name='mainapp/index.html'), name='code'),
path('auth/', include('authapp.urls', namespace='authapp')), path(
'api/', include('api.urls', namespace='api')), path('s/<slug:link>',
ShortURLRedirect.as_view(), name='short_link'), path('admin/', admin.
site.urls)]
if settings.DEBUG:
import debug_toolbar
urlpatterns += path('__debug__/', include(debug_toolbar.urls)),
<|reserved_special_token_1|>
from django.conf import settings
from django.contrib import admin
from django.urls import path, include, reverse_lazy
from django.views.generic import RedirectView, TemplateView
from mainapp.views import ShortURLRedirect
urlpatterns = [path('', TemplateView.as_view(template_name=
'mainapp/index.html'), name='index'), path('code/<int:pk>',
TemplateView.as_view(template_name='mainapp/index.html'), name='code'),
path('auth/', include('authapp.urls', namespace='authapp')), path(
'api/', include('api.urls', namespace='api')), path('s/<slug:link>',
ShortURLRedirect.as_view(), name='short_link'), path('admin/', admin.
site.urls)]
if settings.DEBUG:
import debug_toolbar
urlpatterns += path('__debug__/', include(debug_toolbar.urls)),
<|reserved_special_token_1|>
from django.conf import settings
from django.contrib import admin
from django.urls import path, include, reverse_lazy
from django.views.generic import RedirectView, TemplateView
from mainapp.views import ShortURLRedirect
urlpatterns = [
path('', TemplateView.as_view(template_name='mainapp/index.html'), name='index'),
path('code/<int:pk>', TemplateView.as_view(template_name='mainapp/index.html'), name='code'),
path('auth/', include('authapp.urls', namespace='authapp')),
path('api/', include('api.urls', namespace='api')),
path('s/<slug:link>', ShortURLRedirect.as_view(), name='short_link'),
path('admin/', admin.site.urls),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns += path('__debug__/', include(debug_toolbar.urls)),
|
flexible
|
{
"blob_id": "573674e50e05880a2822f306c125207b382d872f",
"index": 6389,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif settings.DEBUG:\n import debug_toolbar\n urlpatterns += path('__debug__/', include(debug_toolbar.urls)),\n",
"step-3": "<mask token>\nurlpatterns = [path('', TemplateView.as_view(template_name=\n 'mainapp/index.html'), name='index'), path('code/<int:pk>',\n TemplateView.as_view(template_name='mainapp/index.html'), name='code'),\n path('auth/', include('authapp.urls', namespace='authapp')), path(\n 'api/', include('api.urls', namespace='api')), path('s/<slug:link>',\n ShortURLRedirect.as_view(), name='short_link'), path('admin/', admin.\n site.urls)]\nif settings.DEBUG:\n import debug_toolbar\n urlpatterns += path('__debug__/', include(debug_toolbar.urls)),\n",
"step-4": "from django.conf import settings\nfrom django.contrib import admin\nfrom django.urls import path, include, reverse_lazy\nfrom django.views.generic import RedirectView, TemplateView\nfrom mainapp.views import ShortURLRedirect\nurlpatterns = [path('', TemplateView.as_view(template_name=\n 'mainapp/index.html'), name='index'), path('code/<int:pk>',\n TemplateView.as_view(template_name='mainapp/index.html'), name='code'),\n path('auth/', include('authapp.urls', namespace='authapp')), path(\n 'api/', include('api.urls', namespace='api')), path('s/<slug:link>',\n ShortURLRedirect.as_view(), name='short_link'), path('admin/', admin.\n site.urls)]\nif settings.DEBUG:\n import debug_toolbar\n urlpatterns += path('__debug__/', include(debug_toolbar.urls)),\n",
"step-5": "from django.conf import settings\nfrom django.contrib import admin\nfrom django.urls import path, include, reverse_lazy\nfrom django.views.generic import RedirectView, TemplateView\n\nfrom mainapp.views import ShortURLRedirect\n\nurlpatterns = [\n path('', TemplateView.as_view(template_name='mainapp/index.html'), name='index'),\n path('code/<int:pk>', TemplateView.as_view(template_name='mainapp/index.html'), name='code'),\n\n path('auth/', include('authapp.urls', namespace='authapp')),\n path('api/', include('api.urls', namespace='api')),\n path('s/<slug:link>', ShortURLRedirect.as_view(), name='short_link'),\n\n path('admin/', admin.site.urls),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n\n urlpatterns += path('__debug__/', include(debug_toolbar.urls)),\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy as np
import sympy as sp
from copy import copy
from typing import Any, get_type_hints, Dict
from inspect import getclosurevars, getsource, getargs
import ast
from ast import parse, get_source_segment
from .numpy import NumPy
from .torch import torch_defs
# Registry of recognized definitions consulted when resolving a checked
# function's globals; seeded with the torch table, and `check` records
# each verified function here (with a truthy marker).
defines = {}
defines.update(torch_defs)
def check_type(item, target):
    """Assert that the inferred value/type *item* equals the expected *target*.

    NOTE: implemented with a bare ``assert``, so the check is stripped
    when Python runs with ``-O``.
    """
    assert item == target
def exec_lines(source: str, body, loc: Dict[str, Any], glob: Dict[str, Any],
               ret: Any):
    """Abstractly execute a list of AST statements for checking.

    ``loc``/``glob`` map names to stand-in values (e.g. ``NumPy()``);
    ``ret`` is the declared return annotation, compared against every
    ``return`` statement via :func:`check_type`.
    """

    def get_value(v):
        # Evaluate one expression node in the abstract domain.
        if isinstance(v, ast.BinOp):
            a = get_value(v.left)
            b = get_value(v.right)
            # NOTE(review): only the left operand is propagated;
            # presumably both sides are assumed to agree -- confirm.
            return a
        elif isinstance(v, ast.Name):
            return loc.get(v.id)
        elif isinstance(v, ast.Call):
            args = [get_value(a) for a in v.args]
            # Locals shadow globals when resolving the callee.
            func = loc.get(v.func.id, None) or glob.get(v.func.id, None)
            return func(*args)
        elif isinstance(v, ast.List):
            return [get_value(e) for e in v.elts]
        elif isinstance(v, ast.Constant):
            return v.value
        # Fallback: evaluate the original source text of the node.
        seg = get_source_segment(source, v)
        return eval(seg, glob, loc)

    for line in body:
        if isinstance(line, ast.Return):
            value = get_value(line.value)
            check_type(value, ret)
        elif isinstance(line, ast.If):
            # Explore both branches against copies of the local scope so
            # neither branch's bindings leak into the other.
            loc1, loc2 = copy(loc), copy(loc)
            exec_lines(source, line.body, loc1, glob, ret)
            exec_lines(source, line.orelse, loc2, glob, ret)
        elif isinstance(line, ast.Assign):
            value = get_value(line.value)
            t = line.targets  # NOTE(review): targets are not bound back into loc -- confirm intent
        else:
            # Anything unrecognized is executed verbatim from source.
            exec(get_source_segment(source, line), glob, loc)
def check(func):
    """Decorator: abstractly run *func*'s body to verify its annotations.

    Locals start as the annotated parameter types (``Any`` when
    unannotated); referenced globals are replaced by their registered
    stand-ins (the ``numpy`` module becomes a ``NumPy`` instance).
    The function is returned unchanged and recorded in ``defines``.
    """
    arg_names = getargs(func.__code__).args
    hints = get_type_hints(func)
    return_hint = hints.pop('return', None)

    local_env = {name: Any for name in arg_names}
    local_env.update(hints)

    closure = getclosurevars(func)
    global_env = {}
    for name, value in closure.globals.items():
        if value is np:
            global_env[name] = NumPy()
        else:
            global_env[name] = defines.get(value, None) or value

    src = getsource(func)
    fn_node = parse(src).body[0]
    exec_lines(src, fn_node.body, local_env, global_env, return_hint)
    defines[func] = 1
    return func
|
normal
|
{
"blob_id": "430b5ca7212983743cadc36a2ada987bb721174a",
"index": 3537,
"step-1": "<mask token>\n\n\ndef check_type(item, target):\n assert item == target\n\n\ndef exec_lines(source: str, body, loc: Dict[str, Any], glob: Dict[str, Any],\n ret: Any):\n\n def get_value(v):\n if isinstance(v, ast.BinOp):\n a = get_value(v.left)\n b = get_value(v.right)\n return a\n elif isinstance(v, ast.Name):\n return loc.get(v.id)\n elif isinstance(v, ast.Call):\n args = [get_value(a) for a in v.args]\n func = loc.get(v.func.id, None) or glob.get(v.func.id, None)\n return func(*args)\n elif isinstance(v, ast.List):\n return [get_value(e) for e in v.elts]\n elif isinstance(v, ast.Constant):\n return v.value\n seg = get_source_segment(source, v)\n return eval(seg, glob, loc)\n for line in body:\n if isinstance(line, ast.Return):\n value = get_value(line.value)\n check_type(value, ret)\n elif isinstance(line, ast.If):\n loc1, loc2 = copy(loc), copy(loc)\n exec_lines(source, line.body, loc1, glob, ret)\n exec_lines(source, line.orelse, loc2, glob, ret)\n elif isinstance(line, ast.Assign):\n value = get_value(line.value)\n t = line.targets\n else:\n exec(get_source_segment(source, line), glob, loc)\n\n\ndef check(func):\n args = getargs(func.__code__)\n hints = get_type_hints(func)\n cv = getclosurevars(func)\n loc_vars = {n: Any for n in args.args}\n ret = hints.pop('return') if 'return' in hints else None\n loc_vars.update(hints)\n glob_vars = {}\n for k, v in cv.globals.items():\n if v is np:\n glob_vars[k] = NumPy()\n else:\n glob_vars[k] = defines.get(v, None) or v\n source = getsource(func)\n f_ast = parse(source).body[0]\n body = f_ast.body\n exec_lines(source, body, loc_vars, glob_vars, ret)\n defines[func] = 1\n return func\n",
"step-2": "<mask token>\ndefines.update(torch_defs)\n\n\ndef check_type(item, target):\n assert item == target\n\n\ndef exec_lines(source: str, body, loc: Dict[str, Any], glob: Dict[str, Any],\n ret: Any):\n\n def get_value(v):\n if isinstance(v, ast.BinOp):\n a = get_value(v.left)\n b = get_value(v.right)\n return a\n elif isinstance(v, ast.Name):\n return loc.get(v.id)\n elif isinstance(v, ast.Call):\n args = [get_value(a) for a in v.args]\n func = loc.get(v.func.id, None) or glob.get(v.func.id, None)\n return func(*args)\n elif isinstance(v, ast.List):\n return [get_value(e) for e in v.elts]\n elif isinstance(v, ast.Constant):\n return v.value\n seg = get_source_segment(source, v)\n return eval(seg, glob, loc)\n for line in body:\n if isinstance(line, ast.Return):\n value = get_value(line.value)\n check_type(value, ret)\n elif isinstance(line, ast.If):\n loc1, loc2 = copy(loc), copy(loc)\n exec_lines(source, line.body, loc1, glob, ret)\n exec_lines(source, line.orelse, loc2, glob, ret)\n elif isinstance(line, ast.Assign):\n value = get_value(line.value)\n t = line.targets\n else:\n exec(get_source_segment(source, line), glob, loc)\n\n\ndef check(func):\n args = getargs(func.__code__)\n hints = get_type_hints(func)\n cv = getclosurevars(func)\n loc_vars = {n: Any for n in args.args}\n ret = hints.pop('return') if 'return' in hints else None\n loc_vars.update(hints)\n glob_vars = {}\n for k, v in cv.globals.items():\n if v is np:\n glob_vars[k] = NumPy()\n else:\n glob_vars[k] = defines.get(v, None) or v\n source = getsource(func)\n f_ast = parse(source).body[0]\n body = f_ast.body\n exec_lines(source, body, loc_vars, glob_vars, ret)\n defines[func] = 1\n return func\n",
"step-3": "<mask token>\ndefines = {}\ndefines.update(torch_defs)\n\n\ndef check_type(item, target):\n assert item == target\n\n\ndef exec_lines(source: str, body, loc: Dict[str, Any], glob: Dict[str, Any],\n ret: Any):\n\n def get_value(v):\n if isinstance(v, ast.BinOp):\n a = get_value(v.left)\n b = get_value(v.right)\n return a\n elif isinstance(v, ast.Name):\n return loc.get(v.id)\n elif isinstance(v, ast.Call):\n args = [get_value(a) for a in v.args]\n func = loc.get(v.func.id, None) or glob.get(v.func.id, None)\n return func(*args)\n elif isinstance(v, ast.List):\n return [get_value(e) for e in v.elts]\n elif isinstance(v, ast.Constant):\n return v.value\n seg = get_source_segment(source, v)\n return eval(seg, glob, loc)\n for line in body:\n if isinstance(line, ast.Return):\n value = get_value(line.value)\n check_type(value, ret)\n elif isinstance(line, ast.If):\n loc1, loc2 = copy(loc), copy(loc)\n exec_lines(source, line.body, loc1, glob, ret)\n exec_lines(source, line.orelse, loc2, glob, ret)\n elif isinstance(line, ast.Assign):\n value = get_value(line.value)\n t = line.targets\n else:\n exec(get_source_segment(source, line), glob, loc)\n\n\ndef check(func):\n args = getargs(func.__code__)\n hints = get_type_hints(func)\n cv = getclosurevars(func)\n loc_vars = {n: Any for n in args.args}\n ret = hints.pop('return') if 'return' in hints else None\n loc_vars.update(hints)\n glob_vars = {}\n for k, v in cv.globals.items():\n if v is np:\n glob_vars[k] = NumPy()\n else:\n glob_vars[k] = defines.get(v, None) or v\n source = getsource(func)\n f_ast = parse(source).body[0]\n body = f_ast.body\n exec_lines(source, body, loc_vars, glob_vars, ret)\n defines[func] = 1\n return func\n",
"step-4": "import numpy as np\nimport sympy as sp\nfrom copy import copy\nfrom typing import Any, get_type_hints, Dict\nfrom inspect import getclosurevars, getsource, getargs\nimport ast\nfrom ast import parse, get_source_segment\nfrom .numpy import NumPy\nfrom .torch import torch_defs\ndefines = {}\ndefines.update(torch_defs)\n\n\ndef check_type(item, target):\n assert item == target\n\n\ndef exec_lines(source: str, body, loc: Dict[str, Any], glob: Dict[str, Any],\n ret: Any):\n\n def get_value(v):\n if isinstance(v, ast.BinOp):\n a = get_value(v.left)\n b = get_value(v.right)\n return a\n elif isinstance(v, ast.Name):\n return loc.get(v.id)\n elif isinstance(v, ast.Call):\n args = [get_value(a) for a in v.args]\n func = loc.get(v.func.id, None) or glob.get(v.func.id, None)\n return func(*args)\n elif isinstance(v, ast.List):\n return [get_value(e) for e in v.elts]\n elif isinstance(v, ast.Constant):\n return v.value\n seg = get_source_segment(source, v)\n return eval(seg, glob, loc)\n for line in body:\n if isinstance(line, ast.Return):\n value = get_value(line.value)\n check_type(value, ret)\n elif isinstance(line, ast.If):\n loc1, loc2 = copy(loc), copy(loc)\n exec_lines(source, line.body, loc1, glob, ret)\n exec_lines(source, line.orelse, loc2, glob, ret)\n elif isinstance(line, ast.Assign):\n value = get_value(line.value)\n t = line.targets\n else:\n exec(get_source_segment(source, line), glob, loc)\n\n\ndef check(func):\n args = getargs(func.__code__)\n hints = get_type_hints(func)\n cv = getclosurevars(func)\n loc_vars = {n: Any for n in args.args}\n ret = hints.pop('return') if 'return' in hints else None\n loc_vars.update(hints)\n glob_vars = {}\n for k, v in cv.globals.items():\n if v is np:\n glob_vars[k] = NumPy()\n else:\n glob_vars[k] = defines.get(v, None) or v\n source = getsource(func)\n f_ast = parse(source).body[0]\n body = f_ast.body\n exec_lines(source, body, loc_vars, glob_vars, ret)\n defines[func] = 1\n return func\n",
"step-5": null,
"step-ids": [
3,
4,
5,
6
]
}
|
[
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class DenseBlock(nn.Module):
<|reserved_special_token_0|>
def forward(self, x):
out = self.denseblock(x)
return out
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BottleNeck(nn.Module):
<|reserved_special_token_0|>
def forward(self, x):
out = self.bottleneck(x)
out = torch.cat((x, out), 1)
return out
class DenseBlock(nn.Module):
def __init__(self, n_channels, growth_rate, n_DenseBlocks):
super(DenseBlock, self).__init__()
layers = []
for i in range(n_DenseBlocks):
layers.append(BottleNeck(n_channels + i * growth_rate, growth_rate)
)
self.denseblock = nn.Sequential(*layers)
def forward(self, x):
out = self.denseblock(x)
return out
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BottleNeck(nn.Module):
def __init__(self, n_channels, growth_rate):
super(BottleNeck, self).__init__()
Channels = 4 * growth_rate
self.bottleneck = nn.Sequential(nn.BatchNorm2d(n_channels), nn.ReLU
(inplace=True), nn.Conv2d(n_channels, Channels, 1, bias=False),
nn.BatchNorm2d(Channels), nn.ReLU(inplace=True), nn.Conv2d(
Channels, growth_rate, 3, padding=1, bias=False))
def forward(self, x):
out = self.bottleneck(x)
out = torch.cat((x, out), 1)
return out
class DenseBlock(nn.Module):
def __init__(self, n_channels, growth_rate, n_DenseBlocks):
super(DenseBlock, self).__init__()
layers = []
for i in range(n_DenseBlocks):
layers.append(BottleNeck(n_channels + i * growth_rate, growth_rate)
)
self.denseblock = nn.Sequential(*layers)
def forward(self, x):
out = self.denseblock(x)
return out
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import torch
from torch import nn
class BottleNeck(nn.Module):
def __init__(self, n_channels, growth_rate):
super(BottleNeck, self).__init__()
Channels = 4 * growth_rate
self.bottleneck = nn.Sequential(nn.BatchNorm2d(n_channels), nn.ReLU
(inplace=True), nn.Conv2d(n_channels, Channels, 1, bias=False),
nn.BatchNorm2d(Channels), nn.ReLU(inplace=True), nn.Conv2d(
Channels, growth_rate, 3, padding=1, bias=False))
def forward(self, x):
out = self.bottleneck(x)
out = torch.cat((x, out), 1)
return out
class DenseBlock(nn.Module):
def __init__(self, n_channels, growth_rate, n_DenseBlocks):
super(DenseBlock, self).__init__()
layers = []
for i in range(n_DenseBlocks):
layers.append(BottleNeck(n_channels + i * growth_rate, growth_rate)
)
self.denseblock = nn.Sequential(*layers)
def forward(self, x):
out = self.denseblock(x)
return out
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
@File : densenet_block.py
@Time : 12/11/20 9:59 PM
@Author : Mingqiang Ning
@Email : ningmq_cv@foxmail.com
@Modify Time @Version @Description
------------ -------- -----------
12/11/20 9:59 PM 1.0 None
# @Software: PyCharm
"""
import torch
from torch import nn
class BottleNeck(nn.Module):
def __init__(self,n_channels,growth_rate):
super(BottleNeck,self).__init__()
Channels=4*growth_rate
self.bottleneck=nn.Sequential(
nn.BatchNorm2d(n_channels),
nn.ReLU(inplace=True),
nn.Conv2d(n_channels,Channels,1,bias=False),
nn.BatchNorm2d(Channels),
nn.ReLU(inplace=True),
nn.Conv2d(Channels, growth_rate, 3,padding=1, bias=False)
)
def forward(self,x):
out=self.bottleneck(x)
out=torch.cat((x,out),1)
return out
class DenseBlock(nn.Module):
def __init__(self, n_channels, growth_rate,n_DenseBlocks):
super(DenseBlock, self).__init__()
layers=[]
for i in range(n_DenseBlocks):
layers.append(BottleNeck(n_channels+i*growth_rate,growth_rate))
self.denseblock=nn.Sequential(*layers)
def forward(self, x):
out=self.denseblock(x)
return out
|
flexible
|
{
"blob_id": "c2ba18062b8555c77b329718ec1f2ae7f326c78e",
"index": 1988,
"step-1": "<mask token>\n\n\nclass DenseBlock(nn.Module):\n <mask token>\n\n def forward(self, x):\n out = self.denseblock(x)\n return out\n",
"step-2": "<mask token>\n\n\nclass BottleNeck(nn.Module):\n <mask token>\n\n def forward(self, x):\n out = self.bottleneck(x)\n out = torch.cat((x, out), 1)\n return out\n\n\nclass DenseBlock(nn.Module):\n\n def __init__(self, n_channels, growth_rate, n_DenseBlocks):\n super(DenseBlock, self).__init__()\n layers = []\n for i in range(n_DenseBlocks):\n layers.append(BottleNeck(n_channels + i * growth_rate, growth_rate)\n )\n self.denseblock = nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.denseblock(x)\n return out\n",
"step-3": "<mask token>\n\n\nclass BottleNeck(nn.Module):\n\n def __init__(self, n_channels, growth_rate):\n super(BottleNeck, self).__init__()\n Channels = 4 * growth_rate\n self.bottleneck = nn.Sequential(nn.BatchNorm2d(n_channels), nn.ReLU\n (inplace=True), nn.Conv2d(n_channels, Channels, 1, bias=False),\n nn.BatchNorm2d(Channels), nn.ReLU(inplace=True), nn.Conv2d(\n Channels, growth_rate, 3, padding=1, bias=False))\n\n def forward(self, x):\n out = self.bottleneck(x)\n out = torch.cat((x, out), 1)\n return out\n\n\nclass DenseBlock(nn.Module):\n\n def __init__(self, n_channels, growth_rate, n_DenseBlocks):\n super(DenseBlock, self).__init__()\n layers = []\n for i in range(n_DenseBlocks):\n layers.append(BottleNeck(n_channels + i * growth_rate, growth_rate)\n )\n self.denseblock = nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.denseblock(x)\n return out\n",
"step-4": "<mask token>\nimport torch\nfrom torch import nn\n\n\nclass BottleNeck(nn.Module):\n\n def __init__(self, n_channels, growth_rate):\n super(BottleNeck, self).__init__()\n Channels = 4 * growth_rate\n self.bottleneck = nn.Sequential(nn.BatchNorm2d(n_channels), nn.ReLU\n (inplace=True), nn.Conv2d(n_channels, Channels, 1, bias=False),\n nn.BatchNorm2d(Channels), nn.ReLU(inplace=True), nn.Conv2d(\n Channels, growth_rate, 3, padding=1, bias=False))\n\n def forward(self, x):\n out = self.bottleneck(x)\n out = torch.cat((x, out), 1)\n return out\n\n\nclass DenseBlock(nn.Module):\n\n def __init__(self, n_channels, growth_rate, n_DenseBlocks):\n super(DenseBlock, self).__init__()\n layers = []\n for i in range(n_DenseBlocks):\n layers.append(BottleNeck(n_channels + i * growth_rate, growth_rate)\n )\n self.denseblock = nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.denseblock(x)\n return out\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\n@File : densenet_block.py\n@Time : 12/11/20 9:59 PM\n@Author : Mingqiang Ning\n@Email : ningmq_cv@foxmail.com\n@Modify Time @Version @Description\n------------ -------- -----------\n12/11/20 9:59 PM 1.0 None\n# @Software: PyCharm\n\"\"\"\nimport torch\nfrom torch import nn\nclass BottleNeck(nn.Module):\n def __init__(self,n_channels,growth_rate):\n super(BottleNeck,self).__init__()\n Channels=4*growth_rate\n self.bottleneck=nn.Sequential(\n nn.BatchNorm2d(n_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(n_channels,Channels,1,bias=False),\n nn.BatchNorm2d(Channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(Channels, growth_rate, 3,padding=1, bias=False)\n )\n def forward(self,x):\n out=self.bottleneck(x)\n out=torch.cat((x,out),1)\n return out\n\n\nclass DenseBlock(nn.Module):\n def __init__(self, n_channels, growth_rate,n_DenseBlocks):\n super(DenseBlock, self).__init__()\n layers=[]\n for i in range(n_DenseBlocks):\n layers.append(BottleNeck(n_channels+i*growth_rate,growth_rate))\n self.denseblock=nn.Sequential(*layers)\n def forward(self, x):\n out=self.denseblock(x)\n return out\n\n\n\n",
"step-ids": [
2,
5,
6,
7,
8
]
}
|
[
2,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
random.shuffle(listaAlunos)
print('A ordem de apresentação será ', listaAlunos)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
aluno1 = input('Primeiro aluno: ')
aluno2 = input('Segundo aluno: ')
aluno3 = input('Terceiro aluno: ')
aluno4 = input('Quarto aluno: ')
listaAlunos = [aluno1, aluno2, aluno3, aluno4]
random.shuffle(listaAlunos)
print('A ordem de apresentação será ', listaAlunos)
<|reserved_special_token_1|>
import random
aluno1 = input('Primeiro aluno: ')
aluno2 = input('Segundo aluno: ')
aluno3 = input('Terceiro aluno: ')
aluno4 = input('Quarto aluno: ')
listaAlunos = [aluno1, aluno2, aluno3, aluno4]
random.shuffle(listaAlunos)
print('A ordem de apresentação será ', listaAlunos)
<|reserved_special_token_1|>
# Exercício Python 20: O mesmo professor do desafio 19 quer sortear a ordem de apresentação de trabalhos dos alunos. Faça um programa que leia o nome dos quatro alunos e mostre a ordem sorteada.
import random
aluno1 = input('Primeiro aluno: ')
aluno2 = input('Segundo aluno: ')
aluno3 = input('Terceiro aluno: ')
aluno4 = input('Quarto aluno: ')
listaAlunos = [aluno1, aluno2, aluno3, aluno4]
# o shuffle embaralha os dados da lista
random.shuffle(listaAlunos)
print('A ordem de apresentação será ', listaAlunos)
|
flexible
|
{
"blob_id": "445bb8ad8dadd207a3546f4623de583fc47a2910",
"index": 2180,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nrandom.shuffle(listaAlunos)\nprint('A ordem de apresentação será ', listaAlunos)\n",
"step-3": "<mask token>\naluno1 = input('Primeiro aluno: ')\naluno2 = input('Segundo aluno: ')\naluno3 = input('Terceiro aluno: ')\naluno4 = input('Quarto aluno: ')\nlistaAlunos = [aluno1, aluno2, aluno3, aluno4]\nrandom.shuffle(listaAlunos)\nprint('A ordem de apresentação será ', listaAlunos)\n",
"step-4": "import random\naluno1 = input('Primeiro aluno: ')\naluno2 = input('Segundo aluno: ')\naluno3 = input('Terceiro aluno: ')\naluno4 = input('Quarto aluno: ')\nlistaAlunos = [aluno1, aluno2, aluno3, aluno4]\nrandom.shuffle(listaAlunos)\nprint('A ordem de apresentação será ', listaAlunos)\n",
"step-5": "# Exercício Python 20: O mesmo professor do desafio 19 quer sortear a ordem de apresentação de trabalhos dos alunos. Faça um programa que leia o nome dos quatro alunos e mostre a ordem sorteada.\r\nimport random\r\n\r\naluno1 = input('Primeiro aluno: ')\r\naluno2 = input('Segundo aluno: ')\r\naluno3 = input('Terceiro aluno: ')\r\naluno4 = input('Quarto aluno: ')\r\nlistaAlunos = [aluno1, aluno2, aluno3, aluno4]\r\n# o shuffle embaralha os dados da lista\r\nrandom.shuffle(listaAlunos)\r\nprint('A ordem de apresentação será ', listaAlunos)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='cronjob', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('titel', models.CharField(max_length=
255)), ('adresse', models.URLField(max_length=255)), (
'authentifizierung_checked', models.BooleanField(default=False)), (
'benutzername', models.CharField(max_length=255)), ('passwort',
models.CharField(max_length=255)), ('ausführen', models.
DateTimeField(default=datetime.datetime(2019, 10, 10, 9, 2, 22,
105756))), ('benachrichtigung_fehlschlag', models.BooleanField(
default=False)), ('benachrichtigung_erfolg', models.BooleanField(
default=False)), ('benachrichtigung_deaktivierung', models.
BooleanField(default=False)), ('antwort_speichern', models.
BooleanField(default=False))])]
<|reserved_special_token_1|>
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='cronjob', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('titel', models.CharField(max_length=
255)), ('adresse', models.URLField(max_length=255)), (
'authentifizierung_checked', models.BooleanField(default=False)), (
'benutzername', models.CharField(max_length=255)), ('passwort',
models.CharField(max_length=255)), ('ausführen', models.
DateTimeField(default=datetime.datetime(2019, 10, 10, 9, 2, 22,
105756))), ('benachrichtigung_fehlschlag', models.BooleanField(
default=False)), ('benachrichtigung_erfolg', models.BooleanField(
default=False)), ('benachrichtigung_deaktivierung', models.
BooleanField(default=False)), ('antwort_speichern', models.
BooleanField(default=False))])]
<|reserved_special_token_1|>
# Generated by Django 2.2.6 on 2019-10-10 07:02
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='cronjob',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('titel', models.CharField(max_length=255)),
('adresse', models.URLField(max_length=255)),
('authentifizierung_checked', models.BooleanField(default=False)),
('benutzername', models.CharField(max_length=255)),
('passwort', models.CharField(max_length=255)),
('ausführen', models.DateTimeField(default=datetime.datetime(2019, 10, 10, 9, 2, 22, 105756))),
('benachrichtigung_fehlschlag', models.BooleanField(default=False)),
('benachrichtigung_erfolg', models.BooleanField(default=False)),
('benachrichtigung_deaktivierung', models.BooleanField(default=False)),
('antwort_speichern', models.BooleanField(default=False)),
],
),
]
|
flexible
|
{
"blob_id": "af523777e32c44112bd37a4b9dcbc0941f7e8236",
"index": 4242,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='cronjob', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('titel', models.CharField(max_length=\n 255)), ('adresse', models.URLField(max_length=255)), (\n 'authentifizierung_checked', models.BooleanField(default=False)), (\n 'benutzername', models.CharField(max_length=255)), ('passwort',\n models.CharField(max_length=255)), ('ausführen', models.\n DateTimeField(default=datetime.datetime(2019, 10, 10, 9, 2, 22, \n 105756))), ('benachrichtigung_fehlschlag', models.BooleanField(\n default=False)), ('benachrichtigung_erfolg', models.BooleanField(\n default=False)), ('benachrichtigung_deaktivierung', models.\n BooleanField(default=False)), ('antwort_speichern', models.\n BooleanField(default=False))])]\n",
"step-4": "import datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='cronjob', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('titel', models.CharField(max_length=\n 255)), ('adresse', models.URLField(max_length=255)), (\n 'authentifizierung_checked', models.BooleanField(default=False)), (\n 'benutzername', models.CharField(max_length=255)), ('passwort',\n models.CharField(max_length=255)), ('ausführen', models.\n DateTimeField(default=datetime.datetime(2019, 10, 10, 9, 2, 22, \n 105756))), ('benachrichtigung_fehlschlag', models.BooleanField(\n default=False)), ('benachrichtigung_erfolg', models.BooleanField(\n default=False)), ('benachrichtigung_deaktivierung', models.\n BooleanField(default=False)), ('antwort_speichern', models.\n BooleanField(default=False))])]\n",
"step-5": "# Generated by Django 2.2.6 on 2019-10-10 07:02\n\nimport datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='cronjob',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('titel', models.CharField(max_length=255)),\n ('adresse', models.URLField(max_length=255)),\n ('authentifizierung_checked', models.BooleanField(default=False)),\n ('benutzername', models.CharField(max_length=255)),\n ('passwort', models.CharField(max_length=255)),\n ('ausführen', models.DateTimeField(default=datetime.datetime(2019, 10, 10, 9, 2, 22, 105756))),\n ('benachrichtigung_fehlschlag', models.BooleanField(default=False)),\n ('benachrichtigung_erfolg', models.BooleanField(default=False)),\n ('benachrichtigung_deaktivierung', models.BooleanField(default=False)),\n ('antwort_speichern', models.BooleanField(default=False)),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
tree.insert(5, tree.root)
tree.insert(15, tree.root)
tree.insert(25, tree.root)
tree.insert(12, tree.root)
tree.insert(35, tree.root)
print(tree.height(tree.root))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
tree = BST.BST(10)
tree.insert(5, tree.root)
tree.insert(15, tree.root)
tree.insert(25, tree.root)
tree.insert(12, tree.root)
tree.insert(35, tree.root)
print(tree.height(tree.root))
<|reserved_special_token_1|>
import BST
tree = BST.BST(10)
tree.insert(5, tree.root)
tree.insert(15, tree.root)
tree.insert(25, tree.root)
tree.insert(12, tree.root)
tree.insert(35, tree.root)
print(tree.height(tree.root))
|
flexible
|
{
"blob_id": "59ddb85d55c342342be4edc1fc3b92af701fa6cc",
"index": 4342,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntree.insert(5, tree.root)\ntree.insert(15, tree.root)\ntree.insert(25, tree.root)\ntree.insert(12, tree.root)\ntree.insert(35, tree.root)\nprint(tree.height(tree.root))\n",
"step-3": "<mask token>\ntree = BST.BST(10)\ntree.insert(5, tree.root)\ntree.insert(15, tree.root)\ntree.insert(25, tree.root)\ntree.insert(12, tree.root)\ntree.insert(35, tree.root)\nprint(tree.height(tree.root))\n",
"step-4": "import BST\ntree = BST.BST(10)\ntree.insert(5, tree.root)\ntree.insert(15, tree.root)\ntree.insert(25, tree.root)\ntree.insert(12, tree.root)\ntree.insert(35, tree.root)\nprint(tree.height(tree.root))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import pandas as pd
import os
import re
main_dir = r'C:\Users\Username\Desktop\Python\End-to-End-Data-Analysis\1. Get the Data\table'
file = 'CMBS Table.csv'
os.chdir(main_dir)
cmbs = pd.read_csv(file, encoding='ISO-8859-1')
# Delete extra Loan & Seller columns
loan_seller_cols = [val for val in cmbs.columns.values if re.search('(^Loan\s#|^Seller|^Property\sName)', val)][3:]
for col in loan_seller_cols:
cmbs.drop(columns=col, axis=1, inplace=True)
# Regex to edit headers
regex_dict = {'_\d': '', '\(.+\)+': '', '#': '', '%': '', r'\/' : '', '\s\s+': ' ', '^\s+': '', '\s+$': ''}
for key, value in regex_dict.items():
cmbs.columns = [re.sub(key, value, col) for col in cmbs.columns]
# Delete
for col in list(cmbs.columns.values):
try:
if cmbs[col].str.normalize('NFKD').str.match(' ').all():
cmbs.drop(columns=col, axis=1, inplace=True)
except AttributeError:
continue
cmbs.to_csv('CMBS Final.csv', index=False, encoding='ISO-8859-1')
|
normal
|
{
"blob_id": "eb890c68885cbab032ce9d6f3be3fd7013a2788b",
"index": 2140,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nos.chdir(main_dir)\n<mask token>\nfor col in loan_seller_cols:\n cmbs.drop(columns=col, axis=1, inplace=True)\n<mask token>\nfor key, value in regex_dict.items():\n cmbs.columns = [re.sub(key, value, col) for col in cmbs.columns]\nfor col in list(cmbs.columns.values):\n try:\n if cmbs[col].str.normalize('NFKD').str.match(' ').all():\n cmbs.drop(columns=col, axis=1, inplace=True)\n except AttributeError:\n continue\ncmbs.to_csv('CMBS Final.csv', index=False, encoding='ISO-8859-1')\n",
"step-3": "<mask token>\nmain_dir = (\n 'C:\\\\Users\\\\Username\\\\Desktop\\\\Python\\\\End-to-End-Data-Analysis\\\\1. Get the Data\\\\table'\n )\nfile = 'CMBS Table.csv'\nos.chdir(main_dir)\ncmbs = pd.read_csv(file, encoding='ISO-8859-1')\nloan_seller_cols = [val for val in cmbs.columns.values if re.search(\n '(^Loan\\\\s#|^Seller|^Property\\\\sName)', val)][3:]\nfor col in loan_seller_cols:\n cmbs.drop(columns=col, axis=1, inplace=True)\nregex_dict = {'_\\\\d': '', '\\\\(.+\\\\)+': '', '#': '', '%': '', '\\\\/': '',\n '\\\\s\\\\s+': ' ', '^\\\\s+': '', '\\\\s+$': ''}\nfor key, value in regex_dict.items():\n cmbs.columns = [re.sub(key, value, col) for col in cmbs.columns]\nfor col in list(cmbs.columns.values):\n try:\n if cmbs[col].str.normalize('NFKD').str.match(' ').all():\n cmbs.drop(columns=col, axis=1, inplace=True)\n except AttributeError:\n continue\ncmbs.to_csv('CMBS Final.csv', index=False, encoding='ISO-8859-1')\n",
"step-4": "import pandas as pd\nimport os\nimport re\nmain_dir = (\n 'C:\\\\Users\\\\Username\\\\Desktop\\\\Python\\\\End-to-End-Data-Analysis\\\\1. Get the Data\\\\table'\n )\nfile = 'CMBS Table.csv'\nos.chdir(main_dir)\ncmbs = pd.read_csv(file, encoding='ISO-8859-1')\nloan_seller_cols = [val for val in cmbs.columns.values if re.search(\n '(^Loan\\\\s#|^Seller|^Property\\\\sName)', val)][3:]\nfor col in loan_seller_cols:\n cmbs.drop(columns=col, axis=1, inplace=True)\nregex_dict = {'_\\\\d': '', '\\\\(.+\\\\)+': '', '#': '', '%': '', '\\\\/': '',\n '\\\\s\\\\s+': ' ', '^\\\\s+': '', '\\\\s+$': ''}\nfor key, value in regex_dict.items():\n cmbs.columns = [re.sub(key, value, col) for col in cmbs.columns]\nfor col in list(cmbs.columns.values):\n try:\n if cmbs[col].str.normalize('NFKD').str.match(' ').all():\n cmbs.drop(columns=col, axis=1, inplace=True)\n except AttributeError:\n continue\ncmbs.to_csv('CMBS Final.csv', index=False, encoding='ISO-8859-1')\n",
"step-5": "import pandas as pd\r\nimport os\r\nimport re\r\n\r\nmain_dir = r'C:\\Users\\Username\\Desktop\\Python\\End-to-End-Data-Analysis\\1. Get the Data\\table'\r\nfile = 'CMBS Table.csv'\r\n\r\nos.chdir(main_dir)\r\n\r\ncmbs = pd.read_csv(file, encoding='ISO-8859-1')\r\n\r\n# Delete extra Loan & Seller columns\r\nloan_seller_cols = [val for val in cmbs.columns.values if re.search('(^Loan\\s#|^Seller|^Property\\sName)', val)][3:]\r\n\r\nfor col in loan_seller_cols:\r\n cmbs.drop(columns=col, axis=1, inplace=True)\r\n\r\n# Regex to edit headers\r\nregex_dict = {'_\\d': '', '\\(.+\\)+': '', '#': '', '%': '', r'\\/' : '', '\\s\\s+': ' ', '^\\s+': '', '\\s+$': ''}\r\n\r\nfor key, value in regex_dict.items():\r\n cmbs.columns = [re.sub(key, value, col) for col in cmbs.columns]\r\n\r\n# Delete \r\nfor col in list(cmbs.columns.values):\r\n try:\r\n if cmbs[col].str.normalize('NFKD').str.match(' ').all():\r\n cmbs.drop(columns=col, axis=1, inplace=True)\r\n except AttributeError:\r\n continue\r\n\r\ncmbs.to_csv('CMBS Final.csv', index=False, encoding='ISO-8859-1')\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def completion_proximity_score(prefix, completion):
"""Calculate a score based on suffix length where a shorter length always
yields a higher score."""
if prefix == completion:
return float('inf')
else:
return 1.0 / float(len(completion))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def autocomplete(suggest_tree, bktree, prefix, count=5):
"""Suggest top completions for a prefix given a SuggestTree and BKTree.
Completions for a given prefix are weighted primarily by their weight in the
suggest tree, and secondarily by their Levenshtein distance to words in the
BK-tree (where nearby words are weighted higher)."""
completion_weights = suggest_tree.completion_weights(prefix)
if completion_weights:
weight = lambda completion: completion_weights[completion]
proximity = lambda completion: completion_proximity_score(prefix,
completion)
selection_criteria = lambda completion: (weight(completion),
proximity(completion))
completions = completion_weights.keys()
return heapq.nlargest(count, completions, key=selection_criteria)
else:
matches = bktree.search(prefix)
proximity = lambda completion: edit_distance(prefix, completion)
return heapq.nsmallest(count, matches, key=proximity)
def completion_proximity_score(prefix, completion):
"""Calculate a score based on suffix length where a shorter length always
yields a higher score."""
if prefix == completion:
return float('inf')
else:
return 1.0 / float(len(completion))
<|reserved_special_token_1|>
import heapq
from util import edit_distance
def autocomplete(suggest_tree, bktree, prefix, count=5):
"""Suggest top completions for a prefix given a SuggestTree and BKTree.
Completions for a given prefix are weighted primarily by their weight in the
suggest tree, and secondarily by their Levenshtein distance to words in the
BK-tree (where nearby words are weighted higher)."""
completion_weights = suggest_tree.completion_weights(prefix)
if completion_weights:
weight = lambda completion: completion_weights[completion]
proximity = lambda completion: completion_proximity_score(prefix,
completion)
selection_criteria = lambda completion: (weight(completion),
proximity(completion))
completions = completion_weights.keys()
return heapq.nlargest(count, completions, key=selection_criteria)
else:
matches = bktree.search(prefix)
proximity = lambda completion: edit_distance(prefix, completion)
return heapq.nsmallest(count, matches, key=proximity)
def completion_proximity_score(prefix, completion):
"""Calculate a score based on suffix length where a shorter length always
yields a higher score."""
if prefix == completion:
return float('inf')
else:
return 1.0 / float(len(completion))
<|reserved_special_token_1|>
import heapq
from util import edit_distance
def autocomplete(suggest_tree, bktree, prefix, count=5):
"""Suggest top completions for a prefix given a SuggestTree and BKTree.
Completions for a given prefix are weighted primarily by their weight in the
suggest tree, and secondarily by their Levenshtein distance to words in the
BK-tree (where nearby words are weighted higher)."""
completion_weights = suggest_tree.completion_weights(prefix)
if completion_weights:
weight = lambda completion: completion_weights[completion]
proximity = lambda completion: completion_proximity_score(
prefix, completion)
selection_criteria = lambda completion: (
weight(completion), proximity(completion))
completions = completion_weights.keys()
return heapq.nlargest(count, completions, key=selection_criteria)
else:
matches = bktree.search(prefix)
proximity = lambda completion: edit_distance(prefix, completion)
return heapq.nsmallest(count, matches, key=proximity)
def completion_proximity_score(prefix, completion):
"""Calculate a score based on suffix length where a shorter length always
yields a higher score."""
if prefix == completion:
return float("inf")
else:
return 1.0 / float(len(completion))
|
flexible
|
{
"blob_id": "24891cdefcd061f04e7b7768b1bde4e32b78adcc",
"index": 8690,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef completion_proximity_score(prefix, completion):\n \"\"\"Calculate a score based on suffix length where a shorter length always\n yields a higher score.\"\"\"\n if prefix == completion:\n return float('inf')\n else:\n return 1.0 / float(len(completion))\n",
"step-3": "<mask token>\n\n\ndef autocomplete(suggest_tree, bktree, prefix, count=5):\n \"\"\"Suggest top completions for a prefix given a SuggestTree and BKTree.\n \n Completions for a given prefix are weighted primarily by their weight in the \n suggest tree, and secondarily by their Levenshtein distance to words in the\n BK-tree (where nearby words are weighted higher).\"\"\"\n completion_weights = suggest_tree.completion_weights(prefix)\n if completion_weights:\n weight = lambda completion: completion_weights[completion]\n proximity = lambda completion: completion_proximity_score(prefix,\n completion)\n selection_criteria = lambda completion: (weight(completion),\n proximity(completion))\n completions = completion_weights.keys()\n return heapq.nlargest(count, completions, key=selection_criteria)\n else:\n matches = bktree.search(prefix)\n proximity = lambda completion: edit_distance(prefix, completion)\n return heapq.nsmallest(count, matches, key=proximity)\n\n\ndef completion_proximity_score(prefix, completion):\n \"\"\"Calculate a score based on suffix length where a shorter length always\n yields a higher score.\"\"\"\n if prefix == completion:\n return float('inf')\n else:\n return 1.0 / float(len(completion))\n",
"step-4": "import heapq\nfrom util import edit_distance\n\n\ndef autocomplete(suggest_tree, bktree, prefix, count=5):\n \"\"\"Suggest top completions for a prefix given a SuggestTree and BKTree.\n \n Completions for a given prefix are weighted primarily by their weight in the \n suggest tree, and secondarily by their Levenshtein distance to words in the\n BK-tree (where nearby words are weighted higher).\"\"\"\n completion_weights = suggest_tree.completion_weights(prefix)\n if completion_weights:\n weight = lambda completion: completion_weights[completion]\n proximity = lambda completion: completion_proximity_score(prefix,\n completion)\n selection_criteria = lambda completion: (weight(completion),\n proximity(completion))\n completions = completion_weights.keys()\n return heapq.nlargest(count, completions, key=selection_criteria)\n else:\n matches = bktree.search(prefix)\n proximity = lambda completion: edit_distance(prefix, completion)\n return heapq.nsmallest(count, matches, key=proximity)\n\n\ndef completion_proximity_score(prefix, completion):\n \"\"\"Calculate a score based on suffix length where a shorter length always\n yields a higher score.\"\"\"\n if prefix == completion:\n return float('inf')\n else:\n return 1.0 / float(len(completion))\n",
"step-5": "import heapq\nfrom util import edit_distance\n\n\ndef autocomplete(suggest_tree, bktree, prefix, count=5):\n \"\"\"Suggest top completions for a prefix given a SuggestTree and BKTree.\n \n Completions for a given prefix are weighted primarily by their weight in the \n suggest tree, and secondarily by their Levenshtein distance to words in the\n BK-tree (where nearby words are weighted higher).\"\"\"\n completion_weights = suggest_tree.completion_weights(prefix)\n if completion_weights:\n weight = lambda completion: completion_weights[completion]\n proximity = lambda completion: completion_proximity_score(\n prefix, completion)\n selection_criteria = lambda completion: (\n weight(completion), proximity(completion))\n completions = completion_weights.keys()\n return heapq.nlargest(count, completions, key=selection_criteria)\n else:\n matches = bktree.search(prefix)\n proximity = lambda completion: edit_distance(prefix, completion)\n return heapq.nsmallest(count, matches, key=proximity)\n\n \ndef completion_proximity_score(prefix, completion):\n \"\"\"Calculate a score based on suffix length where a shorter length always\n yields a higher score.\"\"\"\n if prefix == completion:\n return float(\"inf\")\n else:\n return 1.0 / float(len(completion))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class CityscapesTestConfig(CityscapesCommonConfig):
<|reserved_special_token_0|>
batch_size = 1
list_path = 'val.txt'
@classmethod
def rules(cls):
"""Return rules for checking."""
rules_CityscapesTestConfig = {'batch_size': {'type': int},
'list_path': {'type': str}}
return rules_CityscapesTestConfig
class CityscapesConfig(ConfigSerializable):
"""Default Dataset config for Cityscapes."""
common = CityscapesCommonConfig
train = CityscapesTrainConfig
val = CityscapesValConfig
test = CityscapesTestConfig
@classmethod
def rules(cls):
"""Return rules for checking."""
rules_Cityscapes = {'common': {'type': dict}, 'train': {'type':
dict}, 'val': {'type': dict}, 'test': {'type': dict}}
return rules_Cityscapes
@classmethod
def get_config(cls):
"""Get sub config."""
return {'common': cls.common, 'train': cls.train, 'val': cls.val,
'test': cls.test}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CityscapesCommonConfig(BaseConfig):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class CityscapesTrainConfig(CityscapesCommonConfig):
"""Default Dataset config for Cityscapes."""
batch_size = 1
list_path = 'train.txt'
@classmethod
def rules(cls):
"""Return rules for checking."""
rules_CityscapesTrainConfig = {'batch_size': {'type': int},
'list_path': {'type': str}}
return rules_CityscapesTrainConfig
class CityscapesValConfig(CityscapesCommonConfig):
"""Default Dataset config for Cityscapes."""
batch_size = 1
list_path = 'val.txt'
@classmethod
def rules(cls):
"""Return rules for checking."""
rules_CityscapesValConfig = {'batch_size': {'type': int},
'list_path': {'type': str}}
return rules_CityscapesValConfig
class CityscapesTestConfig(CityscapesCommonConfig):
"""Default Dataset config for Cityscapes."""
batch_size = 1
list_path = 'val.txt'
@classmethod
def rules(cls):
"""Return rules for checking."""
rules_CityscapesTestConfig = {'batch_size': {'type': int},
'list_path': {'type': str}}
return rules_CityscapesTestConfig
class CityscapesConfig(ConfigSerializable):
"""Default Dataset config for Cityscapes."""
common = CityscapesCommonConfig
train = CityscapesTrainConfig
val = CityscapesValConfig
test = CityscapesTestConfig
@classmethod
def rules(cls):
"""Return rules for checking."""
rules_Cityscapes = {'common': {'type': dict}, 'train': {'type':
dict}, 'val': {'type': dict}, 'test': {'type': dict}}
return rules_Cityscapes
@classmethod
def get_config(cls):
"""Get sub config."""
return {'common': cls.common, 'train': cls.train, 'val': cls.val,
'test': cls.test}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CityscapesCommonConfig(BaseConfig):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@classmethod
def rules(cls):
"""Return rules for checking."""
rules_CityscapesConfig = {'batch_size': {'type': int}, 'root_path':
{'type': str}, 'num_parallel_batches': {'type': int},
'fixed_size': {'type': bool}}
return rules_CityscapesConfig
class CityscapesTrainConfig(CityscapesCommonConfig):
"""Default Dataset config for Cityscapes."""
batch_size = 1
list_path = 'train.txt'
@classmethod
def rules(cls):
"""Return rules for checking."""
rules_CityscapesTrainConfig = {'batch_size': {'type': int},
'list_path': {'type': str}}
return rules_CityscapesTrainConfig
class CityscapesValConfig(CityscapesCommonConfig):
"""Default Dataset config for Cityscapes."""
batch_size = 1
list_path = 'val.txt'
@classmethod
def rules(cls):
"""Return rules for checking."""
rules_CityscapesValConfig = {'batch_size': {'type': int},
'list_path': {'type': str}}
return rules_CityscapesValConfig
class CityscapesTestConfig(CityscapesCommonConfig):
"""Default Dataset config for Cityscapes."""
batch_size = 1
list_path = 'val.txt'
@classmethod
def rules(cls):
"""Return rules for checking."""
rules_CityscapesTestConfig = {'batch_size': {'type': int},
'list_path': {'type': str}}
return rules_CityscapesTestConfig
class CityscapesConfig(ConfigSerializable):
"""Default Dataset config for Cityscapes."""
common = CityscapesCommonConfig
train = CityscapesTrainConfig
val = CityscapesValConfig
test = CityscapesTestConfig
@classmethod
def rules(cls):
"""Return rules for checking."""
rules_Cityscapes = {'common': {'type': dict}, 'train': {'type':
dict}, 'val': {'type': dict}, 'test': {'type': dict}}
return rules_Cityscapes
@classmethod
def get_config(cls):
"""Get sub config."""
return {'common': cls.common, 'train': cls.train, 'val': cls.val,
'test': cls.test}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CityscapesCommonConfig(BaseConfig):
"""Default Dataset config for Cityscapes."""
batch_size = 1
root_path = None
num_parallel_batches = 64
fixed_size = True
train_portion = 1.0
@classmethod
def rules(cls):
"""Return rules for checking."""
rules_CityscapesConfig = {'batch_size': {'type': int}, 'root_path':
{'type': str}, 'num_parallel_batches': {'type': int},
'fixed_size': {'type': bool}}
return rules_CityscapesConfig
class CityscapesTrainConfig(CityscapesCommonConfig):
"""Default Dataset config for Cityscapes."""
batch_size = 1
list_path = 'train.txt'
@classmethod
def rules(cls):
"""Return rules for checking."""
rules_CityscapesTrainConfig = {'batch_size': {'type': int},
'list_path': {'type': str}}
return rules_CityscapesTrainConfig
class CityscapesValConfig(CityscapesCommonConfig):
"""Default Dataset config for Cityscapes."""
batch_size = 1
list_path = 'val.txt'
@classmethod
def rules(cls):
"""Return rules for checking."""
rules_CityscapesValConfig = {'batch_size': {'type': int},
'list_path': {'type': str}}
return rules_CityscapesValConfig
class CityscapesTestConfig(CityscapesCommonConfig):
"""Default Dataset config for Cityscapes."""
batch_size = 1
list_path = 'val.txt'
@classmethod
def rules(cls):
"""Return rules for checking."""
rules_CityscapesTestConfig = {'batch_size': {'type': int},
'list_path': {'type': str}}
return rules_CityscapesTestConfig
class CityscapesConfig(ConfigSerializable):
"""Default Dataset config for Cityscapes."""
common = CityscapesCommonConfig
train = CityscapesTrainConfig
val = CityscapesValConfig
test = CityscapesTestConfig
@classmethod
def rules(cls):
"""Return rules for checking."""
rules_Cityscapes = {'common': {'type': dict}, 'train': {'type':
dict}, 'val': {'type': dict}, 'test': {'type': dict}}
return rules_Cityscapes
@classmethod
def get_config(cls):
"""Get sub config."""
return {'common': cls.common, 'train': cls.train, 'val': cls.val,
'test': cls.test}
<|reserved_special_token_1|>
# -*- coding=utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Default configs."""
from .base import BaseConfig
from zeus.common import ConfigSerializable
class CityscapesCommonConfig(BaseConfig):
"""Default Dataset config for Cityscapes."""
batch_size = 1
root_path = None
num_parallel_batches = 64
fixed_size = True
train_portion = 1.0
@classmethod
def rules(cls):
"""Return rules for checking."""
rules_CityscapesConfig = {"batch_size": {"type": int},
"root_path": {"type": str},
"num_parallel_batches": {"type": int},
"fixed_size": {"type": bool}
}
return rules_CityscapesConfig
class CityscapesTrainConfig(CityscapesCommonConfig):
"""Default Dataset config for Cityscapes."""
batch_size = 1
list_path = 'train.txt'
@classmethod
def rules(cls):
"""Return rules for checking."""
rules_CityscapesTrainConfig = {"batch_size": {"type": int},
"list_path": {"type": str}
}
return rules_CityscapesTrainConfig
class CityscapesValConfig(CityscapesCommonConfig):
"""Default Dataset config for Cityscapes."""
batch_size = 1
list_path = 'val.txt'
@classmethod
def rules(cls):
"""Return rules for checking."""
rules_CityscapesValConfig = {"batch_size": {"type": int},
"list_path": {"type": str}
}
return rules_CityscapesValConfig
class CityscapesTestConfig(CityscapesCommonConfig):
"""Default Dataset config for Cityscapes."""
batch_size = 1
list_path = 'val.txt'
@classmethod
def rules(cls):
"""Return rules for checking."""
rules_CityscapesTestConfig = {"batch_size": {"type": int},
"list_path": {"type": str}
}
return rules_CityscapesTestConfig
class CityscapesConfig(ConfigSerializable):
"""Default Dataset config for Cityscapes."""
common = CityscapesCommonConfig
train = CityscapesTrainConfig
val = CityscapesValConfig
test = CityscapesTestConfig
@classmethod
def rules(cls):
"""Return rules for checking."""
rules_Cityscapes = {"common": {"type": dict},
"train": {"type": dict},
"val": {"type": dict},
"test": {"type": dict}
}
return rules_Cityscapes
@classmethod
def get_config(cls):
"""Get sub config."""
return {'common': cls.common,
'train': cls.train,
'val': cls.val,
'test': cls.test
}
|
flexible
|
{
"blob_id": "f3da38f2c4fda0a1d54e79c2c21070f98002b88d",
"index": 3351,
"step-1": "<mask token>\n\n\nclass CityscapesTestConfig(CityscapesCommonConfig):\n <mask token>\n batch_size = 1\n list_path = 'val.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesTestConfig = {'batch_size': {'type': int},\n 'list_path': {'type': str}}\n return rules_CityscapesTestConfig\n\n\nclass CityscapesConfig(ConfigSerializable):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n common = CityscapesCommonConfig\n train = CityscapesTrainConfig\n val = CityscapesValConfig\n test = CityscapesTestConfig\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_Cityscapes = {'common': {'type': dict}, 'train': {'type':\n dict}, 'val': {'type': dict}, 'test': {'type': dict}}\n return rules_Cityscapes\n\n @classmethod\n def get_config(cls):\n \"\"\"Get sub config.\"\"\"\n return {'common': cls.common, 'train': cls.train, 'val': cls.val,\n 'test': cls.test}\n",
"step-2": "<mask token>\n\n\nclass CityscapesCommonConfig(BaseConfig):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass CityscapesTrainConfig(CityscapesCommonConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n batch_size = 1\n list_path = 'train.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesTrainConfig = {'batch_size': {'type': int},\n 'list_path': {'type': str}}\n return rules_CityscapesTrainConfig\n\n\nclass CityscapesValConfig(CityscapesCommonConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n batch_size = 1\n list_path = 'val.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesValConfig = {'batch_size': {'type': int},\n 'list_path': {'type': str}}\n return rules_CityscapesValConfig\n\n\nclass CityscapesTestConfig(CityscapesCommonConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n batch_size = 1\n list_path = 'val.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesTestConfig = {'batch_size': {'type': int},\n 'list_path': {'type': str}}\n return rules_CityscapesTestConfig\n\n\nclass CityscapesConfig(ConfigSerializable):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n common = CityscapesCommonConfig\n train = CityscapesTrainConfig\n val = CityscapesValConfig\n test = CityscapesTestConfig\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_Cityscapes = {'common': {'type': dict}, 'train': {'type':\n dict}, 'val': {'type': dict}, 'test': {'type': dict}}\n return rules_Cityscapes\n\n @classmethod\n def get_config(cls):\n \"\"\"Get sub config.\"\"\"\n return {'common': cls.common, 'train': cls.train, 'val': cls.val,\n 'test': cls.test}\n",
"step-3": "<mask token>\n\n\nclass CityscapesCommonConfig(BaseConfig):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesConfig = {'batch_size': {'type': int}, 'root_path':\n {'type': str}, 'num_parallel_batches': {'type': int},\n 'fixed_size': {'type': bool}}\n return rules_CityscapesConfig\n\n\nclass CityscapesTrainConfig(CityscapesCommonConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n batch_size = 1\n list_path = 'train.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesTrainConfig = {'batch_size': {'type': int},\n 'list_path': {'type': str}}\n return rules_CityscapesTrainConfig\n\n\nclass CityscapesValConfig(CityscapesCommonConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n batch_size = 1\n list_path = 'val.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesValConfig = {'batch_size': {'type': int},\n 'list_path': {'type': str}}\n return rules_CityscapesValConfig\n\n\nclass CityscapesTestConfig(CityscapesCommonConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n batch_size = 1\n list_path = 'val.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesTestConfig = {'batch_size': {'type': int},\n 'list_path': {'type': str}}\n return rules_CityscapesTestConfig\n\n\nclass CityscapesConfig(ConfigSerializable):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n common = CityscapesCommonConfig\n train = CityscapesTrainConfig\n val = CityscapesValConfig\n test = CityscapesTestConfig\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_Cityscapes = {'common': {'type': dict}, 'train': {'type':\n dict}, 'val': {'type': dict}, 'test': {'type': dict}}\n return rules_Cityscapes\n\n @classmethod\n def get_config(cls):\n \"\"\"Get sub 
config.\"\"\"\n return {'common': cls.common, 'train': cls.train, 'val': cls.val,\n 'test': cls.test}\n",
"step-4": "<mask token>\n\n\nclass CityscapesCommonConfig(BaseConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n batch_size = 1\n root_path = None\n num_parallel_batches = 64\n fixed_size = True\n train_portion = 1.0\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesConfig = {'batch_size': {'type': int}, 'root_path':\n {'type': str}, 'num_parallel_batches': {'type': int},\n 'fixed_size': {'type': bool}}\n return rules_CityscapesConfig\n\n\nclass CityscapesTrainConfig(CityscapesCommonConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n batch_size = 1\n list_path = 'train.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesTrainConfig = {'batch_size': {'type': int},\n 'list_path': {'type': str}}\n return rules_CityscapesTrainConfig\n\n\nclass CityscapesValConfig(CityscapesCommonConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n batch_size = 1\n list_path = 'val.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesValConfig = {'batch_size': {'type': int},\n 'list_path': {'type': str}}\n return rules_CityscapesValConfig\n\n\nclass CityscapesTestConfig(CityscapesCommonConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n batch_size = 1\n list_path = 'val.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesTestConfig = {'batch_size': {'type': int},\n 'list_path': {'type': str}}\n return rules_CityscapesTestConfig\n\n\nclass CityscapesConfig(ConfigSerializable):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n common = CityscapesCommonConfig\n train = CityscapesTrainConfig\n val = CityscapesValConfig\n test = CityscapesTestConfig\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_Cityscapes = {'common': {'type': dict}, 'train': {'type':\n dict}, 'val': {'type': dict}, 'test': {'type': dict}}\n return 
rules_Cityscapes\n\n @classmethod\n def get_config(cls):\n \"\"\"Get sub config.\"\"\"\n return {'common': cls.common, 'train': cls.train, 'val': cls.val,\n 'test': cls.test}\n",
"step-5": "# -*- coding=utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# MIT License for more details.\n\"\"\"Default configs.\"\"\"\n\nfrom .base import BaseConfig\nfrom zeus.common import ConfigSerializable\n\n\nclass CityscapesCommonConfig(BaseConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n\n batch_size = 1\n root_path = None\n num_parallel_batches = 64\n fixed_size = True\n train_portion = 1.0\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesConfig = {\"batch_size\": {\"type\": int},\n \"root_path\": {\"type\": str},\n \"num_parallel_batches\": {\"type\": int},\n \"fixed_size\": {\"type\": bool}\n }\n return rules_CityscapesConfig\n\n\nclass CityscapesTrainConfig(CityscapesCommonConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n\n batch_size = 1\n list_path = 'train.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesTrainConfig = {\"batch_size\": {\"type\": int},\n \"list_path\": {\"type\": str}\n }\n return rules_CityscapesTrainConfig\n\n\nclass CityscapesValConfig(CityscapesCommonConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n\n batch_size = 1\n list_path = 'val.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesValConfig = {\"batch_size\": {\"type\": int},\n \"list_path\": {\"type\": str}\n }\n return rules_CityscapesValConfig\n\n\nclass CityscapesTestConfig(CityscapesCommonConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n\n batch_size = 1\n list_path = 'val.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules 
for checking.\"\"\"\n rules_CityscapesTestConfig = {\"batch_size\": {\"type\": int},\n \"list_path\": {\"type\": str}\n }\n return rules_CityscapesTestConfig\n\n\nclass CityscapesConfig(ConfigSerializable):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n\n common = CityscapesCommonConfig\n train = CityscapesTrainConfig\n val = CityscapesValConfig\n test = CityscapesTestConfig\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_Cityscapes = {\"common\": {\"type\": dict},\n \"train\": {\"type\": dict},\n \"val\": {\"type\": dict},\n \"test\": {\"type\": dict}\n }\n return rules_Cityscapes\n\n @classmethod\n def get_config(cls):\n \"\"\"Get sub config.\"\"\"\n return {'common': cls.common,\n 'train': cls.train,\n 'val': cls.val,\n 'test': cls.test\n }\n",
"step-ids": [
8,
18,
19,
21,
23
]
}
|
[
8,
18,
19,
21,
23
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(friends[0])
print(friends[1])
print(len(friends))
<|reserved_special_token_0|>
print(friends[0][0])
friends.append('Jen')
print(friends)
new_friends.remove(['Anne', 27])
print(new_friends)
<|reserved_special_token_1|>
friends = ['Rolf', 'Bob', 'Anne']
print(friends[0])
print(friends[1])
print(len(friends))
new_friends = [['Rolf', 24], ['Bob', 30], ['Anne', 27], ['Charlie', 25], [
'Jen', 25], ['Adam', 29]]
print(friends[0][0])
friends.append('Jen')
print(friends)
new_friends.remove(['Anne', 27])
print(new_friends)
<|reserved_special_token_1|>
friends = ["Rolf", "Bob", "Anne"]
print(friends[0])
print(friends[1])
print(len(friends))
new_friends = [
["Rolf", 24],
["Bob", 30],
["Anne", 27],
["Charlie", 25],
["Jen", 25],
["Adam", 29]
]
print(friends[0][0])
friends.append("Jen")
print(friends)
new_friends.remove(["Anne", 27])
print(new_friends)
|
flexible
|
{
"blob_id": "355d60300cbbed817b4512e9b02cc4dd53d1293e",
"index": 2692,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(friends[0])\nprint(friends[1])\nprint(len(friends))\n<mask token>\nprint(friends[0][0])\nfriends.append('Jen')\nprint(friends)\nnew_friends.remove(['Anne', 27])\nprint(new_friends)\n",
"step-3": "friends = ['Rolf', 'Bob', 'Anne']\nprint(friends[0])\nprint(friends[1])\nprint(len(friends))\nnew_friends = [['Rolf', 24], ['Bob', 30], ['Anne', 27], ['Charlie', 25], [\n 'Jen', 25], ['Adam', 29]]\nprint(friends[0][0])\nfriends.append('Jen')\nprint(friends)\nnew_friends.remove(['Anne', 27])\nprint(new_friends)\n",
"step-4": "friends = [\"Rolf\", \"Bob\", \"Anne\"]\n\nprint(friends[0])\nprint(friends[1])\nprint(len(friends))\n\nnew_friends = [\n [\"Rolf\", 24],\n [\"Bob\", 30],\n [\"Anne\", 27],\n [\"Charlie\", 25],\n [\"Jen\", 25],\n [\"Adam\", 29]\n]\nprint(friends[0][0])\n\nfriends.append(\"Jen\")\nprint(friends)\n\nnew_friends.remove([\"Anne\", 27])\nprint(new_friends)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 3.1.4 on 2020-12-11 17:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0016_auto_20201211_2158'),
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=256)),
('date', models.DateTimeField(auto_now_add=True)),
('std', models.IntegerField()),
('description', models.TextField(blank=True)),
('asker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='questions', to='core.person')),
],
),
]
|
normal
|
{
"blob_id": "e8011e98da342e501070febf421e9f8d0b74d64e",
"index": 6813,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core', '0016_auto_20201211_2158')]\n operations = [migrations.CreateModel(name='Question', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('title', models.CharField(max_length=\n 256)), ('date', models.DateTimeField(auto_now_add=True)), ('std',\n models.IntegerField()), ('description', models.TextField(blank=True\n )), ('asker', models.ForeignKey(on_delete=django.db.models.deletion\n .CASCADE, related_name='questions', to='core.person'))])]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core', '0016_auto_20201211_2158')]\n operations = [migrations.CreateModel(name='Question', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('title', models.CharField(max_length=\n 256)), ('date', models.DateTimeField(auto_now_add=True)), ('std',\n models.IntegerField()), ('description', models.TextField(blank=True\n )), ('asker', models.ForeignKey(on_delete=django.db.models.deletion\n .CASCADE, related_name='questions', to='core.person'))])]\n",
"step-5": "# Generated by Django 3.1.4 on 2020-12-11 17:50\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0016_auto_20201211_2158'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Question',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=256)),\n ('date', models.DateTimeField(auto_now_add=True)),\n ('std', models.IntegerField()),\n ('description', models.TextField(blank=True)),\n ('asker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='questions', to='core.person')),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def matches_panic_funcs(name):
"""If the passed name contains one of the known panic_functions,
return the match
"""
for func in panic_functions:
if func in name:
return func
return ''
<|reserved_special_token_0|>
def any_origin_matches_panic_func(elf, addr):
"""returns name if any origin for the passed addr matches one
of the functions in the panic_functions array
"""
origins = linkage_or_origin_all_parents(elf, addr)
for origin in origins:
name = matches_panic_funcs(origin)
if name:
return name
return ''
def any_linkage_matches_panic_func(elf, addr):
"""returns True + name if any linkage for the passed addr matches one
of the functions in the panic_functions array
"""
linkages = linkage_or_origin_all_parents(elf, addr, True)
for linkage in linkages:
name = matches_panic_funcs(linkage)
if name:
return name
return ''
def check_for_source_in_parent(elf, addr):
"""Takes in a dwarfdump lookup including parents of the source DWARF
location, returns the first parent with a call file not in
the core library. If found, this often indicates the source of the panic
in the Tock source code.
"""
result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),
capture_output=True, text=True)
dwarfdump = result.stdout
matches = re.findall(dw_at_file_re, dwarfdump)
def getFile(line):
return line.strip().split('"')[1]
source_files = list(map(getFile, matches))
for i, f in enumerate(source_files[::-1]):
if '/core/' not in f:
line_matches = re.findall(dw_at_line_re, dwarfdump)
def getLine(line):
return line.strip().split('(')[1].split(')')[0]
source_lines = list(map(getLine, line_matches))
source_line = source_lines[::-1][i]
return f, source_line
return '', ''
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('ELF', help='ELF file for analysis')
parser.add_argument('--verbose', '-v', action='store_true', help=
'Output additional DWARF info for each panic location in the binary')
parser.add_argument('--riscv', action='store_true', help=
'Use risc-v based objdump')
return parser.parse_args()
def find_all_panics(objdump, elf, is_riscv):
panic_list = []
within_core_panic_list = []
no_info_panic_list = []
result = subprocess.run((objdump, '-d', elf), capture_output=True, text
=True)
objdump_out = result.stdout
for function in panic_functions:
function_re = re.compile('.*:.*#.*' + function + '.*')
if not is_riscv:
function_re = re.compile('.*:.*<.*' + function + '.*')
matches = re.findall(function_re, objdump_out)
def getAddr(line):
return line.strip().split(':')[0]
addrs = list(map(getAddr, matches))
for addr in addrs:
result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, elf),
capture_output=True, text=True)
dwarfdump = result.stdout
dw_at_file = re.search(dw_at_file_re, dwarfdump)
dw_at_line = re.search(dw_at_line_re, dwarfdump)
line_info = re.search(line_info_re, dwarfdump)
abstract_origin = re.search(abstract_origin_re, dwarfdump)
linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)
file_string = ''
line_string = ''
line_info_string = ''
abstract_origin_string = ''
linkage_name_string = ''
if dw_at_file:
file_string = dw_at_file.group(0).strip()
line_string = dw_at_line.group(0).strip()
panicinfo = {}
panicinfo['addr'] = addr
panicinfo['function'] = function
if line_info:
line_info_string = line_info.group(0).strip()
panicinfo['line_info'] = line_info_string
if abstract_origin:
abstract_origin_string = abstract_origin.group(0).strip()
if linkage_name:
linkage_name_string = linkage_name.group(0).strip()
if ('DW_AT_call_file' in file_string and 'DW_AT_decl_file' in
file_string):
raise RuntimeError('I misunderstand DWARF')
if ('DW_AT_call_file' in file_string or 'DW_AT_decl_file' in
file_string):
filename = file_string.split('"')[1]
line_num = line_string.split('(')[1].split(')')[0]
if 'DW_AT_call_file' in file_string:
panicinfo['call_file'] = filename
panicinfo['call_line'] = line_num
if 'DW_AT_decl_file' in file_string:
panicinfo['decl_file'] = filename
panicinfo['decl_line'] = line_num
if not '/core/' in filename:
if not 'closure' in abstract_origin_string:
panicinfo['best_guess_source'] = 'call/decl'
else:
panicinfo['best_guess_source'
] = 'call-closure-line-info'
panic_list.append(panicinfo)
continue
else:
parent_file, parent_line = check_for_source_in_parent(elf,
addr)
if parent_file:
panicinfo['parent_call_file'] = parent_file
panicinfo['parent_call_line'] = parent_line
panicinfo['best_guess_source'] = 'parent'
panic_list.append(panicinfo)
continue
elif not abstract_origin and not linkage_name:
no_info_panic_list.append(panicinfo)
continue
elif abstract_origin:
if 'core' in abstract_origin_string:
name = matches_panic_funcs(abstract_origin_string)
if name:
within_core_panic_list.append(panicinfo)
continue
else:
name2 = any_origin_matches_panic_func(elf, addr
)
name3 = any_linkage_matches_panic_func(elf,
addr)
if name2:
within_core_panic_list.append(panicinfo)
continue
elif name3:
within_core_panic_list.append(panicinfo)
continue
else:
no_info_panic_list.append(panicinfo)
continue
elif 'closure' in abstract_origin_string:
panicinfo['best_guess_source'] = 'lineinfo'
panic_list.append(panicinfo)
continue
else:
raise RuntimeError('Unhandled')
if linkage_name:
name = matches_panic_funcs(linkage_name_string)
if name:
within_core_panic_list.append(panicinfo)
continue
else:
no_info_panic_list.append(panicinfo)
print(
'Failed to match panic but we probably have enough info to trace it up. Linkage name: {}, addr: {}'
.format(linkage_name_string, addr))
continue
no_info_panic_list.append(panic_info)
print('did not find source for panic: {}'.format(addr))
continue
elif abstract_origin:
origin = abstract_origin_string.split('"')[1]
panicinfo['abstract_origin'] = origin
if 'core' in origin:
if matches_panic_funcs(origin):
within_core_panic_list.append(panicinfo)
continue
no_info_panic_list.append(panicinfo)
print(
'Probably could add this origin or one of its parents to the panic function list: {}'
.format(abstract_origin_string))
continue
else:
panicinfo['best_guess_source'] = 'abstract_origin + line'
panic_list.append(panicinfo)
continue
else:
try:
dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[-1
].strip()
function_name = dw_at_name_string.split('"')[1]
if 'OUTLINED_FUNCTION_' in function_name:
if function_name not in panic_functions:
panic_functions.append(function_name + '>')
within_core_panic_list.append(panicinfo)
continue
no_info_panic_list.append(panicinfo)
continue
except:
no_info_panic_list.append(panicinfo)
continue
raise RuntimeError('BUG: Should not reach here')
return panic_list, within_core_panic_list, no_info_panic_list
<|reserved_special_token_0|>
def main():
args = parse_args()
if sys.version_info.minor < 7:
print('This tool requires Python 3.7+')
return -1
print('Tock panic report for ' + args.ELF)
objdump = ARM_OBJDUMP
if args.riscv:
objdump = RISCV_OBJDUMP
panic_list, within_core_panic_list, no_info_panic_list = find_all_panics(
objdump, args.ELF, args.riscv)
print('num_panics: {}'.format(len(panic_list)))
buckets_list = {}
for f in panic_functions:
buckets_list[f] = []
for panic in panic_list:
buckets_list[panic['function']].append(panic)
for f, l in buckets_list.items():
if len(l) > 0:
print('{}: {}'.format(f, len(l)))
for p in l:
pretty_print(p)
if args.verbose:
print(p)
print()
print('num panics in core ignored: {}'.format(len(within_core_panic_list)))
print('num panics for which no info available: {}'.format(len(
no_info_panic_list)))
if args.verbose:
print(
'If more debug info is needed, run dwarfdump directly on the address in question.'
)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if platform.system() == 'Darwin':
DWARFDUMP = 'dwarfdump'
elif platform.system() == 'Linux':
DWARFDUMP = 'llvm-dwarfdump'
else:
raise NotImplementedError('Unknown platform')
<|reserved_special_token_0|>
def matches_panic_funcs(name):
"""If the passed name contains one of the known panic_functions,
return the match
"""
for func in panic_functions:
if func in name:
return func
return ''
def linkage_or_origin_all_parents(elf, addr, linkage=False):
"""Returns a list of the abstract origin or linkage of all parents of the dwarf
location for the passed address
"""
result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),
capture_output=True, text=True)
dwarfdump = result.stdout
regex = abstract_origin_re
if linkage:
regex = dw_at_linkage_name_re
matches = re.findall(regex, dwarfdump)
def getFunction(line):
return line.strip().split('"')[1]
origins = list(map(getFunction, matches))
return origins
def any_origin_matches_panic_func(elf, addr):
"""returns name if any origin for the passed addr matches one
of the functions in the panic_functions array
"""
origins = linkage_or_origin_all_parents(elf, addr)
for origin in origins:
name = matches_panic_funcs(origin)
if name:
return name
return ''
def any_linkage_matches_panic_func(elf, addr):
"""returns True + name if any linkage for the passed addr matches one
of the functions in the panic_functions array
"""
linkages = linkage_or_origin_all_parents(elf, addr, True)
for linkage in linkages:
name = matches_panic_funcs(linkage)
if name:
return name
return ''
def check_for_source_in_parent(elf, addr):
"""Takes in a dwarfdump lookup including parents of the source DWARF
location, returns the first parent with a call file not in
the core library. If found, this often indicates the source of the panic
in the Tock source code.
"""
result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),
capture_output=True, text=True)
dwarfdump = result.stdout
matches = re.findall(dw_at_file_re, dwarfdump)
def getFile(line):
return line.strip().split('"')[1]
source_files = list(map(getFile, matches))
for i, f in enumerate(source_files[::-1]):
if '/core/' not in f:
line_matches = re.findall(dw_at_line_re, dwarfdump)
def getLine(line):
return line.strip().split('(')[1].split(')')[0]
source_lines = list(map(getLine, line_matches))
source_line = source_lines[::-1][i]
return f, source_line
return '', ''
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('ELF', help='ELF file for analysis')
parser.add_argument('--verbose', '-v', action='store_true', help=
'Output additional DWARF info for each panic location in the binary')
parser.add_argument('--riscv', action='store_true', help=
'Use risc-v based objdump')
return parser.parse_args()
def find_all_panics(objdump, elf, is_riscv):
panic_list = []
within_core_panic_list = []
no_info_panic_list = []
result = subprocess.run((objdump, '-d', elf), capture_output=True, text
=True)
objdump_out = result.stdout
for function in panic_functions:
function_re = re.compile('.*:.*#.*' + function + '.*')
if not is_riscv:
function_re = re.compile('.*:.*<.*' + function + '.*')
matches = re.findall(function_re, objdump_out)
def getAddr(line):
return line.strip().split(':')[0]
addrs = list(map(getAddr, matches))
for addr in addrs:
result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, elf),
capture_output=True, text=True)
dwarfdump = result.stdout
dw_at_file = re.search(dw_at_file_re, dwarfdump)
dw_at_line = re.search(dw_at_line_re, dwarfdump)
line_info = re.search(line_info_re, dwarfdump)
abstract_origin = re.search(abstract_origin_re, dwarfdump)
linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)
file_string = ''
line_string = ''
line_info_string = ''
abstract_origin_string = ''
linkage_name_string = ''
if dw_at_file:
file_string = dw_at_file.group(0).strip()
line_string = dw_at_line.group(0).strip()
panicinfo = {}
panicinfo['addr'] = addr
panicinfo['function'] = function
if line_info:
line_info_string = line_info.group(0).strip()
panicinfo['line_info'] = line_info_string
if abstract_origin:
abstract_origin_string = abstract_origin.group(0).strip()
if linkage_name:
linkage_name_string = linkage_name.group(0).strip()
if ('DW_AT_call_file' in file_string and 'DW_AT_decl_file' in
file_string):
raise RuntimeError('I misunderstand DWARF')
if ('DW_AT_call_file' in file_string or 'DW_AT_decl_file' in
file_string):
filename = file_string.split('"')[1]
line_num = line_string.split('(')[1].split(')')[0]
if 'DW_AT_call_file' in file_string:
panicinfo['call_file'] = filename
panicinfo['call_line'] = line_num
if 'DW_AT_decl_file' in file_string:
panicinfo['decl_file'] = filename
panicinfo['decl_line'] = line_num
if not '/core/' in filename:
if not 'closure' in abstract_origin_string:
panicinfo['best_guess_source'] = 'call/decl'
else:
panicinfo['best_guess_source'
] = 'call-closure-line-info'
panic_list.append(panicinfo)
continue
else:
parent_file, parent_line = check_for_source_in_parent(elf,
addr)
if parent_file:
panicinfo['parent_call_file'] = parent_file
panicinfo['parent_call_line'] = parent_line
panicinfo['best_guess_source'] = 'parent'
panic_list.append(panicinfo)
continue
elif not abstract_origin and not linkage_name:
no_info_panic_list.append(panicinfo)
continue
elif abstract_origin:
if 'core' in abstract_origin_string:
name = matches_panic_funcs(abstract_origin_string)
if name:
within_core_panic_list.append(panicinfo)
continue
else:
name2 = any_origin_matches_panic_func(elf, addr
)
name3 = any_linkage_matches_panic_func(elf,
addr)
if name2:
within_core_panic_list.append(panicinfo)
continue
elif name3:
within_core_panic_list.append(panicinfo)
continue
else:
no_info_panic_list.append(panicinfo)
continue
elif 'closure' in abstract_origin_string:
panicinfo['best_guess_source'] = 'lineinfo'
panic_list.append(panicinfo)
continue
else:
raise RuntimeError('Unhandled')
if linkage_name:
name = matches_panic_funcs(linkage_name_string)
if name:
within_core_panic_list.append(panicinfo)
continue
else:
no_info_panic_list.append(panicinfo)
print(
'Failed to match panic but we probably have enough info to trace it up. Linkage name: {}, addr: {}'
.format(linkage_name_string, addr))
continue
no_info_panic_list.append(panic_info)
print('did not find source for panic: {}'.format(addr))
continue
elif abstract_origin:
origin = abstract_origin_string.split('"')[1]
panicinfo['abstract_origin'] = origin
if 'core' in origin:
if matches_panic_funcs(origin):
within_core_panic_list.append(panicinfo)
continue
no_info_panic_list.append(panicinfo)
print(
'Probably could add this origin or one of its parents to the panic function list: {}'
.format(abstract_origin_string))
continue
else:
panicinfo['best_guess_source'] = 'abstract_origin + line'
panic_list.append(panicinfo)
continue
else:
try:
dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[-1
].strip()
function_name = dw_at_name_string.split('"')[1]
if 'OUTLINED_FUNCTION_' in function_name:
if function_name not in panic_functions:
panic_functions.append(function_name + '>')
within_core_panic_list.append(panicinfo)
continue
no_info_panic_list.append(panicinfo)
continue
except:
no_info_panic_list.append(panicinfo)
continue
raise RuntimeError('BUG: Should not reach here')
return panic_list, within_core_panic_list, no_info_panic_list
def pretty_print(panicinfo):
if panicinfo['best_guess_source'] == 'call/decl':
try:
print('\t{} -- {}:{}'.format(panicinfo['addr'], panicinfo[
'call_file'], panicinfo['call_line']))
except:
print('\t{} -- in function starting at {}:{}'.format(panicinfo[
'addr'], panicinfo['decl_file'], panicinfo['decl_line']))
elif panicinfo['best_guess_source'] == 'parent':
print('\t{} -- at or in function starting at {}:{}'.format(
panicinfo['addr'], panicinfo['parent_call_file'], panicinfo[
'parent_call_line']))
elif panicinfo['best_guess_source'] == 'lineinfo':
print('\t{} -- in closure, try: {}'.format(panicinfo['addr'],
panicinfo['line_info']))
elif panicinfo['best_guess_source'] == 'abstract_origin + line':
print('\t{} -- line_info: {} from origin :{}'.format(panicinfo[
'addr'], panicinfo['line_info'], panicinfo['abstract_origin']))
elif panicinfo['best_guess_source'] == 'call-closure-line-info':
print('\t{} -- in closure starting on line_info: {}'.format(
panicinfo['addr'], panicinfo['line_info']))
else:
raise RuntimeError('Missing best guess source: {}'.format(panicinfo))
def main():
args = parse_args()
if sys.version_info.minor < 7:
print('This tool requires Python 3.7+')
return -1
print('Tock panic report for ' + args.ELF)
objdump = ARM_OBJDUMP
if args.riscv:
objdump = RISCV_OBJDUMP
panic_list, within_core_panic_list, no_info_panic_list = find_all_panics(
objdump, args.ELF, args.riscv)
print('num_panics: {}'.format(len(panic_list)))
buckets_list = {}
for f in panic_functions:
buckets_list[f] = []
for panic in panic_list:
buckets_list[panic['function']].append(panic)
for f, l in buckets_list.items():
if len(l) > 0:
print('{}: {}'.format(f, len(l)))
for p in l:
pretty_print(p)
if args.verbose:
print(p)
print()
print('num panics in core ignored: {}'.format(len(within_core_panic_list)))
print('num panics for which no info available: {}'.format(len(
no_info_panic_list)))
if args.verbose:
print(
'If more debug info is needed, run dwarfdump directly on the address in question.'
)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if platform.system() == 'Darwin':
DWARFDUMP = 'dwarfdump'
elif platform.system() == 'Linux':
DWARFDUMP = 'llvm-dwarfdump'
else:
raise NotImplementedError('Unknown platform')
ARM_OBJDUMP = 'arm-none-eabi-objdump'
RISCV_OBJDUMP = 'riscv64-unknown-elf-objdump'
panic_functions = ['expect_failed', 'unwrap_failed', 'panic_bounds_check',
'slice_index_order_fail', 'slice_end_index_len_fail',
'slice_start_index_len_fail', 'slice17len_mismatch_fail',
'str16slice_error_fail', 'copy_from_slice17len_mismatch_fail',
'copy_from_slice17', 'panicking5panic', '6unwrap17', '6expect17',
'11copy_within17', 'core..fmt..builders..PadAdapter', '11copy_within17',
'write_char', 'write_str', 'printable5check',
'char$u20$as$u20$core..fmt..Debug', 'GenericRadix7fmt_int',
'10unwrap_err17h6', '13is_whitespace17',
'$u20$core..slice..index..SliceIndex$LT',
'core..iter..adapters..filter..Filter$LT$I$C$P$GT$$u20$as$u20$core..iter',
'_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17h4c77379bd26a525bE'
,
'_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17hfe7e43aa2388c47bE'
]
dw_at_file_re = re.compile('.*(?:DW_AT_call_file|DW_AT_decl_file).*')
dw_at_line_re = re.compile('.*(?:DW_AT_call_line|DW_AT_decl_line).*')
line_info_re = re.compile('.*Line info.*')
abstract_origin_re = re.compile('.*DW_AT_abstract_origin.*')
dw_at_linkage_name_re = re.compile('.*DW_AT_linkage_name.*')
dw_at_name_re = re.compile('.*DW_AT_name.*')
def matches_panic_funcs(name):
"""If the passed name contains one of the known panic_functions,
return the match
"""
for func in panic_functions:
if func in name:
return func
return ''
def linkage_or_origin_all_parents(elf, addr, linkage=False):
"""Returns a list of the abstract origin or linkage of all parents of the dwarf
location for the passed address
"""
result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),
capture_output=True, text=True)
dwarfdump = result.stdout
regex = abstract_origin_re
if linkage:
regex = dw_at_linkage_name_re
matches = re.findall(regex, dwarfdump)
def getFunction(line):
return line.strip().split('"')[1]
origins = list(map(getFunction, matches))
return origins
def any_origin_matches_panic_func(elf, addr):
"""returns name if any origin for the passed addr matches one
of the functions in the panic_functions array
"""
origins = linkage_or_origin_all_parents(elf, addr)
for origin in origins:
name = matches_panic_funcs(origin)
if name:
return name
return ''
def any_linkage_matches_panic_func(elf, addr):
"""returns True + name if any linkage for the passed addr matches one
of the functions in the panic_functions array
"""
linkages = linkage_or_origin_all_parents(elf, addr, True)
for linkage in linkages:
name = matches_panic_funcs(linkage)
if name:
return name
return ''
def check_for_source_in_parent(elf, addr):
"""Takes in a dwarfdump lookup including parents of the source DWARF
location, returns the first parent with a call file not in
the core library. If found, this often indicates the source of the panic
in the Tock source code.
"""
result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),
capture_output=True, text=True)
dwarfdump = result.stdout
matches = re.findall(dw_at_file_re, dwarfdump)
def getFile(line):
return line.strip().split('"')[1]
source_files = list(map(getFile, matches))
for i, f in enumerate(source_files[::-1]):
if '/core/' not in f:
line_matches = re.findall(dw_at_line_re, dwarfdump)
def getLine(line):
return line.strip().split('(')[1].split(')')[0]
source_lines = list(map(getLine, line_matches))
source_line = source_lines[::-1][i]
return f, source_line
return '', ''
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('ELF', help='ELF file for analysis')
parser.add_argument('--verbose', '-v', action='store_true', help=
'Output additional DWARF info for each panic location in the binary')
parser.add_argument('--riscv', action='store_true', help=
'Use risc-v based objdump')
return parser.parse_args()
def find_all_panics(objdump, elf, is_riscv):
panic_list = []
within_core_panic_list = []
no_info_panic_list = []
result = subprocess.run((objdump, '-d', elf), capture_output=True, text
=True)
objdump_out = result.stdout
for function in panic_functions:
function_re = re.compile('.*:.*#.*' + function + '.*')
if not is_riscv:
function_re = re.compile('.*:.*<.*' + function + '.*')
matches = re.findall(function_re, objdump_out)
def getAddr(line):
return line.strip().split(':')[0]
addrs = list(map(getAddr, matches))
for addr in addrs:
result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, elf),
capture_output=True, text=True)
dwarfdump = result.stdout
dw_at_file = re.search(dw_at_file_re, dwarfdump)
dw_at_line = re.search(dw_at_line_re, dwarfdump)
line_info = re.search(line_info_re, dwarfdump)
abstract_origin = re.search(abstract_origin_re, dwarfdump)
linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)
file_string = ''
line_string = ''
line_info_string = ''
abstract_origin_string = ''
linkage_name_string = ''
if dw_at_file:
file_string = dw_at_file.group(0).strip()
line_string = dw_at_line.group(0).strip()
panicinfo = {}
panicinfo['addr'] = addr
panicinfo['function'] = function
if line_info:
line_info_string = line_info.group(0).strip()
panicinfo['line_info'] = line_info_string
if abstract_origin:
abstract_origin_string = abstract_origin.group(0).strip()
if linkage_name:
linkage_name_string = linkage_name.group(0).strip()
if ('DW_AT_call_file' in file_string and 'DW_AT_decl_file' in
file_string):
raise RuntimeError('I misunderstand DWARF')
if ('DW_AT_call_file' in file_string or 'DW_AT_decl_file' in
file_string):
filename = file_string.split('"')[1]
line_num = line_string.split('(')[1].split(')')[0]
if 'DW_AT_call_file' in file_string:
panicinfo['call_file'] = filename
panicinfo['call_line'] = line_num
if 'DW_AT_decl_file' in file_string:
panicinfo['decl_file'] = filename
panicinfo['decl_line'] = line_num
if not '/core/' in filename:
if not 'closure' in abstract_origin_string:
panicinfo['best_guess_source'] = 'call/decl'
else:
panicinfo['best_guess_source'
] = 'call-closure-line-info'
panic_list.append(panicinfo)
continue
else:
parent_file, parent_line = check_for_source_in_parent(elf,
addr)
if parent_file:
panicinfo['parent_call_file'] = parent_file
panicinfo['parent_call_line'] = parent_line
panicinfo['best_guess_source'] = 'parent'
panic_list.append(panicinfo)
continue
elif not abstract_origin and not linkage_name:
no_info_panic_list.append(panicinfo)
continue
elif abstract_origin:
if 'core' in abstract_origin_string:
name = matches_panic_funcs(abstract_origin_string)
if name:
within_core_panic_list.append(panicinfo)
continue
else:
name2 = any_origin_matches_panic_func(elf, addr
)
name3 = any_linkage_matches_panic_func(elf,
addr)
if name2:
within_core_panic_list.append(panicinfo)
continue
elif name3:
within_core_panic_list.append(panicinfo)
continue
else:
no_info_panic_list.append(panicinfo)
continue
elif 'closure' in abstract_origin_string:
panicinfo['best_guess_source'] = 'lineinfo'
panic_list.append(panicinfo)
continue
else:
raise RuntimeError('Unhandled')
if linkage_name:
name = matches_panic_funcs(linkage_name_string)
if name:
within_core_panic_list.append(panicinfo)
continue
else:
no_info_panic_list.append(panicinfo)
print(
'Failed to match panic but we probably have enough info to trace it up. Linkage name: {}, addr: {}'
.format(linkage_name_string, addr))
continue
no_info_panic_list.append(panic_info)
print('did not find source for panic: {}'.format(addr))
continue
elif abstract_origin:
origin = abstract_origin_string.split('"')[1]
panicinfo['abstract_origin'] = origin
if 'core' in origin:
if matches_panic_funcs(origin):
within_core_panic_list.append(panicinfo)
continue
no_info_panic_list.append(panicinfo)
print(
'Probably could add this origin or one of its parents to the panic function list: {}'
.format(abstract_origin_string))
continue
else:
panicinfo['best_guess_source'] = 'abstract_origin + line'
panic_list.append(panicinfo)
continue
else:
try:
dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[-1
].strip()
function_name = dw_at_name_string.split('"')[1]
if 'OUTLINED_FUNCTION_' in function_name:
if function_name not in panic_functions:
panic_functions.append(function_name + '>')
within_core_panic_list.append(panicinfo)
continue
no_info_panic_list.append(panicinfo)
continue
except:
no_info_panic_list.append(panicinfo)
continue
raise RuntimeError('BUG: Should not reach here')
return panic_list, within_core_panic_list, no_info_panic_list
def pretty_print(panicinfo):
if panicinfo['best_guess_source'] == 'call/decl':
try:
print('\t{} -- {}:{}'.format(panicinfo['addr'], panicinfo[
'call_file'], panicinfo['call_line']))
except:
print('\t{} -- in function starting at {}:{}'.format(panicinfo[
'addr'], panicinfo['decl_file'], panicinfo['decl_line']))
elif panicinfo['best_guess_source'] == 'parent':
print('\t{} -- at or in function starting at {}:{}'.format(
panicinfo['addr'], panicinfo['parent_call_file'], panicinfo[
'parent_call_line']))
elif panicinfo['best_guess_source'] == 'lineinfo':
print('\t{} -- in closure, try: {}'.format(panicinfo['addr'],
panicinfo['line_info']))
elif panicinfo['best_guess_source'] == 'abstract_origin + line':
print('\t{} -- line_info: {} from origin :{}'.format(panicinfo[
'addr'], panicinfo['line_info'], panicinfo['abstract_origin']))
elif panicinfo['best_guess_source'] == 'call-closure-line-info':
print('\t{} -- in closure starting on line_info: {}'.format(
panicinfo['addr'], panicinfo['line_info']))
else:
raise RuntimeError('Missing best guess source: {}'.format(panicinfo))
def main():
args = parse_args()
if sys.version_info.minor < 7:
print('This tool requires Python 3.7+')
return -1
print('Tock panic report for ' + args.ELF)
objdump = ARM_OBJDUMP
if args.riscv:
objdump = RISCV_OBJDUMP
panic_list, within_core_panic_list, no_info_panic_list = find_all_panics(
objdump, args.ELF, args.riscv)
print('num_panics: {}'.format(len(panic_list)))
buckets_list = {}
for f in panic_functions:
buckets_list[f] = []
for panic in panic_list:
buckets_list[panic['function']].append(panic)
for f, l in buckets_list.items():
if len(l) > 0:
print('{}: {}'.format(f, len(l)))
for p in l:
pretty_print(p)
if args.verbose:
print(p)
print()
print('num panics in core ignored: {}'.format(len(within_core_panic_list)))
print('num panics for which no info available: {}'.format(len(
no_info_panic_list)))
if args.verbose:
print(
'If more debug info is needed, run dwarfdump directly on the address in question.'
)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import argparse
import platform
import re
import subprocess
import sys
# Select the dwarfdump binary name for the host OS: macOS ships the tool as
# plain `dwarfdump`, while Linux distributions install it as
# `llvm-dwarfdump`. Other platforms are not supported.
if platform.system() == 'Darwin':
    DWARFDUMP = 'dwarfdump'
elif platform.system() == 'Linux':
    DWARFDUMP = 'llvm-dwarfdump'
else:
    raise NotImplementedError('Unknown platform')
# Disassemblers used to find call sites. Note: in practice, GNU binutils
# objdumps resolve symbols better than llvm-objdump for these targets.
ARM_OBJDUMP = 'arm-none-eabi-objdump'
RISCV_OBJDUMP = 'riscv64-unknown-elf-objdump'
# Substrings of (mangled) core-library function names known to lie on panic
# paths. A disassembly line referencing any of these is treated as a panic
# call site. These are partial mangled names, so they may need updating as
# the Rust compiler's name mangling evolves.
panic_functions = ['expect_failed', 'unwrap_failed', 'panic_bounds_check',
    'slice_index_order_fail', 'slice_end_index_len_fail',
    'slice_start_index_len_fail', 'slice17len_mismatch_fail',
    'str16slice_error_fail', 'copy_from_slice17len_mismatch_fail',
    'copy_from_slice17', 'panicking5panic', '6unwrap17', '6expect17',
    '11copy_within17', 'core..fmt..builders..PadAdapter', '11copy_within17',
    'write_char', 'write_str', 'printable5check',
    'char$u20$as$u20$core..fmt..Debug', 'GenericRadix7fmt_int',
    '10unwrap_err17h6', '13is_whitespace17',
    '$u20$core..slice..index..SliceIndex$LT',
    'core..iter..adapters..filter..Filter$LT$I$C$P$GT$$u20$as$u20$core..iter',
    '_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17h4c77379bd26a525bE'
    ,
    '_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17hfe7e43aa2388c47bE'
    ]
# Pre-compiled regexes matching the dwarfdump output lines parsed below.
dw_at_file_re = re.compile('.*(?:DW_AT_call_file|DW_AT_decl_file).*')
dw_at_line_re = re.compile('.*(?:DW_AT_call_line|DW_AT_decl_line).*')
line_info_re = re.compile('.*Line info.*')
abstract_origin_re = re.compile('.*DW_AT_abstract_origin.*')
dw_at_linkage_name_re = re.compile('.*DW_AT_linkage_name.*')
dw_at_name_re = re.compile('.*DW_AT_name.*')
def matches_panic_funcs(name):
    """Return the first known panic-function substring contained in ``name``.

    Scans the module-level ``panic_functions`` list; returns the empty
    string when no entry matches.
    """
    return next((candidate for candidate in panic_functions
                 if candidate in name), '')
def linkage_or_origin_all_parents(elf, addr, linkage=False):
    """Return the DW_AT_abstract_origin values (or, when ``linkage`` is
    True, the DW_AT_linkage_name values) of every parent DIE of the DWARF
    location for ``addr`` in ``elf``.
    """
    dump = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),
        capture_output=True, text=True).stdout
    pattern = dw_at_linkage_name_re if linkage else abstract_origin_re
    # Each matching line looks like `DW_AT_...  ("the::name")`; the quoted
    # field is the value of interest.
    return [line.strip().split('"')[1]
            for line in re.findall(pattern, dump)]
def any_origin_matches_panic_func(elf, addr):
    """Return the panic-function name matched by any abstract origin of the
    DWARF location at ``addr``, or '' when no origin matches.
    """
    hits = (matches_panic_funcs(parent)
            for parent in linkage_or_origin_all_parents(elf, addr))
    return next((hit for hit in hits if hit), '')
def any_linkage_matches_panic_func(elf, addr):
    """Return the matching panic-function name if any DW_AT_linkage_name of
    the DWARF location at ``addr`` (or of its parent DIEs) matches one of
    the functions in the panic_functions array; return '' when none match.
    """
    linkages = linkage_or_origin_all_parents(elf, addr, True)
    for linkage in linkages:
        name = matches_panic_funcs(linkage)
        if name:
            return name
    return ''
def check_for_source_in_parent(elf, addr):
    """Takes in a dwarfdump lookup including parents of the source DWARF
    location, returns the first parent with a call file not in
    the core library. If found, this often indicates the source of the panic
    in the Tock source code.
    """
    result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),
        capture_output=True, text=True)
    dwarfdump = result.stdout
    # Collect every call/decl *file* attribute line in the lookup output.
    matches = re.findall(dw_at_file_re, dwarfdump)
    def getFile(line):
        # Lines look like `DW_AT_call_file  ("path")`; take the quoted path.
        return line.strip().split('"')[1]
    source_files = list(map(getFile, matches))
    # Walk from the innermost parent outward (hence the reversal) looking
    # for the first frame outside the core library.
    for i, f in enumerate(source_files[::-1]):
        if '/core/' not in f:
            line_matches = re.findall(dw_at_line_re, dwarfdump)
            def getLine(line):
                # Lines look like `DW_AT_call_line  (123)`; take the number.
                return line.strip().split('(')[1].split(')')[0]
            source_lines = list(map(getLine, line_matches))
            # NOTE(review): this assumes the file- and line-attribute lines
            # appear in lockstep (same count and order) in the dwarfdump
            # output, so index i pairs them up -- confirm against dwarfdump
            # output format.
            source_line = source_lines[::-1][i]
            return f, source_line
    return '', ''
def parse_args():
    """Build the command-line interface and parse sys.argv.

    Returns the argparse Namespace with attributes ``ELF``, ``verbose``,
    and ``riscv``.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('ELF', help='ELF file for analysis')
    cli.add_argument('--verbose', '-v', action='store_true', help=
        'Output additional DWARF info for each panic location in the binary')
    cli.add_argument('--riscv', action='store_true', help=
        'Use risc-v based objdump')
    return cli.parse_args()
def find_all_panics(objdump, elf, is_riscv):
    """Disassemble ``elf`` and classify every call site of a known panic
    function.

    Args:
        objdump: objdump binary to use for disassembly.
        elf: path to the kernel ELF (must contain debug info).
        is_riscv: True when the disassembly uses riscv-style `#` symbol
            comments rather than ARM-style `<symbol>` annotations.

    Returns:
        A 3-tuple of lists of panic-info dicts:
        (panics traced to Tock source, panics contained within the core
        library, panics for which no source info could be recovered).
    """
    panic_list = []
    within_core_panic_list = []
    no_info_panic_list = []
    result = subprocess.run((objdump, '-d', elf), capture_output=True, text
        =True)
    objdump_out = result.stdout
    # Iterating panic_functions while find_all_panics may append newly
    # discovered OUTLINED_FUNCTION_* names is deliberate: Python's list
    # iteration picks up appended entries, so their call sites get scanned
    # too.
    for function in panic_functions:
        function_re = re.compile('.*:.*#.*' + function + '.*')
        if not is_riscv:
            function_re = re.compile('.*:.*<.*' + function + '.*')
        matches = re.findall(function_re, objdump_out)

        def getAddr(line):
            # Disassembly lines look like `  800123:  ...`; the address is
            # the field before the first colon.
            return line.strip().split(':')[0]
        addrs = list(map(getAddr, matches))
        for addr in addrs:
            # Look up the source location of this call site in the DWARF
            # debug info.
            result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, elf),
                capture_output=True, text=True)
            dwarfdump = result.stdout
            dw_at_file = re.search(dw_at_file_re, dwarfdump)
            dw_at_line = re.search(dw_at_line_re, dwarfdump)
            line_info = re.search(line_info_re, dwarfdump)
            abstract_origin = re.search(abstract_origin_re, dwarfdump)
            linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)
            file_string = ''
            line_string = ''
            line_info_string = ''
            abstract_origin_string = ''
            linkage_name_string = ''
            if dw_at_file:
                # NOTE(review): assumes a line attribute always accompanies
                # a file attribute in the dwarfdump output.
                file_string = dw_at_file.group(0).strip()
                line_string = dw_at_line.group(0).strip()
            panicinfo = {}
            panicinfo['addr'] = addr
            panicinfo['function'] = function
            if line_info:
                line_info_string = line_info.group(0).strip()
                panicinfo['line_info'] = line_info_string
            if abstract_origin:
                abstract_origin_string = abstract_origin.group(0).strip()
            if linkage_name:
                linkage_name_string = linkage_name.group(0).strip()
            if ('DW_AT_call_file' in file_string and 'DW_AT_decl_file' in
                file_string):
                raise RuntimeError('I misunderstand DWARF')
            if ('DW_AT_call_file' in file_string or 'DW_AT_decl_file' in
                file_string):
                filename = file_string.split('"')[1]
                line_num = line_string.split('(')[1].split(')')[0]
                if 'DW_AT_call_file' in file_string:
                    panicinfo['call_file'] = filename
                    panicinfo['call_line'] = line_num
                if 'DW_AT_decl_file' in file_string:
                    panicinfo['decl_file'] = filename
                    panicinfo['decl_line'] = line_num
                if not '/core/' in filename:
                    # Call site is outside the core library -- this is a
                    # directly attributable panic in Tock code.
                    if not 'closure' in abstract_origin_string:
                        panicinfo['best_guess_source'] = 'call/decl'
                    else:
                        panicinfo['best_guess_source'
                            ] = 'call-closure-line-info'
                    panic_list.append(panicinfo)
                    continue
                else:
                    # The immediate location is in core; walk parent DIEs
                    # looking for the first non-core frame.
                    parent_file, parent_line = check_for_source_in_parent(elf,
                        addr)
                    if parent_file:
                        panicinfo['parent_call_file'] = parent_file
                        panicinfo['parent_call_line'] = parent_line
                        panicinfo['best_guess_source'] = 'parent'
                        panic_list.append(panicinfo)
                        continue
                    elif not abstract_origin and not linkage_name:
                        no_info_panic_list.append(panicinfo)
                        continue
                    elif abstract_origin:
                        if 'core' in abstract_origin_string:
                            name = matches_panic_funcs(abstract_origin_string)
                            if name:
                                within_core_panic_list.append(panicinfo)
                                continue
                            else:
                                # The direct origin is unrecognized; check
                                # all parent origins/linkages too.
                                name2 = any_origin_matches_panic_func(elf, addr
                                    )
                                name3 = any_linkage_matches_panic_func(elf,
                                    addr)
                                if name2:
                                    within_core_panic_list.append(panicinfo)
                                    continue
                                elif name3:
                                    within_core_panic_list.append(panicinfo)
                                    continue
                                else:
                                    no_info_panic_list.append(panicinfo)
                                    continue
                        elif 'closure' in abstract_origin_string:
                            panicinfo['best_guess_source'] = 'lineinfo'
                            panic_list.append(panicinfo)
                            continue
                        else:
                            raise RuntimeError('Unhandled')
                    if linkage_name:
                        name = matches_panic_funcs(linkage_name_string)
                        if name:
                            within_core_panic_list.append(panicinfo)
                            continue
                        else:
                            no_info_panic_list.append(panicinfo)
                            print(
                                'Failed to match panic but we probably have enough info to trace it up. Linkage name: {}, addr: {}'
                                .format(linkage_name_string, addr))
                            continue
                    # Defensive fallback: every branch above either
                    # continues or raises, so this is not expected to run.
                    # Fixed: this previously referenced the undefined name
                    # `panic_info` (NameError if ever reached).
                    no_info_panic_list.append(panicinfo)
                    print('did not find source for panic: {}'.format(addr))
                    continue
            elif abstract_origin:
                # No file attribute; fall back to the abstract origin name.
                origin = abstract_origin_string.split('"')[1]
                panicinfo['abstract_origin'] = origin
                if 'core' in origin:
                    if matches_panic_funcs(origin):
                        within_core_panic_list.append(panicinfo)
                        continue
                    no_info_panic_list.append(panicinfo)
                    print(
                        'Probably could add this origin or one of its parents to the panic function list: {}'
                        .format(abstract_origin_string))
                    continue
                else:
                    panicinfo['best_guess_source'] = 'abstract_origin + line'
                    panic_list.append(panicinfo)
                    continue
            else:
                # No file attribute and no abstract origin: try the last
                # DW_AT_name in the lookup output.
                try:
                    dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[-1
                        ].strip()
                    function_name = dw_at_name_string.split('"')[1]
                    if 'OUTLINED_FUNCTION_' in function_name:
                        # Machine-outlined helper: add it to the scan list
                        # (with the trailing `>` used by the ARM regex) so
                        # its own call sites are analyzed on later loop
                        # iterations.
                        if function_name not in panic_functions:
                            panic_functions.append(function_name + '>')
                            within_core_panic_list.append(panicinfo)
                            continue
                    no_info_panic_list.append(panicinfo)
                    continue
                except:
                    # Malformed/absent DW_AT_name line -- give up on this
                    # address.
                    no_info_panic_list.append(panicinfo)
                    continue
            raise RuntimeError('BUG: Should not reach here')
    return panic_list, within_core_panic_list, no_info_panic_list
def pretty_print(panicinfo):
    """Print a one-line, human-readable source guess for a single panic.

    Args:
        panicinfo: a dict produced by find_all_panics(); the
            'best_guess_source' key selects which other keys are consulted.

    Raises:
        RuntimeError: if 'best_guess_source' holds an unrecognized tag.
    """
    if panicinfo['best_guess_source'] == 'call/decl':
        try:
            print('\t{} -- {}:{}'.format(panicinfo['addr'], panicinfo[
                'call_file'], panicinfo['call_line']))
        # Fixed: was a bare `except:`, which also swallowed unrelated
        # exceptions (including KeyboardInterrupt). Only a missing
        # 'call_file'/'call_line' key is expected here.
        except KeyError:
            # No call-site info; fall back to the declaration site of the
            # enclosing function.
            print('\t{} -- in function starting at {}:{}'.format(panicinfo[
                'addr'], panicinfo['decl_file'], panicinfo['decl_line']))
    elif panicinfo['best_guess_source'] == 'parent':
        print('\t{} -- at or in function starting at {}:{}'.format(
            panicinfo['addr'], panicinfo['parent_call_file'], panicinfo[
            'parent_call_line']))
    elif panicinfo['best_guess_source'] == 'lineinfo':
        print('\t{} -- in closure, try: {}'.format(panicinfo['addr'],
            panicinfo['line_info']))
    elif panicinfo['best_guess_source'] == 'abstract_origin + line':
        print('\t{} -- line_info: {} from origin :{}'.format(panicinfo[
            'addr'], panicinfo['line_info'], panicinfo['abstract_origin']))
    elif panicinfo['best_guess_source'] == 'call-closure-line-info':
        print('\t{} -- in closure starting on line_info: {}'.format(
            panicinfo['addr'], panicinfo['line_info']))
    else:
        raise RuntimeError('Missing best guess source: {}'.format(panicinfo))
def main():
    """Entry point: report every detected panic path in the ELF given on
    the command line, bucketed by the panic helper function reached.

    Returns:
        -1 when running on an unsupported Python version, None otherwise.
    """
    args = parse_args()
    # subprocess.run(capture_output=...) requires Python 3.7+.
    # Fixed: the previous check only inspected sys.version_info.minor, so
    # e.g. Python 2.7 (minor == 7) incorrectly passed it.
    if sys.version_info < (3, 7):
        print('This tool requires Python 3.7+')
        return -1
    print('Tock panic report for ' + args.ELF)
    objdump = ARM_OBJDUMP
    if args.riscv:
        objdump = RISCV_OBJDUMP
    panic_list, within_core_panic_list, no_info_panic_list = find_all_panics(
        objdump, args.ELF, args.riscv)
    print('num_panics: {}'.format(len(panic_list)))
    # Bucket the traced panics by the panic helper through which they were
    # discovered, then print each non-empty bucket.
    buckets_list = {}
    for f in panic_functions:
        buckets_list[f] = []
    for panic in panic_list:
        buckets_list[panic['function']].append(panic)
    for f, l in buckets_list.items():
        if len(l) > 0:
            print('{}: {}'.format(f, len(l)))
            for p in l:
                pretty_print(p)
                if args.verbose:
                    print(p)
            print()
    print('num panics in core ignored: {}'.format(len(within_core_panic_list)))
    print('num panics for which no info available: {}'.format(len(
        no_info_panic_list)))
    if args.verbose:
        print(
            'If more debug info is needed, run dwarfdump directly on the address in question.'
            )


if __name__ == '__main__':
    main()
<|reserved_special_token_1|>
#!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 or the MIT License.
# SPDX-License-Identifier: Apache-2.0 OR MIT
# Copyright Tock Contributors 2023.
# Prints out the source locations of panics in a Tock kernel ELF
#
# This tool attempts to trace all panic locations in a Tock kernel ELF by
# tracing calls to panic functions in the core library, using the debug information
# embedded in the ELF file. This tool requires an ELF which includes debug information.
# In its current state, cannot accurately provide the source locations
# corresponding to each panic, but tries to be honest about its confidence in
# each guess. In general, each guess is usually enough to locate the relevant panic.
# More creative analysis might be able to increase
# the accuracy with which this tool can identify source locations of panics. For now,
# this tool is useful for:
#
# - obtaining a rough count of the number of panics in a Tock kernel binary
#
# - finding and removing panics in a Tock kernel binary
#
# - roughly determining which components of a Tock kernel binary contain the most panic
# paths
#
# There are several assumptions built into this tool which may not always hold. For one,
# the list of panic_functions are assumed to not match any strings in the actual
# codebase, despite the fact they are incomplete function names and overlap is possible.
# I could solve this by using full names of these functions, but I am unsure how often
# the name mangling of these functions will change as the rust compiler changes so this
# approach felt potentially more stable.
#
# Several assumptions are made about DWARF locations that do not always hold, so source
# locations are not always accurate -- sometimes, the printed location just points to
# the function containing a panic, rather than the actual line on which the panic
# occurs. Some assumptions about which panics are in the core library and will be
# caught by grepping for other calls may also not always hold. The best way to inspect
# these is by manually inspecting the panics in the `within_core_panic_list`.
#
# This script stores panics which it cannot trace out of the core library in the
# `no_info_panic_list`. If this list contains some panics, that is a sign that some
# panics have not been identified. You can manually look at the addresses stored in
# this list, attempt to find the core library function which leads to these instructions
# being called, and then add those core library functions to the list of panic functions.
#
# The output of this script is *not* stable.
#
# Usage: find_panics.py ELF [--riscv]
#
# Requires Python 3.7+
#
# Author: Hudson Ayers <hayers@stanford.edu>
import argparse
import platform
import re
import subprocess
import sys
# Select the host's dwarfdump binary; only macOS and Linux are supported.
_host_os = platform.system()
if _host_os == "Linux":
    DWARFDUMP = "llvm-dwarfdump"
elif _host_os == "Darwin":
    DWARFDUMP = "dwarfdump"
else:
    raise NotImplementedError("Unknown platform")

# Note: In practice, GCC objdumps are better at symbol resolution than LLVM objdump
ARM_OBJDUMP = "arm-none-eabi-objdump"
RISCV_OBJDUMP = "riscv64-unknown-elf-objdump"
# TODO: For all functions below the initial batch, it would like be preferable to
# automatically populate the list with additional functions in the core library using
# debug info. For now, however, I do this manually.
# Substrings of mangled panic-path symbol names. Matching is by substring
# containment (see matches_panic_funcs()), so entries need not be complete
# symbol names. find_all_panics() may append OUTLINED_FUNCTION_XX entries at
# runtime. Entries must be unique: find_all_panics() scans the disassembly
# once per entry, so a duplicate would double-count every matching panic.
panic_functions = [
    "expect_failed",
    "unwrap_failed",
    "panic_bounds_check",
    "slice_index_order_fail",
    "slice_end_index_len_fail",
    "slice_start_index_len_fail",
    "slice17len_mismatch_fail",
    "str16slice_error_fail",
    "copy_from_slice17len_mismatch_fail",
    "copy_from_slice17",
    "panicking5panic",
    # below are functions I have manually traced up from the above, more "core" panics, on a riscv binary with a low inline threshold
    "6unwrap17",
    "6expect17",
    "11copy_within17",  # calls panicking::panic (was listed twice; deduplicated)
    "core..fmt..builders..PadAdapter",  # calls slice_error_fail
    "write_char",  # calls PadAdapter one above
    "write_str",  # calls write_char
    "printable5check",  # calls slice_index_order_fail
    "char$u20$as$u20$core..fmt..Debug",  # calls printable5check
    "GenericRadix7fmt_int",  # calls slice_start_index_len_fail
    # below are functions I manually traced on an arm binary,
    # with a somewhat higher inline threshold.
    "10unwrap_err17h6",
    "13is_whitespace17",
    "$u20$core..slice..index..SliceIndex$LT",
    "core..iter..adapters..filter..Filter$LT$I$C$P$GT$$u20$as$u20$core..iter",
    "_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17h4c77379bd26a525bE",
    "_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17hfe7e43aa2388c47bE",
]
# Pre-compiled regex lookups, reused across many dwarfdump invocations.
# File/line attributes: either the inlined call site or the declaration site.
dw_at_file_re = re.compile(r""".*(?:DW_AT_call_file|DW_AT_decl_file).*""")
dw_at_line_re = re.compile(r""".*(?:DW_AT_call_line|DW_AT_decl_line).*""")
# "Line info" summary line emitted by `dwarfdump --lookup`.
line_info_re = re.compile(r""".*Line info.*""")
# DW_AT_abstract_origin: the out-of-line DIE an inlined call came from.
abstract_origin_re = re.compile(r""".*DW_AT_abstract_origin.*""")
# Mangled linkage name / plain name of the enclosing function.
dw_at_linkage_name_re = re.compile(r""".*DW_AT_linkage_name.*""")
dw_at_name_re = re.compile(r""".*DW_AT_name.*""")
def matches_panic_funcs(name):
    """Return the first panic-function fragment contained in *name*, or "".

    Membership is substring-based: the entries of panic_functions are
    partial (mangled) symbol names rather than full symbols.
    """
    return next((fragment for fragment in panic_functions if fragment in name), "")
def linkage_or_origin_all_parents(elf, addr, linkage=False):
    """Return the DW_AT_abstract_origin (or, with linkage=True, the
    DW_AT_linkage_name) values for the DWARF entry at *addr* and all of
    its parent entries, as a list of strings.
    """
    dump = subprocess.run(
        (DWARFDUMP, "--lookup=0x" + addr, "-p", elf),
        capture_output=True,
        text=True,
    ).stdout
    pattern = dw_at_linkage_name_re if linkage else abstract_origin_re
    # Matching attribute lines carry the value inside double quotes,
    # e.g.: DW_AT_abstract_origin (... "mangled::name")
    return [line.strip().split('"')[1] for line in re.findall(pattern, dump)]
def any_origin_matches_panic_func(elf, addr):
    """Return the panic-function fragment matched by any abstract origin in
    the parent chain of *addr*, or "" when none of them match.
    """
    candidates = linkage_or_origin_all_parents(elf, addr)
    return next((m for m in map(matches_panic_funcs, candidates) if m), "")
def any_linkage_matches_panic_func(elf, addr):
    """Return the panic-function fragment matched by any linkage name in
    the parent chain of *addr*, or "" when none of them match.
    """
    candidates = linkage_or_origin_all_parents(elf, addr, True)
    return next((m for m in map(matches_panic_funcs, candidates) if m), "")
def check_for_source_in_parent(elf, addr):
    """Takes in a dwarfdump lookup including parents of the source DWARF
    location, returns the first parent with a call file not in
    the core library. If found, this often indicates the source of the panic
    in the Tock source code.

    Returns a (file, line) tuple of strings, or ("", "") when every parent
    location lies inside the core library.
    """
    result = subprocess.run(
        (DWARFDUMP, "--lookup=0x" + addr, "-p", elf), capture_output=True, text=True
    )
    dwarfdump = result.stdout
    matches = re.findall(dw_at_file_re, dwarfdump)

    def getFile(line):
        # Attribute lines look like: DW_AT_call_file ("path/to/file.rs")
        return line.strip().split('"')[1]

    source_files = list(map(getFile, matches))
    # Scan the file list in reverse order of appearance in the dump.
    # NOTE(review): this assumes the file-attribute and line-attribute
    # matches form parallel lists (same length and order), so index i pairs
    # a file with its line -- confirm against dwarfdump output if reported
    # locations ever look wrong.
    for (i, f) in enumerate(source_files[::-1]):
        if "/core/" not in f:
            line_matches = re.findall(dw_at_line_re, dwarfdump)

            def getLine(line):
                # Line attributes look like: DW_AT_call_line (123)
                return line.strip().split("(")[1].split(")")[0]

            source_lines = list(map(getLine, line_matches))
            source_line = source_lines[::-1][i]
            return (f, source_line)
    return ("", "")
def parse_args():
    """Define and parse this script's command-line interface."""
    cli = argparse.ArgumentParser()
    cli.add_argument("ELF", help="ELF file for analysis")
    cli.add_argument(
        "--verbose",
        "-v",
        action="store_true",
        help="Output additional DWARF info for each panic location in the binary",
    )
    cli.add_argument("--riscv", action="store_true", help="Use risc-v based objdump")
    return cli.parse_args()
# Find all addresses that panic, and get basic dwarf info on those addresses
def find_all_panics(objdump, elf, is_riscv):
    """Disassemble *elf*, locate every call site of a known panic function,
    and try to resolve a source location for each via DWARF lookups.

    Arguments:
      objdump  -- name of the objdump binary to invoke for disassembly
      elf      -- path to the ELF file under analysis
      is_riscv -- True when the ELF is RISC-V (affects disassembly syntax)

    Returns (panic_list, within_core_panic_list, no_info_panic_list):
    panics with a best-guess source location, panics traced only into the
    core library, and panics with no usable debug info, respectively.
    """
    panic_list = []
    within_core_panic_list = []
    no_info_panic_list = []
    result = subprocess.run((objdump, "-d", elf), capture_output=True, text=True)
    objdump_out = result.stdout
    for function in panic_functions:
        # re.escape() is required: mangled fragments contain regex
        # metacharacters such as '$' ('Filter$LT$...', '_ZN4core...E'),
        # and an unescaped '$' anchors the pattern so those names could
        # never match literally.
        function_re = re.compile(".*:.*#.*" + re.escape(function) + ".*")
        if not is_riscv:
            # arm-none-eabi-objdump renders call targets as '<symbol>'
            # (and uses ';' rather than '#' for comments).
            function_re = re.compile(".*:.*<.*" + re.escape(function) + ".*")
        # TODO: arm elfs include loads of offsets from symbols in such a way that these lines
        # are matched by this regex. In general, these loads occur within the instruction stream
        # associated with the symbol at hand, and will usually be excluded by logic later in
        # this function. This leads to `within_core_panic_list` and `no_info_panic_list`
        # containing more "panics" than when analyzing a risc-v binary. We could fix this
        # by matching *only* on functions with instructions that actually jump to a new symbol,
        # but this would require a list of such instructions for each architecture. However
        # as written it actually lets us identify panics which are jumped to via addresses
        # stored in registers, which may actually catch additional valid panics.
        matches = re.findall(function_re, objdump_out)

        def getAddr(line):
            # Disassembly lines look like "   <addr>:\t<insn> ..." -- take the address.
            return line.strip().split(":")[0]

        addrs = list(map(getAddr, matches))
        for addr in addrs:
            result = subprocess.run(
                (DWARFDUMP, "--lookup=0x" + addr, elf), capture_output=True, text=True
            )
            dwarfdump = result.stdout
            dw_at_file = re.search(dw_at_file_re, dwarfdump)
            dw_at_line = re.search(dw_at_line_re, dwarfdump)
            line_info = re.search(line_info_re, dwarfdump)
            abstract_origin = re.search(abstract_origin_re, dwarfdump)
            linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)
            file_string = ""
            line_string = ""
            line_info_string = ""
            abstract_origin_string = ""
            linkage_name_string = ""
            if dw_at_file:
                file_string = dw_at_file.group(0).strip()
                line_string = dw_at_line.group(0).strip()
            panicinfo = {}
            panicinfo["addr"] = addr
            panicinfo["function"] = function
            if line_info:
                line_info_string = line_info.group(0).strip()
                panicinfo["line_info"] = line_info_string
            if abstract_origin:
                abstract_origin_string = abstract_origin.group(0).strip()
            if linkage_name:
                linkage_name_string = linkage_name.group(0).strip()
            if "DW_AT_call_file" in file_string and "DW_AT_decl_file" in file_string:
                raise RuntimeError("I misunderstand DWARF")
            if "DW_AT_call_file" in file_string or "DW_AT_decl_file" in file_string:
                filename = file_string.split('"')[1]
                line_num = line_string.split("(")[1].split(")")[0]
                if "DW_AT_call_file" in file_string:
                    panicinfo["call_file"] = filename
                    panicinfo["call_line"] = line_num
                if "DW_AT_decl_file" in file_string:
                    panicinfo["decl_file"] = filename
                    panicinfo["decl_line"] = line_num
                if not "/core/" in filename:
                    # Location is already outside the core library -- done.
                    if not "closure" in abstract_origin_string:
                        panicinfo["best_guess_source"] = "call/decl"
                    else:
                        panicinfo["best_guess_source"] = "call-closure-line-info"
                    panic_list.append(panicinfo)
                    continue
                else:  # 'core' in filename
                    (parent_file, parent_line) = check_for_source_in_parent(elf, addr)
                    if parent_file:
                        panicinfo["parent_call_file"] = parent_file
                        panicinfo["parent_call_line"] = parent_line
                        panicinfo["best_guess_source"] = "parent"
                        panic_list.append(panicinfo)
                        continue
                    elif not abstract_origin and not linkage_name:
                        no_info_panic_list.append(panicinfo)
                        continue
                    elif abstract_origin:
                        if "core" in abstract_origin_string:
                            name = matches_panic_funcs(abstract_origin_string)
                            if name:
                                within_core_panic_list.append(panicinfo)
                                continue
                            else:
                                name2 = any_origin_matches_panic_func(elf, addr)
                                name3 = any_linkage_matches_panic_func(elf, addr)
                                if name2:
                                    within_core_panic_list.append(panicinfo)
                                    continue
                                elif name3:
                                    within_core_panic_list.append(panicinfo)
                                    continue
                                else:
                                    no_info_panic_list.append(panicinfo)
                                    continue
                        elif "closure" in abstract_origin_string:
                            # not in core, in closure, line info is probably sufficient
                            panicinfo["best_guess_source"] = "lineinfo"
                            panic_list.append(panicinfo)
                            continue
                        else:
                            # i have not seen this happen -- core in file, not closure, origin not core
                            raise RuntimeError("Unhandled")
                    if linkage_name:
                        name = matches_panic_funcs(linkage_name_string)
                        if name:
                            within_core_panic_list.append(panicinfo)
                            continue
                        else:
                            no_info_panic_list.append(panicinfo)
                            print(
                                "Failed to match panic but we probably have enough info to trace it up. Linkage name: {}, addr: {}".format(
                                    linkage_name_string, addr
                                )
                            )
                            continue
                    # Bug fix: this previously appended the undefined name
                    # 'panic_info', which would raise NameError if reached.
                    no_info_panic_list.append(panicinfo)
                    print("did not find source for panic: {}".format(addr))
                    continue
            elif abstract_origin:
                origin = abstract_origin_string.split('"')[1]
                panicinfo["abstract_origin"] = origin
                if "core" in origin:
                    if matches_panic_funcs(origin):
                        within_core_panic_list.append(panicinfo)
                        continue
                    no_info_panic_list.append(panicinfo)
                    print(
                        "Probably could add this origin or one of its parents to the panic function list: {}".format(
                            abstract_origin_string
                        )
                    )
                    continue
                else:
                    panicinfo["best_guess_source"] = "abstract_origin + line"
                    panic_list.append(panicinfo)
                    continue
            else:
                # This gets hit for OUTLINED_FUNCTION_XX a bunch on ARM
                try:
                    dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[
                        -1
                    ].strip()  # see multiple matches for this string sometimes
                    function_name = dw_at_name_string.split('"')[1]
                    if "OUTLINED_FUNCTION_" in function_name:
                        # This is a common pattern where panicing paths are repeated in many
                        # places throughout the binary, and LLVMs optimizer outlines the repeated code.
                        # Let's add these to the list of panicing functions, dynamically so this is resilient to
                        # changes in the binary.
                        if function_name not in panic_functions:
                            # don't double insert
                            panic_functions.append(
                                function_name + ">"
                            )  # so FUNCTION_22 does not catch FUNCTION_222
                        within_core_panic_list.append(panicinfo)
                        continue
                    no_info_panic_list.append(panicinfo)
                    continue
                except Exception:
                    # There seem to be places where the lookup fails completely
                    # (no matches / malformed output). Not easy to recover;
                    # log these and continue on.
                    no_info_panic_list.append(panicinfo)
                    continue
            raise RuntimeError("BUG: Should not reach here")
    return (panic_list, within_core_panic_list, no_info_panic_list)
def pretty_print(panicinfo):
    """Print a one-line, human-readable location summary for one panic entry.

    *panicinfo* is a dict produced by find_all_panics(); the printed format
    depends on its "best_guess_source" field. Raises RuntimeError when
    "best_guess_source" holds an unrecognized value.
    """
    if panicinfo["best_guess_source"] == "call/decl":
        try:
            print(
                "\t{} -- {}:{}".format(
                    panicinfo["addr"], panicinfo["call_file"], panicinfo["call_line"]
                )
            )
        # Narrowed from a bare `except:`: only a missing call_file/call_line
        # key should fall back to the declaration site.
        except KeyError:
            print(
                "\t{} -- in function starting at {}:{}".format(
                    panicinfo["addr"], panicinfo["decl_file"], panicinfo["decl_line"]
                )
            )
    elif panicinfo["best_guess_source"] == "parent":
        print(
            "\t{} -- at or in function starting at {}:{}".format(
                panicinfo["addr"],
                panicinfo["parent_call_file"],
                panicinfo["parent_call_line"],
            )
        )
    elif panicinfo["best_guess_source"] == "lineinfo":
        print(
            "\t{} -- in closure, try: {}".format(
                panicinfo["addr"], panicinfo["line_info"]
            )
        )
    elif panicinfo["best_guess_source"] == "abstract_origin + line":
        print(
            "\t{} -- line_info: {} from origin :{}".format(
                panicinfo["addr"], panicinfo["line_info"], panicinfo["abstract_origin"]
            )
        )
    elif panicinfo["best_guess_source"] == "call-closure-line-info":
        print(
            "\t{} -- in closure starting on line_info: {}".format(
                panicinfo["addr"], panicinfo["line_info"]
            )
        )
    else:
        raise RuntimeError("Missing best guess source: {}".format(panicinfo))
def main():
    """Entry point: print a panic report for the ELF named on the command line.

    Runs objdump over the binary, locates every call into a known panic
    helper, and prints the best-guess source location for each one, grouped
    by panic function.

    Returns:
        -1 when run on an unsupported Python version; ``None`` otherwise.
    """
    args = parse_args()
    # Compare the full version tuple: checking only `minor` would wrongly
    # accept Python 2.7 and reject a hypothetical 4.0.
    if sys.version_info < (3, 7):
        print("This tool requires Python 3.7+")
        return -1
    print("Tock panic report for " + args.ELF)
    objdump = RISCV_OBJDUMP if args.riscv else ARM_OBJDUMP
    (panic_list, within_core_panic_list, no_info_panic_list) = find_all_panics(
        objdump, args.ELF, args.riscv
    )
    print("num_panics: {}".format(len(panic_list)))
    # Group located panics by the panic helper through which they were found.
    buckets_list = {f: [] for f in panic_functions}
    for panic in panic_list:
        buckets_list[panic["function"]].append(panic)
    for f, l in buckets_list.items():
        if len(l) > 0:
            print("{}: {}".format(f, len(l)))
            for p in l:
                pretty_print(p)
                if args.verbose:
                    print(p)
            print()
    print("num panics in core ignored: {}".format(len(within_core_panic_list)))
    print("num panics for which no info available: {}".format(len(no_info_panic_list)))
    if args.verbose:
        print(
            "If more debug info is needed, run dwarfdump directly on the address in question."
        )
if __name__ == "__main__":
    # Propagate main()'s return value (-1 on an unsupported Python version)
    # as the process exit status instead of silently discarding it;
    # sys.exit(None) still exits with status 0 on success.
    sys.exit(main())
|
flexible
|
{
"blob_id": "8c0a4d5a86d9ebd38ea05efb5b5b570368ce1449",
"index": 1336,
"step-1": "<mask token>\n\n\ndef matches_panic_funcs(name):\n \"\"\"If the passed name contains one of the known panic_functions,\n return the match\n \"\"\"\n for func in panic_functions:\n if func in name:\n return func\n return ''\n\n\n<mask token>\n\n\ndef any_origin_matches_panic_func(elf, addr):\n \"\"\"returns name if any origin for the passed addr matches one\n of the functions in the panic_functions array\n \"\"\"\n origins = linkage_or_origin_all_parents(elf, addr)\n for origin in origins:\n name = matches_panic_funcs(origin)\n if name:\n return name\n return ''\n\n\ndef any_linkage_matches_panic_func(elf, addr):\n \"\"\"returns True + name if any linkage for the passed addr matches one\n of the functions in the panic_functions array\n \"\"\"\n linkages = linkage_or_origin_all_parents(elf, addr, True)\n for linkage in linkages:\n name = matches_panic_funcs(linkage)\n if name:\n return name\n return ''\n\n\ndef check_for_source_in_parent(elf, addr):\n \"\"\"Takes in a dwarfdump lookup including parents of the source DWARF\n location, returns the first parent with a call file not in\n the core library. 
If found, this often indicates the source of the panic\n in the Tock source code.\n \"\"\"\n result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),\n capture_output=True, text=True)\n dwarfdump = result.stdout\n matches = re.findall(dw_at_file_re, dwarfdump)\n\n def getFile(line):\n return line.strip().split('\"')[1]\n source_files = list(map(getFile, matches))\n for i, f in enumerate(source_files[::-1]):\n if '/core/' not in f:\n line_matches = re.findall(dw_at_line_re, dwarfdump)\n\n def getLine(line):\n return line.strip().split('(')[1].split(')')[0]\n source_lines = list(map(getLine, line_matches))\n source_line = source_lines[::-1][i]\n return f, source_line\n return '', ''\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('ELF', help='ELF file for analysis')\n parser.add_argument('--verbose', '-v', action='store_true', help=\n 'Output additional DWARF info for each panic location in the binary')\n parser.add_argument('--riscv', action='store_true', help=\n 'Use risc-v based objdump')\n return parser.parse_args()\n\n\ndef find_all_panics(objdump, elf, is_riscv):\n panic_list = []\n within_core_panic_list = []\n no_info_panic_list = []\n result = subprocess.run((objdump, '-d', elf), capture_output=True, text\n =True)\n objdump_out = result.stdout\n for function in panic_functions:\n function_re = re.compile('.*:.*#.*' + function + '.*')\n if not is_riscv:\n function_re = re.compile('.*:.*<.*' + function + '.*')\n matches = re.findall(function_re, objdump_out)\n\n def getAddr(line):\n return line.strip().split(':')[0]\n addrs = list(map(getAddr, matches))\n for addr in addrs:\n result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, elf),\n capture_output=True, text=True)\n dwarfdump = result.stdout\n dw_at_file = re.search(dw_at_file_re, dwarfdump)\n dw_at_line = re.search(dw_at_line_re, dwarfdump)\n line_info = re.search(line_info_re, dwarfdump)\n abstract_origin = re.search(abstract_origin_re, dwarfdump)\n 
linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)\n file_string = ''\n line_string = ''\n line_info_string = ''\n abstract_origin_string = ''\n linkage_name_string = ''\n if dw_at_file:\n file_string = dw_at_file.group(0).strip()\n line_string = dw_at_line.group(0).strip()\n panicinfo = {}\n panicinfo['addr'] = addr\n panicinfo['function'] = function\n if line_info:\n line_info_string = line_info.group(0).strip()\n panicinfo['line_info'] = line_info_string\n if abstract_origin:\n abstract_origin_string = abstract_origin.group(0).strip()\n if linkage_name:\n linkage_name_string = linkage_name.group(0).strip()\n if ('DW_AT_call_file' in file_string and 'DW_AT_decl_file' in\n file_string):\n raise RuntimeError('I misunderstand DWARF')\n if ('DW_AT_call_file' in file_string or 'DW_AT_decl_file' in\n file_string):\n filename = file_string.split('\"')[1]\n line_num = line_string.split('(')[1].split(')')[0]\n if 'DW_AT_call_file' in file_string:\n panicinfo['call_file'] = filename\n panicinfo['call_line'] = line_num\n if 'DW_AT_decl_file' in file_string:\n panicinfo['decl_file'] = filename\n panicinfo['decl_line'] = line_num\n if not '/core/' in filename:\n if not 'closure' in abstract_origin_string:\n panicinfo['best_guess_source'] = 'call/decl'\n else:\n panicinfo['best_guess_source'\n ] = 'call-closure-line-info'\n panic_list.append(panicinfo)\n continue\n else:\n parent_file, parent_line = check_for_source_in_parent(elf,\n addr)\n if parent_file:\n panicinfo['parent_call_file'] = parent_file\n panicinfo['parent_call_line'] = parent_line\n panicinfo['best_guess_source'] = 'parent'\n panic_list.append(panicinfo)\n continue\n elif not abstract_origin and not linkage_name:\n no_info_panic_list.append(panicinfo)\n continue\n elif abstract_origin:\n if 'core' in abstract_origin_string:\n name = matches_panic_funcs(abstract_origin_string)\n if name:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n name2 = any_origin_matches_panic_func(elf, addr\n )\n 
name3 = any_linkage_matches_panic_func(elf,\n addr)\n if name2:\n within_core_panic_list.append(panicinfo)\n continue\n elif name3:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n no_info_panic_list.append(panicinfo)\n continue\n elif 'closure' in abstract_origin_string:\n panicinfo['best_guess_source'] = 'lineinfo'\n panic_list.append(panicinfo)\n continue\n else:\n raise RuntimeError('Unhandled')\n if linkage_name:\n name = matches_panic_funcs(linkage_name_string)\n if name:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n no_info_panic_list.append(panicinfo)\n print(\n 'Failed to match panic but we probably have enough info to trace it up. Linkage name: {}, addr: {}'\n .format(linkage_name_string, addr))\n continue\n no_info_panic_list.append(panic_info)\n print('did not find source for panic: {}'.format(addr))\n continue\n elif abstract_origin:\n origin = abstract_origin_string.split('\"')[1]\n panicinfo['abstract_origin'] = origin\n if 'core' in origin:\n if matches_panic_funcs(origin):\n within_core_panic_list.append(panicinfo)\n continue\n no_info_panic_list.append(panicinfo)\n print(\n 'Probably could add this origin or one of its parents to the panic function list: {}'\n .format(abstract_origin_string))\n continue\n else:\n panicinfo['best_guess_source'] = 'abstract_origin + line'\n panic_list.append(panicinfo)\n continue\n else:\n try:\n dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[-1\n ].strip()\n function_name = dw_at_name_string.split('\"')[1]\n if 'OUTLINED_FUNCTION_' in function_name:\n if function_name not in panic_functions:\n panic_functions.append(function_name + '>')\n within_core_panic_list.append(panicinfo)\n continue\n no_info_panic_list.append(panicinfo)\n continue\n except:\n no_info_panic_list.append(panicinfo)\n continue\n raise RuntimeError('BUG: Should not reach here')\n return panic_list, within_core_panic_list, no_info_panic_list\n\n\n<mask token>\n\n\ndef main():\n args = parse_args()\n if 
sys.version_info.minor < 7:\n print('This tool requires Python 3.7+')\n return -1\n print('Tock panic report for ' + args.ELF)\n objdump = ARM_OBJDUMP\n if args.riscv:\n objdump = RISCV_OBJDUMP\n panic_list, within_core_panic_list, no_info_panic_list = find_all_panics(\n objdump, args.ELF, args.riscv)\n print('num_panics: {}'.format(len(panic_list)))\n buckets_list = {}\n for f in panic_functions:\n buckets_list[f] = []\n for panic in panic_list:\n buckets_list[panic['function']].append(panic)\n for f, l in buckets_list.items():\n if len(l) > 0:\n print('{}: {}'.format(f, len(l)))\n for p in l:\n pretty_print(p)\n if args.verbose:\n print(p)\n print()\n print('num panics in core ignored: {}'.format(len(within_core_panic_list)))\n print('num panics for which no info available: {}'.format(len(\n no_info_panic_list)))\n if args.verbose:\n print(\n 'If more debug info is needed, run dwarfdump directly on the address in question.'\n )\n\n\n<mask token>\n",
"step-2": "<mask token>\nif platform.system() == 'Darwin':\n DWARFDUMP = 'dwarfdump'\nelif platform.system() == 'Linux':\n DWARFDUMP = 'llvm-dwarfdump'\nelse:\n raise NotImplementedError('Unknown platform')\n<mask token>\n\n\ndef matches_panic_funcs(name):\n \"\"\"If the passed name contains one of the known panic_functions,\n return the match\n \"\"\"\n for func in panic_functions:\n if func in name:\n return func\n return ''\n\n\ndef linkage_or_origin_all_parents(elf, addr, linkage=False):\n \"\"\"Returns a list of the abstract origin or linkage of all parents of the dwarf\n location for the passed address\n \"\"\"\n result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),\n capture_output=True, text=True)\n dwarfdump = result.stdout\n regex = abstract_origin_re\n if linkage:\n regex = dw_at_linkage_name_re\n matches = re.findall(regex, dwarfdump)\n\n def getFunction(line):\n return line.strip().split('\"')[1]\n origins = list(map(getFunction, matches))\n return origins\n\n\ndef any_origin_matches_panic_func(elf, addr):\n \"\"\"returns name if any origin for the passed addr matches one\n of the functions in the panic_functions array\n \"\"\"\n origins = linkage_or_origin_all_parents(elf, addr)\n for origin in origins:\n name = matches_panic_funcs(origin)\n if name:\n return name\n return ''\n\n\ndef any_linkage_matches_panic_func(elf, addr):\n \"\"\"returns True + name if any linkage for the passed addr matches one\n of the functions in the panic_functions array\n \"\"\"\n linkages = linkage_or_origin_all_parents(elf, addr, True)\n for linkage in linkages:\n name = matches_panic_funcs(linkage)\n if name:\n return name\n return ''\n\n\ndef check_for_source_in_parent(elf, addr):\n \"\"\"Takes in a dwarfdump lookup including parents of the source DWARF\n location, returns the first parent with a call file not in\n the core library. 
If found, this often indicates the source of the panic\n in the Tock source code.\n \"\"\"\n result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),\n capture_output=True, text=True)\n dwarfdump = result.stdout\n matches = re.findall(dw_at_file_re, dwarfdump)\n\n def getFile(line):\n return line.strip().split('\"')[1]\n source_files = list(map(getFile, matches))\n for i, f in enumerate(source_files[::-1]):\n if '/core/' not in f:\n line_matches = re.findall(dw_at_line_re, dwarfdump)\n\n def getLine(line):\n return line.strip().split('(')[1].split(')')[0]\n source_lines = list(map(getLine, line_matches))\n source_line = source_lines[::-1][i]\n return f, source_line\n return '', ''\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('ELF', help='ELF file for analysis')\n parser.add_argument('--verbose', '-v', action='store_true', help=\n 'Output additional DWARF info for each panic location in the binary')\n parser.add_argument('--riscv', action='store_true', help=\n 'Use risc-v based objdump')\n return parser.parse_args()\n\n\ndef find_all_panics(objdump, elf, is_riscv):\n panic_list = []\n within_core_panic_list = []\n no_info_panic_list = []\n result = subprocess.run((objdump, '-d', elf), capture_output=True, text\n =True)\n objdump_out = result.stdout\n for function in panic_functions:\n function_re = re.compile('.*:.*#.*' + function + '.*')\n if not is_riscv:\n function_re = re.compile('.*:.*<.*' + function + '.*')\n matches = re.findall(function_re, objdump_out)\n\n def getAddr(line):\n return line.strip().split(':')[0]\n addrs = list(map(getAddr, matches))\n for addr in addrs:\n result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, elf),\n capture_output=True, text=True)\n dwarfdump = result.stdout\n dw_at_file = re.search(dw_at_file_re, dwarfdump)\n dw_at_line = re.search(dw_at_line_re, dwarfdump)\n line_info = re.search(line_info_re, dwarfdump)\n abstract_origin = re.search(abstract_origin_re, dwarfdump)\n 
linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)\n file_string = ''\n line_string = ''\n line_info_string = ''\n abstract_origin_string = ''\n linkage_name_string = ''\n if dw_at_file:\n file_string = dw_at_file.group(0).strip()\n line_string = dw_at_line.group(0).strip()\n panicinfo = {}\n panicinfo['addr'] = addr\n panicinfo['function'] = function\n if line_info:\n line_info_string = line_info.group(0).strip()\n panicinfo['line_info'] = line_info_string\n if abstract_origin:\n abstract_origin_string = abstract_origin.group(0).strip()\n if linkage_name:\n linkage_name_string = linkage_name.group(0).strip()\n if ('DW_AT_call_file' in file_string and 'DW_AT_decl_file' in\n file_string):\n raise RuntimeError('I misunderstand DWARF')\n if ('DW_AT_call_file' in file_string or 'DW_AT_decl_file' in\n file_string):\n filename = file_string.split('\"')[1]\n line_num = line_string.split('(')[1].split(')')[0]\n if 'DW_AT_call_file' in file_string:\n panicinfo['call_file'] = filename\n panicinfo['call_line'] = line_num\n if 'DW_AT_decl_file' in file_string:\n panicinfo['decl_file'] = filename\n panicinfo['decl_line'] = line_num\n if not '/core/' in filename:\n if not 'closure' in abstract_origin_string:\n panicinfo['best_guess_source'] = 'call/decl'\n else:\n panicinfo['best_guess_source'\n ] = 'call-closure-line-info'\n panic_list.append(panicinfo)\n continue\n else:\n parent_file, parent_line = check_for_source_in_parent(elf,\n addr)\n if parent_file:\n panicinfo['parent_call_file'] = parent_file\n panicinfo['parent_call_line'] = parent_line\n panicinfo['best_guess_source'] = 'parent'\n panic_list.append(panicinfo)\n continue\n elif not abstract_origin and not linkage_name:\n no_info_panic_list.append(panicinfo)\n continue\n elif abstract_origin:\n if 'core' in abstract_origin_string:\n name = matches_panic_funcs(abstract_origin_string)\n if name:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n name2 = any_origin_matches_panic_func(elf, addr\n )\n 
name3 = any_linkage_matches_panic_func(elf,\n addr)\n if name2:\n within_core_panic_list.append(panicinfo)\n continue\n elif name3:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n no_info_panic_list.append(panicinfo)\n continue\n elif 'closure' in abstract_origin_string:\n panicinfo['best_guess_source'] = 'lineinfo'\n panic_list.append(panicinfo)\n continue\n else:\n raise RuntimeError('Unhandled')\n if linkage_name:\n name = matches_panic_funcs(linkage_name_string)\n if name:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n no_info_panic_list.append(panicinfo)\n print(\n 'Failed to match panic but we probably have enough info to trace it up. Linkage name: {}, addr: {}'\n .format(linkage_name_string, addr))\n continue\n no_info_panic_list.append(panic_info)\n print('did not find source for panic: {}'.format(addr))\n continue\n elif abstract_origin:\n origin = abstract_origin_string.split('\"')[1]\n panicinfo['abstract_origin'] = origin\n if 'core' in origin:\n if matches_panic_funcs(origin):\n within_core_panic_list.append(panicinfo)\n continue\n no_info_panic_list.append(panicinfo)\n print(\n 'Probably could add this origin or one of its parents to the panic function list: {}'\n .format(abstract_origin_string))\n continue\n else:\n panicinfo['best_guess_source'] = 'abstract_origin + line'\n panic_list.append(panicinfo)\n continue\n else:\n try:\n dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[-1\n ].strip()\n function_name = dw_at_name_string.split('\"')[1]\n if 'OUTLINED_FUNCTION_' in function_name:\n if function_name not in panic_functions:\n panic_functions.append(function_name + '>')\n within_core_panic_list.append(panicinfo)\n continue\n no_info_panic_list.append(panicinfo)\n continue\n except:\n no_info_panic_list.append(panicinfo)\n continue\n raise RuntimeError('BUG: Should not reach here')\n return panic_list, within_core_panic_list, no_info_panic_list\n\n\ndef pretty_print(panicinfo):\n if 
panicinfo['best_guess_source'] == 'call/decl':\n try:\n print('\\t{} -- {}:{}'.format(panicinfo['addr'], panicinfo[\n 'call_file'], panicinfo['call_line']))\n except:\n print('\\t{} -- in function starting at {}:{}'.format(panicinfo[\n 'addr'], panicinfo['decl_file'], panicinfo['decl_line']))\n elif panicinfo['best_guess_source'] == 'parent':\n print('\\t{} -- at or in function starting at {}:{}'.format(\n panicinfo['addr'], panicinfo['parent_call_file'], panicinfo[\n 'parent_call_line']))\n elif panicinfo['best_guess_source'] == 'lineinfo':\n print('\\t{} -- in closure, try: {}'.format(panicinfo['addr'],\n panicinfo['line_info']))\n elif panicinfo['best_guess_source'] == 'abstract_origin + line':\n print('\\t{} -- line_info: {} from origin :{}'.format(panicinfo[\n 'addr'], panicinfo['line_info'], panicinfo['abstract_origin']))\n elif panicinfo['best_guess_source'] == 'call-closure-line-info':\n print('\\t{} -- in closure starting on line_info: {}'.format(\n panicinfo['addr'], panicinfo['line_info']))\n else:\n raise RuntimeError('Missing best guess source: {}'.format(panicinfo))\n\n\ndef main():\n args = parse_args()\n if sys.version_info.minor < 7:\n print('This tool requires Python 3.7+')\n return -1\n print('Tock panic report for ' + args.ELF)\n objdump = ARM_OBJDUMP\n if args.riscv:\n objdump = RISCV_OBJDUMP\n panic_list, within_core_panic_list, no_info_panic_list = find_all_panics(\n objdump, args.ELF, args.riscv)\n print('num_panics: {}'.format(len(panic_list)))\n buckets_list = {}\n for f in panic_functions:\n buckets_list[f] = []\n for panic in panic_list:\n buckets_list[panic['function']].append(panic)\n for f, l in buckets_list.items():\n if len(l) > 0:\n print('{}: {}'.format(f, len(l)))\n for p in l:\n pretty_print(p)\n if args.verbose:\n print(p)\n print()\n print('num panics in core ignored: {}'.format(len(within_core_panic_list)))\n print('num panics for which no info available: {}'.format(len(\n no_info_panic_list)))\n if args.verbose:\n print(\n 
'If more debug info is needed, run dwarfdump directly on the address in question.'\n )\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nif platform.system() == 'Darwin':\n DWARFDUMP = 'dwarfdump'\nelif platform.system() == 'Linux':\n DWARFDUMP = 'llvm-dwarfdump'\nelse:\n raise NotImplementedError('Unknown platform')\nARM_OBJDUMP = 'arm-none-eabi-objdump'\nRISCV_OBJDUMP = 'riscv64-unknown-elf-objdump'\npanic_functions = ['expect_failed', 'unwrap_failed', 'panic_bounds_check',\n 'slice_index_order_fail', 'slice_end_index_len_fail',\n 'slice_start_index_len_fail', 'slice17len_mismatch_fail',\n 'str16slice_error_fail', 'copy_from_slice17len_mismatch_fail',\n 'copy_from_slice17', 'panicking5panic', '6unwrap17', '6expect17',\n '11copy_within17', 'core..fmt..builders..PadAdapter', '11copy_within17',\n 'write_char', 'write_str', 'printable5check',\n 'char$u20$as$u20$core..fmt..Debug', 'GenericRadix7fmt_int',\n '10unwrap_err17h6', '13is_whitespace17',\n '$u20$core..slice..index..SliceIndex$LT',\n 'core..iter..adapters..filter..Filter$LT$I$C$P$GT$$u20$as$u20$core..iter',\n '_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17h4c77379bd26a525bE'\n ,\n '_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17hfe7e43aa2388c47bE'\n ]\ndw_at_file_re = re.compile('.*(?:DW_AT_call_file|DW_AT_decl_file).*')\ndw_at_line_re = re.compile('.*(?:DW_AT_call_line|DW_AT_decl_line).*')\nline_info_re = re.compile('.*Line info.*')\nabstract_origin_re = re.compile('.*DW_AT_abstract_origin.*')\ndw_at_linkage_name_re = re.compile('.*DW_AT_linkage_name.*')\ndw_at_name_re = re.compile('.*DW_AT_name.*')\n\n\ndef matches_panic_funcs(name):\n \"\"\"If the passed name contains one of the known panic_functions,\n return the match\n \"\"\"\n for func in panic_functions:\n if func in name:\n return func\n return ''\n\n\ndef linkage_or_origin_all_parents(elf, addr, linkage=False):\n \"\"\"Returns a list of the abstract origin or linkage of all parents of the dwarf\n location for the passed address\n \"\"\"\n result = 
subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),\n capture_output=True, text=True)\n dwarfdump = result.stdout\n regex = abstract_origin_re\n if linkage:\n regex = dw_at_linkage_name_re\n matches = re.findall(regex, dwarfdump)\n\n def getFunction(line):\n return line.strip().split('\"')[1]\n origins = list(map(getFunction, matches))\n return origins\n\n\ndef any_origin_matches_panic_func(elf, addr):\n \"\"\"returns name if any origin for the passed addr matches one\n of the functions in the panic_functions array\n \"\"\"\n origins = linkage_or_origin_all_parents(elf, addr)\n for origin in origins:\n name = matches_panic_funcs(origin)\n if name:\n return name\n return ''\n\n\ndef any_linkage_matches_panic_func(elf, addr):\n \"\"\"returns True + name if any linkage for the passed addr matches one\n of the functions in the panic_functions array\n \"\"\"\n linkages = linkage_or_origin_all_parents(elf, addr, True)\n for linkage in linkages:\n name = matches_panic_funcs(linkage)\n if name:\n return name\n return ''\n\n\ndef check_for_source_in_parent(elf, addr):\n \"\"\"Takes in a dwarfdump lookup including parents of the source DWARF\n location, returns the first parent with a call file not in\n the core library. 
If found, this often indicates the source of the panic\n in the Tock source code.\n \"\"\"\n result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),\n capture_output=True, text=True)\n dwarfdump = result.stdout\n matches = re.findall(dw_at_file_re, dwarfdump)\n\n def getFile(line):\n return line.strip().split('\"')[1]\n source_files = list(map(getFile, matches))\n for i, f in enumerate(source_files[::-1]):\n if '/core/' not in f:\n line_matches = re.findall(dw_at_line_re, dwarfdump)\n\n def getLine(line):\n return line.strip().split('(')[1].split(')')[0]\n source_lines = list(map(getLine, line_matches))\n source_line = source_lines[::-1][i]\n return f, source_line\n return '', ''\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('ELF', help='ELF file for analysis')\n parser.add_argument('--verbose', '-v', action='store_true', help=\n 'Output additional DWARF info for each panic location in the binary')\n parser.add_argument('--riscv', action='store_true', help=\n 'Use risc-v based objdump')\n return parser.parse_args()\n\n\ndef find_all_panics(objdump, elf, is_riscv):\n panic_list = []\n within_core_panic_list = []\n no_info_panic_list = []\n result = subprocess.run((objdump, '-d', elf), capture_output=True, text\n =True)\n objdump_out = result.stdout\n for function in panic_functions:\n function_re = re.compile('.*:.*#.*' + function + '.*')\n if not is_riscv:\n function_re = re.compile('.*:.*<.*' + function + '.*')\n matches = re.findall(function_re, objdump_out)\n\n def getAddr(line):\n return line.strip().split(':')[0]\n addrs = list(map(getAddr, matches))\n for addr in addrs:\n result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, elf),\n capture_output=True, text=True)\n dwarfdump = result.stdout\n dw_at_file = re.search(dw_at_file_re, dwarfdump)\n dw_at_line = re.search(dw_at_line_re, dwarfdump)\n line_info = re.search(line_info_re, dwarfdump)\n abstract_origin = re.search(abstract_origin_re, dwarfdump)\n 
linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)\n file_string = ''\n line_string = ''\n line_info_string = ''\n abstract_origin_string = ''\n linkage_name_string = ''\n if dw_at_file:\n file_string = dw_at_file.group(0).strip()\n line_string = dw_at_line.group(0).strip()\n panicinfo = {}\n panicinfo['addr'] = addr\n panicinfo['function'] = function\n if line_info:\n line_info_string = line_info.group(0).strip()\n panicinfo['line_info'] = line_info_string\n if abstract_origin:\n abstract_origin_string = abstract_origin.group(0).strip()\n if linkage_name:\n linkage_name_string = linkage_name.group(0).strip()\n if ('DW_AT_call_file' in file_string and 'DW_AT_decl_file' in\n file_string):\n raise RuntimeError('I misunderstand DWARF')\n if ('DW_AT_call_file' in file_string or 'DW_AT_decl_file' in\n file_string):\n filename = file_string.split('\"')[1]\n line_num = line_string.split('(')[1].split(')')[0]\n if 'DW_AT_call_file' in file_string:\n panicinfo['call_file'] = filename\n panicinfo['call_line'] = line_num\n if 'DW_AT_decl_file' in file_string:\n panicinfo['decl_file'] = filename\n panicinfo['decl_line'] = line_num\n if not '/core/' in filename:\n if not 'closure' in abstract_origin_string:\n panicinfo['best_guess_source'] = 'call/decl'\n else:\n panicinfo['best_guess_source'\n ] = 'call-closure-line-info'\n panic_list.append(panicinfo)\n continue\n else:\n parent_file, parent_line = check_for_source_in_parent(elf,\n addr)\n if parent_file:\n panicinfo['parent_call_file'] = parent_file\n panicinfo['parent_call_line'] = parent_line\n panicinfo['best_guess_source'] = 'parent'\n panic_list.append(panicinfo)\n continue\n elif not abstract_origin and not linkage_name:\n no_info_panic_list.append(panicinfo)\n continue\n elif abstract_origin:\n if 'core' in abstract_origin_string:\n name = matches_panic_funcs(abstract_origin_string)\n if name:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n name2 = any_origin_matches_panic_func(elf, addr\n )\n 
name3 = any_linkage_matches_panic_func(elf,\n addr)\n if name2:\n within_core_panic_list.append(panicinfo)\n continue\n elif name3:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n no_info_panic_list.append(panicinfo)\n continue\n elif 'closure' in abstract_origin_string:\n panicinfo['best_guess_source'] = 'lineinfo'\n panic_list.append(panicinfo)\n continue\n else:\n raise RuntimeError('Unhandled')\n if linkage_name:\n name = matches_panic_funcs(linkage_name_string)\n if name:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n no_info_panic_list.append(panicinfo)\n print(\n 'Failed to match panic but we probably have enough info to trace it up. Linkage name: {}, addr: {}'\n .format(linkage_name_string, addr))\n continue\n no_info_panic_list.append(panic_info)\n print('did not find source for panic: {}'.format(addr))\n continue\n elif abstract_origin:\n origin = abstract_origin_string.split('\"')[1]\n panicinfo['abstract_origin'] = origin\n if 'core' in origin:\n if matches_panic_funcs(origin):\n within_core_panic_list.append(panicinfo)\n continue\n no_info_panic_list.append(panicinfo)\n print(\n 'Probably could add this origin or one of its parents to the panic function list: {}'\n .format(abstract_origin_string))\n continue\n else:\n panicinfo['best_guess_source'] = 'abstract_origin + line'\n panic_list.append(panicinfo)\n continue\n else:\n try:\n dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[-1\n ].strip()\n function_name = dw_at_name_string.split('\"')[1]\n if 'OUTLINED_FUNCTION_' in function_name:\n if function_name not in panic_functions:\n panic_functions.append(function_name + '>')\n within_core_panic_list.append(panicinfo)\n continue\n no_info_panic_list.append(panicinfo)\n continue\n except:\n no_info_panic_list.append(panicinfo)\n continue\n raise RuntimeError('BUG: Should not reach here')\n return panic_list, within_core_panic_list, no_info_panic_list\n\n\ndef pretty_print(panicinfo):\n if 
panicinfo['best_guess_source'] == 'call/decl':\n try:\n print('\\t{} -- {}:{}'.format(panicinfo['addr'], panicinfo[\n 'call_file'], panicinfo['call_line']))\n except:\n print('\\t{} -- in function starting at {}:{}'.format(panicinfo[\n 'addr'], panicinfo['decl_file'], panicinfo['decl_line']))\n elif panicinfo['best_guess_source'] == 'parent':\n print('\\t{} -- at or in function starting at {}:{}'.format(\n panicinfo['addr'], panicinfo['parent_call_file'], panicinfo[\n 'parent_call_line']))\n elif panicinfo['best_guess_source'] == 'lineinfo':\n print('\\t{} -- in closure, try: {}'.format(panicinfo['addr'],\n panicinfo['line_info']))\n elif panicinfo['best_guess_source'] == 'abstract_origin + line':\n print('\\t{} -- line_info: {} from origin :{}'.format(panicinfo[\n 'addr'], panicinfo['line_info'], panicinfo['abstract_origin']))\n elif panicinfo['best_guess_source'] == 'call-closure-line-info':\n print('\\t{} -- in closure starting on line_info: {}'.format(\n panicinfo['addr'], panicinfo['line_info']))\n else:\n raise RuntimeError('Missing best guess source: {}'.format(panicinfo))\n\n\ndef main():\n args = parse_args()\n if sys.version_info.minor < 7:\n print('This tool requires Python 3.7+')\n return -1\n print('Tock panic report for ' + args.ELF)\n objdump = ARM_OBJDUMP\n if args.riscv:\n objdump = RISCV_OBJDUMP\n panic_list, within_core_panic_list, no_info_panic_list = find_all_panics(\n objdump, args.ELF, args.riscv)\n print('num_panics: {}'.format(len(panic_list)))\n buckets_list = {}\n for f in panic_functions:\n buckets_list[f] = []\n for panic in panic_list:\n buckets_list[panic['function']].append(panic)\n for f, l in buckets_list.items():\n if len(l) > 0:\n print('{}: {}'.format(f, len(l)))\n for p in l:\n pretty_print(p)\n if args.verbose:\n print(p)\n print()\n print('num panics in core ignored: {}'.format(len(within_core_panic_list)))\n print('num panics for which no info available: {}'.format(len(\n no_info_panic_list)))\n if args.verbose:\n print(\n 
'If more debug info is needed, run dwarfdump directly on the address in question.'\n )\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import argparse\nimport platform\nimport re\nimport subprocess\nimport sys\nif platform.system() == 'Darwin':\n DWARFDUMP = 'dwarfdump'\nelif platform.system() == 'Linux':\n DWARFDUMP = 'llvm-dwarfdump'\nelse:\n raise NotImplementedError('Unknown platform')\nARM_OBJDUMP = 'arm-none-eabi-objdump'\nRISCV_OBJDUMP = 'riscv64-unknown-elf-objdump'\npanic_functions = ['expect_failed', 'unwrap_failed', 'panic_bounds_check',\n 'slice_index_order_fail', 'slice_end_index_len_fail',\n 'slice_start_index_len_fail', 'slice17len_mismatch_fail',\n 'str16slice_error_fail', 'copy_from_slice17len_mismatch_fail',\n 'copy_from_slice17', 'panicking5panic', '6unwrap17', '6expect17',\n '11copy_within17', 'core..fmt..builders..PadAdapter', '11copy_within17',\n 'write_char', 'write_str', 'printable5check',\n 'char$u20$as$u20$core..fmt..Debug', 'GenericRadix7fmt_int',\n '10unwrap_err17h6', '13is_whitespace17',\n '$u20$core..slice..index..SliceIndex$LT',\n 'core..iter..adapters..filter..Filter$LT$I$C$P$GT$$u20$as$u20$core..iter',\n '_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17h4c77379bd26a525bE'\n ,\n '_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17hfe7e43aa2388c47bE'\n ]\ndw_at_file_re = re.compile('.*(?:DW_AT_call_file|DW_AT_decl_file).*')\ndw_at_line_re = re.compile('.*(?:DW_AT_call_line|DW_AT_decl_line).*')\nline_info_re = re.compile('.*Line info.*')\nabstract_origin_re = re.compile('.*DW_AT_abstract_origin.*')\ndw_at_linkage_name_re = re.compile('.*DW_AT_linkage_name.*')\ndw_at_name_re = re.compile('.*DW_AT_name.*')\n\n\ndef matches_panic_funcs(name):\n \"\"\"If the passed name contains one of the known panic_functions,\n return the match\n \"\"\"\n for func in panic_functions:\n if func in name:\n return func\n return ''\n\n\ndef linkage_or_origin_all_parents(elf, addr, linkage=False):\n \"\"\"Returns a list of the abstract origin or linkage of all parents of 
the dwarf\n location for the passed address\n \"\"\"\n result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),\n capture_output=True, text=True)\n dwarfdump = result.stdout\n regex = abstract_origin_re\n if linkage:\n regex = dw_at_linkage_name_re\n matches = re.findall(regex, dwarfdump)\n\n def getFunction(line):\n return line.strip().split('\"')[1]\n origins = list(map(getFunction, matches))\n return origins\n\n\ndef any_origin_matches_panic_func(elf, addr):\n \"\"\"returns name if any origin for the passed addr matches one\n of the functions in the panic_functions array\n \"\"\"\n origins = linkage_or_origin_all_parents(elf, addr)\n for origin in origins:\n name = matches_panic_funcs(origin)\n if name:\n return name\n return ''\n\n\ndef any_linkage_matches_panic_func(elf, addr):\n \"\"\"returns True + name if any linkage for the passed addr matches one\n of the functions in the panic_functions array\n \"\"\"\n linkages = linkage_or_origin_all_parents(elf, addr, True)\n for linkage in linkages:\n name = matches_panic_funcs(linkage)\n if name:\n return name\n return ''\n\n\ndef check_for_source_in_parent(elf, addr):\n \"\"\"Takes in a dwarfdump lookup including parents of the source DWARF\n location, returns the first parent with a call file not in\n the core library. 
If found, this often indicates the source of the panic\n in the Tock source code.\n \"\"\"\n result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),\n capture_output=True, text=True)\n dwarfdump = result.stdout\n matches = re.findall(dw_at_file_re, dwarfdump)\n\n def getFile(line):\n return line.strip().split('\"')[1]\n source_files = list(map(getFile, matches))\n for i, f in enumerate(source_files[::-1]):\n if '/core/' not in f:\n line_matches = re.findall(dw_at_line_re, dwarfdump)\n\n def getLine(line):\n return line.strip().split('(')[1].split(')')[0]\n source_lines = list(map(getLine, line_matches))\n source_line = source_lines[::-1][i]\n return f, source_line\n return '', ''\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('ELF', help='ELF file for analysis')\n parser.add_argument('--verbose', '-v', action='store_true', help=\n 'Output additional DWARF info for each panic location in the binary')\n parser.add_argument('--riscv', action='store_true', help=\n 'Use risc-v based objdump')\n return parser.parse_args()\n\n\ndef find_all_panics(objdump, elf, is_riscv):\n panic_list = []\n within_core_panic_list = []\n no_info_panic_list = []\n result = subprocess.run((objdump, '-d', elf), capture_output=True, text\n =True)\n objdump_out = result.stdout\n for function in panic_functions:\n function_re = re.compile('.*:.*#.*' + function + '.*')\n if not is_riscv:\n function_re = re.compile('.*:.*<.*' + function + '.*')\n matches = re.findall(function_re, objdump_out)\n\n def getAddr(line):\n return line.strip().split(':')[0]\n addrs = list(map(getAddr, matches))\n for addr in addrs:\n result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, elf),\n capture_output=True, text=True)\n dwarfdump = result.stdout\n dw_at_file = re.search(dw_at_file_re, dwarfdump)\n dw_at_line = re.search(dw_at_line_re, dwarfdump)\n line_info = re.search(line_info_re, dwarfdump)\n abstract_origin = re.search(abstract_origin_re, dwarfdump)\n 
linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)\n file_string = ''\n line_string = ''\n line_info_string = ''\n abstract_origin_string = ''\n linkage_name_string = ''\n if dw_at_file:\n file_string = dw_at_file.group(0).strip()\n line_string = dw_at_line.group(0).strip()\n panicinfo = {}\n panicinfo['addr'] = addr\n panicinfo['function'] = function\n if line_info:\n line_info_string = line_info.group(0).strip()\n panicinfo['line_info'] = line_info_string\n if abstract_origin:\n abstract_origin_string = abstract_origin.group(0).strip()\n if linkage_name:\n linkage_name_string = linkage_name.group(0).strip()\n if ('DW_AT_call_file' in file_string and 'DW_AT_decl_file' in\n file_string):\n raise RuntimeError('I misunderstand DWARF')\n if ('DW_AT_call_file' in file_string or 'DW_AT_decl_file' in\n file_string):\n filename = file_string.split('\"')[1]\n line_num = line_string.split('(')[1].split(')')[0]\n if 'DW_AT_call_file' in file_string:\n panicinfo['call_file'] = filename\n panicinfo['call_line'] = line_num\n if 'DW_AT_decl_file' in file_string:\n panicinfo['decl_file'] = filename\n panicinfo['decl_line'] = line_num\n if not '/core/' in filename:\n if not 'closure' in abstract_origin_string:\n panicinfo['best_guess_source'] = 'call/decl'\n else:\n panicinfo['best_guess_source'\n ] = 'call-closure-line-info'\n panic_list.append(panicinfo)\n continue\n else:\n parent_file, parent_line = check_for_source_in_parent(elf,\n addr)\n if parent_file:\n panicinfo['parent_call_file'] = parent_file\n panicinfo['parent_call_line'] = parent_line\n panicinfo['best_guess_source'] = 'parent'\n panic_list.append(panicinfo)\n continue\n elif not abstract_origin and not linkage_name:\n no_info_panic_list.append(panicinfo)\n continue\n elif abstract_origin:\n if 'core' in abstract_origin_string:\n name = matches_panic_funcs(abstract_origin_string)\n if name:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n name2 = any_origin_matches_panic_func(elf, addr\n )\n 
name3 = any_linkage_matches_panic_func(elf,\n addr)\n if name2:\n within_core_panic_list.append(panicinfo)\n continue\n elif name3:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n no_info_panic_list.append(panicinfo)\n continue\n elif 'closure' in abstract_origin_string:\n panicinfo['best_guess_source'] = 'lineinfo'\n panic_list.append(panicinfo)\n continue\n else:\n raise RuntimeError('Unhandled')\n if linkage_name:\n name = matches_panic_funcs(linkage_name_string)\n if name:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n no_info_panic_list.append(panicinfo)\n print(\n 'Failed to match panic but we probably have enough info to trace it up. Linkage name: {}, addr: {}'\n .format(linkage_name_string, addr))\n continue\n no_info_panic_list.append(panic_info)\n print('did not find source for panic: {}'.format(addr))\n continue\n elif abstract_origin:\n origin = abstract_origin_string.split('\"')[1]\n panicinfo['abstract_origin'] = origin\n if 'core' in origin:\n if matches_panic_funcs(origin):\n within_core_panic_list.append(panicinfo)\n continue\n no_info_panic_list.append(panicinfo)\n print(\n 'Probably could add this origin or one of its parents to the panic function list: {}'\n .format(abstract_origin_string))\n continue\n else:\n panicinfo['best_guess_source'] = 'abstract_origin + line'\n panic_list.append(panicinfo)\n continue\n else:\n try:\n dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[-1\n ].strip()\n function_name = dw_at_name_string.split('\"')[1]\n if 'OUTLINED_FUNCTION_' in function_name:\n if function_name not in panic_functions:\n panic_functions.append(function_name + '>')\n within_core_panic_list.append(panicinfo)\n continue\n no_info_panic_list.append(panicinfo)\n continue\n except:\n no_info_panic_list.append(panicinfo)\n continue\n raise RuntimeError('BUG: Should not reach here')\n return panic_list, within_core_panic_list, no_info_panic_list\n\n\ndef pretty_print(panicinfo):\n if 
panicinfo['best_guess_source'] == 'call/decl':\n try:\n print('\\t{} -- {}:{}'.format(panicinfo['addr'], panicinfo[\n 'call_file'], panicinfo['call_line']))\n except:\n print('\\t{} -- in function starting at {}:{}'.format(panicinfo[\n 'addr'], panicinfo['decl_file'], panicinfo['decl_line']))\n elif panicinfo['best_guess_source'] == 'parent':\n print('\\t{} -- at or in function starting at {}:{}'.format(\n panicinfo['addr'], panicinfo['parent_call_file'], panicinfo[\n 'parent_call_line']))\n elif panicinfo['best_guess_source'] == 'lineinfo':\n print('\\t{} -- in closure, try: {}'.format(panicinfo['addr'],\n panicinfo['line_info']))\n elif panicinfo['best_guess_source'] == 'abstract_origin + line':\n print('\\t{} -- line_info: {} from origin :{}'.format(panicinfo[\n 'addr'], panicinfo['line_info'], panicinfo['abstract_origin']))\n elif panicinfo['best_guess_source'] == 'call-closure-line-info':\n print('\\t{} -- in closure starting on line_info: {}'.format(\n panicinfo['addr'], panicinfo['line_info']))\n else:\n raise RuntimeError('Missing best guess source: {}'.format(panicinfo))\n\n\ndef main():\n args = parse_args()\n if sys.version_info.minor < 7:\n print('This tool requires Python 3.7+')\n return -1\n print('Tock panic report for ' + args.ELF)\n objdump = ARM_OBJDUMP\n if args.riscv:\n objdump = RISCV_OBJDUMP\n panic_list, within_core_panic_list, no_info_panic_list = find_all_panics(\n objdump, args.ELF, args.riscv)\n print('num_panics: {}'.format(len(panic_list)))\n buckets_list = {}\n for f in panic_functions:\n buckets_list[f] = []\n for panic in panic_list:\n buckets_list[panic['function']].append(panic)\n for f, l in buckets_list.items():\n if len(l) > 0:\n print('{}: {}'.format(f, len(l)))\n for p in l:\n pretty_print(p)\n if args.verbose:\n print(p)\n print()\n print('num panics in core ignored: {}'.format(len(within_core_panic_list)))\n print('num panics for which no info available: {}'.format(len(\n no_info_panic_list)))\n if args.verbose:\n print(\n 
'If more debug info is needed, run dwarfdump directly on the address in question.'\n )\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python3\n\n# Licensed under the Apache License, Version 2.0 or the MIT License.\n# SPDX-License-Identifier: Apache-2.0 OR MIT\n# Copyright Tock Contributors 2023.\n\n# Prints out the source locations of panics in a Tock kernel ELF\n#\n# This tool attempts to trace all panic locations in a Tock kernel ELF by\n# tracing calls to panic functions in the core library, using the debug information\n# embedded in the ELF file. This tool requires an ELF which includes debug information.\n# In its current state, cannot accurately provide the source locations\n# corresponding to each panic, but tries to be honest about its confidence in\n# each guess. In general, each guess is usually enough to locate the relevant panic.\n# More creative analysis might be able to increase\n# the accuracy with which this tool can identify source locations of panics. For now,\n# this tool is useful for:\n#\n# - obtaining a rough count of the number of panics in a Tock kernel binary\n#\n# - finding and removing panics in a Tock kernel binary\n#\n# - roughly determining which components of a Tock kernel binary contain the most panic\n# paths\n#\n# There are several assumptions built into this tool which may not always hold. For one,\n# the list of panic_functions are assumed to not match any strings in the actual\n# codebase, despite the fact they are incomplete function names and overlap is possible.\n# I could solve this by using full names of these functions, but I am unsure how often\n# the name mangling of these functions will change as the rust compiler changes so this\n# approach felt potentially more stable.\n#\n# Several assumptions are made about DWARF locations that do not always hold, so source\n# locations are not always accurate -- sometimes, the printed location just points to\n# the function containing a panic, rather than the actual line on which the panic\n# occurs. 
Some assumptions about which panics are in the core library and will be\n# caught by grepping for other calls may also not always hold. The best way to inspect\n# these is by manually inspecting the panics in the `within_core_panic_list`.\n#\n# This script stores panics which it cannot trace out of the core library in the\n# `no_info_panic_list`. If this list contains some panics, that is a sign that some\n# panics have not been identified. You can manually look at the addresses stored in\n# this list, attempt to find the core library function which leads to these instrucitons\n# being called, and then add those core library functions to the list of panic functions.\n#\n# The output of this script is *not* stable.\n#\n# Usage: find_panics.py ELF [--riscv]\n#\n# Requires Python 3.7+\n#\n# Author: Hudson Ayers <hayers@.stanford.edu>\n\nimport argparse\nimport platform\nimport re\nimport subprocess\nimport sys\n\n\nif platform.system() == 'Darwin':\n DWARFDUMP = \"dwarfdump\"\nelif platform.system() == 'Linux':\n DWARFDUMP = \"llvm-dwarfdump\"\nelse:\n raise NotImplementedError(\"Unknown platform\")\n# Note: In practice, GCC objdumps are better at symbol resolution than LLVM objdump\nARM_OBJDUMP = \"arm-none-eabi-objdump\"\nRISCV_OBJDUMP = \"riscv64-unknown-elf-objdump\"\n\n# TODO: For all functions below the initial batch, it would like be preferable to\n# automatically populate the list with additional functions in the core library using\n# debug info. 
For now, however, I do this manually.\npanic_functions = [\n \"expect_failed\",\n \"unwrap_failed\",\n \"panic_bounds_check\",\n \"slice_index_order_fail\",\n \"slice_end_index_len_fail\",\n \"slice_start_index_len_fail\",\n \"slice17len_mismatch_fail\",\n \"str16slice_error_fail\",\n \"copy_from_slice17len_mismatch_fail\",\n \"copy_from_slice17\",\n \"panicking5panic\",\n # below are functions I have manually traced up from the above, more \"core\" panics, on a riscv binary with a low inline threshold\n \"6unwrap17\",\n \"6expect17\",\n \"11copy_within17\",\n \"core..fmt..builders..PadAdapter\", # calls slice_error_fail\n \"11copy_within17\", # calls panicking::panic\n \"write_char\", # calls PadAdapter one above\n \"write_str\", # calls write_char\n \"printable5check\", # calls slice_index_order_fail\n \"char$u20$as$u20$core..fmt..Debug\", # calls printable5check\n \"GenericRadix7fmt_int\", # calls slice_start_index_len_fail\n # below are functions I manually traced on an arm binary,\n # with a somewhat higher inline threshold.\n \"10unwrap_err17h6\",\n \"13is_whitespace17\",\n \"$u20$core..slice..index..SliceIndex$LT\",\n \"core..iter..adapters..filter..Filter$LT$I$C$P$GT$$u20$as$u20$core..iter\",\n \"_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17h4c77379bd26a525bE\",\n \"_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17hfe7e43aa2388c47bE\",\n]\n\n# Pre-compiled regex lookups\ndw_at_file_re = re.compile(r\"\"\".*(?:DW_AT_call_file|DW_AT_decl_file).*\"\"\")\ndw_at_line_re = re.compile(r\"\"\".*(?:DW_AT_call_line|DW_AT_decl_line).*\"\"\")\nline_info_re = re.compile(r\"\"\".*Line info.*\"\"\")\nabstract_origin_re = re.compile(r\"\"\".*DW_AT_abstract_origin.*\"\"\")\ndw_at_linkage_name_re = re.compile(r\"\"\".*DW_AT_linkage_name.*\"\"\")\ndw_at_name_re = re.compile(r\"\"\".*DW_AT_name.*\"\"\")\n\n\ndef matches_panic_funcs(name):\n \"\"\"If the passed name 
contains one of the known panic_functions,\n return the match\n \"\"\"\n for func in panic_functions:\n if func in name:\n return func\n return \"\"\n\n\ndef linkage_or_origin_all_parents(elf, addr, linkage=False):\n \"\"\"Returns a list of the abstract origin or linkage of all parents of the dwarf\n location for the passed address\n \"\"\"\n result = subprocess.run(\n (DWARFDUMP, \"--lookup=0x\" + addr, \"-p\", elf), capture_output=True, text=True\n )\n dwarfdump = result.stdout\n regex = abstract_origin_re\n if linkage:\n regex = dw_at_linkage_name_re\n matches = re.findall(regex, dwarfdump)\n\n def getFunction(line):\n return line.strip().split('\"')[1]\n\n origins = list(map(getFunction, matches))\n return origins\n\n\ndef any_origin_matches_panic_func(elf, addr):\n \"\"\"returns name if any origin for the passed addr matches one\n of the functions in the panic_functions array\n \"\"\"\n origins = linkage_or_origin_all_parents(elf, addr)\n for origin in origins:\n name = matches_panic_funcs(origin)\n if name:\n return name\n return \"\"\n\n\ndef any_linkage_matches_panic_func(elf, addr):\n \"\"\"returns True + name if any linkage for the passed addr matches one\n of the functions in the panic_functions array\n \"\"\"\n linkages = linkage_or_origin_all_parents(elf, addr, True)\n for linkage in linkages:\n name = matches_panic_funcs(linkage)\n if name:\n return name\n return \"\"\n\n\ndef check_for_source_in_parent(elf, addr):\n \"\"\"Takes in a dwarfdump lookup including parents of the source DWARF\n location, returns the first parent with a call file not in\n the core library. 
If found, this often indicates the source of the panic\n in the Tock source code.\n \"\"\"\n result = subprocess.run(\n (DWARFDUMP, \"--lookup=0x\" + addr, \"-p\", elf), capture_output=True, text=True\n )\n dwarfdump = result.stdout\n matches = re.findall(dw_at_file_re, dwarfdump)\n\n def getFile(line):\n return line.strip().split('\"')[1]\n\n source_files = list(map(getFile, matches))\n for (i, f) in enumerate(source_files[::-1]):\n if \"/core/\" not in f:\n line_matches = re.findall(dw_at_line_re, dwarfdump)\n\n def getLine(line):\n return line.strip().split(\"(\")[1].split(\")\")[0]\n\n source_lines = list(map(getLine, line_matches))\n source_line = source_lines[::-1][i]\n return (f, source_line)\n return (\"\", \"\")\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"ELF\", help=\"ELF file for analysis\")\n parser.add_argument(\n \"--verbose\",\n \"-v\",\n action=\"store_true\",\n help=\"Output additional DWARF info for each panic location in the binary\",\n )\n parser.add_argument(\"--riscv\", action=\"store_true\", help=\"Use risc-v based objdump\")\n return parser.parse_args()\n\n\n# Find all addresses that panic, and get basic dwarf info on those addresses\ndef find_all_panics(objdump, elf, is_riscv):\n panic_list = []\n within_core_panic_list = []\n no_info_panic_list = []\n result = subprocess.run((objdump, \"-d\", elf), capture_output=True, text=True)\n objdump_out = result.stdout\n for function in panic_functions:\n function_re = re.compile(\".*:.*#.*\" + function + \".*\")\n if not is_riscv:\n # Arm-none-eabi-objdump uses ';' for comments instead of '#'\n function_re = re.compile(\".*:.*<.*\" + function + \".*\")\n # TODO: arm elfs include loads of offsets from symbols in such a way that these lines\n # are matched by this regex. In general, these loads occur within the instruction stream\n # associated with the symbol at hand, and will usually be excluded by logic later in\n # this function. 
This leads to `within_core_panic_list` and `no_info_panic_list`\n # containing more \"panics\" than when analyzing a risc-v binary. We could fix this\n # by matching *only* on functions with instructions that actually jump to a new symbol,\n # but this would require a list of such instructions for each architecture. However\n # as written it actually lets us identify panics which are jumped to via addresses\n # stored in registers, which may actually catch additional valid panics.\n matches = re.findall(function_re, objdump_out)\n\n def getAddr(line):\n return line.strip().split(\":\")[0]\n\n addrs = list(map(getAddr, matches))\n for addr in addrs:\n result = subprocess.run(\n (DWARFDUMP, \"--lookup=0x\" + addr, elf), capture_output=True, text=True\n )\n dwarfdump = result.stdout\n dw_at_file = re.search(dw_at_file_re, dwarfdump)\n dw_at_line = re.search(dw_at_line_re, dwarfdump)\n line_info = re.search(line_info_re, dwarfdump)\n abstract_origin = re.search(abstract_origin_re, dwarfdump)\n linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)\n file_string = \"\"\n line_string = \"\"\n line_info_string = \"\"\n abstract_origin_string = \"\"\n linkage_name_string = \"\"\n if dw_at_file:\n file_string = dw_at_file.group(0).strip()\n line_string = dw_at_line.group(0).strip()\n panicinfo = {}\n panicinfo[\"addr\"] = addr\n panicinfo[\"function\"] = function\n if line_info:\n line_info_string = line_info.group(0).strip()\n panicinfo[\"line_info\"] = line_info_string\n if abstract_origin:\n abstract_origin_string = abstract_origin.group(0).strip()\n if linkage_name:\n linkage_name_string = linkage_name.group(0).strip()\n if \"DW_AT_call_file\" in file_string and \"DW_AT_decl_file\" in file_string:\n raise RuntimeError(\"I misunderstand DWARF\")\n if \"DW_AT_call_file\" in file_string or \"DW_AT_decl_file\" in file_string:\n filename = file_string.split('\"')[1]\n line_num = line_string.split(\"(\")[1].split(\")\")[0]\n if \"DW_AT_call_file\" in file_string:\n 
panicinfo[\"call_file\"] = filename\n panicinfo[\"call_line\"] = line_num\n if \"DW_AT_decl_file\" in file_string:\n panicinfo[\"decl_file\"] = filename\n panicinfo[\"decl_line\"] = line_num\n if not \"/core/\" in filename:\n if not \"closure\" in abstract_origin_string:\n panicinfo[\"best_guess_source\"] = \"call/decl\"\n else:\n panicinfo[\"best_guess_source\"] = \"call-closure-line-info\"\n panic_list.append(panicinfo)\n continue\n else: # 'core' in filename\n (parent_file, parent_line) = check_for_source_in_parent(elf, addr)\n if parent_file:\n panicinfo[\"parent_call_file\"] = parent_file\n panicinfo[\"parent_call_line\"] = parent_line\n panicinfo[\"best_guess_source\"] = \"parent\"\n panic_list.append(panicinfo)\n continue\n elif not abstract_origin and not linkage_name:\n no_info_panic_list.append(panicinfo)\n continue\n elif abstract_origin:\n if \"core\" in abstract_origin_string:\n name = matches_panic_funcs(abstract_origin_string)\n if name:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n name2 = any_origin_matches_panic_func(elf, addr)\n name3 = any_linkage_matches_panic_func(elf, addr)\n if name2:\n within_core_panic_list.append(panicinfo)\n continue\n elif name3:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n no_info_panic_list.append(panicinfo)\n continue\n elif \"closure\" in abstract_origin_string:\n # not in core, in closure, line info is probably sufficient\n panicinfo[\"best_guess_source\"] = \"lineinfo\"\n panic_list.append(panicinfo)\n continue\n else:\n # i have not seen this happen -- core in file, not closure, origin not core\n raise RuntimeError(\"Unhandled\")\n if linkage_name:\n name = matches_panic_funcs(linkage_name_string)\n if name:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n no_info_panic_list.append(panicinfo)\n print(\n \"Failed to match panic but we probably have enough info to trace it up. 
Linkage name: {}, addr: {}\".format(\n linkage_name_string, addr\n )\n )\n continue\n no_info_panic_list.append(panic_info)\n print(\"did not find source for panic: {}\".format(addr))\n continue\n elif abstract_origin:\n origin = abstract_origin_string.split('\"')[1]\n panicinfo[\"abstract_origin\"] = origin\n if \"core\" in origin:\n if matches_panic_funcs(origin):\n within_core_panic_list.append(panicinfo)\n continue\n no_info_panic_list.append(panicinfo)\n print(\n \"Probably could add this origin or one of its parents to the panic function list: {}\".format(\n abstract_origin_string\n )\n )\n continue\n else:\n panicinfo[\"best_guess_source\"] = \"abstract_origin + line\"\n panic_list.append(panicinfo)\n continue\n else:\n # This gets hit for OUTLINED_FUNCTION_XX a bunch on ARM\n try:\n dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[\n -1\n ].strip() # see multiple matches for this string sometimes\n function_name = dw_at_name_string.split('\"')[1]\n if \"OUTLINED_FUNCTION_\" in function_name:\n # This is a common pattern where panicing paths are repeated in many\n # places throughout the binary, and LLVMs optimizer outlines the repeated code.\n # Let's add these to the list of panicing functions, dynamically so this is resilient to\n # changes in the binary.\n if function_name not in panic_functions:\n # don't double insert\n panic_functions.append(\n function_name + \">\"\n ) # so FUNCTION_22 does not catch FUNCTION_222\n within_core_panic_list.append(panicinfo)\n continue\n no_info_panic_list.append(panicinfo)\n continue\n except:\n # There seem to be a places where lookup fails completely\n # Not easy to recover, log these and continue on.\n no_info_panic_list.append(panicinfo)\n continue\n raise RuntimeError(\"BUG: Should not reach here\")\n return (panic_list, within_core_panic_list, no_info_panic_list)\n\n\ndef pretty_print(panicinfo):\n if panicinfo[\"best_guess_source\"] == \"call/decl\":\n try:\n print(\n \"\\t{} -- {}:{}\".format(\n 
panicinfo[\"addr\"], panicinfo[\"call_file\"], panicinfo[\"call_line\"]\n )\n )\n except:\n print(\n \"\\t{} -- in function starting at {}:{}\".format(\n panicinfo[\"addr\"], panicinfo[\"decl_file\"], panicinfo[\"decl_line\"]\n )\n )\n elif panicinfo[\"best_guess_source\"] == \"parent\":\n print(\n \"\\t{} -- at or in function starting at {}:{}\".format(\n panicinfo[\"addr\"],\n panicinfo[\"parent_call_file\"],\n panicinfo[\"parent_call_line\"],\n )\n )\n elif panicinfo[\"best_guess_source\"] == \"lineinfo\":\n print(\n \"\\t{} -- in closure, try: {}\".format(\n panicinfo[\"addr\"], panicinfo[\"line_info\"]\n )\n )\n elif panicinfo[\"best_guess_source\"] == \"abstract_origin + line\":\n print(\n \"\\t{} -- line_info: {} from origin :{}\".format(\n panicinfo[\"addr\"], panicinfo[\"line_info\"], panicinfo[\"abstract_origin\"]\n )\n )\n elif panicinfo[\"best_guess_source\"] == \"call-closure-line-info\":\n print(\n \"\\t{} -- in closure starting on line_info: {}\".format(\n panicinfo[\"addr\"], panicinfo[\"line_info\"]\n )\n )\n else:\n raise RuntimeError(\"Missing best guess source: {}\".format(panicinfo))\n\n\ndef main():\n args = parse_args()\n if sys.version_info.minor < 7:\n print(\"This tool requires Python 3.7+\")\n return -1\n print(\"Tock panic report for \" + args.ELF)\n\n objdump = ARM_OBJDUMP\n if args.riscv:\n objdump = RISCV_OBJDUMP\n\n (panic_list, within_core_panic_list, no_info_panic_list) = find_all_panics(\n objdump, args.ELF, args.riscv\n )\n print(\"num_panics: {}\".format(len(panic_list)))\n buckets_list = {}\n for f in panic_functions:\n buckets_list[f] = []\n for panic in panic_list:\n buckets_list[panic[\"function\"]].append(panic)\n for f, l in buckets_list.items():\n if len(l) > 0:\n print(\"{}: {}\".format(f, len(l)))\n for p in l:\n pretty_print(p)\n if args.verbose:\n print(p)\n print()\n\n print(\"num panics in core ignored: {}\".format(len(within_core_panic_list)))\n print(\"num panics for which no info available: 
{}\".format(len(no_info_panic_list)))\n if args.verbose:\n print(\n \"If more debug info is needed, run dwarfdump directly on the address in question.\"\n )\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
7,
10,
11,
12,
13
]
}
|
[
7,
10,
11,
12,
13
] |
_all__ = ["minning_algo"]
|
normal
|
{
"blob_id": "5a7b68648898818e0db47f225f3d4b0972cd5b99",
"index": 7521,
"step-1": "<mask token>\n",
"step-2": "_all__ = ['minning_algo']\n",
"step-3": "_all__ = [\"minning_algo\"]\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 13 11:43:58 2020
@author: Dr. Tang
"""
import tensorflow as tf
# 需要你编程:将下面转换成tensorflow
#x = 10
#y = 2
#u=x/y
#z = u- 1
x=tf.placeholder(tf.int32)
y=tf.placeholder(tf.int32)
u=tf.divide(x,y)
z=tf.subtract(u,tf.constant(1.0,dtype=tf.float64))
# 需要你编程:从session中打印 z
with tf.Session() as sess:
output=sess.run(z,feed_dict={x:10,y:2})
print(output)
|
normal
|
{
"blob_id": "ca91052072d7b2da5729cf55f7f4ba4b54608017",
"index": 3477,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith tf.Session() as sess:\n output = sess.run(z, feed_dict={x: 10, y: 2})\n print(output)\n",
"step-3": "<mask token>\nx = tf.placeholder(tf.int32)\ny = tf.placeholder(tf.int32)\nu = tf.divide(x, y)\nz = tf.subtract(u, tf.constant(1.0, dtype=tf.float64))\nwith tf.Session() as sess:\n output = sess.run(z, feed_dict={x: 10, y: 2})\n print(output)\n",
"step-4": "<mask token>\nimport tensorflow as tf\nx = tf.placeholder(tf.int32)\ny = tf.placeholder(tf.int32)\nu = tf.divide(x, y)\nz = tf.subtract(u, tf.constant(1.0, dtype=tf.float64))\nwith tf.Session() as sess:\n output = sess.run(z, feed_dict={x: 10, y: 2})\n print(output)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 13 11:43:58 2020\n\n@author: Dr. Tang\n\"\"\"\n\nimport tensorflow as tf\n# 需要你编程:将下面转换成tensorflow\n#x = 10\n#y = 2\n#u=x/y\n#z = u- 1\n\nx=tf.placeholder(tf.int32)\ny=tf.placeholder(tf.int32)\nu=tf.divide(x,y)\nz=tf.subtract(u,tf.constant(1.0,dtype=tf.float64))\n# 需要你编程:从session中打印 z\nwith tf.Session() as sess:\n output=sess.run(z,feed_dict={x:10,y:2})\n print(output)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def compare_images(target, ref):
    """Score *target* against *ref* and return [PSNR, MSE, SSIM]."""
    return [
        psnr(target, ref),
        mse(target, ref),
        ssim(target, ref, multichannel=True),
    ]
def prepare_images(path, factor):
    """Down- then up-scale every image in *path* by *factor* and save the
    degraded copies into the images/ directory."""
    for fname in os.listdir(path):
        original = cv2.imread(path + '/' + fname)
        height, width, _channels = original.shape
        # Shrinking and re-enlarging discards high-frequency detail,
        # producing the low-resolution inputs for super-resolution.
        shrunk = cv2.resize(original, (int(width / factor), int(height / factor)),
                            interpolation=cv2.INTER_LINEAR)
        degraded = cv2.resize(shrunk, (int(width), int(height)),
                              interpolation=cv2.INTER_LINEAR)
        print('Saving {}'.format(fname))
        cv2.imwrite('images//{}'.format(fname), degraded)
<|reserved_special_token_0|>
def modcrop(img, scale):
    """Crop *img* so its height and width are exact multiples of *scale*.

    Args:
        img: Image array of shape (H, W) or (H, W, C).
        scale: Integer super-resolution factor.

    Returns:
        The top-left crop of size (H - H % scale, W - W % scale).
    """
    tmpsz = img.shape
    sz = np.array(tmpsz[0:2])
    # Round each spatial dimension down to the nearest multiple of `scale`.
    sz = sz - np.mod(sz, scale)
    # Bug fix: the original sliced columns as 1:sz[1], which dropped the
    # first column and yielded width sz[1] - 1 -- not a multiple of scale.
    img = img[0:sz[0], 0:sz[1]]
    return img
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def psnr(target, ref):
    """Return the peak signal-to-noise ratio (dB) between two 8-bit images.

    Args:
        target: Degraded image as a numpy array (assumed 0-255 range).
        ref: Reference image with the same shape.

    Returns:
        PSNR in decibels; float('inf') when the images are identical.
    """
    target_data = target.astype(float)
    ref_data = ref.astype(float)
    diff = ref_data - target_data
    diff = diff.flatten('C')
    rmse = math.sqrt(np.mean(diff ** 2))
    # Identical images have zero error: report infinite PSNR rather than
    # letting 255.0 / rmse raise ZeroDivisionError.
    if rmse == 0:
        return float('inf')
    return 20 * math.log10(255.0 / rmse)
def mse(target, ref):
    """Return the mean squared error between *target* and *ref*.

    Args:
        target: Degraded image as a numpy array.
        ref: Reference image with the same shape.

    Returns:
        Sum of squared pixel differences divided by height * width.
    """
    # Bug fix: the original summed target**2 and never used `ref`, so it
    # measured image energy rather than the error between the two images.
    diff = target.astype('float') - ref.astype('float')
    err = np.sum(diff ** 2)
    err /= float(target.shape[0] * target.shape[1])
    return err
def compare_images(target, ref):
    """Score *target* against *ref* and return [PSNR, MSE, SSIM]."""
    return [
        psnr(target, ref),
        mse(target, ref),
        ssim(target, ref, multichannel=True),
    ]
def prepare_images(path, factor):
    """Down- then up-scale every image in *path* by *factor* and save the
    degraded copies into the images/ directory."""
    for fname in os.listdir(path):
        original = cv2.imread(path + '/' + fname)
        height, width, _channels = original.shape
        # Shrinking and re-enlarging discards high-frequency detail,
        # producing the low-resolution inputs for super-resolution.
        shrunk = cv2.resize(original, (int(width / factor), int(height / factor)),
                            interpolation=cv2.INTER_LINEAR)
        degraded = cv2.resize(shrunk, (int(width), int(height)),
                              interpolation=cv2.INTER_LINEAR)
        print('Saving {}'.format(fname))
        cv2.imwrite('images//{}'.format(fname), degraded)
<|reserved_special_token_0|>
def modcrop(img, scale):
    """Crop *img* so its height and width are exact multiples of *scale*.

    Args:
        img: Image array of shape (H, W) or (H, W, C).
        scale: Integer super-resolution factor.

    Returns:
        The top-left crop of size (H - H % scale, W - W % scale).
    """
    tmpsz = img.shape
    sz = np.array(tmpsz[0:2])
    # Round each spatial dimension down to the nearest multiple of `scale`.
    sz = sz - np.mod(sz, scale)
    # Bug fix: the original sliced columns as 1:sz[1], which dropped the
    # first column and yielded width sz[1] - 1 -- not a multiple of scale.
    img = img[0:sz[0], 0:sz[1]]
    return img
def shave(image, border):
    """Return *image* with *border* pixels removed from every edge."""
    return image[border:-border, border:-border]
def predict(image_path):
    """Run the SRCNN on one degraded image and score it against its source.

    Args:
        image_path: Path to a degraded image; a pristine copy with the same
            filename is expected under source_images/.

    Returns:
        Tuple (ref, degraded, output, scores), where scores is a two-element
        list of [PSNR, MSE, SSIM] lists: degraded-vs-ref then output-vs-ref.
    """
    srcnn = model()
    # Pre-trained weights; the .h5 file must exist in the working directory.
    srcnn.load_weights('3051crop_weight_200.h5')
    path, file = os.path.split(image_path)
    degraded = cv2.imread(image_path)
    ref = cv2.imread('source_images/{}'.format(file))
    # Crop both images so their dimensions are multiples of the scale factor.
    ref = modcrop(ref, 3)
    degraded = modcrop(degraded, 3)
    # The network operates on the luminance (Y) channel only, normalised to [0, 1].
    temp = cv2.cvtColor(degraded, cv2.COLOR_BGR2YCrCb)
    Y = np.zeros((1, temp.shape[0], temp.shape[1], 1), dtype=float)
    Y[0, :, :, 0] = temp[:, :, 0].astype(float) / 255
    pre = srcnn.predict(Y, batch_size=1)
    # Rescale to the 8-bit range and clamp in place before the uint8 cast.
    pre *= 255
    pre[pre[:] > 255] = 255
    pre[pre[:] < 0] = 0
    pre = pre.astype(np.uint8)
    # The two 'valid' convolutions in model() (9x9 and 5x5) trim 6 pixels per
    # side, so shave the colour channels to match before splicing the
    # predicted luminance back in.
    temp = shave(temp, 6)
    temp[:, :, 0] = pre[0, :, :, 0]
    output = cv2.cvtColor(temp, cv2.COLOR_YCrCb2BGR)
    # Shave the comparison images identically so all three arrays align.
    ref = shave(ref.astype(np.uint8), 6)
    degraded = shave(degraded.astype(np.uint8), 6)
    scores = []
    scores.append(compare_images(degraded, ref))
    scores.append(compare_images(output, ref))
    return ref, degraded, output, scores
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Report interpreter and library versions up front so the console log
# records the exact environment this run used.
print('Python: {}'.format(sys.version))
print('Numpy: {}'.format(numpy.__version__))
print('Keras: {}'.format(keras.__version__))
print('Matplotlib: {}'.format(matplotlib.__version__))
print('OpenCV: {}'.format(cv2.__version__))
print('Skimage: {}'.format(skimage.__version__))
<|reserved_special_token_0|>
def psnr(target, ref):
    """Return the peak signal-to-noise ratio (dB) between two 8-bit images.

    Args:
        target: Degraded image as a numpy array (assumed 0-255 range).
        ref: Reference image with the same shape.

    Returns:
        PSNR in decibels; float('inf') when the images are identical.
    """
    target_data = target.astype(float)
    ref_data = ref.astype(float)
    diff = ref_data - target_data
    diff = diff.flatten('C')
    rmse = math.sqrt(np.mean(diff ** 2))
    # Identical images have zero error: report infinite PSNR rather than
    # letting 255.0 / rmse raise ZeroDivisionError.
    if rmse == 0:
        return float('inf')
    return 20 * math.log10(255.0 / rmse)
def mse(target, ref):
    """Mean squared error between two images.

    Bug fix: the original squared only ``target`` and never subtracted
    ``ref``, so it measured the target's mean squared intensity rather
    than the error between the two images.

    :param target: distorted image
    :param ref: reference image of the same shape
    :return: sum of squared per-pixel differences divided by H * W
        (channels are not divided out, matching the original normalisation)
    """
    diff = target.astype('float') - ref.astype('float')
    err = np.sum(diff ** 2)
    err /= float(target.shape[0] * target.shape[1])
    return err
def compare_images(target, ref):
scores = []
scores.append(psnr(target, ref))
scores.append(mse(target, ref))
scores.append(ssim(target, ref, multichannel=True))
return scores
def prepare_images(path, factor):
for file in os.listdir(path):
img = cv2.imread(path + '/' + file)
h, w, c = img.shape
new_height = h / factor
new_width = w / factor
img = cv2.resize(img, (int(new_width), int(new_height)),
interpolation=cv2.INTER_LINEAR)
img = cv2.resize(img, (int(w), int(h)), interpolation=cv2.INTER_LINEAR)
print('Saving {}'.format(file))
cv2.imwrite('images//{}'.format(file), img)
prepare_images('source_images/', 2)
for file in os.listdir('images/'):
target = cv2.imread('images/{}'.format(file))
ref = cv2.imread('source_images/{}'.format(file))
scores = compare_images(target, ref)
print('{}\nPSNR: {}\nMSE: {}\nSSIM: {}\n'.format(file, scores[0],
scores[1], scores[2]))
def model():
SRCNN = Sequential()
SRCNN.add(Conv2D(filters=128, kernel_size=(9, 9), activation='relu',
padding='valid', use_bias=True, input_shape=(None, None, 1)))
SRCNN.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu',
padding='same', use_bias=True))
SRCNN.add(Conv2D(filters=1, kernel_size=(5, 5), activation='linear',
padding='valid', use_bias=True))
adam = Adam(learning_rate=0.0003)
SRCNN.compile(loss='mean_squared_error', optimizer=adam, metrics=[
'mean_squared_error'])
return SRCNN
def modcrop(img, scale):
    """Crop *img* so height and width are exact multiples of *scale*.

    Bug fix: the original sliced columns as ``1:sz[1]``, silently dropping
    the first column and returning one column too few; both axes now start
    at index 0.

    :param img: array of shape (H, W) or (H, W, C)
    :param scale: integer modulus for the spatial dimensions
    :return: top-left crop of shape (H - H % scale, W - W % scale, ...)
    """
    sz = np.asarray(img.shape[0:2])
    sz = sz - np.mod(sz, scale)
    return img[0:sz[0], 0:sz[1]]
def shave(image, border):
img = image[border:-border, border:-border]
return img
def predict(image_path):
srcnn = model()
srcnn.load_weights('3051crop_weight_200.h5')
path, file = os.path.split(image_path)
degraded = cv2.imread(image_path)
ref = cv2.imread('source_images/{}'.format(file))
ref = modcrop(ref, 3)
degraded = modcrop(degraded, 3)
temp = cv2.cvtColor(degraded, cv2.COLOR_BGR2YCrCb)
Y = np.zeros((1, temp.shape[0], temp.shape[1], 1), dtype=float)
Y[0, :, :, 0] = temp[:, :, 0].astype(float) / 255
pre = srcnn.predict(Y, batch_size=1)
pre *= 255
pre[pre[:] > 255] = 255
pre[pre[:] < 0] = 0
pre = pre.astype(np.uint8)
temp = shave(temp, 6)
temp[:, :, 0] = pre[0, :, :, 0]
output = cv2.cvtColor(temp, cv2.COLOR_YCrCb2BGR)
ref = shave(ref.astype(np.uint8), 6)
degraded = shave(degraded.astype(np.uint8), 6)
scores = []
scores.append(compare_images(degraded, ref))
scores.append(compare_images(output, ref))
return ref, degraded, output, scores
<|reserved_special_token_0|>
print("""Degraded Image:
PSNR: {}
MSE: {}
SSIM: {}
""".format(scores[0][0],
scores[0][1], scores[0][2]))
print("""Reconstructed Image:
PSNR: {}
MSE: {}
SSIM: {}
""".format(scores[
1][0], scores[1][1], scores[1][2]))
<|reserved_special_token_0|>
axs[0].imshow(cv2.cvtColor(ref, cv2.COLOR_BGR2RGB))
axs[0].set_title('Original')
axs[1].imshow(cv2.cvtColor(degraded, cv2.COLOR_BGR2RGB))
axs[1].set_title('Degraded')
axs[2].imshow(cv2.cvtColor(output, cv2.COLOR_BGR2RGB))
axs[2].set_title('SRCNN')
for ax in axs:
ax.set_xticks([])
ax.set_yticks([])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Python: {}'.format(sys.version))
print('Numpy: {}'.format(numpy.__version__))
print('Keras: {}'.format(keras.__version__))
print('Matplotlib: {}'.format(matplotlib.__version__))
print('OpenCV: {}'.format(cv2.__version__))
print('Skimage: {}'.format(skimage.__version__))
<|reserved_special_token_0|>
def psnr(target, ref):
    """Peak Signal-to-Noise Ratio between two 8-bit images, in dB.

    Robustness fix: identical images made the original raise
    ZeroDivisionError (255. / 0); PSNR of a perfect reconstruction is
    conventionally infinite, so return ``inf`` instead.

    :param target: distorted image
    :param ref: reference image of the same shape
    :return: PSNR in decibels, ``inf`` when the images are identical
    """
    diff = ref.astype(float) - target.astype(float)
    rmse = math.sqrt(np.mean(diff ** 2))
    if rmse == 0:
        return float('inf')
    return 20 * math.log10(255.0 / rmse)
def mse(target, ref):
    """Mean squared error between two images.

    Bug fix: the original squared only ``target`` and never subtracted
    ``ref``, so it measured the target's mean squared intensity rather
    than the error between the two images.

    :param target: distorted image
    :param ref: reference image of the same shape
    :return: sum of squared per-pixel differences divided by H * W
        (channels are not divided out, matching the original normalisation)
    """
    diff = target.astype('float') - ref.astype('float')
    err = np.sum(diff ** 2)
    err /= float(target.shape[0] * target.shape[1])
    return err
def compare_images(target, ref):
scores = []
scores.append(psnr(target, ref))
scores.append(mse(target, ref))
scores.append(ssim(target, ref, multichannel=True))
return scores
def prepare_images(path, factor):
for file in os.listdir(path):
img = cv2.imread(path + '/' + file)
h, w, c = img.shape
new_height = h / factor
new_width = w / factor
img = cv2.resize(img, (int(new_width), int(new_height)),
interpolation=cv2.INTER_LINEAR)
img = cv2.resize(img, (int(w), int(h)), interpolation=cv2.INTER_LINEAR)
print('Saving {}'.format(file))
cv2.imwrite('images//{}'.format(file), img)
prepare_images('source_images/', 2)
for file in os.listdir('images/'):
target = cv2.imread('images/{}'.format(file))
ref = cv2.imread('source_images/{}'.format(file))
scores = compare_images(target, ref)
print('{}\nPSNR: {}\nMSE: {}\nSSIM: {}\n'.format(file, scores[0],
scores[1], scores[2]))
def model():
SRCNN = Sequential()
SRCNN.add(Conv2D(filters=128, kernel_size=(9, 9), activation='relu',
padding='valid', use_bias=True, input_shape=(None, None, 1)))
SRCNN.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu',
padding='same', use_bias=True))
SRCNN.add(Conv2D(filters=1, kernel_size=(5, 5), activation='linear',
padding='valid', use_bias=True))
adam = Adam(learning_rate=0.0003)
SRCNN.compile(loss='mean_squared_error', optimizer=adam, metrics=[
'mean_squared_error'])
return SRCNN
def modcrop(img, scale):
    """Crop *img* so height and width are exact multiples of *scale*.

    Bug fix: the original sliced columns as ``1:sz[1]``, silently dropping
    the first column and returning one column too few; both axes now start
    at index 0.

    :param img: array of shape (H, W) or (H, W, C)
    :param scale: integer modulus for the spatial dimensions
    :return: top-left crop of shape (H - H % scale, W - W % scale, ...)
    """
    sz = np.asarray(img.shape[0:2])
    sz = sz - np.mod(sz, scale)
    return img[0:sz[0], 0:sz[1]]
def shave(image, border):
img = image[border:-border, border:-border]
return img
def predict(image_path):
srcnn = model()
srcnn.load_weights('3051crop_weight_200.h5')
path, file = os.path.split(image_path)
degraded = cv2.imread(image_path)
ref = cv2.imread('source_images/{}'.format(file))
ref = modcrop(ref, 3)
degraded = modcrop(degraded, 3)
temp = cv2.cvtColor(degraded, cv2.COLOR_BGR2YCrCb)
Y = np.zeros((1, temp.shape[0], temp.shape[1], 1), dtype=float)
Y[0, :, :, 0] = temp[:, :, 0].astype(float) / 255
pre = srcnn.predict(Y, batch_size=1)
pre *= 255
pre[pre[:] > 255] = 255
pre[pre[:] < 0] = 0
pre = pre.astype(np.uint8)
temp = shave(temp, 6)
temp[:, :, 0] = pre[0, :, :, 0]
output = cv2.cvtColor(temp, cv2.COLOR_YCrCb2BGR)
ref = shave(ref.astype(np.uint8), 6)
degraded = shave(degraded.astype(np.uint8), 6)
scores = []
scores.append(compare_images(degraded, ref))
scores.append(compare_images(output, ref))
return ref, degraded, output, scores
ref, degraded, output, scores = predict('images/flowers.bmp')
print("""Degraded Image:
PSNR: {}
MSE: {}
SSIM: {}
""".format(scores[0][0],
scores[0][1], scores[0][2]))
print("""Reconstructed Image:
PSNR: {}
MSE: {}
SSIM: {}
""".format(scores[
1][0], scores[1][1], scores[1][2]))
fig, axs = plt.subplots(1, 3, figsize=(20, 8))
axs[0].imshow(cv2.cvtColor(ref, cv2.COLOR_BGR2RGB))
axs[0].set_title('Original')
axs[1].imshow(cv2.cvtColor(degraded, cv2.COLOR_BGR2RGB))
axs[1].set_title('Degraded')
axs[2].imshow(cv2.cvtColor(output, cv2.COLOR_BGR2RGB))
axs[2].set_title('SRCNN')
for ax in axs:
ax.set_xticks([])
ax.set_yticks([])
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 17 17:24:39 2020
@author: code
"""
import sys
import keras
import cv2
import numpy
import matplotlib
import skimage
print('Python: {}'.format(sys.version))
print('Numpy: {}'.format(numpy.__version__))
print('Keras: {}'.format(keras.__version__))
print('Matplotlib: {}'.format(matplotlib.__version__))
print('OpenCV: {}'.format(cv2.__version__))
print('Skimage: {}'.format(skimage.__version__))
#import necessary packages
from keras.models import Sequential
from keras.layers import Conv2D, Input
from keras.optimizers import SGD, Adam
from skimage.measure import compare_ssim as ssim
from matplotlib import pyplot as plt
import cv2
import numpy as np
import math
import os
#peak signal to noise ratio (PSNR)
def psnr(target, ref):
    """Peak Signal-to-Noise Ratio between two 8-bit images, in dB.

    Robustness fix: identical images made the original raise
    ZeroDivisionError (255. / 0); PSNR of a perfect reconstruction is
    conventionally infinite, so return ``inf`` instead.

    :param target: distorted image (assumed RGB/BGR, 0..255 range)
    :param ref: reference image of the same shape
    :return: PSNR in decibels, ``inf`` when the images are identical
    """
    diff = ref.astype(float) - target.astype(float)
    rmse = math.sqrt(np.mean(diff ** 2))
    if rmse == 0:
        return float('inf')
    return 20 * math.log10(255.0 / rmse)
#mean squared error (MSE)
def mse(target, ref):
    """Mean squared error between two images.

    Bug fix: the original squared only ``target`` and never subtracted
    ``ref`` (despite the comment promising "the squared difference between
    the two images"), so it measured the target's mean squared intensity
    rather than the error.

    :param target: distorted image
    :param ref: reference image of the same shape
    :return: sum of squared per-pixel differences divided by H * W
        (channels are not divided out, matching the original normalisation)
    """
    diff = target.astype('float') - ref.astype('float')
    err = np.sum(diff ** 2)
    err /= float(target.shape[0] * target.shape[1])
    return err
#bundle all three image quality metrics into one call
def compare_images(target, ref):
    """Evaluate *target* against *ref* with all three quality metrics.

    :param target: distorted image
    :param ref: reference image of the same shape
    :return: list ``[psnr, mse, ssim]``, in that order
    """
    return [
        psnr(target, ref),
        mse(target, ref),
        ssim(target, ref, multichannel=True),
    ]
#prepare degraded images by introducing quality distortions via resizing
def prepare_images(path, factor, out_dir='images'):
    """Create degraded copies of every image in *path*.

    Each image is shrunk by *factor* and then bilinearly resized back to
    its original dimensions, discarding high-frequency detail. Results
    are written under the same filename into *out_dir*.

    Fixes: uses os.path.join instead of manual '/' concatenation, skips
    files OpenCV cannot decode (the original crashed on ``None.shape``),
    and no longer assumes a colour image (``h, w, c`` unpack failed on
    grayscale input).

    :param path: directory containing the source images
    :param factor: downscale factor (e.g. 2 halves each dimension)
    :param out_dir: destination directory; defaults to 'images',
        matching the original hard-coded behaviour
    """
    for file in os.listdir(path):
        img = cv2.imread(os.path.join(path, file))
        if img is None:
            # not a readable image (e.g. stray non-image file) - skip it
            print('Skipping {} (not an image)'.format(file))
            continue
        h, w = img.shape[:2]
        # down-scale, then up-scale back to the original size
        img = cv2.resize(img, (int(w / factor), int(h / factor)),
                         interpolation=cv2.INTER_LINEAR)
        img = cv2.resize(img, (w, h), interpolation=cv2.INTER_LINEAR)
        print('Saving {}'.format(file))
        cv2.imwrite(os.path.join(out_dir, file), img)
prepare_images('source_images/', 2)
#score every generated (degraded) image against its pristine original
#using all three image quality metrics (PSNR, MSE, SSIM)
for file in os.listdir('images/'):
    #open target (degraded) and reference (original) images
    target = cv2.imread('images/{}'.format(file))
    ref = cv2.imread('source_images/{}'.format(file))
    #calculate the scores
    scores = compare_images(target, ref)
    #print all three scores
    print('{}\nPSNR: {}\nMSE: {}\nSSIM: {}\n'.format(file, scores[0],
        scores[1], scores[2]))
#build the SRCNN network
def model():
    """Build and compile the 3-layer SRCNN operating on one Y channel.

    Layers follow the SRCNN paper: 9x9 patch extraction, 3x3 non-linear
    mapping, 5x5 reconstruction.

    :return: a compiled keras ``Sequential`` model
    """
    srcnn = Sequential()
    # layer 1: patch extraction / representation
    srcnn.add(Conv2D(filters=128, kernel_size=(9, 9), activation='relu',
                     padding='valid', use_bias=True,
                     input_shape=(None, None, 1)))
    # layer 2: non-linear mapping between feature spaces
    srcnn.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu',
                     padding='same', use_bias=True))
    # layer 3: linear reconstruction of the Y channel
    srcnn.add(Conv2D(filters=1, kernel_size=(5, 5), activation='linear',
                     padding='valid', use_bias=True))
    srcnn.compile(loss='mean_squared_error',
                  optimizer=Adam(learning_rate=0.0003),
                  metrics=['mean_squared_error'])
    return srcnn
#image preprocessing helpers
def modcrop(img, scale):
    """Crop *img* so height and width are exact multiples of *scale*.

    Bug fix: the original sliced columns as ``1:sz[1]``, silently dropping
    the first column and returning one column too few; both axes now start
    at index 0.

    :param img: array of shape (H, W) or (H, W, C)
    :param scale: integer modulus for the spatial dimensions
    :return: top-left crop of shape (H - H % scale, W - W % scale, ...)
    """
    sz = np.asarray(img.shape[0:2])
    sz = sz - np.mod(sz, scale)
    return img[0:sz[0], 0:sz[1]]
def shave(image, border):
    """Strip *border* pixels from each spatial edge of *image*."""
    return image[border:-border, border:-border]
#define main prediction function
def predict(image_path):
    """Super-resolve one degraded image and score it against its original.

    :param image_path: path to a degraded image; the pristine original
        with the same filename is looked up in 'source_images/'.
    :return: tuple ``(ref, degraded, output, scores)`` where scores is
        ``[compare_images(degraded, ref), compare_images(output, ref)]``.
    """
    #load the srcnn model with weights
    srcnn =model()
    srcnn.load_weights('3051crop_weight_200.h5')
    #load the degraded and reference images
    path, file =os.path.split(image_path)
    degraded = cv2.imread(image_path)
    ref = cv2.imread('source_images/{}'.format(file))
    #preprocess the image with modcrop
    ref = modcrop(ref, 3)
    degraded = modcrop(degraded, 3)
    #convert the image to YCrCb - srcnn was trained on the Y (luma) channel only
    temp =cv2.cvtColor(degraded, cv2.COLOR_BGR2YCrCb)
    #create a 4-D batch slice (1, H, W, 1) and normalize Y to [0, 1]
    Y = np.zeros((1, temp.shape[0], temp.shape[1], 1), dtype = float)
    Y[0, :, :, 0] = temp[:, :, 0].astype(float)/ 255
    #perform super resolution with srcnn
    pre = srcnn.predict(Y, batch_size = 1)
    #post process the output: back to [0, 255], clip, quantize to uint8
    pre*= 255
    pre[pre[:] > 255] = 255
    pre[pre[:] < 0] = 0
    pre = pre.astype(np.uint8)
    #copy Y channel back to image and convert to BGR; shave(..., 6) trims the
    #12 pixels (6 per side) lost to the two 'valid' convolutions (9x9 + 5x5)
    temp = shave(temp, 6)
    temp[:, :, 0] = pre[0, :, :, 0]
    output = cv2.cvtColor(temp, cv2.COLOR_YCrCb2BGR)
    #remove the same border from reference and degraded images so shapes match
    ref = shave(ref.astype(np.uint8), 6)
    degraded = shave(degraded.astype(np.uint8), 6)
    #image quality calculations: [degraded vs ref, reconstructed vs ref]
    scores = []
    scores.append(compare_images(degraded, ref))
    scores.append(compare_images(output, ref))
    #return images and scores
    return ref, degraded, output, scores
#run the full pipeline on one sample image
ref, degraded, output, scores = predict('images/flowers.bmp')
#print the scores before (degraded) and after (SRCNN) reconstruction
print('Degraded Image: \nPSNR: {}\nMSE: {}\nSSIM: {}\n'.format(scores[0][0], scores[0][1], scores[0][2]))
print('Reconstructed Image: \nPSNR: {}\nMSE: {}\nSSIM: {}\n'.format(scores[1][0], scores[1][1], scores[1][2]))
#display images as subplots (cv2 loads BGR, matplotlib expects RGB)
fig, axs = plt.subplots(1, 3, figsize = (20, 8))
axs[0].imshow(cv2.cvtColor(ref, cv2.COLOR_BGR2RGB))
axs[0].set_title('Original')
axs[1].imshow(cv2.cvtColor(degraded, cv2.COLOR_BGR2RGB))
axs[1].set_title('Degraded')
axs[2].imshow(cv2.cvtColor(output, cv2.COLOR_BGR2RGB))
axs[2].set_title('SRCNN')
#remove the x and y tick marks
for ax in axs:
    ax.set_xticks([])
    ax.set_yticks([])
|
flexible
|
{
"blob_id": "e086bebaa166abeea066fe49076f1b007858951f",
"index": 7052,
"step-1": "<mask token>\n\n\ndef compare_images(target, ref):\n scores = []\n scores.append(psnr(target, ref))\n scores.append(mse(target, ref))\n scores.append(ssim(target, ref, multichannel=True))\n return scores\n\n\ndef prepare_images(path, factor):\n for file in os.listdir(path):\n img = cv2.imread(path + '/' + file)\n h, w, c = img.shape\n new_height = h / factor\n new_width = w / factor\n img = cv2.resize(img, (int(new_width), int(new_height)),\n interpolation=cv2.INTER_LINEAR)\n img = cv2.resize(img, (int(w), int(h)), interpolation=cv2.INTER_LINEAR)\n print('Saving {}'.format(file))\n cv2.imwrite('images//{}'.format(file), img)\n\n\n<mask token>\n\n\ndef modcrop(img, scale):\n tmpsz = img.shape\n sz = tmpsz[0:2]\n sz = sz - np.mod(sz, scale)\n img = img[0:sz[0], 1:sz[1]]\n return img\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef psnr(target, ref):\n target_data = target.astype(float)\n ref_data = ref.astype(float)\n diff = ref_data - target_data\n diff = diff.flatten('C')\n rmse = math.sqrt(np.mean(diff ** 2))\n return 20 * math.log10(255.0 / rmse)\n\n\ndef mse(target, ref):\n err = np.sum(target.astype('float') ** 2)\n err /= float(target.shape[0] * target.shape[1])\n return err\n\n\ndef compare_images(target, ref):\n scores = []\n scores.append(psnr(target, ref))\n scores.append(mse(target, ref))\n scores.append(ssim(target, ref, multichannel=True))\n return scores\n\n\ndef prepare_images(path, factor):\n for file in os.listdir(path):\n img = cv2.imread(path + '/' + file)\n h, w, c = img.shape\n new_height = h / factor\n new_width = w / factor\n img = cv2.resize(img, (int(new_width), int(new_height)),\n interpolation=cv2.INTER_LINEAR)\n img = cv2.resize(img, (int(w), int(h)), interpolation=cv2.INTER_LINEAR)\n print('Saving {}'.format(file))\n cv2.imwrite('images//{}'.format(file), img)\n\n\n<mask token>\n\n\ndef modcrop(img, scale):\n tmpsz = img.shape\n sz = tmpsz[0:2]\n sz = sz - np.mod(sz, scale)\n img = img[0:sz[0], 1:sz[1]]\n return img\n\n\ndef shave(image, border):\n img = image[border:-border, border:-border]\n return img\n\n\ndef predict(image_path):\n srcnn = model()\n srcnn.load_weights('3051crop_weight_200.h5')\n path, file = os.path.split(image_path)\n degraded = cv2.imread(image_path)\n ref = cv2.imread('source_images/{}'.format(file))\n ref = modcrop(ref, 3)\n degraded = modcrop(degraded, 3)\n temp = cv2.cvtColor(degraded, cv2.COLOR_BGR2YCrCb)\n Y = np.zeros((1, temp.shape[0], temp.shape[1], 1), dtype=float)\n Y[0, :, :, 0] = temp[:, :, 0].astype(float) / 255\n pre = srcnn.predict(Y, batch_size=1)\n pre *= 255\n pre[pre[:] > 255] = 255\n pre[pre[:] < 0] = 0\n pre = pre.astype(np.uint8)\n temp = shave(temp, 6)\n temp[:, :, 0] = pre[0, :, :, 0]\n output = cv2.cvtColor(temp, cv2.COLOR_YCrCb2BGR)\n ref = shave(ref.astype(np.uint8), 6)\n 
degraded = shave(degraded.astype(np.uint8), 6)\n scores = []\n scores.append(compare_images(degraded, ref))\n scores.append(compare_images(output, ref))\n return ref, degraded, output, scores\n\n\n<mask token>\n",
"step-3": "<mask token>\nprint('Python: {}'.format(sys.version))\nprint('Numpy: {}'.format(numpy.__version__))\nprint('Keras: {}'.format(keras.__version__))\nprint('Matplotlib: {}'.format(matplotlib.__version__))\nprint('OpenCV: {}'.format(cv2.__version__))\nprint('Skimage: {}'.format(skimage.__version__))\n<mask token>\n\n\ndef psnr(target, ref):\n target_data = target.astype(float)\n ref_data = ref.astype(float)\n diff = ref_data - target_data\n diff = diff.flatten('C')\n rmse = math.sqrt(np.mean(diff ** 2))\n return 20 * math.log10(255.0 / rmse)\n\n\ndef mse(target, ref):\n err = np.sum(target.astype('float') ** 2)\n err /= float(target.shape[0] * target.shape[1])\n return err\n\n\ndef compare_images(target, ref):\n scores = []\n scores.append(psnr(target, ref))\n scores.append(mse(target, ref))\n scores.append(ssim(target, ref, multichannel=True))\n return scores\n\n\ndef prepare_images(path, factor):\n for file in os.listdir(path):\n img = cv2.imread(path + '/' + file)\n h, w, c = img.shape\n new_height = h / factor\n new_width = w / factor\n img = cv2.resize(img, (int(new_width), int(new_height)),\n interpolation=cv2.INTER_LINEAR)\n img = cv2.resize(img, (int(w), int(h)), interpolation=cv2.INTER_LINEAR)\n print('Saving {}'.format(file))\n cv2.imwrite('images//{}'.format(file), img)\n\n\nprepare_images('source_images/', 2)\nfor file in os.listdir('images/'):\n target = cv2.imread('images/{}'.format(file))\n ref = cv2.imread('source_images/{}'.format(file))\n scores = compare_images(target, ref)\n print('{}\\nPSNR: {}\\nMSE: {}\\nSSIM: {}\\n'.format(file, scores[0],\n scores[1], scores[2]))\n\n\ndef model():\n SRCNN = Sequential()\n SRCNN.add(Conv2D(filters=128, kernel_size=(9, 9), activation='relu',\n padding='valid', use_bias=True, input_shape=(None, None, 1)))\n SRCNN.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu',\n padding='same', use_bias=True))\n SRCNN.add(Conv2D(filters=1, kernel_size=(5, 5), activation='linear',\n padding='valid', 
use_bias=True))\n adam = Adam(learning_rate=0.0003)\n SRCNN.compile(loss='mean_squared_error', optimizer=adam, metrics=[\n 'mean_squared_error'])\n return SRCNN\n\n\ndef modcrop(img, scale):\n tmpsz = img.shape\n sz = tmpsz[0:2]\n sz = sz - np.mod(sz, scale)\n img = img[0:sz[0], 1:sz[1]]\n return img\n\n\ndef shave(image, border):\n img = image[border:-border, border:-border]\n return img\n\n\ndef predict(image_path):\n srcnn = model()\n srcnn.load_weights('3051crop_weight_200.h5')\n path, file = os.path.split(image_path)\n degraded = cv2.imread(image_path)\n ref = cv2.imread('source_images/{}'.format(file))\n ref = modcrop(ref, 3)\n degraded = modcrop(degraded, 3)\n temp = cv2.cvtColor(degraded, cv2.COLOR_BGR2YCrCb)\n Y = np.zeros((1, temp.shape[0], temp.shape[1], 1), dtype=float)\n Y[0, :, :, 0] = temp[:, :, 0].astype(float) / 255\n pre = srcnn.predict(Y, batch_size=1)\n pre *= 255\n pre[pre[:] > 255] = 255\n pre[pre[:] < 0] = 0\n pre = pre.astype(np.uint8)\n temp = shave(temp, 6)\n temp[:, :, 0] = pre[0, :, :, 0]\n output = cv2.cvtColor(temp, cv2.COLOR_YCrCb2BGR)\n ref = shave(ref.astype(np.uint8), 6)\n degraded = shave(degraded.astype(np.uint8), 6)\n scores = []\n scores.append(compare_images(degraded, ref))\n scores.append(compare_images(output, ref))\n return ref, degraded, output, scores\n\n\n<mask token>\nprint(\"\"\"Degraded Image: \nPSNR: {}\nMSE: {}\nSSIM: {}\n\"\"\".format(scores[0][0],\n scores[0][1], scores[0][2]))\nprint(\"\"\"Reconstructed Image: \nPSNR: {}\nMSE: {}\nSSIM: {}\n\"\"\".format(scores[\n 1][0], scores[1][1], scores[1][2]))\n<mask token>\naxs[0].imshow(cv2.cvtColor(ref, cv2.COLOR_BGR2RGB))\naxs[0].set_title('Original')\naxs[1].imshow(cv2.cvtColor(degraded, cv2.COLOR_BGR2RGB))\naxs[1].set_title('Degraded')\naxs[2].imshow(cv2.cvtColor(output, cv2.COLOR_BGR2RGB))\naxs[2].set_title('SRCNN')\nfor ax in axs:\n ax.set_xticks([])\n ax.set_yticks([])\n",
"step-4": "<mask token>\nprint('Python: {}'.format(sys.version))\nprint('Numpy: {}'.format(numpy.__version__))\nprint('Keras: {}'.format(keras.__version__))\nprint('Matplotlib: {}'.format(matplotlib.__version__))\nprint('OpenCV: {}'.format(cv2.__version__))\nprint('Skimage: {}'.format(skimage.__version__))\n<mask token>\n\n\ndef psnr(target, ref):\n target_data = target.astype(float)\n ref_data = ref.astype(float)\n diff = ref_data - target_data\n diff = diff.flatten('C')\n rmse = math.sqrt(np.mean(diff ** 2))\n return 20 * math.log10(255.0 / rmse)\n\n\ndef mse(target, ref):\n err = np.sum(target.astype('float') ** 2)\n err /= float(target.shape[0] * target.shape[1])\n return err\n\n\ndef compare_images(target, ref):\n scores = []\n scores.append(psnr(target, ref))\n scores.append(mse(target, ref))\n scores.append(ssim(target, ref, multichannel=True))\n return scores\n\n\ndef prepare_images(path, factor):\n for file in os.listdir(path):\n img = cv2.imread(path + '/' + file)\n h, w, c = img.shape\n new_height = h / factor\n new_width = w / factor\n img = cv2.resize(img, (int(new_width), int(new_height)),\n interpolation=cv2.INTER_LINEAR)\n img = cv2.resize(img, (int(w), int(h)), interpolation=cv2.INTER_LINEAR)\n print('Saving {}'.format(file))\n cv2.imwrite('images//{}'.format(file), img)\n\n\nprepare_images('source_images/', 2)\nfor file in os.listdir('images/'):\n target = cv2.imread('images/{}'.format(file))\n ref = cv2.imread('source_images/{}'.format(file))\n scores = compare_images(target, ref)\n print('{}\\nPSNR: {}\\nMSE: {}\\nSSIM: {}\\n'.format(file, scores[0],\n scores[1], scores[2]))\n\n\ndef model():\n SRCNN = Sequential()\n SRCNN.add(Conv2D(filters=128, kernel_size=(9, 9), activation='relu',\n padding='valid', use_bias=True, input_shape=(None, None, 1)))\n SRCNN.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu',\n padding='same', use_bias=True))\n SRCNN.add(Conv2D(filters=1, kernel_size=(5, 5), activation='linear',\n padding='valid', 
use_bias=True))\n adam = Adam(learning_rate=0.0003)\n SRCNN.compile(loss='mean_squared_error', optimizer=adam, metrics=[\n 'mean_squared_error'])\n return SRCNN\n\n\ndef modcrop(img, scale):\n tmpsz = img.shape\n sz = tmpsz[0:2]\n sz = sz - np.mod(sz, scale)\n img = img[0:sz[0], 1:sz[1]]\n return img\n\n\ndef shave(image, border):\n img = image[border:-border, border:-border]\n return img\n\n\ndef predict(image_path):\n srcnn = model()\n srcnn.load_weights('3051crop_weight_200.h5')\n path, file = os.path.split(image_path)\n degraded = cv2.imread(image_path)\n ref = cv2.imread('source_images/{}'.format(file))\n ref = modcrop(ref, 3)\n degraded = modcrop(degraded, 3)\n temp = cv2.cvtColor(degraded, cv2.COLOR_BGR2YCrCb)\n Y = np.zeros((1, temp.shape[0], temp.shape[1], 1), dtype=float)\n Y[0, :, :, 0] = temp[:, :, 0].astype(float) / 255\n pre = srcnn.predict(Y, batch_size=1)\n pre *= 255\n pre[pre[:] > 255] = 255\n pre[pre[:] < 0] = 0\n pre = pre.astype(np.uint8)\n temp = shave(temp, 6)\n temp[:, :, 0] = pre[0, :, :, 0]\n output = cv2.cvtColor(temp, cv2.COLOR_YCrCb2BGR)\n ref = shave(ref.astype(np.uint8), 6)\n degraded = shave(degraded.astype(np.uint8), 6)\n scores = []\n scores.append(compare_images(degraded, ref))\n scores.append(compare_images(output, ref))\n return ref, degraded, output, scores\n\n\nref, degraded, output, scores = predict('images/flowers.bmp')\nprint(\"\"\"Degraded Image: \nPSNR: {}\nMSE: {}\nSSIM: {}\n\"\"\".format(scores[0][0],\n scores[0][1], scores[0][2]))\nprint(\"\"\"Reconstructed Image: \nPSNR: {}\nMSE: {}\nSSIM: {}\n\"\"\".format(scores[\n 1][0], scores[1][1], scores[1][2]))\nfig, axs = plt.subplots(1, 3, figsize=(20, 8))\naxs[0].imshow(cv2.cvtColor(ref, cv2.COLOR_BGR2RGB))\naxs[0].set_title('Original')\naxs[1].imshow(cv2.cvtColor(degraded, cv2.COLOR_BGR2RGB))\naxs[1].set_title('Degraded')\naxs[2].imshow(cv2.cvtColor(output, cv2.COLOR_BGR2RGB))\naxs[2].set_title('SRCNN')\nfor ax in axs:\n ax.set_xticks([])\n ax.set_yticks([])\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 17 17:24:39 2020\n\n@author: code\n\"\"\"\n\nimport sys\nimport keras\nimport cv2\nimport numpy\nimport matplotlib\nimport skimage\n\nprint('Python: {}'.format(sys.version))\nprint('Numpy: {}'.format(numpy.__version__))\nprint('Keras: {}'.format(keras.__version__))\nprint('Matplotlib: {}'.format(matplotlib.__version__))\nprint('OpenCV: {}'.format(cv2.__version__))\nprint('Skimage: {}'.format(skimage.__version__))\n\n\n#import necessary packages\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, Input\nfrom keras.optimizers import SGD, Adam\nfrom skimage.measure import compare_ssim as ssim\nfrom matplotlib import pyplot as plt\nimport cv2\nimport numpy as np\nimport math\nimport os\n\n#define A function for peak signal to noise ration(PSNR)\ndef psnr(target, ref):\n #assume RGB/BGR image\n target_data = target.astype(float)\n ref_data = ref.astype(float)\n \n diff = ref_data - target_data\n diff = diff.flatten('C')\n \n rmse = math.sqrt(np.mean(diff ** 2))\n \n return 20*math.log10(255. 
/ rmse)\n\n\n#define function for mean Squared error(MSE)\ndef mse(target, ref):\n #mse is the sum pf the squared difference between the two image\n \n err = np.sum((target.astype('float'))** 2)\n err /= float(target.shape[0] *target.shape[1])\n \n return err\n \n#define function that combines all three image quality metrics\ndef compare_images(target, ref):\n scores = []\n scores.append(psnr(target, ref))\n scores.append(mse(target, ref))\n scores.append(ssim(target, ref, multichannel = True))\n \n return scores\n\n#prepare degraded images by introducing quality distortions via resizing\n \ndef prepare_images(path, factor):\n \n #loop throgh filesin the directory\n for file in os.listdir(path):\n \n #open the file\n img = cv2.imread(path +'/' + file)\n \n #find old and new image dimensions\n h, w, c = img.shape\n new_height = h / factor\n new_width = w / factor\n \n #resize the image -down\n img = (cv2.resize(img, (int(new_width), int(new_height)), interpolation = cv2.INTER_LINEAR))\n img = (cv2.resize(img, (int(w), int(h)), interpolation = cv2.INTER_LINEAR))\n \n #save the image\n print('Saving {}'.format(file))\n cv2.imwrite('images//{}'.format(file), img)\n \nprepare_images('source_images/', 2)\n\n#testing the generated images using image quality matrics\n\nfor file in os.listdir('images/'):\n \n #open target and reference images\n target = cv2.imread('images/{}'.format(file))\n ref = cv2.imread('source_images/{}'.format(file))\n \n #calculate the scores\n scores = compare_images(target, ref)\n \n #print all three scores\n print('{}\\nPSNR: {}\\nMSE: {}\\nSSIM: {}\\n'.format(file, scores[0], scores[1], scores[2]))\n \n#define the SRCNN model\n \ndef model():\n #define the model type\n SRCNN = Sequential()\n \n #add model layers\n SRCNN.add(Conv2D(filters = 128, kernel_size = (9,9), activation ='relu', padding = 'valid', use_bias = True, input_shape = (None, None, 1)))\n SRCNN.add(Conv2D(filters = 64, kernel_size = (3,3), activation ='relu', padding = 'same', 
use_bias = True ))\n SRCNN.add(Conv2D(filters = 1, kernel_size = (5,5), activation ='linear', padding = 'valid', use_bias = True))\n\n #define optimizer\n adam = Adam(learning_rate = 0.0003)\n #compile model\n SRCNN.compile(loss ='mean_squared_error', optimizer = adam, metrics =['mean_squared_error'])\n \n return SRCNN\n\n\n#define necessary image processing functions\ndef modcrop(img, scale):\n \n tmpsz = img.shape\n sz= tmpsz[0:2]\n sz = sz - np.mod(sz, scale)\n img = img[0:sz[0], 1:sz[1]]\n return img\n\n\ndef shave(image, border):\n img = image[border: -border, border: -border]\n return img\n\n#define main prediction function\ndef predict(image_path):\n \n #load the srcnn model with weights\n srcnn =model()\n srcnn.load_weights('3051crop_weight_200.h5')\n \n #load the degraded and reference images\n path, file =os.path.split(image_path)\n degraded = cv2.imread(image_path)\n ref = cv2.imread('source_images/{}'.format(file))\n \n #preprocess the image with modcrop\n ref = modcrop(ref, 3)\n degraded = modcrop(degraded, 3)\n \n #convert the image to YCrCb -srcnn trained on Y channel\n temp =cv2.cvtColor(degraded, cv2.COLOR_BGR2YCrCb)\n \n #create image slice and normalize\n Y = np.zeros((1, temp.shape[0], temp.shape[1], 1), dtype = float)\n Y[0, :, :, 0] = temp[:, :, 0].astype(float)/ 255\n \n #perform super resolution with srcnn\n pre = srcnn.predict(Y, batch_size = 1)\n \n #post process the output\n pre*= 255\n pre[pre[:] > 255] = 255\n pre[pre[:] < 0] = 0\n pre = pre.astype(np.uint8)\n \n #copy Y channel back to image and convert to BGR\n temp = shave(temp, 6)\n temp[:, :, 0] = pre[0, :, :, 0]\n output = cv2.cvtColor(temp, cv2.COLOR_YCrCb2BGR)\n \n #remove border from reference and degraded image\n ref = shave(ref.astype(np.uint8), 6)\n degraded = shave(degraded.astype(np.uint8), 6)\n \n #image quality calculations\n scores = []\n scores.append(compare_images(degraded, ref))\n scores.append(compare_images(output, ref))\n \n #return images and scores\n return 
ref, degraded, output, scores\n \n \n \nref, degraded, output, scores = predict('images/flowers.bmp')\n\n#print all score for all images\nprint('Degraded Image: \\nPSNR: {}\\nMSE: {}\\nSSIM: {}\\n'.format(scores[0][0], scores[0][1], scores[0][2]))\nprint('Reconstructed Image: \\nPSNR: {}\\nMSE: {}\\nSSIM: {}\\n'.format(scores[1][0], scores[1][1], scores[1][2]))\n\n#display images as subplots\nfig, axs = plt.subplots(1, 3, figsize = (20, 8))\naxs[0].imshow(cv2.cvtColor(ref, cv2.COLOR_BGR2RGB))\naxs[0].set_title('Original')\naxs[1].imshow(cv2.cvtColor(degraded, cv2.COLOR_BGR2RGB))\naxs[1].set_title('Degraded')\naxs[2].imshow(cv2.cvtColor(output, cv2.COLOR_BGR2RGB))\naxs[2].set_title('SRCNN')\n\n\n#remove the x and y tick marks\nfor ax in axs:\n ax.set_xticks([])\n ax.set_yticks([])",
"step-ids": [
3,
7,
9,
10,
12
]
}
|
[
3,
7,
9,
10,
12
] |
<|reserved_special_token_0|>
class BilanComptes(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@staticmethod
def creation_lignes(subedition, subgeneraux, consolidation):
"""
génération des lignes de données du bilan
:param subedition: paramètres d'édition
:param subgeneraux: paramètres généraux
:param consolidation: classe de consolidation des données des bilans
:return: lignes de données du bilan
"""
lignes = []
for code_client, client in sorted(consolidation.clients.items()):
numbers = {}
for id_compte, compte in client['comptes'].items():
numbers[id_compte] = compte['num_compte']
for id_compte, num_compte in sorted(numbers.items(), key=lambda
x: x[1]):
compte = client['comptes'][id_compte]
if compte['subs'] > 0:
ligne = [subedition.annee_fin_general, subedition.
mois_fin_general, code_client, client['sap'],
client['abrev'], client['nom'], client['type'],
client['nature'], id_compte, num_compte, compte[
'intitule'], compte['type'], compte['t3'], Outils.
format_2_dec(compte['s-mat']), Outils.format_2_dec(
compte['s-mot'])]
for categorie in subgeneraux.codes_d3():
ligne.append(Outils.format_2_dec(compte['s-' +
categorie + 't']))
ligne += [Outils.format_2_dec(compte['subs'])]
lignes.append(ligne)
return lignes
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BilanComptes(object):
    """Builds the per-account subsidy report and exports it as CSV."""

    @staticmethod
    def bilan(dossier_destination, subedition, subgeneraux, lignes):
        """Write the report to a CSV file.

        :param dossier_destination: a dossier.DossierDestination instance
        :param subedition: edition parameters (reporting year / month)
        :param subgeneraux: general parameters (D3 category codes)
        :param lignes: data rows produced by creation_lignes()
        """
        nom = ('bilan-subsides-comptes_'
               + str(subedition.annee_fin_general) + '_'
               + Outils.mois_string(subedition.mois_fin_general) + '.csv')
        with dossier_destination.writer(nom) as fichier_writer:
            # header row: fixed columns, one column per D3 category, total
            entete = ['année', 'mois', 'code client', 'code client sap',
                      'abrév. labo', 'nom labo', 'type client',
                      'nature client', 'id-compte', 'numéro compte',
                      'intitulé compte', 'code type compte',
                      'code type subside', 'Subsides MAj', 'Subsides MOj']
            for categorie in subgeneraux.codes_d3():
                entete.append('Subsides ' + categorie + 'j')
            entete.append('total Subsides')
            fichier_writer.writerow(entete)
            for rangee in lignes:
                fichier_writer.writerow(rangee)

    @staticmethod
    def creation_lignes(subedition, subgeneraux, consolidation):
        """Generate the data rows of the report.

        Clients are visited in client-code order; within a client,
        accounts are sorted by account number (stable, so ties keep
        insertion order, exactly as the original two-pass sort did).
        Accounts without subsidies (subs <= 0) are skipped.

        :param subedition: edition parameters
        :param subgeneraux: general parameters
        :param consolidation: consolidated bilan data
        :return: list of data rows
        """
        lignes = []
        for code_client, client in sorted(consolidation.clients.items()):
            comptes_tries = sorted(client['comptes'].items(),
                                   key=lambda item: item[1]['num_compte'])
            for id_compte, compte in comptes_tries:
                if compte['subs'] <= 0:
                    continue
                ligne = [subedition.annee_fin_general,
                         subedition.mois_fin_general, code_client,
                         client['sap'], client['abrev'], client['nom'],
                         client['type'], client['nature'], id_compte,
                         compte['num_compte'], compte['intitule'],
                         compte['type'], compte['t3'],
                         Outils.format_2_dec(compte['s-mat']),
                         Outils.format_2_dec(compte['s-mot'])]
                for categorie in subgeneraux.codes_d3():
                    ligne.append(
                        Outils.format_2_dec(compte['s-' + categorie + 't']))
                ligne.append(Outils.format_2_dec(compte['subs']))
                lignes.append(ligne)
        return lignes
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BilanComptes(object):
"""
Classe pour la création du bilan des comptes
"""
@staticmethod
def bilan(dossier_destination, subedition, subgeneraux, lignes):
"""
création du bilan
:param dossier_destination: Une instance de la classe dossier.DossierDestination
:param subedition: paramètres d'édition
:param subgeneraux: paramètres généraux
:param lignes: lignes de données du bilan
"""
nom = 'bilan-subsides-comptes_' + str(subedition.annee_fin_general
) + '_' + Outils.mois_string(subedition.mois_fin_general) + '.csv'
with dossier_destination.writer(nom) as fichier_writer:
ligne = ['année', 'mois', 'code client', 'code client sap',
'abrév. labo', 'nom labo', 'type client', 'nature client',
'id-compte', 'numéro compte', 'intitulé compte',
'code type compte', 'code type subside', 'Subsides MAj',
'Subsides MOj']
for categorie in subgeneraux.codes_d3():
ligne.append('Subsides ' + categorie + 'j')
ligne += ['total Subsides']
fichier_writer.writerow(ligne)
for ligne in lignes:
fichier_writer.writerow(ligne)
@staticmethod
def creation_lignes(subedition, subgeneraux, consolidation):
"""
génération des lignes de données du bilan
:param subedition: paramètres d'édition
:param subgeneraux: paramètres généraux
:param consolidation: classe de consolidation des données des bilans
:return: lignes de données du bilan
"""
lignes = []
for code_client, client in sorted(consolidation.clients.items()):
numbers = {}
for id_compte, compte in client['comptes'].items():
numbers[id_compte] = compte['num_compte']
for id_compte, num_compte in sorted(numbers.items(), key=lambda
x: x[1]):
compte = client['comptes'][id_compte]
if compte['subs'] > 0:
ligne = [subedition.annee_fin_general, subedition.
mois_fin_general, code_client, client['sap'],
client['abrev'], client['nom'], client['type'],
client['nature'], id_compte, num_compte, compte[
'intitule'], compte['type'], compte['t3'], Outils.
format_2_dec(compte['s-mat']), Outils.format_2_dec(
compte['s-mot'])]
for categorie in subgeneraux.codes_d3():
ligne.append(Outils.format_2_dec(compte['s-' +
categorie + 't']))
ligne += [Outils.format_2_dec(compte['subs'])]
lignes.append(ligne)
return lignes
<|reserved_special_token_1|>
from outils import Outils
class BilanComptes(object):
"""
Classe pour la création du bilan des comptes
"""
@staticmethod
def bilan(dossier_destination, subedition, subgeneraux, lignes):
"""
création du bilan
:param dossier_destination: Une instance de la classe dossier.DossierDestination
:param subedition: paramètres d'édition
:param subgeneraux: paramètres généraux
:param lignes: lignes de données du bilan
"""
nom = 'bilan-subsides-comptes_' + str(subedition.annee_fin_general
) + '_' + Outils.mois_string(subedition.mois_fin_general) + '.csv'
with dossier_destination.writer(nom) as fichier_writer:
ligne = ['année', 'mois', 'code client', 'code client sap',
'abrév. labo', 'nom labo', 'type client', 'nature client',
'id-compte', 'numéro compte', 'intitulé compte',
'code type compte', 'code type subside', 'Subsides MAj',
'Subsides MOj']
for categorie in subgeneraux.codes_d3():
ligne.append('Subsides ' + categorie + 'j')
ligne += ['total Subsides']
fichier_writer.writerow(ligne)
for ligne in lignes:
fichier_writer.writerow(ligne)
@staticmethod
def creation_lignes(subedition, subgeneraux, consolidation):
"""
génération des lignes de données du bilan
:param subedition: paramètres d'édition
:param subgeneraux: paramètres généraux
:param consolidation: classe de consolidation des données des bilans
:return: lignes de données du bilan
"""
lignes = []
for code_client, client in sorted(consolidation.clients.items()):
numbers = {}
for id_compte, compte in client['comptes'].items():
numbers[id_compte] = compte['num_compte']
for id_compte, num_compte in sorted(numbers.items(), key=lambda
x: x[1]):
compte = client['comptes'][id_compte]
if compte['subs'] > 0:
ligne = [subedition.annee_fin_general, subedition.
mois_fin_general, code_client, client['sap'],
client['abrev'], client['nom'], client['type'],
client['nature'], id_compte, num_compte, compte[
'intitule'], compte['type'], compte['t3'], Outils.
format_2_dec(compte['s-mat']), Outils.format_2_dec(
compte['s-mot'])]
for categorie in subgeneraux.codes_d3():
ligne.append(Outils.format_2_dec(compte['s-' +
categorie + 't']))
ligne += [Outils.format_2_dec(compte['subs'])]
lignes.append(ligne)
return lignes
<|reserved_special_token_1|>
from outils import Outils
class BilanComptes(object):
    """
    Builder for the accounts subsidy report ("bilan des comptes").
    """

    @staticmethod
    def bilan(dossier_destination, subedition, subgeneraux, lignes):
        """
        Write the report as a CSV file into the destination folder.

        :param dossier_destination: an instance of dossier.DossierDestination
        :param subedition: edition parameters (closing year/month)
        :param subgeneraux: general parameters (provides the D3 category codes)
        :param lignes: data rows of the report, as built by creation_lignes
        """
        # File name embeds the closing year and month (formatted by Outils.mois_string).
        nom = "bilan-subsides-comptes_" + str(subedition.annee_fin_general) + "_" + \
              Outils.mois_string(subedition.mois_fin_general) + ".csv"

        with dossier_destination.writer(nom) as fichier_writer:

            # Header row: fixed columns, then one subsidy column per D3
            # category, then the grand total column.
            ligne = ["année", "mois", "code client", "code client sap", "abrév. labo", "nom labo", "type client",
                     "nature client", "id-compte", "numéro compte", "intitulé compte", "code type compte",
                     "code type subside", "Subsides MAj", "Subsides MOj"]
            for categorie in subgeneraux.codes_d3():
                ligne.append("Subsides " + categorie + "j")
            ligne += ["total Subsides"]
            fichier_writer.writerow(ligne)

            for ligne in lignes:
                fichier_writer.writerow(ligne)

    @staticmethod
    def creation_lignes(subedition, subgeneraux, consolidation):
        """
        Build the data rows of the report.

        Clients are iterated in client-code order; within a client, accounts
        are ordered by their account number.  Accounts without subsidy
        (compte['subs'] <= 0) are skipped.

        :param subedition: edition parameters
        :param subgeneraux: general parameters
        :param consolidation: consolidation of the report data
        :return: list of data rows (same column layout as the header in bilan)
        """
        lignes = []
        for code_client, client in sorted(consolidation.clients.items()):

            # Map account id -> account number so accounts can be sorted by
            # their human-facing number rather than by their internal id.
            numbers = {}
            for id_compte, compte in client['comptes'].items():
                numbers[id_compte] = compte['num_compte']

            for id_compte, num_compte in sorted(numbers.items(), key=lambda x: x[1]):
                compte = client['comptes'][id_compte]
                if compte['subs'] > 0:
                    ligne = [subedition.annee_fin_general, subedition.mois_fin_general, code_client, client['sap'],
                             client['abrev'], client['nom'], client['type'], client['nature'], id_compte,
                             num_compte, compte['intitule'], compte['type'], compte['t3'],
                             Outils.format_2_dec(compte['s-mat']), Outils.format_2_dec(compte['s-mot'])]
                    # One amount per D3 category, in the same order as the header.
                    for categorie in subgeneraux.codes_d3():
                        ligne.append(Outils.format_2_dec(compte['s-' + categorie + 't']))
                    ligne += [Outils.format_2_dec(compte['subs'])]
                    lignes.append(ligne)
        return lignes
|
flexible
|
{
"blob_id": "53c874fbe14031c323f83db58f17990f4e60bc58",
"index": 2195,
"step-1": "<mask token>\n\n\nclass BilanComptes(object):\n <mask token>\n <mask token>\n\n @staticmethod\n def creation_lignes(subedition, subgeneraux, consolidation):\n \"\"\"\n génération des lignes de données du bilan\n :param subedition: paramètres d'édition\n :param subgeneraux: paramètres généraux\n :param consolidation: classe de consolidation des données des bilans\n :return: lignes de données du bilan\n \"\"\"\n lignes = []\n for code_client, client in sorted(consolidation.clients.items()):\n numbers = {}\n for id_compte, compte in client['comptes'].items():\n numbers[id_compte] = compte['num_compte']\n for id_compte, num_compte in sorted(numbers.items(), key=lambda\n x: x[1]):\n compte = client['comptes'][id_compte]\n if compte['subs'] > 0:\n ligne = [subedition.annee_fin_general, subedition.\n mois_fin_general, code_client, client['sap'],\n client['abrev'], client['nom'], client['type'],\n client['nature'], id_compte, num_compte, compte[\n 'intitule'], compte['type'], compte['t3'], Outils.\n format_2_dec(compte['s-mat']), Outils.format_2_dec(\n compte['s-mot'])]\n for categorie in subgeneraux.codes_d3():\n ligne.append(Outils.format_2_dec(compte['s-' +\n categorie + 't']))\n ligne += [Outils.format_2_dec(compte['subs'])]\n lignes.append(ligne)\n return lignes\n",
"step-2": "<mask token>\n\n\nclass BilanComptes(object):\n <mask token>\n\n @staticmethod\n def bilan(dossier_destination, subedition, subgeneraux, lignes):\n \"\"\"\n création du bilan\n :param dossier_destination: Une instance de la classe dossier.DossierDestination\n :param subedition: paramètres d'édition\n :param subgeneraux: paramètres généraux\n :param lignes: lignes de données du bilan\n \"\"\"\n nom = 'bilan-subsides-comptes_' + str(subedition.annee_fin_general\n ) + '_' + Outils.mois_string(subedition.mois_fin_general) + '.csv'\n with dossier_destination.writer(nom) as fichier_writer:\n ligne = ['année', 'mois', 'code client', 'code client sap',\n 'abrév. labo', 'nom labo', 'type client', 'nature client',\n 'id-compte', 'numéro compte', 'intitulé compte',\n 'code type compte', 'code type subside', 'Subsides MAj',\n 'Subsides MOj']\n for categorie in subgeneraux.codes_d3():\n ligne.append('Subsides ' + categorie + 'j')\n ligne += ['total Subsides']\n fichier_writer.writerow(ligne)\n for ligne in lignes:\n fichier_writer.writerow(ligne)\n\n @staticmethod\n def creation_lignes(subedition, subgeneraux, consolidation):\n \"\"\"\n génération des lignes de données du bilan\n :param subedition: paramètres d'édition\n :param subgeneraux: paramètres généraux\n :param consolidation: classe de consolidation des données des bilans\n :return: lignes de données du bilan\n \"\"\"\n lignes = []\n for code_client, client in sorted(consolidation.clients.items()):\n numbers = {}\n for id_compte, compte in client['comptes'].items():\n numbers[id_compte] = compte['num_compte']\n for id_compte, num_compte in sorted(numbers.items(), key=lambda\n x: x[1]):\n compte = client['comptes'][id_compte]\n if compte['subs'] > 0:\n ligne = [subedition.annee_fin_general, subedition.\n mois_fin_general, code_client, client['sap'],\n client['abrev'], client['nom'], client['type'],\n client['nature'], id_compte, num_compte, compte[\n 'intitule'], compte['type'], compte['t3'], Outils.\n 
format_2_dec(compte['s-mat']), Outils.format_2_dec(\n compte['s-mot'])]\n for categorie in subgeneraux.codes_d3():\n ligne.append(Outils.format_2_dec(compte['s-' +\n categorie + 't']))\n ligne += [Outils.format_2_dec(compte['subs'])]\n lignes.append(ligne)\n return lignes\n",
"step-3": "<mask token>\n\n\nclass BilanComptes(object):\n \"\"\"\n Classe pour la création du bilan des comptes\n \"\"\"\n\n @staticmethod\n def bilan(dossier_destination, subedition, subgeneraux, lignes):\n \"\"\"\n création du bilan\n :param dossier_destination: Une instance de la classe dossier.DossierDestination\n :param subedition: paramètres d'édition\n :param subgeneraux: paramètres généraux\n :param lignes: lignes de données du bilan\n \"\"\"\n nom = 'bilan-subsides-comptes_' + str(subedition.annee_fin_general\n ) + '_' + Outils.mois_string(subedition.mois_fin_general) + '.csv'\n with dossier_destination.writer(nom) as fichier_writer:\n ligne = ['année', 'mois', 'code client', 'code client sap',\n 'abrév. labo', 'nom labo', 'type client', 'nature client',\n 'id-compte', 'numéro compte', 'intitulé compte',\n 'code type compte', 'code type subside', 'Subsides MAj',\n 'Subsides MOj']\n for categorie in subgeneraux.codes_d3():\n ligne.append('Subsides ' + categorie + 'j')\n ligne += ['total Subsides']\n fichier_writer.writerow(ligne)\n for ligne in lignes:\n fichier_writer.writerow(ligne)\n\n @staticmethod\n def creation_lignes(subedition, subgeneraux, consolidation):\n \"\"\"\n génération des lignes de données du bilan\n :param subedition: paramètres d'édition\n :param subgeneraux: paramètres généraux\n :param consolidation: classe de consolidation des données des bilans\n :return: lignes de données du bilan\n \"\"\"\n lignes = []\n for code_client, client in sorted(consolidation.clients.items()):\n numbers = {}\n for id_compte, compte in client['comptes'].items():\n numbers[id_compte] = compte['num_compte']\n for id_compte, num_compte in sorted(numbers.items(), key=lambda\n x: x[1]):\n compte = client['comptes'][id_compte]\n if compte['subs'] > 0:\n ligne = [subedition.annee_fin_general, subedition.\n mois_fin_general, code_client, client['sap'],\n client['abrev'], client['nom'], client['type'],\n client['nature'], id_compte, num_compte, compte[\n 
'intitule'], compte['type'], compte['t3'], Outils.\n format_2_dec(compte['s-mat']), Outils.format_2_dec(\n compte['s-mot'])]\n for categorie in subgeneraux.codes_d3():\n ligne.append(Outils.format_2_dec(compte['s-' +\n categorie + 't']))\n ligne += [Outils.format_2_dec(compte['subs'])]\n lignes.append(ligne)\n return lignes\n",
"step-4": "from outils import Outils\n\n\nclass BilanComptes(object):\n \"\"\"\n Classe pour la création du bilan des comptes\n \"\"\"\n\n @staticmethod\n def bilan(dossier_destination, subedition, subgeneraux, lignes):\n \"\"\"\n création du bilan\n :param dossier_destination: Une instance de la classe dossier.DossierDestination\n :param subedition: paramètres d'édition\n :param subgeneraux: paramètres généraux\n :param lignes: lignes de données du bilan\n \"\"\"\n nom = 'bilan-subsides-comptes_' + str(subedition.annee_fin_general\n ) + '_' + Outils.mois_string(subedition.mois_fin_general) + '.csv'\n with dossier_destination.writer(nom) as fichier_writer:\n ligne = ['année', 'mois', 'code client', 'code client sap',\n 'abrév. labo', 'nom labo', 'type client', 'nature client',\n 'id-compte', 'numéro compte', 'intitulé compte',\n 'code type compte', 'code type subside', 'Subsides MAj',\n 'Subsides MOj']\n for categorie in subgeneraux.codes_d3():\n ligne.append('Subsides ' + categorie + 'j')\n ligne += ['total Subsides']\n fichier_writer.writerow(ligne)\n for ligne in lignes:\n fichier_writer.writerow(ligne)\n\n @staticmethod\n def creation_lignes(subedition, subgeneraux, consolidation):\n \"\"\"\n génération des lignes de données du bilan\n :param subedition: paramètres d'édition\n :param subgeneraux: paramètres généraux\n :param consolidation: classe de consolidation des données des bilans\n :return: lignes de données du bilan\n \"\"\"\n lignes = []\n for code_client, client in sorted(consolidation.clients.items()):\n numbers = {}\n for id_compte, compte in client['comptes'].items():\n numbers[id_compte] = compte['num_compte']\n for id_compte, num_compte in sorted(numbers.items(), key=lambda\n x: x[1]):\n compte = client['comptes'][id_compte]\n if compte['subs'] > 0:\n ligne = [subedition.annee_fin_general, subedition.\n mois_fin_general, code_client, client['sap'],\n client['abrev'], client['nom'], client['type'],\n client['nature'], id_compte, num_compte, 
compte[\n 'intitule'], compte['type'], compte['t3'], Outils.\n format_2_dec(compte['s-mat']), Outils.format_2_dec(\n compte['s-mot'])]\n for categorie in subgeneraux.codes_d3():\n ligne.append(Outils.format_2_dec(compte['s-' +\n categorie + 't']))\n ligne += [Outils.format_2_dec(compte['subs'])]\n lignes.append(ligne)\n return lignes\n",
"step-5": "from outils import Outils\n\n\nclass BilanComptes(object):\n \"\"\"\n Classe pour la création du bilan des comptes\n \"\"\"\n\n @staticmethod\n def bilan(dossier_destination, subedition, subgeneraux, lignes):\n \"\"\"\n création du bilan\n :param dossier_destination: Une instance de la classe dossier.DossierDestination\n :param subedition: paramètres d'édition\n :param subgeneraux: paramètres généraux\n :param lignes: lignes de données du bilan\n \"\"\"\n nom = \"bilan-subsides-comptes_\" + str(subedition.annee_fin_general) + \"_\" + \\\n Outils.mois_string(subedition.mois_fin_general) + \".csv\"\n\n with dossier_destination.writer(nom) as fichier_writer:\n\n ligne = [\"année\", \"mois\", \"code client\", \"code client sap\", \"abrév. labo\", \"nom labo\", \"type client\",\n \"nature client\", \"id-compte\", \"numéro compte\", \"intitulé compte\", \"code type compte\",\n \"code type subside\", \"Subsides MAj\", \"Subsides MOj\"]\n for categorie in subgeneraux.codes_d3():\n ligne.append(\"Subsides \" + categorie + \"j\")\n ligne += [\"total Subsides\"]\n fichier_writer.writerow(ligne)\n\n for ligne in lignes:\n fichier_writer.writerow(ligne)\n\n @staticmethod\n def creation_lignes(subedition, subgeneraux, consolidation):\n \"\"\"\n génération des lignes de données du bilan\n :param subedition: paramètres d'édition\n :param subgeneraux: paramètres généraux\n :param consolidation: classe de consolidation des données des bilans\n :return: lignes de données du bilan\n \"\"\"\n lignes = []\n for code_client, client in sorted(consolidation.clients.items()):\n\n numbers = {}\n for id_compte, compte in client['comptes'].items():\n numbers[id_compte] = compte['num_compte']\n\n for id_compte, num_compte in sorted(numbers.items(), key=lambda x: x[1]):\n compte = client['comptes'][id_compte]\n if compte['subs'] > 0:\n ligne = [subedition.annee_fin_general, subedition.mois_fin_general, code_client, client['sap'],\n client['abrev'], client['nom'], client['type'], 
client['nature'], id_compte,\n num_compte, compte['intitule'], compte['type'], compte['t3'],\n Outils.format_2_dec(compte['s-mat']), Outils.format_2_dec(compte['s-mot'])]\n for categorie in subgeneraux.codes_d3():\n ligne.append(Outils.format_2_dec(compte['s-' + categorie + 't']))\n ligne += [Outils.format_2_dec(compte['subs'])]\n lignes.append(ligne)\n return lignes\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import numpy as np
import cv2
import myrustlib
def detect_lines_hough(img):
    """Detect line segments via OpenCV's probabilistic Hough transform.

    :param img: single-channel binary image (lines as dark pixels; the image
        is inverted before the transform because HoughLinesP treats non-zero
        pixels as features)
    :return: list of (x1, y1, x2, y2) segments; empty list when none found
    """
    # Bug fix: the original referenced an undefined global `opening`
    # instead of the `img` parameter.
    lines = cv2.HoughLinesP(
        cv2.bitwise_not(img),
        rho=1,
        theta=np.pi / 2,       # restrict detection to 90-degree steps
        threshold=50,
        minLineLength=120,
        maxLineGap=10
    )
    if lines is None:
        # HoughLinesP returns None (not an empty array) when nothing matches.
        return []
    return [line[0] for line in lines]  # unwrap HoughLinesP's 1-element nesting
def detect_lines_rust(img, min_line_length):
    """Line detection delegated to the native `myrustlib` extension.

    Flattens the white-pixel mask row-major into a list of bools, hands it to
    ``myrustlib.detect_lines`` with the image dimensions, and splits the
    returned segments by orientation.

    :param img: single-channel image; pixels equal to 255 count as white
    :param min_line_length: minimum segment length, forwarded to the extension
    :return: (horizontal, vertical) line lists, as from split_by_orientation
    """
    height, width = img.shape
    # Row-major flatten: index = y * width + x — presumably the layout the
    # Rust side expects; TODO confirm against myrustlib's implementation.
    white = (img == 255).flatten().tolist()
    detected = myrustlib.detect_lines(white, width, height, min_line_length)
    return split_by_orientation(detected)
def _long_runs(flags, min_run_length):
    """Yield (start, end) index pairs of maximal True runs longer than min_run_length.

    ``end`` is the index of the last True element of the run (inclusive).
    """
    start = None
    for i, flag in enumerate(flags):
        if flag:
            if start is None:
                start = i
        elif start is not None:
            if i - start > min_run_length:
                yield start, i - 1
            start = None
    # Flush a run that extends to the end of the sequence.  Bug fix: the
    # original dropped the final pixel here (recorded index len-2 instead of
    # len-1 for runs touching the border).
    if start is not None and len(flags) - start > min_run_length:
        yield start, len(flags) - 1


def detect_lines(img, min_line_length):
    """
    Custom line detection algorithm.

    Scans every row for horizontal runs of white pixels and every column for
    vertical runs, keeping only runs longer than ``min_line_length``.

    :param img: single-channel image; pixels equal to 255 count as white
    :param min_line_length: strict lower bound on a run's length in pixels
    :return: (horizontal, vertical); horizontal entries are (x1, y, x2, y)
             with x1 <= x2, vertical entries are (x, y1, x, y2) with
             y1 >= y2 (larger y first, as downstream consumers expect)
    """
    height, width = img.shape
    white = img == 255
    horizontal = []
    vertical = []
    for y in range(height):
        for x_start, x_end in _long_runs(white[y, :].tolist(), min_line_length):
            horizontal.append((x_start, y, x_end, y))
    for x in range(width):
        for y_start, y_end in _long_runs(white[:, x].tolist(), min_line_length):
            # Vertical lines are stored with the larger y coordinate first.
            vertical.append((x, y_end, x, y_start))
    return (horizontal, vertical)
def remove_lines_close_to_border(horizontal, vertical, width, height, min_distance):
    """Drop lines running closer than ``min_distance`` to the image border.

    Horizontal lines are filtered on their y coordinate, vertical lines on
    their x coordinate.

    :return: (horizontal_kept, vertical_kept)
    """
    kept_horizontal = [
        line for line in horizontal
        if min_distance < line[1] < height - min_distance
    ]
    kept_vertical = [
        line for line in vertical
        if min_distance < line[0] < width - min_distance
    ]
    return (kept_horizontal, kept_vertical)
def split_by_orientation(lines):
    """Partition (x1, y1, x2, y2) segments into (horizontal, vertical).

    A segment whose vertical extent strictly exceeds its horizontal extent is
    vertical; ties count as horizontal.
    """
    horizontal = []
    vertical = []
    for line in lines:
        x1, y1, x2, y2 = line
        bucket = vertical if abs(y2 - y1) > abs(x2 - x1) else horizontal
        bucket.append((x1, y1, x2, y2))
    return (horizontal, vertical)
def reduce_lines(input_horizontal, input_vertical, min_distance):
    """
    Takes a list of vertical and horizontal lines,
    tries to reduce them to essential lines eliminating lines close to each
    other.

    Greedy clustering: each not-yet-seen line absorbs every line whose
    perpendicular coordinate lies within ``min_distance`` of it; the merged
    line spans the extremes of the cluster and sits at the integer mean of
    the collected coordinates.

    NOTE(review): the return order is (vertical, horizontal) — the reverse
    of the (horizontal, vertical) order used elsewhere in this module;
    callers must unpack accordingly.
    NOTE(review): the inner scan also visits the seed line itself, so the
    seed's coordinate is counted twice in the mean (x seeded with [x1], y
    seeded with [y1, y2]) — presumably unintentional weighting; confirm.

    :param input_horizontal: (x1, y1, x2, y2) tuples with y1 == y2
    :param input_vertical: (x1, y1, x2, y2) tuples with x1 == x2 and
        y1 >= y2 (larger y first, as produced by detect_lines)
    :param min_distance: parallel lines closer than this are merged
    :return: (output_vertical, output_horizontal)
    """
    seen_vertical = set()
    seen_horizontal = set()
    output_vertical = []
    output_horizontal = []
    # vertical pass: cluster by x coordinate
    for index, (x1,y1,x2,y2) in enumerate(input_vertical):
        if index in seen_vertical:
            continue
        x_values = [x1]
        for other_index, (x1_b,y1_b,x2_b,y2_b) in enumerate(input_vertical):
            if other_index in seen_vertical:
                continue
            if (abs(x1 - x1_b) < min_distance):
                # if the end is further toward the top (smaller y), take it
                if (y2_b < y2):
                    y2 = y2_b
                # if the start is further toward the bottom (larger y), take it
                if (y1_b > y1):
                    y1 = y1_b
                x_values.append(x1_b)
                seen_vertical.add(other_index)
        # average x value of the cluster gives the merged line's position
        x = int(np.mean(x_values))
        output_vertical.append((x,y1,x,y2))
    # horizontal pass: cluster by y coordinate
    for index, (x1,y1,x2,y2) in enumerate(input_horizontal):
        if index in seen_horizontal:
            continue
        y_values = [y1, y2]
        for other_index, (x1_b,y1_b,x2_b,y2_b) in enumerate(input_horizontal):
            if other_index in seen_horizontal:
                continue
            if (abs(y1 - y1_b) < min_distance):
                # if the start is further to the left, take it
                if (x1_b < x1):
                    x1 = x1_b
                # if the end is further to the right, take it
                if (x2_b > x2):
                    x2 = x2_b
                y_values += [y1_b, y2_b]
                seen_horizontal.add(other_index)
        # average y value of the cluster gives the merged line's position
        y = int(np.mean(y_values))
        output_horizontal.append((x1,y,x2,y))
    return (output_vertical, output_horizontal)
def connect_lines(horizontal_lines, vertical_lines):
    """
    Snap the endpoints of every line onto the nearest crossing line.

    Each horizontal line's x endpoints move to the x coordinate of the
    nearest vertical line; each vertical line's y endpoints move to the y
    coordinate of the nearest horizontal line.  A sentinel offset of 20000
    stands in when no closer candidate exists, matching the original
    behaviour for empty or very distant candidate sets.

    Possible improvements:
      - prefer crossing lines in the direction of the end
      - make sure the "crossing line" is actually long enough to cross
    """
    SENTINEL = 20000

    def nearest_shift(coordinate, candidates):
        # Smallest-magnitude difference between `coordinate` and any
        # candidate; listing the sentinel first reproduces the original
        # greedy scan's tie-breaking exactly.
        return min([SENTINEL] + [coordinate - c for c in candidates], key=abs)

    vertical_xs = [line[0] for line in vertical_lines]
    horizontal_ys = [line[1] for line in horizontal_lines]

    connected_horizontal = []
    for x1, y1, x2, y2 in horizontal_lines:
        connected_horizontal.append((
            x1 - nearest_shift(x1, vertical_xs),
            y1,
            x2 - nearest_shift(x2, vertical_xs),
            y2,
        ))

    connected_vertical = []
    for x1, y1, x2, y2 in vertical_lines:
        connected_vertical.append((
            x1,
            y1 - nearest_shift(y1, horizontal_ys),
            x2,
            y2 - nearest_shift(y2, horizontal_ys),
        ))

    return (connected_horizontal, connected_vertical)
def find_rectangles(top_left, bottom_left, bottom_right, top_right):
    """
    Assemble axis-aligned rectangles (x, y, w, h) from corner points.

    For every top-left corner, the matching top-right corner is the first
    one on the same row further right, and the matching bottom-left corner
    the first one in the same column further down.  ``top_right`` and
    ``bottom_left`` are sorted in place so that "first" means "nearest".
    Note: ``bottom_right`` is accepted for interface symmetry but unused.
    """
    top_right.sort(key=lambda corner: corner[0])
    bottom_left.sort(key=lambda corner: corner[1])
    rectangles = []
    for x, y in top_left:
        rights = [(tx, ty) for tx, ty in top_right if ty == y and tx > x]
        downs = [(bx, by) for bx, by in bottom_left if bx == x and by > y]
        if rights and downs:
            x2 = rights[0][0]
            y2 = downs[0][1]
            rectangles.append((x, y, x2 - x, y2 - y))
    return rectangles
def find_corners(horizontal, vertical):
    """
    Classify every horizontal/vertical line crossing as rectangle corner(s).

    A crossing at an end of the horizontal line only yields corners on that
    side (left end -> left corners, right end -> right corners); a crossing
    in the middle yields corners on both sides. Note that vertical lines run
    from bottom (y_1) to top (y_2), so a crossing requires y_2 <= y_h <= y_1.

    Returns:
        Tuple (top_left, bottom_left, bottom_right, top_right), each a list
        of (x, y) crossing points.
    """
    top_left = []
    top_right = []
    bottom_left = []
    bottom_right = []

    for h_start, h_y, h_end, _ in horizontal:
        for v_x, v_bottom, _, v_top in vertical:
            if not (h_start <= v_x <= h_end and v_top <= h_y <= v_bottom):
                continue  # the two lines do not actually cross
            point = (v_x, h_y)
            extends_down = v_bottom != h_y  # vertical continues below the crossing
            extends_up = v_top != h_y       # vertical continues above the crossing
            if v_x == h_start:
                # Crossing sits at the left end of the horizontal line.
                if extends_down:
                    bottom_left.append(point)
                if extends_up:
                    top_left.append(point)
            elif v_x == h_end:
                # Crossing sits at the right end of the horizontal line.
                if extends_down:
                    bottom_right.append(point)
                if extends_up:
                    top_right.append(point)
            else:
                # Crossing in the middle of the horizontal: corners form on
                # both sides of the vertical line.
                if extends_down:
                    top_left.append(point)
                    top_right.append(point)
                if extends_up:
                    bottom_left.append(point)
                    bottom_right.append(point)

    return (top_left, bottom_left, bottom_right, top_right)
|
normal
|
{
"blob_id": "bb5bea4ea100950b59fb2b168b75dec349938aac",
"index": 7195,
"step-1": "<mask token>\n\n\ndef detect_lines_hough(img):\n lines = cv2.HoughLinesP(cv2.bitwise_not(opening), rho=1, theta=np.pi / \n 2, threshold=50, minLineLength=120, maxLineGap=10)\n return [line[0] for line in lines]\n\n\n<mask token>\n\n\ndef detect_lines(img, min_line_length):\n \"\"\"\n Custom line detection algorithm\n \"\"\"\n height, width = img.shape\n horizontal = []\n vertical = []\n current_line = False\n current_line_start = 0\n white = img == 255\n for y in range(height):\n for x in range(width):\n is_white = white.item(y, x)\n if is_white:\n if not current_line:\n current_line = True\n current_line_start = x\n elif current_line:\n current_line = False\n if x - current_line_start > min_line_length:\n horizontal.append((current_line_start, y, x - 1, y))\n if current_line:\n current_line = False\n if x - current_line_start > min_line_length:\n horizontal.append((current_line_start, y, x - 1, y))\n current_line = False\n current_line_start = 0\n for x in range(width):\n for y in range(height):\n is_white = white.item(y, x)\n if is_white:\n if not current_line:\n current_line = True\n current_line_start = y\n elif current_line:\n current_line = False\n if y - current_line_start > min_line_length:\n vertical.append((x, y - 1, x, current_line_start))\n if current_line:\n current_line = False\n if y - current_line_start > min_line_length:\n vertical.append((x, y - 1, x, current_line_start))\n return horizontal, vertical\n\n\n<mask token>\n\n\ndef split_by_orientation(lines):\n horizontal = []\n vertical = []\n for x1, y1, x2, y2 in lines:\n if abs(y1 - y2) > abs(x1 - x2):\n vertical.append((x1, y1, x2, y2))\n else:\n horizontal.append((x1, y1, x2, y2))\n return horizontal, vertical\n\n\ndef reduce_lines(input_horizontal, input_vertical, min_distance):\n \"\"\"\n Takes a list of vertical and horizontal lines,\n tries to reduce them to essential lines eliminating lines close to each\n other.\n \"\"\"\n seen_vertical = set()\n seen_horizontal = set()\n 
output_vertical = []\n output_horizontal = []\n for index, (x1, y1, x2, y2) in enumerate(input_vertical):\n if index in seen_vertical:\n continue\n x_values = [x1]\n for other_index, (x1_b, y1_b, x2_b, y2_b) in enumerate(input_vertical):\n if other_index in seen_vertical:\n continue\n if abs(x1 - x1_b) < min_distance:\n if y2_b < y2:\n y2 = y2_b\n if y1_b > y1:\n y1 = y1_b\n x_values.append(x1_b)\n seen_vertical.add(other_index)\n x = int(np.mean(x_values))\n output_vertical.append((x, y1, x, y2))\n for index, (x1, y1, x2, y2) in enumerate(input_horizontal):\n if index in seen_horizontal:\n continue\n y_values = [y1, y2]\n for other_index, (x1_b, y1_b, x2_b, y2_b) in enumerate(input_horizontal\n ):\n if other_index in seen_horizontal:\n continue\n if abs(y1 - y1_b) < min_distance:\n if x1_b < x1:\n x1 = x1_b\n if x2_b > x2:\n x2 = x2_b\n y_values += [y1_b, y2_b]\n seen_horizontal.add(other_index)\n y = int(np.mean(y_values))\n output_horizontal.append((x1, y, x2, y))\n return output_vertical, output_horizontal\n\n\ndef connect_lines(horizontal_lines, vertical_lines):\n \"\"\"\n Makes sure the ends of every line are touching another line\n\n Possible improvements:\n - Prefer crossing lines in the direction of the end\n - e.g. the right end of a horizontal should rather connect to a vertical to the closest_vertical_right\n - Make sure the \"crossing line\" is actually long enough to cross this line\n\n Idea:\n - Test and improve this algorithm by\n - 1. create lines a la mondrian\n - 2. randomly shorten this lines\n - 3. run the algorithm over the sortened version\n - 4. 
check whether the result is the original\n \"\"\"\n horizontal = []\n vertical = []\n for x1, y1, x2, y2 in horizontal_lines:\n closest_vertical_left = 20000\n closest_vertical_right = 20000\n for v_x1, v_y1, v_x2, v_y2 in vertical_lines:\n if abs(x1 - v_x1) < abs(closest_vertical_left):\n closest_vertical_left = x1 - v_x1\n if abs(x2 - v_x1) < abs(closest_vertical_right):\n closest_vertical_right = x2 - v_x1\n x1 = x1 - closest_vertical_left\n x2 = x2 - closest_vertical_right\n horizontal.append((x1, y1, x2, y2))\n for x1, y1, x2, y2 in vertical_lines:\n closest_horizontal_up = 20000\n closest_horizontal_down = 20000\n for h_x1, h_y1, h_x2, h_y2 in horizontal_lines:\n if abs(y1 - h_y1) < abs(closest_horizontal_up):\n closest_horizontal_up = y1 - h_y1\n if abs(y2 - h_y1) < abs(closest_horizontal_down):\n closest_horizontal_down = y2 - h_y1\n y1 = y1 - closest_horizontal_up\n y2 = y2 - closest_horizontal_down\n vertical.append((x1, y1, x2, y2))\n return horizontal, vertical\n\n\n<mask token>\n\n\ndef find_corners(horizontal, vertical):\n top_left = []\n top_right = []\n bottom_left = []\n bottom_right = []\n for x_1, y_h, x_2, _ in horizontal:\n for x_v, y_1, _, y_2 in vertical:\n crossing = x_v, y_h\n if x_v >= x_1 and x_v <= x_2 and y_h <= y_1 and y_h >= y_2:\n if x_1 == x_v:\n if y_1 != y_h:\n bottom_left.append(crossing)\n if y_2 != y_h:\n top_left.append(crossing)\n elif x_2 == x_v:\n if y_1 != y_h:\n bottom_right.append(crossing)\n if y_2 != y_h:\n top_right.append(crossing)\n else:\n if y_1 != y_h:\n top_left.append(crossing)\n top_right.append(crossing)\n if y_2 != y_h:\n bottom_left.append(crossing)\n bottom_right.append(crossing)\n return top_left, bottom_left, bottom_right, top_right\n",
"step-2": "<mask token>\n\n\ndef detect_lines_hough(img):\n lines = cv2.HoughLinesP(cv2.bitwise_not(opening), rho=1, theta=np.pi / \n 2, threshold=50, minLineLength=120, maxLineGap=10)\n return [line[0] for line in lines]\n\n\n<mask token>\n\n\ndef detect_lines(img, min_line_length):\n \"\"\"\n Custom line detection algorithm\n \"\"\"\n height, width = img.shape\n horizontal = []\n vertical = []\n current_line = False\n current_line_start = 0\n white = img == 255\n for y in range(height):\n for x in range(width):\n is_white = white.item(y, x)\n if is_white:\n if not current_line:\n current_line = True\n current_line_start = x\n elif current_line:\n current_line = False\n if x - current_line_start > min_line_length:\n horizontal.append((current_line_start, y, x - 1, y))\n if current_line:\n current_line = False\n if x - current_line_start > min_line_length:\n horizontal.append((current_line_start, y, x - 1, y))\n current_line = False\n current_line_start = 0\n for x in range(width):\n for y in range(height):\n is_white = white.item(y, x)\n if is_white:\n if not current_line:\n current_line = True\n current_line_start = y\n elif current_line:\n current_line = False\n if y - current_line_start > min_line_length:\n vertical.append((x, y - 1, x, current_line_start))\n if current_line:\n current_line = False\n if y - current_line_start > min_line_length:\n vertical.append((x, y - 1, x, current_line_start))\n return horizontal, vertical\n\n\n<mask token>\n\n\ndef split_by_orientation(lines):\n horizontal = []\n vertical = []\n for x1, y1, x2, y2 in lines:\n if abs(y1 - y2) > abs(x1 - x2):\n vertical.append((x1, y1, x2, y2))\n else:\n horizontal.append((x1, y1, x2, y2))\n return horizontal, vertical\n\n\ndef reduce_lines(input_horizontal, input_vertical, min_distance):\n \"\"\"\n Takes a list of vertical and horizontal lines,\n tries to reduce them to essential lines eliminating lines close to each\n other.\n \"\"\"\n seen_vertical = set()\n seen_horizontal = set()\n 
output_vertical = []\n output_horizontal = []\n for index, (x1, y1, x2, y2) in enumerate(input_vertical):\n if index in seen_vertical:\n continue\n x_values = [x1]\n for other_index, (x1_b, y1_b, x2_b, y2_b) in enumerate(input_vertical):\n if other_index in seen_vertical:\n continue\n if abs(x1 - x1_b) < min_distance:\n if y2_b < y2:\n y2 = y2_b\n if y1_b > y1:\n y1 = y1_b\n x_values.append(x1_b)\n seen_vertical.add(other_index)\n x = int(np.mean(x_values))\n output_vertical.append((x, y1, x, y2))\n for index, (x1, y1, x2, y2) in enumerate(input_horizontal):\n if index in seen_horizontal:\n continue\n y_values = [y1, y2]\n for other_index, (x1_b, y1_b, x2_b, y2_b) in enumerate(input_horizontal\n ):\n if other_index in seen_horizontal:\n continue\n if abs(y1 - y1_b) < min_distance:\n if x1_b < x1:\n x1 = x1_b\n if x2_b > x2:\n x2 = x2_b\n y_values += [y1_b, y2_b]\n seen_horizontal.add(other_index)\n y = int(np.mean(y_values))\n output_horizontal.append((x1, y, x2, y))\n return output_vertical, output_horizontal\n\n\ndef connect_lines(horizontal_lines, vertical_lines):\n \"\"\"\n Makes sure the ends of every line are touching another line\n\n Possible improvements:\n - Prefer crossing lines in the direction of the end\n - e.g. the right end of a horizontal should rather connect to a vertical to the closest_vertical_right\n - Make sure the \"crossing line\" is actually long enough to cross this line\n\n Idea:\n - Test and improve this algorithm by\n - 1. create lines a la mondrian\n - 2. randomly shorten this lines\n - 3. run the algorithm over the sortened version\n - 4. 
check whether the result is the original\n \"\"\"\n horizontal = []\n vertical = []\n for x1, y1, x2, y2 in horizontal_lines:\n closest_vertical_left = 20000\n closest_vertical_right = 20000\n for v_x1, v_y1, v_x2, v_y2 in vertical_lines:\n if abs(x1 - v_x1) < abs(closest_vertical_left):\n closest_vertical_left = x1 - v_x1\n if abs(x2 - v_x1) < abs(closest_vertical_right):\n closest_vertical_right = x2 - v_x1\n x1 = x1 - closest_vertical_left\n x2 = x2 - closest_vertical_right\n horizontal.append((x1, y1, x2, y2))\n for x1, y1, x2, y2 in vertical_lines:\n closest_horizontal_up = 20000\n closest_horizontal_down = 20000\n for h_x1, h_y1, h_x2, h_y2 in horizontal_lines:\n if abs(y1 - h_y1) < abs(closest_horizontal_up):\n closest_horizontal_up = y1 - h_y1\n if abs(y2 - h_y1) < abs(closest_horizontal_down):\n closest_horizontal_down = y2 - h_y1\n y1 = y1 - closest_horizontal_up\n y2 = y2 - closest_horizontal_down\n vertical.append((x1, y1, x2, y2))\n return horizontal, vertical\n\n\ndef find_rectangles(top_left, bottom_left, bottom_right, top_right):\n top_right.sort(key=lambda pos: pos[0])\n bottom_left.sort(key=lambda pos: pos[1])\n rectangles = []\n for x, y in top_left:\n a = [tr for tr in top_right if tr[1] == y and tr[0] > x]\n b = [bl for bl in bottom_left if bl[0] == x and bl[1] > y]\n if len(a) == 0 or len(b) == 0:\n continue\n x2, _a = a[0]\n _, y2 = b[0]\n w = x2 - x\n h = y2 - y\n rectangles.append((x, y, w, h))\n return rectangles\n\n\ndef find_corners(horizontal, vertical):\n top_left = []\n top_right = []\n bottom_left = []\n bottom_right = []\n for x_1, y_h, x_2, _ in horizontal:\n for x_v, y_1, _, y_2 in vertical:\n crossing = x_v, y_h\n if x_v >= x_1 and x_v <= x_2 and y_h <= y_1 and y_h >= y_2:\n if x_1 == x_v:\n if y_1 != y_h:\n bottom_left.append(crossing)\n if y_2 != y_h:\n top_left.append(crossing)\n elif x_2 == x_v:\n if y_1 != y_h:\n bottom_right.append(crossing)\n if y_2 != y_h:\n top_right.append(crossing)\n else:\n if y_1 != y_h:\n 
top_left.append(crossing)\n top_right.append(crossing)\n if y_2 != y_h:\n bottom_left.append(crossing)\n bottom_right.append(crossing)\n return top_left, bottom_left, bottom_right, top_right\n",
"step-3": "<mask token>\n\n\ndef detect_lines_hough(img):\n lines = cv2.HoughLinesP(cv2.bitwise_not(opening), rho=1, theta=np.pi / \n 2, threshold=50, minLineLength=120, maxLineGap=10)\n return [line[0] for line in lines]\n\n\n<mask token>\n\n\ndef detect_lines(img, min_line_length):\n \"\"\"\n Custom line detection algorithm\n \"\"\"\n height, width = img.shape\n horizontal = []\n vertical = []\n current_line = False\n current_line_start = 0\n white = img == 255\n for y in range(height):\n for x in range(width):\n is_white = white.item(y, x)\n if is_white:\n if not current_line:\n current_line = True\n current_line_start = x\n elif current_line:\n current_line = False\n if x - current_line_start > min_line_length:\n horizontal.append((current_line_start, y, x - 1, y))\n if current_line:\n current_line = False\n if x - current_line_start > min_line_length:\n horizontal.append((current_line_start, y, x - 1, y))\n current_line = False\n current_line_start = 0\n for x in range(width):\n for y in range(height):\n is_white = white.item(y, x)\n if is_white:\n if not current_line:\n current_line = True\n current_line_start = y\n elif current_line:\n current_line = False\n if y - current_line_start > min_line_length:\n vertical.append((x, y - 1, x, current_line_start))\n if current_line:\n current_line = False\n if y - current_line_start > min_line_length:\n vertical.append((x, y - 1, x, current_line_start))\n return horizontal, vertical\n\n\ndef remove_lines_close_to_border(horizontal, vertical, width, height,\n min_distance):\n horizontal_result = []\n vertical_result = []\n for h in horizontal:\n y = h[1]\n if y > min_distance and height - y > min_distance:\n horizontal_result.append(h)\n for v in vertical:\n x = v[0]\n if x > min_distance and width - x > min_distance:\n vertical_result.append(v)\n return horizontal_result, vertical_result\n\n\ndef split_by_orientation(lines):\n horizontal = []\n vertical = []\n for x1, y1, x2, y2 in lines:\n if abs(y1 - y2) > abs(x1 - 
x2):\n vertical.append((x1, y1, x2, y2))\n else:\n horizontal.append((x1, y1, x2, y2))\n return horizontal, vertical\n\n\ndef reduce_lines(input_horizontal, input_vertical, min_distance):\n \"\"\"\n Takes a list of vertical and horizontal lines,\n tries to reduce them to essential lines eliminating lines close to each\n other.\n \"\"\"\n seen_vertical = set()\n seen_horizontal = set()\n output_vertical = []\n output_horizontal = []\n for index, (x1, y1, x2, y2) in enumerate(input_vertical):\n if index in seen_vertical:\n continue\n x_values = [x1]\n for other_index, (x1_b, y1_b, x2_b, y2_b) in enumerate(input_vertical):\n if other_index in seen_vertical:\n continue\n if abs(x1 - x1_b) < min_distance:\n if y2_b < y2:\n y2 = y2_b\n if y1_b > y1:\n y1 = y1_b\n x_values.append(x1_b)\n seen_vertical.add(other_index)\n x = int(np.mean(x_values))\n output_vertical.append((x, y1, x, y2))\n for index, (x1, y1, x2, y2) in enumerate(input_horizontal):\n if index in seen_horizontal:\n continue\n y_values = [y1, y2]\n for other_index, (x1_b, y1_b, x2_b, y2_b) in enumerate(input_horizontal\n ):\n if other_index in seen_horizontal:\n continue\n if abs(y1 - y1_b) < min_distance:\n if x1_b < x1:\n x1 = x1_b\n if x2_b > x2:\n x2 = x2_b\n y_values += [y1_b, y2_b]\n seen_horizontal.add(other_index)\n y = int(np.mean(y_values))\n output_horizontal.append((x1, y, x2, y))\n return output_vertical, output_horizontal\n\n\ndef connect_lines(horizontal_lines, vertical_lines):\n \"\"\"\n Makes sure the ends of every line are touching another line\n\n Possible improvements:\n - Prefer crossing lines in the direction of the end\n - e.g. the right end of a horizontal should rather connect to a vertical to the closest_vertical_right\n - Make sure the \"crossing line\" is actually long enough to cross this line\n\n Idea:\n - Test and improve this algorithm by\n - 1. create lines a la mondrian\n - 2. randomly shorten this lines\n - 3. run the algorithm over the sortened version\n - 4. 
check whether the result is the original\n \"\"\"\n horizontal = []\n vertical = []\n for x1, y1, x2, y2 in horizontal_lines:\n closest_vertical_left = 20000\n closest_vertical_right = 20000\n for v_x1, v_y1, v_x2, v_y2 in vertical_lines:\n if abs(x1 - v_x1) < abs(closest_vertical_left):\n closest_vertical_left = x1 - v_x1\n if abs(x2 - v_x1) < abs(closest_vertical_right):\n closest_vertical_right = x2 - v_x1\n x1 = x1 - closest_vertical_left\n x2 = x2 - closest_vertical_right\n horizontal.append((x1, y1, x2, y2))\n for x1, y1, x2, y2 in vertical_lines:\n closest_horizontal_up = 20000\n closest_horizontal_down = 20000\n for h_x1, h_y1, h_x2, h_y2 in horizontal_lines:\n if abs(y1 - h_y1) < abs(closest_horizontal_up):\n closest_horizontal_up = y1 - h_y1\n if abs(y2 - h_y1) < abs(closest_horizontal_down):\n closest_horizontal_down = y2 - h_y1\n y1 = y1 - closest_horizontal_up\n y2 = y2 - closest_horizontal_down\n vertical.append((x1, y1, x2, y2))\n return horizontal, vertical\n\n\ndef find_rectangles(top_left, bottom_left, bottom_right, top_right):\n top_right.sort(key=lambda pos: pos[0])\n bottom_left.sort(key=lambda pos: pos[1])\n rectangles = []\n for x, y in top_left:\n a = [tr for tr in top_right if tr[1] == y and tr[0] > x]\n b = [bl for bl in bottom_left if bl[0] == x and bl[1] > y]\n if len(a) == 0 or len(b) == 0:\n continue\n x2, _a = a[0]\n _, y2 = b[0]\n w = x2 - x\n h = y2 - y\n rectangles.append((x, y, w, h))\n return rectangles\n\n\ndef find_corners(horizontal, vertical):\n top_left = []\n top_right = []\n bottom_left = []\n bottom_right = []\n for x_1, y_h, x_2, _ in horizontal:\n for x_v, y_1, _, y_2 in vertical:\n crossing = x_v, y_h\n if x_v >= x_1 and x_v <= x_2 and y_h <= y_1 and y_h >= y_2:\n if x_1 == x_v:\n if y_1 != y_h:\n bottom_left.append(crossing)\n if y_2 != y_h:\n top_left.append(crossing)\n elif x_2 == x_v:\n if y_1 != y_h:\n bottom_right.append(crossing)\n if y_2 != y_h:\n top_right.append(crossing)\n else:\n if y_1 != y_h:\n 
top_left.append(crossing)\n top_right.append(crossing)\n if y_2 != y_h:\n bottom_left.append(crossing)\n bottom_right.append(crossing)\n return top_left, bottom_left, bottom_right, top_right\n",
"step-4": "<mask token>\n\n\ndef detect_lines_hough(img):\n lines = cv2.HoughLinesP(cv2.bitwise_not(opening), rho=1, theta=np.pi / \n 2, threshold=50, minLineLength=120, maxLineGap=10)\n return [line[0] for line in lines]\n\n\ndef detect_lines_rust(img, min_line_length):\n height, width = img.shape\n white = (img == 255).flatten().tolist()\n detected = myrustlib.detect_lines(white, width, height, min_line_length)\n return split_by_orientation(detected)\n\n\ndef detect_lines(img, min_line_length):\n \"\"\"\n Custom line detection algorithm\n \"\"\"\n height, width = img.shape\n horizontal = []\n vertical = []\n current_line = False\n current_line_start = 0\n white = img == 255\n for y in range(height):\n for x in range(width):\n is_white = white.item(y, x)\n if is_white:\n if not current_line:\n current_line = True\n current_line_start = x\n elif current_line:\n current_line = False\n if x - current_line_start > min_line_length:\n horizontal.append((current_line_start, y, x - 1, y))\n if current_line:\n current_line = False\n if x - current_line_start > min_line_length:\n horizontal.append((current_line_start, y, x - 1, y))\n current_line = False\n current_line_start = 0\n for x in range(width):\n for y in range(height):\n is_white = white.item(y, x)\n if is_white:\n if not current_line:\n current_line = True\n current_line_start = y\n elif current_line:\n current_line = False\n if y - current_line_start > min_line_length:\n vertical.append((x, y - 1, x, current_line_start))\n if current_line:\n current_line = False\n if y - current_line_start > min_line_length:\n vertical.append((x, y - 1, x, current_line_start))\n return horizontal, vertical\n\n\ndef remove_lines_close_to_border(horizontal, vertical, width, height,\n min_distance):\n horizontal_result = []\n vertical_result = []\n for h in horizontal:\n y = h[1]\n if y > min_distance and height - y > min_distance:\n horizontal_result.append(h)\n for v in vertical:\n x = v[0]\n if x > min_distance and width - x > 
min_distance:\n vertical_result.append(v)\n return horizontal_result, vertical_result\n\n\ndef split_by_orientation(lines):\n horizontal = []\n vertical = []\n for x1, y1, x2, y2 in lines:\n if abs(y1 - y2) > abs(x1 - x2):\n vertical.append((x1, y1, x2, y2))\n else:\n horizontal.append((x1, y1, x2, y2))\n return horizontal, vertical\n\n\ndef reduce_lines(input_horizontal, input_vertical, min_distance):\n \"\"\"\n Takes a list of vertical and horizontal lines,\n tries to reduce them to essential lines eliminating lines close to each\n other.\n \"\"\"\n seen_vertical = set()\n seen_horizontal = set()\n output_vertical = []\n output_horizontal = []\n for index, (x1, y1, x2, y2) in enumerate(input_vertical):\n if index in seen_vertical:\n continue\n x_values = [x1]\n for other_index, (x1_b, y1_b, x2_b, y2_b) in enumerate(input_vertical):\n if other_index in seen_vertical:\n continue\n if abs(x1 - x1_b) < min_distance:\n if y2_b < y2:\n y2 = y2_b\n if y1_b > y1:\n y1 = y1_b\n x_values.append(x1_b)\n seen_vertical.add(other_index)\n x = int(np.mean(x_values))\n output_vertical.append((x, y1, x, y2))\n for index, (x1, y1, x2, y2) in enumerate(input_horizontal):\n if index in seen_horizontal:\n continue\n y_values = [y1, y2]\n for other_index, (x1_b, y1_b, x2_b, y2_b) in enumerate(input_horizontal\n ):\n if other_index in seen_horizontal:\n continue\n if abs(y1 - y1_b) < min_distance:\n if x1_b < x1:\n x1 = x1_b\n if x2_b > x2:\n x2 = x2_b\n y_values += [y1_b, y2_b]\n seen_horizontal.add(other_index)\n y = int(np.mean(y_values))\n output_horizontal.append((x1, y, x2, y))\n return output_vertical, output_horizontal\n\n\ndef connect_lines(horizontal_lines, vertical_lines):\n \"\"\"\n Makes sure the ends of every line are touching another line\n\n Possible improvements:\n - Prefer crossing lines in the direction of the end\n - e.g. 
the right end of a horizontal should rather connect to a vertical to the closest_vertical_right\n - Make sure the \"crossing line\" is actually long enough to cross this line\n\n Idea:\n - Test and improve this algorithm by\n - 1. create lines a la mondrian\n - 2. randomly shorten this lines\n - 3. run the algorithm over the sortened version\n - 4. check whether the result is the original\n \"\"\"\n horizontal = []\n vertical = []\n for x1, y1, x2, y2 in horizontal_lines:\n closest_vertical_left = 20000\n closest_vertical_right = 20000\n for v_x1, v_y1, v_x2, v_y2 in vertical_lines:\n if abs(x1 - v_x1) < abs(closest_vertical_left):\n closest_vertical_left = x1 - v_x1\n if abs(x2 - v_x1) < abs(closest_vertical_right):\n closest_vertical_right = x2 - v_x1\n x1 = x1 - closest_vertical_left\n x2 = x2 - closest_vertical_right\n horizontal.append((x1, y1, x2, y2))\n for x1, y1, x2, y2 in vertical_lines:\n closest_horizontal_up = 20000\n closest_horizontal_down = 20000\n for h_x1, h_y1, h_x2, h_y2 in horizontal_lines:\n if abs(y1 - h_y1) < abs(closest_horizontal_up):\n closest_horizontal_up = y1 - h_y1\n if abs(y2 - h_y1) < abs(closest_horizontal_down):\n closest_horizontal_down = y2 - h_y1\n y1 = y1 - closest_horizontal_up\n y2 = y2 - closest_horizontal_down\n vertical.append((x1, y1, x2, y2))\n return horizontal, vertical\n\n\ndef find_rectangles(top_left, bottom_left, bottom_right, top_right):\n top_right.sort(key=lambda pos: pos[0])\n bottom_left.sort(key=lambda pos: pos[1])\n rectangles = []\n for x, y in top_left:\n a = [tr for tr in top_right if tr[1] == y and tr[0] > x]\n b = [bl for bl in bottom_left if bl[0] == x and bl[1] > y]\n if len(a) == 0 or len(b) == 0:\n continue\n x2, _a = a[0]\n _, y2 = b[0]\n w = x2 - x\n h = y2 - y\n rectangles.append((x, y, w, h))\n return rectangles\n\n\ndef find_corners(horizontal, vertical):\n top_left = []\n top_right = []\n bottom_left = []\n bottom_right = []\n for x_1, y_h, x_2, _ in horizontal:\n for x_v, y_1, _, y_2 in 
vertical:\n crossing = x_v, y_h\n if x_v >= x_1 and x_v <= x_2 and y_h <= y_1 and y_h >= y_2:\n if x_1 == x_v:\n if y_1 != y_h:\n bottom_left.append(crossing)\n if y_2 != y_h:\n top_left.append(crossing)\n elif x_2 == x_v:\n if y_1 != y_h:\n bottom_right.append(crossing)\n if y_2 != y_h:\n top_right.append(crossing)\n else:\n if y_1 != y_h:\n top_left.append(crossing)\n top_right.append(crossing)\n if y_2 != y_h:\n bottom_left.append(crossing)\n bottom_right.append(crossing)\n return top_left, bottom_left, bottom_right, top_right\n",
"step-5": "import numpy as np\nimport cv2\nimport myrustlib\n\ndef detect_lines_hough(img):\n lines = cv2.HoughLinesP(\n cv2.bitwise_not(opening),\n rho = 1,\n theta = np.pi / 2,\n threshold=50,\n minLineLength=120,\n maxLineGap=10\n )\n return [line[0] for line in lines] # weird HoughLinesP output\n\ndef detect_lines_rust(img, min_line_length):\n height, width = img.shape\n white = (img == 255).flatten().tolist()\n detected = myrustlib.detect_lines(white, width, height, min_line_length)\n return split_by_orientation(detected)\n\ndef detect_lines(img, min_line_length):\n \"\"\"\n Custom line detection algorithm\n \"\"\"\n height, width = img.shape\n horizontal = []\n vertical = []\n current_line = False\n current_line_start = 0\n\n white = img == 255\n\n for y in range(height):\n for x in range(width):\n is_white = white.item(y,x)\n if(is_white):\n if not current_line:\n current_line = True\n current_line_start = x\n else:\n if current_line:\n current_line = False\n if x - current_line_start > min_line_length:\n horizontal.append((current_line_start, y, x - 1, y))\n if current_line:\n current_line = False\n if x - current_line_start > min_line_length:\n horizontal.append((current_line_start, y, x - 1, y))\n\n current_line = False\n current_line_start = 0\n for x in range(width):\n for y in range(height):\n is_white = white.item(y,x)\n if(is_white):\n if not current_line:\n current_line = True\n current_line_start = y\n else:\n if current_line:\n current_line = False\n if y - current_line_start > min_line_length:\n vertical.append((x, y - 1, x, current_line_start))\n if current_line:\n current_line = False\n if y - current_line_start > min_line_length:\n vertical.append((x, y - 1, x, current_line_start))\n return (horizontal, vertical)\n\ndef remove_lines_close_to_border(horizontal, vertical, width, height, min_distance):\n horizontal_result = []\n vertical_result = []\n for h in horizontal:\n y = h[1]\n if y > min_distance and height - y > min_distance:\n 
horizontal_result.append(h)\n for v in vertical:\n x = v[0]\n if x > min_distance and width - x > min_distance:\n vertical_result.append(v)\n return (horizontal_result, vertical_result)\n\n\ndef split_by_orientation(lines):\n horizontal = []\n vertical = []\n for x1,y1,x2,y2 in lines:\n if (abs(y1-y2) > abs(x1-x2)):\n vertical.append((x1,y1,x2,y2))\n else:\n horizontal.append((x1,y1,x2,y2))\n return (horizontal, vertical)\n\ndef reduce_lines(input_horizontal, input_vertical, min_distance):\n \"\"\"\n Takes a list of vertical and horizontal lines,\n tries to reduce them to essential lines eliminating lines close to each\n other.\n \"\"\"\n\n seen_vertical = set()\n seen_horizontal = set()\n output_vertical = []\n output_horizontal = []\n\n # vertical\n for index, (x1,y1,x2,y2) in enumerate(input_vertical):\n if index in seen_vertical:\n continue\n x_values = [x1]\n for other_index, (x1_b,y1_b,x2_b,y2_b) in enumerate(input_vertical):\n if other_index in seen_vertical:\n continue\n if (abs(x1 - x1_b) < min_distance):\n # if the end is further to the top, choose this end\n if (y2_b < y2):\n y2 = y2_b\n # if the start if further to the bottom, choose it\n if (y1_b > y1):\n y1 = y1_b\n\n x_values.append(x1_b)\n seen_vertical.add(other_index)\n\n # taking the average x value for all the lines to get the middle\n x = int(np.mean(x_values))\n output_vertical.append((x,y1,x,y2))\n\n #horizontal\n for index, (x1,y1,x2,y2) in enumerate(input_horizontal):\n if index in seen_horizontal:\n continue\n y_values = [y1, y2]\n for other_index, (x1_b,y1_b,x2_b,y2_b) in enumerate(input_horizontal):\n if other_index in seen_horizontal:\n continue\n if (abs(y1 - y1_b) < min_distance):\n # if the start if further to the left, choose this point\n if (x1_b < x1):\n x1 = x1_b\n # if the end is further to the right, choose it\n if (x2_b > x2):\n x2 = x2_b\n\n y_values += [y1_b, y2_b]\n seen_horizontal.add(other_index)\n\n # taking the average y value for all the lines to get the middle\n y = 
int(np.mean(y_values))\n output_horizontal.append((x1,y,x2,y))\n\n return (output_vertical, output_horizontal)\n\n\n\ndef connect_lines(horizontal_lines, vertical_lines):\n \"\"\"\n Makes sure the ends of every line are touching another line\n\n Possible improvements:\n - Prefer crossing lines in the direction of the end\n - e.g. the right end of a horizontal should rather connect to a vertical to the closest_vertical_right\n - Make sure the \"crossing line\" is actually long enough to cross this line\n\n Idea:\n - Test and improve this algorithm by\n - 1. create lines a la mondrian\n - 2. randomly shorten this lines\n - 3. run the algorithm over the sortened version\n - 4. check whether the result is the original\n \"\"\"\n horizontal = []\n vertical = []\n\n for x1,y1,x2,y2 in horizontal_lines:\n closest_vertical_left = 20000\n closest_vertical_right = 20000\n for v_x1,v_y1,v_x2,v_y2 in vertical_lines:\n if abs(x1 - v_x1) < abs(closest_vertical_left):\n closest_vertical_left = x1 - v_x1\n if abs(x2 - v_x1) < abs(closest_vertical_right):\n closest_vertical_right = x2 - v_x1\n x1 = x1 - closest_vertical_left\n x2 = x2 - closest_vertical_right\n horizontal.append((x1,y1,x2,y2))\n\n for x1,y1,x2,y2 in vertical_lines:\n closest_horizontal_up = 20000\n closest_horizontal_down = 20000\n for h_x1,h_y1,h_x2,h_y2 in horizontal_lines:\n if abs(y1 - h_y1) < abs(closest_horizontal_up):\n closest_horizontal_up = y1 - h_y1\n if abs(y2 - h_y1) < abs(closest_horizontal_down):\n closest_horizontal_down = y2 - h_y1\n y1 = y1 - closest_horizontal_up\n y2 = y2 - closest_horizontal_down\n vertical.append((x1,y1,x2,y2))\n\n return (horizontal, vertical)\n\n\ndef find_rectangles(top_left, bottom_left, bottom_right, top_right):\n top_right.sort(key=lambda pos: pos[0])\n bottom_left.sort(key=lambda pos: pos[1])\n rectangles = []\n for x,y in top_left:\n a = [tr for tr in top_right if tr[1] == y and tr[0] > x]\n b = [bl for bl in bottom_left if bl[0] == x and bl[1] > y]\n if (len(a) == 0 
or len(b) == 0):\n continue\n x2,_a = a[0]\n _,y2 = b[0]\n w = x2 - x\n h = y2 - y\n rectangles.append((x,y,w,h))\n return rectangles\n\n\n\ndef find_corners(horizontal, vertical):\n top_left = []\n top_right = []\n bottom_left = []\n bottom_right = []\n\n for x_1,y_h,x_2,_ in horizontal:\n for x_v,y_1,_,y_2 in vertical:\n crossing = (x_v, y_h)\n if (x_v >= x_1 and x_v <= x_2 and y_h <= y_1 and y_h >= y_2):\n if (x_1 == x_v):\n # left\n if (y_1 != y_h):\n bottom_left.append(crossing)\n if (y_2 != y_h):\n top_left.append(crossing)\n elif (x_2 == x_v):\n # right\n if (y_1 != y_h):\n bottom_right.append(crossing)\n if (y_2 != y_h):\n top_right.append(crossing)\n else:\n if y_1 != y_h:\n top_left.append(crossing)\n top_right.append(crossing)\n if y_2 != y_h:\n bottom_left.append(crossing)\n bottom_right.append(crossing)\n\n return (top_left, bottom_left, bottom_right, top_right)\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__author__ = 'Sol Amour - amoursol@gmail.com'
__twitter__ = '@solamour'
__version__ = '1.0.0'
greaterThan = 10 > 5
greaterThanOrEqualTo = 10 >= 10
lessThan = 5 < 10
lessThanOrEqualTo = 5 <= 5
equals = 5 == 5
notEquals = 5 != 10
x = 2
y = 1 < x < 3
OUT = [greaterThan, greaterThanOrEqualTo, lessThan, lessThanOrEqualTo,
equals, notEquals, y]
<|reserved_special_token_1|>
"""
COMPARISON OPERATORS
"""
__author__ = 'Sol Amour - amoursol@gmail.com'
__twitter__ = '@solamour'
__version__ = '1.0.0'
greaterThan = 10 > 5 # Is '10' greater than '5' ? Evaluates to True
greaterThanOrEqualTo = 10 >= 10 # Is '10' greater than or equal to '10'
# ? Evaluates to True
lessThan = 5 < 10 # Is '5' less than '10' ? Evaluates to True
lessThanOrEqualTo = 5 <= 5 # Is '5' less than or equal to '5' ? Evaluates
# to True
equals = 5 == 5 # Does '5' equal '5' ? Evaluates to True
notEquals = 5 != 10 # Does '5' not equal '10' ? Evaluates to True
x = 2 # Assinging the variable of 'x' a value of '2'
y = 1 < x < 3 # Is '1' less than 'x' (2) is less than 3 ? Evaluates to True
OUT = [greaterThan, greaterThanOrEqualTo, lessThan, lessThanOrEqualTo,
equals, notEquals, y]
|
flexible
|
{
"blob_id": "3b737aaa820da8f70a80480c6404e4d3a9d2262e",
"index": 5602,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__author__ = 'Sol Amour - amoursol@gmail.com'\n__twitter__ = '@solamour'\n__version__ = '1.0.0'\ngreaterThan = 10 > 5\ngreaterThanOrEqualTo = 10 >= 10\nlessThan = 5 < 10\nlessThanOrEqualTo = 5 <= 5\nequals = 5 == 5\nnotEquals = 5 != 10\nx = 2\ny = 1 < x < 3\nOUT = [greaterThan, greaterThanOrEqualTo, lessThan, lessThanOrEqualTo,\n equals, notEquals, y]\n",
"step-3": "\"\"\"\nCOMPARISON OPERATORS\n\"\"\"\n__author__ = 'Sol Amour - amoursol@gmail.com'\n__twitter__ = '@solamour'\n__version__ = '1.0.0'\n\ngreaterThan = 10 > 5 # Is '10' greater than '5' ? Evaluates to True\ngreaterThanOrEqualTo = 10 >= 10 # Is '10' greater than or equal to '10' \n# ? Evaluates to True\nlessThan = 5 < 10 # Is '5' less than '10' ? Evaluates to True\nlessThanOrEqualTo = 5 <= 5 # Is '5' less than or equal to '5' ? Evaluates \n# to True\nequals = 5 == 5 # Does '5' equal '5' ? Evaluates to True\nnotEquals = 5 != 10 # Does '5' not equal '10' ? Evaluates to True\n\nx = 2 # Assinging the variable of 'x' a value of '2'\ny = 1 < x < 3 # Is '1' less than 'x' (2) is less than 3 ? Evaluates to True\n\nOUT = [greaterThan, greaterThanOrEqualTo, lessThan, lessThanOrEqualTo,\nequals, notEquals, y]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Generated by Django 3.2 on 2021-05-22 06:54
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Recuerdos',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('titulo_evento', models.CharField(blank=True, max_length=100, null=True)),
('foto1', models.ImageField(blank=True, null=True, upload_to='recuerdos')),
('foto2', models.ImageField(blank=True, null=True, upload_to='recuerdos')),
('foto3', models.ImageField(blank=True, null=True, upload_to='recuerdos')),
('created', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name': 'Recuerdo',
'verbose_name_plural': 'recurdo',
},
),
]
|
normal
|
{
"blob_id": "89d0d5d13c5106c504c6727c7784f048a30495dc",
"index": 5560,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Recuerdos', fields=[('id',\n models.BigAutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('titulo_evento', models.CharField(\n blank=True, max_length=100, null=True)), ('foto1', models.\n ImageField(blank=True, null=True, upload_to='recuerdos')), ('foto2',\n models.ImageField(blank=True, null=True, upload_to='recuerdos')), (\n 'foto3', models.ImageField(blank=True, null=True, upload_to=\n 'recuerdos')), ('created', models.DateTimeField(auto_now_add=True))\n ], options={'verbose_name': 'Recuerdo', 'verbose_name_plural':\n 'recurdo'})]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Recuerdos', fields=[('id',\n models.BigAutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('titulo_evento', models.CharField(\n blank=True, max_length=100, null=True)), ('foto1', models.\n ImageField(blank=True, null=True, upload_to='recuerdos')), ('foto2',\n models.ImageField(blank=True, null=True, upload_to='recuerdos')), (\n 'foto3', models.ImageField(blank=True, null=True, upload_to=\n 'recuerdos')), ('created', models.DateTimeField(auto_now_add=True))\n ], options={'verbose_name': 'Recuerdo', 'verbose_name_plural':\n 'recurdo'})]\n",
"step-5": "# Generated by Django 3.2 on 2021-05-22 06:54\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Recuerdos',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('titulo_evento', models.CharField(blank=True, max_length=100, null=True)),\n ('foto1', models.ImageField(blank=True, null=True, upload_to='recuerdos')),\n ('foto2', models.ImageField(blank=True, null=True, upload_to='recuerdos')),\n ('foto3', models.ImageField(blank=True, null=True, upload_to='recuerdos')),\n ('created', models.DateTimeField(auto_now_add=True)),\n ],\n options={\n 'verbose_name': 'Recuerdo',\n 'verbose_name_plural': 'recurdo',\n },\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/')
def index():
buf = io.BytesIO()
buf.write('hello world')
buf.seek(0)
return send_file(buf, attachment_filename='testing.txt', as_attachment=True
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
@app.route('/')
def index():
buf = io.BytesIO()
buf.write('hello world')
buf.seek(0)
return send_file(buf, attachment_filename='testing.txt', as_attachment=True
)
<|reserved_special_token_1|>
import io
from flask import Flask, send_file
app = Flask(__name__)
@app.route('/')
def index():
buf = io.BytesIO()
buf.write('hello world')
buf.seek(0)
return send_file(buf, attachment_filename='testing.txt', as_attachment=True
)
<|reserved_special_token_1|>
import io
from flask import Flask, send_file
app = Flask(__name__)
@app.route('/')
def index():
buf = io.BytesIO()
buf.write('hello world')
buf.seek(0)
return send_file(buf,
attachment_filename="testing.txt",
as_attachment=True)
|
flexible
|
{
"blob_id": "362c4e572f0fe61b77e54ab5608d4cd052291da4",
"index": 4043,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef index():\n buf = io.BytesIO()\n buf.write('hello world')\n buf.seek(0)\n return send_file(buf, attachment_filename='testing.txt', as_attachment=True\n )\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n buf = io.BytesIO()\n buf.write('hello world')\n buf.seek(0)\n return send_file(buf, attachment_filename='testing.txt', as_attachment=True\n )\n",
"step-4": "import io\nfrom flask import Flask, send_file\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n buf = io.BytesIO()\n buf.write('hello world')\n buf.seek(0)\n return send_file(buf, attachment_filename='testing.txt', as_attachment=True\n )\n",
"step-5": "import io\n\nfrom flask import Flask, send_file\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n buf = io.BytesIO()\n buf.write('hello world')\n buf.seek(0)\n return send_file(buf,\n attachment_filename=\"testing.txt\",\n as_attachment=True)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def randomMath():
correct = 0
while correct < 10:
str_ops = ['+', '-', '*', '/', '%']
ops = {'+': o.add, '-': o.sub, '*': o.mul, '/': o.floordiv, '%': o.mod}
x = r(1, 10)
y = r(1, 10)
op = str_ops[r(0, 4)]
inp = input(str(x) + op + str(y) + '=')
if int(inp) == ops[op](x, y):
correct += 1
print('Correct! Only ' + str(10 - correct) +
' correct answers to go!')
else:
print('Wrong! ' + str(10 - correct) + ' correct answers to go!')
print('Congrats!! Good brain training.')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def randomMath():
correct = 0
while correct < 10:
str_ops = ['+', '-', '*', '/', '%']
ops = {'+': o.add, '-': o.sub, '*': o.mul, '/': o.floordiv, '%': o.mod}
x = r(1, 10)
y = r(1, 10)
op = str_ops[r(0, 4)]
inp = input(str(x) + op + str(y) + '=')
if int(inp) == ops[op](x, y):
correct += 1
print('Correct! Only ' + str(10 - correct) +
' correct answers to go!')
else:
print('Wrong! ' + str(10 - correct) + ' correct answers to go!')
print('Congrats!! Good brain training.')
randomMath()
<|reserved_special_token_1|>
from random import randint as r
import operator as o
def randomMath():
correct = 0
while correct < 10:
str_ops = ['+', '-', '*', '/', '%']
ops = {'+': o.add, '-': o.sub, '*': o.mul, '/': o.floordiv, '%': o.mod}
x = r(1, 10)
y = r(1, 10)
op = str_ops[r(0, 4)]
inp = input(str(x) + op + str(y) + '=')
if int(inp) == ops[op](x, y):
correct += 1
print('Correct! Only ' + str(10 - correct) +
' correct answers to go!')
else:
print('Wrong! ' + str(10 - correct) + ' correct answers to go!')
print('Congrats!! Good brain training.')
randomMath()
<|reserved_special_token_1|>
#cerner_2^5_2019
#Mason Seeger submission 1
from random import randint as r
import operator as o
#Only works with valid integers. A function for quick math brain training.
def randomMath():
correct = 0
while(correct<10):
str_ops = ['+', '-', '*', '/', '%']
ops = {'+': o.add, '-': o.sub, '*': o.mul, '/': o.floordiv, '%': o.mod}
x = r(1,10)
y = r(1,10)
op = str_ops[r(0,4)]
inp = input(str(x) + op + str(y) + '=')
if int(inp) == ops[op](x, y):
correct+=1
print("Correct! Only " + str(10-correct) + ' correct answers to go!')
else:
print("Wrong! " + str(10-correct) + ' correct answers to go!')
print("Congrats!! Good brain training.")
randomMath()
|
flexible
|
{
"blob_id": "12f035962925c5380c782e8fad23f16fe9fb9435",
"index": 5311,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef randomMath():\n correct = 0\n while correct < 10:\n str_ops = ['+', '-', '*', '/', '%']\n ops = {'+': o.add, '-': o.sub, '*': o.mul, '/': o.floordiv, '%': o.mod}\n x = r(1, 10)\n y = r(1, 10)\n op = str_ops[r(0, 4)]\n inp = input(str(x) + op + str(y) + '=')\n if int(inp) == ops[op](x, y):\n correct += 1\n print('Correct! Only ' + str(10 - correct) +\n ' correct answers to go!')\n else:\n print('Wrong! ' + str(10 - correct) + ' correct answers to go!')\n print('Congrats!! Good brain training.')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef randomMath():\n correct = 0\n while correct < 10:\n str_ops = ['+', '-', '*', '/', '%']\n ops = {'+': o.add, '-': o.sub, '*': o.mul, '/': o.floordiv, '%': o.mod}\n x = r(1, 10)\n y = r(1, 10)\n op = str_ops[r(0, 4)]\n inp = input(str(x) + op + str(y) + '=')\n if int(inp) == ops[op](x, y):\n correct += 1\n print('Correct! Only ' + str(10 - correct) +\n ' correct answers to go!')\n else:\n print('Wrong! ' + str(10 - correct) + ' correct answers to go!')\n print('Congrats!! Good brain training.')\n\n\nrandomMath()\n",
"step-4": "from random import randint as r\nimport operator as o\n\n\ndef randomMath():\n correct = 0\n while correct < 10:\n str_ops = ['+', '-', '*', '/', '%']\n ops = {'+': o.add, '-': o.sub, '*': o.mul, '/': o.floordiv, '%': o.mod}\n x = r(1, 10)\n y = r(1, 10)\n op = str_ops[r(0, 4)]\n inp = input(str(x) + op + str(y) + '=')\n if int(inp) == ops[op](x, y):\n correct += 1\n print('Correct! Only ' + str(10 - correct) +\n ' correct answers to go!')\n else:\n print('Wrong! ' + str(10 - correct) + ' correct answers to go!')\n print('Congrats!! Good brain training.')\n\n\nrandomMath()\n",
"step-5": "#cerner_2^5_2019\n#Mason Seeger submission 1\n\nfrom random import randint as r\nimport operator as o\n\n#Only works with valid integers. A function for quick math brain training.\ndef randomMath():\n correct = 0\n while(correct<10):\n str_ops = ['+', '-', '*', '/', '%']\n ops = {'+': o.add, '-': o.sub, '*': o.mul, '/': o.floordiv, '%': o.mod}\n x = r(1,10)\n y = r(1,10)\n op = str_ops[r(0,4)]\n\n inp = input(str(x) + op + str(y) + '=')\n if int(inp) == ops[op](x, y):\n correct+=1\n print(\"Correct! Only \" + str(10-correct) + ' correct answers to go!')\n else:\n print(\"Wrong! \" + str(10-correct) + ' correct answers to go!')\n\n print(\"Congrats!! Good brain training.\")\n\nrandomMath()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def printBoard(board,pref):
border = "+----+----+----+----+----+----+----+----+"
for row in board:
print(pref,border)
cells ="|"
for cell in row:
if cell == 0:
cell = " "
elif cell in range(1,10):
cell = "0{}".format(cell)
cells +=" {} ".format(cell)
cells +="|"
print(pref,cells )
print(pref,border)
|
normal
|
{
"blob_id": "07e875a24d0e63ef596db57c4ec402f768225eec",
"index": 5103,
"step-1": "<mask token>\n",
"step-2": "def printBoard(board, pref):\n border = '+----+----+----+----+----+----+----+----+'\n for row in board:\n print(pref, border)\n cells = '|'\n for cell in row:\n if cell == 0:\n cell = ' '\n elif cell in range(1, 10):\n cell = '0{}'.format(cell)\n cells += ' {} '.format(cell)\n cells += '|'\n print(pref, cells)\n print(pref, border)\n",
"step-3": "def printBoard(board,pref):\n border = \"+----+----+----+----+----+----+----+----+\"\n for row in board:\n print(pref,border)\n cells =\"|\"\n for cell in row:\n if cell == 0:\n cell = \" \"\n elif cell in range(1,10):\n cell = \"0{}\".format(cell)\n cells +=\" {} \".format(cell)\n cells +=\"|\"\n \n print(pref,cells )\n print(pref,border)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-07 23:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0005_auto_20160207_1529'),
]
operations = [
migrations.AddField(
model_name='event',
name='skins_type',
field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),
),
migrations.AddField(
model_name='eventtemplate',
name='skins_type',
field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),
),
migrations.AddField(
model_name='historicalevent',
name='skins_type',
field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),
),
migrations.AddField(
model_name='historicaleventtemplate',
name='skins_type',
field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),
),
migrations.AlterField(
model_name='event',
name='event_type',
field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),
),
migrations.AlterField(
model_name='event',
name='scoring',
field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),
),
migrations.AlterField(
model_name='eventtemplate',
name='event_type',
field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),
),
migrations.AlterField(
model_name='eventtemplate',
name='scoring',
field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),
),
migrations.AlterField(
model_name='historicalevent',
name='event_type',
field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),
),
migrations.AlterField(
model_name='historicalevent',
name='scoring',
field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),
),
migrations.AlterField(
model_name='historicaleventtemplate',
name='event_type',
field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),
),
migrations.AlterField(
model_name='historicaleventtemplate',
name='scoring',
field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),
),
]
|
normal
|
{
"blob_id": "ab3609c27fa002d79735c5d5c09ec7a52fedd040",
"index": 3484,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('events', '0005_auto_20160207_1529')]\n operations = [migrations.AddField(model_name='event', name='skins_type',\n field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'),\n ('N', 'No Skins')], default='N', max_length=1, verbose_name=\n 'Skins type')), migrations.AddField(model_name='eventtemplate',\n name='skins_type', field=models.CharField(choices=[('I',\n 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N',\n max_length=1, verbose_name='Skins type')), migrations.AddField(\n model_name='historicalevent', name='skins_type', field=models.\n CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N',\n 'No Skins')], default='N', max_length=1, verbose_name='Skins type')\n ), migrations.AddField(model_name='historicaleventtemplate', name=\n 'skins_type', field=models.CharField(choices=[('I', 'Individual'),\n ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1,\n verbose_name='Skins type')), migrations.AlterField(model_name=\n 'event', name='event_type', field=models.CharField(choices=[('L',\n 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'),\n ('O', 'Other')], default='M', max_length=1, verbose_name=\n 'Event type')), migrations.AlterField(model_name='event', name=\n 'scoring', field=models.CharField(choices=[('IN', 'Individual'), (\n 'TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS',\n 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC',\n 'Team: Combination')], default='IN', max_length=3, verbose_name=\n 'Scoring type')), migrations.AlterField(model_name='eventtemplate',\n name='event_type', field=models.CharField(choices=[('L', 'League'),\n ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O',\n 'Other')], default='M', max_length=1, verbose_name='Event type')),\n migrations.AlterField(model_name='eventtemplate', name='scoring',\n field=models.CharField(choices=[('IN', 'Individual'), ('TBB',\n 'Team: 
Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS',\n 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC',\n 'Team: Combination')], default='IN', max_length=3, verbose_name=\n 'Scoring type')), migrations.AlterField(model_name=\n 'historicalevent', name='event_type', field=models.CharField(\n choices=[('L', 'League'), ('M', 'Weekend Major'), ('H',\n 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length\n =1, verbose_name='Event type')), migrations.AlterField(model_name=\n 'historicalevent', name='scoring', field=models.CharField(choices=[\n ('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG',\n 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA',\n 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN',\n max_length=3, verbose_name='Scoring type')), migrations.AlterField(\n model_name='historicaleventtemplate', name='event_type', field=\n models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'),\n ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M',\n max_length=1, verbose_name='Event type')), migrations.AlterField(\n model_name='historicaleventtemplate', name='scoring', field=models.\n CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'),\n ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA',\n 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN',\n max_length=3, verbose_name='Scoring type'))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('events', '0005_auto_20160207_1529')]\n operations = [migrations.AddField(model_name='event', name='skins_type',\n field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'),\n ('N', 'No Skins')], default='N', max_length=1, verbose_name=\n 'Skins type')), migrations.AddField(model_name='eventtemplate',\n name='skins_type', field=models.CharField(choices=[('I',\n 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N',\n max_length=1, verbose_name='Skins type')), migrations.AddField(\n model_name='historicalevent', name='skins_type', field=models.\n CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N',\n 'No Skins')], default='N', max_length=1, verbose_name='Skins type')\n ), migrations.AddField(model_name='historicaleventtemplate', name=\n 'skins_type', field=models.CharField(choices=[('I', 'Individual'),\n ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1,\n verbose_name='Skins type')), migrations.AlterField(model_name=\n 'event', name='event_type', field=models.CharField(choices=[('L',\n 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'),\n ('O', 'Other')], default='M', max_length=1, verbose_name=\n 'Event type')), migrations.AlterField(model_name='event', name=\n 'scoring', field=models.CharField(choices=[('IN', 'Individual'), (\n 'TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS',\n 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC',\n 'Team: Combination')], default='IN', max_length=3, verbose_name=\n 'Scoring type')), migrations.AlterField(model_name='eventtemplate',\n name='event_type', field=models.CharField(choices=[('L', 'League'),\n ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O',\n 'Other')], default='M', max_length=1, verbose_name='Event type')),\n migrations.AlterField(model_name='eventtemplate', name='scoring',\n 
field=models.CharField(choices=[('IN', 'Individual'), ('TBB',\n 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS',\n 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC',\n 'Team: Combination')], default='IN', max_length=3, verbose_name=\n 'Scoring type')), migrations.AlterField(model_name=\n 'historicalevent', name='event_type', field=models.CharField(\n choices=[('L', 'League'), ('M', 'Weekend Major'), ('H',\n 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length\n =1, verbose_name='Event type')), migrations.AlterField(model_name=\n 'historicalevent', name='scoring', field=models.CharField(choices=[\n ('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG',\n 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA',\n 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN',\n max_length=3, verbose_name='Scoring type')), migrations.AlterField(\n model_name='historicaleventtemplate', name='event_type', field=\n models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'),\n ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M',\n max_length=1, verbose_name='Event type')), migrations.AlterField(\n model_name='historicaleventtemplate', name='scoring', field=models.\n CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'),\n ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA',\n 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN',\n max_length=3, verbose_name='Scoring type'))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-02-07 23:42\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('events', '0005_auto_20160207_1529'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='event',\n name='skins_type',\n field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),\n ),\n migrations.AddField(\n model_name='eventtemplate',\n name='skins_type',\n field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),\n ),\n migrations.AddField(\n model_name='historicalevent',\n name='skins_type',\n field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),\n ),\n migrations.AddField(\n model_name='historicaleventtemplate',\n name='skins_type',\n field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),\n ),\n migrations.AlterField(\n model_name='event',\n name='event_type',\n field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),\n ),\n migrations.AlterField(\n model_name='event',\n name='scoring',\n field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),\n ),\n migrations.AlterField(\n model_name='eventtemplate',\n name='event_type',\n field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', 
max_length=1, verbose_name='Event type'),\n ),\n migrations.AlterField(\n model_name='eventtemplate',\n name='scoring',\n field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),\n ),\n migrations.AlterField(\n model_name='historicalevent',\n name='event_type',\n field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),\n ),\n migrations.AlterField(\n model_name='historicalevent',\n name='scoring',\n field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),\n ),\n migrations.AlterField(\n model_name='historicaleventtemplate',\n name='event_type',\n field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),\n ),\n migrations.AlterField(\n model_name='historicaleventtemplate',\n name='scoring',\n field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def res(p, y, x):
m, dm, sd1, sd2 = p
m1 = m
m2 = m1 + m
y_fit = norm(x, m1, sd1) + norm(x, m2, sd2)
error = y - y_fit
return error
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def norm(x, media, sd):
norm = []
for i in range(x.size):
norm += [1.0 / (sd * np.sqrt(2 * np.pi)) * np.exp(-(x[i] - media) **
2 / (2 * sd ** 2))]
return np.array(norm)
<|reserved_special_token_0|>
def res(p, y, x):
m, dm, sd1, sd2 = p
m1 = m
m2 = m1 + m
y_fit = norm(x, m1, sd1) + norm(x, m2, sd2)
error = y - y_fit
return error
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def norm(x, media, sd):
norm = []
for i in range(x.size):
norm += [1.0 / (sd * np.sqrt(2 * np.pi)) * np.exp(-(x[i] - media) **
2 / (2 * sd ** 2))]
return np.array(norm)
media1 = 0
media2 = -2
std1 = 0.5
std2 = 1
x = np.linspace(-20, 20, 500)
y_real = norm(x, media1, std1) + norm(x, media2, std2)
m, dm, sd1, sd2 = [5, 10, 1, 1]
p = [m, dm, sd1, sd2]
y_init = norm(x, m, sd1) + norm(x, m + dm, sd2)
def res(p, y, x):
m, dm, sd1, sd2 = p
m1 = m
m2 = m1 + m
y_fit = norm(x, m1, sd1) + norm(x, m2, sd2)
error = y - y_fit
return error
plsq = leastsq(res, p, args=(y_real, x))
y_est = norm(x, plsq[0][0], plsq[0][2]) + norm(x, plsq[0][0] + plsq[0][1],
plsq[0][3])
<|reserved_special_token_1|>
import matplotlib.pyplot as pt
import numpy as np
from scipy.optimize import leastsq
def norm(x, media, sd):
norm = []
for i in range(x.size):
norm += [1.0 / (sd * np.sqrt(2 * np.pi)) * np.exp(-(x[i] - media) **
2 / (2 * sd ** 2))]
return np.array(norm)
media1 = 0
media2 = -2
std1 = 0.5
std2 = 1
x = np.linspace(-20, 20, 500)
y_real = norm(x, media1, std1) + norm(x, media2, std2)
m, dm, sd1, sd2 = [5, 10, 1, 1]
p = [m, dm, sd1, sd2]
y_init = norm(x, m, sd1) + norm(x, m + dm, sd2)
def res(p, y, x):
m, dm, sd1, sd2 = p
m1 = m
m2 = m1 + m
y_fit = norm(x, m1, sd1) + norm(x, m2, sd2)
error = y - y_fit
return error
plsq = leastsq(res, p, args=(y_real, x))
y_est = norm(x, plsq[0][0], plsq[0][2]) + norm(x, plsq[0][0] + plsq[0][1],
plsq[0][3])
<|reserved_special_token_1|>
import matplotlib.pyplot as pt
import numpy as np
from scipy.optimize import leastsq
####################################
# Setting up test data
def norm(x, media, sd):
norm = []
for i in range(x.size):
norm += [1.0/(sd*np.sqrt(2*np.pi))*np.exp(-(x[i] - media)**2/(2*sd**2))]
return np.array(norm)
media1 = 0
media2 = -2
std1 = 0.5
std2 = 1
x = np.linspace(-20, 20, 500)
y_real = norm(x, media1, std1) + norm(x, media2, std2)
######################################
# Solving
m, dm, sd1, sd2 = [5, 10, 1, 1]
p = [m, dm, sd1, sd2] # Initial guesses for leastsq
y_init = norm(x,m,sd1) + norm(x, m + dm, sd2) # For final comparison plot
def res(p, y, x):
m, dm, sd1, sd2 = p
m1 = m
m2 = m1 + m
y_fit = norm(x, m1, sd1) + norm(x, m2, sd2)
error = y - y_fit
return error
plsq = leastsq(res, p, args = (y_real, x))
y_est = norm(x, plsq[0][0], plsq[0][2]) + norm(x, plsq[0][0] + plsq[0][1], plsq[0][3])
|
flexible
|
{
"blob_id": "b3ce17401476afe2edfda3011d5602ba492cd705",
"index": 5817,
"step-1": "<mask token>\n\n\ndef res(p, y, x):\n m, dm, sd1, sd2 = p\n m1 = m\n m2 = m1 + m\n y_fit = norm(x, m1, sd1) + norm(x, m2, sd2)\n error = y - y_fit\n return error\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef norm(x, media, sd):\n norm = []\n for i in range(x.size):\n norm += [1.0 / (sd * np.sqrt(2 * np.pi)) * np.exp(-(x[i] - media) **\n 2 / (2 * sd ** 2))]\n return np.array(norm)\n\n\n<mask token>\n\n\ndef res(p, y, x):\n m, dm, sd1, sd2 = p\n m1 = m\n m2 = m1 + m\n y_fit = norm(x, m1, sd1) + norm(x, m2, sd2)\n error = y - y_fit\n return error\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef norm(x, media, sd):\n norm = []\n for i in range(x.size):\n norm += [1.0 / (sd * np.sqrt(2 * np.pi)) * np.exp(-(x[i] - media) **\n 2 / (2 * sd ** 2))]\n return np.array(norm)\n\n\nmedia1 = 0\nmedia2 = -2\nstd1 = 0.5\nstd2 = 1\nx = np.linspace(-20, 20, 500)\ny_real = norm(x, media1, std1) + norm(x, media2, std2)\nm, dm, sd1, sd2 = [5, 10, 1, 1]\np = [m, dm, sd1, sd2]\ny_init = norm(x, m, sd1) + norm(x, m + dm, sd2)\n\n\ndef res(p, y, x):\n m, dm, sd1, sd2 = p\n m1 = m\n m2 = m1 + m\n y_fit = norm(x, m1, sd1) + norm(x, m2, sd2)\n error = y - y_fit\n return error\n\n\nplsq = leastsq(res, p, args=(y_real, x))\ny_est = norm(x, plsq[0][0], plsq[0][2]) + norm(x, plsq[0][0] + plsq[0][1],\n plsq[0][3])\n",
"step-4": "import matplotlib.pyplot as pt\nimport numpy as np\nfrom scipy.optimize import leastsq\n\n\ndef norm(x, media, sd):\n norm = []\n for i in range(x.size):\n norm += [1.0 / (sd * np.sqrt(2 * np.pi)) * np.exp(-(x[i] - media) **\n 2 / (2 * sd ** 2))]\n return np.array(norm)\n\n\nmedia1 = 0\nmedia2 = -2\nstd1 = 0.5\nstd2 = 1\nx = np.linspace(-20, 20, 500)\ny_real = norm(x, media1, std1) + norm(x, media2, std2)\nm, dm, sd1, sd2 = [5, 10, 1, 1]\np = [m, dm, sd1, sd2]\ny_init = norm(x, m, sd1) + norm(x, m + dm, sd2)\n\n\ndef res(p, y, x):\n m, dm, sd1, sd2 = p\n m1 = m\n m2 = m1 + m\n y_fit = norm(x, m1, sd1) + norm(x, m2, sd2)\n error = y - y_fit\n return error\n\n\nplsq = leastsq(res, p, args=(y_real, x))\ny_est = norm(x, plsq[0][0], plsq[0][2]) + norm(x, plsq[0][0] + plsq[0][1],\n plsq[0][3])\n",
"step-5": "import matplotlib.pyplot as pt\nimport numpy as np\nfrom scipy.optimize import leastsq\n\n####################################\n# Setting up test data\n\ndef norm(x, media, sd):\n norm = []\n\n for i in range(x.size):\n norm += [1.0/(sd*np.sqrt(2*np.pi))*np.exp(-(x[i] - media)**2/(2*sd**2))]\n return np.array(norm)\n\nmedia1 = 0\nmedia2 = -2\nstd1 = 0.5\nstd2 = 1\n\nx = np.linspace(-20, 20, 500)\ny_real = norm(x, media1, std1) + norm(x, media2, std2)\n\n######################################\n# Solving\n\nm, dm, sd1, sd2 = [5, 10, 1, 1]\np = [m, dm, sd1, sd2] # Initial guesses for leastsq\ny_init = norm(x,m,sd1) + norm(x, m + dm, sd2) # For final comparison plot\n\ndef res(p, y, x):\n m, dm, sd1, sd2 = p\n\n m1 = m\n m2 = m1 + m\n y_fit = norm(x, m1, sd1) + norm(x, m2, sd2)\n error = y - y_fit\n\n return error\n\nplsq = leastsq(res, p, args = (y_real, x))\n\ny_est = norm(x, plsq[0][0], plsq[0][2]) + norm(x, plsq[0][0] + plsq[0][1], plsq[0][3])\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if not os.path.exists(filepath + pathRGB):
os.makedirs(filepath + pathRGB)
backSubInstance.setConfig('sample.cfg')
for filename in glob.glob(filepath + extension):
pathAndFile = os.path.splitext(filename)[0]
latestFilename = ntpath.basename(pathAndFile)
image = cv2.imread(filepath + latestFilename + '.jpg', cv2.
CV_LOAD_IMAGE_COLOR)
print(latestFilename)
diffImage = backSubInstance.getDiff(image)
resultFileName = filepath + pathRGB + latestFilename + 'motion' + str(
batchCount) + '.jpg'
cv2.imwrite(resultFileName, diffImage)
batchCount += 1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
filepath = './tl3Pictures/'
pathRGB = '.diff/'
extension = '*.jpg'
batchCount = 0
backSubInstance = backSub()
if not os.path.exists(filepath + pathRGB):
os.makedirs(filepath + pathRGB)
backSubInstance.setConfig('sample.cfg')
for filename in glob.glob(filepath + extension):
pathAndFile = os.path.splitext(filename)[0]
latestFilename = ntpath.basename(pathAndFile)
image = cv2.imread(filepath + latestFilename + '.jpg', cv2.
CV_LOAD_IMAGE_COLOR)
print(latestFilename)
diffImage = backSubInstance.getDiff(image)
resultFileName = filepath + pathRGB + latestFilename + 'motion' + str(
batchCount) + '.jpg'
cv2.imwrite(resultFileName, diffImage)
batchCount += 1
<|reserved_special_token_1|>
import cv2
import numpy
import os
import glob
import ntpath
from backSub import *
from ConfigParser import SafeConfigParser
filepath = './tl3Pictures/'
pathRGB = '.diff/'
extension = '*.jpg'
batchCount = 0
backSubInstance = backSub()
if not os.path.exists(filepath + pathRGB):
os.makedirs(filepath + pathRGB)
backSubInstance.setConfig('sample.cfg')
for filename in glob.glob(filepath + extension):
pathAndFile = os.path.splitext(filename)[0]
latestFilename = ntpath.basename(pathAndFile)
image = cv2.imread(filepath + latestFilename + '.jpg', cv2.
CV_LOAD_IMAGE_COLOR)
print(latestFilename)
diffImage = backSubInstance.getDiff(image)
resultFileName = filepath + pathRGB + latestFilename + 'motion' + str(
batchCount) + '.jpg'
cv2.imwrite(resultFileName, diffImage)
batchCount += 1
<|reserved_special_token_1|>
import cv2
import numpy
import os
import glob
import ntpath
from backSub import *
from ConfigParser import SafeConfigParser
filepath = "./tl3Pictures/" # where the input files are
pathRGB = ".diff/" # where the result is saved
extension = "*.jpg" # only jpg files considered
batchCount = 0
backSubInstance = backSub()
if not os.path.exists(filepath + pathRGB):
os.makedirs(filepath+pathRGB) #create the result folder if it
# is not there
backSubInstance.setConfig('sample.cfg') # load the backSub parameters
# from the configuration file
for filename in glob.glob(filepath + extension):
#print(filename) #full file name and path
pathAndFile = os.path.splitext(filename)[0]
#print(pathAndFile) #file name and path without extension
latestFilename = ntpath.basename(pathAndFile)
#print(latestFilename) #only file name
image = cv2.imread(filepath + latestFilename + ".jpg",\
cv2.CV_LOAD_IMAGE_COLOR) #read the image from the source
print(latestFilename)
diffImage = backSubInstance.getDiff(image) # get the difference image
resultFileName = filepath + pathRGB + latestFilename + "motion"+ \
str(batchCount) + ".jpg" #contruct the path where to save diffImage
cv2.imwrite(resultFileName, diffImage) # write the image to the
# destination
batchCount +=1
|
flexible
|
{
"blob_id": "506d33587ff6c8b2c3d9bc546307996d2f518d86",
"index": 2060,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif not os.path.exists(filepath + pathRGB):\n os.makedirs(filepath + pathRGB)\nbackSubInstance.setConfig('sample.cfg')\nfor filename in glob.glob(filepath + extension):\n pathAndFile = os.path.splitext(filename)[0]\n latestFilename = ntpath.basename(pathAndFile)\n image = cv2.imread(filepath + latestFilename + '.jpg', cv2.\n CV_LOAD_IMAGE_COLOR)\n print(latestFilename)\n diffImage = backSubInstance.getDiff(image)\n resultFileName = filepath + pathRGB + latestFilename + 'motion' + str(\n batchCount) + '.jpg'\n cv2.imwrite(resultFileName, diffImage)\n batchCount += 1\n",
"step-3": "<mask token>\nfilepath = './tl3Pictures/'\npathRGB = '.diff/'\nextension = '*.jpg'\nbatchCount = 0\nbackSubInstance = backSub()\nif not os.path.exists(filepath + pathRGB):\n os.makedirs(filepath + pathRGB)\nbackSubInstance.setConfig('sample.cfg')\nfor filename in glob.glob(filepath + extension):\n pathAndFile = os.path.splitext(filename)[0]\n latestFilename = ntpath.basename(pathAndFile)\n image = cv2.imread(filepath + latestFilename + '.jpg', cv2.\n CV_LOAD_IMAGE_COLOR)\n print(latestFilename)\n diffImage = backSubInstance.getDiff(image)\n resultFileName = filepath + pathRGB + latestFilename + 'motion' + str(\n batchCount) + '.jpg'\n cv2.imwrite(resultFileName, diffImage)\n batchCount += 1\n",
"step-4": "import cv2\nimport numpy\nimport os\nimport glob\nimport ntpath\nfrom backSub import *\nfrom ConfigParser import SafeConfigParser\nfilepath = './tl3Pictures/'\npathRGB = '.diff/'\nextension = '*.jpg'\nbatchCount = 0\nbackSubInstance = backSub()\nif not os.path.exists(filepath + pathRGB):\n os.makedirs(filepath + pathRGB)\nbackSubInstance.setConfig('sample.cfg')\nfor filename in glob.glob(filepath + extension):\n pathAndFile = os.path.splitext(filename)[0]\n latestFilename = ntpath.basename(pathAndFile)\n image = cv2.imread(filepath + latestFilename + '.jpg', cv2.\n CV_LOAD_IMAGE_COLOR)\n print(latestFilename)\n diffImage = backSubInstance.getDiff(image)\n resultFileName = filepath + pathRGB + latestFilename + 'motion' + str(\n batchCount) + '.jpg'\n cv2.imwrite(resultFileName, diffImage)\n batchCount += 1\n",
"step-5": "import cv2\r\nimport numpy\r\nimport os \r\nimport glob\r\nimport ntpath\r\nfrom backSub import *\r\nfrom ConfigParser import SafeConfigParser\r\n\r\n\r\nfilepath = \"./tl3Pictures/\" # where the input files are\r\npathRGB = \".diff/\" # where the result is saved\r\n\r\nextension = \"*.jpg\" # only jpg files considered\r\nbatchCount = 0\r\nbackSubInstance = backSub()\r\n\r\n\r\nif not os.path.exists(filepath + pathRGB):\r\n\tos.makedirs(filepath+pathRGB) #create the result folder if it \r\n\t\t\t\t\t\t\t\t # is not there \r\n\r\nbackSubInstance.setConfig('sample.cfg') # load the backSub parameters \r\n\t\t\t\t\t\t\t\t # from the configuration file\t\r\n\r\nfor filename in glob.glob(filepath + extension): \r\n\t#print(filename) #full file name and path\r\n\tpathAndFile = os.path.splitext(filename)[0]\r\n\t#print(pathAndFile)\t#file name and path without extension \r\n\tlatestFilename = ntpath.basename(pathAndFile)\r\n\t#print(latestFilename) #only file name\r\n\r\n\timage = cv2.imread(filepath + latestFilename + \".jpg\",\\\r\n\t\tcv2.CV_LOAD_IMAGE_COLOR) #read the image from the source\r\n\tprint(latestFilename)\r\n\tdiffImage = backSubInstance.getDiff(image) # get the difference image\r\n\r\n\tresultFileName = filepath + pathRGB + latestFilename + \"motion\"+ \\\r\n\t str(batchCount) + \".jpg\" #contruct the path where to save diffImage\r\n\tcv2.imwrite(resultFileName, diffImage) # write the image to the\r\n\t \t\t\t\t\t\t\t\t\t\t# destination\r\n\tbatchCount +=1 \r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django import forms
from .models import GetInTouch
class GetInTouchForm(forms.ModelForm):
class Meta:
model = GetInTouch
fields = '__all__'
|
normal
|
{
"blob_id": "c8dc143c09aa7f677167a4942ae1c4a0fbf75128",
"index": 3219,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass GetInTouchForm(forms.ModelForm):\n\n\n class Meta:\n model = GetInTouch\n fields = '__all__'\n",
"step-3": "from django import forms\nfrom .models import GetInTouch\n\n\nclass GetInTouchForm(forms.ModelForm):\n\n\n class Meta:\n model = GetInTouch\n fields = '__all__'\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
return render(request, 'ALR1.html')
def search(request):
return render(request, 'ALR2.html')
def home(request):
return render(request, 'ALR3.html')
def pdf(request):
pdfId = request.GET['id']
# pdf_data=open('pdf/' + pdfId + '.pdf','rb').read()
pdf_data=open('pdf/test.pdf','rb').read()
return HttpResponse(pdf_data, content_type='application/pdf')
|
normal
|
{
"blob_id": "d9f586bbb72021ee0b37ff8660e26b50d7e6a2d3",
"index": 569,
"step-1": "<mask token>\n\n\ndef index(request):\n return render(request, 'ALR1.html')\n\n\ndef search(request):\n return render(request, 'ALR2.html')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef index(request):\n return render(request, 'ALR1.html')\n\n\ndef search(request):\n return render(request, 'ALR2.html')\n\n\n<mask token>\n\n\ndef pdf(request):\n pdfId = request.GET['id']\n pdf_data = open('pdf/test.pdf', 'rb').read()\n return HttpResponse(pdf_data, content_type='application/pdf')\n",
"step-3": "<mask token>\n\n\ndef index(request):\n return render(request, 'ALR1.html')\n\n\ndef search(request):\n return render(request, 'ALR2.html')\n\n\ndef home(request):\n return render(request, 'ALR3.html')\n\n\ndef pdf(request):\n pdfId = request.GET['id']\n pdf_data = open('pdf/test.pdf', 'rb').read()\n return HttpResponse(pdf_data, content_type='application/pdf')\n",
"step-4": "from django.http import HttpResponse\nfrom django.shortcuts import render\n\n\ndef index(request):\n return render(request, 'ALR1.html')\n\n\ndef search(request):\n return render(request, 'ALR2.html')\n\n\ndef home(request):\n return render(request, 'ALR3.html')\n\n\ndef pdf(request):\n pdfId = request.GET['id']\n pdf_data = open('pdf/test.pdf', 'rb').read()\n return HttpResponse(pdf_data, content_type='application/pdf')\n",
"step-5": "from django.http import HttpResponse\nfrom django.shortcuts import render\ndef index(request):\n return render(request, 'ALR1.html')\ndef search(request):\n return render(request, 'ALR2.html')\ndef home(request):\n return render(request, 'ALR3.html')\ndef pdf(request):\n pdfId = request.GET['id']\n # pdf_data=open('pdf/' + pdfId + '.pdf','rb').read()\n pdf_data=open('pdf/test.pdf','rb').read()\n return HttpResponse(pdf_data, content_type='application/pdf')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
charset = {'big5': ['big5_chinese_ci', 'big5_bin'], 'dec8': [
'dec8_swedish_ci', 'dec8_bin'], 'cp850': ['cp850_general_ci',
'cp850_bin'], 'hp8': ['hp8_english_ci', 'hp8_bin'], 'koi8r': [
'koi8r_general_ci', 'koi8r_bin'], 'latin1': ['latin1_swedish_ci',
'latin1_german1_ci', 'latin1_danish_ci', 'latin1_german2_ci',
'latin1_bin', 'latin1_general_ci', 'latin1_general_cs',
'latin1_spanish_ci'], 'latin2': ['latin2_general_ci', 'latin2_czech_cs',
'latin2_hungarian_ci', 'latin2_croatian_ci', 'latin2_bin'], 'swe7': [
'swe7_swedish_ci', 'swe7_bin'], 'ascii': ['ascii_general_ci',
'ascii_bin'], 'ujis': ['ujis_japanese_ci', 'ujis_bin'], 'sjis': [
'sjis_japanese_ci', 'sjis_bin'], 'hebrew': ['hebrew_general_ci',
'hebrew_bin'], 'tis620': ['tis620_thai_ci', 'tis620_bin'], 'euckr': [
'euckr_korean_ci', 'euckr_bin'], 'koi8u': ['koi8u_general_ci',
'koi8u_bin'], 'gb2312': ['gb2312_chinese_ci', 'gb2312_bin'], 'greek': [
'greek_general_ci', 'greek_bin'], 'cp1250': ['cp1250_general_ci',
'cp1250_czech_cs', 'cp1250_croatian_ci', 'cp1250_bin',
'cp1250_polish_ci'], 'gbk': ['gbk_chinese_ci', 'gbk_bin'], 'latin5': [
'latin5_turkish_ci', 'latin5_bin'], 'armscii8': ['armscii8_general_ci',
'armscii8_bin'], 'utf8': ['utf8_general_ci', 'utf8_bin',
'utf8_unicode_ci', 'utf8_icelandic_ci', 'utf8_latvian_ci',
'utf8_romanian_ci', 'utf8_slovenian_ci', 'utf8_polish_ci',
'utf8_estonian_ci', 'utf8_spanish_ci', 'utf8_swedish_ci',
'utf8_turkish_ci', 'utf8_czech_ci', 'utf8_danish_ci',
'utf8_lithuanian_ci', 'utf8_slovak_ci', 'utf8_spanish2_ci',
'utf8_roman_ci', 'utf8_persian_ci', 'utf8_esperanto_ci',
'utf8_hungarian_ci', 'utf8_sinhala_ci', 'utf8_german2_ci',
'utf8_croatian_ci', 'utf8_unicode_520_ci', 'utf8_vietnamese_ci',
'utf8_general_mysql500_ci'], 'utf8mb4': ['utf8mb4_0900_ai_ci'],
'utf8mb3': ['utf8mb3_general_ci'], 'ucs2': ['ucs2_general_ci',
'ucs2_bin', 'ucs2_unicode_ci', 'ucs2_icelandic_ci', 'ucs2_latvian_ci',
'ucs2_romanian_ci', 'ucs2_slovenian_ci', 'ucs2_polish_ci',
'ucs2_estonian_ci', 'ucs2_spanish_ci', 'ucs2_swedish_ci',
'ucs2_turkish_ci', 'ucs2_czech_ci', 'ucs2_danish_ci',
'ucs2_lithuanian_ci', 'ucs2_slovak_ci', 'ucs2_spanish2_ci',
'ucs2_roman_ci', 'ucs2_persian_ci', 'ucs2_esperanto_ci',
'ucs2_hungarian_ci', 'ucs2_sinhala_ci', 'ucs2_german2_ci',
'ucs2_croatian_ci', 'ucs2_unicode_520_ci', 'ucs2_vietnamese_ci',
'ucs2_general_mysql500_ci'], 'cp866': ['cp866_general_ci', 'cp866_bin'],
'keybcs2': ['keybcs2_general_ci', 'keybcs2_bin'], 'macce': [
'macce_general_ci', 'macce_bin'], 'macroman': ['macroman_general_ci',
'macroman_bin'], 'cp852': ['cp852_general_ci', 'cp852_bin'], 'latin7':
['latin7_general_ci', 'latin7_estonian_cs', 'latin7_general_cs',
'latin7_bin'], 'utf8mb4': ['utf8mb4_general_ci', 'utf8mb4_bin',
'utf8mb4_unicode_ci', 'utf8mb4_icelandic_ci', 'utf8mb4_latvian_ci',
'utf8mb4_romanian_ci', 'utf8mb4_slovenian_ci', 'utf8mb4_polish_ci',
'utf8mb4_estonian_ci', 'utf8mb4_spanish_ci', 'utf8mb4_swedish_ci',
'utf8mb4_turkish_ci', 'utf8mb4_czech_ci', 'utf8mb4_danish_ci',
'utf8mb4_lithuanian_ci', 'utf8mb4_slovak_ci', 'utf8mb4_spanish2_ci',
'utf8mb4_roman_ci', 'utf8mb4_persian_ci', 'utf8mb4_esperanto_ci',
'utf8mb4_hungarian_ci', 'utf8mb4_sinhala_ci', 'utf8mb4_german2_ci',
'utf8mb4_croatian_ci', 'utf8mb4_unicode_520_ci',
'utf8mb4_vietnamese_ci'], 'cp1251': ['cp1251_general_ci',
'cp1251_bulgarian_ci', 'cp1251_ukrainian_ci', 'cp1251_bin',
'cp1251_general_cs'], 'utf16': ['utf16_general_ci', 'utf16_bin',
'utf16_unicode_ci', 'utf16_icelandic_ci', 'utf16_latvian_ci',
'utf16_romanian_ci', 'utf16_slovenian_ci', 'utf16_polish_ci',
'utf16_estonian_ci', 'utf16_spanish_ci', 'utf16_swedish_ci',
'utf16_turkish_ci', 'utf16_czech_ci', 'utf16_danish_ci',
'utf16_lithuanian_ci', 'utf16_slovak_ci', 'utf16_spanish2_ci',
'utf16_roman_ci', 'utf16_persian_ci', 'utf16_esperanto_ci',
'utf16_hungarian_ci', 'utf16_sinhala_ci', 'utf16_german2_ci',
'utf16_croatian_ci', 'utf16_unicode_520_ci', 'utf16_vietnamese_ci'],
'utf16le': ['utf16le_general_ci', 'utf16le_bin'], 'cp1256': [
'cp1256_general_ci', 'cp1256_bin'], 'cp1257': ['cp1257_general_ci',
'cp1257_lithuanian_ci', 'cp1257_bin'], 'utf32': ['utf32_general_ci',
'utf32_bin', 'utf32_unicode_ci', 'utf32_icelandic_ci',
'utf32_latvian_ci', 'utf32_romanian_ci', 'utf32_slovenian_ci',
'utf32_polish_ci', 'utf32_estonian_ci', 'utf32_spanish_ci',
'utf32_swedish_ci', 'utf32_turkish_ci', 'utf32_czech_ci',
'utf32_danish_ci', 'utf32_lithuanian_ci', 'utf32_slovak_ci',
'utf32_spanish2_ci', 'utf32_roman_ci', 'utf32_persian_ci',
'utf32_esperanto_ci', 'utf32_hungarian_ci', 'utf32_sinhala_ci',
'utf32_german2_ci', 'utf32_croatian_ci', 'utf32_unicode_520_ci',
'utf32_vietnamese_ci'], 'binary': ['binary'], 'geostd8': [
'geostd8_general_ci', 'geostd8_bin'], 'cp932': ['cp932_japanese_ci',
'cp932_bin'], 'eucjpms': ['eucjpms_japanese_ci', 'eucjpms_bin'],
'gb18030': ['gb18030_chinese_ci', 'gb18030_bin', 'gb18030_unicode_520_ci']}
collation = {'big5_chinese_ci': 'big5', 'big5_bin': 'big5',
'dec8_swedish_ci': 'dec8', 'dec8_bin': 'dec8', 'cp850_general_ci':
'cp850', 'cp850_bin': 'cp850', 'hp8_english_ci': 'hp8', 'hp8_bin':
'hp8', 'koi8r_general_ci': 'koi8r', 'koi8r_bin': 'koi8r',
'latin1_german1_ci': 'latin1', 'latin1_swedish_ci': 'latin1',
'latin1_danish_ci': 'latin1', 'latin1_german2_ci': 'latin1',
'latin1_bin': 'latin1', 'latin1_general_ci': 'latin1',
'latin1_general_cs': 'latin1', 'latin1_spanish_ci': 'latin1',
'latin2_czech_cs': 'latin2', 'latin2_general_ci': 'latin2',
'latin2_hungarian_ci': 'latin2', 'latin2_croatian_ci': 'latin2',
'latin2_bin': 'latin2', 'swe7_swedish_ci': 'swe7', 'swe7_bin': 'swe7',
'ascii_general_ci': 'ascii', 'ascii_bin': 'ascii', 'ujis_japanese_ci':
'ujis', 'ujis_bin': 'ujis', 'sjis_japanese_ci': 'sjis', 'sjis_bin':
'sjis', 'hebrew_general_ci': 'hebrew', 'hebrew_bin': 'hebrew',
'tis620_thai_ci': 'tis620', 'tis620_bin': 'tis620', 'euckr_korean_ci':
'euckr', 'euckr_bin': 'euckr', 'koi8u_general_ci': 'koi8u', 'koi8u_bin':
'koi8u', 'gb2312_chinese_ci': 'gb2312', 'gb2312_bin': 'gb2312',
'greek_general_ci': 'greek', 'greek_bin': 'greek', 'cp1250_general_ci':
'cp1250', 'cp1250_czech_cs': 'cp1250', 'cp1250_croatian_ci': 'cp1250',
'cp1250_bin': 'cp1250', 'cp1250_polish_ci': 'cp1250', 'gbk_chinese_ci':
'gbk', 'gbk_bin': 'gbk', 'latin5_turkish_ci': 'latin5', 'latin5_bin':
'latin5', 'armscii8_general_ci': 'armscii8', 'armscii8_bin': 'armscii8',
'utf8_general_ci': 'utf8', 'utf8mb3_general_ci': 'utf8mb3', 'utf8_bin':
'utf8', 'utf8_unicode_ci': 'utf8', 'utf8_icelandic_ci': 'utf8',
'utf8_latvian_ci': 'utf8', 'utf8_romanian_ci': 'utf8',
'utf8_slovenian_ci': 'utf8', 'utf8_polish_ci': 'utf8',
'utf8_estonian_ci': 'utf8', 'utf8_spanish_ci': 'utf8',
'utf8_swedish_ci': 'utf8', 'utf8_turkish_ci': 'utf8', 'utf8_czech_ci':
'utf8', 'utf8_danish_ci': 'utf8', 'utf8_lithuanian_ci': 'utf8',
'utf8_slovak_ci': 'utf8', 'utf8_spanish2_ci': 'utf8', 'utf8_roman_ci':
'utf8', 'utf8_persian_ci': 'utf8', 'utf8_esperanto_ci': 'utf8',
'utf8_hungarian_ci': 'utf8', 'utf8_sinhala_ci': 'utf8',
'utf8_german2_ci': 'utf8', 'utf8_croatian_ci': 'utf8',
'utf8_unicode_520_ci': 'utf8', 'utf8_vietnamese_ci': 'utf8',
'utf8_general_mysql500_ci': 'utf8', 'utf8mb4_0900_ai_ci': 'utf8mb4',
'ucs2_general_ci': 'ucs2', 'ucs2_bin': 'ucs2', 'ucs2_unicode_ci':
'ucs2', 'ucs2_icelandic_ci': 'ucs2', 'ucs2_latvian_ci': 'ucs2',
'ucs2_romanian_ci': 'ucs2', 'ucs2_slovenian_ci': 'ucs2',
'ucs2_polish_ci': 'ucs2', 'ucs2_estonian_ci': 'ucs2', 'ucs2_spanish_ci':
'ucs2', 'ucs2_swedish_ci': 'ucs2', 'ucs2_turkish_ci': 'ucs2',
'ucs2_czech_ci': 'ucs2', 'ucs2_danish_ci': 'ucs2', 'ucs2_lithuanian_ci':
'ucs2', 'ucs2_slovak_ci': 'ucs2', 'ucs2_spanish2_ci': 'ucs2',
'ucs2_roman_ci': 'ucs2', 'ucs2_persian_ci': 'ucs2', 'ucs2_esperanto_ci':
'ucs2', 'ucs2_hungarian_ci': 'ucs2', 'ucs2_sinhala_ci': 'ucs2',
'ucs2_german2_ci': 'ucs2', 'ucs2_croatian_ci': 'ucs2',
'ucs2_unicode_520_ci': 'ucs2', 'ucs2_vietnamese_ci': 'ucs2',
'ucs2_general_mysql500_ci': 'ucs2', 'cp866_general_ci': 'cp866',
'cp866_bin': 'cp866', 'keybcs2_general_ci': 'keybcs2', 'keybcs2_bin':
'keybcs2', 'macce_general_ci': 'macce', 'macce_bin': 'macce',
'macroman_general_ci': 'macroman', 'macroman_bin': 'macroman',
'cp852_general_ci': 'cp852', 'cp852_bin': 'cp852', 'latin7_estonian_cs':
'latin7', 'latin7_general_ci': 'latin7', 'latin7_general_cs': 'latin7',
'latin7_bin': 'latin7', 'utf8mb4_general_ci': 'utf8mb4', 'utf8mb4_bin':
'utf8mb4', 'utf8mb4_unicode_ci': 'utf8mb4', 'utf8mb4_icelandic_ci':
'utf8mb4', 'utf8mb4_latvian_ci': 'utf8mb4', 'utf8mb4_romanian_ci':
'utf8mb4', 'utf8mb4_slovenian_ci': 'utf8mb4', 'utf8mb4_polish_ci':
'utf8mb4', 'utf8mb4_estonian_ci': 'utf8mb4', 'utf8mb4_spanish_ci':
'utf8mb4', 'utf8mb4_swedish_ci': 'utf8mb4', 'utf8mb4_turkish_ci':
'utf8mb4', 'utf8mb4_czech_ci': 'utf8mb4', 'utf8mb4_danish_ci':
'utf8mb4', 'utf8mb4_lithuanian_ci': 'utf8mb4', 'utf8mb4_slovak_ci':
'utf8mb4', 'utf8mb4_spanish2_ci': 'utf8mb4', 'utf8mb4_roman_ci':
'utf8mb4', 'utf8mb4_persian_ci': 'utf8mb4', 'utf8mb4_esperanto_ci':
'utf8mb4', 'utf8mb4_hungarian_ci': 'utf8mb4', 'utf8mb4_sinhala_ci':
'utf8mb4', 'utf8mb4_german2_ci': 'utf8mb4', 'utf8mb4_croatian_ci':
'utf8mb4', 'utf8mb4_unicode_520_ci': 'utf8mb4', 'utf8mb4_vietnamese_ci':
'utf8mb4', 'cp1251_bulgarian_ci': 'cp1251', 'cp1251_ukrainian_ci':
'cp1251', 'cp1251_bin': 'cp1251', 'cp1251_general_ci': 'cp1251',
'cp1251_general_cs': 'cp1251', 'utf16_general_ci': 'utf16', 'utf16_bin':
'utf16', 'utf16_unicode_ci': 'utf16', 'utf16_icelandic_ci': 'utf16',
'utf16_latvian_ci': 'utf16', 'utf16_romanian_ci': 'utf16',
'utf16_slovenian_ci': 'utf16', 'utf16_polish_ci': 'utf16',
'utf16_estonian_ci': 'utf16', 'utf16_spanish_ci': 'utf16',
'utf16_swedish_ci': 'utf16', 'utf16_turkish_ci': 'utf16',
'utf16_czech_ci': 'utf16', 'utf16_danish_ci': 'utf16',
'utf16_lithuanian_ci': 'utf16', 'utf16_slovak_ci': 'utf16',
'utf16_spanish2_ci': 'utf16', 'utf16_roman_ci': 'utf16',
'utf16_persian_ci': 'utf16', 'utf16_esperanto_ci': 'utf16',
'utf16_hungarian_ci': 'utf16', 'utf16_sinhala_ci': 'utf16',
'utf16_german2_ci': 'utf16', 'utf16_croatian_ci': 'utf16',
'utf16_unicode_520_ci': 'utf16', 'utf16_vietnamese_ci': 'utf16',
'utf16le_general_ci': 'utf16le', 'utf16le_bin': 'utf16le',
'cp1256_general_ci': 'cp1256', 'cp1256_bin': 'cp1256',
'cp1257_lithuanian_ci': 'cp1257', 'cp1257_bin': 'cp1257',
'cp1257_general_ci': 'cp1257', 'utf32_general_ci': 'utf32', 'utf32_bin':
'utf32', 'utf32_unicode_ci': 'utf32', 'utf32_icelandic_ci': 'utf32',
'utf32_latvian_ci': 'utf32', 'utf32_romanian_ci': 'utf32',
'utf32_slovenian_ci': 'utf32', 'utf32_polish_ci': 'utf32',
'utf32_estonian_ci': 'utf32', 'utf32_spanish_ci': 'utf32',
'utf32_swedish_ci': 'utf32', 'utf32_turkish_ci': 'utf32',
'utf32_czech_ci': 'utf32', 'utf32_danish_ci': 'utf32',
'utf32_lithuanian_ci': 'utf32', 'utf32_slovak_ci': 'utf32',
'utf32_spanish2_ci': 'utf32', 'utf32_roman_ci': 'utf32',
'utf32_persian_ci': 'utf32', 'utf32_esperanto_ci': 'utf32',
'utf32_hungarian_ci': 'utf32', 'utf32_sinhala_ci': 'utf32',
'utf32_german2_ci': 'utf32', 'utf32_croatian_ci': 'utf32',
'utf32_unicode_520_ci': 'utf32', 'utf32_vietnamese_ci': 'utf32',
'binary': 'binary', 'geostd8_general_ci': 'geostd8', 'geostd8_bin':
'geostd8', 'cp932_japanese_ci': 'cp932', 'cp932_bin': 'cp932',
'eucjpms_japanese_ci': 'eucjpms', 'eucjpms_bin': 'eucjpms',
'gb18030_chinese_ci': 'gb18030', 'gb18030_bin': 'gb18030',
'gb18030_unicode_520_ci': 'gb18030'}
<|reserved_special_token_1|>
# Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
charset = {"big5": ["big5_chinese_ci", "big5_bin"],
"dec8": ["dec8_swedish_ci", "dec8_bin"],
"cp850": ["cp850_general_ci", "cp850_bin"],
"hp8": ["hp8_english_ci", "hp8_bin"],
"koi8r": ["koi8r_general_ci", "koi8r_bin"],
"latin1": ["latin1_swedish_ci",
"latin1_german1_ci",
"latin1_danish_ci",
"latin1_german2_ci",
"latin1_bin",
"latin1_general_ci",
"latin1_general_cs",
"latin1_spanish_ci"],
"latin2": ["latin2_general_ci",
"latin2_czech_cs",
"latin2_hungarian_ci",
"latin2_croatian_ci",
"latin2_bin"],
"swe7": ["swe7_swedish_ci", "swe7_bin"],
"ascii": ["ascii_general_ci", "ascii_bin"],
"ujis": ["ujis_japanese_ci", "ujis_bin"],
"sjis": ["sjis_japanese_ci", "sjis_bin"],
"hebrew": ["hebrew_general_ci", "hebrew_bin"],
"tis620": ["tis620_thai_ci", "tis620_bin"],
"euckr": ["euckr_korean_ci", "euckr_bin"],
"koi8u": ["koi8u_general_ci", "koi8u_bin"],
"gb2312": ["gb2312_chinese_ci", "gb2312_bin"],
"greek": ["greek_general_ci", "greek_bin"],
"cp1250": ["cp1250_general_ci",
"cp1250_czech_cs",
"cp1250_croatian_ci",
"cp1250_bin",
"cp1250_polish_ci"],
"gbk": ["gbk_chinese_ci", "gbk_bin"],
"latin5": ["latin5_turkish_ci", "latin5_bin"],
"armscii8": ["armscii8_general_ci", "armscii8_bin"],
"utf8": ["utf8_general_ci",
"utf8_bin",
"utf8_unicode_ci",
"utf8_icelandic_ci",
"utf8_latvian_ci",
"utf8_romanian_ci",
"utf8_slovenian_ci",
"utf8_polish_ci",
"utf8_estonian_ci",
"utf8_spanish_ci",
"utf8_swedish_ci",
"utf8_turkish_ci",
"utf8_czech_ci",
"utf8_danish_ci",
"utf8_lithuanian_ci",
"utf8_slovak_ci",
"utf8_spanish2_ci",
"utf8_roman_ci",
"utf8_persian_ci",
"utf8_esperanto_ci",
"utf8_hungarian_ci",
"utf8_sinhala_ci",
"utf8_german2_ci",
"utf8_croatian_ci",
"utf8_unicode_520_ci",
"utf8_vietnamese_ci",
"utf8_general_mysql500_ci"
],
"utf8mb4": ["utf8mb4_0900_ai_ci"],
"utf8mb3": ["utf8mb3_general_ci"],
"ucs2": ["ucs2_general_ci",
"ucs2_bin",
"ucs2_unicode_ci",
"ucs2_icelandic_ci",
"ucs2_latvian_ci",
"ucs2_romanian_ci",
"ucs2_slovenian_ci",
"ucs2_polish_ci",
"ucs2_estonian_ci",
"ucs2_spanish_ci",
"ucs2_swedish_ci",
"ucs2_turkish_ci",
"ucs2_czech_ci",
"ucs2_danish_ci",
"ucs2_lithuanian_ci",
"ucs2_slovak_ci",
"ucs2_spanish2_ci",
"ucs2_roman_ci",
"ucs2_persian_ci",
"ucs2_esperanto_ci",
"ucs2_hungarian_ci",
"ucs2_sinhala_ci",
"ucs2_german2_ci",
"ucs2_croatian_ci",
"ucs2_unicode_520_ci",
"ucs2_vietnamese_ci",
"ucs2_general_mysql500_ci"
],
"cp866": ["cp866_general_ci", "cp866_bin"],
"keybcs2": ["keybcs2_general_ci", "keybcs2_bin"],
"macce": ["macce_general_ci", "macce_bin"],
"macroman": ["macroman_general_ci", "macroman_bin"],
"cp852": ["cp852_general_ci", "cp852_bin"],
"latin7": ["latin7_general_ci",
"latin7_estonian_cs",
"latin7_general_cs",
"latin7_bin"],
"utf8mb4": ["utf8mb4_general_ci",
"utf8mb4_bin",
"utf8mb4_unicode_ci",
"utf8mb4_icelandic_ci",
"utf8mb4_latvian_ci",
"utf8mb4_romanian_ci",
"utf8mb4_slovenian_ci",
"utf8mb4_polish_ci",
"utf8mb4_estonian_ci",
"utf8mb4_spanish_ci",
"utf8mb4_swedish_ci",
"utf8mb4_turkish_ci",
"utf8mb4_czech_ci",
"utf8mb4_danish_ci",
"utf8mb4_lithuanian_ci",
"utf8mb4_slovak_ci",
"utf8mb4_spanish2_ci",
"utf8mb4_roman_ci",
"utf8mb4_persian_ci",
"utf8mb4_esperanto_ci",
"utf8mb4_hungarian_ci",
"utf8mb4_sinhala_ci",
"utf8mb4_german2_ci",
"utf8mb4_croatian_ci",
"utf8mb4_unicode_520_ci",
"utf8mb4_vietnamese_ci"],
"cp1251": ["cp1251_general_ci",
"cp1251_bulgarian_ci",
"cp1251_ukrainian_ci",
"cp1251_bin",
"cp1251_general_cs"],
"utf16": ["utf16_general_ci",
"utf16_bin",
"utf16_unicode_ci",
"utf16_icelandic_ci",
"utf16_latvian_ci",
"utf16_romanian_ci",
"utf16_slovenian_ci",
"utf16_polish_ci",
"utf16_estonian_ci",
"utf16_spanish_ci",
"utf16_swedish_ci",
"utf16_turkish_ci",
"utf16_czech_ci",
"utf16_danish_ci",
"utf16_lithuanian_ci",
"utf16_slovak_ci",
"utf16_spanish2_ci",
"utf16_roman_ci",
"utf16_persian_ci",
"utf16_esperanto_ci",
"utf16_hungarian_ci",
"utf16_sinhala_ci",
"utf16_german2_ci",
"utf16_croatian_ci",
"utf16_unicode_520_ci",
"utf16_vietnamese_ci"],
"utf16le": ["utf16le_general_ci",
"utf16le_bin"],
"cp1256": ["cp1256_general_ci", "cp1256_bin"],
"cp1257": ["cp1257_general_ci",
"cp1257_lithuanian_ci",
"cp1257_bin"],
"utf32": ["utf32_general_ci",
"utf32_bin",
"utf32_unicode_ci",
"utf32_icelandic_ci",
"utf32_latvian_ci",
"utf32_romanian_ci",
"utf32_slovenian_ci",
"utf32_polish_ci",
"utf32_estonian_ci",
"utf32_spanish_ci",
"utf32_swedish_ci",
"utf32_turkish_ci",
"utf32_czech_ci",
"utf32_danish_ci",
"utf32_lithuanian_ci",
"utf32_slovak_ci",
"utf32_spanish2_ci",
"utf32_roman_ci",
"utf32_persian_ci",
"utf32_esperanto_ci",
"utf32_hungarian_ci",
"utf32_sinhala_ci",
"utf32_german2_ci",
"utf32_croatian_ci",
"utf32_unicode_520_ci",
"utf32_vietnamese_ci"],
"binary": ["binary"],
"geostd8": ["geostd8_general_ci", "geostd8_bin"],
"cp932": ["cp932_japanese_ci", "cp932_bin"],
"eucjpms": ["eucjpms_japanese_ci", "eucjpms_bin"],
"gb18030": ["gb18030_chinese_ci",
"gb18030_bin",
"gb18030_unicode_520_ci"]}
# Inverse view of the ``charset`` map above: MySQL collation name -> the
# character set that collation belongs to.  Kept as an explicit literal
# rather than derived from ``charset`` so each lookup direction stands on
# its own.
collation = {"big5_chinese_ci": "big5",
             "big5_bin": "big5",
             "dec8_swedish_ci": "dec8",
             "dec8_bin": "dec8",
             "cp850_general_ci": "cp850",
             "cp850_bin": "cp850",
             "hp8_english_ci": "hp8",
             "hp8_bin": "hp8",
             "koi8r_general_ci": "koi8r",
             "koi8r_bin": "koi8r",
             "latin1_german1_ci": "latin1",
             "latin1_swedish_ci": "latin1",
             "latin1_danish_ci": "latin1",
             "latin1_german2_ci": "latin1",
             "latin1_bin": "latin1",
             "latin1_general_ci": "latin1",
             "latin1_general_cs": "latin1",
             "latin1_spanish_ci": "latin1",
             "latin2_czech_cs": "latin2",
             "latin2_general_ci": "latin2",
             "latin2_hungarian_ci": "latin2",
             "latin2_croatian_ci": "latin2",
             "latin2_bin": "latin2",
             "swe7_swedish_ci": "swe7",
             "swe7_bin": "swe7",
             "ascii_general_ci": "ascii",
             "ascii_bin": "ascii",
             "ujis_japanese_ci": "ujis",
             "ujis_bin": "ujis",
             "sjis_japanese_ci": "sjis",
             "sjis_bin": "sjis",
             "hebrew_general_ci": "hebrew",
             "hebrew_bin": "hebrew",
             "tis620_thai_ci": "tis620",
             "tis620_bin": "tis620",
             "euckr_korean_ci": "euckr",
             "euckr_bin": "euckr",
             "koi8u_general_ci": "koi8u",
             "koi8u_bin": "koi8u",
             "gb2312_chinese_ci": "gb2312",
             "gb2312_bin": "gb2312",
             "greek_general_ci": "greek",
             "greek_bin": "greek",
             "cp1250_general_ci": "cp1250",
             "cp1250_czech_cs": "cp1250",
             "cp1250_croatian_ci": "cp1250",
             "cp1250_bin": "cp1250",
             "cp1250_polish_ci": "cp1250",
             "gbk_chinese_ci": "gbk",
             "gbk_bin": "gbk",
             "latin5_turkish_ci": "latin5",
             "latin5_bin": "latin5",
             "armscii8_general_ci": "armscii8",
             "armscii8_bin": "armscii8",
             "utf8_general_ci": "utf8",
             "utf8mb3_general_ci": "utf8mb3",
             "utf8_bin": "utf8",
             "utf8_unicode_ci": "utf8",
             "utf8_icelandic_ci": "utf8",
             "utf8_latvian_ci": "utf8",
             "utf8_romanian_ci": "utf8",
             "utf8_slovenian_ci": "utf8",
             "utf8_polish_ci": "utf8",
             "utf8_estonian_ci": "utf8",
             "utf8_spanish_ci": "utf8",
             "utf8_swedish_ci": "utf8",
             "utf8_turkish_ci": "utf8",
             "utf8_czech_ci": "utf8",
             "utf8_danish_ci": "utf8",
             "utf8_lithuanian_ci": "utf8",
             "utf8_slovak_ci": "utf8",
             "utf8_spanish2_ci": "utf8",
             "utf8_roman_ci": "utf8",
             "utf8_persian_ci": "utf8",
             "utf8_esperanto_ci": "utf8",
             "utf8_hungarian_ci": "utf8",
             "utf8_sinhala_ci": "utf8",
             "utf8_german2_ci": "utf8",
             "utf8_croatian_ci": "utf8",
             "utf8_unicode_520_ci": "utf8",
             "utf8_vietnamese_ci": "utf8",
             "utf8_general_mysql500_ci": "utf8",
             "utf8mb4_0900_ai_ci": "utf8mb4",
             "ucs2_general_ci": "ucs2",
             "ucs2_bin": "ucs2",
             "ucs2_unicode_ci": "ucs2",
             "ucs2_icelandic_ci": "ucs2",
             "ucs2_latvian_ci": "ucs2",
             "ucs2_romanian_ci": "ucs2",
             "ucs2_slovenian_ci": "ucs2",
             "ucs2_polish_ci": "ucs2",
             "ucs2_estonian_ci": "ucs2",
             "ucs2_spanish_ci": "ucs2",
             "ucs2_swedish_ci": "ucs2",
             "ucs2_turkish_ci": "ucs2",
             "ucs2_czech_ci": "ucs2",
             "ucs2_danish_ci": "ucs2",
             "ucs2_lithuanian_ci": "ucs2",
             "ucs2_slovak_ci": "ucs2",
             "ucs2_spanish2_ci": "ucs2",
             "ucs2_roman_ci": "ucs2",
             "ucs2_persian_ci": "ucs2",
             "ucs2_esperanto_ci": "ucs2",
             "ucs2_hungarian_ci": "ucs2",
             "ucs2_sinhala_ci": "ucs2",
             "ucs2_german2_ci": "ucs2",
             "ucs2_croatian_ci": "ucs2",
             "ucs2_unicode_520_ci": "ucs2",
             "ucs2_vietnamese_ci": "ucs2",
             "ucs2_general_mysql500_ci": "ucs2",
             "cp866_general_ci": "cp866",
             "cp866_bin": "cp866",
             "keybcs2_general_ci": "keybcs2",
             "keybcs2_bin": "keybcs2",
             "macce_general_ci": "macce",
             "macce_bin": "macce",
             "macroman_general_ci": "macroman",
             "macroman_bin": "macroman",
             "cp852_general_ci": "cp852",
             "cp852_bin": "cp852",
             "latin7_estonian_cs": "latin7",
             "latin7_general_ci": "latin7",
             "latin7_general_cs": "latin7",
             "latin7_bin": "latin7",
             "utf8mb4_general_ci": "utf8mb4",
             "utf8mb4_bin": "utf8mb4",
             "utf8mb4_unicode_ci": "utf8mb4",
             "utf8mb4_icelandic_ci": "utf8mb4",
             "utf8mb4_latvian_ci": "utf8mb4",
             "utf8mb4_romanian_ci": "utf8mb4",
             "utf8mb4_slovenian_ci": "utf8mb4",
             "utf8mb4_polish_ci": "utf8mb4",
             "utf8mb4_estonian_ci": "utf8mb4",
             "utf8mb4_spanish_ci": "utf8mb4",
             "utf8mb4_swedish_ci": "utf8mb4",
             "utf8mb4_turkish_ci": "utf8mb4",
             "utf8mb4_czech_ci": "utf8mb4",
             "utf8mb4_danish_ci": "utf8mb4",
             "utf8mb4_lithuanian_ci": "utf8mb4",
             "utf8mb4_slovak_ci": "utf8mb4",
             "utf8mb4_spanish2_ci": "utf8mb4",
             "utf8mb4_roman_ci": "utf8mb4",
             "utf8mb4_persian_ci": "utf8mb4",
             "utf8mb4_esperanto_ci": "utf8mb4",
             "utf8mb4_hungarian_ci": "utf8mb4",
             "utf8mb4_sinhala_ci": "utf8mb4",
             "utf8mb4_german2_ci": "utf8mb4",
             "utf8mb4_croatian_ci": "utf8mb4",
             "utf8mb4_unicode_520_ci": "utf8mb4",
             "utf8mb4_vietnamese_ci": "utf8mb4",
             "cp1251_bulgarian_ci": "cp1251",
             "cp1251_ukrainian_ci": "cp1251",
             "cp1251_bin": "cp1251",
             "cp1251_general_ci": "cp1251",
             "cp1251_general_cs": "cp1251",
             "utf16_general_ci": "utf16",
             "utf16_bin": "utf16",
             "utf16_unicode_ci": "utf16",
             "utf16_icelandic_ci": "utf16",
             "utf16_latvian_ci": "utf16",
             "utf16_romanian_ci": "utf16",
             "utf16_slovenian_ci": "utf16",
             "utf16_polish_ci": "utf16",
             "utf16_estonian_ci": "utf16",
             "utf16_spanish_ci": "utf16",
             "utf16_swedish_ci": "utf16",
             "utf16_turkish_ci": "utf16",
             "utf16_czech_ci": "utf16",
             "utf16_danish_ci": "utf16",
             "utf16_lithuanian_ci": "utf16",
             "utf16_slovak_ci": "utf16",
             "utf16_spanish2_ci": "utf16",
             "utf16_roman_ci": "utf16",
             "utf16_persian_ci": "utf16",
             "utf16_esperanto_ci": "utf16",
             "utf16_hungarian_ci": "utf16",
             "utf16_sinhala_ci": "utf16",
             "utf16_german2_ci": "utf16",
             "utf16_croatian_ci": "utf16",
             "utf16_unicode_520_ci": "utf16",
             "utf16_vietnamese_ci": "utf16",
             "utf16le_general_ci": "utf16le",
             "utf16le_bin": "utf16le",
             "cp1256_general_ci": "cp1256",
             "cp1256_bin": "cp1256",
             "cp1257_lithuanian_ci": "cp1257",
             "cp1257_bin": "cp1257",
             "cp1257_general_ci": "cp1257",
             "utf32_general_ci": "utf32",
             "utf32_bin": "utf32",
             "utf32_unicode_ci": "utf32",
             "utf32_icelandic_ci": "utf32",
             "utf32_latvian_ci": "utf32",
             "utf32_romanian_ci": "utf32",
             "utf32_slovenian_ci": "utf32",
             "utf32_polish_ci": "utf32",
             "utf32_estonian_ci": "utf32",
             "utf32_spanish_ci": "utf32",
             "utf32_swedish_ci": "utf32",
             "utf32_turkish_ci": "utf32",
             "utf32_czech_ci": "utf32",
             "utf32_danish_ci": "utf32",
             "utf32_lithuanian_ci": "utf32",
             "utf32_slovak_ci": "utf32",
             "utf32_spanish2_ci": "utf32",
             "utf32_roman_ci": "utf32",
             "utf32_persian_ci": "utf32",
             "utf32_esperanto_ci": "utf32",
             "utf32_hungarian_ci": "utf32",
             "utf32_sinhala_ci": "utf32",
             "utf32_german2_ci": "utf32",
             "utf32_croatian_ci": "utf32",
             "utf32_unicode_520_ci": "utf32",
             "utf32_vietnamese_ci": "utf32",
             "binary": "binary",
             "geostd8_general_ci": "geostd8",
             "geostd8_bin": "geostd8",
             "cp932_japanese_ci": "cp932",
             "cp932_bin": "cp932",
             "eucjpms_japanese_ci": "eucjpms",
             "eucjpms_bin": "eucjpms",
             "gb18030_chinese_ci": "gb18030",
             "gb18030_bin": "gb18030",
             "gb18030_unicode_520_ci": "gb18030"}
|
flexible
|
{
"blob_id": "5e29c6d1034f6612b0081037f8dc679b49f1dbef",
"index": 2855,
"step-1": "<mask token>\n",
"step-2": "charset = {'big5': ['big5_chinese_ci', 'big5_bin'], 'dec8': [\n 'dec8_swedish_ci', 'dec8_bin'], 'cp850': ['cp850_general_ci',\n 'cp850_bin'], 'hp8': ['hp8_english_ci', 'hp8_bin'], 'koi8r': [\n 'koi8r_general_ci', 'koi8r_bin'], 'latin1': ['latin1_swedish_ci',\n 'latin1_german1_ci', 'latin1_danish_ci', 'latin1_german2_ci',\n 'latin1_bin', 'latin1_general_ci', 'latin1_general_cs',\n 'latin1_spanish_ci'], 'latin2': ['latin2_general_ci', 'latin2_czech_cs',\n 'latin2_hungarian_ci', 'latin2_croatian_ci', 'latin2_bin'], 'swe7': [\n 'swe7_swedish_ci', 'swe7_bin'], 'ascii': ['ascii_general_ci',\n 'ascii_bin'], 'ujis': ['ujis_japanese_ci', 'ujis_bin'], 'sjis': [\n 'sjis_japanese_ci', 'sjis_bin'], 'hebrew': ['hebrew_general_ci',\n 'hebrew_bin'], 'tis620': ['tis620_thai_ci', 'tis620_bin'], 'euckr': [\n 'euckr_korean_ci', 'euckr_bin'], 'koi8u': ['koi8u_general_ci',\n 'koi8u_bin'], 'gb2312': ['gb2312_chinese_ci', 'gb2312_bin'], 'greek': [\n 'greek_general_ci', 'greek_bin'], 'cp1250': ['cp1250_general_ci',\n 'cp1250_czech_cs', 'cp1250_croatian_ci', 'cp1250_bin',\n 'cp1250_polish_ci'], 'gbk': ['gbk_chinese_ci', 'gbk_bin'], 'latin5': [\n 'latin5_turkish_ci', 'latin5_bin'], 'armscii8': ['armscii8_general_ci',\n 'armscii8_bin'], 'utf8': ['utf8_general_ci', 'utf8_bin',\n 'utf8_unicode_ci', 'utf8_icelandic_ci', 'utf8_latvian_ci',\n 'utf8_romanian_ci', 'utf8_slovenian_ci', 'utf8_polish_ci',\n 'utf8_estonian_ci', 'utf8_spanish_ci', 'utf8_swedish_ci',\n 'utf8_turkish_ci', 'utf8_czech_ci', 'utf8_danish_ci',\n 'utf8_lithuanian_ci', 'utf8_slovak_ci', 'utf8_spanish2_ci',\n 'utf8_roman_ci', 'utf8_persian_ci', 'utf8_esperanto_ci',\n 'utf8_hungarian_ci', 'utf8_sinhala_ci', 'utf8_german2_ci',\n 'utf8_croatian_ci', 'utf8_unicode_520_ci', 'utf8_vietnamese_ci',\n 'utf8_general_mysql500_ci'], 'utf8mb4': ['utf8mb4_0900_ai_ci'],\n 'utf8mb3': ['utf8mb3_general_ci'], 'ucs2': ['ucs2_general_ci',\n 'ucs2_bin', 'ucs2_unicode_ci', 'ucs2_icelandic_ci', 'ucs2_latvian_ci',\n 'ucs2_romanian_ci', 
'ucs2_slovenian_ci', 'ucs2_polish_ci',\n 'ucs2_estonian_ci', 'ucs2_spanish_ci', 'ucs2_swedish_ci',\n 'ucs2_turkish_ci', 'ucs2_czech_ci', 'ucs2_danish_ci',\n 'ucs2_lithuanian_ci', 'ucs2_slovak_ci', 'ucs2_spanish2_ci',\n 'ucs2_roman_ci', 'ucs2_persian_ci', 'ucs2_esperanto_ci',\n 'ucs2_hungarian_ci', 'ucs2_sinhala_ci', 'ucs2_german2_ci',\n 'ucs2_croatian_ci', 'ucs2_unicode_520_ci', 'ucs2_vietnamese_ci',\n 'ucs2_general_mysql500_ci'], 'cp866': ['cp866_general_ci', 'cp866_bin'],\n 'keybcs2': ['keybcs2_general_ci', 'keybcs2_bin'], 'macce': [\n 'macce_general_ci', 'macce_bin'], 'macroman': ['macroman_general_ci',\n 'macroman_bin'], 'cp852': ['cp852_general_ci', 'cp852_bin'], 'latin7':\n ['latin7_general_ci', 'latin7_estonian_cs', 'latin7_general_cs',\n 'latin7_bin'], 'utf8mb4': ['utf8mb4_general_ci', 'utf8mb4_bin',\n 'utf8mb4_unicode_ci', 'utf8mb4_icelandic_ci', 'utf8mb4_latvian_ci',\n 'utf8mb4_romanian_ci', 'utf8mb4_slovenian_ci', 'utf8mb4_polish_ci',\n 'utf8mb4_estonian_ci', 'utf8mb4_spanish_ci', 'utf8mb4_swedish_ci',\n 'utf8mb4_turkish_ci', 'utf8mb4_czech_ci', 'utf8mb4_danish_ci',\n 'utf8mb4_lithuanian_ci', 'utf8mb4_slovak_ci', 'utf8mb4_spanish2_ci',\n 'utf8mb4_roman_ci', 'utf8mb4_persian_ci', 'utf8mb4_esperanto_ci',\n 'utf8mb4_hungarian_ci', 'utf8mb4_sinhala_ci', 'utf8mb4_german2_ci',\n 'utf8mb4_croatian_ci', 'utf8mb4_unicode_520_ci',\n 'utf8mb4_vietnamese_ci'], 'cp1251': ['cp1251_general_ci',\n 'cp1251_bulgarian_ci', 'cp1251_ukrainian_ci', 'cp1251_bin',\n 'cp1251_general_cs'], 'utf16': ['utf16_general_ci', 'utf16_bin',\n 'utf16_unicode_ci', 'utf16_icelandic_ci', 'utf16_latvian_ci',\n 'utf16_romanian_ci', 'utf16_slovenian_ci', 'utf16_polish_ci',\n 'utf16_estonian_ci', 'utf16_spanish_ci', 'utf16_swedish_ci',\n 'utf16_turkish_ci', 'utf16_czech_ci', 'utf16_danish_ci',\n 'utf16_lithuanian_ci', 'utf16_slovak_ci', 'utf16_spanish2_ci',\n 'utf16_roman_ci', 'utf16_persian_ci', 'utf16_esperanto_ci',\n 'utf16_hungarian_ci', 'utf16_sinhala_ci', 'utf16_german2_ci',\n 
'utf16_croatian_ci', 'utf16_unicode_520_ci', 'utf16_vietnamese_ci'],\n 'utf16le': ['utf16le_general_ci', 'utf16le_bin'], 'cp1256': [\n 'cp1256_general_ci', 'cp1256_bin'], 'cp1257': ['cp1257_general_ci',\n 'cp1257_lithuanian_ci', 'cp1257_bin'], 'utf32': ['utf32_general_ci',\n 'utf32_bin', 'utf32_unicode_ci', 'utf32_icelandic_ci',\n 'utf32_latvian_ci', 'utf32_romanian_ci', 'utf32_slovenian_ci',\n 'utf32_polish_ci', 'utf32_estonian_ci', 'utf32_spanish_ci',\n 'utf32_swedish_ci', 'utf32_turkish_ci', 'utf32_czech_ci',\n 'utf32_danish_ci', 'utf32_lithuanian_ci', 'utf32_slovak_ci',\n 'utf32_spanish2_ci', 'utf32_roman_ci', 'utf32_persian_ci',\n 'utf32_esperanto_ci', 'utf32_hungarian_ci', 'utf32_sinhala_ci',\n 'utf32_german2_ci', 'utf32_croatian_ci', 'utf32_unicode_520_ci',\n 'utf32_vietnamese_ci'], 'binary': ['binary'], 'geostd8': [\n 'geostd8_general_ci', 'geostd8_bin'], 'cp932': ['cp932_japanese_ci',\n 'cp932_bin'], 'eucjpms': ['eucjpms_japanese_ci', 'eucjpms_bin'],\n 'gb18030': ['gb18030_chinese_ci', 'gb18030_bin', 'gb18030_unicode_520_ci']}\ncollation = {'big5_chinese_ci': 'big5', 'big5_bin': 'big5',\n 'dec8_swedish_ci': 'dec8', 'dec8_bin': 'dec8', 'cp850_general_ci':\n 'cp850', 'cp850_bin': 'cp850', 'hp8_english_ci': 'hp8', 'hp8_bin':\n 'hp8', 'koi8r_general_ci': 'koi8r', 'koi8r_bin': 'koi8r',\n 'latin1_german1_ci': 'latin1', 'latin1_swedish_ci': 'latin1',\n 'latin1_danish_ci': 'latin1', 'latin1_german2_ci': 'latin1',\n 'latin1_bin': 'latin1', 'latin1_general_ci': 'latin1',\n 'latin1_general_cs': 'latin1', 'latin1_spanish_ci': 'latin1',\n 'latin2_czech_cs': 'latin2', 'latin2_general_ci': 'latin2',\n 'latin2_hungarian_ci': 'latin2', 'latin2_croatian_ci': 'latin2',\n 'latin2_bin': 'latin2', 'swe7_swedish_ci': 'swe7', 'swe7_bin': 'swe7',\n 'ascii_general_ci': 'ascii', 'ascii_bin': 'ascii', 'ujis_japanese_ci':\n 'ujis', 'ujis_bin': 'ujis', 'sjis_japanese_ci': 'sjis', 'sjis_bin':\n 'sjis', 'hebrew_general_ci': 'hebrew', 'hebrew_bin': 'hebrew',\n 'tis620_thai_ci': 'tis620', 
'tis620_bin': 'tis620', 'euckr_korean_ci':\n 'euckr', 'euckr_bin': 'euckr', 'koi8u_general_ci': 'koi8u', 'koi8u_bin':\n 'koi8u', 'gb2312_chinese_ci': 'gb2312', 'gb2312_bin': 'gb2312',\n 'greek_general_ci': 'greek', 'greek_bin': 'greek', 'cp1250_general_ci':\n 'cp1250', 'cp1250_czech_cs': 'cp1250', 'cp1250_croatian_ci': 'cp1250',\n 'cp1250_bin': 'cp1250', 'cp1250_polish_ci': 'cp1250', 'gbk_chinese_ci':\n 'gbk', 'gbk_bin': 'gbk', 'latin5_turkish_ci': 'latin5', 'latin5_bin':\n 'latin5', 'armscii8_general_ci': 'armscii8', 'armscii8_bin': 'armscii8',\n 'utf8_general_ci': 'utf8', 'utf8mb3_general_ci': 'utf8mb3', 'utf8_bin':\n 'utf8', 'utf8_unicode_ci': 'utf8', 'utf8_icelandic_ci': 'utf8',\n 'utf8_latvian_ci': 'utf8', 'utf8_romanian_ci': 'utf8',\n 'utf8_slovenian_ci': 'utf8', 'utf8_polish_ci': 'utf8',\n 'utf8_estonian_ci': 'utf8', 'utf8_spanish_ci': 'utf8',\n 'utf8_swedish_ci': 'utf8', 'utf8_turkish_ci': 'utf8', 'utf8_czech_ci':\n 'utf8', 'utf8_danish_ci': 'utf8', 'utf8_lithuanian_ci': 'utf8',\n 'utf8_slovak_ci': 'utf8', 'utf8_spanish2_ci': 'utf8', 'utf8_roman_ci':\n 'utf8', 'utf8_persian_ci': 'utf8', 'utf8_esperanto_ci': 'utf8',\n 'utf8_hungarian_ci': 'utf8', 'utf8_sinhala_ci': 'utf8',\n 'utf8_german2_ci': 'utf8', 'utf8_croatian_ci': 'utf8',\n 'utf8_unicode_520_ci': 'utf8', 'utf8_vietnamese_ci': 'utf8',\n 'utf8_general_mysql500_ci': 'utf8', 'utf8mb4_0900_ai_ci': 'utf8mb4',\n 'ucs2_general_ci': 'ucs2', 'ucs2_bin': 'ucs2', 'ucs2_unicode_ci':\n 'ucs2', 'ucs2_icelandic_ci': 'ucs2', 'ucs2_latvian_ci': 'ucs2',\n 'ucs2_romanian_ci': 'ucs2', 'ucs2_slovenian_ci': 'ucs2',\n 'ucs2_polish_ci': 'ucs2', 'ucs2_estonian_ci': 'ucs2', 'ucs2_spanish_ci':\n 'ucs2', 'ucs2_swedish_ci': 'ucs2', 'ucs2_turkish_ci': 'ucs2',\n 'ucs2_czech_ci': 'ucs2', 'ucs2_danish_ci': 'ucs2', 'ucs2_lithuanian_ci':\n 'ucs2', 'ucs2_slovak_ci': 'ucs2', 'ucs2_spanish2_ci': 'ucs2',\n 'ucs2_roman_ci': 'ucs2', 'ucs2_persian_ci': 'ucs2', 'ucs2_esperanto_ci':\n 'ucs2', 'ucs2_hungarian_ci': 'ucs2', 'ucs2_sinhala_ci': 
'ucs2',\n 'ucs2_german2_ci': 'ucs2', 'ucs2_croatian_ci': 'ucs2',\n 'ucs2_unicode_520_ci': 'ucs2', 'ucs2_vietnamese_ci': 'ucs2',\n 'ucs2_general_mysql500_ci': 'ucs2', 'cp866_general_ci': 'cp866',\n 'cp866_bin': 'cp866', 'keybcs2_general_ci': 'keybcs2', 'keybcs2_bin':\n 'keybcs2', 'macce_general_ci': 'macce', 'macce_bin': 'macce',\n 'macroman_general_ci': 'macroman', 'macroman_bin': 'macroman',\n 'cp852_general_ci': 'cp852', 'cp852_bin': 'cp852', 'latin7_estonian_cs':\n 'latin7', 'latin7_general_ci': 'latin7', 'latin7_general_cs': 'latin7',\n 'latin7_bin': 'latin7', 'utf8mb4_general_ci': 'utf8mb4', 'utf8mb4_bin':\n 'utf8mb4', 'utf8mb4_unicode_ci': 'utf8mb4', 'utf8mb4_icelandic_ci':\n 'utf8mb4', 'utf8mb4_latvian_ci': 'utf8mb4', 'utf8mb4_romanian_ci':\n 'utf8mb4', 'utf8mb4_slovenian_ci': 'utf8mb4', 'utf8mb4_polish_ci':\n 'utf8mb4', 'utf8mb4_estonian_ci': 'utf8mb4', 'utf8mb4_spanish_ci':\n 'utf8mb4', 'utf8mb4_swedish_ci': 'utf8mb4', 'utf8mb4_turkish_ci':\n 'utf8mb4', 'utf8mb4_czech_ci': 'utf8mb4', 'utf8mb4_danish_ci':\n 'utf8mb4', 'utf8mb4_lithuanian_ci': 'utf8mb4', 'utf8mb4_slovak_ci':\n 'utf8mb4', 'utf8mb4_spanish2_ci': 'utf8mb4', 'utf8mb4_roman_ci':\n 'utf8mb4', 'utf8mb4_persian_ci': 'utf8mb4', 'utf8mb4_esperanto_ci':\n 'utf8mb4', 'utf8mb4_hungarian_ci': 'utf8mb4', 'utf8mb4_sinhala_ci':\n 'utf8mb4', 'utf8mb4_german2_ci': 'utf8mb4', 'utf8mb4_croatian_ci':\n 'utf8mb4', 'utf8mb4_unicode_520_ci': 'utf8mb4', 'utf8mb4_vietnamese_ci':\n 'utf8mb4', 'cp1251_bulgarian_ci': 'cp1251', 'cp1251_ukrainian_ci':\n 'cp1251', 'cp1251_bin': 'cp1251', 'cp1251_general_ci': 'cp1251',\n 'cp1251_general_cs': 'cp1251', 'utf16_general_ci': 'utf16', 'utf16_bin':\n 'utf16', 'utf16_unicode_ci': 'utf16', 'utf16_icelandic_ci': 'utf16',\n 'utf16_latvian_ci': 'utf16', 'utf16_romanian_ci': 'utf16',\n 'utf16_slovenian_ci': 'utf16', 'utf16_polish_ci': 'utf16',\n 'utf16_estonian_ci': 'utf16', 'utf16_spanish_ci': 'utf16',\n 'utf16_swedish_ci': 'utf16', 'utf16_turkish_ci': 'utf16',\n 'utf16_czech_ci': 
'utf16', 'utf16_danish_ci': 'utf16',\n 'utf16_lithuanian_ci': 'utf16', 'utf16_slovak_ci': 'utf16',\n 'utf16_spanish2_ci': 'utf16', 'utf16_roman_ci': 'utf16',\n 'utf16_persian_ci': 'utf16', 'utf16_esperanto_ci': 'utf16',\n 'utf16_hungarian_ci': 'utf16', 'utf16_sinhala_ci': 'utf16',\n 'utf16_german2_ci': 'utf16', 'utf16_croatian_ci': 'utf16',\n 'utf16_unicode_520_ci': 'utf16', 'utf16_vietnamese_ci': 'utf16',\n 'utf16le_general_ci': 'utf16le', 'utf16le_bin': 'utf16le',\n 'cp1256_general_ci': 'cp1256', 'cp1256_bin': 'cp1256',\n 'cp1257_lithuanian_ci': 'cp1257', 'cp1257_bin': 'cp1257',\n 'cp1257_general_ci': 'cp1257', 'utf32_general_ci': 'utf32', 'utf32_bin':\n 'utf32', 'utf32_unicode_ci': 'utf32', 'utf32_icelandic_ci': 'utf32',\n 'utf32_latvian_ci': 'utf32', 'utf32_romanian_ci': 'utf32',\n 'utf32_slovenian_ci': 'utf32', 'utf32_polish_ci': 'utf32',\n 'utf32_estonian_ci': 'utf32', 'utf32_spanish_ci': 'utf32',\n 'utf32_swedish_ci': 'utf32', 'utf32_turkish_ci': 'utf32',\n 'utf32_czech_ci': 'utf32', 'utf32_danish_ci': 'utf32',\n 'utf32_lithuanian_ci': 'utf32', 'utf32_slovak_ci': 'utf32',\n 'utf32_spanish2_ci': 'utf32', 'utf32_roman_ci': 'utf32',\n 'utf32_persian_ci': 'utf32', 'utf32_esperanto_ci': 'utf32',\n 'utf32_hungarian_ci': 'utf32', 'utf32_sinhala_ci': 'utf32',\n 'utf32_german2_ci': 'utf32', 'utf32_croatian_ci': 'utf32',\n 'utf32_unicode_520_ci': 'utf32', 'utf32_vietnamese_ci': 'utf32',\n 'binary': 'binary', 'geostd8_general_ci': 'geostd8', 'geostd8_bin':\n 'geostd8', 'cp932_japanese_ci': 'cp932', 'cp932_bin': 'cp932',\n 'eucjpms_japanese_ci': 'eucjpms', 'eucjpms_bin': 'eucjpms',\n 'gb18030_chinese_ci': 'gb18030', 'gb18030_bin': 'gb18030',\n 'gb18030_unicode_520_ci': 'gb18030'}\n",
"step-3": "# Copyright 2016 Tesora, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\ncharset = {\"big5\": [\"big5_chinese_ci\", \"big5_bin\"],\n \"dec8\": [\"dec8_swedish_ci\", \"dec8_bin\"],\n \"cp850\": [\"cp850_general_ci\", \"cp850_bin\"],\n \"hp8\": [\"hp8_english_ci\", \"hp8_bin\"],\n \"koi8r\": [\"koi8r_general_ci\", \"koi8r_bin\"],\n \"latin1\": [\"latin1_swedish_ci\",\n \"latin1_german1_ci\",\n \"latin1_danish_ci\",\n \"latin1_german2_ci\",\n \"latin1_bin\",\n \"latin1_general_ci\",\n \"latin1_general_cs\",\n \"latin1_spanish_ci\"],\n \"latin2\": [\"latin2_general_ci\",\n \"latin2_czech_cs\",\n \"latin2_hungarian_ci\",\n \"latin2_croatian_ci\",\n \"latin2_bin\"],\n \"swe7\": [\"swe7_swedish_ci\", \"swe7_bin\"],\n \"ascii\": [\"ascii_general_ci\", \"ascii_bin\"],\n \"ujis\": [\"ujis_japanese_ci\", \"ujis_bin\"],\n \"sjis\": [\"sjis_japanese_ci\", \"sjis_bin\"],\n \"hebrew\": [\"hebrew_general_ci\", \"hebrew_bin\"],\n \"tis620\": [\"tis620_thai_ci\", \"tis620_bin\"],\n \"euckr\": [\"euckr_korean_ci\", \"euckr_bin\"],\n \"koi8u\": [\"koi8u_general_ci\", \"koi8u_bin\"],\n \"gb2312\": [\"gb2312_chinese_ci\", \"gb2312_bin\"],\n \"greek\": [\"greek_general_ci\", \"greek_bin\"],\n \"cp1250\": [\"cp1250_general_ci\",\n \"cp1250_czech_cs\",\n \"cp1250_croatian_ci\",\n \"cp1250_bin\",\n \"cp1250_polish_ci\"],\n \"gbk\": [\"gbk_chinese_ci\", \"gbk_bin\"],\n \"latin5\": [\"latin5_turkish_ci\", \"latin5_bin\"],\n \"armscii8\": 
[\"armscii8_general_ci\", \"armscii8_bin\"],\n \"utf8\": [\"utf8_general_ci\",\n \"utf8_bin\",\n \"utf8_unicode_ci\",\n \"utf8_icelandic_ci\",\n \"utf8_latvian_ci\",\n \"utf8_romanian_ci\",\n \"utf8_slovenian_ci\",\n \"utf8_polish_ci\",\n \"utf8_estonian_ci\",\n \"utf8_spanish_ci\",\n \"utf8_swedish_ci\",\n \"utf8_turkish_ci\",\n \"utf8_czech_ci\",\n \"utf8_danish_ci\",\n \"utf8_lithuanian_ci\",\n \"utf8_slovak_ci\",\n \"utf8_spanish2_ci\",\n \"utf8_roman_ci\",\n \"utf8_persian_ci\",\n \"utf8_esperanto_ci\",\n \"utf8_hungarian_ci\",\n \"utf8_sinhala_ci\",\n \"utf8_german2_ci\",\n \"utf8_croatian_ci\",\n \"utf8_unicode_520_ci\",\n \"utf8_vietnamese_ci\",\n \"utf8_general_mysql500_ci\"\n ],\n \"utf8mb4\": [\"utf8mb4_0900_ai_ci\"],\n \"utf8mb3\": [\"utf8mb3_general_ci\"],\n \"ucs2\": [\"ucs2_general_ci\",\n \"ucs2_bin\",\n \"ucs2_unicode_ci\",\n \"ucs2_icelandic_ci\",\n \"ucs2_latvian_ci\",\n \"ucs2_romanian_ci\",\n \"ucs2_slovenian_ci\",\n \"ucs2_polish_ci\",\n \"ucs2_estonian_ci\",\n \"ucs2_spanish_ci\",\n \"ucs2_swedish_ci\",\n \"ucs2_turkish_ci\",\n \"ucs2_czech_ci\",\n \"ucs2_danish_ci\",\n \"ucs2_lithuanian_ci\",\n \"ucs2_slovak_ci\",\n \"ucs2_spanish2_ci\",\n \"ucs2_roman_ci\",\n \"ucs2_persian_ci\",\n \"ucs2_esperanto_ci\",\n \"ucs2_hungarian_ci\",\n \"ucs2_sinhala_ci\",\n \"ucs2_german2_ci\",\n \"ucs2_croatian_ci\",\n \"ucs2_unicode_520_ci\",\n \"ucs2_vietnamese_ci\",\n \"ucs2_general_mysql500_ci\"\n ],\n \"cp866\": [\"cp866_general_ci\", \"cp866_bin\"],\n \"keybcs2\": [\"keybcs2_general_ci\", \"keybcs2_bin\"],\n \"macce\": [\"macce_general_ci\", \"macce_bin\"],\n \"macroman\": [\"macroman_general_ci\", \"macroman_bin\"],\n \"cp852\": [\"cp852_general_ci\", \"cp852_bin\"],\n \"latin7\": [\"latin7_general_ci\",\n \"latin7_estonian_cs\",\n \"latin7_general_cs\",\n \"latin7_bin\"],\n \"utf8mb4\": [\"utf8mb4_general_ci\",\n \"utf8mb4_bin\",\n \"utf8mb4_unicode_ci\",\n \"utf8mb4_icelandic_ci\",\n \"utf8mb4_latvian_ci\",\n \"utf8mb4_romanian_ci\",\n 
\"utf8mb4_slovenian_ci\",\n \"utf8mb4_polish_ci\",\n \"utf8mb4_estonian_ci\",\n \"utf8mb4_spanish_ci\",\n \"utf8mb4_swedish_ci\",\n \"utf8mb4_turkish_ci\",\n \"utf8mb4_czech_ci\",\n \"utf8mb4_danish_ci\",\n \"utf8mb4_lithuanian_ci\",\n \"utf8mb4_slovak_ci\",\n \"utf8mb4_spanish2_ci\",\n \"utf8mb4_roman_ci\",\n \"utf8mb4_persian_ci\",\n \"utf8mb4_esperanto_ci\",\n \"utf8mb4_hungarian_ci\",\n \"utf8mb4_sinhala_ci\",\n \"utf8mb4_german2_ci\",\n \"utf8mb4_croatian_ci\",\n \"utf8mb4_unicode_520_ci\",\n \"utf8mb4_vietnamese_ci\"],\n \"cp1251\": [\"cp1251_general_ci\",\n \"cp1251_bulgarian_ci\",\n \"cp1251_ukrainian_ci\",\n \"cp1251_bin\",\n \"cp1251_general_cs\"],\n \"utf16\": [\"utf16_general_ci\",\n \"utf16_bin\",\n \"utf16_unicode_ci\",\n \"utf16_icelandic_ci\",\n \"utf16_latvian_ci\",\n \"utf16_romanian_ci\",\n \"utf16_slovenian_ci\",\n \"utf16_polish_ci\",\n \"utf16_estonian_ci\",\n \"utf16_spanish_ci\",\n \"utf16_swedish_ci\",\n \"utf16_turkish_ci\",\n \"utf16_czech_ci\",\n \"utf16_danish_ci\",\n \"utf16_lithuanian_ci\",\n \"utf16_slovak_ci\",\n \"utf16_spanish2_ci\",\n \"utf16_roman_ci\",\n \"utf16_persian_ci\",\n \"utf16_esperanto_ci\",\n \"utf16_hungarian_ci\",\n \"utf16_sinhala_ci\",\n \"utf16_german2_ci\",\n \"utf16_croatian_ci\",\n \"utf16_unicode_520_ci\",\n \"utf16_vietnamese_ci\"],\n \"utf16le\": [\"utf16le_general_ci\",\n \"utf16le_bin\"],\n \"cp1256\": [\"cp1256_general_ci\", \"cp1256_bin\"],\n \"cp1257\": [\"cp1257_general_ci\",\n \"cp1257_lithuanian_ci\",\n \"cp1257_bin\"],\n \"utf32\": [\"utf32_general_ci\",\n \"utf32_bin\",\n \"utf32_unicode_ci\",\n \"utf32_icelandic_ci\",\n \"utf32_latvian_ci\",\n \"utf32_romanian_ci\",\n \"utf32_slovenian_ci\",\n \"utf32_polish_ci\",\n \"utf32_estonian_ci\",\n \"utf32_spanish_ci\",\n \"utf32_swedish_ci\",\n \"utf32_turkish_ci\",\n \"utf32_czech_ci\",\n \"utf32_danish_ci\",\n \"utf32_lithuanian_ci\",\n \"utf32_slovak_ci\",\n \"utf32_spanish2_ci\",\n \"utf32_roman_ci\",\n \"utf32_persian_ci\",\n 
\"utf32_esperanto_ci\",\n \"utf32_hungarian_ci\",\n \"utf32_sinhala_ci\",\n \"utf32_german2_ci\",\n \"utf32_croatian_ci\",\n \"utf32_unicode_520_ci\",\n \"utf32_vietnamese_ci\"],\n \"binary\": [\"binary\"],\n \"geostd8\": [\"geostd8_general_ci\", \"geostd8_bin\"],\n \"cp932\": [\"cp932_japanese_ci\", \"cp932_bin\"],\n \"eucjpms\": [\"eucjpms_japanese_ci\", \"eucjpms_bin\"],\n \"gb18030\": [\"gb18030_chinese_ci\",\n \"gb18030_bin\",\n \"gb18030_unicode_520_ci\"]}\n\ncollation = {\"big5_chinese_ci\": \"big5\",\n \"big5_bin\": \"big5\",\n \"dec8_swedish_ci\": \"dec8\",\n \"dec8_bin\": \"dec8\",\n \"cp850_general_ci\": \"cp850\",\n \"cp850_bin\": \"cp850\",\n \"hp8_english_ci\": \"hp8\",\n \"hp8_bin\": \"hp8\",\n \"koi8r_general_ci\": \"koi8r\",\n \"koi8r_bin\": \"koi8r\",\n \"latin1_german1_ci\": \"latin1\",\n \"latin1_swedish_ci\": \"latin1\",\n \"latin1_danish_ci\": \"latin1\",\n \"latin1_german2_ci\": \"latin1\",\n \"latin1_bin\": \"latin1\",\n \"latin1_general_ci\": \"latin1\",\n \"latin1_general_cs\": \"latin1\",\n \"latin1_spanish_ci\": \"latin1\",\n \"latin2_czech_cs\": \"latin2\",\n \"latin2_general_ci\": \"latin2\",\n \"latin2_hungarian_ci\": \"latin2\",\n \"latin2_croatian_ci\": \"latin2\",\n \"latin2_bin\": \"latin2\",\n \"swe7_swedish_ci\": \"swe7\",\n \"swe7_bin\": \"swe7\",\n \"ascii_general_ci\": \"ascii\",\n \"ascii_bin\": \"ascii\",\n \"ujis_japanese_ci\": \"ujis\",\n \"ujis_bin\": \"ujis\",\n \"sjis_japanese_ci\": \"sjis\",\n \"sjis_bin\": \"sjis\",\n \"hebrew_general_ci\": \"hebrew\",\n \"hebrew_bin\": \"hebrew\",\n \"tis620_thai_ci\": \"tis620\",\n \"tis620_bin\": \"tis620\",\n \"euckr_korean_ci\": \"euckr\",\n \"euckr_bin\": \"euckr\",\n \"koi8u_general_ci\": \"koi8u\",\n \"koi8u_bin\": \"koi8u\",\n \"gb2312_chinese_ci\": \"gb2312\",\n \"gb2312_bin\": \"gb2312\",\n \"greek_general_ci\": \"greek\",\n \"greek_bin\": \"greek\",\n \"cp1250_general_ci\": \"cp1250\",\n \"cp1250_czech_cs\": \"cp1250\",\n \"cp1250_croatian_ci\": \"cp1250\",\n 
\"cp1250_bin\": \"cp1250\",\n \"cp1250_polish_ci\": \"cp1250\",\n \"gbk_chinese_ci\": \"gbk\",\n \"gbk_bin\": \"gbk\",\n \"latin5_turkish_ci\": \"latin5\",\n \"latin5_bin\": \"latin5\",\n \"armscii8_general_ci\": \"armscii8\",\n \"armscii8_bin\": \"armscii8\",\n \"utf8_general_ci\": \"utf8\",\n \"utf8mb3_general_ci\": \"utf8mb3\",\n \"utf8_bin\": \"utf8\",\n \"utf8_unicode_ci\": \"utf8\",\n \"utf8_icelandic_ci\": \"utf8\",\n \"utf8_latvian_ci\": \"utf8\",\n \"utf8_romanian_ci\": \"utf8\",\n \"utf8_slovenian_ci\": \"utf8\",\n \"utf8_polish_ci\": \"utf8\",\n \"utf8_estonian_ci\": \"utf8\",\n \"utf8_spanish_ci\": \"utf8\",\n \"utf8_swedish_ci\": \"utf8\",\n \"utf8_turkish_ci\": \"utf8\",\n \"utf8_czech_ci\": \"utf8\",\n \"utf8_danish_ci\": \"utf8\",\n \"utf8_lithuanian_ci\": \"utf8\",\n \"utf8_slovak_ci\": \"utf8\",\n \"utf8_spanish2_ci\": \"utf8\",\n \"utf8_roman_ci\": \"utf8\",\n \"utf8_persian_ci\": \"utf8\",\n \"utf8_esperanto_ci\": \"utf8\",\n \"utf8_hungarian_ci\": \"utf8\",\n \"utf8_sinhala_ci\": \"utf8\",\n \"utf8_german2_ci\": \"utf8\",\n \"utf8_croatian_ci\": \"utf8\",\n \"utf8_unicode_520_ci\": \"utf8\",\n \"utf8_vietnamese_ci\": \"utf8\",\n \"utf8_general_mysql500_ci\": \"utf8\",\n \"utf8mb4_0900_ai_ci\": \"utf8mb4\",\n \"ucs2_general_ci\": \"ucs2\",\n \"ucs2_bin\": \"ucs2\",\n \"ucs2_unicode_ci\": \"ucs2\",\n \"ucs2_icelandic_ci\": \"ucs2\",\n \"ucs2_latvian_ci\": \"ucs2\",\n \"ucs2_romanian_ci\": \"ucs2\",\n \"ucs2_slovenian_ci\": \"ucs2\",\n \"ucs2_polish_ci\": \"ucs2\",\n \"ucs2_estonian_ci\": \"ucs2\",\n \"ucs2_spanish_ci\": \"ucs2\",\n \"ucs2_swedish_ci\": \"ucs2\",\n \"ucs2_turkish_ci\": \"ucs2\",\n \"ucs2_czech_ci\": \"ucs2\",\n \"ucs2_danish_ci\": \"ucs2\",\n \"ucs2_lithuanian_ci\": \"ucs2\",\n \"ucs2_slovak_ci\": \"ucs2\",\n \"ucs2_spanish2_ci\": \"ucs2\",\n \"ucs2_roman_ci\": \"ucs2\",\n \"ucs2_persian_ci\": \"ucs2\",\n \"ucs2_esperanto_ci\": \"ucs2\",\n \"ucs2_hungarian_ci\": \"ucs2\",\n \"ucs2_sinhala_ci\": \"ucs2\",\n \"ucs2_german2_ci\": 
\"ucs2\",\n \"ucs2_croatian_ci\": \"ucs2\",\n \"ucs2_unicode_520_ci\": \"ucs2\",\n \"ucs2_vietnamese_ci\": \"ucs2\",\n \"ucs2_general_mysql500_ci\": \"ucs2\",\n \"cp866_general_ci\": \"cp866\",\n \"cp866_bin\": \"cp866\",\n \"keybcs2_general_ci\": \"keybcs2\",\n \"keybcs2_bin\": \"keybcs2\",\n \"macce_general_ci\": \"macce\",\n \"macce_bin\": \"macce\",\n \"macroman_general_ci\": \"macroman\",\n \"macroman_bin\": \"macroman\",\n \"cp852_general_ci\": \"cp852\",\n \"cp852_bin\": \"cp852\",\n \"latin7_estonian_cs\": \"latin7\",\n \"latin7_general_ci\": \"latin7\",\n \"latin7_general_cs\": \"latin7\",\n \"latin7_bin\": \"latin7\",\n \"utf8mb4_general_ci\": \"utf8mb4\",\n \"utf8mb4_bin\": \"utf8mb4\",\n \"utf8mb4_unicode_ci\": \"utf8mb4\",\n \"utf8mb4_icelandic_ci\": \"utf8mb4\",\n \"utf8mb4_latvian_ci\": \"utf8mb4\",\n \"utf8mb4_romanian_ci\": \"utf8mb4\",\n \"utf8mb4_slovenian_ci\": \"utf8mb4\",\n \"utf8mb4_polish_ci\": \"utf8mb4\",\n \"utf8mb4_estonian_ci\": \"utf8mb4\",\n \"utf8mb4_spanish_ci\": \"utf8mb4\",\n \"utf8mb4_swedish_ci\": \"utf8mb4\",\n \"utf8mb4_turkish_ci\": \"utf8mb4\",\n \"utf8mb4_czech_ci\": \"utf8mb4\",\n \"utf8mb4_danish_ci\": \"utf8mb4\",\n \"utf8mb4_lithuanian_ci\": \"utf8mb4\",\n \"utf8mb4_slovak_ci\": \"utf8mb4\",\n \"utf8mb4_spanish2_ci\": \"utf8mb4\",\n \"utf8mb4_roman_ci\": \"utf8mb4\",\n \"utf8mb4_persian_ci\": \"utf8mb4\",\n \"utf8mb4_esperanto_ci\": \"utf8mb4\",\n \"utf8mb4_hungarian_ci\": \"utf8mb4\",\n \"utf8mb4_sinhala_ci\": \"utf8mb4\",\n \"utf8mb4_german2_ci\": \"utf8mb4\",\n \"utf8mb4_croatian_ci\": \"utf8mb4\",\n \"utf8mb4_unicode_520_ci\": \"utf8mb4\",\n \"utf8mb4_vietnamese_ci\": \"utf8mb4\",\n \"cp1251_bulgarian_ci\": \"cp1251\",\n \"cp1251_ukrainian_ci\": \"cp1251\",\n \"cp1251_bin\": \"cp1251\",\n \"cp1251_general_ci\": \"cp1251\",\n \"cp1251_general_cs\": \"cp1251\",\n \"utf16_general_ci\": \"utf16\",\n \"utf16_bin\": \"utf16\",\n \"utf16_unicode_ci\": \"utf16\",\n \"utf16_icelandic_ci\": \"utf16\",\n \"utf16_latvian_ci\": 
\"utf16\",\n \"utf16_romanian_ci\": \"utf16\",\n \"utf16_slovenian_ci\": \"utf16\",\n \"utf16_polish_ci\": \"utf16\",\n \"utf16_estonian_ci\": \"utf16\",\n \"utf16_spanish_ci\": \"utf16\",\n \"utf16_swedish_ci\": \"utf16\",\n \"utf16_turkish_ci\": \"utf16\",\n \"utf16_czech_ci\": \"utf16\",\n \"utf16_danish_ci\": \"utf16\",\n \"utf16_lithuanian_ci\": \"utf16\",\n \"utf16_slovak_ci\": \"utf16\",\n \"utf16_spanish2_ci\": \"utf16\",\n \"utf16_roman_ci\": \"utf16\",\n \"utf16_persian_ci\": \"utf16\",\n \"utf16_esperanto_ci\": \"utf16\",\n \"utf16_hungarian_ci\": \"utf16\",\n \"utf16_sinhala_ci\": \"utf16\",\n \"utf16_german2_ci\": \"utf16\",\n \"utf16_croatian_ci\": \"utf16\",\n \"utf16_unicode_520_ci\": \"utf16\",\n \"utf16_vietnamese_ci\": \"utf16\",\n \"utf16le_general_ci\": \"utf16le\",\n \"utf16le_bin\": \"utf16le\",\n \"cp1256_general_ci\": \"cp1256\",\n \"cp1256_bin\": \"cp1256\",\n \"cp1257_lithuanian_ci\": \"cp1257\",\n \"cp1257_bin\": \"cp1257\",\n \"cp1257_general_ci\": \"cp1257\",\n \"utf32_general_ci\": \"utf32\",\n \"utf32_bin\": \"utf32\",\n \"utf32_unicode_ci\": \"utf32\",\n \"utf32_icelandic_ci\": \"utf32\",\n \"utf32_latvian_ci\": \"utf32\",\n \"utf32_romanian_ci\": \"utf32\",\n \"utf32_slovenian_ci\": \"utf32\",\n \"utf32_polish_ci\": \"utf32\",\n \"utf32_estonian_ci\": \"utf32\",\n \"utf32_spanish_ci\": \"utf32\",\n \"utf32_swedish_ci\": \"utf32\",\n \"utf32_turkish_ci\": \"utf32\",\n \"utf32_czech_ci\": \"utf32\",\n \"utf32_danish_ci\": \"utf32\",\n \"utf32_lithuanian_ci\": \"utf32\",\n \"utf32_slovak_ci\": \"utf32\",\n \"utf32_spanish2_ci\": \"utf32\",\n \"utf32_roman_ci\": \"utf32\",\n \"utf32_persian_ci\": \"utf32\",\n \"utf32_esperanto_ci\": \"utf32\",\n \"utf32_hungarian_ci\": \"utf32\",\n \"utf32_sinhala_ci\": \"utf32\",\n \"utf32_german2_ci\": \"utf32\",\n \"utf32_croatian_ci\": \"utf32\",\n \"utf32_unicode_520_ci\": \"utf32\",\n \"utf32_vietnamese_ci\": \"utf32\",\n \"binary\": \"binary\",\n \"geostd8_general_ci\": \"geostd8\",\n 
\"geostd8_bin\": \"geostd8\",\n \"cp932_japanese_ci\": \"cp932\",\n \"cp932_bin\": \"cp932\",\n \"eucjpms_japanese_ci\": \"eucjpms\",\n \"eucjpms_bin\": \"eucjpms\",\n \"gb18030_chinese_ci\": \"gb18030\",\n \"gb18030_bin\": \"gb18030\",\n \"gb18030_unicode_520_ci\": \"gb18030\"}\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import numpy as np
import cv2
from matplotlib import pyplot as plt
from matplotlib import cm
import imageio
# # Backpack values
# fx = 7190.247 # lense focal length
# baseline = 174.945 # distance in mm between the two cameras (values from middlebury)
# units = 0.001 # depth units
# doffs=342.523 # x-difference of principal points, following https://vision.middlebury.edu/stereo/data/scenes2014/#description
# texture_threshold = 2000 # 10 by default
# Classroom values
doffs=113.186
baseline=237.604
fx = 3920.793
doffs=113.186
disparities=0
block=23
# # Backpack images
# imgL = cv2.imread('images/im0_left.png', cv2.IMREAD_GRAYSCALE)
# imgR = cv2.imread('images/im0_right.png', cv2.IMREAD_GRAYSCALE)
# Classroom images
imgL = cv2.imread('images/Classroom1-perfect/im0.png', cv2.IMREAD_GRAYSCALE)
imgR = cv2.imread('images/Classroom1-perfect/im1.png', cv2.IMREAD_GRAYSCALE)
plt.imshow(imgL, cmap="gray")
plt.axis('off')
plt.show()
sbm = cv2.StereoBM_create(numDisparities=disparities,blockSize=block)
# sbm.setTextureThreshold(texture_threshold)
# calculate disparities
disparity = sbm.compute(imgL, imgR)
print(disparity)
# show disparity
plt.imshow(disparity)
plt.axis('off')
plt.show()
depth = np.zeros(shape=imgL.shape).astype(float)
depth[disparity > 0] = (fx * baseline) / (doffs + disparity[disparity > 0])
plt.imshow(depth)
plt.show()
# convert from pfm file equation?
pfm = imageio.imread('images/Classroom1-perfect/disp0.pfm')
pfm = np.asarray(pfm)
plt.imshow(pfm)
plt.show()
depth = np.zeros(shape=imgL.shape).astype(float)
depth[pfm > 0] = (fx * baseline) / (doffs + pfm[pfm > 0])
#print(depth)
plt.imshow(depth)
plt.axis('off')
plt.show()
|
normal
|
{
"blob_id": "14761cc2593556f58a7dc4e499db71456d7c7048",
"index": 3237,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.imshow(imgL, cmap='gray')\nplt.axis('off')\nplt.show()\n<mask token>\nprint(disparity)\nplt.imshow(disparity)\nplt.axis('off')\nplt.show()\n<mask token>\nplt.imshow(depth)\nplt.show()\n<mask token>\nplt.imshow(pfm)\nplt.show()\n<mask token>\nplt.imshow(depth)\nplt.axis('off')\nplt.show()\n",
"step-3": "<mask token>\ndoffs = 113.186\nbaseline = 237.604\nfx = 3920.793\ndoffs = 113.186\ndisparities = 0\nblock = 23\nimgL = cv2.imread('images/Classroom1-perfect/im0.png', cv2.IMREAD_GRAYSCALE)\nimgR = cv2.imread('images/Classroom1-perfect/im1.png', cv2.IMREAD_GRAYSCALE)\nplt.imshow(imgL, cmap='gray')\nplt.axis('off')\nplt.show()\nsbm = cv2.StereoBM_create(numDisparities=disparities, blockSize=block)\ndisparity = sbm.compute(imgL, imgR)\nprint(disparity)\nplt.imshow(disparity)\nplt.axis('off')\nplt.show()\ndepth = np.zeros(shape=imgL.shape).astype(float)\ndepth[disparity > 0] = fx * baseline / (doffs + disparity[disparity > 0])\nplt.imshow(depth)\nplt.show()\npfm = imageio.imread('images/Classroom1-perfect/disp0.pfm')\npfm = np.asarray(pfm)\nplt.imshow(pfm)\nplt.show()\ndepth = np.zeros(shape=imgL.shape).astype(float)\ndepth[pfm > 0] = fx * baseline / (doffs + pfm[pfm > 0])\nplt.imshow(depth)\nplt.axis('off')\nplt.show()\n",
"step-4": "import numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\nfrom matplotlib import cm\nimport imageio\ndoffs = 113.186\nbaseline = 237.604\nfx = 3920.793\ndoffs = 113.186\ndisparities = 0\nblock = 23\nimgL = cv2.imread('images/Classroom1-perfect/im0.png', cv2.IMREAD_GRAYSCALE)\nimgR = cv2.imread('images/Classroom1-perfect/im1.png', cv2.IMREAD_GRAYSCALE)\nplt.imshow(imgL, cmap='gray')\nplt.axis('off')\nplt.show()\nsbm = cv2.StereoBM_create(numDisparities=disparities, blockSize=block)\ndisparity = sbm.compute(imgL, imgR)\nprint(disparity)\nplt.imshow(disparity)\nplt.axis('off')\nplt.show()\ndepth = np.zeros(shape=imgL.shape).astype(float)\ndepth[disparity > 0] = fx * baseline / (doffs + disparity[disparity > 0])\nplt.imshow(depth)\nplt.show()\npfm = imageio.imread('images/Classroom1-perfect/disp0.pfm')\npfm = np.asarray(pfm)\nplt.imshow(pfm)\nplt.show()\ndepth = np.zeros(shape=imgL.shape).astype(float)\ndepth[pfm > 0] = fx * baseline / (doffs + pfm[pfm > 0])\nplt.imshow(depth)\nplt.axis('off')\nplt.show()\n",
"step-5": "import numpy as np\nimport cv2 \nfrom matplotlib import pyplot as plt\nfrom matplotlib import cm\nimport imageio\n\n# # Backpack values\n# fx = 7190.247 # lense focal length\n# baseline = 174.945 # distance in mm between the two cameras (values from middlebury)\n# units = 0.001 # depth units\n# doffs=342.523 # x-difference of principal points, following https://vision.middlebury.edu/stereo/data/scenes2014/#description\n\n# texture_threshold = 2000 # 10 by default\n\n# Classroom values\ndoffs=113.186\nbaseline=237.604\nfx = 3920.793\ndoffs=113.186\n\ndisparities=0\nblock=23\n\n# # Backpack images\n# imgL = cv2.imread('images/im0_left.png', cv2.IMREAD_GRAYSCALE)\n# imgR = cv2.imread('images/im0_right.png', cv2.IMREAD_GRAYSCALE)\n\n# Classroom images\nimgL = cv2.imread('images/Classroom1-perfect/im0.png', cv2.IMREAD_GRAYSCALE)\nimgR = cv2.imread('images/Classroom1-perfect/im1.png', cv2.IMREAD_GRAYSCALE)\n\nplt.imshow(imgL, cmap=\"gray\")\nplt.axis('off')\nplt.show()\n\nsbm = cv2.StereoBM_create(numDisparities=disparities,blockSize=block)\n# sbm.setTextureThreshold(texture_threshold)\n\n\n# calculate disparities\ndisparity = sbm.compute(imgL, imgR)\nprint(disparity)\n# show disparity\nplt.imshow(disparity)\nplt.axis('off')\nplt.show()\n\ndepth = np.zeros(shape=imgL.shape).astype(float)\ndepth[disparity > 0] = (fx * baseline) / (doffs + disparity[disparity > 0])\n\nplt.imshow(depth)\nplt.show()\n\n\n# convert from pfm file equation?\npfm = imageio.imread('images/Classroom1-perfect/disp0.pfm')\npfm = np.asarray(pfm)\nplt.imshow(pfm)\nplt.show()\n\ndepth = np.zeros(shape=imgL.shape).astype(float)\ndepth[pfm > 0] = (fx * baseline) / (doffs + pfm[pfm > 0])\n#print(depth)\n\nplt.imshow(depth)\nplt.axis('off')\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path('__debug__/', include(debug_toolbar.urls))
] + urlpatterns
<|reserved_special_token_1|>
<|reserved_special_token_0|>
schema_view = get_swagger_view(title='API')
<|reserved_special_token_0|>
urlpatterns = [path('django-admin/', admin.site.urls), path('', schema_view
), path('auth/login/', auth_views.LoginView.as_view(template_name=
'auth/login.html')), path('auth/logout/', auth_views.LogoutView.as_view
()), path('api/auth/', include('apps.auth.urls')), path('api/polls/',
include('apps.polls.urls'))]
if settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path('__debug__/', include(debug_toolbar.urls))
] + urlpatterns
<|reserved_special_token_1|>
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from rest_framework_swagger.views import get_swagger_view
schema_view = get_swagger_view(title='API')
from django.contrib.auth import views as auth_views
urlpatterns = [path('django-admin/', admin.site.urls), path('', schema_view
), path('auth/login/', auth_views.LoginView.as_view(template_name=
'auth/login.html')), path('auth/logout/', auth_views.LogoutView.as_view
()), path('api/auth/', include('apps.auth.urls')), path('api/polls/',
include('apps.polls.urls'))]
if settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path('__debug__/', include(debug_toolbar.urls))
] + urlpatterns
<|reserved_special_token_1|>
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from rest_framework_swagger.views import get_swagger_view
schema_view = get_swagger_view(title='API')
from django.contrib.auth import views as auth_views
urlpatterns = [
path('django-admin/', admin.site.urls),
path('', schema_view),
path('auth/login/', auth_views.LoginView.as_view(template_name='auth/login.html')),
path('auth/logout/', auth_views.LogoutView.as_view()),
path('api/auth/', include('apps.auth.urls')),
path('api/polls/', include('apps.polls.urls')),
]
if settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
path('__debug__/', include(debug_toolbar.urls))
] + urlpatterns
|
flexible
|
{
"blob_id": "987d6c769a4f593405e889ed2b0e3f9955900406",
"index": 856,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:\n import debug_toolbar\n urlpatterns = [path('__debug__/', include(debug_toolbar.urls))\n ] + urlpatterns\n",
"step-3": "<mask token>\nschema_view = get_swagger_view(title='API')\n<mask token>\nurlpatterns = [path('django-admin/', admin.site.urls), path('', schema_view\n ), path('auth/login/', auth_views.LoginView.as_view(template_name=\n 'auth/login.html')), path('auth/logout/', auth_views.LogoutView.as_view\n ()), path('api/auth/', include('apps.auth.urls')), path('api/polls/',\n include('apps.polls.urls'))]\nif settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:\n import debug_toolbar\n urlpatterns = [path('__debug__/', include(debug_toolbar.urls))\n ] + urlpatterns\n",
"step-4": "from django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf import settings\nfrom rest_framework_swagger.views import get_swagger_view\nschema_view = get_swagger_view(title='API')\nfrom django.contrib.auth import views as auth_views\nurlpatterns = [path('django-admin/', admin.site.urls), path('', schema_view\n ), path('auth/login/', auth_views.LoginView.as_view(template_name=\n 'auth/login.html')), path('auth/logout/', auth_views.LogoutView.as_view\n ()), path('api/auth/', include('apps.auth.urls')), path('api/polls/',\n include('apps.polls.urls'))]\nif settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:\n import debug_toolbar\n urlpatterns = [path('__debug__/', include(debug_toolbar.urls))\n ] + urlpatterns\n",
"step-5": "from django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf import settings\n\nfrom rest_framework_swagger.views import get_swagger_view\n\nschema_view = get_swagger_view(title='API')\n\nfrom django.contrib.auth import views as auth_views\n\nurlpatterns = [\n path('django-admin/', admin.site.urls),\n path('', schema_view),\n path('auth/login/', auth_views.LoginView.as_view(template_name='auth/login.html')),\n path('auth/logout/', auth_views.LogoutView.as_view()),\n path('api/auth/', include('apps.auth.urls')),\n path('api/polls/', include('apps.polls.urls')),\n]\n\nif settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:\n import debug_toolbar\n urlpatterns = [\n path('__debug__/', include(debug_toolbar.urls))\n ] + urlpatterns\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def part2(number):
if number == target:
return 1
if number in memoize.keys():
return memoize[number]
paths = 0
if number + 1 in n:
paths += part2(number + 1)
if number + 2 in n:
paths += part2(number + 2)
if number + 3 in n:
paths += part2(number + 3)
memoize[number] = paths
print(number, paths)
return paths
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('input.txt') as f:
numbers = f.read().split('\n')
<|reserved_special_token_0|>
n.insert(0, 0)
n.append(n[-1] + 3)
<|reserved_special_token_0|>
def part2(number):
if number == target:
return 1
if number in memoize.keys():
return memoize[number]
paths = 0
if number + 1 in n:
paths += part2(number + 1)
if number + 2 in n:
paths += part2(number + 2)
if number + 3 in n:
paths += part2(number + 3)
memoize[number] = paths
print(number, paths)
return paths
print('Total:', part2(0))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('input.txt') as f:
numbers = f.read().split('\n')
n = sorted(list(map(lambda x: int(x), numbers)))
n.insert(0, 0)
n.append(n[-1] + 3)
target = n[-1]
memoize = {}
def part2(number):
if number == target:
return 1
if number in memoize.keys():
return memoize[number]
paths = 0
if number + 1 in n:
paths += part2(number + 1)
if number + 2 in n:
paths += part2(number + 2)
if number + 3 in n:
paths += part2(number + 3)
memoize[number] = paths
print(number, paths)
return paths
print('Total:', part2(0))
<|reserved_special_token_1|>
from functools import reduce
with open('input.txt') as f:
numbers = f.read().split('\n')
n = sorted(list(map(lambda x: int(x), numbers)))
n.insert(0, 0)
n.append(n[-1] + 3)
target = n[-1]
memoize = {}
def part2(number):
if number == target:
return 1
if number in memoize.keys():
return memoize[number]
paths = 0
if number + 1 in n:
paths += part2(number + 1)
if number + 2 in n:
paths += part2(number + 2)
if number + 3 in n:
paths += part2(number + 3)
memoize[number] = paths
print(number, paths)
return paths
print('Total:', part2(0))
<|reserved_special_token_1|>
from functools import reduce
with open("input.txt") as f:
numbers = f.read().split("\n")
n = sorted(list(map(lambda x: int(x), numbers)))
n.insert(0, 0)
n.append(n[-1] + 3)
target = n[-1]
memoize = {}
def part2(number):
if number == target:
return 1
if number in memoize.keys():
return memoize[number]
paths = 0
if number + 1 in n:
paths += part2(number + 1)
if number + 2 in n:
paths += part2(number + 2)
if number + 3 in n:
paths += part2(number + 3)
memoize[number] = paths
print(number, paths)
return paths
print("Total:", part2(0))
|
flexible
|
{
"blob_id": "3179c13968f7bcdccbd00ea35b9f098dc49b42d8",
"index": 4450,
"step-1": "<mask token>\n\n\ndef part2(number):\n if number == target:\n return 1\n if number in memoize.keys():\n return memoize[number]\n paths = 0\n if number + 1 in n:\n paths += part2(number + 1)\n if number + 2 in n:\n paths += part2(number + 2)\n if number + 3 in n:\n paths += part2(number + 3)\n memoize[number] = paths\n print(number, paths)\n return paths\n\n\n<mask token>\n",
"step-2": "<mask token>\nwith open('input.txt') as f:\n numbers = f.read().split('\\n')\n<mask token>\nn.insert(0, 0)\nn.append(n[-1] + 3)\n<mask token>\n\n\ndef part2(number):\n if number == target:\n return 1\n if number in memoize.keys():\n return memoize[number]\n paths = 0\n if number + 1 in n:\n paths += part2(number + 1)\n if number + 2 in n:\n paths += part2(number + 2)\n if number + 3 in n:\n paths += part2(number + 3)\n memoize[number] = paths\n print(number, paths)\n return paths\n\n\nprint('Total:', part2(0))\n",
"step-3": "<mask token>\nwith open('input.txt') as f:\n numbers = f.read().split('\\n')\nn = sorted(list(map(lambda x: int(x), numbers)))\nn.insert(0, 0)\nn.append(n[-1] + 3)\ntarget = n[-1]\nmemoize = {}\n\n\ndef part2(number):\n if number == target:\n return 1\n if number in memoize.keys():\n return memoize[number]\n paths = 0\n if number + 1 in n:\n paths += part2(number + 1)\n if number + 2 in n:\n paths += part2(number + 2)\n if number + 3 in n:\n paths += part2(number + 3)\n memoize[number] = paths\n print(number, paths)\n return paths\n\n\nprint('Total:', part2(0))\n",
"step-4": "from functools import reduce\nwith open('input.txt') as f:\n numbers = f.read().split('\\n')\nn = sorted(list(map(lambda x: int(x), numbers)))\nn.insert(0, 0)\nn.append(n[-1] + 3)\ntarget = n[-1]\nmemoize = {}\n\n\ndef part2(number):\n if number == target:\n return 1\n if number in memoize.keys():\n return memoize[number]\n paths = 0\n if number + 1 in n:\n paths += part2(number + 1)\n if number + 2 in n:\n paths += part2(number + 2)\n if number + 3 in n:\n paths += part2(number + 3)\n memoize[number] = paths\n print(number, paths)\n return paths\n\n\nprint('Total:', part2(0))\n",
"step-5": "from functools import reduce\n\nwith open(\"input.txt\") as f:\n numbers = f.read().split(\"\\n\")\n\nn = sorted(list(map(lambda x: int(x), numbers)))\nn.insert(0, 0)\nn.append(n[-1] + 3)\n\ntarget = n[-1]\n\nmemoize = {}\n\n\ndef part2(number):\n if number == target:\n return 1\n if number in memoize.keys():\n return memoize[number]\n paths = 0\n if number + 1 in n:\n paths += part2(number + 1)\n if number + 2 in n:\n paths += part2(number + 2)\n if number + 3 in n:\n paths += part2(number + 3)\n memoize[number] = paths\n print(number, paths)\n return paths\n\n\nprint(\"Total:\", part2(0))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
class Graph:
def __init__(self, nvertices):
self.N = nvertices
self.graph = [[(0) for column in range(nvertices)] for row in range
(nvertices)]
self.V = ['0' for column in range(nvertices)]
def nameVertex(self):
for i in range(self.N):
print('Qual o rotúlo do vértice %i?' % i)
self.V[i] = input()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Graph:
def __init__(self, nvertices):
self.N = nvertices
self.graph = [[(0) for column in range(nvertices)] for row in range
(nvertices)]
self.V = ['0' for column in range(nvertices)]
def nameVertex(self):
for i in range(self.N):
print('Qual o rotúlo do vértice %i?' % i)
self.V[i] = input()
def setEdge(self, u, v, w):
self.graph[u][v] = w
self.graph[v][u] = w
def loadEdges(self):
for i in range(self.N):
for j in range(self.N):
if i > j:
print('Qual o peso entre %c e %c?' % (self.V[i], self.V[j])
)
self.setEdge(i, j, input())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Graph:
def __init__(self, nvertices):
self.N = nvertices
self.graph = [[(0) for column in range(nvertices)] for row in range
(nvertices)]
self.V = ['0' for column in range(nvertices)]
def nameVertex(self):
for i in range(self.N):
print('Qual o rotúlo do vértice %i?' % i)
self.V[i] = input()
def setEdge(self, u, v, w):
self.graph[u][v] = w
self.graph[v][u] = w
def loadEdges(self):
for i in range(self.N):
for j in range(self.N):
if i > j:
print('Qual o peso entre %c e %c?' % (self.V[i], self.V[j])
)
self.setEdge(i, j, input())
print('Qual o número de vértices?')
<|reserved_special_token_0|>
print(g.graph)
g.nameVertex()
g.loadEdges()
print(g.graph)
<|reserved_special_token_1|>
class Graph:
def __init__(self, nvertices):
self.N = nvertices
self.graph = [[(0) for column in range(nvertices)] for row in range
(nvertices)]
self.V = ['0' for column in range(nvertices)]
def nameVertex(self):
for i in range(self.N):
print('Qual o rotúlo do vértice %i?' % i)
self.V[i] = input()
def setEdge(self, u, v, w):
self.graph[u][v] = w
self.graph[v][u] = w
def loadEdges(self):
for i in range(self.N):
for j in range(self.N):
if i > j:
print('Qual o peso entre %c e %c?' % (self.V[i], self.V[j])
)
self.setEdge(i, j, input())
print('Qual o número de vértices?')
n = int(input())
g = Graph(n)
g1 = Graph(n - 1)
print(g.graph)
g.nameVertex()
g.loadEdges()
print(g.graph)
<|reserved_special_token_1|>
class Graph():
def __init__(self, nvertices):
self.N = nvertices
self.graph = [[0 for column in range(nvertices)]
for row in range(nvertices)]
self.V = ['0' for column in range(nvertices)]
def nameVertex(self):
for i in range(self.N):
print("Qual o rotúlo do vértice %i?"%(i))
self.V[i]=input()
def setEdge(self,u,v,w):
self.graph[u][v]=w
self.graph[v][u]=w
def loadEdges(self):
for i in range(self.N):
for j in range(self.N):
if i>j:
print("Qual o peso entre %c e %c?"%
(self.V[i],self.V[j]))
self.setEdge(i,j,input())
print('Qual o número de vértices?')
n = int(input())
g = Graph(n)
g1 = Graph(n-1)
print(g.graph)
g.nameVertex()
g.loadEdges()
print(g.graph)
|
flexible
|
{
"blob_id": "51a8b963047215bf864eb4a3e62beb5741dfbafe",
"index": 8572,
"step-1": "class Graph:\n\n def __init__(self, nvertices):\n self.N = nvertices\n self.graph = [[(0) for column in range(nvertices)] for row in range\n (nvertices)]\n self.V = ['0' for column in range(nvertices)]\n\n def nameVertex(self):\n for i in range(self.N):\n print('Qual o rotúlo do vértice %i?' % i)\n self.V[i] = input()\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Graph:\n\n def __init__(self, nvertices):\n self.N = nvertices\n self.graph = [[(0) for column in range(nvertices)] for row in range\n (nvertices)]\n self.V = ['0' for column in range(nvertices)]\n\n def nameVertex(self):\n for i in range(self.N):\n print('Qual o rotúlo do vértice %i?' % i)\n self.V[i] = input()\n\n def setEdge(self, u, v, w):\n self.graph[u][v] = w\n self.graph[v][u] = w\n\n def loadEdges(self):\n for i in range(self.N):\n for j in range(self.N):\n if i > j:\n print('Qual o peso entre %c e %c?' % (self.V[i], self.V[j])\n )\n self.setEdge(i, j, input())\n\n\n<mask token>\n",
"step-3": "class Graph:\n\n def __init__(self, nvertices):\n self.N = nvertices\n self.graph = [[(0) for column in range(nvertices)] for row in range\n (nvertices)]\n self.V = ['0' for column in range(nvertices)]\n\n def nameVertex(self):\n for i in range(self.N):\n print('Qual o rotúlo do vértice %i?' % i)\n self.V[i] = input()\n\n def setEdge(self, u, v, w):\n self.graph[u][v] = w\n self.graph[v][u] = w\n\n def loadEdges(self):\n for i in range(self.N):\n for j in range(self.N):\n if i > j:\n print('Qual o peso entre %c e %c?' % (self.V[i], self.V[j])\n )\n self.setEdge(i, j, input())\n\n\nprint('Qual o número de vértices?')\n<mask token>\nprint(g.graph)\ng.nameVertex()\ng.loadEdges()\nprint(g.graph)\n",
"step-4": "class Graph:\n\n def __init__(self, nvertices):\n self.N = nvertices\n self.graph = [[(0) for column in range(nvertices)] for row in range\n (nvertices)]\n self.V = ['0' for column in range(nvertices)]\n\n def nameVertex(self):\n for i in range(self.N):\n print('Qual o rotúlo do vértice %i?' % i)\n self.V[i] = input()\n\n def setEdge(self, u, v, w):\n self.graph[u][v] = w\n self.graph[v][u] = w\n\n def loadEdges(self):\n for i in range(self.N):\n for j in range(self.N):\n if i > j:\n print('Qual o peso entre %c e %c?' % (self.V[i], self.V[j])\n )\n self.setEdge(i, j, input())\n\n\nprint('Qual o número de vértices?')\nn = int(input())\ng = Graph(n)\ng1 = Graph(n - 1)\nprint(g.graph)\ng.nameVertex()\ng.loadEdges()\nprint(g.graph)\n",
"step-5": "class Graph(): \n \n def __init__(self, nvertices): \n self.N = nvertices \n self.graph = [[0 for column in range(nvertices)] \n for row in range(nvertices)] \n self.V = ['0' for column in range(nvertices)]\n\n def nameVertex(self):\n for i in range(self.N):\n print(\"Qual o rotúlo do vértice %i?\"%(i))\n self.V[i]=input()\n\n def setEdge(self,u,v,w):\n self.graph[u][v]=w\n self.graph[v][u]=w\n\n def loadEdges(self):\n for i in range(self.N):\n for j in range(self.N):\n if i>j:\n print(\"Qual o peso entre %c e %c?\"%\n (self.V[i],self.V[j]))\n self.setEdge(i,j,input())\n \n \n\n \nprint('Qual o número de vértices?')\nn = int(input())\ng = Graph(n)\ng1 = Graph(n-1)\nprint(g.graph)\ng.nameVertex()\ng.loadEdges()\nprint(g.graph)\n\n\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class MarkdownBlock(TextBlock):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MarkdownBlock(TextBlock):
def __init__(self, required=True, help_text=None, **kwargs):
self.field = forms.CharField(required=required, help_text=help_text,
widget=MarkdownTextarea())
super(MarkdownBlock, self).__init__(**kwargs)
def render_basic(self, value, context=None):
return render_markdown(value, context)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
from wagtail.core.blocks import TextBlock
except ImportError:
from wagtail.wagtailcore.blocks import TextBlock
class MarkdownBlock(TextBlock):
def __init__(self, required=True, help_text=None, **kwargs):
self.field = forms.CharField(required=required, help_text=help_text,
widget=MarkdownTextarea())
super(MarkdownBlock, self).__init__(**kwargs)
def render_basic(self, value, context=None):
return render_markdown(value, context)
<|reserved_special_token_1|>
from django import forms
from .utils import render_markdown
from .widgets import MarkdownTextarea
try:
from wagtail.core.blocks import TextBlock
except ImportError:
from wagtail.wagtailcore.blocks import TextBlock
class MarkdownBlock(TextBlock):
def __init__(self, required=True, help_text=None, **kwargs):
self.field = forms.CharField(required=required, help_text=help_text,
widget=MarkdownTextarea())
super(MarkdownBlock, self).__init__(**kwargs)
def render_basic(self, value, context=None):
return render_markdown(value, context)
<|reserved_special_token_1|>
# vim:sw=4 ts=4 et:
# Copyright (c) 2015 Torchbox Ltd.
# tomasz.knapik@torchbox.com 2017-12-07
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely. This software is provided 'as-is', without any express or implied
# warranty.
#
from django import forms
from .utils import render_markdown
from .widgets import MarkdownTextarea
try:
from wagtail.core.blocks import TextBlock
except ImportError:
from wagtail.wagtailcore.blocks import TextBlock
class MarkdownBlock(TextBlock):
def __init__(self, required=True, help_text=None, **kwargs):
self.field = forms.CharField(
required=required, help_text=help_text, widget=MarkdownTextarea()
)
super(MarkdownBlock, self).__init__(**kwargs)
def render_basic(self, value, context=None):
return render_markdown(value, context)
|
flexible
|
{
"blob_id": "6f271e6cfb03977d52c50562c3c394b962c9af83",
"index": 7538,
"step-1": "<mask token>\n\n\nclass MarkdownBlock(TextBlock):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MarkdownBlock(TextBlock):\n\n def __init__(self, required=True, help_text=None, **kwargs):\n self.field = forms.CharField(required=required, help_text=help_text,\n widget=MarkdownTextarea())\n super(MarkdownBlock, self).__init__(**kwargs)\n\n def render_basic(self, value, context=None):\n return render_markdown(value, context)\n",
"step-3": "<mask token>\ntry:\n from wagtail.core.blocks import TextBlock\nexcept ImportError:\n from wagtail.wagtailcore.blocks import TextBlock\n\n\nclass MarkdownBlock(TextBlock):\n\n def __init__(self, required=True, help_text=None, **kwargs):\n self.field = forms.CharField(required=required, help_text=help_text,\n widget=MarkdownTextarea())\n super(MarkdownBlock, self).__init__(**kwargs)\n\n def render_basic(self, value, context=None):\n return render_markdown(value, context)\n",
"step-4": "from django import forms\nfrom .utils import render_markdown\nfrom .widgets import MarkdownTextarea\ntry:\n from wagtail.core.blocks import TextBlock\nexcept ImportError:\n from wagtail.wagtailcore.blocks import TextBlock\n\n\nclass MarkdownBlock(TextBlock):\n\n def __init__(self, required=True, help_text=None, **kwargs):\n self.field = forms.CharField(required=required, help_text=help_text,\n widget=MarkdownTextarea())\n super(MarkdownBlock, self).__init__(**kwargs)\n\n def render_basic(self, value, context=None):\n return render_markdown(value, context)\n",
"step-5": "# vim:sw=4 ts=4 et:\n# Copyright (c) 2015 Torchbox Ltd.\n# tomasz.knapik@torchbox.com 2017-12-07\n#\n# Permission is granted to anyone to use this software for any purpose,\n# including commercial applications, and to alter it and redistribute it\n# freely. This software is provided 'as-is', without any express or implied\n# warranty.\n#\nfrom django import forms\n\nfrom .utils import render_markdown\nfrom .widgets import MarkdownTextarea\n\ntry:\n from wagtail.core.blocks import TextBlock\nexcept ImportError:\n from wagtail.wagtailcore.blocks import TextBlock\n\n\nclass MarkdownBlock(TextBlock):\n def __init__(self, required=True, help_text=None, **kwargs):\n self.field = forms.CharField(\n required=required, help_text=help_text, widget=MarkdownTextarea()\n )\n super(MarkdownBlock, self).__init__(**kwargs)\n\n def render_basic(self, value, context=None):\n return render_markdown(value, context)\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class Especialidade(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Especialidade(models.Model):
def __str__(self):
return self.nome
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Especialidade(models.Model):
def __str__(self):
return self.nome
nome = models.CharField(max_length=200, verbose_name=_('Especialidade'),
unique=True, blank=False, null=False)
<|reserved_special_token_1|>
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Especialidade(models.Model):
def __str__(self):
return self.nome
nome = models.CharField(max_length=200, verbose_name=_('Especialidade'),
unique=True, blank=False, null=False)
<|reserved_special_token_1|>
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Especialidade(models.Model):
def __str__(self):
return self.nome
# add unique=True?
nome = models.CharField(max_length=200, verbose_name=_('Especialidade'), unique=True, blank=False, null=False)
|
flexible
|
{
"blob_id": "9cc672702d960088f0230cbd1694b295216d8b5a",
"index": 4617,
"step-1": "<mask token>\n\n\nclass Especialidade(models.Model):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Especialidade(models.Model):\n\n def __str__(self):\n return self.nome\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Especialidade(models.Model):\n\n def __str__(self):\n return self.nome\n nome = models.CharField(max_length=200, verbose_name=_('Especialidade'),\n unique=True, blank=False, null=False)\n",
"step-4": "from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass Especialidade(models.Model):\n\n def __str__(self):\n return self.nome\n nome = models.CharField(max_length=200, verbose_name=_('Especialidade'),\n unique=True, blank=False, null=False)\n",
"step-5": "from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass Especialidade(models.Model):\n def __str__(self):\n return self.nome\n\n # add unique=True?\n nome = models.CharField(max_length=200, verbose_name=_('Especialidade'), unique=True, blank=False, null=False)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class Yolo(Yolov3):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def freeze(self):
graph_def = tf.graph_util.convert_variables_to_constants(sess=self.
sess, input_graph_def=tf.get_default_graph().as_graph_def(),
output_node_names=['detections/output'])
with tf.gfile.GFile('frozen_yolo.pb', 'wb') as f:
f.write(graph_def.SerializeToString())
def defrost(self):
with tf.gfile.GFile('frozen_yolo.pb', 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
print('Found a frozen yolov3 model, defrost and use!')
tf.import_graph_def(graph_def)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Yolo(Yolov3):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def predict(self, input_list, confidence_theshold=0.6, iou_threshold=0.5):
feed_dict = {self.input: input_list}
batch_detections = self.sess.run(self.output, feed_dict)
return predict(batch_detections, confidence_theshold, iou_threshold)
def freeze(self):
graph_def = tf.graph_util.convert_variables_to_constants(sess=self.
sess, input_graph_def=tf.get_default_graph().as_graph_def(),
output_node_names=['detections/output'])
with tf.gfile.GFile('frozen_yolo.pb', 'wb') as f:
f.write(graph_def.SerializeToString())
def defrost(self):
with tf.gfile.GFile('frozen_yolo.pb', 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
print('Found a frozen yolov3 model, defrost and use!')
tf.import_graph_def(graph_def)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Yolo(Yolov3):
sess = tf.Session()
def __init__(self, input=None, weight_path=None, is_training=False):
self.is_training = is_training
try:
self.defrost()
self.input = tf.get_default_graph().get_tensor_by_name(
'import/input:0')
self.output = tf.get_default_graph().get_tensor_by_name(
'import/detections/output:0')
except:
if not input:
input = tf.placeholder(tf.float32, [None, 416, 416, 3], 'input'
)
self.input = input
self.input_size = self.input.get_shape().as_list()[1]
with tf.variable_scope('detections'):
self.output = self.graph()
self.loader = Weight_loader(tf.global_variables('detections'),
weight_path)
self.sess.run(self.loader.load_now())
self.freeze()
def predict(self, input_list, confidence_theshold=0.6, iou_threshold=0.5):
feed_dict = {self.input: input_list}
batch_detections = self.sess.run(self.output, feed_dict)
return predict(batch_detections, confidence_theshold, iou_threshold)
def freeze(self):
graph_def = tf.graph_util.convert_variables_to_constants(sess=self.
sess, input_graph_def=tf.get_default_graph().as_graph_def(),
output_node_names=['detections/output'])
with tf.gfile.GFile('frozen_yolo.pb', 'wb') as f:
f.write(graph_def.SerializeToString())
def defrost(self):
with tf.gfile.GFile('frozen_yolo.pb', 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
print('Found a frozen yolov3 model, defrost and use!')
tf.import_graph_def(graph_def)
<|reserved_special_token_1|>
import tensorflow as tf
from yolov3 import *
from predict import predict
from load import Weight_loader
class Yolo(Yolov3):
sess = tf.Session()
def __init__(self, input=None, weight_path=None, is_training=False):
self.is_training = is_training
try:
self.defrost()
self.input = tf.get_default_graph().get_tensor_by_name(
'import/input:0')
self.output = tf.get_default_graph().get_tensor_by_name(
'import/detections/output:0')
except:
if not input:
input = tf.placeholder(tf.float32, [None, 416, 416, 3], 'input'
)
self.input = input
self.input_size = self.input.get_shape().as_list()[1]
with tf.variable_scope('detections'):
self.output = self.graph()
self.loader = Weight_loader(tf.global_variables('detections'),
weight_path)
self.sess.run(self.loader.load_now())
self.freeze()
def predict(self, input_list, confidence_theshold=0.6, iou_threshold=0.5):
feed_dict = {self.input: input_list}
batch_detections = self.sess.run(self.output, feed_dict)
return predict(batch_detections, confidence_theshold, iou_threshold)
def freeze(self):
graph_def = tf.graph_util.convert_variables_to_constants(sess=self.
sess, input_graph_def=tf.get_default_graph().as_graph_def(),
output_node_names=['detections/output'])
with tf.gfile.GFile('frozen_yolo.pb', 'wb') as f:
f.write(graph_def.SerializeToString())
def defrost(self):
with tf.gfile.GFile('frozen_yolo.pb', 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
print('Found a frozen yolov3 model, defrost and use!')
tf.import_graph_def(graph_def)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import tensorflow as tf
from yolov3 import *
from predict import predict
from load import Weight_loader
class Yolo(Yolov3):
sess = tf.Session()
def __init__(self, input=None, weight_path=None, is_training=False):
self.is_training = is_training
try:
self.defrost()
self.input = tf.get_default_graph().get_tensor_by_name('import/input:0')
self.output = tf.get_default_graph().get_tensor_by_name('import/detections/output:0')
except:
if not input:
input = tf.placeholder(tf.float32, [None, 416, 416, 3], 'input')
self.input = input
self.input_size = self.input.get_shape().as_list()[1]
with tf.variable_scope('detections'):
self.output = self.graph()
self.loader = Weight_loader(tf.global_variables('detections'), weight_path)
# self.sess.run(tf.global_variables_initializer())
self.sess.run(self.loader.load_now())
self.freeze()
def predict(self, input_list, confidence_theshold=.6, iou_threshold=.5):
feed_dict = {self.input: input_list}
batch_detections = self.sess.run(self.output, feed_dict)
return predict(batch_detections, confidence_theshold, iou_threshold)
def freeze(self):
graph_def = tf.graph_util.convert_variables_to_constants(sess=self.sess,
input_graph_def=tf.get_default_graph().as_graph_def(),
output_node_names=['detections/output'])
with tf.gfile.GFile('frozen_yolo.pb', 'wb') as f:
f.write(graph_def.SerializeToString())
def defrost(self):
with tf.gfile.GFile('frozen_yolo.pb', 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
print('Found a frozen yolov3 model, defrost and use!')
tf.import_graph_def(graph_def)
|
flexible
|
{
"blob_id": "f3d34379cc7fbfe211eeebec424112f3da0ab724",
"index": 7999,
"step-1": "<mask token>\n\n\nclass Yolo(Yolov3):\n <mask token>\n <mask token>\n <mask token>\n\n def freeze(self):\n graph_def = tf.graph_util.convert_variables_to_constants(sess=self.\n sess, input_graph_def=tf.get_default_graph().as_graph_def(),\n output_node_names=['detections/output'])\n with tf.gfile.GFile('frozen_yolo.pb', 'wb') as f:\n f.write(graph_def.SerializeToString())\n\n def defrost(self):\n with tf.gfile.GFile('frozen_yolo.pb', 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n print('Found a frozen yolov3 model, defrost and use!')\n tf.import_graph_def(graph_def)\n",
"step-2": "<mask token>\n\n\nclass Yolo(Yolov3):\n <mask token>\n <mask token>\n\n def predict(self, input_list, confidence_theshold=0.6, iou_threshold=0.5):\n feed_dict = {self.input: input_list}\n batch_detections = self.sess.run(self.output, feed_dict)\n return predict(batch_detections, confidence_theshold, iou_threshold)\n\n def freeze(self):\n graph_def = tf.graph_util.convert_variables_to_constants(sess=self.\n sess, input_graph_def=tf.get_default_graph().as_graph_def(),\n output_node_names=['detections/output'])\n with tf.gfile.GFile('frozen_yolo.pb', 'wb') as f:\n f.write(graph_def.SerializeToString())\n\n def defrost(self):\n with tf.gfile.GFile('frozen_yolo.pb', 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n print('Found a frozen yolov3 model, defrost and use!')\n tf.import_graph_def(graph_def)\n",
"step-3": "<mask token>\n\n\nclass Yolo(Yolov3):\n sess = tf.Session()\n\n def __init__(self, input=None, weight_path=None, is_training=False):\n self.is_training = is_training\n try:\n self.defrost()\n self.input = tf.get_default_graph().get_tensor_by_name(\n 'import/input:0')\n self.output = tf.get_default_graph().get_tensor_by_name(\n 'import/detections/output:0')\n except:\n if not input:\n input = tf.placeholder(tf.float32, [None, 416, 416, 3], 'input'\n )\n self.input = input\n self.input_size = self.input.get_shape().as_list()[1]\n with tf.variable_scope('detections'):\n self.output = self.graph()\n self.loader = Weight_loader(tf.global_variables('detections'),\n weight_path)\n self.sess.run(self.loader.load_now())\n self.freeze()\n\n def predict(self, input_list, confidence_theshold=0.6, iou_threshold=0.5):\n feed_dict = {self.input: input_list}\n batch_detections = self.sess.run(self.output, feed_dict)\n return predict(batch_detections, confidence_theshold, iou_threshold)\n\n def freeze(self):\n graph_def = tf.graph_util.convert_variables_to_constants(sess=self.\n sess, input_graph_def=tf.get_default_graph().as_graph_def(),\n output_node_names=['detections/output'])\n with tf.gfile.GFile('frozen_yolo.pb', 'wb') as f:\n f.write(graph_def.SerializeToString())\n\n def defrost(self):\n with tf.gfile.GFile('frozen_yolo.pb', 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n print('Found a frozen yolov3 model, defrost and use!')\n tf.import_graph_def(graph_def)\n",
"step-4": "import tensorflow as tf\nfrom yolov3 import *\nfrom predict import predict\nfrom load import Weight_loader\n\n\nclass Yolo(Yolov3):\n sess = tf.Session()\n\n def __init__(self, input=None, weight_path=None, is_training=False):\n self.is_training = is_training\n try:\n self.defrost()\n self.input = tf.get_default_graph().get_tensor_by_name(\n 'import/input:0')\n self.output = tf.get_default_graph().get_tensor_by_name(\n 'import/detections/output:0')\n except:\n if not input:\n input = tf.placeholder(tf.float32, [None, 416, 416, 3], 'input'\n )\n self.input = input\n self.input_size = self.input.get_shape().as_list()[1]\n with tf.variable_scope('detections'):\n self.output = self.graph()\n self.loader = Weight_loader(tf.global_variables('detections'),\n weight_path)\n self.sess.run(self.loader.load_now())\n self.freeze()\n\n def predict(self, input_list, confidence_theshold=0.6, iou_threshold=0.5):\n feed_dict = {self.input: input_list}\n batch_detections = self.sess.run(self.output, feed_dict)\n return predict(batch_detections, confidence_theshold, iou_threshold)\n\n def freeze(self):\n graph_def = tf.graph_util.convert_variables_to_constants(sess=self.\n sess, input_graph_def=tf.get_default_graph().as_graph_def(),\n output_node_names=['detections/output'])\n with tf.gfile.GFile('frozen_yolo.pb', 'wb') as f:\n f.write(graph_def.SerializeToString())\n\n def defrost(self):\n with tf.gfile.GFile('frozen_yolo.pb', 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n print('Found a frozen yolov3 model, defrost and use!')\n tf.import_graph_def(graph_def)\n",
"step-5": "# -*- coding: utf-8 -*-\nimport tensorflow as tf\nfrom yolov3 import *\nfrom predict import predict\nfrom load import Weight_loader\n\nclass Yolo(Yolov3):\n\n sess = tf.Session()\n \n def __init__(self, input=None, weight_path=None, is_training=False):\n self.is_training = is_training\n try:\n self.defrost()\n self.input = tf.get_default_graph().get_tensor_by_name('import/input:0')\n self.output = tf.get_default_graph().get_tensor_by_name('import/detections/output:0')\n except:\n if not input:\n input = tf.placeholder(tf.float32, [None, 416, 416, 3], 'input')\n self.input = input\n self.input_size = self.input.get_shape().as_list()[1]\n with tf.variable_scope('detections'):\n self.output = self.graph() \n self.loader = Weight_loader(tf.global_variables('detections'), weight_path)\n # self.sess.run(tf.global_variables_initializer())\n self.sess.run(self.loader.load_now())\n self.freeze()\n\n def predict(self, input_list, confidence_theshold=.6, iou_threshold=.5):\n feed_dict = {self.input: input_list}\n batch_detections = self.sess.run(self.output, feed_dict)\n return predict(batch_detections, confidence_theshold, iou_threshold)\n\n def freeze(self):\n graph_def = tf.graph_util.convert_variables_to_constants(sess=self.sess,\n input_graph_def=tf.get_default_graph().as_graph_def(),\n output_node_names=['detections/output'])\n with tf.gfile.GFile('frozen_yolo.pb', 'wb') as f:\n f.write(graph_def.SerializeToString())\n\n def defrost(self):\n with tf.gfile.GFile('frozen_yolo.pb', 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n print('Found a frozen yolov3 model, defrost and use!') \n tf.import_graph_def(graph_def)\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
<|reserved_special_token_0|>
class AppusersClient(BaseClient):
<|reserved_special_token_0|>
@api(rule='/app_users/app_order_create_info', method='get', is_json_req
=True)
def app_order_create_info(self, order_id: int=None):
"""
订单创建个人账号页信息
:return:
"""
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@api(rule='/app_users/set_allot_admin', is_json_req=True, remove_null=True)
def set_allot_admin(self, app_user_ids, allot_admin):
"""
设置分配管理员
:param app_user_ids:个人账号IDs 的数组
:param allot_admin:设置分配管理员,(0:否|1:是)
:return:
"""
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AppusersClient(BaseClient):
def __init__(self, base_url, access_token=None, **kwargs):
super().__init__(base_url, kwargs)
self.access_token = access_token
self.req_kwargs.update({'headers': {'Authorization': self.
access_token}})
self.interceptor = lambda r, j: Bunch(j)
@api(rule='/app_users/app_order_create_info', method='get', is_json_req
=True)
def app_order_create_info(self, order_id: int=None):
"""
订单创建个人账号页信息
:return:
"""
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@api(rule='/app_users/set_allot_admin', is_json_req=True, remove_null=True)
def set_allot_admin(self, app_user_ids, allot_admin):
"""
设置分配管理员
:param app_user_ids:个人账号IDs 的数组
:param allot_admin:设置分配管理员,(0:否|1:是)
:return:
"""
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AppusersClient(BaseClient):
def __init__(self, base_url, access_token=None, **kwargs):
super().__init__(base_url, kwargs)
self.access_token = access_token
self.req_kwargs.update({'headers': {'Authorization': self.
access_token}})
self.interceptor = lambda r, j: Bunch(j)
@api(rule='/app_users/app_order_create_info', method='get', is_json_req
=True)
def app_order_create_info(self, order_id: int=None):
"""
订单创建个人账号页信息
:return:
"""
def contract_upload_for_user(self, sub_firm_id, contract_file):
"""
单个创建账号的合同文件
:param contract_file: 合同文件
:param sub_firm_id: 公司id
:return:
"""
return self._call_api('/app_users/contract_upload', method='POST',
req_kwargs=dict(data={'sub_firm_id': sub_firm_id}, files=dict(
contract_file=open(contract_file, 'rb'))), disable_log=True)
@api(rule='/app_users/setting', is_json_req=True)
def app_users_setting(self, id):
"""
账号编辑设置
:param id: 个人账号id
:return:
"""
@api(rule='/app_users/set_allot_admin', is_json_req=True, remove_null=True)
def set_allot_admin(self, app_user_ids, allot_admin):
"""
设置分配管理员
:param app_user_ids:个人账号IDs 的数组
:param allot_admin:设置分配管理员,(0:否|1:是)
:return:
"""
pass
<|reserved_special_token_1|>
from qav5.http.client import BaseClient
from qav5.http.helper import api
from qav5.utils import Bunch, low_case_to_camelcase
class AppusersClient(BaseClient):
def __init__(self, base_url, access_token=None, **kwargs):
super().__init__(base_url, kwargs)
self.access_token = access_token
self.req_kwargs.update({'headers': {'Authorization': self.
access_token}})
self.interceptor = lambda r, j: Bunch(j)
@api(rule='/app_users/app_order_create_info', method='get', is_json_req
=True)
def app_order_create_info(self, order_id: int=None):
"""
订单创建个人账号页信息
:return:
"""
def contract_upload_for_user(self, sub_firm_id, contract_file):
"""
单个创建账号的合同文件
:param contract_file: 合同文件
:param sub_firm_id: 公司id
:return:
"""
return self._call_api('/app_users/contract_upload', method='POST',
req_kwargs=dict(data={'sub_firm_id': sub_firm_id}, files=dict(
contract_file=open(contract_file, 'rb'))), disable_log=True)
@api(rule='/app_users/setting', is_json_req=True)
def app_users_setting(self, id):
"""
账号编辑设置
:param id: 个人账号id
:return:
"""
@api(rule='/app_users/set_allot_admin', is_json_req=True, remove_null=True)
def set_allot_admin(self, app_user_ids, allot_admin):
"""
设置分配管理员
:param app_user_ids:个人账号IDs 的数组
:param allot_admin:设置分配管理员,(0:否|1:是)
:return:
"""
pass
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from qav5.http.client import BaseClient
from qav5.http.helper import api
from qav5.utils import Bunch, low_case_to_camelcase
class AppusersClient(BaseClient):
def __init__(self, base_url, access_token=None, **kwargs):
super().__init__(base_url, kwargs)
self.access_token = access_token
self.req_kwargs.update({"headers": {"Authorization": self.access_token}})
self.interceptor = lambda r, j: Bunch(j)
@api(rule="/app_users/app_order_create_info", method="get", is_json_req=True)
def app_order_create_info(self,order_id:int=None):
"""
订单创建个人账号页信息
:return:
"""
def contract_upload_for_user(self, sub_firm_id, contract_file):
"""
单个创建账号的合同文件
:param contract_file: 合同文件
:param sub_firm_id: 公司id
:return:
"""
return self._call_api("/app_users/contract_upload", method='POST',
req_kwargs=dict(data={"sub_firm_id": sub_firm_id},
files=dict(contract_file=open(contract_file, 'rb'))),
disable_log=True)
@api(rule="/app_users/setting", is_json_req=True)
def app_users_setting(self,id):
"""
账号编辑设置
:param id: 个人账号id
:return:
"""
@api(rule="/app_users/set_allot_admin", is_json_req=True, remove_null=True)
def set_allot_admin(self, app_user_ids, allot_admin):
"""
设置分配管理员
:param app_user_ids:个人账号IDs 的数组
:param allot_admin:设置分配管理员,(0:否|1:是)
:return:
"""
pass
|
flexible
|
{
"blob_id": "1af6bda6eb4e7a46b22379180ea82e78c67ce771",
"index": 4269,
"step-1": "<mask token>\n\n\nclass AppusersClient(BaseClient):\n <mask token>\n\n @api(rule='/app_users/app_order_create_info', method='get', is_json_req\n =True)\n def app_order_create_info(self, order_id: int=None):\n \"\"\"\n 订单创建个人账号页信息\n :return:\n \"\"\"\n <mask token>\n <mask token>\n\n @api(rule='/app_users/set_allot_admin', is_json_req=True, remove_null=True)\n def set_allot_admin(self, app_user_ids, allot_admin):\n \"\"\"\n 设置分配管理员\n :param app_user_ids:个人账号IDs 的数组\n :param allot_admin:设置分配管理员,(0:否|1:是)\n :return:\n \"\"\"\n pass\n",
"step-2": "<mask token>\n\n\nclass AppusersClient(BaseClient):\n\n def __init__(self, base_url, access_token=None, **kwargs):\n super().__init__(base_url, kwargs)\n self.access_token = access_token\n self.req_kwargs.update({'headers': {'Authorization': self.\n access_token}})\n self.interceptor = lambda r, j: Bunch(j)\n\n @api(rule='/app_users/app_order_create_info', method='get', is_json_req\n =True)\n def app_order_create_info(self, order_id: int=None):\n \"\"\"\n 订单创建个人账号页信息\n :return:\n \"\"\"\n <mask token>\n <mask token>\n\n @api(rule='/app_users/set_allot_admin', is_json_req=True, remove_null=True)\n def set_allot_admin(self, app_user_ids, allot_admin):\n \"\"\"\n 设置分配管理员\n :param app_user_ids:个人账号IDs 的数组\n :param allot_admin:设置分配管理员,(0:否|1:是)\n :return:\n \"\"\"\n pass\n",
"step-3": "<mask token>\n\n\nclass AppusersClient(BaseClient):\n\n def __init__(self, base_url, access_token=None, **kwargs):\n super().__init__(base_url, kwargs)\n self.access_token = access_token\n self.req_kwargs.update({'headers': {'Authorization': self.\n access_token}})\n self.interceptor = lambda r, j: Bunch(j)\n\n @api(rule='/app_users/app_order_create_info', method='get', is_json_req\n =True)\n def app_order_create_info(self, order_id: int=None):\n \"\"\"\n 订单创建个人账号页信息\n :return:\n \"\"\"\n\n def contract_upload_for_user(self, sub_firm_id, contract_file):\n \"\"\"\n 单个创建账号的合同文件\n :param contract_file: 合同文件\n :param sub_firm_id: 公司id\n :return:\n \"\"\"\n return self._call_api('/app_users/contract_upload', method='POST',\n req_kwargs=dict(data={'sub_firm_id': sub_firm_id}, files=dict(\n contract_file=open(contract_file, 'rb'))), disable_log=True)\n\n @api(rule='/app_users/setting', is_json_req=True)\n def app_users_setting(self, id):\n \"\"\"\n 账号编辑设置\n :param id: 个人账号id\n :return:\n \"\"\"\n\n @api(rule='/app_users/set_allot_admin', is_json_req=True, remove_null=True)\n def set_allot_admin(self, app_user_ids, allot_admin):\n \"\"\"\n 设置分配管理员\n :param app_user_ids:个人账号IDs 的数组\n :param allot_admin:设置分配管理员,(0:否|1:是)\n :return:\n \"\"\"\n pass\n",
"step-4": "from qav5.http.client import BaseClient\nfrom qav5.http.helper import api\nfrom qav5.utils import Bunch, low_case_to_camelcase\n\n\nclass AppusersClient(BaseClient):\n\n def __init__(self, base_url, access_token=None, **kwargs):\n super().__init__(base_url, kwargs)\n self.access_token = access_token\n self.req_kwargs.update({'headers': {'Authorization': self.\n access_token}})\n self.interceptor = lambda r, j: Bunch(j)\n\n @api(rule='/app_users/app_order_create_info', method='get', is_json_req\n =True)\n def app_order_create_info(self, order_id: int=None):\n \"\"\"\n 订单创建个人账号页信息\n :return:\n \"\"\"\n\n def contract_upload_for_user(self, sub_firm_id, contract_file):\n \"\"\"\n 单个创建账号的合同文件\n :param contract_file: 合同文件\n :param sub_firm_id: 公司id\n :return:\n \"\"\"\n return self._call_api('/app_users/contract_upload', method='POST',\n req_kwargs=dict(data={'sub_firm_id': sub_firm_id}, files=dict(\n contract_file=open(contract_file, 'rb'))), disable_log=True)\n\n @api(rule='/app_users/setting', is_json_req=True)\n def app_users_setting(self, id):\n \"\"\"\n 账号编辑设置\n :param id: 个人账号id\n :return:\n \"\"\"\n\n @api(rule='/app_users/set_allot_admin', is_json_req=True, remove_null=True)\n def set_allot_admin(self, app_user_ids, allot_admin):\n \"\"\"\n 设置分配管理员\n :param app_user_ids:个人账号IDs 的数组\n :param allot_admin:设置分配管理员,(0:否|1:是)\n :return:\n \"\"\"\n pass\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom qav5.http.client import BaseClient\nfrom qav5.http.helper import api\nfrom qav5.utils import Bunch, low_case_to_camelcase\n\n\nclass AppusersClient(BaseClient):\n def __init__(self, base_url, access_token=None, **kwargs):\n super().__init__(base_url, kwargs)\n self.access_token = access_token\n self.req_kwargs.update({\"headers\": {\"Authorization\": self.access_token}})\n self.interceptor = lambda r, j: Bunch(j)\n\n @api(rule=\"/app_users/app_order_create_info\", method=\"get\", is_json_req=True)\n def app_order_create_info(self,order_id:int=None):\n \"\"\"\n 订单创建个人账号页信息\n :return:\n \"\"\"\n\n def contract_upload_for_user(self, sub_firm_id, contract_file):\n \"\"\"\n 单个创建账号的合同文件\n :param contract_file: 合同文件\n :param sub_firm_id: 公司id\n :return:\n \"\"\"\n return self._call_api(\"/app_users/contract_upload\", method='POST',\n req_kwargs=dict(data={\"sub_firm_id\": sub_firm_id},\n files=dict(contract_file=open(contract_file, 'rb'))),\n disable_log=True)\n\n @api(rule=\"/app_users/setting\", is_json_req=True)\n def app_users_setting(self,id):\n \"\"\"\n 账号编辑设置\n :param id: 个人账号id\n :return:\n \"\"\"\n\n @api(rule=\"/app_users/set_allot_admin\", is_json_req=True, remove_null=True)\n def set_allot_admin(self, app_user_ids, allot_admin):\n \"\"\"\n 设置分配管理员\n :param app_user_ids:个人账号IDs 的数组\n :param allot_admin:设置分配管理员,(0:否|1:是)\n :return:\n \"\"\"\n pass\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
<|reserved_special_token_0|>
class DaqListType(IntEnum):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DaqListType(IntEnum):
<|reserved_special_token_0|>
DAQ = 1
STIM = 2
DAQ_STIM = 3
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DaqListType(IntEnum):
"""
This class describes a daq list type.
"""
DAQ = 1
STIM = 2
DAQ_STIM = 3
<|reserved_special_token_1|>
from enum import IntEnum
class DaqListType(IntEnum):
"""
This class describes a daq list type.
"""
DAQ = 1
STIM = 2
DAQ_STIM = 3
<|reserved_special_token_1|>
from enum import IntEnum
class DaqListType(IntEnum):
"""
This class describes a daq list type.
"""
DAQ = 0x01
STIM = 0x02
DAQ_STIM = 0x03
|
flexible
|
{
"blob_id": "71e0137fc02b4f56bdf87cc15c275f5cca1588c4",
"index": 8925,
"step-1": "<mask token>\n\n\nclass DaqListType(IntEnum):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DaqListType(IntEnum):\n <mask token>\n DAQ = 1\n STIM = 2\n DAQ_STIM = 3\n",
"step-3": "<mask token>\n\n\nclass DaqListType(IntEnum):\n \"\"\"\n This class describes a daq list type.\n \"\"\"\n DAQ = 1\n STIM = 2\n DAQ_STIM = 3\n",
"step-4": "from enum import IntEnum\n\n\nclass DaqListType(IntEnum):\n \"\"\"\n This class describes a daq list type.\n \"\"\"\n DAQ = 1\n STIM = 2\n DAQ_STIM = 3\n",
"step-5": "from enum import IntEnum\n\nclass DaqListType(IntEnum):\n \"\"\"\n This class describes a daq list type.\n \"\"\"\n DAQ = 0x01\n STIM = 0x02\n DAQ_STIM = 0x03",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def getLevel(levelName, no_match=logging.NOTSET):
"""Return the numeric representation of levelName.
see getLevelName() for background
"""
try:
result = logging._nameToLevel.get(levelName)
if result is not None:
return result
return int(levelName)
except ValueError:
if raiseExceptions:
raise "parameter 'levelName' must be a defined String"
return no_match
def getLevelOrName(level):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getLevel(levelName, no_match=logging.NOTSET):
"""Return the numeric representation of levelName.
see getLevelName() for background
"""
try:
result = logging._nameToLevel.get(levelName)
if result is not None:
return result
return int(levelName)
except ValueError:
if raiseExceptions:
raise "parameter 'levelName' must be a defined String"
return no_match
def getLevelOrName(level):
pass
def _checkLevel(level, case=False, type=False, map=False):
pass
try:
if isinstance(level, str):
if not case:
level = str.upper(level)
rv = _nameToLevel.get(level)
if isinstance(level, int) or not type:
level = int(level)
if level in _levelToName(level):
rv = level
else:
rv = NOTSET if map else level
if rv is None:
level = str(level)
if rv is None:
if level in _levelToName or not type and int(level
) in _levelToName:
rv = NOTSET if level < NOTSET else level
if rv is None and map:
raise ValueError
else:
rv = level
rv = int(level)
except (TypeError, ValueError, KeyError) as err:
if raiseExceptions:
raise TypeError('Level not an integer or a valid string: %r' %
level) from err
except Exception:
pass
return NOTSET - 1 if rv is None else rv
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__all__ = ['getLevelName', 'getLevel']
<|reserved_special_token_0|>
def getLevelName(level, format='%s', no_match=None):
"""Return the textual representation of 'level'.
Whether predefined (eg. CRITICAL -> "CRITICAL") or user-defined via
addLevelName(), the string associated with 'level' is chosen.
Otherwise, 'level' (no_match == NONE) or 'no_match' is returned
subject to formatting per 'format'.
In the spirit of "be liberal in what you accept", any value of 'level'
that survives int() will be accepted (FUTURE: subject to 'strict').
Issue #29220 introduced the BAD IDEA that passing an empty string
(an obvious TypeError) would return same. This was requested in order
to squash the fall-thru behavior of returning "Level %s", when the
multi-word response was itself the actual ERROR since it broke all
field-based log processing! The astute reader will note that an empty
string causes the same pathology...
DEPRECATION WARNING:
This function WRONGLY returned the mapped Integer if a String form
was provided. This violates the clearly stated purpose and forces
the caller into defensive Type checks or suffer future TypeErrors.
NOTE:
Does no bounds or validity checks. Use _checkLevel().
FUTURE:
In strict mode, enforce parameter dataType, case, or membership.
"""
try:
if level in logging._nameToLevel:
return format % level
result = logging._levelToName.get(int(level))
if result is not None:
return format % result
except TypeError:
if raiseExceptions:
raise "parameter 'level' must reduce to an Integer"
except ValueError:
pass
return format % level if no_match is None else format % no_match
def getLevel(levelName, no_match=logging.NOTSET):
"""Return the numeric representation of levelName.
see getLevelName() for background
"""
try:
result = logging._nameToLevel.get(levelName)
if result is not None:
return result
return int(levelName)
except ValueError:
if raiseExceptions:
raise "parameter 'levelName' must be a defined String"
return no_match
def getLevelOrName(level):
pass
def _checkLevel(level, case=False, type=False, map=False):
pass
try:
if isinstance(level, str):
if not case:
level = str.upper(level)
rv = _nameToLevel.get(level)
if isinstance(level, int) or not type:
level = int(level)
if level in _levelToName(level):
rv = level
else:
rv = NOTSET if map else level
if rv is None:
level = str(level)
if rv is None:
if level in _levelToName or not type and int(level
) in _levelToName:
rv = NOTSET if level < NOTSET else level
if rv is None and map:
raise ValueError
else:
rv = level
rv = int(level)
except (TypeError, ValueError, KeyError) as err:
if raiseExceptions:
raise TypeError('Level not an integer or a valid string: %r' %
level) from err
except Exception:
pass
return NOTSET - 1 if rv is None else rv
<|reserved_special_token_1|>
from __future__ import print_function, absolute_import, unicode_literals, division
__all__ = ['getLevelName', 'getLevel']
import logging
def getLevelName(level, format='%s', no_match=None):
"""Return the textual representation of 'level'.
Whether predefined (eg. CRITICAL -> "CRITICAL") or user-defined via
addLevelName(), the string associated with 'level' is chosen.
Otherwise, 'level' (no_match == NONE) or 'no_match' is returned
subject to formatting per 'format'.
In the spirit of "be liberal in what you accept", any value of 'level'
that survives int() will be accepted (FUTURE: subject to 'strict').
Issue #29220 introduced the BAD IDEA that passing an empty string
(an obvious TypeError) would return same. This was requested in order
to squash the fall-thru behavior of returning "Level %s", when the
multi-word response was itself the actual ERROR since it broke all
field-based log processing! The astute reader will note that an empty
string causes the same pathology...
DEPRECATION WARNING:
This function WRONGLY returned the mapped Integer if a String form
was provided. This violates the clearly stated purpose and forces
the caller into defensive Type checks or suffer future TypeErrors.
NOTE:
Does no bounds or validity checks. Use _checkLevel().
FUTURE:
In strict mode, enforce parameter dataType, case, or membership.
"""
try:
if level in logging._nameToLevel:
return format % level
result = logging._levelToName.get(int(level))
if result is not None:
return format % result
except TypeError:
if raiseExceptions:
raise "parameter 'level' must reduce to an Integer"
except ValueError:
pass
return format % level if no_match is None else format % no_match
def getLevel(levelName, no_match=logging.NOTSET):
"""Return the numeric representation of levelName.
see getLevelName() for background
"""
try:
result = logging._nameToLevel.get(levelName)
if result is not None:
return result
return int(levelName)
except ValueError:
if raiseExceptions:
raise "parameter 'levelName' must be a defined String"
return no_match
def getLevelOrName(level):
pass
def _checkLevel(level, case=False, type=False, map=False):
pass
try:
if isinstance(level, str):
if not case:
level = str.upper(level)
rv = _nameToLevel.get(level)
if isinstance(level, int) or not type:
level = int(level)
if level in _levelToName(level):
rv = level
else:
rv = NOTSET if map else level
if rv is None:
level = str(level)
if rv is None:
if level in _levelToName or not type and int(level
) in _levelToName:
rv = NOTSET if level < NOTSET else level
if rv is None and map:
raise ValueError
else:
rv = level
rv = int(level)
except (TypeError, ValueError, KeyError) as err:
if raiseExceptions:
raise TypeError('Level not an integer or a valid string: %r' %
level) from err
except Exception:
pass
return NOTSET - 1 if rv is None else rv
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, unicode_literals, division
__all__ = ['getLevelName', 'getLevel'] #, 'getLevelOrName', '_checkLevel']
import logging
# private re-implementations till Python Core fixes Lib/logging
# XXX bug numbers here
def getLevelName(level, format='%s', no_match=None):
# strict={'case': False, 'type': False, 'map': False},
# fixup=False
"""Return the textual representation of 'level'.
Whether predefined (eg. CRITICAL -> "CRITICAL") or user-defined via
addLevelName(), the string associated with 'level' is chosen.
Otherwise, 'level' (no_match == NONE) or 'no_match' is returned
subject to formatting per 'format'.
In the spirit of "be liberal in what you accept", any value of 'level'
that survives int() will be accepted (FUTURE: subject to 'strict').
Issue #29220 introduced the BAD IDEA that passing an empty string
(an obvious TypeError) would return same. This was requested in order
to squash the fall-thru behavior of returning "Level %s", when the
multi-word response was itself the actual ERROR since it broke all
field-based log processing! The astute reader will note that an empty
string causes the same pathology...
DEPRECATION WARNING:
This function WRONGLY returned the mapped Integer if a String form
was provided. This violates the clearly stated purpose and forces
the caller into defensive Type checks or suffer future TypeErrors.
NOTE:
Does no bounds or validity checks. Use _checkLevel().
FUTURE:
In strict mode, enforce parameter dataType, case, or membership.
"""
try:
# check Name->Level in case called incorrectly (backward compat)
if level in logging._nameToLevel:
return format % level
# retval = _checkLevel(level, flags, fix=T/F)
# if isinstance(retval, bool) then handle pass/fail, else update level with fixed value
result = logging._levelToName.get(int(level))
if result is not None:
return format % result
except TypeError:
if raiseExceptions:
raise("parameter 'level' must reduce to an Integer")
except ValueError:
pass
return format % level if no_match is None else format % no_match
def getLevel(levelName, no_match=logging.NOTSET):
    """Return the numeric representation of 'levelName'.

    A registered name (eg. "DEBUG") maps to its level; otherwise any value
    that survives int() (eg. "42", 25) is returned as an Integer.

    see getLevelName() for background

    Raises:
        ValueError: if 'levelName' is neither a defined name nor numeric
            and logging.raiseExceptions is set; otherwise 'no_match' is
            returned.
    """
    try:
        result = logging._nameToLevel.get(levelName)
        if result is not None:
            return result

        return int(levelName)

    except ValueError as err:
        if logging.raiseExceptions:
            raise ValueError("parameter 'levelName' must be a defined String") from err

    return no_match
def getLevelOrName(level):
    """Return the complementary representation of 'level'.

    NOTE(review): not yet implemented -- presumably intended to map a
    numeric level to its name and a name to its number in one call
    (combining getLevel() and getLevelName()); confirm before use.
    Currently returns None.
    """
    pass
def _checkLevel(level, case=False, type=False, map=False):
    """Check parameter against defined logging levels.

    Returns the corresponding Integer for a known name, the Integer
    itself for numeric input (arbitrary Integers are tolerated -- the
    test harness injects '+1'), or NOTSET - 1 when nothing matched and
    exceptions are suppressed.

    case -- if True, match names case-sensitively (no upper-casing).
    type -- if True, do not coerce numeric strings to Integers.
    map  -- if True, require the result to be a registered level.

    Raises:
        TypeError: on any failure when logging.raiseExceptions is set.
            The test harness (../test/test_logging) expects 'TypeError'
            ONLY, so ValueError/KeyError are re-raised as TypeError.
    """
    rv = None
    try:
        if isinstance(level, str):
            name = level if case else level.upper()
            rv = logging._nameToLevel.get(name)
            if rv is None and not type:
                # tolerate numeric strings such as '10'
                rv = int(level)
        elif isinstance(level, int):
            rv = int(level)
        else:
            # neither a name nor an Integer -- not coercible at all
            raise TypeError
        if rv is not None and map and rv not in logging._levelToName:
            # strict membership requested but value is unregistered
            raise ValueError
    except (TypeError, ValueError, KeyError) as err:
        if logging.raiseExceptions:
            # test harness (../test/test_logging) expects 'TypeError' ONLY
            raise TypeError("Level not an integer or a valid string: %r"
                            % level) from err
        rv = None

    return logging.NOTSET - 1 if rv is None else rv
|
flexible
|
{
"blob_id": "ba8b46f830abaaaedf1730cba2f04fd677f11da4",
"index": 182,
"step-1": "<mask token>\n\n\ndef getLevel(levelName, no_match=logging.NOTSET):\n \"\"\"Return the numeric representation of levelName.\n\n see getLevelName() for background\n \"\"\"\n try:\n result = logging._nameToLevel.get(levelName)\n if result is not None:\n return result\n return int(levelName)\n except ValueError:\n if raiseExceptions:\n raise \"parameter 'levelName' must be a defined String\"\n return no_match\n\n\ndef getLevelOrName(level):\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getLevel(levelName, no_match=logging.NOTSET):\n \"\"\"Return the numeric representation of levelName.\n\n see getLevelName() for background\n \"\"\"\n try:\n result = logging._nameToLevel.get(levelName)\n if result is not None:\n return result\n return int(levelName)\n except ValueError:\n if raiseExceptions:\n raise \"parameter 'levelName' must be a defined String\"\n return no_match\n\n\ndef getLevelOrName(level):\n pass\n\n\ndef _checkLevel(level, case=False, type=False, map=False):\n pass\n try:\n if isinstance(level, str):\n if not case:\n level = str.upper(level)\n rv = _nameToLevel.get(level)\n if isinstance(level, int) or not type:\n level = int(level)\n if level in _levelToName(level):\n rv = level\n else:\n rv = NOTSET if map else level\n if rv is None:\n level = str(level)\n if rv is None:\n if level in _levelToName or not type and int(level\n ) in _levelToName:\n rv = NOTSET if level < NOTSET else level\n if rv is None and map:\n raise ValueError\n else:\n rv = level\n rv = int(level)\n except (TypeError, ValueError, KeyError) as err:\n if raiseExceptions:\n raise TypeError('Level not an integer or a valid string: %r' %\n level) from err\n except Exception:\n pass\n return NOTSET - 1 if rv is None else rv\n",
"step-3": "<mask token>\n__all__ = ['getLevelName', 'getLevel']\n<mask token>\n\n\ndef getLevelName(level, format='%s', no_match=None):\n \"\"\"Return the textual representation of 'level'.\n\n Whether predefined (eg. CRITICAL -> \"CRITICAL\") or user-defined via\n addLevelName(), the string associated with 'level' is chosen.\n Otherwise, 'level' (no_match == NONE) or 'no_match' is returned\n subject to formatting per 'format'.\n\n In the spirit of \"be liberal in what you accept\", any value of 'level'\n that survives int() will be accepted (FUTURE: subject to 'strict').\n\n Issue #29220 introduced the BAD IDEA that passing an empty string\n (an obvious TypeError) would return same. This was requested in order\n to squash the fall-thru behavior of returning \"Level %s\", when the\n multi-word response was itself the actual ERROR since it broke all\n field-based log processing! The astute reader will note that an empty\n string causes the same pathology...\n\n DEPRECATION WARNING:\n This function WRONGLY returned the mapped Integer if a String form\n was provided. This violates the clearly stated purpose and forces\n the caller into defensive Type checks or suffer future TypeErrors.\n\n NOTE:\n Does no bounds or validity checks. 
Use _checkLevel().\n\n FUTURE:\n In strict mode, enforce parameter dataType, case, or membership.\n \"\"\"\n try:\n if level in logging._nameToLevel:\n return format % level\n result = logging._levelToName.get(int(level))\n if result is not None:\n return format % result\n except TypeError:\n if raiseExceptions:\n raise \"parameter 'level' must reduce to an Integer\"\n except ValueError:\n pass\n return format % level if no_match is None else format % no_match\n\n\ndef getLevel(levelName, no_match=logging.NOTSET):\n \"\"\"Return the numeric representation of levelName.\n\n see getLevelName() for background\n \"\"\"\n try:\n result = logging._nameToLevel.get(levelName)\n if result is not None:\n return result\n return int(levelName)\n except ValueError:\n if raiseExceptions:\n raise \"parameter 'levelName' must be a defined String\"\n return no_match\n\n\ndef getLevelOrName(level):\n pass\n\n\ndef _checkLevel(level, case=False, type=False, map=False):\n pass\n try:\n if isinstance(level, str):\n if not case:\n level = str.upper(level)\n rv = _nameToLevel.get(level)\n if isinstance(level, int) or not type:\n level = int(level)\n if level in _levelToName(level):\n rv = level\n else:\n rv = NOTSET if map else level\n if rv is None:\n level = str(level)\n if rv is None:\n if level in _levelToName or not type and int(level\n ) in _levelToName:\n rv = NOTSET if level < NOTSET else level\n if rv is None and map:\n raise ValueError\n else:\n rv = level\n rv = int(level)\n except (TypeError, ValueError, KeyError) as err:\n if raiseExceptions:\n raise TypeError('Level not an integer or a valid string: %r' %\n level) from err\n except Exception:\n pass\n return NOTSET - 1 if rv is None else rv\n",
"step-4": "from __future__ import print_function, absolute_import, unicode_literals, division\n__all__ = ['getLevelName', 'getLevel']\nimport logging\n\n\ndef getLevelName(level, format='%s', no_match=None):\n \"\"\"Return the textual representation of 'level'.\n\n Whether predefined (eg. CRITICAL -> \"CRITICAL\") or user-defined via\n addLevelName(), the string associated with 'level' is chosen.\n Otherwise, 'level' (no_match == NONE) or 'no_match' is returned\n subject to formatting per 'format'.\n\n In the spirit of \"be liberal in what you accept\", any value of 'level'\n that survives int() will be accepted (FUTURE: subject to 'strict').\n\n Issue #29220 introduced the BAD IDEA that passing an empty string\n (an obvious TypeError) would return same. This was requested in order\n to squash the fall-thru behavior of returning \"Level %s\", when the\n multi-word response was itself the actual ERROR since it broke all\n field-based log processing! The astute reader will note that an empty\n string causes the same pathology...\n\n DEPRECATION WARNING:\n This function WRONGLY returned the mapped Integer if a String form\n was provided. This violates the clearly stated purpose and forces\n the caller into defensive Type checks or suffer future TypeErrors.\n\n NOTE:\n Does no bounds or validity checks. 
Use _checkLevel().\n\n FUTURE:\n In strict mode, enforce parameter dataType, case, or membership.\n \"\"\"\n try:\n if level in logging._nameToLevel:\n return format % level\n result = logging._levelToName.get(int(level))\n if result is not None:\n return format % result\n except TypeError:\n if raiseExceptions:\n raise \"parameter 'level' must reduce to an Integer\"\n except ValueError:\n pass\n return format % level if no_match is None else format % no_match\n\n\ndef getLevel(levelName, no_match=logging.NOTSET):\n \"\"\"Return the numeric representation of levelName.\n\n see getLevelName() for background\n \"\"\"\n try:\n result = logging._nameToLevel.get(levelName)\n if result is not None:\n return result\n return int(levelName)\n except ValueError:\n if raiseExceptions:\n raise \"parameter 'levelName' must be a defined String\"\n return no_match\n\n\ndef getLevelOrName(level):\n pass\n\n\ndef _checkLevel(level, case=False, type=False, map=False):\n pass\n try:\n if isinstance(level, str):\n if not case:\n level = str.upper(level)\n rv = _nameToLevel.get(level)\n if isinstance(level, int) or not type:\n level = int(level)\n if level in _levelToName(level):\n rv = level\n else:\n rv = NOTSET if map else level\n if rv is None:\n level = str(level)\n if rv is None:\n if level in _levelToName or not type and int(level\n ) in _levelToName:\n rv = NOTSET if level < NOTSET else level\n if rv is None and map:\n raise ValueError\n else:\n rv = level\n rv = int(level)\n except (TypeError, ValueError, KeyError) as err:\n if raiseExceptions:\n raise TypeError('Level not an integer or a valid string: %r' %\n level) from err\n except Exception:\n pass\n return NOTSET - 1 if rv is None else rv\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function, absolute_import, unicode_literals, division\n\n__all__ = ['getLevelName', 'getLevel'] #, 'getLevelOrName', '_checkLevel']\n\nimport logging\n\n# private re-implementations till Python Core fixes Lib/logging\n# XXX bug numbers here\n\ndef getLevelName(level, format='%s', no_match=None):\n# strict={'case': False, 'type': False, 'map': False},\n# fixup=False\n \"\"\"Return the textual representation of 'level'.\n\n Whether predefined (eg. CRITICAL -> \"CRITICAL\") or user-defined via\n addLevelName(), the string associated with 'level' is chosen.\n Otherwise, 'level' (no_match == NONE) or 'no_match' is returned\n subject to formatting per 'format'.\n\n In the spirit of \"be liberal in what you accept\", any value of 'level'\n that survives int() will be accepted (FUTURE: subject to 'strict').\n\n Issue #29220 introduced the BAD IDEA that passing an empty string\n (an obvious TypeError) would return same. This was requested in order\n to squash the fall-thru behavior of returning \"Level %s\", when the\n multi-word response was itself the actual ERROR since it broke all\n field-based log processing! The astute reader will note that an empty\n string causes the same pathology...\n\n DEPRECATION WARNING:\n This function WRONGLY returned the mapped Integer if a String form\n was provided. This violates the clearly stated purpose and forces\n the caller into defensive Type checks or suffer future TypeErrors.\n\n NOTE:\n Does no bounds or validity checks. 
Use _checkLevel().\n\n FUTURE:\n In strict mode, enforce parameter dataType, case, or membership.\n \"\"\"\n\n try:\n # check Name->Level in case called incorrectly (backward compat)\n if level in logging._nameToLevel:\n return format % level\n\n # retval = _checkLevel(level, flags, fix=T/F)\n # if isinstance(retval, bool) then handle pass/fail, else update level with fixed value\n\n result = logging._levelToName.get(int(level))\n if result is not None:\n return format % result\n\n except TypeError:\n if raiseExceptions:\n raise(\"parameter 'level' must reduce to an Integer\")\n except ValueError:\n pass\n\n return format % level if no_match is None else format % no_match\n\n\ndef getLevel(levelName, no_match=logging.NOTSET):\n# strict={'case': False, 'type': False, 'map': False},\n# fixup=False\n \"\"\"Return the numeric representation of levelName.\n\n see getLevelName() for background\n \"\"\"\n try:\n result = logging._nameToLevel.get(levelName)\n if result is not None:\n return result\n\n return int(levelName)\n\n except ValueError:\n if raiseExceptions:\n raise(\"parameter 'levelName' must be a defined String\")\n\n return no_match\n\n\ndef getLevelOrName(level):\n pass\n\n\ndef _checkLevel(level, case=False, type=False, map=False):\n #TODO define check as dictionary\n pass\n # \"\"\"Check parameter against defined values\n #\n # Returns corresponding or original Integer, or NOTSET if no-match.\n # Will raise TypeError or ValueError as applicable.\n #\n # NOTE: Since all logging.$level() functions choose to emit based on\n # numeric comparison, a default of ERROR would be more logical.\n # \"\"\"\n try:\n if isinstance(level, str):\n if not case:\n level = str.upper(level)\n rv = _nameToLevel.get(level)\n # if rv is None:\n # XXX what now?\n if isinstance(level, int) or not type:\n # flip negative values\n level = int(level)\n if level in _levelToName(level):\n rv = level\n else:\n # tolerate any Integer value\n rv = NOTSET if map else level\n if rv is 
None:\n level = str(level)\n if rv is None:\n if level in _levelToName or (not type and int(level) in _levelToName):\n rv = NOTSET if level < NOTSET else level\n # rv = level\n if rv is None and map:\n raise ValueError\n else:\n # return parameter even though invalid\n rv = level\n # sor level < NOTSET or level > ???:\n # #raise ValueError\n # if isinstance(level, int):\n # XXX check >NOTSET\n # else:\n # raise TypeError\n #FIXME - test harness injects '+1', so tolerating\n # arbitrary integers is expected behavior. Why?\n # raise ValueError\n rv = int(level)\n except (TypeError, ValueError, KeyError) as err:\n if raiseExceptions:\n # test harness (../test/test_logging) expects 'TypeError' ONLY\n raise TypeError(\"Level not an integer or a valid string: %r\" % level) from err\n except Exception:\n pass\n\n return NOTSET - 1 if rv is None else rv\n",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
import os
import pickle
import collections
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython import embed
from optimizers.utils_1 import Model_1, Architecture_1
from optimizers.utils import Model, Architecture
# Per-optimizer line colors shared by every trajectory figure.
colors = {
    'BOHB-PC-DARTS': 'darkorange',
    'BOHB-DARTS': 'dodgerblue',
    'BOHB-GDAS': 'forestgreen',
    'RE': 'crimson',
    'RS': 'darkorchid',
    'RL': 'sienna',
    'TPE': 'deepskyblue',
    'SMAC': 'violet',
    'HB': 'darkgray',
    'BOHB': 'gold',
}

# Marker shapes for the same optimizers (keys mirror the color table).
markers = {
    'BOHB-DARTS': '^',
    'BOHB-PC-DARTS': 'v',
    'BOHB-GDAS': 'x',
    'RS': 'D',
    'RE': 'o',
    'RL': 's',
    'SMAC': 'h',
    'HB': '>',
    'BOHB': '*',
    'TPE': '<',
}
def get_incumbent(losses, time_stamps):
    """Turn a run's losses into its incumbent (running-best) trajectory.

    Args:
        losses: sequence of loss values in evaluation order.
        time_stamps: matching sequence of (cumulative) time stamps.

    Returns:
        (inc_times, inc_losses): two lists of equal length, where
        inc_losses[i] is the best loss seen up to and including step i.
        (Callers unpack this pair directly, as the previous
        dict.values() return was also unpacked positionally.)
    """
    inc_times = []
    inc_losses = []
    best = float('inf')
    for loss, t in zip(losses, time_stamps):
        # track the minimum seen so far; a repeat of the incumbent is
        # appended when the new loss is not an improvement
        if loss < best:
            best = loss
        inc_losses.append(best)
        inc_times.append(t)
    return inc_times, inc_losses
def get_trajectories(args, global_min, path='regularized_evolution',
                     methods=['RE', 'RS']):
    """Load per-seed optimizer runs and build incumbent regret trajectories.

    For every method, seeds 0..499 are read from
    '<path>/<method>/algo_<m>_0_ssp_<args.space>_seed_<seed>.obj' until the
    first missing file. Each pickle is a list of evaluated models
    (assumed to expose .test_accuracy, .training_time and, for HB/BOHB,
    .budget -- TODO confirm against the writer).

    Args:
        args: namespace providing .space (search-space id in filenames).
        global_min: best achievable test error; subtracted so losses are
            regrets.
        path: root directory of the stored runs.
        methods: method names to load (default list is never mutated).

    Returns:
        dict mapping method -> {'time_stamps': 1-d array of merged time
        steps, 'losses': array of shape (n_seeds, n_steps)}.
    """
    all_trajectories = {}

    for m in methods:
        dfs = []
        for seed in range(500):
            filename = os.path.join(path, m,
                                    'algo_{}_0_ssp_{}_seed_{}.obj'.format(
                                        m, args.space, seed))
            try:
                with open(filename, 'rb') as f:
                    data = pickle.load(f)
                losses = [1 - x.test_accuracy - global_min for x in data]
                # cumulative wall-clock time (np.cumsum replaces the
                # quadratic "sum of prefixes" list comprehension)
                times = np.cumsum([x.training_time for x in data])
                if m in ['HB', 'BOHB']:
                    # truncate multi-fidelity runs to a budget of 280
                    # full (108-epoch) evaluations
                    costs = np.cumsum([x.budget for x in data])
                    n = len(np.where(costs <= 280 * 108)[0])
                    times, losses = get_incumbent(losses[:n], times[:n])
                else:
                    times, losses = get_incumbent(losses, times)
                print(seed, ' MIN: ', min(losses))
                df = pd.DataFrame({str(seed): losses}, index=times)
                dfs.append(df)
            except FileNotFoundError:
                # seeds are stored contiguously; first gap ends the scan
                break

        df = merge_and_fill_trajectories(dfs, default_value=None)
        if df.empty:
            continue
        print(m, df.shape)

        all_trajectories[m] = {
            'time_stamps': np.array(df.index),
            'losses': np.array(df.T)
        }

    return all_trajectories
def merge_and_fill_trajectories(pandas_data_frames, default_value=None):
# merge all tracjectories keeping all time steps
df = pd.DataFrame().join(pandas_data_frames, how='outer')
# forward fill to make it a propper step function
df=df.fillna(method='ffill')
if default_value is None:
# backward fill to replace the NaNs for the early times by
# the performance of a random configuration
df=df.fillna(method='bfill')
else:
df=df.fillna(default_value)
return(df)
def plot_losses(fig, ax, axins, incumbent_trajectories, regret=True,
                incumbent=None, show=True, linewidth=3, marker_size=10,
                xscale='log', xlabel='wall clock time [s]', yscale='log',
                ylabel=None, legend_loc = 'best', xlim=None, ylim=None,
                plot_mean=True, labels={}, markers=markers, colors=colors,
                figsize=(16,9)):
    """Draw mean/median regret trajectories onto 'ax' (and inset 'axins').

    Each entry of 'incumbent_trajectories' maps a method name to
    {'time_stamps': 1-d array, 'losses': (n_seeds, n_steps) array}.
    When 'regret' is True, the global incumbent (lowest final loss across
    all methods, unless 'incumbent' is given) is subtracted from every
    trajectory. DARTS/GDAS methods additionally get a +-2*SEM shaded band.

    NOTE(review): 'show', 'xscale', 'xlabel', 'yscale', 'legend_loc',
    'xlim', 'ylim' and 'figsize' are currently unused, and 'ylabel' is
    assigned but never applied to the axes -- callers presumably style
    the figure themselves; confirm before removing.
    NOTE(review): 'labels={}' is a mutable default; safe only because it
    is read-only here (.get).

    Returns:
        (fig, ax) unchanged, for chaining.
    """
    if regret:
        if ylabel is None: ylabel = 'regret'
        # find lowest performance in the data to update incumbent
        if incumbent is None:
            incumbent = np.inf
            for tr in incumbent_trajectories.values():
                # best final loss of any seed for this method
                incumbent = min(tr['losses'][:,-1].min(), incumbent)
            print('incumbent value: ', incumbent)
    for m,tr in incumbent_trajectories.items():
        trajectory = np.copy(tr['losses'])
        if (trajectory.shape[0] == 0): continue
        if regret: trajectory -= incumbent

        # standard error of the mean across seeds (sample variance / n)
        sem = np.sqrt(trajectory.var(axis=0, ddof=1)/tr['losses'].shape[0])
        if plot_mean:
            mean = trajectory.mean(axis=0)
        else:
            mean = np.median(trajectory,axis=0)
            # 1.253 ~= sqrt(pi/2): SEM -> standard error of the median
            # (under approximate normality)
            sem *= 1.253

        if 'DARTS' in m or 'GDAS' in m:
            # uncertainty band of +-2 standard errors
            ax.fill_between(tr['time_stamps'], mean-2*sem, mean+2*sem,
                            color=colors[m], alpha=0.2)

        ax.plot(tr['time_stamps'],mean,
                label=labels.get(m, m), color=colors.get(m, None),linewidth=linewidth,
                marker=markers.get(m,None), markersize=marker_size, markevery=(0.1,0.1))
        if axins is not None:
            # mirror the curve into the zoomed inset axes
            axins.plot(tr['time_stamps'],mean,
                label=labels.get(m, m), color=colors.get(m, None),linewidth=linewidth,
                marker=markers.get(m,None), markersize=marker_size, markevery=(0.1,0.1))
    return (fig, ax)
|
normal
|
{
"blob_id": "a757bbb9ad2f6f5bf04cdf4091b97841b8e40432",
"index": 6601,
"step-1": "<mask token>\n\n\ndef get_trajectories(args, global_min, path='regularized_evolution',\n methods=['RE', 'RS']):\n all_trajectories = {}\n for m in methods:\n dfs = []\n for seed in range(500):\n filename = os.path.join(path, m, 'algo_{}_0_ssp_{}_seed_{}.obj'\n .format(m, args.space, seed))\n try:\n with open(filename, 'rb') as f:\n data = pickle.load(f)\n losses = [(1 - x.test_accuracy - global_min) for x in data]\n times = np.array([x.training_time for x in data])\n times = [np.sum(times[:i + 1]) for i in range(len(times))]\n if m in ['HB', 'BOHB']:\n costs = np.array([x.budget for x in data])\n costs = np.array([np.sum(costs[:i + 1]) for i in\n range(len(costs))])\n n = len(np.where(costs <= 280 * 108)[0])\n times, losses = get_incumbent(losses[:n], times[:n])\n else:\n times, losses = get_incumbent(losses, times)\n print(seed, ' MIN: ', min(losses))\n df = pd.DataFrame({str(seed): losses}, index=times)\n dfs.append(df)\n except FileNotFoundError:\n break\n df = merge_and_fill_trajectories(dfs, default_value=None)\n if df.empty:\n continue\n print(m, df.shape)\n all_trajectories[m] = {'time_stamps': np.array(df.index), 'losses':\n np.array(df.T)}\n return all_trajectories\n\n\ndef merge_and_fill_trajectories(pandas_data_frames, default_value=None):\n df = pd.DataFrame().join(pandas_data_frames, how='outer')\n df = df.fillna(method='ffill')\n if default_value is None:\n df = df.fillna(method='bfill')\n else:\n df = df.fillna(default_value)\n return df\n\n\ndef plot_losses(fig, ax, axins, incumbent_trajectories, regret=True,\n incumbent=None, show=True, linewidth=3, marker_size=10, xscale='log',\n xlabel='wall clock time [s]', yscale='log', ylabel=None, legend_loc=\n 'best', xlim=None, ylim=None, plot_mean=True, labels={}, markers=\n markers, colors=colors, figsize=(16, 9)):\n if regret:\n if ylabel is None:\n ylabel = 'regret'\n if incumbent is None:\n incumbent = np.inf\n for tr in incumbent_trajectories.values():\n incumbent = min(tr['losses'][:, 
-1].min(), incumbent)\n print('incumbent value: ', incumbent)\n for m, tr in incumbent_trajectories.items():\n trajectory = np.copy(tr['losses'])\n if trajectory.shape[0] == 0:\n continue\n if regret:\n trajectory -= incumbent\n sem = np.sqrt(trajectory.var(axis=0, ddof=1) / tr['losses'].shape[0])\n if plot_mean:\n mean = trajectory.mean(axis=0)\n else:\n mean = np.median(trajectory, axis=0)\n sem *= 1.253\n if 'DARTS' in m or 'GDAS' in m:\n ax.fill_between(tr['time_stamps'], mean - 2 * sem, mean + 2 *\n sem, color=colors[m], alpha=0.2)\n ax.plot(tr['time_stamps'], mean, label=labels.get(m, m), color=\n colors.get(m, None), linewidth=linewidth, marker=markers.get(m,\n None), markersize=marker_size, markevery=(0.1, 0.1))\n if axins is not None:\n axins.plot(tr['time_stamps'], mean, label=labels.get(m, m),\n color=colors.get(m, None), linewidth=linewidth, marker=\n markers.get(m, None), markersize=marker_size, markevery=(\n 0.1, 0.1))\n return fig, ax\n",
"step-2": "<mask token>\n\n\ndef get_incumbent(losses, time_stamps):\n return_dict = {'time_stamps': [], 'losses': []}\n current_incumbent = float('inf')\n incumbent_budget = -float('inf')\n for l, t in zip(losses, time_stamps):\n if l < current_incumbent:\n current_incumbent = l\n return_dict['losses'].append(l)\n return_dict['time_stamps'].append(t)\n else:\n return_dict['losses'].append(return_dict['losses'][-1])\n return_dict['time_stamps'].append(t)\n return return_dict.values()\n\n\ndef get_trajectories(args, global_min, path='regularized_evolution',\n methods=['RE', 'RS']):\n all_trajectories = {}\n for m in methods:\n dfs = []\n for seed in range(500):\n filename = os.path.join(path, m, 'algo_{}_0_ssp_{}_seed_{}.obj'\n .format(m, args.space, seed))\n try:\n with open(filename, 'rb') as f:\n data = pickle.load(f)\n losses = [(1 - x.test_accuracy - global_min) for x in data]\n times = np.array([x.training_time for x in data])\n times = [np.sum(times[:i + 1]) for i in range(len(times))]\n if m in ['HB', 'BOHB']:\n costs = np.array([x.budget for x in data])\n costs = np.array([np.sum(costs[:i + 1]) for i in\n range(len(costs))])\n n = len(np.where(costs <= 280 * 108)[0])\n times, losses = get_incumbent(losses[:n], times[:n])\n else:\n times, losses = get_incumbent(losses, times)\n print(seed, ' MIN: ', min(losses))\n df = pd.DataFrame({str(seed): losses}, index=times)\n dfs.append(df)\n except FileNotFoundError:\n break\n df = merge_and_fill_trajectories(dfs, default_value=None)\n if df.empty:\n continue\n print(m, df.shape)\n all_trajectories[m] = {'time_stamps': np.array(df.index), 'losses':\n np.array(df.T)}\n return all_trajectories\n\n\ndef merge_and_fill_trajectories(pandas_data_frames, default_value=None):\n df = pd.DataFrame().join(pandas_data_frames, how='outer')\n df = df.fillna(method='ffill')\n if default_value is None:\n df = df.fillna(method='bfill')\n else:\n df = df.fillna(default_value)\n return df\n\n\ndef plot_losses(fig, ax, axins, 
incumbent_trajectories, regret=True,\n incumbent=None, show=True, linewidth=3, marker_size=10, xscale='log',\n xlabel='wall clock time [s]', yscale='log', ylabel=None, legend_loc=\n 'best', xlim=None, ylim=None, plot_mean=True, labels={}, markers=\n markers, colors=colors, figsize=(16, 9)):\n if regret:\n if ylabel is None:\n ylabel = 'regret'\n if incumbent is None:\n incumbent = np.inf\n for tr in incumbent_trajectories.values():\n incumbent = min(tr['losses'][:, -1].min(), incumbent)\n print('incumbent value: ', incumbent)\n for m, tr in incumbent_trajectories.items():\n trajectory = np.copy(tr['losses'])\n if trajectory.shape[0] == 0:\n continue\n if regret:\n trajectory -= incumbent\n sem = np.sqrt(trajectory.var(axis=0, ddof=1) / tr['losses'].shape[0])\n if plot_mean:\n mean = trajectory.mean(axis=0)\n else:\n mean = np.median(trajectory, axis=0)\n sem *= 1.253\n if 'DARTS' in m or 'GDAS' in m:\n ax.fill_between(tr['time_stamps'], mean - 2 * sem, mean + 2 *\n sem, color=colors[m], alpha=0.2)\n ax.plot(tr['time_stamps'], mean, label=labels.get(m, m), color=\n colors.get(m, None), linewidth=linewidth, marker=markers.get(m,\n None), markersize=marker_size, markevery=(0.1, 0.1))\n if axins is not None:\n axins.plot(tr['time_stamps'], mean, label=labels.get(m, m),\n color=colors.get(m, None), linewidth=linewidth, marker=\n markers.get(m, None), markersize=marker_size, markevery=(\n 0.1, 0.1))\n return fig, ax\n",
"step-3": "<mask token>\ncolors = {'BOHB-PC-DARTS': 'darkorange', 'BOHB-DARTS': 'dodgerblue',\n 'BOHB-GDAS': 'forestgreen', 'RE': 'crimson', 'RS': 'darkorchid', 'RL':\n 'sienna', 'TPE': 'deepskyblue', 'SMAC': 'violet', 'HB': 'darkgray',\n 'BOHB': 'gold'}\nmarkers = {'BOHB-DARTS': '^', 'BOHB-PC-DARTS': 'v', 'BOHB-GDAS': 'x', 'RS':\n 'D', 'RE': 'o', 'RL': 's', 'SMAC': 'h', 'HB': '>', 'BOHB': '*', 'TPE': '<'}\n\n\ndef get_incumbent(losses, time_stamps):\n return_dict = {'time_stamps': [], 'losses': []}\n current_incumbent = float('inf')\n incumbent_budget = -float('inf')\n for l, t in zip(losses, time_stamps):\n if l < current_incumbent:\n current_incumbent = l\n return_dict['losses'].append(l)\n return_dict['time_stamps'].append(t)\n else:\n return_dict['losses'].append(return_dict['losses'][-1])\n return_dict['time_stamps'].append(t)\n return return_dict.values()\n\n\ndef get_trajectories(args, global_min, path='regularized_evolution',\n methods=['RE', 'RS']):\n all_trajectories = {}\n for m in methods:\n dfs = []\n for seed in range(500):\n filename = os.path.join(path, m, 'algo_{}_0_ssp_{}_seed_{}.obj'\n .format(m, args.space, seed))\n try:\n with open(filename, 'rb') as f:\n data = pickle.load(f)\n losses = [(1 - x.test_accuracy - global_min) for x in data]\n times = np.array([x.training_time for x in data])\n times = [np.sum(times[:i + 1]) for i in range(len(times))]\n if m in ['HB', 'BOHB']:\n costs = np.array([x.budget for x in data])\n costs = np.array([np.sum(costs[:i + 1]) for i in\n range(len(costs))])\n n = len(np.where(costs <= 280 * 108)[0])\n times, losses = get_incumbent(losses[:n], times[:n])\n else:\n times, losses = get_incumbent(losses, times)\n print(seed, ' MIN: ', min(losses))\n df = pd.DataFrame({str(seed): losses}, index=times)\n dfs.append(df)\n except FileNotFoundError:\n break\n df = merge_and_fill_trajectories(dfs, default_value=None)\n if df.empty:\n continue\n print(m, df.shape)\n all_trajectories[m] = {'time_stamps': 
np.array(df.index), 'losses':\n np.array(df.T)}\n return all_trajectories\n\n\ndef merge_and_fill_trajectories(pandas_data_frames, default_value=None):\n df = pd.DataFrame().join(pandas_data_frames, how='outer')\n df = df.fillna(method='ffill')\n if default_value is None:\n df = df.fillna(method='bfill')\n else:\n df = df.fillna(default_value)\n return df\n\n\ndef plot_losses(fig, ax, axins, incumbent_trajectories, regret=True,\n incumbent=None, show=True, linewidth=3, marker_size=10, xscale='log',\n xlabel='wall clock time [s]', yscale='log', ylabel=None, legend_loc=\n 'best', xlim=None, ylim=None, plot_mean=True, labels={}, markers=\n markers, colors=colors, figsize=(16, 9)):\n if regret:\n if ylabel is None:\n ylabel = 'regret'\n if incumbent is None:\n incumbent = np.inf\n for tr in incumbent_trajectories.values():\n incumbent = min(tr['losses'][:, -1].min(), incumbent)\n print('incumbent value: ', incumbent)\n for m, tr in incumbent_trajectories.items():\n trajectory = np.copy(tr['losses'])\n if trajectory.shape[0] == 0:\n continue\n if regret:\n trajectory -= incumbent\n sem = np.sqrt(trajectory.var(axis=0, ddof=1) / tr['losses'].shape[0])\n if plot_mean:\n mean = trajectory.mean(axis=0)\n else:\n mean = np.median(trajectory, axis=0)\n sem *= 1.253\n if 'DARTS' in m or 'GDAS' in m:\n ax.fill_between(tr['time_stamps'], mean - 2 * sem, mean + 2 *\n sem, color=colors[m], alpha=0.2)\n ax.plot(tr['time_stamps'], mean, label=labels.get(m, m), color=\n colors.get(m, None), linewidth=linewidth, marker=markers.get(m,\n None), markersize=marker_size, markevery=(0.1, 0.1))\n if axins is not None:\n axins.plot(tr['time_stamps'], mean, label=labels.get(m, m),\n color=colors.get(m, None), linewidth=linewidth, marker=\n markers.get(m, None), markersize=marker_size, markevery=(\n 0.1, 0.1))\n return fig, ax\n",
"step-4": "import os\nimport pickle\nimport collections\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom IPython import embed\nfrom optimizers.utils_1 import Model_1, Architecture_1\nfrom optimizers.utils import Model, Architecture\ncolors = {'BOHB-PC-DARTS': 'darkorange', 'BOHB-DARTS': 'dodgerblue',\n 'BOHB-GDAS': 'forestgreen', 'RE': 'crimson', 'RS': 'darkorchid', 'RL':\n 'sienna', 'TPE': 'deepskyblue', 'SMAC': 'violet', 'HB': 'darkgray',\n 'BOHB': 'gold'}\nmarkers = {'BOHB-DARTS': '^', 'BOHB-PC-DARTS': 'v', 'BOHB-GDAS': 'x', 'RS':\n 'D', 'RE': 'o', 'RL': 's', 'SMAC': 'h', 'HB': '>', 'BOHB': '*', 'TPE': '<'}\n\n\ndef get_incumbent(losses, time_stamps):\n return_dict = {'time_stamps': [], 'losses': []}\n current_incumbent = float('inf')\n incumbent_budget = -float('inf')\n for l, t in zip(losses, time_stamps):\n if l < current_incumbent:\n current_incumbent = l\n return_dict['losses'].append(l)\n return_dict['time_stamps'].append(t)\n else:\n return_dict['losses'].append(return_dict['losses'][-1])\n return_dict['time_stamps'].append(t)\n return return_dict.values()\n\n\ndef get_trajectories(args, global_min, path='regularized_evolution',\n methods=['RE', 'RS']):\n all_trajectories = {}\n for m in methods:\n dfs = []\n for seed in range(500):\n filename = os.path.join(path, m, 'algo_{}_0_ssp_{}_seed_{}.obj'\n .format(m, args.space, seed))\n try:\n with open(filename, 'rb') as f:\n data = pickle.load(f)\n losses = [(1 - x.test_accuracy - global_min) for x in data]\n times = np.array([x.training_time for x in data])\n times = [np.sum(times[:i + 1]) for i in range(len(times))]\n if m in ['HB', 'BOHB']:\n costs = np.array([x.budget for x in data])\n costs = np.array([np.sum(costs[:i + 1]) for i in\n range(len(costs))])\n n = len(np.where(costs <= 280 * 108)[0])\n times, losses = get_incumbent(losses[:n], times[:n])\n else:\n times, losses = get_incumbent(losses, times)\n print(seed, ' MIN: ', min(losses))\n df = pd.DataFrame({str(seed): 
losses}, index=times)\n dfs.append(df)\n except FileNotFoundError:\n break\n df = merge_and_fill_trajectories(dfs, default_value=None)\n if df.empty:\n continue\n print(m, df.shape)\n all_trajectories[m] = {'time_stamps': np.array(df.index), 'losses':\n np.array(df.T)}\n return all_trajectories\n\n\ndef merge_and_fill_trajectories(pandas_data_frames, default_value=None):\n df = pd.DataFrame().join(pandas_data_frames, how='outer')\n df = df.fillna(method='ffill')\n if default_value is None:\n df = df.fillna(method='bfill')\n else:\n df = df.fillna(default_value)\n return df\n\n\ndef plot_losses(fig, ax, axins, incumbent_trajectories, regret=True,\n incumbent=None, show=True, linewidth=3, marker_size=10, xscale='log',\n xlabel='wall clock time [s]', yscale='log', ylabel=None, legend_loc=\n 'best', xlim=None, ylim=None, plot_mean=True, labels={}, markers=\n markers, colors=colors, figsize=(16, 9)):\n if regret:\n if ylabel is None:\n ylabel = 'regret'\n if incumbent is None:\n incumbent = np.inf\n for tr in incumbent_trajectories.values():\n incumbent = min(tr['losses'][:, -1].min(), incumbent)\n print('incumbent value: ', incumbent)\n for m, tr in incumbent_trajectories.items():\n trajectory = np.copy(tr['losses'])\n if trajectory.shape[0] == 0:\n continue\n if regret:\n trajectory -= incumbent\n sem = np.sqrt(trajectory.var(axis=0, ddof=1) / tr['losses'].shape[0])\n if plot_mean:\n mean = trajectory.mean(axis=0)\n else:\n mean = np.median(trajectory, axis=0)\n sem *= 1.253\n if 'DARTS' in m or 'GDAS' in m:\n ax.fill_between(tr['time_stamps'], mean - 2 * sem, mean + 2 *\n sem, color=colors[m], alpha=0.2)\n ax.plot(tr['time_stamps'], mean, label=labels.get(m, m), color=\n colors.get(m, None), linewidth=linewidth, marker=markers.get(m,\n None), markersize=marker_size, markevery=(0.1, 0.1))\n if axins is not None:\n axins.plot(tr['time_stamps'], mean, label=labels.get(m, m),\n color=colors.get(m, None), linewidth=linewidth, marker=\n markers.get(m, None), 
markersize=marker_size, markevery=(\n 0.1, 0.1))\n return fig, ax\n",
"step-5": "import os\nimport pickle\nimport collections\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom IPython import embed\n\nfrom optimizers.utils_1 import Model_1, Architecture_1\nfrom optimizers.utils import Model, Architecture\n\ncolors={\n 'BOHB-PC-DARTS': 'darkorange',\n 'BOHB-DARTS': 'dodgerblue',\n 'BOHB-GDAS' : 'forestgreen',\n 'RE': 'crimson',\n\t\t'RS': 'darkorchid',\n\t\t'RL': 'sienna',\n\t\t'TPE': 'deepskyblue',\n 'SMAC': 'violet',\n 'HB': 'darkgray',\n 'BOHB': 'gold'\n}\n\nmarkers={\n 'BOHB-DARTS': '^',\n 'BOHB-PC-DARTS': 'v',\n 'BOHB-GDAS' : 'x',\n 'RS': 'D',\n\t\t'RE': 'o',\n\t\t'RL': 's',\n\t\t'SMAC': 'h',\n 'HB': '>',\n 'BOHB': '*',\n 'TPE': '<'\n}\n\n\ndef get_incumbent(losses, time_stamps):\n return_dict = {'time_stamps': [],\n 'losses': [],\n }\n\n current_incumbent = float('inf')\n incumbent_budget = -float('inf')\n\n for l, t in zip(losses, time_stamps):\n if l < current_incumbent:\n current_incumbent = l\n return_dict['losses'].append(l)\n return_dict['time_stamps'].append(t)\n else:\n return_dict['losses'].append(return_dict['losses'][-1])\n return_dict['time_stamps'].append(t)\n return return_dict.values()\n\n\ndef get_trajectories(args, global_min, path='regularized_evolution',\n methods=['RE', 'RS']):\n all_trajectories = {}\n for m in methods:\n dfs = []\n for seed in range(500):\n filename = os.path.join(path, m,\n 'algo_{}_0_ssp_{}_seed_{}.obj'.format(m, args.space,\n seed))\n try:\n with open(filename, 'rb') as f:\n data = pickle.load(f)\n losses = [1 - x.test_accuracy - global_min for x in data]\n times = np.array([x.training_time for x in data])\n times = [np.sum(times[:i+1]) for i in range(len(times))]\n if m in ['HB', 'BOHB']:\n costs = np.array([x.budget for x in data])\n costs = np.array(\n [np.sum(costs[:i+1]) for i in range(len(costs))]\n )\n n = len(np.where(costs <= 280*108)[0])\n times, losses = get_incumbent(losses[:n], times[:n])\n else:\n times, losses = get_incumbent(losses, times)\n 
print(seed, ' MIN: ', min(losses))\n df = pd.DataFrame({str(seed): losses}, index=times)\n #embed()\n dfs.append(df)\n except FileNotFoundError:\n break\n df = merge_and_fill_trajectories(dfs, default_value=None)\n if df.empty:\n continue\n print(m, df.shape)\n\n all_trajectories[m] = {\n 'time_stamps': np.array(df.index),\n 'losses': np.array(df.T)\n }\n\n return all_trajectories\n\n\ndef merge_and_fill_trajectories(pandas_data_frames, default_value=None):\n\t# merge all tracjectories keeping all time steps\n\tdf = pd.DataFrame().join(pandas_data_frames, how='outer')\n\n\t# forward fill to make it a propper step function\n\tdf=df.fillna(method='ffill')\n\n\tif default_value is None:\n\t# backward fill to replace the NaNs for the early times by\n\t# the performance of a random configuration\n\t\tdf=df.fillna(method='bfill')\n\telse:\n\t\tdf=df.fillna(default_value)\n\n\treturn(df)\n\n\ndef plot_losses(fig, ax, axins, incumbent_trajectories, regret=True,\n incumbent=None, show=True, linewidth=3, marker_size=10,\n xscale='log', xlabel='wall clock time [s]', yscale='log',\n ylabel=None, legend_loc = 'best', xlim=None, ylim=None,\n plot_mean=True, labels={}, markers=markers, colors=colors,\n figsize=(16,9)):\n\n if regret:\n if ylabel is None: ylabel = 'regret'\n\t\t# find lowest performance in the data to update incumbent\n\n if incumbent is None:\n incumbent = np.inf\n for tr in incumbent_trajectories.values():\n incumbent = min(tr['losses'][:,-1].min(), incumbent)\n print('incumbent value: ', incumbent)\n\n for m,tr in incumbent_trajectories.items():\n trajectory = np.copy(tr['losses'])\n if (trajectory.shape[0] == 0): continue\n if regret: trajectory -= incumbent\n\n sem = np.sqrt(trajectory.var(axis=0, ddof=1)/tr['losses'].shape[0])\n if plot_mean:\n mean = trajectory.mean(axis=0)\n else:\n mean = np.median(trajectory,axis=0)\n sem *= 1.253\n\n if 'DARTS' in m or 'GDAS' in m:\n ax.fill_between(tr['time_stamps'], mean-2*sem, mean+2*sem,\n color=colors[m], 
alpha=0.2)\n\n ax.plot(tr['time_stamps'],mean,\n label=labels.get(m, m), color=colors.get(m, None),linewidth=linewidth,\n marker=markers.get(m,None), markersize=marker_size, markevery=(0.1,0.1))\n\n if axins is not None:\n axins.plot(tr['time_stamps'],mean,\n label=labels.get(m, m), color=colors.get(m, None),linewidth=linewidth,\n marker=markers.get(m,None), markersize=marker_size, markevery=(0.1,0.1))\n\n return (fig, ax)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
def main():
a, b = map(int, input().split())
diff = abs(max(b, a) - min(a, b))
if diff % 2 != 0:
print("IMPOSSIBLE")
else:
bigger = max(a, b)
ans = bigger - (diff//2)
print(ans)
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "f73cbc25152a63bb6552e2cd8272c67a1f4277ba",
"index": 9044,
"step-1": "<mask token>\n",
"step-2": "def main():\n a, b = map(int, input().split())\n diff = abs(max(b, a) - min(a, b))\n if diff % 2 != 0:\n print('IMPOSSIBLE')\n else:\n bigger = max(a, b)\n ans = bigger - diff // 2\n print(ans)\n\n\n<mask token>\n",
"step-3": "def main():\n a, b = map(int, input().split())\n diff = abs(max(b, a) - min(a, b))\n if diff % 2 != 0:\n print('IMPOSSIBLE')\n else:\n bigger = max(a, b)\n ans = bigger - diff // 2\n print(ans)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "def main():\n a, b = map(int, input().split())\n diff = abs(max(b, a) - min(a, b))\n if diff % 2 != 0:\n print(\"IMPOSSIBLE\")\n else:\n bigger = max(a, b)\n ans = bigger - (diff//2)\n print(ans)\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(a, type(a))
print(a.attr('href'))
print(a.attr.href)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
html = """
<div id="container">
<ul class="list">
<li class="item-0">first item</li>
<li class="item-1"><a href="link2.html">second item</a></li>
<li class="item-0 active"><a href="link3.html">third item</a></li>
<li class="item-1 active"><a href="link4.html">fourth item</a></li>
<li class="item-0"><a href="link5.html">fifth item</a></li>
</ul
</div>
"""
doc = pq(html)
a = doc('.item-0.active a')
print(a, type(a))
print(a.attr('href'))
print(a.attr.href)
<|reserved_special_token_1|>
from pyquery import PyQuery as pq
html = """
<div id="container">
<ul class="list">
<li class="item-0">first item</li>
<li class="item-1"><a href="link2.html">second item</a></li>
<li class="item-0 active"><a href="link3.html">third item</a></li>
<li class="item-1 active"><a href="link4.html">fourth item</a></li>
<li class="item-0"><a href="link5.html">fifth item</a></li>
</ul
</div>
"""
doc = pq(html)
a = doc('.item-0.active a')
print(a, type(a))
print(a.attr('href'))
print(a.attr.href)
<|reserved_special_token_1|>
# coding: utf-8
from pyquery import PyQuery as pq
html = '''
<div id="container">
<ul class="list">
<li class="item-0">first item</li>
<li class="item-1"><a href="link2.html">second item</a></li>
<li class="item-0 active"><a href="link3.html">third item</a></li>
<li class="item-1 active"><a href="link4.html">fourth item</a></li>
<li class="item-0"><a href="link5.html">fifth item</a></li>
</ul
</div>
'''
# 获取属性
# 第一种方法
doc = pq(html)
a = doc('.item-0.active a')
print(a, type(a))
print(a.attr('href'))
# 第二种方法
print(a.attr.href)
|
flexible
|
{
"blob_id": "02ab822dacb26d623a474fa45ebb034f9c1291b8",
"index": 1604,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(a, type(a))\nprint(a.attr('href'))\nprint(a.attr.href)\n",
"step-3": "<mask token>\nhtml = \"\"\"\n <div id=\"container\">\n <ul class=\"list\">\n <li class=\"item-0\">first item</li>\n <li class=\"item-1\"><a href=\"link2.html\">second item</a></li>\n <li class=\"item-0 active\"><a href=\"link3.html\">third item</a></li>\n <li class=\"item-1 active\"><a href=\"link4.html\">fourth item</a></li>\n <li class=\"item-0\"><a href=\"link5.html\">fifth item</a></li>\n </ul\n </div>\n\"\"\"\ndoc = pq(html)\na = doc('.item-0.active a')\nprint(a, type(a))\nprint(a.attr('href'))\nprint(a.attr.href)\n",
"step-4": "from pyquery import PyQuery as pq\nhtml = \"\"\"\n <div id=\"container\">\n <ul class=\"list\">\n <li class=\"item-0\">first item</li>\n <li class=\"item-1\"><a href=\"link2.html\">second item</a></li>\n <li class=\"item-0 active\"><a href=\"link3.html\">third item</a></li>\n <li class=\"item-1 active\"><a href=\"link4.html\">fourth item</a></li>\n <li class=\"item-0\"><a href=\"link5.html\">fifth item</a></li>\n </ul\n </div>\n\"\"\"\ndoc = pq(html)\na = doc('.item-0.active a')\nprint(a, type(a))\nprint(a.attr('href'))\nprint(a.attr.href)\n",
"step-5": "# coding: utf-8\n\nfrom pyquery import PyQuery as pq\n\n\nhtml = '''\n <div id=\"container\">\n <ul class=\"list\">\n <li class=\"item-0\">first item</li>\n <li class=\"item-1\"><a href=\"link2.html\">second item</a></li>\n <li class=\"item-0 active\"><a href=\"link3.html\">third item</a></li>\n <li class=\"item-1 active\"><a href=\"link4.html\">fourth item</a></li>\n <li class=\"item-0\"><a href=\"link5.html\">fifth item</a></li>\n </ul\n </div>\n'''\n# 获取属性\n# 第一种方法\ndoc = pq(html)\na = doc('.item-0.active a')\nprint(a, type(a))\nprint(a.attr('href'))\n\n# 第二种方法\nprint(a.attr.href)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while numero_usuario < 0:
print(
'Parece que el pirata se ha quedado dormido en la rampa intenta despertarlo ingresando otro nùmero '
)
numero_usuario = int(input(
'Ingrese un nùmero para empezar su tambaleada aventura '))
<|reserved_special_token_0|>
while pasos_adelante < 15 and pasos_der < 5 and pasos_izq < 5:
if numero_usuario % 2 == 0:
pasos_adelante = pasos_adelante + 1
print('El pirata avanzó', pasos_adelante, 'pasos hacia adelante')
elif numero_usuario % 2 != 0 and (numero_usuario - 1) % 4 == 0:
pasos_der = pasos_der + 1
pasos_izq = pasos_izq - 1
print('El pirata hizo', pasos_der, 'pasos a la derecha ')
elif numero_usuario % 2 != 0 and (numero_usuario - 1) % 4 != 0:
pasos_izq = pasos_izq + 1
pasos_der = pasos_der - 1
print('El pirata hizo', pasos_izq, 'pasos a la izquierda ')
aleatorio = randint(-10, 1000)
print('nùmero aleatorio', aleatorio)
numero_usuario = aleatorio
if pasos_adelante >= 15:
print(
' Este viaje tambaleado ha sido un èxito! El Pirata llegó a su Barco!')
elif pasos_der >= 5:
print(
'El pirata se ha caído de la rampa por el lado derecho y se ha ahogado :('
)
elif pasos_izq >= 5:
print(
'El pirata se ha caído de la rampa por el lado izquierdo y se ha ahogado :('
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
numero_usuario = int(input(
'Ingrese un nùmero para empezar su tambaleada aventura '))
while numero_usuario < 0:
print(
'Parece que el pirata se ha quedado dormido en la rampa intenta despertarlo ingresando otro nùmero '
)
numero_usuario = int(input(
'Ingrese un nùmero para empezar su tambaleada aventura '))
pasos_izq = 3
pasos_der = 3
pasos_adelante = 0
while pasos_adelante < 15 and pasos_der < 5 and pasos_izq < 5:
if numero_usuario % 2 == 0:
pasos_adelante = pasos_adelante + 1
print('El pirata avanzó', pasos_adelante, 'pasos hacia adelante')
elif numero_usuario % 2 != 0 and (numero_usuario - 1) % 4 == 0:
pasos_der = pasos_der + 1
pasos_izq = pasos_izq - 1
print('El pirata hizo', pasos_der, 'pasos a la derecha ')
elif numero_usuario % 2 != 0 and (numero_usuario - 1) % 4 != 0:
pasos_izq = pasos_izq + 1
pasos_der = pasos_der - 1
print('El pirata hizo', pasos_izq, 'pasos a la izquierda ')
aleatorio = randint(-10, 1000)
print('nùmero aleatorio', aleatorio)
numero_usuario = aleatorio
if pasos_adelante >= 15:
print(
' Este viaje tambaleado ha sido un èxito! El Pirata llegó a su Barco!')
elif pasos_der >= 5:
print(
'El pirata se ha caído de la rampa por el lado derecho y se ha ahogado :('
)
elif pasos_izq >= 5:
print(
'El pirata se ha caído de la rampa por el lado izquierdo y se ha ahogado :('
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from random import randint
numero_usuario = int(input(
'Ingrese un nùmero para empezar su tambaleada aventura '))
while numero_usuario < 0:
print(
'Parece que el pirata se ha quedado dormido en la rampa intenta despertarlo ingresando otro nùmero '
)
numero_usuario = int(input(
'Ingrese un nùmero para empezar su tambaleada aventura '))
pasos_izq = 3
pasos_der = 3
pasos_adelante = 0
while pasos_adelante < 15 and pasos_der < 5 and pasos_izq < 5:
if numero_usuario % 2 == 0:
pasos_adelante = pasos_adelante + 1
print('El pirata avanzó', pasos_adelante, 'pasos hacia adelante')
elif numero_usuario % 2 != 0 and (numero_usuario - 1) % 4 == 0:
pasos_der = pasos_der + 1
pasos_izq = pasos_izq - 1
print('El pirata hizo', pasos_der, 'pasos a la derecha ')
elif numero_usuario % 2 != 0 and (numero_usuario - 1) % 4 != 0:
pasos_izq = pasos_izq + 1
pasos_der = pasos_der - 1
print('El pirata hizo', pasos_izq, 'pasos a la izquierda ')
aleatorio = randint(-10, 1000)
print('nùmero aleatorio', aleatorio)
numero_usuario = aleatorio
if pasos_adelante >= 15:
print(
' Este viaje tambaleado ha sido un èxito! El Pirata llegó a su Barco!')
elif pasos_der >= 5:
print(
'El pirata se ha caído de la rampa por el lado derecho y se ha ahogado :('
)
elif pasos_izq >= 5:
print(
'El pirata se ha caído de la rampa por el lado izquierdo y se ha ahogado :('
)
<|reserved_special_token_1|>
""""Pirata barba Negra ( màs de 2 pasos a las izquierda o a la derecha y se cae):
rampa para subir a su barco (5 pasos de ancho y 15 de largo")leer por teclado un valor entero.
a) si el entero es par 1 paso hacia adelante
b)si el entero es impar , pero el entero - 1 es divisible por 4, el pirata da un paso a la derecha
c)En otro caso , el pirata da un paso a la izquierda
d)utilizar un generador de numeros pseudo aleatorios para generar un nuevo entero y repetir a la partir del paso a
Condiciones de terminacion:
** introducciòn de un nùmero negativo ( es de suponer que el pirata se durmiò sobre la rampa)
**El pirata cae por un costado de la rampa y se ahoga
**El pirata logra abordar a salvo su barco
Haga un programa que exhiba el avance del pirata en cada paso"""
from random import randint
numero_usuario =int(input("Ingrese un nùmero para empezar su tambaleada aventura "))
while numero_usuario<0:
print("Parece que el pirata se ha quedado dormido en la rampa intenta despertarlo ingresando otro nùmero ")
numero_usuario =int(input("Ingrese un nùmero para empezar su tambaleada aventura "))
pasos_izq =3 #por la posicion inicial en la tabla
pasos_der= 3
pasos_adelante=0
#considerar punto en la tabla
while pasos_adelante <15 and pasos_der<5 and pasos_izq<5:
if numero_usuario%2 ==0:
pasos_adelante =pasos_adelante+1
#para el while validar que iguale o supere lo pasos_adelante >=15
print("El pirata avanzó" ,pasos_adelante, "pasos hacia adelante")
elif numero_usuario %2 !=0 and (numero_usuario-1)%4==0:
pasos_der= pasos_der+1
pasos_izq=pasos_izq-1
#para el while validar que iguale o supere lo pasos_der>2
print("El pirata hizo" ,pasos_der, "pasos a la derecha ")
elif numero_usuario %2 !=0 and (numero_usuario-1)%4!=0:
pasos_izq=pasos_izq+1
pasos_der= pasos_der-1
#para el while validar que iguale o supere lo pasos_izq>2
print("El pirata hizo" ,pasos_izq, "pasos a la izquierda ")
aleatorio=randint(-10,1000)
print("nùmero aleatorio",aleatorio)
numero_usuario=aleatorio
if pasos_adelante >=15:
print(" Este viaje tambaleado ha sido un èxito! El Pirata llegó a su Barco!")
elif pasos_der>=5:
print("El pirata se ha caído de la rampa por el lado derecho y se ha ahogado :(")
elif pasos_izq>=5:
print("El pirata se ha caído de la rampa por el lado izquierdo y se ha ahogado :(")
|
flexible
|
{
"blob_id": "1829bd8e87c470a71fea97dd3a47c30477b6e6f1",
"index": 3109,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile numero_usuario < 0:\n print(\n 'Parece que el pirata se ha quedado dormido en la rampa intenta despertarlo ingresando otro nùmero '\n )\n numero_usuario = int(input(\n 'Ingrese un nùmero para empezar su tambaleada aventura '))\n<mask token>\nwhile pasos_adelante < 15 and pasos_der < 5 and pasos_izq < 5:\n if numero_usuario % 2 == 0:\n pasos_adelante = pasos_adelante + 1\n print('El pirata avanzó', pasos_adelante, 'pasos hacia adelante')\n elif numero_usuario % 2 != 0 and (numero_usuario - 1) % 4 == 0:\n pasos_der = pasos_der + 1\n pasos_izq = pasos_izq - 1\n print('El pirata hizo', pasos_der, 'pasos a la derecha ')\n elif numero_usuario % 2 != 0 and (numero_usuario - 1) % 4 != 0:\n pasos_izq = pasos_izq + 1\n pasos_der = pasos_der - 1\n print('El pirata hizo', pasos_izq, 'pasos a la izquierda ')\n aleatorio = randint(-10, 1000)\n print('nùmero aleatorio', aleatorio)\n numero_usuario = aleatorio\nif pasos_adelante >= 15:\n print(\n ' Este viaje tambaleado ha sido un èxito! El Pirata llegó a su Barco!')\nelif pasos_der >= 5:\n print(\n 'El pirata se ha caído de la rampa por el lado derecho y se ha ahogado :('\n )\nelif pasos_izq >= 5:\n print(\n 'El pirata se ha caído de la rampa por el lado izquierdo y se ha ahogado :('\n )\n",
"step-3": "<mask token>\nnumero_usuario = int(input(\n 'Ingrese un nùmero para empezar su tambaleada aventura '))\nwhile numero_usuario < 0:\n print(\n 'Parece que el pirata se ha quedado dormido en la rampa intenta despertarlo ingresando otro nùmero '\n )\n numero_usuario = int(input(\n 'Ingrese un nùmero para empezar su tambaleada aventura '))\npasos_izq = 3\npasos_der = 3\npasos_adelante = 0\nwhile pasos_adelante < 15 and pasos_der < 5 and pasos_izq < 5:\n if numero_usuario % 2 == 0:\n pasos_adelante = pasos_adelante + 1\n print('El pirata avanzó', pasos_adelante, 'pasos hacia adelante')\n elif numero_usuario % 2 != 0 and (numero_usuario - 1) % 4 == 0:\n pasos_der = pasos_der + 1\n pasos_izq = pasos_izq - 1\n print('El pirata hizo', pasos_der, 'pasos a la derecha ')\n elif numero_usuario % 2 != 0 and (numero_usuario - 1) % 4 != 0:\n pasos_izq = pasos_izq + 1\n pasos_der = pasos_der - 1\n print('El pirata hizo', pasos_izq, 'pasos a la izquierda ')\n aleatorio = randint(-10, 1000)\n print('nùmero aleatorio', aleatorio)\n numero_usuario = aleatorio\nif pasos_adelante >= 15:\n print(\n ' Este viaje tambaleado ha sido un èxito! El Pirata llegó a su Barco!')\nelif pasos_der >= 5:\n print(\n 'El pirata se ha caído de la rampa por el lado derecho y se ha ahogado :('\n )\nelif pasos_izq >= 5:\n print(\n 'El pirata se ha caído de la rampa por el lado izquierdo y se ha ahogado :('\n )\n",
"step-4": "<mask token>\nfrom random import randint\nnumero_usuario = int(input(\n 'Ingrese un nùmero para empezar su tambaleada aventura '))\nwhile numero_usuario < 0:\n print(\n 'Parece que el pirata se ha quedado dormido en la rampa intenta despertarlo ingresando otro nùmero '\n )\n numero_usuario = int(input(\n 'Ingrese un nùmero para empezar su tambaleada aventura '))\npasos_izq = 3\npasos_der = 3\npasos_adelante = 0\nwhile pasos_adelante < 15 and pasos_der < 5 and pasos_izq < 5:\n if numero_usuario % 2 == 0:\n pasos_adelante = pasos_adelante + 1\n print('El pirata avanzó', pasos_adelante, 'pasos hacia adelante')\n elif numero_usuario % 2 != 0 and (numero_usuario - 1) % 4 == 0:\n pasos_der = pasos_der + 1\n pasos_izq = pasos_izq - 1\n print('El pirata hizo', pasos_der, 'pasos a la derecha ')\n elif numero_usuario % 2 != 0 and (numero_usuario - 1) % 4 != 0:\n pasos_izq = pasos_izq + 1\n pasos_der = pasos_der - 1\n print('El pirata hizo', pasos_izq, 'pasos a la izquierda ')\n aleatorio = randint(-10, 1000)\n print('nùmero aleatorio', aleatorio)\n numero_usuario = aleatorio\nif pasos_adelante >= 15:\n print(\n ' Este viaje tambaleado ha sido un èxito! El Pirata llegó a su Barco!')\nelif pasos_der >= 5:\n print(\n 'El pirata se ha caído de la rampa por el lado derecho y se ha ahogado :('\n )\nelif pasos_izq >= 5:\n print(\n 'El pirata se ha caído de la rampa por el lado izquierdo y se ha ahogado :('\n )\n",
"step-5": "\"\"\"\"Pirata barba Negra ( màs de 2 pasos a las izquierda o a la derecha y se cae): \nrampa para subir a su barco (5 pasos de ancho y 15 de largo\")leer por teclado un valor entero.\na) si el entero es par 1 paso hacia adelante\nb)si el entero es impar , pero el entero - 1 es divisible por 4, el pirata da un paso a la derecha\nc)En otro caso , el pirata da un paso a la izquierda\nd)utilizar un generador de numeros pseudo aleatorios para generar un nuevo entero y repetir a la partir del paso a\nCondiciones de terminacion:\n** introducciòn de un nùmero negativo ( es de suponer que el pirata se durmiò sobre la rampa)\n**El pirata cae por un costado de la rampa y se ahoga\n**El pirata logra abordar a salvo su barco\nHaga un programa que exhiba el avance del pirata en cada paso\"\"\"\n\nfrom random import randint\n\nnumero_usuario =int(input(\"Ingrese un nùmero para empezar su tambaleada aventura \"))\nwhile numero_usuario<0:\n print(\"Parece que el pirata se ha quedado dormido en la rampa intenta despertarlo ingresando otro nùmero \")\n numero_usuario =int(input(\"Ingrese un nùmero para empezar su tambaleada aventura \"))\n\npasos_izq =3 #por la posicion inicial en la tabla\npasos_der= 3\npasos_adelante=0\n#considerar punto en la tabla\n\nwhile pasos_adelante <15 and pasos_der<5 and pasos_izq<5:\n if numero_usuario%2 ==0:\n pasos_adelante =pasos_adelante+1\n #para el while validar que iguale o supere lo pasos_adelante >=15\n print(\"El pirata avanzó\" ,pasos_adelante, \"pasos hacia adelante\")\n elif numero_usuario %2 !=0 and (numero_usuario-1)%4==0:\n pasos_der= pasos_der+1\n pasos_izq=pasos_izq-1\n #para el while validar que iguale o supere lo pasos_der>2\n print(\"El pirata hizo\" ,pasos_der, \"pasos a la derecha \")\n elif numero_usuario %2 !=0 and (numero_usuario-1)%4!=0:\n pasos_izq=pasos_izq+1\n pasos_der= pasos_der-1\n #para el while validar que iguale o supere lo pasos_izq>2\n print(\"El pirata hizo\" ,pasos_izq, \"pasos a la izquierda \")\n 
aleatorio=randint(-10,1000) \n print(\"nùmero aleatorio\",aleatorio)\n numero_usuario=aleatorio\n\nif pasos_adelante >=15: \n print(\" Este viaje tambaleado ha sido un èxito! El Pirata llegó a su Barco!\")\nelif pasos_der>=5:\n print(\"El pirata se ha caído de la rampa por el lado derecho y se ha ahogado :(\")\nelif pasos_izq>=5:\n print(\"El pirata se ha caído de la rampa por el lado izquierdo y se ha ahogado :(\") ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print('Hi, I am Nag')
|
flexible
|
{
"blob_id": "0ca751e050244fd85c8110d02d5e7a79eb449ada",
"index": 8542,
"step-1": "<mask token>\n",
"step-2": "print('Hi, I am Nag')\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
class Step:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Step:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __repr__(self) ->str:
return f'Step: {{action: {self.action.__str__()}}}'
<|reserved_special_token_1|>
class Step:
def __init__(self, action):
self.action = action
<|reserved_special_token_0|>
def __repr__(self) ->str:
return f'Step: {{action: {self.action.__str__()}}}'
<|reserved_special_token_1|>
class Step:
def __init__(self, action):
self.action = action
def __str__(self) ->str:
return f'Step: {{action: {self.action.__str__()}}}'
def __repr__(self) ->str:
return f'Step: {{action: {self.action.__str__()}}}'
|
flexible
|
{
"blob_id": "9adff5da4e26088def9f0e32aa712a1f2b0336ba",
"index": 925,
"step-1": "class Step:\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "class Step:\n <mask token>\n <mask token>\n\n def __repr__(self) ->str:\n return f'Step: {{action: {self.action.__str__()}}}'\n",
"step-3": "class Step:\n\n def __init__(self, action):\n self.action = action\n <mask token>\n\n def __repr__(self) ->str:\n return f'Step: {{action: {self.action.__str__()}}}'\n",
"step-4": "class Step:\n\n def __init__(self, action):\n self.action = action\n\n def __str__(self) ->str:\n return f'Step: {{action: {self.action.__str__()}}}'\n\n def __repr__(self) ->str:\n return f'Step: {{action: {self.action.__str__()}}}'\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
list_angle_list = RmList()
variable_flag = 0
variable_i = 0
def user_defined_shoot():
global variable_flag
global variable_i
global list_angle_list
variable_i = 1
for count in range(3):
gimbal_ctrl.angle_ctrl(list_angle_list[1], list_angle_list[2])
gun_ctrl.fire_once()
variable_i = variable_i + 2
time.sleep(0.2)
def user_defined_storage_angle():
global variable_flag
global variable_i
global list_angle_list
led_ctrl.gun_led_on()
list_angle_list.append(gimbal_ctrl.get_axis_angle(rm_define.
gimbal_axis_yaw))
list_angle_list.append(gimbal_ctrl.get_axis_angle(rm_define.
gimbal_axis_pitch))
time.sleep(5)
led_ctrl.gun_led_off()
def start():
global variable_flag
global variable_i
global list_angle_list
robot_ctrl.set_mode(rm_define.robot_mode_free)
gimbal_ctrl.set_rotate_speed(180)
vision_ctrl.enable_detection(rm_define.vision_detection_marker)
vision_ctrl.detect_marker_and_aim(rm_define.marker_trans_red_heart)
time.sleep(5)
user_defined_storage_angle()
vision_ctrl.detect_marker_and_aim(rm_define.marker_number_three)
time.sleep(3)
user_defined_storage_angle()
user_defined_shoot()
|
normal
|
{
"blob_id": "012e4112970a07559f27fa2127cdffcc557a1566",
"index": 4638,
"step-1": "<mask token>\n\n\ndef user_defined_shoot():\n global variable_flag\n global variable_i\n global list_angle_list\n variable_i = 1\n for count in range(3):\n gimbal_ctrl.angle_ctrl(list_angle_list[1], list_angle_list[2])\n gun_ctrl.fire_once()\n variable_i = variable_i + 2\n time.sleep(0.2)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef user_defined_shoot():\n global variable_flag\n global variable_i\n global list_angle_list\n variable_i = 1\n for count in range(3):\n gimbal_ctrl.angle_ctrl(list_angle_list[1], list_angle_list[2])\n gun_ctrl.fire_once()\n variable_i = variable_i + 2\n time.sleep(0.2)\n\n\ndef user_defined_storage_angle():\n global variable_flag\n global variable_i\n global list_angle_list\n led_ctrl.gun_led_on()\n list_angle_list.append(gimbal_ctrl.get_axis_angle(rm_define.\n gimbal_axis_yaw))\n list_angle_list.append(gimbal_ctrl.get_axis_angle(rm_define.\n gimbal_axis_pitch))\n time.sleep(5)\n led_ctrl.gun_led_off()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef user_defined_shoot():\n global variable_flag\n global variable_i\n global list_angle_list\n variable_i = 1\n for count in range(3):\n gimbal_ctrl.angle_ctrl(list_angle_list[1], list_angle_list[2])\n gun_ctrl.fire_once()\n variable_i = variable_i + 2\n time.sleep(0.2)\n\n\ndef user_defined_storage_angle():\n global variable_flag\n global variable_i\n global list_angle_list\n led_ctrl.gun_led_on()\n list_angle_list.append(gimbal_ctrl.get_axis_angle(rm_define.\n gimbal_axis_yaw))\n list_angle_list.append(gimbal_ctrl.get_axis_angle(rm_define.\n gimbal_axis_pitch))\n time.sleep(5)\n led_ctrl.gun_led_off()\n\n\ndef start():\n global variable_flag\n global variable_i\n global list_angle_list\n robot_ctrl.set_mode(rm_define.robot_mode_free)\n gimbal_ctrl.set_rotate_speed(180)\n vision_ctrl.enable_detection(rm_define.vision_detection_marker)\n vision_ctrl.detect_marker_and_aim(rm_define.marker_trans_red_heart)\n time.sleep(5)\n user_defined_storage_angle()\n vision_ctrl.detect_marker_and_aim(rm_define.marker_number_three)\n time.sleep(3)\n user_defined_storage_angle()\n user_defined_shoot()\n",
"step-4": "list_angle_list = RmList()\nvariable_flag = 0\nvariable_i = 0\n\n\ndef user_defined_shoot():\n global variable_flag\n global variable_i\n global list_angle_list\n variable_i = 1\n for count in range(3):\n gimbal_ctrl.angle_ctrl(list_angle_list[1], list_angle_list[2])\n gun_ctrl.fire_once()\n variable_i = variable_i + 2\n time.sleep(0.2)\n\n\ndef user_defined_storage_angle():\n global variable_flag\n global variable_i\n global list_angle_list\n led_ctrl.gun_led_on()\n list_angle_list.append(gimbal_ctrl.get_axis_angle(rm_define.\n gimbal_axis_yaw))\n list_angle_list.append(gimbal_ctrl.get_axis_angle(rm_define.\n gimbal_axis_pitch))\n time.sleep(5)\n led_ctrl.gun_led_off()\n\n\ndef start():\n global variable_flag\n global variable_i\n global list_angle_list\n robot_ctrl.set_mode(rm_define.robot_mode_free)\n gimbal_ctrl.set_rotate_speed(180)\n vision_ctrl.enable_detection(rm_define.vision_detection_marker)\n vision_ctrl.detect_marker_and_aim(rm_define.marker_trans_red_heart)\n time.sleep(5)\n user_defined_storage_angle()\n vision_ctrl.detect_marker_and_aim(rm_define.marker_number_three)\n time.sleep(3)\n user_defined_storage_angle()\n user_defined_shoot()\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class NEURON(NEURONCellTest):
def __init__(self):
super(NEURON, self).__init__()
self.path = '../NEURON/granule.hoc'
self.label = 'granule'
self.resultsFile = 'results/cells/granule/NEURON.json'
self.currentRange = -0.01, 0.1
<|reserved_special_token_0|>
class NeuroML(NeuroMLCellTest):
def __init__(self):
super(NeuroML, self).__init__()
self.path = (
'../NeuroML2/GranuleCells/Exported/Granule_0_110821.cell.nml')
self.label = 'granule'
self.resultsFile = 'results/cells/granule/NeuroML.json'
self.id = 'Granule_0_110821'
self.currentRange = -0.01, 0.1
def prepare(self, h):
h.load_file(self.id + '.hoc')
cell = getattr(h, self.id)()
h.celsius = 24
return cell
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NEURON(NEURONCellTest):
def __init__(self):
super(NEURON, self).__init__()
self.path = '../NEURON/granule.hoc'
self.label = 'granule'
self.resultsFile = 'results/cells/granule/NEURON.json'
self.currentRange = -0.01, 0.1
def prepare(self, h):
sys.path.append(os.getcwd())
import customsim
import modeldata
customsim.setup(1, 1)
model = modeldata.getmodel()
cell = model.granules[110821]
h.celsius = 24
return cell
class NeuroML(NeuroMLCellTest):
def __init__(self):
super(NeuroML, self).__init__()
self.path = (
'../NeuroML2/GranuleCells/Exported/Granule_0_110821.cell.nml')
self.label = 'granule'
self.resultsFile = 'results/cells/granule/NeuroML.json'
self.id = 'Granule_0_110821'
self.currentRange = -0.01, 0.1
def prepare(self, h):
h.load_file(self.id + '.hoc')
cell = getattr(h, self.id)()
h.celsius = 24
return cell
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.insert(0, '..')
sys.path.insert(0, '../NEURON')
<|reserved_special_token_0|>
class NEURON(NEURONCellTest):
def __init__(self):
super(NEURON, self).__init__()
self.path = '../NEURON/granule.hoc'
self.label = 'granule'
self.resultsFile = 'results/cells/granule/NEURON.json'
self.currentRange = -0.01, 0.1
def prepare(self, h):
sys.path.append(os.getcwd())
import customsim
import modeldata
customsim.setup(1, 1)
model = modeldata.getmodel()
cell = model.granules[110821]
h.celsius = 24
return cell
class NeuroML(NeuroMLCellTest):
def __init__(self):
super(NeuroML, self).__init__()
self.path = (
'../NeuroML2/GranuleCells/Exported/Granule_0_110821.cell.nml')
self.label = 'granule'
self.resultsFile = 'results/cells/granule/NeuroML.json'
self.id = 'Granule_0_110821'
self.currentRange = -0.01, 0.1
def prepare(self, h):
h.load_file(self.id + '.hoc')
cell = getattr(h, self.id)()
h.celsius = 24
return cell
<|reserved_special_token_1|>
import sys, os
sys.path.insert(0, '..')
sys.path.insert(0, '../NEURON')
from tests.cells.NEURONCellTest import NEURONCellTest
from tests.cells.NeuroMLCellTest import NeuroMLCellTest
class NEURON(NEURONCellTest):
def __init__(self):
super(NEURON, self).__init__()
self.path = '../NEURON/granule.hoc'
self.label = 'granule'
self.resultsFile = 'results/cells/granule/NEURON.json'
self.currentRange = -0.01, 0.1
def prepare(self, h):
sys.path.append(os.getcwd())
import customsim
import modeldata
customsim.setup(1, 1)
model = modeldata.getmodel()
cell = model.granules[110821]
h.celsius = 24
return cell
class NeuroML(NeuroMLCellTest):
def __init__(self):
super(NeuroML, self).__init__()
self.path = (
'../NeuroML2/GranuleCells/Exported/Granule_0_110821.cell.nml')
self.label = 'granule'
self.resultsFile = 'results/cells/granule/NeuroML.json'
self.id = 'Granule_0_110821'
self.currentRange = -0.01, 0.1
def prepare(self, h):
h.load_file(self.id + '.hoc')
cell = getattr(h, self.id)()
h.celsius = 24
return cell
<|reserved_special_token_1|>
import sys, os; sys.path.insert(0,'..'); sys.path.insert(0,'../NEURON');
from tests.cells.NEURONCellTest import NEURONCellTest
from tests.cells.NeuroMLCellTest import NeuroMLCellTest
class NEURON(NEURONCellTest):
def __init__(self):
super(NEURON, self).__init__()
self.path = "../NEURON/granule.hoc"
self.label = "granule"
self.resultsFile = "results/cells/granule/NEURON.json"
self.currentRange = (-0.01, 0.1)
def prepare(self, h):
# Build the network with 1GC
sys.path.append(os.getcwd())
import customsim
import modeldata
customsim.setup(1, 1)
model = modeldata.getmodel()
cell = model.granules[110821] # The GC of the first MC
h.celsius = 24
return cell
class NeuroML(NeuroMLCellTest):
def __init__(self):
super(NeuroML, self).__init__()
self.path = "../NeuroML2/GranuleCells/Exported/Granule_0_110821.cell.nml"
self.label = "granule"
self.resultsFile = "results/cells/granule/NeuroML.json"
self.id = "Granule_0_110821"
self.currentRange = (-0.01, 0.1)
def prepare(self, h):
# Load the cell hoc
h.load_file(self.id+".hoc")
cell = getattr(h,self.id)()
h.celsius = 24
return cell
|
flexible
|
{
"blob_id": "6dbafbcf126c37edb2187eb28c01e2c1125c1c64",
"index": 7134,
"step-1": "<mask token>\n\n\nclass NEURON(NEURONCellTest):\n\n def __init__(self):\n super(NEURON, self).__init__()\n self.path = '../NEURON/granule.hoc'\n self.label = 'granule'\n self.resultsFile = 'results/cells/granule/NEURON.json'\n self.currentRange = -0.01, 0.1\n <mask token>\n\n\nclass NeuroML(NeuroMLCellTest):\n\n def __init__(self):\n super(NeuroML, self).__init__()\n self.path = (\n '../NeuroML2/GranuleCells/Exported/Granule_0_110821.cell.nml')\n self.label = 'granule'\n self.resultsFile = 'results/cells/granule/NeuroML.json'\n self.id = 'Granule_0_110821'\n self.currentRange = -0.01, 0.1\n\n def prepare(self, h):\n h.load_file(self.id + '.hoc')\n cell = getattr(h, self.id)()\n h.celsius = 24\n return cell\n",
"step-2": "<mask token>\n\n\nclass NEURON(NEURONCellTest):\n\n def __init__(self):\n super(NEURON, self).__init__()\n self.path = '../NEURON/granule.hoc'\n self.label = 'granule'\n self.resultsFile = 'results/cells/granule/NEURON.json'\n self.currentRange = -0.01, 0.1\n\n def prepare(self, h):\n sys.path.append(os.getcwd())\n import customsim\n import modeldata\n customsim.setup(1, 1)\n model = modeldata.getmodel()\n cell = model.granules[110821]\n h.celsius = 24\n return cell\n\n\nclass NeuroML(NeuroMLCellTest):\n\n def __init__(self):\n super(NeuroML, self).__init__()\n self.path = (\n '../NeuroML2/GranuleCells/Exported/Granule_0_110821.cell.nml')\n self.label = 'granule'\n self.resultsFile = 'results/cells/granule/NeuroML.json'\n self.id = 'Granule_0_110821'\n self.currentRange = -0.01, 0.1\n\n def prepare(self, h):\n h.load_file(self.id + '.hoc')\n cell = getattr(h, self.id)()\n h.celsius = 24\n return cell\n",
"step-3": "<mask token>\nsys.path.insert(0, '..')\nsys.path.insert(0, '../NEURON')\n<mask token>\n\n\nclass NEURON(NEURONCellTest):\n\n def __init__(self):\n super(NEURON, self).__init__()\n self.path = '../NEURON/granule.hoc'\n self.label = 'granule'\n self.resultsFile = 'results/cells/granule/NEURON.json'\n self.currentRange = -0.01, 0.1\n\n def prepare(self, h):\n sys.path.append(os.getcwd())\n import customsim\n import modeldata\n customsim.setup(1, 1)\n model = modeldata.getmodel()\n cell = model.granules[110821]\n h.celsius = 24\n return cell\n\n\nclass NeuroML(NeuroMLCellTest):\n\n def __init__(self):\n super(NeuroML, self).__init__()\n self.path = (\n '../NeuroML2/GranuleCells/Exported/Granule_0_110821.cell.nml')\n self.label = 'granule'\n self.resultsFile = 'results/cells/granule/NeuroML.json'\n self.id = 'Granule_0_110821'\n self.currentRange = -0.01, 0.1\n\n def prepare(self, h):\n h.load_file(self.id + '.hoc')\n cell = getattr(h, self.id)()\n h.celsius = 24\n return cell\n",
"step-4": "import sys, os\nsys.path.insert(0, '..')\nsys.path.insert(0, '../NEURON')\nfrom tests.cells.NEURONCellTest import NEURONCellTest\nfrom tests.cells.NeuroMLCellTest import NeuroMLCellTest\n\n\nclass NEURON(NEURONCellTest):\n\n def __init__(self):\n super(NEURON, self).__init__()\n self.path = '../NEURON/granule.hoc'\n self.label = 'granule'\n self.resultsFile = 'results/cells/granule/NEURON.json'\n self.currentRange = -0.01, 0.1\n\n def prepare(self, h):\n sys.path.append(os.getcwd())\n import customsim\n import modeldata\n customsim.setup(1, 1)\n model = modeldata.getmodel()\n cell = model.granules[110821]\n h.celsius = 24\n return cell\n\n\nclass NeuroML(NeuroMLCellTest):\n\n def __init__(self):\n super(NeuroML, self).__init__()\n self.path = (\n '../NeuroML2/GranuleCells/Exported/Granule_0_110821.cell.nml')\n self.label = 'granule'\n self.resultsFile = 'results/cells/granule/NeuroML.json'\n self.id = 'Granule_0_110821'\n self.currentRange = -0.01, 0.1\n\n def prepare(self, h):\n h.load_file(self.id + '.hoc')\n cell = getattr(h, self.id)()\n h.celsius = 24\n return cell\n",
"step-5": "import sys, os; sys.path.insert(0,'..'); sys.path.insert(0,'../NEURON');\r\nfrom tests.cells.NEURONCellTest import NEURONCellTest\r\nfrom tests.cells.NeuroMLCellTest import NeuroMLCellTest\r\n\r\nclass NEURON(NEURONCellTest):\r\n\r\n def __init__(self):\r\n super(NEURON, self).__init__()\r\n\r\n self.path = \"../NEURON/granule.hoc\"\r\n self.label = \"granule\"\r\n self.resultsFile = \"results/cells/granule/NEURON.json\"\r\n self.currentRange = (-0.01, 0.1)\r\n\r\n def prepare(self, h):\r\n\r\n # Build the network with 1GC\r\n sys.path.append(os.getcwd())\r\n import customsim\r\n import modeldata\r\n customsim.setup(1, 1)\r\n model = modeldata.getmodel()\r\n cell = model.granules[110821] # The GC of the first MC\r\n\r\n h.celsius = 24\r\n\r\n return cell\r\n\r\nclass NeuroML(NeuroMLCellTest):\r\n def __init__(self):\r\n super(NeuroML, self).__init__()\r\n\r\n self.path = \"../NeuroML2/GranuleCells/Exported/Granule_0_110821.cell.nml\"\r\n self.label = \"granule\"\r\n self.resultsFile = \"results/cells/granule/NeuroML.json\"\r\n self.id = \"Granule_0_110821\"\r\n self.currentRange = (-0.01, 0.1)\r\n\r\n def prepare(self, h):\r\n # Load the cell hoc\r\n h.load_file(self.id+\".hoc\")\r\n\r\n cell = getattr(h,self.id)()\r\n\r\n h.celsius = 24\r\n\r\n return cell\r\n\r\n\r\n\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
/home/co/Documents/ImageClassifier/tensorflow/tensorflow/contrib/tfprof/__init__.py
|
normal
|
{
"blob_id": "ca0616694b30f69263db48282bf8b8c130de0fbb",
"index": 8774,
"step-1": "/home/co/Documents/ImageClassifier/tensorflow/tensorflow/contrib/tfprof/__init__.py",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""
Copyright (c) 2017 Cyberhaven
Copyright (c) 2017 Dependable Systems Laboratory, EPFL
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import glob
import grp
import logging
import os
import pwd
import re
import socket
import time
from threading import Thread
import psutil
from psutil import NoSuchProcess
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
import sh
from sh import ErrorReturnCode
from s2e_env import CONSTANTS
from s2e_env.command import EnvCommand, CommandError
from s2e_env.utils import repos
from s2e_env.utils.images import ImageDownloader, get_image_templates, get_app_templates, get_all_images, \
translate_image_name
# Module-level logger; the name matches the s2e subcommand ('image_build').
logger = logging.getLogger('image_build')
def _get_user_groups(user_name):
"""
Get a list of groups for the user ``user_name``.
"""
groups = [g.gr_name for g in grp.getgrall() if user_name in g.gr_mem]
gid = pwd.getpwnam(user_name).pw_gid
groups.append(grp.getgrgid(gid).gr_name)
return groups
def _get_user_name():
"""
Get the current user.
"""
return pwd.getpwuid(os.getuid())[0]
def _user_belongs_to(group_name):
    """Return ``True`` if the current user is a member of ``group_name``."""
    return group_name in _get_user_groups(_get_user_name())
def _raise_group_error(group_name):
    """Abort with instructions on how to join the ``group_name`` group."""
    message = (f'You must belong to the {group_name} group in order to build '
               'images. Please run the following command, then logout '
               'and login:\n\n'
               f'\tsudo usermod -a -G {group_name} $(whoami)')
    raise CommandError(message)
def _check_groups_docker():
    """Ensure the current user is in the ``docker`` group (needed to build images)."""
    if _user_belongs_to('docker'):
        return
    _raise_group_error('docker')
def _check_groups_kvm():
    """Ensure KVM group membership; required only when KVM is used for the build."""
    # Distros name the group either 'libvirtd' or 'kvm'; either one suffices.
    has_kvm_access = _user_belongs_to('libvirtd') or _user_belongs_to('kvm')
    if not has_kvm_access:
        _raise_group_error('kvm')
def _check_virtualbox():
    """Fail if VirtualBox is running, since it cannot coexist with KVM.

    S2E builds images under KVM; a running VirtualBox hypervisor conflicts
    with it, so every VirtualBox VM must be shut down first.
    """
    # Adapted from https://github.com/giampaolo/psutil/issues/132#issuecomment-44017679
    # to avoid race conditions
    for process in psutil.process_iter():
        try:
            name = process.name()
        except NoSuchProcess:
            # The process exited while we were iterating; ignore it.
            continue
        if name == 'VBoxHeadless':
            raise CommandError('S2E uses KVM to build images. VirtualBox '
                               'is currently running, which is not '
                               'compatible with KVM. Please close all '
                               'VirtualBox VMs and try again.')
def _check_vmware():
    """Fail if VMware is running, since it cannot coexist with KVM.

    S2E builds images under KVM; a running VMware hypervisor conflicts
    with it, so every VMware VM must be shut down first.
    """
    for process in psutil.process_iter():
        try:
            name = process.name()
        except NoSuchProcess:
            # The process exited while we were iterating; ignore it.
            continue
        if name == 'vmware-vmx':
            raise CommandError('S2E uses KVM to build images. VMware '
                               'is currently running, which is not '
                               'compatible with KVM. Please close all '
                               'VMware VMs and try again.')
def _check_kvm():
    """Ensure the /dev/kvm interface exists; libs2e needs it to talk to QEMU."""
    kvm_device = os.path.join(os.sep, 'dev', 'kvm')
    if os.path.exists(kvm_device):
        return
    raise CommandError('KVM interface not found - check that /dev/kvm '
                       'exists. Alternatively, you can disable KVM (-n '
                       'option) or download pre-built images (-d option)')
def _check_vmlinux():
    """Verify that the /boot/vmlinu* kernel files are readable.

    guestfish needs read access to the host kernels; opening each one is
    the simplest portable permission check.
    """
    try:
        for kernel_path in glob.glob(os.path.join(os.sep, 'boot', 'vmlinu*')):
            with open(kernel_path, 'rb'):
                pass
    except IOError:
        raise CommandError('Make sure that the kernels in /boot are readable. '
                           'This is required for guestfish. Please run the '
                           'following command:\n\n'
                           'sudo chmod ugo+r /boot/vmlinu*') from None
# pylint: disable=no-member
def _check_cow(image_dir):
    """
    Check that the file system that stores guest images supports copy-on-write.

    Probes by creating a file and cloning it with ``cp --reflink=always``;
    returns True on success, False (with a warning) otherwise.  The probe
    files are always removed.
    """
    try:
        # Probe files live next to the images so the right file system is tested.
        src = f'{image_dir}/.cowcheck'
        dst = f'{image_dir}/.cowcheck1'
        sh.touch(src)
        # --reflink=always fails unless the file system supports CoW clones.
        sh.cp('--reflink=always', src, dst)
        return True
    except Exception:
        warn_msg = f"""
        Copy-on-write check failed.
        The file system where images are stored ({image_dir}) does not support copy-on-write.
        It is recommended to use an XFS or BTRFS file system with copy-on-write enabled as a storage
        location for S2E images, as this can save up to 60% of disk space. The building process checkpoints
        intermediate build steps with cp --reflink=auto to make use of copy-on-write if it is available.
        How to upgrade:
            1. Create an XFS or BTRFS partition large enough to store the images that you need (~300 GB for all images).
               Make sure you use reflink=1 to enable copy-on-write when running mkfs.xfs.
            2. Create a directory for guest images on that partition (e.g., /mnt/disk1/images)
            3. Delete the "images" folder in your S2E environment
            4. Create in your S2E environment a symbolic link called "images" to the directory you created in step 2
        """
        # Strip the 8-space literal indentation before logging.
        logger.warning(re.sub(r'^ {8}', '', warn_msg, flags=re.MULTILINE))
        return False
    finally:
        sh.rm('-f', src)
        sh.rm('-f', dst)
def _raise_invalid_image(image_name):
    """Abort with a hint telling the user how to list the available images."""
    raise CommandError(f'Invalid image name: {image_name}. Run ``s2e image_build`` '
                       'to list available images')
def _get_base_image_and_app(image_name):
x = image_name.split('/')
if len(x) == 1:
return x[0], None
if len(x) == 2:
return x
raise CommandError(f'Invalid image name {image_name}')
def _has_app_image(image_names):
for name in image_names:
if '/' in name:
return True
return False
def _check_product_keys(image_descriptors, image_names):
missing_keys = []
for image_name in image_names:
image = image_descriptors[image_name]
if 'product_key' in image:
if not image['product_key']:
missing_keys.append(image_name)
ios = image_descriptors[image_name].get('os', {})
if 'product_key' in ios:
if not ios['product_key']:
missing_keys.append(image_name)
if missing_keys:
logger.error('The following images require a product key:')
for image in missing_keys:
logger.error(' * %s', image)
raise CommandError('Please update images.json and/or apps.json.')
def _check_iso(templates, app_templates, iso_dir, image_names):
    """Ensure every installation ISO required by ``image_names`` can be found.

    Descriptors that reference an ISO by name (rather than by URL) must be
    able to locate that file inside ``iso_dir``.
    """
    for image_name in image_names:
        base_image, app_name = _get_base_image_and_app(image_name)
        descriptors = [templates[base_image]]
        if app_name:
            descriptors.append(app_templates[app_name])
        for descriptor in descriptors:
            iso = descriptor.get('iso', {})
            # ISOs fetched from a URL need no local file.
            if iso.get('url', ''):
                continue
            iso_name = iso.get('name', '')
            if not iso_name:
                continue
            if not iso_dir:
                raise CommandError(
                    'Please use the --iso-dir option to specify the path '
                    f'to a folder that contains {iso_name}'
                )
            iso_path = os.path.join(iso_dir, iso_name)
            if not os.path.exists(iso_path):
                raise CommandError(f'The image {image_name} requires {iso_path}, which could not be found')
def _is_port_available(port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("127.0.0.1", port))
return True
except socket.error:
return False
finally:
s.close()
def _start_ftp_server(image_path, port):
    """Serve ``image_path`` over anonymous FTP on 127.0.0.1:``port``.

    The server runs in a daemon thread so the build can continue; guest VMs
    use it to transfer files to the host during image construction.  Returns
    the ``FTPServer`` instance so the caller can later shut it down.
    """
    authorizer = DummyAuthorizer()
    # Anonymous clients get full read/write permissions rooted at image_path.
    authorizer.add_anonymous(image_path, perm='elradfmwMT')
    handler = FTPHandler
    handler.authorizer = authorizer
    # 10.0.2.2 is the host address as seen from QEMU's slirp user network.
    handler.masquerade_address = '10.0.2.2'
    # QEMU slirp won't let the guest reconnect if timeout happens, so we disable it
    handler.timeout = None
    server = FTPServer(("127.0.0.1", port), handler)
    thread = Thread(target=_run_ftp_server, args=[server])
    thread.daemon = True
    thread.start()
    # Give the server thread a moment to start accepting connections.
    time.sleep(1)
    return server
def _run_ftp_server(server):
    """Thread target: run ``server`` until it stops, then release its sockets."""
    try:
        server.serve_forever()
    finally:
        logger.info('FTP server terminated')
        server.close_all()
def _get_archive_rules(image_path, rule_names):
    """Map each image name to the ``.tar.xz`` archive target that should be built."""
    if _has_app_image(rule_names):
        raise CommandError('Building archives of app images is not supported yet')
    archive_rules = [os.path.join(image_path, f'{rule}.tar.xz') for rule in rule_names]
    logger.info('The following archives will be built:')
    for archive in archive_rules:
        logger.info(' * %s', archive)
    return archive_rules
def _download_images(image_path, image_names, templates):
    """Fetch prebuilt images into ``image_path`` instead of building them locally."""
    if _has_app_image(image_names):
        raise CommandError('Downloading of app images is not supported yet')
    downloader = ImageDownloader(templates)
    downloader.download_images(image_names, image_path)
    logger.info('Successfully downloaded images: %s', ', '.join(image_names))
class Command(EnvCommand):
    """
    Builds an image.

    Orchestrates validation of the host environment (groups, KVM, CoW file
    system, ISOs, product keys), then drives the image-building Makefile,
    serving build artifacts to guest VMs over a local FTP server.
    """
    # Help text shown by the s2e command-line framework.
    help = 'Build an image.'
    def __init__(self):
        # Defaults: headless build, KVM enabled, single make job.
        super().__init__()
        self._headless = True
        self._use_kvm = True
        self._num_cores = 1
        # Set from _check_cow() in handle(); gates DEBUG_INTERMEDIATE_RULES.
        self._has_cow = False
    def add_arguments(self, parser):
        """Register the command-line options understood by ``s2e image_build``."""
        super().add_arguments(parser)
        parser.add_argument('name',
                            help='The name of the image to build. If empty,'
                                 ' shows available images', nargs='*')
        parser.add_argument('-g', '--gui', action='store_true',
                            help='Display QEMU GUI during image build')
        parser.add_argument('-c', '--cores', required=False, default=2,
                            type=int,
                            help='The number of cores used when building the '
                                 'VM image. Defaults to 2')
        parser.add_argument('-x', '--clean', action='store_true',
                            help='Deletes all images and rebuild them from '
                                 'scratch')
        parser.add_argument('-a', '--archive', action='store_true',
                            help='Creates an archive for the specified image')
        parser.add_argument('-p', '--ftp-port', required=False, default=15468, type=int,
                            help='Port for the internal FTP server to receive files from guest VMs during build')
        parser.add_argument('-d', '--download', action='store_true',
                            help='Download image from the repository instead '
                                 'of building it')
        parser.add_argument('-i', '--iso-dir',
                            help='Path to folder that stores ISO files of Windows images')
        parser.add_argument('-n', '--no-kvm', action='store_true',
                            help='Disable KVM during image build')
    def handle(self, *args, **options):
        """Entry point: validate the environment, then build/download the requested images."""
        # --gui disables headless mode so the QEMU window is visible.
        if options['gui']:
            self._headless = False
        # If KVM has been explicitly disabled, don't use it during the build
        if options['no_kvm']:
            self._use_kvm = False
        self._num_cores = options['cores']
        # The path could have been deleted by a previous clean
        if not os.path.exists(self.image_path()):
            os.makedirs(self.image_path())
        img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])
        if options['clean']:
            self._invoke_make(img_build_dir, ['clean'])
            return
        image_names = options['name']
        templates = get_image_templates(img_build_dir)
        app_templates = get_app_templates(img_build_dir)
        images, image_groups, image_descriptors = get_all_images(templates, app_templates)
        # No image name given: list what is available and exit.
        if not image_names:
            self._print_image_list(images, image_groups, image_descriptors)
            print('\nRun ``s2e image_build <name>`` to build an image. '
                  'Note that you must run ``s2e build`` **before** building '
                  'an image')
            return
        # Expand group names (e.g. a family of images) into concrete image names.
        image_names = translate_image_name(images, image_groups, image_names)
        logger.info('The following images will be built:')
        for image in image_names:
            logger.info(' * %s', image)
        if options['download']:
            _download_images(self.image_path(), image_names, templates)
            return
        rule_names = image_names
        if options['archive']:
            # Build .tar.xz archive targets instead of the raw images.
            rule_names = _get_archive_rules(self.image_path(), image_names)
        iso_dir = os.path.abspath(options['iso_dir']) if options['iso_dir'] else None
        # Check for optional product keys and iso directories.
        # These may or may not be required, depending on the set of images.
        _check_product_keys(image_descriptors, image_names)
        _check_iso(templates, app_templates, iso_dir, image_names)
        if self._use_kvm:
            _check_kvm()
            _check_groups_kvm()
        _check_groups_docker()
        _check_vmlinux()
        self._has_cow = _check_cow(self.image_path())
        # KVM cannot coexist with other running hypervisors.
        if self._use_kvm:
            _check_virtualbox()
            _check_vmware()
        if not _is_port_available(options['ftp_port']):
            raise CommandError(f'localhost:{options["ftp_port"]} is not available. Check that the port is free or '
                               'specify a port with --ftp-port')
        # Clone kernel if needed.
        # This is necessary if the s2e env has been initialized with -b flag.
        self._clone_kernel()
        # The FTP server must outlive the make invocation: guests upload through it.
        server = _start_ftp_server(self.image_path(), options['ftp_port'])
        self._invoke_make(img_build_dir, rule_names, options['ftp_port'], iso_dir)
        logger.success('Built image(s) \'%s\'', ' '.join(image_names))
        server.close_all()
    def _invoke_make(self, img_build_dir, rule_names, ftp_port=0, iso_dir=''):
        """Run the image-building Makefile for ``rule_names`` with the proper environment."""
        env = os.environ.copy()
        env['S2E_INSTALL_ROOT'] = self.install_path()
        env['S2E_LINUX_KERNELS_ROOT'] = \
            self.source_path(CONSTANTS['repos']['images']['linux'])
        env['OUTDIR'] = self.image_path()
        env['QEMU_FTP_PORT'] = str(ftp_port)
        env['ISODIR'] = iso_dir if iso_dir else ''
        # Checkpoint intermediate build steps only when copy-on-write is cheap.
        env['DEBUG_INTERMEDIATE_RULES'] = '1' if self._has_cow else '0'
        logger.debug('Invoking makefile with:')
        logger.debug('export S2E_INSTALL_ROOT=%s', env['S2E_INSTALL_ROOT'])
        logger.debug('export S2E_LINUX_KERNELS_ROOT=%s', env['S2E_LINUX_KERNELS_ROOT'])
        logger.debug('export OUTDIR=%s', env['OUTDIR'])
        logger.debug('export ISODIR=%s', env.get('ISODIR', ''))
        logger.debug('export DEBUG_INTERMEDIATE_RULES=%s', env.get('DEBUG_INTERMEDIATE_RULES', ''))
        if self._headless:
            logger.warning('Image creation will run in headless mode. '
                           'Use --gui to see graphic output for debugging')
        else:
            # GRAPHICS is set (empty) only in GUI mode; presumably the
            # Makefile keys off its presence — confirm against the Makefile.
            env['GRAPHICS'] = ''
        if not self._use_kvm:
            env['QEMU_KVM'] = ''
            logger.warning('Image build without KVM. This will be slow')
        try:
            # _fg=True streams make's output to the terminal in the foreground.
            make = sh.Command('make').bake(file=os.path.join(img_build_dir,
                                                             'Makefile'),
                                           directory=self.image_path(),
                                           _env=env, _fg=True)
            make_image = make.bake(j=self._num_cores, r=True, warn_undefined_variables=True)
            make_image(sorted(rule_names))
        except ErrorReturnCode as e:
            raise CommandError(e) from e
    def _clone_kernel(self):
        """Clone the S2E Linux kernels repository if it is not already present."""
        kernels_root = self.source_path(CONSTANTS['repos']['images']['linux'])
        if os.path.exists(kernels_root):
            logger.info('Kernel repository already exists in %s', kernels_root)
            return
        logger.info('Cloning kernels repository to %s', kernels_root)
        kernels_repo = CONSTANTS['repos']['images']['linux']
        repos.git_clone_to_source(self.env_path(), kernels_repo)
    def _print_image_list(self, images, image_groups, image_descriptors):
        """Print the available image groups and images, aligned for readability."""
        img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])
        templates = get_image_templates(img_build_dir)
        if not templates:
            images_json_path = os.path.join(img_build_dir, 'images.json')
            raise CommandError('No images available to build. Make sure that '
                               f'{images_json_path} exists and is valid')
        # Width of the longest name, used to align the descriptions below.
        def get_max_len(lst):
            ret = 0
            for item in lst:
                if len(item) > ret:
                    ret = len(item)
            return ret
        print('Available image groups:')
        max_group_len = get_max_len(image_groups)
        for group in image_groups:
            print(f' * {group:{max_group_len}} - Build {group} images')
        print('\nAvailable images:')
        max_image_len = get_max_len(images)
        for image in sorted(images):
            print(f' * {image:{max_image_len}} - {image_descriptors[image]["name"]}')
    def _print_apps_list(self):
        """Print every available app image as ``base_image/app`` with its description."""
        img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])
        app_templates = get_app_templates(img_build_dir)
        if not app_templates:
            apps_json_path = os.path.join(img_build_dir, 'apps.json')
            raise CommandError('No apps available to build. Make sure that '
                               f'{apps_json_path} exists and is valid')
        print('Available applications:')
        for app_template, desc in sorted(app_templates.items()):
            # An app may be applicable to several base images.
            for base_image in desc['base_images']:
                print(f' * {base_image}/{app_template} - {desc["name"]}')
|
normal
|
{
"blob_id": "e5921edef3d3c56a73f2674f483ea4d1f3577629",
"index": 5186,
"step-1": "<mask token>\n\n\ndef _get_user_name():\n \"\"\"\n Get the current user.\n \"\"\"\n return pwd.getpwuid(os.getuid())[0]\n\n\ndef _user_belongs_to(group_name):\n \"\"\"\n Check that the current user belongs to the ``group_name`` group.\n \"\"\"\n user_name = _get_user_name()\n groups = _get_user_groups(user_name)\n return group_name in groups\n\n\n<mask token>\n\n\ndef _check_vmware():\n \"\"\"\n Check if VMWare is running. VMware conflicts with S2E's requirement for KVM, so VMWare must\n *not* be running together with S2E.\n \"\"\"\n for proc in psutil.process_iter():\n try:\n if proc.name() == 'vmware-vmx':\n raise CommandError(\n 'S2E uses KVM to build images. VMware is currently running, which is not compatible with KVM. Please close all VMware VMs and try again.'\n )\n except NoSuchProcess:\n pass\n\n\ndef _check_kvm():\n \"\"\"\n Check that the KVM interface exists. This is required by libs2e to communicate with QEMU.\n \"\"\"\n if not os.path.exists(os.path.join(os.sep, 'dev', 'kvm')):\n raise CommandError(\n 'KVM interface not found - check that /dev/kvm exists. 
Alternatively, you can disable KVM (-n option) or download pre-built images (-d option)'\n )\n\n\n<mask token>\n\n\ndef _get_base_image_and_app(image_name):\n x = image_name.split('/')\n if len(x) == 1:\n return x[0], None\n if len(x) == 2:\n return x\n raise CommandError(f'Invalid image name {image_name}')\n\n\n<mask token>\n\n\ndef _check_product_keys(image_descriptors, image_names):\n missing_keys = []\n for image_name in image_names:\n image = image_descriptors[image_name]\n if 'product_key' in image:\n if not image['product_key']:\n missing_keys.append(image_name)\n ios = image_descriptors[image_name].get('os', {})\n if 'product_key' in ios:\n if not ios['product_key']:\n missing_keys.append(image_name)\n if missing_keys:\n logger.error('The following images require a product key:')\n for image in missing_keys:\n logger.error(' * %s', image)\n raise CommandError('Please update images.json and/or apps.json.')\n\n\ndef _check_iso(templates, app_templates, iso_dir, image_names):\n for image_name in image_names:\n base_image, app_name = _get_base_image_and_app(image_name)\n descriptors = [templates[base_image]]\n if app_name:\n descriptors.append(app_templates[app_name])\n for desc in descriptors:\n iso = desc.get('iso', {})\n if iso.get('url', ''):\n continue\n name = iso.get('name', '')\n if not name:\n continue\n if not iso_dir:\n raise CommandError(\n f'Please use the --iso-dir option to specify the path to a folder that contains {name}'\n )\n path = os.path.join(iso_dir, name)\n if not os.path.exists(path):\n raise CommandError(\n f'The image {image_name} requires {path}, which could not be found'\n )\n\n\n<mask token>\n\n\nclass Command(EnvCommand):\n \"\"\"\n Builds an image.\n \"\"\"\n help = 'Build an image.'\n\n def __init__(self):\n super().__init__()\n self._headless = True\n self._use_kvm = True\n self._num_cores = 1\n self._has_cow = False\n\n def add_arguments(self, parser):\n super().add_arguments(parser)\n parser.add_argument('name', help=\n 'The 
name of the image to build. If empty, shows available images',\n nargs='*')\n parser.add_argument('-g', '--gui', action='store_true', help=\n 'Display QEMU GUI during image build')\n parser.add_argument('-c', '--cores', required=False, default=2,\n type=int, help=\n 'The number of cores used when building the VM image. Defaults to 2'\n )\n parser.add_argument('-x', '--clean', action='store_true', help=\n 'Deletes all images and rebuild them from scratch')\n parser.add_argument('-a', '--archive', action='store_true', help=\n 'Creates an archive for the specified image')\n parser.add_argument('-p', '--ftp-port', required=False, default=\n 15468, type=int, help=\n 'Port for the internal FTP server to receive files from guest VMs during build'\n )\n parser.add_argument('-d', '--download', action='store_true', help=\n 'Download image from the repository instead of building it')\n parser.add_argument('-i', '--iso-dir', help=\n 'Path to folder that stores ISO files of Windows images')\n parser.add_argument('-n', '--no-kvm', action='store_true', help=\n 'Disable KVM during image build')\n\n def handle(self, *args, **options):\n if options['gui']:\n self._headless = False\n if options['no_kvm']:\n self._use_kvm = False\n self._num_cores = options['cores']\n if not os.path.exists(self.image_path()):\n os.makedirs(self.image_path())\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n if options['clean']:\n self._invoke_make(img_build_dir, ['clean'])\n return\n image_names = options['name']\n templates = get_image_templates(img_build_dir)\n app_templates = get_app_templates(img_build_dir)\n images, image_groups, image_descriptors = get_all_images(templates,\n app_templates)\n if not image_names:\n self._print_image_list(images, image_groups, image_descriptors)\n print(\n \"\"\"\nRun ``s2e image_build <name>`` to build an image. 
Note that you must run ``s2e build`` **before** building an image\"\"\"\n )\n return\n image_names = translate_image_name(images, image_groups, image_names)\n logger.info('The following images will be built:')\n for image in image_names:\n logger.info(' * %s', image)\n if options['download']:\n _download_images(self.image_path(), image_names, templates)\n return\n rule_names = image_names\n if options['archive']:\n rule_names = _get_archive_rules(self.image_path(), image_names)\n iso_dir = os.path.abspath(options['iso_dir']) if options['iso_dir'\n ] else None\n _check_product_keys(image_descriptors, image_names)\n _check_iso(templates, app_templates, iso_dir, image_names)\n if self._use_kvm:\n _check_kvm()\n _check_groups_kvm()\n _check_groups_docker()\n _check_vmlinux()\n self._has_cow = _check_cow(self.image_path())\n if self._use_kvm:\n _check_virtualbox()\n _check_vmware()\n if not _is_port_available(options['ftp_port']):\n raise CommandError(\n f\"localhost:{options['ftp_port']} is not available. 
Check that the port is free or specify a port with --ftp-port\"\n )\n self._clone_kernel()\n server = _start_ftp_server(self.image_path(), options['ftp_port'])\n self._invoke_make(img_build_dir, rule_names, options['ftp_port'],\n iso_dir)\n logger.success(\"Built image(s) '%s'\", ' '.join(image_names))\n server.close_all()\n\n def _invoke_make(self, img_build_dir, rule_names, ftp_port=0, iso_dir=''):\n env = os.environ.copy()\n env['S2E_INSTALL_ROOT'] = self.install_path()\n env['S2E_LINUX_KERNELS_ROOT'] = self.source_path(CONSTANTS['repos']\n ['images']['linux'])\n env['OUTDIR'] = self.image_path()\n env['QEMU_FTP_PORT'] = str(ftp_port)\n env['ISODIR'] = iso_dir if iso_dir else ''\n env['DEBUG_INTERMEDIATE_RULES'] = '1' if self._has_cow else '0'\n logger.debug('Invoking makefile with:')\n logger.debug('export S2E_INSTALL_ROOT=%s', env['S2E_INSTALL_ROOT'])\n logger.debug('export S2E_LINUX_KERNELS_ROOT=%s', env[\n 'S2E_LINUX_KERNELS_ROOT'])\n logger.debug('export OUTDIR=%s', env['OUTDIR'])\n logger.debug('export ISODIR=%s', env.get('ISODIR', ''))\n logger.debug('export DEBUG_INTERMEDIATE_RULES=%s', env.get(\n 'DEBUG_INTERMEDIATE_RULES', ''))\n if self._headless:\n logger.warning(\n 'Image creation will run in headless mode. Use --gui to see graphic output for debugging'\n )\n else:\n env['GRAPHICS'] = ''\n if not self._use_kvm:\n env['QEMU_KVM'] = ''\n logger.warning('Image build without KVM. 
This will be slow')\n try:\n make = sh.Command('make').bake(file=os.path.join(img_build_dir,\n 'Makefile'), directory=self.image_path(), _env=env, _fg=True)\n make_image = make.bake(j=self._num_cores, r=True,\n warn_undefined_variables=True)\n make_image(sorted(rule_names))\n except ErrorReturnCode as e:\n raise CommandError(e) from e\n\n def _clone_kernel(self):\n kernels_root = self.source_path(CONSTANTS['repos']['images']['linux'])\n if os.path.exists(kernels_root):\n logger.info('Kernel repository already exists in %s', kernels_root)\n return\n logger.info('Cloning kernels repository to %s', kernels_root)\n kernels_repo = CONSTANTS['repos']['images']['linux']\n repos.git_clone_to_source(self.env_path(), kernels_repo)\n\n def _print_image_list(self, images, image_groups, image_descriptors):\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n templates = get_image_templates(img_build_dir)\n if not templates:\n images_json_path = os.path.join(img_build_dir, 'images.json')\n raise CommandError(\n f'No images available to build. Make sure that {images_json_path} exists and is valid'\n )\n\n def get_max_len(lst):\n ret = 0\n for item in lst:\n if len(item) > ret:\n ret = len(item)\n return ret\n print('Available image groups:')\n max_group_len = get_max_len(image_groups)\n for group in image_groups:\n print(f' * {group:{max_group_len}} - Build {group} images')\n print('\\nAvailable images:')\n max_image_len = get_max_len(images)\n for image in sorted(images):\n print(\n f\" * {image:{max_image_len}} - {image_descriptors[image]['name']}\"\n )\n\n def _print_apps_list(self):\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n app_templates = get_app_templates(img_build_dir)\n if not app_templates:\n apps_json_path = os.path.join(img_build_dir, 'apps.json')\n raise CommandError(\n f'No apps available to build. 
Make sure that {apps_json_path} exists and is valid'\n )\n print('Available applications:')\n for app_template, desc in sorted(app_templates.items()):\n for base_image in desc['base_images']:\n print(f\" * {base_image}/{app_template} - {desc['name']}\")\n",
"step-2": "<mask token>\n\n\ndef _get_user_name():\n \"\"\"\n Get the current user.\n \"\"\"\n return pwd.getpwuid(os.getuid())[0]\n\n\ndef _user_belongs_to(group_name):\n \"\"\"\n Check that the current user belongs to the ``group_name`` group.\n \"\"\"\n user_name = _get_user_name()\n groups = _get_user_groups(user_name)\n return group_name in groups\n\n\n<mask token>\n\n\ndef _check_vmware():\n \"\"\"\n Check if VMWare is running. VMware conflicts with S2E's requirement for KVM, so VMWare must\n *not* be running together with S2E.\n \"\"\"\n for proc in psutil.process_iter():\n try:\n if proc.name() == 'vmware-vmx':\n raise CommandError(\n 'S2E uses KVM to build images. VMware is currently running, which is not compatible with KVM. Please close all VMware VMs and try again.'\n )\n except NoSuchProcess:\n pass\n\n\ndef _check_kvm():\n \"\"\"\n Check that the KVM interface exists. This is required by libs2e to communicate with QEMU.\n \"\"\"\n if not os.path.exists(os.path.join(os.sep, 'dev', 'kvm')):\n raise CommandError(\n 'KVM interface not found - check that /dev/kvm exists. Alternatively, you can disable KVM (-n option) or download pre-built images (-d option)'\n )\n\n\ndef _check_vmlinux():\n \"\"\"\n Check that /boot/vmlinux* files are readable. This is important for guestfish.\n \"\"\"\n try:\n for f in glob.glob(os.path.join(os.sep, 'boot', 'vmlinu*')):\n with open(f, 'rb'):\n pass\n except IOError:\n raise CommandError(\n \"\"\"Make sure that the kernels in /boot are readable. This is required for guestfish. Please run the following command:\n\nsudo chmod ugo+r /boot/vmlinu*\"\"\"\n ) from None\n\n\n<mask token>\n\n\ndef _raise_invalid_image(image_name):\n raise CommandError(\n f'Invalid image name: {image_name}. 
Run ``s2e image_build`` to list available images'\n )\n\n\ndef _get_base_image_and_app(image_name):\n x = image_name.split('/')\n if len(x) == 1:\n return x[0], None\n if len(x) == 2:\n return x\n raise CommandError(f'Invalid image name {image_name}')\n\n\ndef _has_app_image(image_names):\n for name in image_names:\n if '/' in name:\n return True\n return False\n\n\ndef _check_product_keys(image_descriptors, image_names):\n missing_keys = []\n for image_name in image_names:\n image = image_descriptors[image_name]\n if 'product_key' in image:\n if not image['product_key']:\n missing_keys.append(image_name)\n ios = image_descriptors[image_name].get('os', {})\n if 'product_key' in ios:\n if not ios['product_key']:\n missing_keys.append(image_name)\n if missing_keys:\n logger.error('The following images require a product key:')\n for image in missing_keys:\n logger.error(' * %s', image)\n raise CommandError('Please update images.json and/or apps.json.')\n\n\ndef _check_iso(templates, app_templates, iso_dir, image_names):\n for image_name in image_names:\n base_image, app_name = _get_base_image_and_app(image_name)\n descriptors = [templates[base_image]]\n if app_name:\n descriptors.append(app_templates[app_name])\n for desc in descriptors:\n iso = desc.get('iso', {})\n if iso.get('url', ''):\n continue\n name = iso.get('name', '')\n if not name:\n continue\n if not iso_dir:\n raise CommandError(\n f'Please use the --iso-dir option to specify the path to a folder that contains {name}'\n )\n path = os.path.join(iso_dir, name)\n if not os.path.exists(path):\n raise CommandError(\n f'The image {image_name} requires {path}, which could not be found'\n )\n\n\n<mask token>\n\n\ndef _start_ftp_server(image_path, port):\n authorizer = DummyAuthorizer()\n authorizer.add_anonymous(image_path, perm='elradfmwMT')\n handler = FTPHandler\n handler.authorizer = authorizer\n handler.masquerade_address = '10.0.2.2'\n handler.timeout = None\n server = FTPServer(('127.0.0.1', port), 
handler)\n thread = Thread(target=_run_ftp_server, args=[server])\n thread.daemon = True\n thread.start()\n time.sleep(1)\n return server\n\n\n<mask token>\n\n\nclass Command(EnvCommand):\n \"\"\"\n Builds an image.\n \"\"\"\n help = 'Build an image.'\n\n def __init__(self):\n super().__init__()\n self._headless = True\n self._use_kvm = True\n self._num_cores = 1\n self._has_cow = False\n\n def add_arguments(self, parser):\n super().add_arguments(parser)\n parser.add_argument('name', help=\n 'The name of the image to build. If empty, shows available images',\n nargs='*')\n parser.add_argument('-g', '--gui', action='store_true', help=\n 'Display QEMU GUI during image build')\n parser.add_argument('-c', '--cores', required=False, default=2,\n type=int, help=\n 'The number of cores used when building the VM image. Defaults to 2'\n )\n parser.add_argument('-x', '--clean', action='store_true', help=\n 'Deletes all images and rebuild them from scratch')\n parser.add_argument('-a', '--archive', action='store_true', help=\n 'Creates an archive for the specified image')\n parser.add_argument('-p', '--ftp-port', required=False, default=\n 15468, type=int, help=\n 'Port for the internal FTP server to receive files from guest VMs during build'\n )\n parser.add_argument('-d', '--download', action='store_true', help=\n 'Download image from the repository instead of building it')\n parser.add_argument('-i', '--iso-dir', help=\n 'Path to folder that stores ISO files of Windows images')\n parser.add_argument('-n', '--no-kvm', action='store_true', help=\n 'Disable KVM during image build')\n\n def handle(self, *args, **options):\n if options['gui']:\n self._headless = False\n if options['no_kvm']:\n self._use_kvm = False\n self._num_cores = options['cores']\n if not os.path.exists(self.image_path()):\n os.makedirs(self.image_path())\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n if options['clean']:\n self._invoke_make(img_build_dir, ['clean'])\n return\n 
image_names = options['name']\n templates = get_image_templates(img_build_dir)\n app_templates = get_app_templates(img_build_dir)\n images, image_groups, image_descriptors = get_all_images(templates,\n app_templates)\n if not image_names:\n self._print_image_list(images, image_groups, image_descriptors)\n print(\n \"\"\"\nRun ``s2e image_build <name>`` to build an image. Note that you must run ``s2e build`` **before** building an image\"\"\"\n )\n return\n image_names = translate_image_name(images, image_groups, image_names)\n logger.info('The following images will be built:')\n for image in image_names:\n logger.info(' * %s', image)\n if options['download']:\n _download_images(self.image_path(), image_names, templates)\n return\n rule_names = image_names\n if options['archive']:\n rule_names = _get_archive_rules(self.image_path(), image_names)\n iso_dir = os.path.abspath(options['iso_dir']) if options['iso_dir'\n ] else None\n _check_product_keys(image_descriptors, image_names)\n _check_iso(templates, app_templates, iso_dir, image_names)\n if self._use_kvm:\n _check_kvm()\n _check_groups_kvm()\n _check_groups_docker()\n _check_vmlinux()\n self._has_cow = _check_cow(self.image_path())\n if self._use_kvm:\n _check_virtualbox()\n _check_vmware()\n if not _is_port_available(options['ftp_port']):\n raise CommandError(\n f\"localhost:{options['ftp_port']} is not available. 
Check that the port is free or specify a port with --ftp-port\"\n )\n self._clone_kernel()\n server = _start_ftp_server(self.image_path(), options['ftp_port'])\n self._invoke_make(img_build_dir, rule_names, options['ftp_port'],\n iso_dir)\n logger.success(\"Built image(s) '%s'\", ' '.join(image_names))\n server.close_all()\n\n def _invoke_make(self, img_build_dir, rule_names, ftp_port=0, iso_dir=''):\n env = os.environ.copy()\n env['S2E_INSTALL_ROOT'] = self.install_path()\n env['S2E_LINUX_KERNELS_ROOT'] = self.source_path(CONSTANTS['repos']\n ['images']['linux'])\n env['OUTDIR'] = self.image_path()\n env['QEMU_FTP_PORT'] = str(ftp_port)\n env['ISODIR'] = iso_dir if iso_dir else ''\n env['DEBUG_INTERMEDIATE_RULES'] = '1' if self._has_cow else '0'\n logger.debug('Invoking makefile with:')\n logger.debug('export S2E_INSTALL_ROOT=%s', env['S2E_INSTALL_ROOT'])\n logger.debug('export S2E_LINUX_KERNELS_ROOT=%s', env[\n 'S2E_LINUX_KERNELS_ROOT'])\n logger.debug('export OUTDIR=%s', env['OUTDIR'])\n logger.debug('export ISODIR=%s', env.get('ISODIR', ''))\n logger.debug('export DEBUG_INTERMEDIATE_RULES=%s', env.get(\n 'DEBUG_INTERMEDIATE_RULES', ''))\n if self._headless:\n logger.warning(\n 'Image creation will run in headless mode. Use --gui to see graphic output for debugging'\n )\n else:\n env['GRAPHICS'] = ''\n if not self._use_kvm:\n env['QEMU_KVM'] = ''\n logger.warning('Image build without KVM. 
This will be slow')\n try:\n make = sh.Command('make').bake(file=os.path.join(img_build_dir,\n 'Makefile'), directory=self.image_path(), _env=env, _fg=True)\n make_image = make.bake(j=self._num_cores, r=True,\n warn_undefined_variables=True)\n make_image(sorted(rule_names))\n except ErrorReturnCode as e:\n raise CommandError(e) from e\n\n def _clone_kernel(self):\n kernels_root = self.source_path(CONSTANTS['repos']['images']['linux'])\n if os.path.exists(kernels_root):\n logger.info('Kernel repository already exists in %s', kernels_root)\n return\n logger.info('Cloning kernels repository to %s', kernels_root)\n kernels_repo = CONSTANTS['repos']['images']['linux']\n repos.git_clone_to_source(self.env_path(), kernels_repo)\n\n def _print_image_list(self, images, image_groups, image_descriptors):\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n templates = get_image_templates(img_build_dir)\n if not templates:\n images_json_path = os.path.join(img_build_dir, 'images.json')\n raise CommandError(\n f'No images available to build. Make sure that {images_json_path} exists and is valid'\n )\n\n def get_max_len(lst):\n ret = 0\n for item in lst:\n if len(item) > ret:\n ret = len(item)\n return ret\n print('Available image groups:')\n max_group_len = get_max_len(image_groups)\n for group in image_groups:\n print(f' * {group:{max_group_len}} - Build {group} images')\n print('\\nAvailable images:')\n max_image_len = get_max_len(images)\n for image in sorted(images):\n print(\n f\" * {image:{max_image_len}} - {image_descriptors[image]['name']}\"\n )\n\n def _print_apps_list(self):\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n app_templates = get_app_templates(img_build_dir)\n if not app_templates:\n apps_json_path = os.path.join(img_build_dir, 'apps.json')\n raise CommandError(\n f'No apps available to build. 
Make sure that {apps_json_path} exists and is valid'\n )\n print('Available applications:')\n for app_template, desc in sorted(app_templates.items()):\n for base_image in desc['base_images']:\n print(f\" * {base_image}/{app_template} - {desc['name']}\")\n",
"step-3": "<mask token>\n\n\ndef _get_user_name():\n \"\"\"\n Get the current user.\n \"\"\"\n return pwd.getpwuid(os.getuid())[0]\n\n\ndef _user_belongs_to(group_name):\n \"\"\"\n Check that the current user belongs to the ``group_name`` group.\n \"\"\"\n user_name = _get_user_name()\n groups = _get_user_groups(user_name)\n return group_name in groups\n\n\n<mask token>\n\n\ndef _check_groups_kvm():\n \"\"\"Being member of KVM is required only when using KVM to build images\"\"\"\n if not _user_belongs_to('libvirtd') and not _user_belongs_to('kvm'):\n _raise_group_error('kvm')\n\n\n<mask token>\n\n\ndef _check_vmware():\n \"\"\"\n Check if VMWare is running. VMware conflicts with S2E's requirement for KVM, so VMWare must\n *not* be running together with S2E.\n \"\"\"\n for proc in psutil.process_iter():\n try:\n if proc.name() == 'vmware-vmx':\n raise CommandError(\n 'S2E uses KVM to build images. VMware is currently running, which is not compatible with KVM. Please close all VMware VMs and try again.'\n )\n except NoSuchProcess:\n pass\n\n\ndef _check_kvm():\n \"\"\"\n Check that the KVM interface exists. This is required by libs2e to communicate with QEMU.\n \"\"\"\n if not os.path.exists(os.path.join(os.sep, 'dev', 'kvm')):\n raise CommandError(\n 'KVM interface not found - check that /dev/kvm exists. Alternatively, you can disable KVM (-n option) or download pre-built images (-d option)'\n )\n\n\ndef _check_vmlinux():\n \"\"\"\n Check that /boot/vmlinux* files are readable. This is important for guestfish.\n \"\"\"\n try:\n for f in glob.glob(os.path.join(os.sep, 'boot', 'vmlinu*')):\n with open(f, 'rb'):\n pass\n except IOError:\n raise CommandError(\n \"\"\"Make sure that the kernels in /boot are readable. This is required for guestfish. 
Please run the following command:\n\nsudo chmod ugo+r /boot/vmlinu*\"\"\"\n ) from None\n\n\ndef _check_cow(image_dir):\n \"\"\"\n Check that the file system that stores guest images supports copy-on-write.\n \"\"\"\n try:\n src = f'{image_dir}/.cowcheck'\n dst = f'{image_dir}/.cowcheck1'\n sh.touch(src)\n sh.cp('--reflink=always', src, dst)\n return True\n except Exception:\n warn_msg = f\"\"\"\n Copy-on-write check failed.\n The file system where images are stored ({image_dir}) does not support copy-on-write.\n It is recommended to use an XFS or BTRFS file system with copy-on-write enabled as a storage\n location for S2E images, as this can save up to 60% of disk space. The building process checkpoints\n intermediate build steps with cp --reflink=auto to make use of copy-on-write if it is available.\n\n How to upgrade:\n 1. Create an XFS or BTRFS partition large enough to store the images that you need (~300 GB for all images).\n Make sure you use reflink=1 to enable copy-on-write when running mkfs.xfs.\n 2. Create a directory for guest images on that partition (e.g., /mnt/disk1/images)\n 3. Delete the \"images\" folder in your S2E environment\n 4. Create in your S2E environment a symbolic link called \"images\" to the directory you created in step 2\n \"\"\"\n logger.warning(re.sub('^ {8}', '', warn_msg, flags=re.MULTILINE))\n return False\n finally:\n sh.rm('-f', src)\n sh.rm('-f', dst)\n\n\ndef _raise_invalid_image(image_name):\n raise CommandError(\n f'Invalid image name: {image_name}. 
Run ``s2e image_build`` to list available images'\n )\n\n\ndef _get_base_image_and_app(image_name):\n x = image_name.split('/')\n if len(x) == 1:\n return x[0], None\n if len(x) == 2:\n return x\n raise CommandError(f'Invalid image name {image_name}')\n\n\ndef _has_app_image(image_names):\n for name in image_names:\n if '/' in name:\n return True\n return False\n\n\ndef _check_product_keys(image_descriptors, image_names):\n missing_keys = []\n for image_name in image_names:\n image = image_descriptors[image_name]\n if 'product_key' in image:\n if not image['product_key']:\n missing_keys.append(image_name)\n ios = image_descriptors[image_name].get('os', {})\n if 'product_key' in ios:\n if not ios['product_key']:\n missing_keys.append(image_name)\n if missing_keys:\n logger.error('The following images require a product key:')\n for image in missing_keys:\n logger.error(' * %s', image)\n raise CommandError('Please update images.json and/or apps.json.')\n\n\ndef _check_iso(templates, app_templates, iso_dir, image_names):\n for image_name in image_names:\n base_image, app_name = _get_base_image_and_app(image_name)\n descriptors = [templates[base_image]]\n if app_name:\n descriptors.append(app_templates[app_name])\n for desc in descriptors:\n iso = desc.get('iso', {})\n if iso.get('url', ''):\n continue\n name = iso.get('name', '')\n if not name:\n continue\n if not iso_dir:\n raise CommandError(\n f'Please use the --iso-dir option to specify the path to a folder that contains {name}'\n )\n path = os.path.join(iso_dir, name)\n if not os.path.exists(path):\n raise CommandError(\n f'The image {image_name} requires {path}, which could not be found'\n )\n\n\n<mask token>\n\n\ndef _start_ftp_server(image_path, port):\n authorizer = DummyAuthorizer()\n authorizer.add_anonymous(image_path, perm='elradfmwMT')\n handler = FTPHandler\n handler.authorizer = authorizer\n handler.masquerade_address = '10.0.2.2'\n handler.timeout = None\n server = FTPServer(('127.0.0.1', port), 
handler)\n thread = Thread(target=_run_ftp_server, args=[server])\n thread.daemon = True\n thread.start()\n time.sleep(1)\n return server\n\n\n<mask token>\n\n\ndef _get_archive_rules(image_path, rule_names):\n if _has_app_image(rule_names):\n raise CommandError(\n 'Building archives of app images is not supported yet')\n archive_rules = []\n for r in rule_names:\n archive_rules.append(os.path.join(image_path, f'{r}.tar.xz'))\n logger.info('The following archives will be built:')\n for a in archive_rules:\n logger.info(' * %s', a)\n return archive_rules\n\n\n<mask token>\n\n\nclass Command(EnvCommand):\n \"\"\"\n Builds an image.\n \"\"\"\n help = 'Build an image.'\n\n def __init__(self):\n super().__init__()\n self._headless = True\n self._use_kvm = True\n self._num_cores = 1\n self._has_cow = False\n\n def add_arguments(self, parser):\n super().add_arguments(parser)\n parser.add_argument('name', help=\n 'The name of the image to build. If empty, shows available images',\n nargs='*')\n parser.add_argument('-g', '--gui', action='store_true', help=\n 'Display QEMU GUI during image build')\n parser.add_argument('-c', '--cores', required=False, default=2,\n type=int, help=\n 'The number of cores used when building the VM image. 
Defaults to 2'\n )\n parser.add_argument('-x', '--clean', action='store_true', help=\n 'Deletes all images and rebuild them from scratch')\n parser.add_argument('-a', '--archive', action='store_true', help=\n 'Creates an archive for the specified image')\n parser.add_argument('-p', '--ftp-port', required=False, default=\n 15468, type=int, help=\n 'Port for the internal FTP server to receive files from guest VMs during build'\n )\n parser.add_argument('-d', '--download', action='store_true', help=\n 'Download image from the repository instead of building it')\n parser.add_argument('-i', '--iso-dir', help=\n 'Path to folder that stores ISO files of Windows images')\n parser.add_argument('-n', '--no-kvm', action='store_true', help=\n 'Disable KVM during image build')\n\n def handle(self, *args, **options):\n if options['gui']:\n self._headless = False\n if options['no_kvm']:\n self._use_kvm = False\n self._num_cores = options['cores']\n if not os.path.exists(self.image_path()):\n os.makedirs(self.image_path())\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n if options['clean']:\n self._invoke_make(img_build_dir, ['clean'])\n return\n image_names = options['name']\n templates = get_image_templates(img_build_dir)\n app_templates = get_app_templates(img_build_dir)\n images, image_groups, image_descriptors = get_all_images(templates,\n app_templates)\n if not image_names:\n self._print_image_list(images, image_groups, image_descriptors)\n print(\n \"\"\"\nRun ``s2e image_build <name>`` to build an image. 
Note that you must run ``s2e build`` **before** building an image\"\"\"\n )\n return\n image_names = translate_image_name(images, image_groups, image_names)\n logger.info('The following images will be built:')\n for image in image_names:\n logger.info(' * %s', image)\n if options['download']:\n _download_images(self.image_path(), image_names, templates)\n return\n rule_names = image_names\n if options['archive']:\n rule_names = _get_archive_rules(self.image_path(), image_names)\n iso_dir = os.path.abspath(options['iso_dir']) if options['iso_dir'\n ] else None\n _check_product_keys(image_descriptors, image_names)\n _check_iso(templates, app_templates, iso_dir, image_names)\n if self._use_kvm:\n _check_kvm()\n _check_groups_kvm()\n _check_groups_docker()\n _check_vmlinux()\n self._has_cow = _check_cow(self.image_path())\n if self._use_kvm:\n _check_virtualbox()\n _check_vmware()\n if not _is_port_available(options['ftp_port']):\n raise CommandError(\n f\"localhost:{options['ftp_port']} is not available. 
Check that the port is free or specify a port with --ftp-port\"\n )\n self._clone_kernel()\n server = _start_ftp_server(self.image_path(), options['ftp_port'])\n self._invoke_make(img_build_dir, rule_names, options['ftp_port'],\n iso_dir)\n logger.success(\"Built image(s) '%s'\", ' '.join(image_names))\n server.close_all()\n\n def _invoke_make(self, img_build_dir, rule_names, ftp_port=0, iso_dir=''):\n env = os.environ.copy()\n env['S2E_INSTALL_ROOT'] = self.install_path()\n env['S2E_LINUX_KERNELS_ROOT'] = self.source_path(CONSTANTS['repos']\n ['images']['linux'])\n env['OUTDIR'] = self.image_path()\n env['QEMU_FTP_PORT'] = str(ftp_port)\n env['ISODIR'] = iso_dir if iso_dir else ''\n env['DEBUG_INTERMEDIATE_RULES'] = '1' if self._has_cow else '0'\n logger.debug('Invoking makefile with:')\n logger.debug('export S2E_INSTALL_ROOT=%s', env['S2E_INSTALL_ROOT'])\n logger.debug('export S2E_LINUX_KERNELS_ROOT=%s', env[\n 'S2E_LINUX_KERNELS_ROOT'])\n logger.debug('export OUTDIR=%s', env['OUTDIR'])\n logger.debug('export ISODIR=%s', env.get('ISODIR', ''))\n logger.debug('export DEBUG_INTERMEDIATE_RULES=%s', env.get(\n 'DEBUG_INTERMEDIATE_RULES', ''))\n if self._headless:\n logger.warning(\n 'Image creation will run in headless mode. Use --gui to see graphic output for debugging'\n )\n else:\n env['GRAPHICS'] = ''\n if not self._use_kvm:\n env['QEMU_KVM'] = ''\n logger.warning('Image build without KVM. 
This will be slow')\n try:\n make = sh.Command('make').bake(file=os.path.join(img_build_dir,\n 'Makefile'), directory=self.image_path(), _env=env, _fg=True)\n make_image = make.bake(j=self._num_cores, r=True,\n warn_undefined_variables=True)\n make_image(sorted(rule_names))\n except ErrorReturnCode as e:\n raise CommandError(e) from e\n\n def _clone_kernel(self):\n kernels_root = self.source_path(CONSTANTS['repos']['images']['linux'])\n if os.path.exists(kernels_root):\n logger.info('Kernel repository already exists in %s', kernels_root)\n return\n logger.info('Cloning kernels repository to %s', kernels_root)\n kernels_repo = CONSTANTS['repos']['images']['linux']\n repos.git_clone_to_source(self.env_path(), kernels_repo)\n\n def _print_image_list(self, images, image_groups, image_descriptors):\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n templates = get_image_templates(img_build_dir)\n if not templates:\n images_json_path = os.path.join(img_build_dir, 'images.json')\n raise CommandError(\n f'No images available to build. Make sure that {images_json_path} exists and is valid'\n )\n\n def get_max_len(lst):\n ret = 0\n for item in lst:\n if len(item) > ret:\n ret = len(item)\n return ret\n print('Available image groups:')\n max_group_len = get_max_len(image_groups)\n for group in image_groups:\n print(f' * {group:{max_group_len}} - Build {group} images')\n print('\\nAvailable images:')\n max_image_len = get_max_len(images)\n for image in sorted(images):\n print(\n f\" * {image:{max_image_len}} - {image_descriptors[image]['name']}\"\n )\n\n def _print_apps_list(self):\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n app_templates = get_app_templates(img_build_dir)\n if not app_templates:\n apps_json_path = os.path.join(img_build_dir, 'apps.json')\n raise CommandError(\n f'No apps available to build. 
Make sure that {apps_json_path} exists and is valid'\n )\n print('Available applications:')\n for app_template, desc in sorted(app_templates.items()):\n for base_image in desc['base_images']:\n print(f\" * {base_image}/{app_template} - {desc['name']}\")\n",
"step-4": "<mask token>\n\n\ndef _get_user_groups(user_name):\n \"\"\"\n Get a list of groups for the user ``user_name``.\n \"\"\"\n groups = [g.gr_name for g in grp.getgrall() if user_name in g.gr_mem]\n gid = pwd.getpwnam(user_name).pw_gid\n groups.append(grp.getgrgid(gid).gr_name)\n return groups\n\n\ndef _get_user_name():\n \"\"\"\n Get the current user.\n \"\"\"\n return pwd.getpwuid(os.getuid())[0]\n\n\ndef _user_belongs_to(group_name):\n \"\"\"\n Check that the current user belongs to the ``group_name`` group.\n \"\"\"\n user_name = _get_user_name()\n groups = _get_user_groups(user_name)\n return group_name in groups\n\n\ndef _raise_group_error(group_name):\n raise CommandError(\n f\"\"\"You must belong to the {group_name} group in order to build images. Please run the following command, then logout and login:\n\n\tsudo usermod -a -G {group_name} $(whoami)\"\"\"\n )\n\n\ndef _check_groups_docker():\n \"\"\"\n Check that the current user belongs to the required groups to both run S2E and build S2E images.\n \"\"\"\n if not _user_belongs_to('docker'):\n _raise_group_error('docker')\n\n\ndef _check_groups_kvm():\n \"\"\"Being member of KVM is required only when using KVM to build images\"\"\"\n if not _user_belongs_to('libvirtd') and not _user_belongs_to('kvm'):\n _raise_group_error('kvm')\n\n\ndef _check_virtualbox():\n \"\"\"\n Check if VirtualBox is running. VirtualBox conflicts with S2E's requirement for KVM, so VirtualBox must\n *not* be running together with S2E.\n \"\"\"\n for proc in psutil.process_iter():\n try:\n if proc.name() == 'VBoxHeadless':\n raise CommandError(\n 'S2E uses KVM to build images. VirtualBox is currently running, which is not compatible with KVM. Please close all VirtualBox VMs and try again.'\n )\n except NoSuchProcess:\n pass\n\n\ndef _check_vmware():\n \"\"\"\n Check if VMWare is running. 
VMware conflicts with S2E's requirement for KVM, so VMWare must\n *not* be running together with S2E.\n \"\"\"\n for proc in psutil.process_iter():\n try:\n if proc.name() == 'vmware-vmx':\n raise CommandError(\n 'S2E uses KVM to build images. VMware is currently running, which is not compatible with KVM. Please close all VMware VMs and try again.'\n )\n except NoSuchProcess:\n pass\n\n\ndef _check_kvm():\n \"\"\"\n Check that the KVM interface exists. This is required by libs2e to communicate with QEMU.\n \"\"\"\n if not os.path.exists(os.path.join(os.sep, 'dev', 'kvm')):\n raise CommandError(\n 'KVM interface not found - check that /dev/kvm exists. Alternatively, you can disable KVM (-n option) or download pre-built images (-d option)'\n )\n\n\ndef _check_vmlinux():\n \"\"\"\n Check that /boot/vmlinux* files are readable. This is important for guestfish.\n \"\"\"\n try:\n for f in glob.glob(os.path.join(os.sep, 'boot', 'vmlinu*')):\n with open(f, 'rb'):\n pass\n except IOError:\n raise CommandError(\n \"\"\"Make sure that the kernels in /boot are readable. This is required for guestfish. Please run the following command:\n\nsudo chmod ugo+r /boot/vmlinu*\"\"\"\n ) from None\n\n\ndef _check_cow(image_dir):\n \"\"\"\n Check that the file system that stores guest images supports copy-on-write.\n \"\"\"\n try:\n src = f'{image_dir}/.cowcheck'\n dst = f'{image_dir}/.cowcheck1'\n sh.touch(src)\n sh.cp('--reflink=always', src, dst)\n return True\n except Exception:\n warn_msg = f\"\"\"\n Copy-on-write check failed.\n The file system where images are stored ({image_dir}) does not support copy-on-write.\n It is recommended to use an XFS or BTRFS file system with copy-on-write enabled as a storage\n location for S2E images, as this can save up to 60% of disk space. The building process checkpoints\n intermediate build steps with cp --reflink=auto to make use of copy-on-write if it is available.\n\n How to upgrade:\n 1. 
Create an XFS or BTRFS partition large enough to store the images that you need (~300 GB for all images).\n Make sure you use reflink=1 to enable copy-on-write when running mkfs.xfs.\n 2. Create a directory for guest images on that partition (e.g., /mnt/disk1/images)\n 3. Delete the \"images\" folder in your S2E environment\n 4. Create in your S2E environment a symbolic link called \"images\" to the directory you created in step 2\n \"\"\"\n logger.warning(re.sub('^ {8}', '', warn_msg, flags=re.MULTILINE))\n return False\n finally:\n sh.rm('-f', src)\n sh.rm('-f', dst)\n\n\ndef _raise_invalid_image(image_name):\n raise CommandError(\n f'Invalid image name: {image_name}. Run ``s2e image_build`` to list available images'\n )\n\n\ndef _get_base_image_and_app(image_name):\n x = image_name.split('/')\n if len(x) == 1:\n return x[0], None\n if len(x) == 2:\n return x\n raise CommandError(f'Invalid image name {image_name}')\n\n\ndef _has_app_image(image_names):\n for name in image_names:\n if '/' in name:\n return True\n return False\n\n\ndef _check_product_keys(image_descriptors, image_names):\n missing_keys = []\n for image_name in image_names:\n image = image_descriptors[image_name]\n if 'product_key' in image:\n if not image['product_key']:\n missing_keys.append(image_name)\n ios = image_descriptors[image_name].get('os', {})\n if 'product_key' in ios:\n if not ios['product_key']:\n missing_keys.append(image_name)\n if missing_keys:\n logger.error('The following images require a product key:')\n for image in missing_keys:\n logger.error(' * %s', image)\n raise CommandError('Please update images.json and/or apps.json.')\n\n\ndef _check_iso(templates, app_templates, iso_dir, image_names):\n for image_name in image_names:\n base_image, app_name = _get_base_image_and_app(image_name)\n descriptors = [templates[base_image]]\n if app_name:\n descriptors.append(app_templates[app_name])\n for desc in descriptors:\n iso = desc.get('iso', {})\n if iso.get('url', ''):\n continue\n 
name = iso.get('name', '')\n if not name:\n continue\n if not iso_dir:\n raise CommandError(\n f'Please use the --iso-dir option to specify the path to a folder that contains {name}'\n )\n path = os.path.join(iso_dir, name)\n if not os.path.exists(path):\n raise CommandError(\n f'The image {image_name} requires {path}, which could not be found'\n )\n\n\ndef _is_port_available(port):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.bind(('127.0.0.1', port))\n return True\n except socket.error:\n return False\n finally:\n s.close()\n\n\ndef _start_ftp_server(image_path, port):\n authorizer = DummyAuthorizer()\n authorizer.add_anonymous(image_path, perm='elradfmwMT')\n handler = FTPHandler\n handler.authorizer = authorizer\n handler.masquerade_address = '10.0.2.2'\n handler.timeout = None\n server = FTPServer(('127.0.0.1', port), handler)\n thread = Thread(target=_run_ftp_server, args=[server])\n thread.daemon = True\n thread.start()\n time.sleep(1)\n return server\n\n\ndef _run_ftp_server(server):\n try:\n server.serve_forever()\n finally:\n logger.info('FTP server terminated')\n server.close_all()\n\n\ndef _get_archive_rules(image_path, rule_names):\n if _has_app_image(rule_names):\n raise CommandError(\n 'Building archives of app images is not supported yet')\n archive_rules = []\n for r in rule_names:\n archive_rules.append(os.path.join(image_path, f'{r}.tar.xz'))\n logger.info('The following archives will be built:')\n for a in archive_rules:\n logger.info(' * %s', a)\n return archive_rules\n\n\n<mask token>\n\n\nclass Command(EnvCommand):\n \"\"\"\n Builds an image.\n \"\"\"\n help = 'Build an image.'\n\n def __init__(self):\n super().__init__()\n self._headless = True\n self._use_kvm = True\n self._num_cores = 1\n self._has_cow = False\n\n def add_arguments(self, parser):\n super().add_arguments(parser)\n parser.add_argument('name', help=\n 'The name of the image to build. 
If empty, shows available images',\n nargs='*')\n parser.add_argument('-g', '--gui', action='store_true', help=\n 'Display QEMU GUI during image build')\n parser.add_argument('-c', '--cores', required=False, default=2,\n type=int, help=\n 'The number of cores used when building the VM image. Defaults to 2'\n )\n parser.add_argument('-x', '--clean', action='store_true', help=\n 'Deletes all images and rebuild them from scratch')\n parser.add_argument('-a', '--archive', action='store_true', help=\n 'Creates an archive for the specified image')\n parser.add_argument('-p', '--ftp-port', required=False, default=\n 15468, type=int, help=\n 'Port for the internal FTP server to receive files from guest VMs during build'\n )\n parser.add_argument('-d', '--download', action='store_true', help=\n 'Download image from the repository instead of building it')\n parser.add_argument('-i', '--iso-dir', help=\n 'Path to folder that stores ISO files of Windows images')\n parser.add_argument('-n', '--no-kvm', action='store_true', help=\n 'Disable KVM during image build')\n\n def handle(self, *args, **options):\n if options['gui']:\n self._headless = False\n if options['no_kvm']:\n self._use_kvm = False\n self._num_cores = options['cores']\n if not os.path.exists(self.image_path()):\n os.makedirs(self.image_path())\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n if options['clean']:\n self._invoke_make(img_build_dir, ['clean'])\n return\n image_names = options['name']\n templates = get_image_templates(img_build_dir)\n app_templates = get_app_templates(img_build_dir)\n images, image_groups, image_descriptors = get_all_images(templates,\n app_templates)\n if not image_names:\n self._print_image_list(images, image_groups, image_descriptors)\n print(\n \"\"\"\nRun ``s2e image_build <name>`` to build an image. 
Note that you must run ``s2e build`` **before** building an image\"\"\"\n )\n return\n image_names = translate_image_name(images, image_groups, image_names)\n logger.info('The following images will be built:')\n for image in image_names:\n logger.info(' * %s', image)\n if options['download']:\n _download_images(self.image_path(), image_names, templates)\n return\n rule_names = image_names\n if options['archive']:\n rule_names = _get_archive_rules(self.image_path(), image_names)\n iso_dir = os.path.abspath(options['iso_dir']) if options['iso_dir'\n ] else None\n _check_product_keys(image_descriptors, image_names)\n _check_iso(templates, app_templates, iso_dir, image_names)\n if self._use_kvm:\n _check_kvm()\n _check_groups_kvm()\n _check_groups_docker()\n _check_vmlinux()\n self._has_cow = _check_cow(self.image_path())\n if self._use_kvm:\n _check_virtualbox()\n _check_vmware()\n if not _is_port_available(options['ftp_port']):\n raise CommandError(\n f\"localhost:{options['ftp_port']} is not available. 
Check that the port is free or specify a port with --ftp-port\"\n )\n self._clone_kernel()\n server = _start_ftp_server(self.image_path(), options['ftp_port'])\n self._invoke_make(img_build_dir, rule_names, options['ftp_port'],\n iso_dir)\n logger.success(\"Built image(s) '%s'\", ' '.join(image_names))\n server.close_all()\n\n def _invoke_make(self, img_build_dir, rule_names, ftp_port=0, iso_dir=''):\n env = os.environ.copy()\n env['S2E_INSTALL_ROOT'] = self.install_path()\n env['S2E_LINUX_KERNELS_ROOT'] = self.source_path(CONSTANTS['repos']\n ['images']['linux'])\n env['OUTDIR'] = self.image_path()\n env['QEMU_FTP_PORT'] = str(ftp_port)\n env['ISODIR'] = iso_dir if iso_dir else ''\n env['DEBUG_INTERMEDIATE_RULES'] = '1' if self._has_cow else '0'\n logger.debug('Invoking makefile with:')\n logger.debug('export S2E_INSTALL_ROOT=%s', env['S2E_INSTALL_ROOT'])\n logger.debug('export S2E_LINUX_KERNELS_ROOT=%s', env[\n 'S2E_LINUX_KERNELS_ROOT'])\n logger.debug('export OUTDIR=%s', env['OUTDIR'])\n logger.debug('export ISODIR=%s', env.get('ISODIR', ''))\n logger.debug('export DEBUG_INTERMEDIATE_RULES=%s', env.get(\n 'DEBUG_INTERMEDIATE_RULES', ''))\n if self._headless:\n logger.warning(\n 'Image creation will run in headless mode. Use --gui to see graphic output for debugging'\n )\n else:\n env['GRAPHICS'] = ''\n if not self._use_kvm:\n env['QEMU_KVM'] = ''\n logger.warning('Image build without KVM. 
This will be slow')\n try:\n make = sh.Command('make').bake(file=os.path.join(img_build_dir,\n 'Makefile'), directory=self.image_path(), _env=env, _fg=True)\n make_image = make.bake(j=self._num_cores, r=True,\n warn_undefined_variables=True)\n make_image(sorted(rule_names))\n except ErrorReturnCode as e:\n raise CommandError(e) from e\n\n def _clone_kernel(self):\n kernels_root = self.source_path(CONSTANTS['repos']['images']['linux'])\n if os.path.exists(kernels_root):\n logger.info('Kernel repository already exists in %s', kernels_root)\n return\n logger.info('Cloning kernels repository to %s', kernels_root)\n kernels_repo = CONSTANTS['repos']['images']['linux']\n repos.git_clone_to_source(self.env_path(), kernels_repo)\n\n def _print_image_list(self, images, image_groups, image_descriptors):\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n templates = get_image_templates(img_build_dir)\n if not templates:\n images_json_path = os.path.join(img_build_dir, 'images.json')\n raise CommandError(\n f'No images available to build. Make sure that {images_json_path} exists and is valid'\n )\n\n def get_max_len(lst):\n ret = 0\n for item in lst:\n if len(item) > ret:\n ret = len(item)\n return ret\n print('Available image groups:')\n max_group_len = get_max_len(image_groups)\n for group in image_groups:\n print(f' * {group:{max_group_len}} - Build {group} images')\n print('\\nAvailable images:')\n max_image_len = get_max_len(images)\n for image in sorted(images):\n print(\n f\" * {image:{max_image_len}} - {image_descriptors[image]['name']}\"\n )\n\n def _print_apps_list(self):\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n app_templates = get_app_templates(img_build_dir)\n if not app_templates:\n apps_json_path = os.path.join(img_build_dir, 'apps.json')\n raise CommandError(\n f'No apps available to build. 
Make sure that {apps_json_path} exists and is valid'\n )\n print('Available applications:')\n for app_template, desc in sorted(app_templates.items()):\n for base_image in desc['base_images']:\n print(f\" * {base_image}/{app_template} - {desc['name']}\")\n",
"step-5": "\"\"\"\nCopyright (c) 2017 Cyberhaven\nCopyright (c) 2017 Dependable Systems Laboratory, EPFL\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\n\nimport glob\nimport grp\nimport logging\nimport os\nimport pwd\nimport re\nimport socket\nimport time\n\nfrom threading import Thread\n\nimport psutil\nfrom psutil import NoSuchProcess\n\nfrom pyftpdlib.authorizers import DummyAuthorizer\nfrom pyftpdlib.handlers import FTPHandler\nfrom pyftpdlib.servers import FTPServer\n\nimport sh\nfrom sh import ErrorReturnCode\n\nfrom s2e_env import CONSTANTS\nfrom s2e_env.command import EnvCommand, CommandError\nfrom s2e_env.utils import repos\nfrom s2e_env.utils.images import ImageDownloader, get_image_templates, get_app_templates, get_all_images, \\\n translate_image_name\n\n\nlogger = logging.getLogger('image_build')\n\n\ndef _get_user_groups(user_name):\n \"\"\"\n Get a list of groups for the user ``user_name``.\n \"\"\"\n groups = [g.gr_name for g in grp.getgrall() if user_name in 
g.gr_mem]\n gid = pwd.getpwnam(user_name).pw_gid\n groups.append(grp.getgrgid(gid).gr_name)\n\n return groups\n\n\ndef _get_user_name():\n \"\"\"\n Get the current user.\n \"\"\"\n return pwd.getpwuid(os.getuid())[0]\n\n\ndef _user_belongs_to(group_name):\n \"\"\"\n Check that the current user belongs to the ``group_name`` group.\n \"\"\"\n user_name = _get_user_name()\n groups = _get_user_groups(user_name)\n return group_name in groups\n\n\ndef _raise_group_error(group_name):\n raise CommandError(f'You must belong to the {group_name} group in order to build '\n 'images. Please run the following command, then logout '\n 'and login:\\n\\n'\n f'\\tsudo usermod -a -G {group_name} $(whoami)')\n\n\ndef _check_groups_docker():\n \"\"\"\n Check that the current user belongs to the required groups to both run S2E and build S2E images.\n \"\"\"\n if not _user_belongs_to('docker'):\n _raise_group_error('docker')\n\n\ndef _check_groups_kvm():\n \"\"\"Being member of KVM is required only when using KVM to build images\"\"\"\n if not _user_belongs_to('libvirtd') and not _user_belongs_to('kvm'):\n _raise_group_error('kvm')\n\n\ndef _check_virtualbox():\n \"\"\"\n Check if VirtualBox is running. VirtualBox conflicts with S2E's requirement for KVM, so VirtualBox must\n *not* be running together with S2E.\n \"\"\"\n # Adapted from https://github.com/giampaolo/psutil/issues/132#issuecomment-44017679\n # to avoid race conditions\n for proc in psutil.process_iter():\n try:\n if proc.name() == 'VBoxHeadless':\n raise CommandError('S2E uses KVM to build images. VirtualBox '\n 'is currently running, which is not '\n 'compatible with KVM. Please close all '\n 'VirtualBox VMs and try again.')\n except NoSuchProcess:\n pass\n\n\ndef _check_vmware():\n \"\"\"\n Check if VMWare is running. 
VMware conflicts with S2E's requirement for KVM, so VMWare must\n *not* be running together with S2E.\n \"\"\"\n for proc in psutil.process_iter():\n try:\n if proc.name() == 'vmware-vmx':\n raise CommandError('S2E uses KVM to build images. VMware '\n 'is currently running, which is not '\n 'compatible with KVM. Please close all '\n 'VMware VMs and try again.')\n except NoSuchProcess:\n pass\n\n\ndef _check_kvm():\n \"\"\"\n Check that the KVM interface exists. This is required by libs2e to communicate with QEMU.\n \"\"\"\n if not os.path.exists(os.path.join(os.sep, 'dev', 'kvm')):\n raise CommandError('KVM interface not found - check that /dev/kvm '\n 'exists. Alternatively, you can disable KVM (-n '\n 'option) or download pre-built images (-d option)')\n\n\ndef _check_vmlinux():\n \"\"\"\n Check that /boot/vmlinux* files are readable. This is important for guestfish.\n \"\"\"\n try:\n for f in glob.glob(os.path.join(os.sep, 'boot', 'vmlinu*')):\n with open(f, 'rb'):\n pass\n except IOError:\n raise CommandError('Make sure that the kernels in /boot are readable. '\n 'This is required for guestfish. Please run the '\n 'following command:\\n\\n'\n 'sudo chmod ugo+r /boot/vmlinu*') from None\n\n\n# pylint: disable=no-member\ndef _check_cow(image_dir):\n \"\"\"\n Check that the file system that stores guest images supports copy-on-write.\n \"\"\"\n try:\n src = f'{image_dir}/.cowcheck'\n dst = f'{image_dir}/.cowcheck1'\n sh.touch(src)\n sh.cp('--reflink=always', src, dst)\n return True\n except Exception:\n warn_msg = f\"\"\"\n Copy-on-write check failed.\n The file system where images are stored ({image_dir}) does not support copy-on-write.\n It is recommended to use an XFS or BTRFS file system with copy-on-write enabled as a storage\n location for S2E images, as this can save up to 60% of disk space. The building process checkpoints\n intermediate build steps with cp --reflink=auto to make use of copy-on-write if it is available.\n\n How to upgrade:\n 1. 
Create an XFS or BTRFS partition large enough to store the images that you need (~300 GB for all images).\n Make sure you use reflink=1 to enable copy-on-write when running mkfs.xfs.\n 2. Create a directory for guest images on that partition (e.g., /mnt/disk1/images)\n 3. Delete the \"images\" folder in your S2E environment\n 4. Create in your S2E environment a symbolic link called \"images\" to the directory you created in step 2\n \"\"\"\n logger.warning(re.sub(r'^ {8}', '', warn_msg, flags=re.MULTILINE))\n return False\n finally:\n sh.rm('-f', src)\n sh.rm('-f', dst)\n\n\ndef _raise_invalid_image(image_name):\n raise CommandError(f'Invalid image name: {image_name}. Run ``s2e image_build`` '\n 'to list available images')\n\n\ndef _get_base_image_and_app(image_name):\n x = image_name.split('/')\n if len(x) == 1:\n return x[0], None\n if len(x) == 2:\n return x\n raise CommandError(f'Invalid image name {image_name}')\n\n\ndef _has_app_image(image_names):\n for name in image_names:\n if '/' in name:\n return True\n return False\n\n\ndef _check_product_keys(image_descriptors, image_names):\n missing_keys = []\n\n for image_name in image_names:\n image = image_descriptors[image_name]\n\n if 'product_key' in image:\n if not image['product_key']:\n missing_keys.append(image_name)\n\n ios = image_descriptors[image_name].get('os', {})\n if 'product_key' in ios:\n if not ios['product_key']:\n missing_keys.append(image_name)\n\n if missing_keys:\n logger.error('The following images require a product key:')\n for image in missing_keys:\n logger.error(' * %s', image)\n\n raise CommandError('Please update images.json and/or apps.json.')\n\n\ndef _check_iso(templates, app_templates, iso_dir, image_names):\n for image_name in image_names:\n base_image, app_name = _get_base_image_and_app(image_name)\n\n descriptors = [templates[base_image]]\n if app_name:\n descriptors.append(app_templates[app_name])\n\n for desc in descriptors:\n iso = desc.get('iso', {})\n if iso.get('url', 
''):\n continue\n\n name = iso.get('name', '')\n if not name:\n continue\n\n if not iso_dir:\n raise CommandError(\n 'Please use the --iso-dir option to specify the path '\n f'to a folder that contains {name}'\n )\n\n path = os.path.join(iso_dir, name)\n if not os.path.exists(path):\n raise CommandError(f'The image {image_name} requires {path}, which could not be found')\n\n\ndef _is_port_available(port):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n try:\n s.bind((\"127.0.0.1\", port))\n return True\n except socket.error:\n return False\n finally:\n s.close()\n\n\ndef _start_ftp_server(image_path, port):\n authorizer = DummyAuthorizer()\n authorizer.add_anonymous(image_path, perm='elradfmwMT')\n handler = FTPHandler\n handler.authorizer = authorizer\n handler.masquerade_address = '10.0.2.2'\n # QEMU slirp won't let the guest reconnect if timeout happens, so we disable it\n handler.timeout = None\n\n server = FTPServer((\"127.0.0.1\", port), handler)\n\n thread = Thread(target=_run_ftp_server, args=[server])\n thread.daemon = True\n thread.start()\n time.sleep(1)\n\n return server\n\n\ndef _run_ftp_server(server):\n try:\n server.serve_forever()\n finally:\n logger.info('FTP server terminated')\n server.close_all()\n\n\ndef _get_archive_rules(image_path, rule_names):\n if _has_app_image(rule_names):\n raise CommandError('Building archives of app images is not supported yet')\n\n archive_rules = []\n for r in rule_names:\n archive_rules.append(os.path.join(image_path, f'{r}.tar.xz'))\n\n logger.info('The following archives will be built:')\n for a in archive_rules:\n logger.info(' * %s', a)\n\n return archive_rules\n\n\ndef _download_images(image_path, image_names, templates):\n if _has_app_image(image_names):\n raise CommandError('Downloading of app images is not supported yet')\n\n image_downloader = ImageDownloader(templates)\n image_downloader.download_images(image_names, image_path)\n\n logger.info('Successfully downloaded images: %s', ', 
'.join(image_names))\n\n\nclass Command(EnvCommand):\n \"\"\"\n Builds an image.\n \"\"\"\n\n help = 'Build an image.'\n\n def __init__(self):\n super().__init__()\n\n self._headless = True\n self._use_kvm = True\n self._num_cores = 1\n self._has_cow = False\n\n def add_arguments(self, parser):\n super().add_arguments(parser)\n\n parser.add_argument('name',\n help='The name of the image to build. If empty,'\n ' shows available images', nargs='*')\n parser.add_argument('-g', '--gui', action='store_true',\n help='Display QEMU GUI during image build')\n parser.add_argument('-c', '--cores', required=False, default=2,\n type=int,\n help='The number of cores used when building the '\n 'VM image. Defaults to 2')\n parser.add_argument('-x', '--clean', action='store_true',\n help='Deletes all images and rebuild them from '\n 'scratch')\n parser.add_argument('-a', '--archive', action='store_true',\n help='Creates an archive for the specified image')\n parser.add_argument('-p', '--ftp-port', required=False, default=15468, type=int,\n help='Port for the internal FTP server to receive files from guest VMs during build')\n parser.add_argument('-d', '--download', action='store_true',\n help='Download image from the repository instead '\n 'of building it')\n parser.add_argument('-i', '--iso-dir',\n help='Path to folder that stores ISO files of Windows images')\n parser.add_argument('-n', '--no-kvm', action='store_true',\n help='Disable KVM during image build')\n\n def handle(self, *args, **options):\n # If DISPLAY is missing, don't use headless mode\n if options['gui']:\n self._headless = False\n\n # If KVM has been explicitly disabled, don't use it during the build\n if options['no_kvm']:\n self._use_kvm = False\n\n self._num_cores = options['cores']\n\n # The path could have been deleted by a previous clean\n if not os.path.exists(self.image_path()):\n os.makedirs(self.image_path())\n\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n\n if 
options['clean']:\n self._invoke_make(img_build_dir, ['clean'])\n return\n\n image_names = options['name']\n templates = get_image_templates(img_build_dir)\n app_templates = get_app_templates(img_build_dir)\n images, image_groups, image_descriptors = get_all_images(templates, app_templates)\n\n if not image_names:\n self._print_image_list(images, image_groups, image_descriptors)\n print('\\nRun ``s2e image_build <name>`` to build an image. '\n 'Note that you must run ``s2e build`` **before** building '\n 'an image')\n return\n\n image_names = translate_image_name(images, image_groups, image_names)\n logger.info('The following images will be built:')\n for image in image_names:\n logger.info(' * %s', image)\n\n if options['download']:\n _download_images(self.image_path(), image_names, templates)\n return\n\n rule_names = image_names\n\n if options['archive']:\n rule_names = _get_archive_rules(self.image_path(), image_names)\n\n iso_dir = os.path.abspath(options['iso_dir']) if options['iso_dir'] else None\n\n # Check for optional product keys and iso directories.\n # These may or may not be required, depending on the set of images.\n _check_product_keys(image_descriptors, image_names)\n _check_iso(templates, app_templates, iso_dir, image_names)\n\n if self._use_kvm:\n _check_kvm()\n _check_groups_kvm()\n\n _check_groups_docker()\n _check_vmlinux()\n\n self._has_cow = _check_cow(self.image_path())\n\n if self._use_kvm:\n _check_virtualbox()\n _check_vmware()\n\n if not _is_port_available(options['ftp_port']):\n raise CommandError(f'localhost:{options[\"ftp_port\"]} is not available. 
Check that the port is free or '\n 'specify a port with --ftp-port')\n\n # Clone kernel if needed.\n # This is necessary if the s2e env has been initialized with -b flag.\n self._clone_kernel()\n\n server = _start_ftp_server(self.image_path(), options['ftp_port'])\n\n self._invoke_make(img_build_dir, rule_names, options['ftp_port'], iso_dir)\n\n logger.success('Built image(s) \\'%s\\'', ' '.join(image_names))\n\n server.close_all()\n\n def _invoke_make(self, img_build_dir, rule_names, ftp_port=0, iso_dir=''):\n env = os.environ.copy()\n env['S2E_INSTALL_ROOT'] = self.install_path()\n env['S2E_LINUX_KERNELS_ROOT'] = \\\n self.source_path(CONSTANTS['repos']['images']['linux'])\n env['OUTDIR'] = self.image_path()\n env['QEMU_FTP_PORT'] = str(ftp_port)\n env['ISODIR'] = iso_dir if iso_dir else ''\n env['DEBUG_INTERMEDIATE_RULES'] = '1' if self._has_cow else '0'\n\n logger.debug('Invoking makefile with:')\n logger.debug('export S2E_INSTALL_ROOT=%s', env['S2E_INSTALL_ROOT'])\n logger.debug('export S2E_LINUX_KERNELS_ROOT=%s', env['S2E_LINUX_KERNELS_ROOT'])\n logger.debug('export OUTDIR=%s', env['OUTDIR'])\n logger.debug('export ISODIR=%s', env.get('ISODIR', ''))\n logger.debug('export DEBUG_INTERMEDIATE_RULES=%s', env.get('DEBUG_INTERMEDIATE_RULES', ''))\n\n if self._headless:\n logger.warning('Image creation will run in headless mode. '\n 'Use --gui to see graphic output for debugging')\n else:\n env['GRAPHICS'] = ''\n\n if not self._use_kvm:\n env['QEMU_KVM'] = ''\n logger.warning('Image build without KVM. 
This will be slow')\n\n try:\n make = sh.Command('make').bake(file=os.path.join(img_build_dir,\n 'Makefile'),\n directory=self.image_path(),\n _env=env, _fg=True)\n\n make_image = make.bake(j=self._num_cores, r=True, warn_undefined_variables=True)\n make_image(sorted(rule_names))\n except ErrorReturnCode as e:\n raise CommandError(e) from e\n\n def _clone_kernel(self):\n kernels_root = self.source_path(CONSTANTS['repos']['images']['linux'])\n if os.path.exists(kernels_root):\n logger.info('Kernel repository already exists in %s', kernels_root)\n return\n\n logger.info('Cloning kernels repository to %s', kernels_root)\n\n kernels_repo = CONSTANTS['repos']['images']['linux']\n repos.git_clone_to_source(self.env_path(), kernels_repo)\n\n def _print_image_list(self, images, image_groups, image_descriptors):\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n templates = get_image_templates(img_build_dir)\n if not templates:\n images_json_path = os.path.join(img_build_dir, 'images.json')\n raise CommandError('No images available to build. Make sure that '\n f'{images_json_path} exists and is valid')\n\n def get_max_len(lst):\n ret = 0\n for item in lst:\n if len(item) > ret:\n ret = len(item)\n return ret\n\n print('Available image groups:')\n max_group_len = get_max_len(image_groups)\n for group in image_groups:\n print(f' * {group:{max_group_len}} - Build {group} images')\n\n print('\\nAvailable images:')\n max_image_len = get_max_len(images)\n for image in sorted(images):\n print(f' * {image:{max_image_len}} - {image_descriptors[image][\"name\"]}')\n\n def _print_apps_list(self):\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n app_templates = get_app_templates(img_build_dir)\n if not app_templates:\n apps_json_path = os.path.join(img_build_dir, 'apps.json')\n raise CommandError('No apps available to build. 
Make sure that '\n f'{apps_json_path} exists and is valid')\n\n print('Available applications:')\n for app_template, desc in sorted(app_templates.items()):\n for base_image in desc['base_images']:\n print(f' * {base_image}/{app_template} - {desc[\"name\"]}')\n",
"step-ids": [
17,
21,
24,
30,
34
]
}
|
[
17,
21,
24,
30,
34
] |
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import View
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
class View1(LoginRequiredMixin, View):
    """View that enforces the ``cbv.do_something`` permission.

    The check runs in ``dispatch()`` *before* delegating to the parent,
    so no HTTP-method handler executes for unauthorized users.
    """

    def dispatch(self, request, *args, **kwargs):
        # Only delegate to the normal dispatch chain when the permission
        # is held; otherwise deny access immediately with a 403.
        if request.user.has_perm('cbv.do_something'):
            return super().dispatch(request, *args, **kwargs)
        raise PermissionDenied

    def get(self, request, *args, **kwargs):
        # Plain response body for authorized GET requests.
        return HttpResponse("Contenu view1")
class View2(LoginRequiredMixin, View):
    """Login-protected view that checks the permission *after* running the view.

    Unlike View1, ``super().dispatch()`` executes first, so the GET handler
    (and any side effects it has) runs even for users who will ultimately be
    denied; only the response is withheld.
    """

    def dispatch(self, request, *args, **kwargs):
        # View logic executes before the permission check.
        response = super().dispatch(request, *args, **kwargs)
        if not request.user.has_perm('cbv.do_something'):
            raise PermissionDenied
        return response

    def get(self, request, *args, **kwargs):
        return HttpResponse("Contenu view2")
@method_decorator(login_required, name='dispatch')
class View3(View):
    """Same pre-dispatch permission gate as View1, but login is enforced via
    ``method_decorator(login_required)`` instead of ``LoginRequiredMixin``."""

    def dispatch(self, request, *args, **kwargs):
        # Permission gate first; the wrapped view only runs on success.
        if request.user.has_perm('cbv.do_something'):
            return super().dispatch(request, *args, **kwargs)
        raise PermissionDenied

    def get(self, request, *args, **kwargs):
        return HttpResponse("Contenu view2")
|
normal
|
{
"blob_id": "826abb18b11afd7a010e2bfc5a29ba068218c23a",
"index": 7550,
"step-1": "<mask token>\n\n\nclass View1(LoginRequiredMixin, View):\n <mask token>\n <mask token>\n\n\nclass View2(LoginRequiredMixin, View):\n\n def dispatch(self, request, *args, **kwargs):\n response = super().dispatch(request, *args, **kwargs)\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return response\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view2')\n\n\n@method_decorator(login_required, name='dispatch')\nclass View3(View):\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view2')\n",
"step-2": "<mask token>\n\n\nclass View1(LoginRequiredMixin, View):\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n <mask token>\n\n\nclass View2(LoginRequiredMixin, View):\n\n def dispatch(self, request, *args, **kwargs):\n response = super().dispatch(request, *args, **kwargs)\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return response\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view2')\n\n\n@method_decorator(login_required, name='dispatch')\nclass View3(View):\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view2')\n",
"step-3": "<mask token>\n\n\nclass View1(LoginRequiredMixin, View):\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view1')\n\n\nclass View2(LoginRequiredMixin, View):\n\n def dispatch(self, request, *args, **kwargs):\n response = super().dispatch(request, *args, **kwargs)\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return response\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view2')\n\n\n@method_decorator(login_required, name='dispatch')\nclass View3(View):\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view2')\n",
"step-4": "from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views.generic import View\nfrom django.http import HttpResponse\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\n\n\nclass View1(LoginRequiredMixin, View):\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view1')\n\n\nclass View2(LoginRequiredMixin, View):\n\n def dispatch(self, request, *args, **kwargs):\n response = super().dispatch(request, *args, **kwargs)\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return response\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view2')\n\n\n@method_decorator(login_required, name='dispatch')\nclass View3(View):\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view2')\n",
"step-5": "from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views.generic import View\nfrom django.http import HttpResponse\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\n\nclass View1(LoginRequiredMixin, View):\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return HttpResponse(\"Contenu view1\")\n\nclass View2(LoginRequiredMixin, View):\n def dispatch(self, request, *args, **kwargs):\n response = super().dispatch(request, *args, **kwargs)\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return response\n\n def get(self, request, *args, **kwargs):\n return HttpResponse(\"Contenu view2\")\n\n@method_decorator(login_required, name='dispatch')\nclass View3(View):\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return HttpResponse(\"Contenu view2\")\n\n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
from scipy import misc
from math import exp
import tensorflow as tf
import timeit
import os
# Directory containing this script; used to build dataset paths.
dir_path = os.path.dirname(os.path.realpath(__file__))

# Geometry of the input images (30x30, 3 channels).
IMAGE_WIDTH = 30
IMAGE_HEIGHT = 30
IMAGE_DEPTH = 3
IMAGE_PIXELS = IMAGE_WIDTH * IMAGE_HEIGHT  # pixels per channel (depth not included)
def conv2d(x, W):
    """2-D convolution, stride 1, SAME padding (spatial size preserved)."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    """2x2 max pooling with stride 2 (halves each spatial dimension, SAME padding)."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def get_single_img():
    """Load the single hard-coded test image and return it as a numpy array.

    NOTE(review): path is hard-coded to one sample; presumably a 30x30x3 image
    matching IMAGE_WIDTH/HEIGHT/DEPTH -- confirm against the data set.
    """
    file_path = dir_path+'/trunk_data_set/img_test/true_seg_cube/220.png'
    img = misc.imread(file_path)
    print "the inpute image shape: ", img.shape
    return img
def conv_net(x, W_conv1, b_conv1, W_conv2, b_conv2, W_fc1, b_fc1, W_fc2, b_fc2):
    """Forward pass of the CNN; returns the raw two-class scores (no softmax).

    All weights and biases are passed in by the caller so that variables
    restored from a checkpoint can be wired in directly.
    """
    # First convolutional layer: flat pixel vector -> NHWC image tensor.
    x_image = tf.reshape(x, [-1,30,30,3])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)
    # Second convolutional layer.
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)
    # Fully connected layer on the flattened maps (8*8 spatial x 60 channels).
    h_pool2_flat = tf.reshape(h_pool2, [-1, 8*8*60])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    # Dropout with keep_prob=1.0, i.e. a no-op at inference time.
    h_fc1_drop = tf.nn.dropout(h_fc1, 1.0)
    # Read-out layer: linear class scores.
    out = tf.add(tf.matmul(h_fc1_drop, W_fc2) , b_fc2)
    return out
# Force CPU-only execution (0 visible GPUs) for this inference run.
config = tf.ConfigProto( device_count = {'GPU': 0} )

with tf.Session(config=config) as sess1:

    image_input = get_single_img()

    # Restore the trained graph structure and variable values from the checkpoint.
    saver = tf.train.import_meta_graph('learned_model/model.ckpt.meta')
    saver.restore(sess1,"learned_model/model.ckpt")

    start = timeit.default_timer()

    #print("Model restored.")
    #print tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)

    # Look up the restored parameters by their auto-generated names
    # ("Variable:0", "Variable_1:0", ...).
    # NOTE(review): this depends on the exact variable creation order at
    # training time -- fragile; named variable scopes would be safer.
    W_conv1 = [v for v in tf.trainable_variables() if v.name == "Variable:0"][0]
    b_conv1 = [v for v in tf.trainable_variables() if v.name == "Variable_1:0"][0]
    W_conv2 = [v for v in tf.trainable_variables() if v.name == "Variable_2:0"][0]
    b_conv2 = [v for v in tf.trainable_variables() if v.name == "Variable_3:0"][0]
    W_fc1 = [v for v in tf.trainable_variables() if v.name == "Variable_4:0"][0]
    b_fc1 = [v for v in tf.trainable_variables() if v.name == "Variable_5:0"][0]
    W_fc2 = [v for v in tf.trainable_variables() if v.name == "Variable_6:0"][0]
    b_fc2 = [v for v in tf.trainable_variables() if v.name == "Variable_7:0"][0]

    # Flatten the image and normalise pixel values from [0, 255] to [-0.5, 0.5].
    img2 = tf.convert_to_tensor(image_input)
    img2 = tf.reshape( img2, [ IMAGE_PIXELS * IMAGE_DEPTH ] )
    img2.set_shape( [ IMAGE_PIXELS * IMAGE_DEPTH ] )
    image_input = tf.cast( img2, tf.float32 ) * ( 1. / 255 ) - 0.5

    y = conv_net(image_input,W_conv1, b_conv1, W_conv2, b_conv2, W_fc1, b_fc1, W_fc2, b_fc2)

    stop = timeit.default_timer()

    # Convert the two raw scores into probabilities with a sigmoid.
    print "There is no trunk with %f probablity" % (1/(1+exp(-y.eval()[0][1])))
    print "There is a trunk with %f probablity" % (1/(1+exp(-y.eval()[0][0])))

    print "calculation time :", stop - start
|
normal
|
{
"blob_id": "8b4bd2d267f20775ee5d41f7fe9ef6f6eeab5bb0",
"index": 2516,
"step-1": "from scipy import misc\nfrom math import exp\nimport tensorflow as tf\nimport timeit\nimport os \n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\n\nIMAGE_WIDTH = 30\nIMAGE_HEIGHT = 30\nIMAGE_DEPTH = 3\nIMAGE_PIXELS = IMAGE_WIDTH * IMAGE_HEIGHT\n\n\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\ndef get_single_img():\n file_path = dir_path+'/trunk_data_set/img_test/true_seg_cube/220.png' \n img = misc.imread(file_path)\n print \"the inpute image shape: \", img.shape\n return img\n\n\ndef conv_net(x, W_conv1, b_conv1, W_conv2, b_conv2, W_fc1, b_fc1, W_fc2, b_fc2):\n # first convolutional leyer\n x_image = tf.reshape(x, [-1,30,30,3])\n\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n h_pool1 = max_pool_2x2(h_conv1)\n\n # second convolutional leyer\n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n h_pool2 = max_pool_2x2(h_conv2)\n\n # third leyer\n\n h_pool2_flat = tf.reshape(h_pool2, [-1, 8*8*60])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n # drop out\n h_fc1_drop = tf.nn.dropout(h_fc1, 1.0)\n\n # rool out leyer\n out = tf.add(tf.matmul(h_fc1_drop, W_fc2) , b_fc2)\t\n return out \n\n\nconfig = tf.ConfigProto( device_count = {'GPU': 0} )\n\n\nwith tf.Session(config=config) as sess1:\n \n image_input = get_single_img() \n\n saver = tf.train.import_meta_graph('learned_model/model.ckpt.meta')\n saver.restore(sess1,\"learned_model/model.ckpt\")\n\n start = timeit.default_timer()\n \n #print(\"Model restored.\")\n #print tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n \n\n\n \n W_conv1 = [v for v in tf.trainable_variables() if v.name == \"Variable:0\"][0]\n\n b_conv1 = [v for v in tf.trainable_variables() if v.name == \"Variable_1:0\"][0]\n \n W_conv2 = [v for v in tf.trainable_variables() if v.name == \"Variable_2:0\"][0]\n\n b_conv2 = [v for v in 
tf.trainable_variables() if v.name == \"Variable_3:0\"][0]\n \n W_fc1 = [v for v in tf.trainable_variables() if v.name == \"Variable_4:0\"][0]\n \n b_fc1 = [v for v in tf.trainable_variables() if v.name == \"Variable_5:0\"][0]\n \n W_fc2 = [v for v in tf.trainable_variables() if v.name == \"Variable_6:0\"][0]\n \n b_fc2 = [v for v in tf.trainable_variables() if v.name == \"Variable_7:0\"][0]\t\n\n\n img2 = tf.convert_to_tensor(image_input)\n img2 = tf.reshape( img2, [ IMAGE_PIXELS * IMAGE_DEPTH ] )\n img2.set_shape( [ IMAGE_PIXELS * IMAGE_DEPTH ] )\n\n image_input = tf.cast( img2, tf.float32 ) * ( 1. / 255 ) - 0.5\n \n y = conv_net(image_input,W_conv1, b_conv1, W_conv2, b_conv2, W_fc1, b_fc1, W_fc2, b_fc2)\n\n stop = timeit.default_timer()\n\n print \"There is no trunk with %f probablity\" % (1/(1+exp(-y.eval()[0][1])))\n\n print \"There is a trunk with %f probablity\" % (1/(1+exp(-y.eval()[0][0])))\n \n print \"calculation time :\", stop - start\t\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class AggregateAgent(Addressable, AbstractAgent):
@Inject('aggregated_agents:_AggregateAgent__agents')
@InjectOptional('locator')
def __init__(self, name=None):
self.name = name
super(AggregateAgent, self).__init__()
for agent in self.__agents.values():
agent.parent = self
self.steps = 0
def step(self):
for agent in self.__agents.values():
agent.step()
self.steps += 1
def remove_agent(self, agent):
del self.__agents[agent.get_address()]
self.locator.remove_agent(agent)
agent.parent = None
return agent
def add_agent(self, agent):
agent.parent = self
self.__agents[agent.get_address()] = agent
<|reserved_special_token_0|>
def get_fitness(self):
try:
return max(agent.get_fitness() for agent in self.__agents.values())
except ValueError:
return None
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_neighbour(self, agent):
return self.locator.get_neighbour(agent)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AggregateAgent(Addressable, AbstractAgent):
@Inject('aggregated_agents:_AggregateAgent__agents')
@InjectOptional('locator')
def __init__(self, name=None):
self.name = name
super(AggregateAgent, self).__init__()
for agent in self.__agents.values():
agent.parent = self
self.steps = 0
def step(self):
for agent in self.__agents.values():
agent.step()
self.steps += 1
def remove_agent(self, agent):
del self.__agents[agent.get_address()]
self.locator.remove_agent(agent)
agent.parent = None
return agent
def add_agent(self, agent):
agent.parent = self
self.__agents[agent.get_address()] = agent
def get_agents(self):
return self.__agents.values()
def get_fitness(self):
try:
return max(agent.get_fitness() for agent in self.__agents.values())
except ValueError:
return None
def get_best_genotype(self):
return max(self.__agents.values(), key=lambda a: a.get_fitness()
).get_best_genotype()
def move(self, agent):
allowed_moves = self.locator.get_allowed_moves(agent)
if allowed_moves:
self.locator.remove_agent(agent)
destination = get_random_move(allowed_moves)
self.locator.add_agent(agent, destination)
logger.debug('%s moved to %s' % (agent, destination))
def get_neighbour(self, agent):
return self.locator.get_neighbour(agent)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AggregateAgent(Addressable, AbstractAgent):
@Inject('aggregated_agents:_AggregateAgent__agents')
@InjectOptional('locator')
def __init__(self, name=None):
self.name = name
super(AggregateAgent, self).__init__()
for agent in self.__agents.values():
agent.parent = self
self.steps = 0
def step(self):
for agent in self.__agents.values():
agent.step()
self.steps += 1
def remove_agent(self, agent):
del self.__agents[agent.get_address()]
self.locator.remove_agent(agent)
agent.parent = None
return agent
def add_agent(self, agent):
agent.parent = self
self.__agents[agent.get_address()] = agent
def get_agents(self):
return self.__agents.values()
def get_fitness(self):
try:
return max(agent.get_fitness() for agent in self.__agents.values())
except ValueError:
return None
def get_best_genotype(self):
return max(self.__agents.values(), key=lambda a: a.get_fitness()
).get_best_genotype()
def move(self, agent):
allowed_moves = self.locator.get_allowed_moves(agent)
if allowed_moves:
self.locator.remove_agent(agent)
destination = get_random_move(allowed_moves)
self.locator.add_agent(agent, destination)
logger.debug('%s moved to %s' % (agent, destination))
def get_neighbour(self, agent):
return self.locator.get_neighbour(agent)
def aggregate_agents_factory(*args):
def factory():
agents = {}
for name in args:
agent = AggregateAgent(name)
agents[agent.get_address()] = agent
return agents
return factory
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger = logging.getLogger(__name__)
class AggregateAgent(Addressable, AbstractAgent):
@Inject('aggregated_agents:_AggregateAgent__agents')
@InjectOptional('locator')
def __init__(self, name=None):
self.name = name
super(AggregateAgent, self).__init__()
for agent in self.__agents.values():
agent.parent = self
self.steps = 0
def step(self):
for agent in self.__agents.values():
agent.step()
self.steps += 1
def remove_agent(self, agent):
del self.__agents[agent.get_address()]
self.locator.remove_agent(agent)
agent.parent = None
return agent
def add_agent(self, agent):
agent.parent = self
self.__agents[agent.get_address()] = agent
def get_agents(self):
return self.__agents.values()
def get_fitness(self):
try:
return max(agent.get_fitness() for agent in self.__agents.values())
except ValueError:
return None
def get_best_genotype(self):
return max(self.__agents.values(), key=lambda a: a.get_fitness()
).get_best_genotype()
def move(self, agent):
allowed_moves = self.locator.get_allowed_moves(agent)
if allowed_moves:
self.locator.remove_agent(agent)
destination = get_random_move(allowed_moves)
self.locator.add_agent(agent, destination)
logger.debug('%s moved to %s' % (agent, destination))
def get_neighbour(self, agent):
return self.locator.get_neighbour(agent)
def aggregate_agents_factory(*args):
def factory():
agents = {}
for name in args:
agent = AggregateAgent(name)
agents[agent.get_address()] = agent
return agents
return factory
def get_random_move(allowed_moves):
destination = random.sample(allowed_moves, 1)[0]
return destination
<|reserved_special_token_1|>
import logging
import random
from pyage.core.address import Addressable
from pyage.core.agent.agent import AbstractAgent
from pyage.core.inject import Inject, InjectOptional
logger = logging.getLogger(__name__)
class AggregateAgent(Addressable, AbstractAgent):
    """Agent that owns and drives a collection of sub-agents.

    The sub-agent dict is injected by the pyage DI container; the key string
    "aggregated_agents:_AggregateAgent__agents" targets the name-mangled
    private attribute ``self.__agents``, so it is coupled to this class name.
    ``locator`` is an optionally injected service handling the spatial
    placement of sub-agents.
    """

    @Inject("aggregated_agents:_AggregateAgent__agents")
    @InjectOptional("locator")
    def __init__(self, name=None):
        self.name = name
        super(AggregateAgent, self).__init__()
        # Adopt every injected sub-agent.
        for agent in self.__agents.values():
            agent.parent = self
        self.steps = 0  # number of completed step() calls

    def step(self):
        """Advance every sub-agent by one step and count the iteration."""
        for agent in self.__agents.values():
            agent.step()
        self.steps += 1

    def remove_agent(self, agent):
        """Detach *agent* from this aggregate and from the locator; return it."""
        del self.__agents[agent.get_address()]
        self.locator.remove_agent(agent)
        agent.parent = None
        return agent

    def add_agent(self, agent):
        """Adopt *agent*, keyed by its address."""
        agent.parent = self
        self.__agents[agent.get_address()] = agent

    def get_agents(self):
        return self.__agents.values()

    def get_fitness(self):
        """Best fitness among sub-agents, or None when there are none.

        max() raises ValueError on an empty iterable, which is how the
        empty-aggregate case is detected.
        """
        try:
            return max(agent.get_fitness() for agent in self.__agents.values())
        except ValueError:
            return None

    def get_best_genotype(self):
        """Genotype of the sub-agent with the highest fitness."""
        return max(self.__agents.values(), key=lambda a: a.get_fitness()).get_best_genotype()

    def move(self, agent):
        """Relocate *agent* to a random allowed position; no-op when it cannot move."""
        allowed_moves = self.locator.get_allowed_moves(agent)
        if allowed_moves:
            self.locator.remove_agent(agent)
            destination = get_random_move(allowed_moves)
            self.locator.add_agent(agent, destination)
            logger.debug("%s moved to %s" % (agent, destination))

    def get_neighbour(self, agent):
        return self.locator.get_neighbour(agent)
def aggregate_agents_factory(*names):
    """Build a factory producing one AggregateAgent per given name.

    The returned zero-argument callable creates the agents and returns them
    as a dict keyed by agent address (the shape the DI container expects).
    """
    def factory():
        created = [AggregateAgent(name) for name in names]
        return {agent.get_address(): agent for agent in created}

    return factory
def get_random_move(allowed_moves):
    """Return one element of *allowed_moves*, chosen uniformly at random.

    ``allowed_moves`` may be any non-empty iterable (the locator typically
    hands over a set). It is materialized into a list first because
    ``random.sample`` only accepts sequences on Python 3.11+ (passing a set
    was deprecated in 3.9 and now raises TypeError).
    """
    destination = random.sample(list(allowed_moves), 1)[0]
    return destination
|
flexible
|
{
"blob_id": "85903f0c6bd4c896379c1357a08ae3bfa19d5415",
"index": 7065,
"step-1": "<mask token>\n\n\nclass AggregateAgent(Addressable, AbstractAgent):\n\n @Inject('aggregated_agents:_AggregateAgent__agents')\n @InjectOptional('locator')\n def __init__(self, name=None):\n self.name = name\n super(AggregateAgent, self).__init__()\n for agent in self.__agents.values():\n agent.parent = self\n self.steps = 0\n\n def step(self):\n for agent in self.__agents.values():\n agent.step()\n self.steps += 1\n\n def remove_agent(self, agent):\n del self.__agents[agent.get_address()]\n self.locator.remove_agent(agent)\n agent.parent = None\n return agent\n\n def add_agent(self, agent):\n agent.parent = self\n self.__agents[agent.get_address()] = agent\n <mask token>\n\n def get_fitness(self):\n try:\n return max(agent.get_fitness() for agent in self.__agents.values())\n except ValueError:\n return None\n <mask token>\n <mask token>\n\n def get_neighbour(self, agent):\n return self.locator.get_neighbour(agent)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass AggregateAgent(Addressable, AbstractAgent):\n\n @Inject('aggregated_agents:_AggregateAgent__agents')\n @InjectOptional('locator')\n def __init__(self, name=None):\n self.name = name\n super(AggregateAgent, self).__init__()\n for agent in self.__agents.values():\n agent.parent = self\n self.steps = 0\n\n def step(self):\n for agent in self.__agents.values():\n agent.step()\n self.steps += 1\n\n def remove_agent(self, agent):\n del self.__agents[agent.get_address()]\n self.locator.remove_agent(agent)\n agent.parent = None\n return agent\n\n def add_agent(self, agent):\n agent.parent = self\n self.__agents[agent.get_address()] = agent\n\n def get_agents(self):\n return self.__agents.values()\n\n def get_fitness(self):\n try:\n return max(agent.get_fitness() for agent in self.__agents.values())\n except ValueError:\n return None\n\n def get_best_genotype(self):\n return max(self.__agents.values(), key=lambda a: a.get_fitness()\n ).get_best_genotype()\n\n def move(self, agent):\n allowed_moves = self.locator.get_allowed_moves(agent)\n if allowed_moves:\n self.locator.remove_agent(agent)\n destination = get_random_move(allowed_moves)\n self.locator.add_agent(agent, destination)\n logger.debug('%s moved to %s' % (agent, destination))\n\n def get_neighbour(self, agent):\n return self.locator.get_neighbour(agent)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass AggregateAgent(Addressable, AbstractAgent):\n\n @Inject('aggregated_agents:_AggregateAgent__agents')\n @InjectOptional('locator')\n def __init__(self, name=None):\n self.name = name\n super(AggregateAgent, self).__init__()\n for agent in self.__agents.values():\n agent.parent = self\n self.steps = 0\n\n def step(self):\n for agent in self.__agents.values():\n agent.step()\n self.steps += 1\n\n def remove_agent(self, agent):\n del self.__agents[agent.get_address()]\n self.locator.remove_agent(agent)\n agent.parent = None\n return agent\n\n def add_agent(self, agent):\n agent.parent = self\n self.__agents[agent.get_address()] = agent\n\n def get_agents(self):\n return self.__agents.values()\n\n def get_fitness(self):\n try:\n return max(agent.get_fitness() for agent in self.__agents.values())\n except ValueError:\n return None\n\n def get_best_genotype(self):\n return max(self.__agents.values(), key=lambda a: a.get_fitness()\n ).get_best_genotype()\n\n def move(self, agent):\n allowed_moves = self.locator.get_allowed_moves(agent)\n if allowed_moves:\n self.locator.remove_agent(agent)\n destination = get_random_move(allowed_moves)\n self.locator.add_agent(agent, destination)\n logger.debug('%s moved to %s' % (agent, destination))\n\n def get_neighbour(self, agent):\n return self.locator.get_neighbour(agent)\n\n\ndef aggregate_agents_factory(*args):\n\n def factory():\n agents = {}\n for name in args:\n agent = AggregateAgent(name)\n agents[agent.get_address()] = agent\n return agents\n return factory\n\n\n<mask token>\n",
"step-4": "<mask token>\nlogger = logging.getLogger(__name__)\n\n\nclass AggregateAgent(Addressable, AbstractAgent):\n\n @Inject('aggregated_agents:_AggregateAgent__agents')\n @InjectOptional('locator')\n def __init__(self, name=None):\n self.name = name\n super(AggregateAgent, self).__init__()\n for agent in self.__agents.values():\n agent.parent = self\n self.steps = 0\n\n def step(self):\n for agent in self.__agents.values():\n agent.step()\n self.steps += 1\n\n def remove_agent(self, agent):\n del self.__agents[agent.get_address()]\n self.locator.remove_agent(agent)\n agent.parent = None\n return agent\n\n def add_agent(self, agent):\n agent.parent = self\n self.__agents[agent.get_address()] = agent\n\n def get_agents(self):\n return self.__agents.values()\n\n def get_fitness(self):\n try:\n return max(agent.get_fitness() for agent in self.__agents.values())\n except ValueError:\n return None\n\n def get_best_genotype(self):\n return max(self.__agents.values(), key=lambda a: a.get_fitness()\n ).get_best_genotype()\n\n def move(self, agent):\n allowed_moves = self.locator.get_allowed_moves(agent)\n if allowed_moves:\n self.locator.remove_agent(agent)\n destination = get_random_move(allowed_moves)\n self.locator.add_agent(agent, destination)\n logger.debug('%s moved to %s' % (agent, destination))\n\n def get_neighbour(self, agent):\n return self.locator.get_neighbour(agent)\n\n\ndef aggregate_agents_factory(*args):\n\n def factory():\n agents = {}\n for name in args:\n agent = AggregateAgent(name)\n agents[agent.get_address()] = agent\n return agents\n return factory\n\n\ndef get_random_move(allowed_moves):\n destination = random.sample(allowed_moves, 1)[0]\n return destination\n",
"step-5": "import logging\nimport random\nfrom pyage.core.address import Addressable\nfrom pyage.core.agent.agent import AbstractAgent\nfrom pyage.core.inject import Inject, InjectOptional\n\nlogger = logging.getLogger(__name__)\n\n\nclass AggregateAgent(Addressable, AbstractAgent):\n @Inject(\"aggregated_agents:_AggregateAgent__agents\")\n @InjectOptional(\"locator\")\n def __init__(self, name=None):\n self.name = name\n super(AggregateAgent, self).__init__()\n for agent in self.__agents.values():\n agent.parent = self\n self.steps = 0\n\n def step(self):\n for agent in self.__agents.values():\n agent.step()\n self.steps += 1\n\n def remove_agent(self, agent):\n del self.__agents[agent.get_address()]\n self.locator.remove_agent(agent)\n agent.parent = None\n return agent\n\n def add_agent(self, agent):\n agent.parent = self\n self.__agents[agent.get_address()] = agent\n\n def get_agents(self):\n return self.__agents.values()\n\n def get_fitness(self):\n try:\n return max(agent.get_fitness() for agent in self.__agents.values())\n except ValueError:\n return None\n\n def get_best_genotype(self):\n return max(self.__agents.values(), key=lambda a: a.get_fitness()).get_best_genotype()\n\n def move(self, agent):\n allowed_moves = self.locator.get_allowed_moves(agent)\n if allowed_moves:\n self.locator.remove_agent(agent)\n destination = get_random_move(allowed_moves)\n self.locator.add_agent(agent, destination)\n logger.debug(\"%s moved to %s\" % (agent, destination))\n\n def get_neighbour(self, agent):\n return self.locator.get_neighbour(agent)\n\n\ndef aggregate_agents_factory(*args):\n def factory():\n agents = {}\n for name in args:\n agent = AggregateAgent(name)\n agents[agent.get_address()] = agent\n return agents\n\n return factory\n\n\ndef get_random_move(allowed_moves):\n destination = random.sample(allowed_moves, 1)[0]\n return destination",
"step-ids": [
7,
10,
11,
13,
15
]
}
|
[
7,
10,
11,
13,
15
] |
<|reserved_special_token_0|>
def run_perturbation_experiment(
    nov_an: NoveltyAnalyzer, X_test: np.ndarray, scoring_func: str = None
) -> Tuple[Dict[str, List[float]], Dict[str, List[float]]]:
    """Runs the perturbation experiment for a single novelty estimator.

    Parameters
    ----------
    nov_an: NoveltyAnalyzer
        The novelty analyzer (handles scaling, imputation, evaluation)
    X_test: np.ndarray
        The test data to use
    scoring_func: str
        Which kind of novelty to evaluate (used for NN ensemble, where you can choose between
        'std' and 'entropy'

    Returns
    -------
    aucs_dict: dict
        a dictionary of lists of OOD detection AUCS for different scales. The list contains the
        detection AUCs for the same scale but different features.
    recall_dict: dict
        a dictionary of lists of recalled OOD fractions using the 95th percentile cutoff.The
        list contains the recalls for the same scale but different features.
    """
    # NOTE(review): the dicts are keyed by the numeric scale factor, not str
    # as the annotation suggests.
    aucs_dict = defaultdict(list)
    recall_dict = defaultdict(list)
    for scale_adjustment in tqdm(SCALES):
        # Draw N_FEATURES distinct feature indices to perturb at this scale.
        random_sample = np.random.choice(
            np.arange(0, X_test.shape[1]), N_FEATURES, replace=False
        )
        for r in random_sample:
            # Scale a single feature of the analyzer's (already imputed and
            # scaled) test set -- hence impute_and_scale=False below.
            X_test_adjusted = deepcopy(nov_an.X_test)
            X_test_adjusted[:, r] = X_test_adjusted[:, r] * scale_adjustment
            nov_an.set_ood(X_test_adjusted, impute_and_scale=False)
            nov_an.calculate_novelty(scoring_func=scoring_func)
            # One AUC / recall entry per perturbed feature, grouped by scale.
            aucs_dict[scale_adjustment] += [nov_an.get_ood_detection_auc()]
            recall_dict[scale_adjustment] += [nov_an.get_ood_recall()]
    return aucs_dict, recall_dict
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def run_perturbation_experiment(nov_an: NoveltyAnalyzer, X_test: np.ndarray,
scoring_func: str=None) ->Tuple[Dict[str, List[float]], Dict[str, List[
float]]]:
"""Runs the perturbation experiment for a single novelty estimator.
Parameters
----------
nov_an: NoveltyAnalyzer
The novelty analyzer (handles scaling, imputation, evaluation)
X_test: np.ndarray
The test data to use
scoring_func: str
Which kind of novelty to evaluate (used for NN ensemble, where you can choose between
'std' and 'entropy'
Returns
-------
aucs_dict: dict
a dictionary of lists of OOD detection AUCS for different scales. The list contains the
detection AUCs for the same scale but different features.
recall_dict: dict
a dictionary of lists of recalled OOD fractions using the 95th percentile cutoff.The
list contains the recalls for the same scale but different features.
"""
aucs_dict = defaultdict(list)
recall_dict = defaultdict(list)
for scale_adjustment in tqdm(SCALES):
random_sample = np.random.choice(np.arange(0, X_test.shape[1]),
N_FEATURES, replace=False)
for r in random_sample:
X_test_adjusted = deepcopy(nov_an.X_test)
X_test_adjusted[:, r] = X_test_adjusted[:, r] * scale_adjustment
nov_an.set_ood(X_test_adjusted, impute_and_scale=False)
nov_an.calculate_novelty(scoring_func=scoring_func)
aucs_dict[scale_adjustment] += [nov_an.get_ood_detection_auc()]
recall_dict[scale_adjustment] += [nov_an.get_ood_recall()]
return aucs_dict, recall_dict
if __name__ == '__main__':
    # Fix RNGs so feature sampling and model initialisation are reproducible.
    np.random.seed(123)
    torch.manual_seed(123)

    parser = argparse.ArgumentParser()
    parser.add_argument('--data_origin', type=str, default='MIMIC',
                        help='Which data to use')
    parser.add_argument('--models', type=str, nargs='+',
                        default=AVAILABLE_MODELS, choices=AVAILABLE_MODELS,
                        help='Determine the models which are being used for this experiment.')
    parser.add_argument('--result_dir', type=str, default=RESULT_DIR,
                        help='Define the directory that results should be saved to.')
    args = parser.parse_args()

    # Load the feature matrix splits and the prediction target for the dataset.
    dh = DataHandler(args.data_origin)
    feature_names = dh.load_feature_names()
    train_data, test_data, val_data = dh.load_data_splits()
    y_name = dh.load_target_name()

    # Run the perturbation experiment once per model / scoring function.
    for ne, scoring_funcs, name in init_models(input_dim=len(feature_names),
                                               selection=args.models,
                                               origin=args.data_origin):
        print(name)
        nov_an = NoveltyAnalyzer(ne, train_data[feature_names].values,
                                 test_data[feature_names].values,
                                 val_data[feature_names].values,
                                 train_data[y_name].values,
                                 test_data[y_name].values,
                                 val_data[y_name].values)
        nov_an.train()

        for scoring_func in scoring_funcs:
            aucs_dict, recall_dict = run_perturbation_experiment(
                nov_an, test_data[feature_names], scoring_func=scoring_func)

            # Persist results under
            # <result_dir>/<origin>/perturbation/<model>/detection/<scoring_func>/.
            dir_name = os.path.join(args.result_dir, args.data_origin,
                                    'perturbation', name, 'detection', scoring_func)
            if not os.path.exists(dir_name):
                os.makedirs(dir_name)
            with open(os.path.join(dir_name, 'recall.pkl'), 'wb') as f:
                pickle.dump(recall_dict, f)
            with open(os.path.join(dir_name, 'detect_auc.pkl'), 'wb') as f:
                pickle.dump(aucs_dict, f)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
SCALES = [10, 100, 1000, 10000]
N_FEATURES = 100
RESULT_DIR = '../../data/results'
def run_perturbation_experiment(nov_an: NoveltyAnalyzer, X_test: np.ndarray,
    scoring_func: str=None) ->Tuple[Dict[str, List[float]], Dict[str, List[
    float]]]:
    """Run the feature-scaling perturbation experiment for one novelty estimator.

    For every factor in the module-level ``SCALES``, sample ``N_FEATURES``
    random feature columns and, one column at a time, multiply that column
    of the analyzer's test set by the factor, then record how well the
    estimator flags the perturbed data as out-of-distribution (OOD).

    Parameters
    ----------
    nov_an: NoveltyAnalyzer
        The novelty analyzer (handles scaling, imputation, evaluation).
    X_test: np.ndarray
        The test data; only its column count is used to sample feature indices.
    scoring_func: str
        Which kind of novelty score to evaluate (used for NN ensembles, where
        one can choose between 'std' and 'entropy').

    Returns
    -------
    aucs_dict: dict
        Maps each scale factor to a list of OOD-detection AUCs, one entry per
        perturbed feature.
    recall_dict: dict
        Maps each scale factor to a list of recalled OOD fractions using the
        95th percentile cutoff, one entry per perturbed feature.
    """
    aucs_dict = defaultdict(list)
    recall_dict = defaultdict(list)
    for scale_adjustment in tqdm(SCALES):
        # Sample N_FEATURES distinct column indices to perturb at this scale.
        random_sample = np.random.choice(np.arange(0, X_test.shape[1]),
            N_FEATURES, replace=False)
        for r in random_sample:
            # Copy so each perturbation starts from the unmodified test set.
            X_test_adjusted = deepcopy(nov_an.X_test)
            X_test_adjusted[:, r] = X_test_adjusted[:, r] * scale_adjustment
            # impute_and_scale=False: presumably nov_an.X_test is already
            # imputed/scaled — TODO confirm against NoveltyAnalyzer.
            nov_an.set_ood(X_test_adjusted, impute_and_scale=False)
            nov_an.calculate_novelty(scoring_func=scoring_func)
            aucs_dict[scale_adjustment] += [nov_an.get_ood_detection_auc()]
            recall_dict[scale_adjustment] += [nov_an.get_ood_recall()]
    return aucs_dict, recall_dict
if __name__ == '__main__':
    # Fix RNG seeds so feature sampling and model init are reproducible.
    np.random.seed(123)
    torch.manual_seed(123)
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_origin', type=str, default='MIMIC', help=
        'Which data to use')
    parser.add_argument('--models', type=str, nargs='+', default=
        AVAILABLE_MODELS, choices=AVAILABLE_MODELS, help=
        'Determine the models which are being used for this experiment.')
    parser.add_argument('--result_dir', type=str, default=RESULT_DIR, help=
        'Define the directory that results should be saved to.')
    args = parser.parse_args()
    # Load feature names, train/test/val splits and the target column name.
    dh = DataHandler(args.data_origin)
    feature_names = dh.load_feature_names()
    train_data, test_data, val_data = dh.load_data_splits()
    y_name = dh.load_target_name()
    # Run the perturbation experiment once per selected model/scoring function.
    for ne, scoring_funcs, name in init_models(input_dim=len(feature_names),
        selection=args.models, origin=args.data_origin):
        print(name)
        nov_an = NoveltyAnalyzer(ne, train_data[feature_names].values,
            test_data[feature_names].values, val_data[feature_names].values,
            train_data[y_name].values, test_data[y_name].values, val_data[
            y_name].values)
        nov_an.train()
        for scoring_func in scoring_funcs:
            aucs_dict, recall_dict = run_perturbation_experiment(nov_an,
                test_data[feature_names], scoring_func=scoring_func)
            # Results go to <result_dir>/<origin>/perturbation/<model>/detection/<score>.
            dir_name = os.path.join(args.result_dir, args.data_origin,
                'perturbation', name, 'detection', scoring_func)
            if not os.path.exists(dir_name):
                os.makedirs(dir_name)
            with open(os.path.join(dir_name, 'recall.pkl'), 'wb') as f:
                pickle.dump(recall_dict, f)
            with open(os.path.join(dir_name, 'detect_auc.pkl'), 'wb') as f:
                pickle.dump(aucs_dict, f)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import os
import pickle
from copy import deepcopy
from collections import defaultdict
import argparse
from typing import Tuple, Dict, List
import numpy as np
from tqdm import tqdm
import torch
from uncertainty_estimation.utils.model_init import AVAILABLE_MODELS
from uncertainty_estimation.utils.model_init import init_models
from uncertainty_estimation.utils.datahandler import DataHandler
from uncertainty_estimation.utils.novelty_analyzer import NoveltyAnalyzer
SCALES = [10, 100, 1000, 10000]
N_FEATURES = 100
RESULT_DIR = '../../data/results'
def run_perturbation_experiment(nov_an: NoveltyAnalyzer, X_test: np.ndarray,
    scoring_func: str=None) ->Tuple[Dict[str, List[float]], Dict[str, List[
    float]]]:
    """Run the feature-scaling perturbation experiment for one novelty estimator.

    For every factor in the module-level ``SCALES``, sample ``N_FEATURES``
    random feature columns and, one column at a time, multiply that column
    of the analyzer's test set by the factor, then record how well the
    estimator flags the perturbed data as out-of-distribution (OOD).

    Parameters
    ----------
    nov_an: NoveltyAnalyzer
        The novelty analyzer (handles scaling, imputation, evaluation).
    X_test: np.ndarray
        The test data; only its column count is used to sample feature indices.
    scoring_func: str
        Which kind of novelty score to evaluate (used for NN ensembles, where
        one can choose between 'std' and 'entropy').

    Returns
    -------
    aucs_dict: dict
        Maps each scale factor to a list of OOD-detection AUCs, one entry per
        perturbed feature.
    recall_dict: dict
        Maps each scale factor to a list of recalled OOD fractions using the
        95th percentile cutoff, one entry per perturbed feature.
    """
    aucs_dict = defaultdict(list)
    recall_dict = defaultdict(list)
    for scale_adjustment in tqdm(SCALES):
        # Sample N_FEATURES distinct column indices to perturb at this scale.
        random_sample = np.random.choice(np.arange(0, X_test.shape[1]),
            N_FEATURES, replace=False)
        for r in random_sample:
            # Copy so each perturbation starts from the unmodified test set.
            X_test_adjusted = deepcopy(nov_an.X_test)
            X_test_adjusted[:, r] = X_test_adjusted[:, r] * scale_adjustment
            # impute_and_scale=False: presumably nov_an.X_test is already
            # imputed/scaled — TODO confirm against NoveltyAnalyzer.
            nov_an.set_ood(X_test_adjusted, impute_and_scale=False)
            nov_an.calculate_novelty(scoring_func=scoring_func)
            aucs_dict[scale_adjustment] += [nov_an.get_ood_detection_auc()]
            recall_dict[scale_adjustment] += [nov_an.get_ood_recall()]
    return aucs_dict, recall_dict
if __name__ == '__main__':
    # Fix RNG seeds so feature sampling and model init are reproducible.
    np.random.seed(123)
    torch.manual_seed(123)
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_origin', type=str, default='MIMIC', help=
        'Which data to use')
    parser.add_argument('--models', type=str, nargs='+', default=
        AVAILABLE_MODELS, choices=AVAILABLE_MODELS, help=
        'Determine the models which are being used for this experiment.')
    parser.add_argument('--result_dir', type=str, default=RESULT_DIR, help=
        'Define the directory that results should be saved to.')
    args = parser.parse_args()
    # Load feature names, train/test/val splits and the target column name.
    dh = DataHandler(args.data_origin)
    feature_names = dh.load_feature_names()
    train_data, test_data, val_data = dh.load_data_splits()
    y_name = dh.load_target_name()
    # Run the perturbation experiment once per selected model/scoring function.
    for ne, scoring_funcs, name in init_models(input_dim=len(feature_names),
        selection=args.models, origin=args.data_origin):
        print(name)
        nov_an = NoveltyAnalyzer(ne, train_data[feature_names].values,
            test_data[feature_names].values, val_data[feature_names].values,
            train_data[y_name].values, test_data[y_name].values, val_data[
            y_name].values)
        nov_an.train()
        for scoring_func in scoring_funcs:
            aucs_dict, recall_dict = run_perturbation_experiment(nov_an,
                test_data[feature_names], scoring_func=scoring_func)
            # Results go to <result_dir>/<origin>/perturbation/<model>/detection/<score>.
            dir_name = os.path.join(args.result_dir, args.data_origin,
                'perturbation', name, 'detection', scoring_func)
            if not os.path.exists(dir_name):
                os.makedirs(dir_name)
            with open(os.path.join(dir_name, 'recall.pkl'), 'wb') as f:
                pickle.dump(recall_dict, f)
            with open(os.path.join(dir_name, 'detect_auc.pkl'), 'wb') as f:
                pickle.dump(aucs_dict, f)
<|reserved_special_token_1|>
"""
Test the OOD-detection capabilities of models by scaling a random feature for all sample in the data set.
"""
# STD
import os
import pickle
from copy import deepcopy
from collections import defaultdict
import argparse
from typing import Tuple, Dict, List
# EXT
import numpy as np
from tqdm import tqdm
import torch
# PROJECT
from uncertainty_estimation.utils.model_init import AVAILABLE_MODELS
from uncertainty_estimation.utils.model_init import init_models
from uncertainty_estimation.utils.datahandler import DataHandler
from uncertainty_estimation.utils.novelty_analyzer import NoveltyAnalyzer
# CONST
SCALES = [10, 100, 1000, 10000]  # multiplicative factors applied to one feature at a time
N_FEATURES = 100  # number of randomly chosen features perturbed per scale factor
RESULT_DIR = "../../data/results"  # default root directory for pickled metrics
def run_perturbation_experiment(
    nov_an: NoveltyAnalyzer, X_test: np.ndarray, scoring_func: str = None
) -> Tuple[Dict[str, List[float]], Dict[str, List[float]]]:
    """Run the feature-scaling perturbation experiment for one novelty estimator.

    For each factor in ``SCALES``, sample ``N_FEATURES`` distinct feature
    columns and, one column at a time, multiply that column of the analyzer's
    test set by the factor; record how well the estimator flags the perturbed
    data as out-of-distribution (OOD).

    Parameters
    ----------
    nov_an: NoveltyAnalyzer
        The novelty analyzer (handles scaling, imputation, evaluation).
    X_test: np.ndarray
        The test data; only its column count is used to sample feature indices.
    scoring_func: str
        Which kind of novelty to evaluate (used for NN ensembles, where one
        can choose between 'std' and 'entropy').

    Returns
    -------
    detection_aucs: dict
        Maps each scale factor to a list of OOD-detection AUCs, one entry per
        perturbed feature.
    ood_recalls: dict
        Maps each scale factor to a list of recalled OOD fractions using the
        95th percentile cutoff, one entry per perturbed feature.
    """
    detection_aucs = defaultdict(list)
    ood_recalls = defaultdict(list)

    n_columns = X_test.shape[1]

    for scale in tqdm(SCALES):
        # Draw N_FEATURES distinct column indices to perturb at this scale.
        chosen_columns = np.random.choice(
            np.arange(0, n_columns), N_FEATURES, replace=False
        )

        for column in chosen_columns:
            # Start each perturbation from a fresh copy of the test set.
            perturbed = deepcopy(nov_an.X_test)
            perturbed[:, column] = perturbed[:, column] * scale
            nov_an.set_ood(perturbed, impute_and_scale=False)
            nov_an.calculate_novelty(scoring_func=scoring_func)
            detection_aucs[scale].append(nov_an.get_ood_detection_auc())
            ood_recalls[scale].append(nov_an.get_ood_recall())

    return detection_aucs, ood_recalls
if __name__ == "__main__":
    # Fix RNG seeds so feature sampling and model init are reproducible.
    np.random.seed(123)
    torch.manual_seed(123)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data_origin", type=str, default="MIMIC", help="Which data to use"
    )
    parser.add_argument(
        "--models",
        type=str,
        nargs="+",
        default=AVAILABLE_MODELS,
        choices=AVAILABLE_MODELS,
        help="Determine the models which are being used for this experiment.",
    )
    parser.add_argument(
        "--result_dir",
        type=str,
        default=RESULT_DIR,
        help="Define the directory that results should be saved to.",
    )
    args = parser.parse_args()
    # Loading the data: feature names, train/test/val splits, target column.
    dh = DataHandler(args.data_origin)
    feature_names = dh.load_feature_names()
    train_data, test_data, val_data = dh.load_data_splits()
    y_name = dh.load_target_name()
    # Run the perturbation experiment once per selected model/scoring function.
    for ne, scoring_funcs, name in init_models(
        input_dim=len(feature_names), selection=args.models, origin=args.data_origin
    ):
        print(name)
        nov_an = NoveltyAnalyzer(
            ne,
            train_data[feature_names].values,
            test_data[feature_names].values,
            val_data[feature_names].values,
            train_data[y_name].values,
            test_data[y_name].values,
            val_data[y_name].values,
        )
        nov_an.train()
        for scoring_func in scoring_funcs:
            aucs_dict, recall_dict = run_perturbation_experiment(
                nov_an, test_data[feature_names], scoring_func=scoring_func
            )
            # Results are pickled under
            # <result_dir>/<origin>/perturbation/<model>/detection/<scoring_func>.
            dir_name = os.path.join(
                args.result_dir,
                args.data_origin,
                "perturbation",
                name,
                "detection",
                scoring_func,
            )
            if not os.path.exists(dir_name):
                os.makedirs(dir_name)
            with open(os.path.join(dir_name, "recall.pkl"), "wb") as f:
                pickle.dump(recall_dict, f)
            with open(os.path.join(dir_name, "detect_auc.pkl"), "wb") as f:
                pickle.dump(aucs_dict, f)
|
flexible
|
{
"blob_id": "bf3e7f1aa9fd20b69e751da9ac8970c88b1144eb",
"index": 9363,
"step-1": "<mask token>\n\n\ndef run_perturbation_experiment(nov_an: NoveltyAnalyzer, X_test: np.ndarray,\n scoring_func: str=None) ->Tuple[Dict[str, List[float]], Dict[str, List[\n float]]]:\n \"\"\"Runs the perturbation experiment for a single novelty estimator.\n\n Parameters\n ----------\n nov_an: NoveltyAnalyzer\n The novelty analyzer (handles scaling, imputation, evaluation)\n X_test: np.ndarray\n The test data to use\n scoring_func: str\n Which kind of novelty to evaluate (used for NN ensemble, where you can choose between\n 'std' and 'entropy'\n\n Returns\n -------\n aucs_dict: dict\n a dictionary of lists of OOD detection AUCS for different scales. The list contains the\n detection AUCs for the same scale but different features.\n recall_dict: dict\n a dictionary of lists of recalled OOD fractions using the 95th percentile cutoff.The\n list contains the recalls for the same scale but different features.\n\n \"\"\"\n aucs_dict = defaultdict(list)\n recall_dict = defaultdict(list)\n for scale_adjustment in tqdm(SCALES):\n random_sample = np.random.choice(np.arange(0, X_test.shape[1]),\n N_FEATURES, replace=False)\n for r in random_sample:\n X_test_adjusted = deepcopy(nov_an.X_test)\n X_test_adjusted[:, r] = X_test_adjusted[:, r] * scale_adjustment\n nov_an.set_ood(X_test_adjusted, impute_and_scale=False)\n nov_an.calculate_novelty(scoring_func=scoring_func)\n aucs_dict[scale_adjustment] += [nov_an.get_ood_detection_auc()]\n recall_dict[scale_adjustment] += [nov_an.get_ood_recall()]\n return aucs_dict, recall_dict\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef run_perturbation_experiment(nov_an: NoveltyAnalyzer, X_test: np.ndarray,\n scoring_func: str=None) ->Tuple[Dict[str, List[float]], Dict[str, List[\n float]]]:\n \"\"\"Runs the perturbation experiment for a single novelty estimator.\n\n Parameters\n ----------\n nov_an: NoveltyAnalyzer\n The novelty analyzer (handles scaling, imputation, evaluation)\n X_test: np.ndarray\n The test data to use\n scoring_func: str\n Which kind of novelty to evaluate (used for NN ensemble, where you can choose between\n 'std' and 'entropy'\n\n Returns\n -------\n aucs_dict: dict\n a dictionary of lists of OOD detection AUCS for different scales. The list contains the\n detection AUCs for the same scale but different features.\n recall_dict: dict\n a dictionary of lists of recalled OOD fractions using the 95th percentile cutoff.The\n list contains the recalls for the same scale but different features.\n\n \"\"\"\n aucs_dict = defaultdict(list)\n recall_dict = defaultdict(list)\n for scale_adjustment in tqdm(SCALES):\n random_sample = np.random.choice(np.arange(0, X_test.shape[1]),\n N_FEATURES, replace=False)\n for r in random_sample:\n X_test_adjusted = deepcopy(nov_an.X_test)\n X_test_adjusted[:, r] = X_test_adjusted[:, r] * scale_adjustment\n nov_an.set_ood(X_test_adjusted, impute_and_scale=False)\n nov_an.calculate_novelty(scoring_func=scoring_func)\n aucs_dict[scale_adjustment] += [nov_an.get_ood_detection_auc()]\n recall_dict[scale_adjustment] += [nov_an.get_ood_recall()]\n return aucs_dict, recall_dict\n\n\nif __name__ == '__main__':\n np.random.seed(123)\n torch.manual_seed(123)\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_origin', type=str, default='MIMIC', help=\n 'Which data to use')\n parser.add_argument('--models', type=str, nargs='+', default=\n AVAILABLE_MODELS, choices=AVAILABLE_MODELS, help=\n 'Determine the models which are being used for this experiment.')\n parser.add_argument('--result_dir', type=str, 
default=RESULT_DIR, help=\n 'Define the directory that results should be saved to.')\n args = parser.parse_args()\n dh = DataHandler(args.data_origin)\n feature_names = dh.load_feature_names()\n train_data, test_data, val_data = dh.load_data_splits()\n y_name = dh.load_target_name()\n for ne, scoring_funcs, name in init_models(input_dim=len(feature_names),\n selection=args.models, origin=args.data_origin):\n print(name)\n nov_an = NoveltyAnalyzer(ne, train_data[feature_names].values,\n test_data[feature_names].values, val_data[feature_names].values,\n train_data[y_name].values, test_data[y_name].values, val_data[\n y_name].values)\n nov_an.train()\n for scoring_func in scoring_funcs:\n aucs_dict, recall_dict = run_perturbation_experiment(nov_an,\n test_data[feature_names], scoring_func=scoring_func)\n dir_name = os.path.join(args.result_dir, args.data_origin,\n 'perturbation', name, 'detection', scoring_func)\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n with open(os.path.join(dir_name, 'recall.pkl'), 'wb') as f:\n pickle.dump(recall_dict, f)\n with open(os.path.join(dir_name, 'detect_auc.pkl'), 'wb') as f:\n pickle.dump(aucs_dict, f)\n",
"step-3": "<mask token>\nSCALES = [10, 100, 1000, 10000]\nN_FEATURES = 100\nRESULT_DIR = '../../data/results'\n\n\ndef run_perturbation_experiment(nov_an: NoveltyAnalyzer, X_test: np.ndarray,\n scoring_func: str=None) ->Tuple[Dict[str, List[float]], Dict[str, List[\n float]]]:\n \"\"\"Runs the perturbation experiment for a single novelty estimator.\n\n Parameters\n ----------\n nov_an: NoveltyAnalyzer\n The novelty analyzer (handles scaling, imputation, evaluation)\n X_test: np.ndarray\n The test data to use\n scoring_func: str\n Which kind of novelty to evaluate (used for NN ensemble, where you can choose between\n 'std' and 'entropy'\n\n Returns\n -------\n aucs_dict: dict\n a dictionary of lists of OOD detection AUCS for different scales. The list contains the\n detection AUCs for the same scale but different features.\n recall_dict: dict\n a dictionary of lists of recalled OOD fractions using the 95th percentile cutoff.The\n list contains the recalls for the same scale but different features.\n\n \"\"\"\n aucs_dict = defaultdict(list)\n recall_dict = defaultdict(list)\n for scale_adjustment in tqdm(SCALES):\n random_sample = np.random.choice(np.arange(0, X_test.shape[1]),\n N_FEATURES, replace=False)\n for r in random_sample:\n X_test_adjusted = deepcopy(nov_an.X_test)\n X_test_adjusted[:, r] = X_test_adjusted[:, r] * scale_adjustment\n nov_an.set_ood(X_test_adjusted, impute_and_scale=False)\n nov_an.calculate_novelty(scoring_func=scoring_func)\n aucs_dict[scale_adjustment] += [nov_an.get_ood_detection_auc()]\n recall_dict[scale_adjustment] += [nov_an.get_ood_recall()]\n return aucs_dict, recall_dict\n\n\nif __name__ == '__main__':\n np.random.seed(123)\n torch.manual_seed(123)\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_origin', type=str, default='MIMIC', help=\n 'Which data to use')\n parser.add_argument('--models', type=str, nargs='+', default=\n AVAILABLE_MODELS, choices=AVAILABLE_MODELS, help=\n 'Determine the models which are being 
used for this experiment.')\n parser.add_argument('--result_dir', type=str, default=RESULT_DIR, help=\n 'Define the directory that results should be saved to.')\n args = parser.parse_args()\n dh = DataHandler(args.data_origin)\n feature_names = dh.load_feature_names()\n train_data, test_data, val_data = dh.load_data_splits()\n y_name = dh.load_target_name()\n for ne, scoring_funcs, name in init_models(input_dim=len(feature_names),\n selection=args.models, origin=args.data_origin):\n print(name)\n nov_an = NoveltyAnalyzer(ne, train_data[feature_names].values,\n test_data[feature_names].values, val_data[feature_names].values,\n train_data[y_name].values, test_data[y_name].values, val_data[\n y_name].values)\n nov_an.train()\n for scoring_func in scoring_funcs:\n aucs_dict, recall_dict = run_perturbation_experiment(nov_an,\n test_data[feature_names], scoring_func=scoring_func)\n dir_name = os.path.join(args.result_dir, args.data_origin,\n 'perturbation', name, 'detection', scoring_func)\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n with open(os.path.join(dir_name, 'recall.pkl'), 'wb') as f:\n pickle.dump(recall_dict, f)\n with open(os.path.join(dir_name, 'detect_auc.pkl'), 'wb') as f:\n pickle.dump(aucs_dict, f)\n",
"step-4": "<mask token>\nimport os\nimport pickle\nfrom copy import deepcopy\nfrom collections import defaultdict\nimport argparse\nfrom typing import Tuple, Dict, List\nimport numpy as np\nfrom tqdm import tqdm\nimport torch\nfrom uncertainty_estimation.utils.model_init import AVAILABLE_MODELS\nfrom uncertainty_estimation.utils.model_init import init_models\nfrom uncertainty_estimation.utils.datahandler import DataHandler\nfrom uncertainty_estimation.utils.novelty_analyzer import NoveltyAnalyzer\nSCALES = [10, 100, 1000, 10000]\nN_FEATURES = 100\nRESULT_DIR = '../../data/results'\n\n\ndef run_perturbation_experiment(nov_an: NoveltyAnalyzer, X_test: np.ndarray,\n scoring_func: str=None) ->Tuple[Dict[str, List[float]], Dict[str, List[\n float]]]:\n \"\"\"Runs the perturbation experiment for a single novelty estimator.\n\n Parameters\n ----------\n nov_an: NoveltyAnalyzer\n The novelty analyzer (handles scaling, imputation, evaluation)\n X_test: np.ndarray\n The test data to use\n scoring_func: str\n Which kind of novelty to evaluate (used for NN ensemble, where you can choose between\n 'std' and 'entropy'\n\n Returns\n -------\n aucs_dict: dict\n a dictionary of lists of OOD detection AUCS for different scales. 
The list contains the\n detection AUCs for the same scale but different features.\n recall_dict: dict\n a dictionary of lists of recalled OOD fractions using the 95th percentile cutoff.The\n list contains the recalls for the same scale but different features.\n\n \"\"\"\n aucs_dict = defaultdict(list)\n recall_dict = defaultdict(list)\n for scale_adjustment in tqdm(SCALES):\n random_sample = np.random.choice(np.arange(0, X_test.shape[1]),\n N_FEATURES, replace=False)\n for r in random_sample:\n X_test_adjusted = deepcopy(nov_an.X_test)\n X_test_adjusted[:, r] = X_test_adjusted[:, r] * scale_adjustment\n nov_an.set_ood(X_test_adjusted, impute_and_scale=False)\n nov_an.calculate_novelty(scoring_func=scoring_func)\n aucs_dict[scale_adjustment] += [nov_an.get_ood_detection_auc()]\n recall_dict[scale_adjustment] += [nov_an.get_ood_recall()]\n return aucs_dict, recall_dict\n\n\nif __name__ == '__main__':\n np.random.seed(123)\n torch.manual_seed(123)\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_origin', type=str, default='MIMIC', help=\n 'Which data to use')\n parser.add_argument('--models', type=str, nargs='+', default=\n AVAILABLE_MODELS, choices=AVAILABLE_MODELS, help=\n 'Determine the models which are being used for this experiment.')\n parser.add_argument('--result_dir', type=str, default=RESULT_DIR, help=\n 'Define the directory that results should be saved to.')\n args = parser.parse_args()\n dh = DataHandler(args.data_origin)\n feature_names = dh.load_feature_names()\n train_data, test_data, val_data = dh.load_data_splits()\n y_name = dh.load_target_name()\n for ne, scoring_funcs, name in init_models(input_dim=len(feature_names),\n selection=args.models, origin=args.data_origin):\n print(name)\n nov_an = NoveltyAnalyzer(ne, train_data[feature_names].values,\n test_data[feature_names].values, val_data[feature_names].values,\n train_data[y_name].values, test_data[y_name].values, val_data[\n y_name].values)\n nov_an.train()\n for scoring_func 
in scoring_funcs:\n aucs_dict, recall_dict = run_perturbation_experiment(nov_an,\n test_data[feature_names], scoring_func=scoring_func)\n dir_name = os.path.join(args.result_dir, args.data_origin,\n 'perturbation', name, 'detection', scoring_func)\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n with open(os.path.join(dir_name, 'recall.pkl'), 'wb') as f:\n pickle.dump(recall_dict, f)\n with open(os.path.join(dir_name, 'detect_auc.pkl'), 'wb') as f:\n pickle.dump(aucs_dict, f)\n",
"step-5": "\"\"\"\nTest the OOD-detection capabilities of models by scaling a random feature for all sample in the data set.\n\"\"\"\n\n# STD\nimport os\nimport pickle\nfrom copy import deepcopy\nfrom collections import defaultdict\nimport argparse\nfrom typing import Tuple, Dict, List\n\n# EXT\nimport numpy as np\nfrom tqdm import tqdm\nimport torch\n\n# PROJECT\nfrom uncertainty_estimation.utils.model_init import AVAILABLE_MODELS\nfrom uncertainty_estimation.utils.model_init import init_models\nfrom uncertainty_estimation.utils.datahandler import DataHandler\nfrom uncertainty_estimation.utils.novelty_analyzer import NoveltyAnalyzer\n\n# CONST\nSCALES = [10, 100, 1000, 10000]\nN_FEATURES = 100\nRESULT_DIR = \"../../data/results\"\n\n\ndef run_perturbation_experiment(\n nov_an: NoveltyAnalyzer, X_test: np.ndarray, scoring_func: str = None\n) -> Tuple[Dict[str, List[float]], Dict[str, List[float]]]:\n \"\"\"Runs the perturbation experiment for a single novelty estimator.\n\n Parameters\n ----------\n nov_an: NoveltyAnalyzer\n The novelty analyzer (handles scaling, imputation, evaluation)\n X_test: np.ndarray\n The test data to use\n scoring_func: str\n Which kind of novelty to evaluate (used for NN ensemble, where you can choose between\n 'std' and 'entropy'\n\n Returns\n -------\n aucs_dict: dict\n a dictionary of lists of OOD detection AUCS for different scales. 
The list contains the\n detection AUCs for the same scale but different features.\n recall_dict: dict\n a dictionary of lists of recalled OOD fractions using the 95th percentile cutoff.The\n list contains the recalls for the same scale but different features.\n\n \"\"\"\n aucs_dict = defaultdict(list)\n recall_dict = defaultdict(list)\n\n for scale_adjustment in tqdm(SCALES):\n random_sample = np.random.choice(\n np.arange(0, X_test.shape[1]), N_FEATURES, replace=False\n )\n\n for r in random_sample:\n X_test_adjusted = deepcopy(nov_an.X_test)\n X_test_adjusted[:, r] = X_test_adjusted[:, r] * scale_adjustment\n nov_an.set_ood(X_test_adjusted, impute_and_scale=False)\n nov_an.calculate_novelty(scoring_func=scoring_func)\n aucs_dict[scale_adjustment] += [nov_an.get_ood_detection_auc()]\n recall_dict[scale_adjustment] += [nov_an.get_ood_recall()]\n\n return aucs_dict, recall_dict\n\n\nif __name__ == \"__main__\":\n np.random.seed(123)\n torch.manual_seed(123)\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--data_origin\", type=str, default=\"MIMIC\", help=\"Which data to use\"\n )\n parser.add_argument(\n \"--models\",\n type=str,\n nargs=\"+\",\n default=AVAILABLE_MODELS,\n choices=AVAILABLE_MODELS,\n help=\"Determine the models which are being used for this experiment.\",\n )\n parser.add_argument(\n \"--result_dir\",\n type=str,\n default=RESULT_DIR,\n help=\"Define the directory that results should be saved to.\",\n )\n args = parser.parse_args()\n\n # Loading the data\n dh = DataHandler(args.data_origin)\n feature_names = dh.load_feature_names()\n train_data, test_data, val_data = dh.load_data_splits()\n y_name = dh.load_target_name()\n\n for ne, scoring_funcs, name in init_models(\n input_dim=len(feature_names), selection=args.models, origin=args.data_origin\n ):\n print(name)\n nov_an = NoveltyAnalyzer(\n ne,\n train_data[feature_names].values,\n test_data[feature_names].values,\n val_data[feature_names].values,\n train_data[y_name].values,\n 
test_data[y_name].values,\n val_data[y_name].values,\n )\n nov_an.train()\n\n for scoring_func in scoring_funcs:\n aucs_dict, recall_dict = run_perturbation_experiment(\n nov_an, test_data[feature_names], scoring_func=scoring_func\n )\n\n dir_name = os.path.join(\n args.result_dir,\n args.data_origin,\n \"perturbation\",\n name,\n \"detection\",\n scoring_func,\n )\n\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n\n with open(os.path.join(dir_name, \"recall.pkl\"), \"wb\") as f:\n pickle.dump(recall_dict, f)\n\n with open(os.path.join(dir_name, \"detect_auc.pkl\"), \"wb\") as f:\n pickle.dump(aucs_dict, f)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from flask import Flask, render_template, request, redirect, flash, session
from mysqlconnection import connectToMySQL
from flask_bcrypt import Bcrypt
import re
app = Flask(__name__)
bcrypt = Bcrypt(app)  # bcrypt helper bound to the app for password hashing
app.secret_key = "something secret10"  # required for session + flash support
DATABASE = "exam_quote_dash"  # MySQL schema used by all queries below
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
# Table: users
#   id_users, first_name, last_name, email, password
# Table: quotes
#   id_quotes, from_user, liked_from, content, author
@app.route("/")
def signin():
return render_template("index.html")
@app.route("/register", methods=["POST"])
def register():
is_valid = True
if len(request.form['first_name']) < 2:
is_valid = False
flash("please enter your first name.")
if len(request.form['last_name']) < 2:
is_valid = False
flash("please enter your last name.")
if not EMAIL_REGEX.match(request.form['email']):
flash("Invalid email address!")
if len(request.form['password']) < 8:
is_valid = False
flash("password must be atleast 8 characters long.")
if (request.form['password'] != request.form['confirm_password']):
is_valid = False
flash("passwords do not match.")
if not is_valid:
return redirect('/')
else:
flash("sucessfully added")
mysql = connectToMySQL(DATABASE)
pw_hash = bcrypt.generate_password_hash(request.form['password'])
query = "INSERT INTO users (email, password, first_name, last_name) VALUES (%(em)s,%(pw)s,%(fn)s,%(ln)s);"
data = {
'em': request.form['email'],
'pw': pw_hash,
'fn': request.form['first_name'],
'ln': request.form['last_name']
}
id_users = mysql.query_db(query,data)
session['id_users'] = id_users
session['greeting'] = request.form['first_name']
return redirect('/quotes')
@app.route('/login', methods=['POST'])
def login():
    """Authenticate by email lookup plus bcrypt password check."""
    mysql = connectToMySQL(DATABASE)
    query = "SELECT * FROM users WHERE email = %(em)s;"
    matches = mysql.query_db(query, {'em': request.form['email']})
    if len(matches) == 0:
        flash("Please enter your registered Email.")
        return redirect('/')
    user = matches[0]
    if not bcrypt.check_password_hash(user['password'], request.form['password']):
        flash("Email and/or password does not match.")
        return redirect('/')
    # Successful login: remember who is signed in.
    session['id_users'] = user['id_users']
    session['greeting'] = user['first_name']
    return redirect('/quotes')
@app.route('/success')
def success():
    """Show the success page; send anonymous visitors back to the login page."""
    if 'id_users' in session:
        return render_template('success.html')
    return redirect('/')
@app.route('/quotes')
def quotes():
    """Dashboard: list every quote joined with its author's user record."""
    db = connectToMySQL(DATABASE)
    all_quotes = db.query_db(
        "SELECT * FROM quotes JOIN users ON from_user = id_users;")
    return render_template('quotes.html', joined=all_quotes)
@app.route('/create', methods=['POST'])
def create():
    """Validate and insert a new quote for the logged-in user.

    Quotes shorter than 10 characters are rejected with a flash message;
    either way the user is redirected back to the dashboard.
    """
    is_valid = True
    if len(request.form['content']) < 10:
        flash("quotes are required to be longer than 10 characters.")
        # Bug fix: this previously read `is_valid == False` — a comparison
        # whose result was discarded — so invalid quotes were inserted anyway.
        is_valid = False
    if is_valid:
        mysql = connectToMySQL(DATABASE)
        query = "INSERT INTO quotes (content, author, from_user) VALUES (%(quo)s, %(auth)s, %(from)s);"
        data = {
            'quo': request.form['content'],
            'auth': request.form['author'],
            'from': session['id_users']
        }
        mysql.query_db(query, data)
    return redirect('/quotes')
@app.route('/delete/<id>/<thing>')
def delete(id, thing):
    """Delete quote `id`, but only when it belongs to the logged-in user.

    `thing` is the owning user's id from the URL; it must match the session.
    """
    if session['id_users'] != int(thing):
        flash("Unable to delete other's quotes")
        return redirect('/quotes')
    db = connectToMySQL(DATABASE)
    db.query_db("DELETE FROM quotes WHERE id_quotes = %(id)s;", {'id': id})
    return redirect('/quotes')
@app.route("/edit")
def edit():
mysql = connectToMySQL(DATABASE)
query = "SELECT * From users WHERE id_users = %(id)s"
data ={
'id' : session['id_users']
}
users_table = mysql.query_db(query, data)
return render_template('edit_account.html', users = users_table)
@app.route("/update", methods=["POST"])
def update():
is_valid = True
if len(request.form['f_name']) < 3:
is_valid = False
flash("please enter your first name.")
if len(request.form['l_name']) < 3:
is_valid = False
flash("please enter your last name.")
if not EMAIL_REGEX.match(request.form['email']):
flash("Invalid email address!")
if not is_valid:
return redirect('/edit')
else:
flash("sucessfully updated")
mysql = connectToMySQL(DATABASE)
query = "UPDATE users Set first_name = %(fn)s, last_name = %(ln)s , email = %(em)s WHERE id_users = %(id)s;"
data = {
"fn": request.form["f_name"],
"ln": request.form["l_name"],
"em": request.form["email"],
'id' : session['id_users']
}
id = mysql.query_db(query, data)
session['greeting'] = request.form['f_name']
return redirect('/quotes')
@app.route("/my_posts")
def my_post():
mysql = connectToMySQL(DATABASE)
query = "SELECT * FROM quotes WHERE from_user = %(id)s;"
data ={
'id' : session['id_users']
}
my_quotes = mysql.query_db(query, data)
return render_template('my_posts.html', quotes = my_quotes)
@app.route('/logout')
def logout():
    """Drop the whole session and return to the login page."""
    session.clear()
    return redirect('/')
if __name__=="__main__":
app.run(debug=True)
|
normal
|
{
"blob_id": "e732fa0e2b377a87b8b088303b277cc08cb695b3",
"index": 5279,
"step-1": "<mask token>\n\n\n@app.route('/')\ndef signin():\n return render_template('index.html')\n\n\n<mask token>\n\n\n@app.route('/login', methods=['POST'])\ndef login():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM users WHERE email = %(em)s;'\n data = {'em': request.form['email']}\n result = mysql.query_db(query, data)\n if len(result) > 0:\n if bcrypt.check_password_hash(result[0]['password'], request.form[\n 'password']):\n session['id_users'] = result[0]['id_users']\n session['greeting'] = result[0]['first_name']\n return redirect('/quotes')\n else:\n flash('Email and/or password does not match.')\n return redirect('/')\n else:\n flash('Please enter your registered Email.')\n return redirect('/')\n\n\n@app.route('/success')\ndef success():\n if 'id_users' not in session:\n return redirect('/')\n else:\n return render_template('success.html')\n\n\n<mask token>\n\n\n@app.route('/create', methods=['POST'])\ndef create():\n is_valid = True\n if len(request.form['content']) < 10:\n flash('quotes are required to be longer than 10 characters.')\n is_valid == False\n if is_valid == True:\n mysql = connectToMySQL(DATABASE)\n query = (\n 'INSERT INTO quotes (content, author, from_user) VALUES (%(quo)s, %(auth)s, %(from)s);'\n )\n data = {'quo': request.form['content'], 'auth': request.form[\n 'author'], 'from': session['id_users']}\n mysql.query_db(query, data)\n return redirect('/quotes')\n\n\n@app.route('/delete/<id>/<thing>')\ndef delete(id, thing):\n if session['id_users'] == int(thing):\n mysql = connectToMySQL(DATABASE)\n query = 'DELETE FROM quotes WHERE id_quotes = %(id)s;'\n data = {'id': id}\n mysql.query_db(query, data)\n return redirect('/quotes')\n else:\n flash(\"Unable to delete other's quotes\")\n return redirect('/quotes')\n\n\n@app.route('/edit')\ndef edit():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * From users WHERE id_users = %(id)s'\n data = {'id': session['id_users']}\n users_table = mysql.query_db(query, data)\n 
return render_template('edit_account.html', users=users_table)\n\n\n@app.route('/update', methods=['POST'])\ndef update():\n is_valid = True\n if len(request.form['f_name']) < 3:\n is_valid = False\n flash('please enter your first name.')\n if len(request.form['l_name']) < 3:\n is_valid = False\n flash('please enter your last name.')\n if not EMAIL_REGEX.match(request.form['email']):\n flash('Invalid email address!')\n if not is_valid:\n return redirect('/edit')\n else:\n flash('sucessfully updated')\n mysql = connectToMySQL(DATABASE)\n query = (\n 'UPDATE users Set first_name = %(fn)s, last_name = %(ln)s , email = %(em)s WHERE id_users = %(id)s;'\n )\n data = {'fn': request.form['f_name'], 'ln': request.form['l_name'],\n 'em': request.form['email'], 'id': session['id_users']}\n id = mysql.query_db(query, data)\n session['greeting'] = request.form['f_name']\n return redirect('/quotes')\n\n\n@app.route('/my_posts')\ndef my_post():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM quotes WHERE from_user = %(id)s;'\n data = {'id': session['id_users']}\n my_quotes = mysql.query_db(query, data)\n return render_template('my_posts.html', quotes=my_quotes)\n\n\n@app.route('/logout')\ndef logout():\n session.clear()\n return redirect('/')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef signin():\n return render_template('index.html')\n\n\n<mask token>\n\n\n@app.route('/login', methods=['POST'])\ndef login():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM users WHERE email = %(em)s;'\n data = {'em': request.form['email']}\n result = mysql.query_db(query, data)\n if len(result) > 0:\n if bcrypt.check_password_hash(result[0]['password'], request.form[\n 'password']):\n session['id_users'] = result[0]['id_users']\n session['greeting'] = result[0]['first_name']\n return redirect('/quotes')\n else:\n flash('Email and/or password does not match.')\n return redirect('/')\n else:\n flash('Please enter your registered Email.')\n return redirect('/')\n\n\n@app.route('/success')\ndef success():\n if 'id_users' not in session:\n return redirect('/')\n else:\n return render_template('success.html')\n\n\n@app.route('/quotes')\ndef quotes():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM quotes JOIN users ON from_user = id_users;'\n join = mysql.query_db(query)\n return render_template('quotes.html', joined=join)\n\n\n@app.route('/create', methods=['POST'])\ndef create():\n is_valid = True\n if len(request.form['content']) < 10:\n flash('quotes are required to be longer than 10 characters.')\n is_valid == False\n if is_valid == True:\n mysql = connectToMySQL(DATABASE)\n query = (\n 'INSERT INTO quotes (content, author, from_user) VALUES (%(quo)s, %(auth)s, %(from)s);'\n )\n data = {'quo': request.form['content'], 'auth': request.form[\n 'author'], 'from': session['id_users']}\n mysql.query_db(query, data)\n return redirect('/quotes')\n\n\n@app.route('/delete/<id>/<thing>')\ndef delete(id, thing):\n if session['id_users'] == int(thing):\n mysql = connectToMySQL(DATABASE)\n query = 'DELETE FROM quotes WHERE id_quotes = %(id)s;'\n data = {'id': id}\n mysql.query_db(query, data)\n return redirect('/quotes')\n else:\n flash(\"Unable to delete other's quotes\")\n return 
redirect('/quotes')\n\n\n@app.route('/edit')\ndef edit():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * From users WHERE id_users = %(id)s'\n data = {'id': session['id_users']}\n users_table = mysql.query_db(query, data)\n return render_template('edit_account.html', users=users_table)\n\n\n@app.route('/update', methods=['POST'])\ndef update():\n is_valid = True\n if len(request.form['f_name']) < 3:\n is_valid = False\n flash('please enter your first name.')\n if len(request.form['l_name']) < 3:\n is_valid = False\n flash('please enter your last name.')\n if not EMAIL_REGEX.match(request.form['email']):\n flash('Invalid email address!')\n if not is_valid:\n return redirect('/edit')\n else:\n flash('sucessfully updated')\n mysql = connectToMySQL(DATABASE)\n query = (\n 'UPDATE users Set first_name = %(fn)s, last_name = %(ln)s , email = %(em)s WHERE id_users = %(id)s;'\n )\n data = {'fn': request.form['f_name'], 'ln': request.form['l_name'],\n 'em': request.form['email'], 'id': session['id_users']}\n id = mysql.query_db(query, data)\n session['greeting'] = request.form['f_name']\n return redirect('/quotes')\n\n\n@app.route('/my_posts')\ndef my_post():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM quotes WHERE from_user = %(id)s;'\n data = {'id': session['id_users']}\n my_quotes = mysql.query_db(query, data)\n return render_template('my_posts.html', quotes=my_quotes)\n\n\n@app.route('/logout')\ndef logout():\n session.clear()\n return redirect('/')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@app.route('/')\ndef signin():\n return render_template('index.html')\n\n\n@app.route('/register', methods=['POST'])\ndef register():\n is_valid = True\n if len(request.form['first_name']) < 2:\n is_valid = False\n flash('please enter your first name.')\n if len(request.form['last_name']) < 2:\n is_valid = False\n flash('please enter your last name.')\n if not EMAIL_REGEX.match(request.form['email']):\n flash('Invalid email address!')\n if len(request.form['password']) < 8:\n is_valid = False\n flash('password must be atleast 8 characters long.')\n if request.form['password'] != request.form['confirm_password']:\n is_valid = False\n flash('passwords do not match.')\n if not is_valid:\n return redirect('/')\n else:\n flash('sucessfully added')\n mysql = connectToMySQL(DATABASE)\n pw_hash = bcrypt.generate_password_hash(request.form['password'])\n query = (\n 'INSERT INTO users (email, password, first_name, last_name) VALUES (%(em)s,%(pw)s,%(fn)s,%(ln)s);'\n )\n data = {'em': request.form['email'], 'pw': pw_hash, 'fn': request.form[\n 'first_name'], 'ln': request.form['last_name']}\n id_users = mysql.query_db(query, data)\n session['id_users'] = id_users\n session['greeting'] = request.form['first_name']\n return redirect('/quotes')\n\n\n@app.route('/login', methods=['POST'])\ndef login():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM users WHERE email = %(em)s;'\n data = {'em': request.form['email']}\n result = mysql.query_db(query, data)\n if len(result) > 0:\n if bcrypt.check_password_hash(result[0]['password'], request.form[\n 'password']):\n session['id_users'] = result[0]['id_users']\n session['greeting'] = result[0]['first_name']\n return redirect('/quotes')\n else:\n flash('Email and/or password does not match.')\n return redirect('/')\n else:\n flash('Please enter your registered Email.')\n return redirect('/')\n\n\n@app.route('/success')\ndef success():\n if 'id_users' not in session:\n return redirect('/')\n 
else:\n return render_template('success.html')\n\n\n@app.route('/quotes')\ndef quotes():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM quotes JOIN users ON from_user = id_users;'\n join = mysql.query_db(query)\n return render_template('quotes.html', joined=join)\n\n\n@app.route('/create', methods=['POST'])\ndef create():\n is_valid = True\n if len(request.form['content']) < 10:\n flash('quotes are required to be longer than 10 characters.')\n is_valid == False\n if is_valid == True:\n mysql = connectToMySQL(DATABASE)\n query = (\n 'INSERT INTO quotes (content, author, from_user) VALUES (%(quo)s, %(auth)s, %(from)s);'\n )\n data = {'quo': request.form['content'], 'auth': request.form[\n 'author'], 'from': session['id_users']}\n mysql.query_db(query, data)\n return redirect('/quotes')\n\n\n@app.route('/delete/<id>/<thing>')\ndef delete(id, thing):\n if session['id_users'] == int(thing):\n mysql = connectToMySQL(DATABASE)\n query = 'DELETE FROM quotes WHERE id_quotes = %(id)s;'\n data = {'id': id}\n mysql.query_db(query, data)\n return redirect('/quotes')\n else:\n flash(\"Unable to delete other's quotes\")\n return redirect('/quotes')\n\n\n@app.route('/edit')\ndef edit():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * From users WHERE id_users = %(id)s'\n data = {'id': session['id_users']}\n users_table = mysql.query_db(query, data)\n return render_template('edit_account.html', users=users_table)\n\n\n@app.route('/update', methods=['POST'])\ndef update():\n is_valid = True\n if len(request.form['f_name']) < 3:\n is_valid = False\n flash('please enter your first name.')\n if len(request.form['l_name']) < 3:\n is_valid = False\n flash('please enter your last name.')\n if not EMAIL_REGEX.match(request.form['email']):\n flash('Invalid email address!')\n if not is_valid:\n return redirect('/edit')\n else:\n flash('sucessfully updated')\n mysql = connectToMySQL(DATABASE)\n query = (\n 'UPDATE users Set first_name = %(fn)s, last_name = %(ln)s , email = 
%(em)s WHERE id_users = %(id)s;'\n )\n data = {'fn': request.form['f_name'], 'ln': request.form['l_name'],\n 'em': request.form['email'], 'id': session['id_users']}\n id = mysql.query_db(query, data)\n session['greeting'] = request.form['f_name']\n return redirect('/quotes')\n\n\n@app.route('/my_posts')\ndef my_post():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM quotes WHERE from_user = %(id)s;'\n data = {'id': session['id_users']}\n my_quotes = mysql.query_db(query, data)\n return render_template('my_posts.html', quotes=my_quotes)\n\n\n@app.route('/logout')\ndef logout():\n session.clear()\n return redirect('/')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "<mask token>\napp = Flask(__name__)\nbcrypt = Bcrypt(app)\napp.secret_key = 'something secret10'\nDATABASE = 'exam_quote_dash'\nEMAIL_REGEX = re.compile('^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\\\\.[a-zA-Z]+$')\n\n\n@app.route('/')\ndef signin():\n return render_template('index.html')\n\n\n@app.route('/register', methods=['POST'])\ndef register():\n is_valid = True\n if len(request.form['first_name']) < 2:\n is_valid = False\n flash('please enter your first name.')\n if len(request.form['last_name']) < 2:\n is_valid = False\n flash('please enter your last name.')\n if not EMAIL_REGEX.match(request.form['email']):\n flash('Invalid email address!')\n if len(request.form['password']) < 8:\n is_valid = False\n flash('password must be atleast 8 characters long.')\n if request.form['password'] != request.form['confirm_password']:\n is_valid = False\n flash('passwords do not match.')\n if not is_valid:\n return redirect('/')\n else:\n flash('sucessfully added')\n mysql = connectToMySQL(DATABASE)\n pw_hash = bcrypt.generate_password_hash(request.form['password'])\n query = (\n 'INSERT INTO users (email, password, first_name, last_name) VALUES (%(em)s,%(pw)s,%(fn)s,%(ln)s);'\n )\n data = {'em': request.form['email'], 'pw': pw_hash, 'fn': request.form[\n 'first_name'], 'ln': request.form['last_name']}\n id_users = mysql.query_db(query, data)\n session['id_users'] = id_users\n session['greeting'] = request.form['first_name']\n return redirect('/quotes')\n\n\n@app.route('/login', methods=['POST'])\ndef login():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM users WHERE email = %(em)s;'\n data = {'em': request.form['email']}\n result = mysql.query_db(query, data)\n if len(result) > 0:\n if bcrypt.check_password_hash(result[0]['password'], request.form[\n 'password']):\n session['id_users'] = result[0]['id_users']\n session['greeting'] = result[0]['first_name']\n return redirect('/quotes')\n else:\n flash('Email and/or password does not match.')\n return 
redirect('/')\n else:\n flash('Please enter your registered Email.')\n return redirect('/')\n\n\n@app.route('/success')\ndef success():\n if 'id_users' not in session:\n return redirect('/')\n else:\n return render_template('success.html')\n\n\n@app.route('/quotes')\ndef quotes():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM quotes JOIN users ON from_user = id_users;'\n join = mysql.query_db(query)\n return render_template('quotes.html', joined=join)\n\n\n@app.route('/create', methods=['POST'])\ndef create():\n is_valid = True\n if len(request.form['content']) < 10:\n flash('quotes are required to be longer than 10 characters.')\n is_valid == False\n if is_valid == True:\n mysql = connectToMySQL(DATABASE)\n query = (\n 'INSERT INTO quotes (content, author, from_user) VALUES (%(quo)s, %(auth)s, %(from)s);'\n )\n data = {'quo': request.form['content'], 'auth': request.form[\n 'author'], 'from': session['id_users']}\n mysql.query_db(query, data)\n return redirect('/quotes')\n\n\n@app.route('/delete/<id>/<thing>')\ndef delete(id, thing):\n if session['id_users'] == int(thing):\n mysql = connectToMySQL(DATABASE)\n query = 'DELETE FROM quotes WHERE id_quotes = %(id)s;'\n data = {'id': id}\n mysql.query_db(query, data)\n return redirect('/quotes')\n else:\n flash(\"Unable to delete other's quotes\")\n return redirect('/quotes')\n\n\n@app.route('/edit')\ndef edit():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * From users WHERE id_users = %(id)s'\n data = {'id': session['id_users']}\n users_table = mysql.query_db(query, data)\n return render_template('edit_account.html', users=users_table)\n\n\n@app.route('/update', methods=['POST'])\ndef update():\n is_valid = True\n if len(request.form['f_name']) < 3:\n is_valid = False\n flash('please enter your first name.')\n if len(request.form['l_name']) < 3:\n is_valid = False\n flash('please enter your last name.')\n if not EMAIL_REGEX.match(request.form['email']):\n flash('Invalid email address!')\n if not 
is_valid:\n return redirect('/edit')\n else:\n flash('sucessfully updated')\n mysql = connectToMySQL(DATABASE)\n query = (\n 'UPDATE users Set first_name = %(fn)s, last_name = %(ln)s , email = %(em)s WHERE id_users = %(id)s;'\n )\n data = {'fn': request.form['f_name'], 'ln': request.form['l_name'],\n 'em': request.form['email'], 'id': session['id_users']}\n id = mysql.query_db(query, data)\n session['greeting'] = request.form['f_name']\n return redirect('/quotes')\n\n\n@app.route('/my_posts')\ndef my_post():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM quotes WHERE from_user = %(id)s;'\n data = {'id': session['id_users']}\n my_quotes = mysql.query_db(query, data)\n return render_template('my_posts.html', quotes=my_quotes)\n\n\n@app.route('/logout')\ndef logout():\n session.clear()\n return redirect('/')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "from flask import Flask, render_template, request, redirect, flash, session\nfrom mysqlconnection import connectToMySQL\nfrom flask_bcrypt import Bcrypt\nimport re\n\napp = Flask(__name__)\nbcrypt = Bcrypt(app)\napp.secret_key = \"something secret10\"\nDATABASE = \"exam_quote_dash\"\nEMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\\.[a-zA-Z]+$') \n\n#users\n# id_users, first_name, last_name, email, password\n\n#quotes\n#id_quotes, from_user, liked_from, content, author\n\n@app.route(\"/\")\ndef signin():\n return render_template(\"index.html\")\n\n@app.route(\"/register\", methods=[\"POST\"])\ndef register():\n is_valid = True\n if len(request.form['first_name']) < 2:\n \tis_valid = False\n \tflash(\"please enter your first name.\")\n if len(request.form['last_name']) < 2:\n \tis_valid = False\n \tflash(\"please enter your last name.\")\n if not EMAIL_REGEX.match(request.form['email']):\n flash(\"Invalid email address!\")\n if len(request.form['password']) < 8:\n \tis_valid = False\n \tflash(\"password must be atleast 8 characters long.\")\n if (request.form['password'] != request.form['confirm_password']):\n \tis_valid = False\n \tflash(\"passwords do not match.\")\n if not is_valid:\n return redirect('/')\n else:\n flash(\"sucessfully added\")\n mysql = connectToMySQL(DATABASE)\n pw_hash = bcrypt.generate_password_hash(request.form['password'])\n query = \"INSERT INTO users (email, password, first_name, last_name) VALUES (%(em)s,%(pw)s,%(fn)s,%(ln)s);\"\n data = {\n 'em': request.form['email'],\n 'pw': pw_hash,\n 'fn': request.form['first_name'],\n 'ln': request.form['last_name']\n }\n id_users = mysql.query_db(query,data)\n session['id_users'] = id_users\n session['greeting'] = request.form['first_name'] \n\n return redirect('/quotes')\n\n@app.route('/login', methods=['POST'])\ndef login():\n mysql = connectToMySQL(DATABASE)\n query = \"SELECT * FROM users WHERE email = %(em)s;\"\n data = {\n 'em': request.form['email']\n }\n result = 
mysql.query_db(query, data)\n\n if len(result) > 0:\n if bcrypt.check_password_hash(result[0]['password'], request.form['password']):\n session['id_users'] = result[0]['id_users']\n session['greeting'] = result[0]['first_name']\n return redirect('/quotes')\n else:\n flash(\"Email and/or password does not match.\")\n return redirect('/')\n else:\n flash(\"Please enter your registered Email.\")\n return redirect('/')\n\n@app.route('/success')\ndef success():\n if 'id_users' not in session:\n return redirect('/')\n else:\n return render_template('success.html')\n\n@app.route('/quotes')\ndef quotes():\n mysql = connectToMySQL(DATABASE)\n query = \"SELECT * FROM quotes JOIN users ON from_user = id_users;\"\n join = mysql.query_db(query)\n\n return render_template('quotes.html', joined = join)\n\n@app.route('/create', methods=['POST'])\ndef create():\n is_valid = True\n\n if len(request.form['content']) < 10:\n flash(\"quotes are required to be longer than 10 characters.\")\n is_valid == False\n\n if is_valid == True: \n mysql = connectToMySQL(DATABASE)\n query = \"INSERT INTO quotes (content, author, from_user) VALUES (%(quo)s, %(auth)s, %(from)s);\"\n data = {\n 'quo': request.form['content'],\n 'auth': request.form['author'],\n\n 'from': session['id_users']\n }\n mysql.query_db(query, data)\n\n return redirect('/quotes')\n\n@app.route('/delete/<id>/<thing>')\ndef delete(id,thing):\n if session['id_users'] == int(thing):\n mysql = connectToMySQL(DATABASE)\n query = \"DELETE FROM quotes WHERE id_quotes = %(id)s;\"\n data = {\n 'id': id\n } \n mysql.query_db(query, data)\n return redirect('/quotes')\n else:\n flash(\"Unable to delete other's quotes\")\n return redirect('/quotes')\n\n@app.route(\"/edit\")\ndef edit():\n mysql = connectToMySQL(DATABASE)\n query = \"SELECT * From users WHERE id_users = %(id)s\"\n data ={ \n 'id' : session['id_users']\n }\n users_table = mysql.query_db(query, data)\n\n\n return render_template('edit_account.html', users = 
users_table)\n\n@app.route(\"/update\", methods=[\"POST\"])\ndef update():\n is_valid = True\n if len(request.form['f_name']) < 3:\n \tis_valid = False\n \tflash(\"please enter your first name.\")\n if len(request.form['l_name']) < 3:\n \tis_valid = False\n \tflash(\"please enter your last name.\")\n if not EMAIL_REGEX.match(request.form['email']):\n flash(\"Invalid email address!\")\n if not is_valid:\n return redirect('/edit')\n else:\n flash(\"sucessfully updated\")\n mysql = connectToMySQL(DATABASE)\n query = \"UPDATE users Set first_name = %(fn)s, last_name = %(ln)s , email = %(em)s WHERE id_users = %(id)s;\"\n data = {\n \"fn\": request.form[\"f_name\"],\n \"ln\": request.form[\"l_name\"],\n \"em\": request.form[\"email\"],\n 'id' : session['id_users']\n }\n id = mysql.query_db(query, data)\n\n session['greeting'] = request.form['f_name'] \n return redirect('/quotes')\n\n@app.route(\"/my_posts\")\ndef my_post():\n mysql = connectToMySQL(DATABASE)\n query = \"SELECT * FROM quotes WHERE from_user = %(id)s;\"\n data ={ \n 'id' : session['id_users']\n }\n my_quotes = mysql.query_db(query, data)\n\n return render_template('my_posts.html', quotes = my_quotes)\n\n@app.route('/logout')\ndef logout():\n session.clear()\n return redirect('/')\n\nif __name__==\"__main__\": \n app.run(debug=True) ",
"step-ids": [
9,
10,
12,
13,
15
]
}
|
[
9,
10,
12,
13,
15
] |
<|reserved_special_token_0|>
class ModuleChecker(misc.WrapperModuleChecker):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    @utils.check_messages('consider-merging-classes-inherited')
    def visit_assign(self, node):
        """Record ``_inherit``/``_name`` class-attribute assignments.

        Caches, per (odoo root node, inherited model) pair, every class
        that extends a model without renaming it; ``close()`` later flags
        pairs with more than one entry as merge candidates.
        """
        if not self.odoo_node:
            return
        if not self.linter.is_message_enabled(
                'consider-merging-classes-inherited', node.lineno):
            return
        node_left = node.targets[0]
        # Only plain `_inherit = '<const>'` / `_name = '<const>'`
        # assignments written directly inside a class body matter here.
        if not isinstance(node_left, astroid.node_classes.AssignName
                ) or node_left.name not in ('_inherit', '_name') or not isinstance(
                node.value, astroid.node_classes.Const) or not isinstance(node.
                parent, astroid.ClassDef):
            return
        if node_left.name == '_name':
            # Remember the declared model name so a later `_inherit` with a
            # different `_name` (delegation/new model) can be skipped below.
            node.parent.odoo_attribute_name = node.value.value
            return
        _name = getattr(node.parent, 'odoo_attribute_name', None)
        _inherit = node.value.value
        if _name and _name != _inherit:
            # New model built on top of another one: not a merge candidate.
            return
        key = self.odoo_node, _inherit
        # Tag the node with its file so close() can build a relative path.
        node.file = self.linter.current_file
        self.inh_dup.setdefault(key, []).append(node)
def _build_whitelist_module_patterns(self):
known_patterns = []
for known_pattern in self.config.import_name_whitelist:
pattern = known_pattern.replace('*', '.*').replace('?', '.?')
known_patterns.append(re.compile('^' + pattern + '$'))
return known_patterns
def open(self):
"""Define variables to use cache"""
self.inh_dup = {}
patterns = self._build_whitelist_module_patterns()
self._whitelist_module_patterns = patterns
super(ModuleChecker, self).open()
    def close(self):
        """Final process get all cached values and add messages"""
        # Emit one message per model that is inherited by more than one
        # class under the same odoo project root.
        for (odoo_node, class_dup_name), nodes in self.inh_dup.items():
            if len(nodes) == 1:
                continue
            # Report on the first occurrence; list the remaining duplicates
            # as "relative/path.py:lineno" strings in the message args.
            path_nodes = []
            for node in nodes[1:]:
                relpath = os.path.relpath(node.file, os.path.dirname(
                    odoo_node.file))
                path_nodes.append('%s:%d' % (relpath, node.lineno))
            self.add_message('consider-merging-classes-inherited', node=
                nodes[0], args=(class_dup_name, ', '.join(path_nodes)))
<|reserved_special_token_0|>
def check_odoo_relative_import(self, node):
if self.odoo_module_name in self._get_odoo_module_imported(node):
self.add_message('odoo-addons-relative-import', node=node, args
=self.odoo_module_name)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _is_module_name_in_whitelist(self, module_name):
parts = module_name.split('.')
module_names_to_check = ['.'.join(parts[:first_k]) for first_k in
range(len(parts), 0, -1)]
for module_name_to_check in module_names_to_check:
for pattern in self._whitelist_module_patterns:
if pattern.match(module_name_to_check):
return True
return False
<|reserved_special_token_0|>
@utils.check_messages('odoo-addons-relative-import',
'missing-import-error', 'missing-manifest-dependency')
def visit_importfrom(self, node):
self.check_odoo_relative_import(node)
if isinstance(node.scope(), astroid.Module):
package = node.modname
self._check_imported_packages(node, package)
@utils.check_messages('odoo-addons-relative-import',
'missing-import-error', 'missing-manifest-dependency')
def visit_import(self, node):
self.check_odoo_relative_import(node)
for name, _ in node.names:
if isinstance(node.scope(), astroid.Module):
self._check_imported_packages(node, name)
@utils.check_messages('except-pass')
def visit_tryexcept(self, node):
"""Visit block try except"""
for handler in node.handlers:
if not handler.name and len(handler.body) == 1 and isinstance(
handler.body[0], astroid.node_classes.Pass):
self.add_message('except-pass', node=handler)
    def _check_rst_syntax_error(self):
        """Check if rst file there is syntax error
        :return: False if exists errors and
        add list of errors in self.msg_args
        """
        rst_files = self.filter_files_ext('rst')
        self.msg_args = []
        for rst_file in rst_files:
            errors = self.check_rst_syntax(os.path.join(self.module_path,
                rst_file))
            for error in errors:
                msg = error.full_message
                # Ignore complaints about directives/roles unknown to plain
                # docutils (e.g. Sphinx-only extensions): not real errors.
                res = re.search(
                    'No directive entry for "([\\w|\\-]+)"|Unknown directive type "([\\w|\\-]+)"|No role entry for "([\\w|\\-]+)"|Unknown interpreted text role "([\\w|\\-]+)"'
                    , msg)
                if res:
                    continue
                # Flatten multi-line messages so each stays on one line.
                self.msg_args.append(('%s:%d' % (rst_file, error.line or 0),
                    msg.strip('\n').replace('\n', '|')))
        if self.msg_args:
            return False
        return True
<|reserved_special_token_0|>
    def _check_xml_syntax_error(self):
        """Check if xml file there is syntax error
        :return: False if exists errors and
        add list of errors in self.msg_args
        """
        self.msg_args = []
        for xml_file in self.filter_files_ext('xml', relpath=True):
            result = self.parse_xml(os.path.join(self.module_path, xml_file))
            # parse_xml returns the error text (a string) on failure;
            # newlines are flattened to '|' to keep the message one line.
            if isinstance(result, string_types):
                self.msg_args.append((xml_file, result.strip('\n').replace(
                    '\n', '|')))
        if self.msg_args:
            return False
        return True
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    def _check_redundant_modulename_xml(self):
        """Check redundant module name in xml file.
        :return: False if exists errors and
        add list of errors in self.msg_args
        """
        self.msg_args = []
        for xml_file_rel in self.filter_files_ext('xml', relpath=True):
            xml_file = os.path.join(self.module_path, xml_file_rel)
            # An xml id prefixed with the module's own name
            # (e.g. "my_module.my_id" inside my_module) is redundant.
            for xml_id, lineno in self.get_xml_redundant_module_name(xml_file,
                    self.module):
                self.msg_args.append(('%s:%d' % (xml_file_rel, lineno), xml_id)
                    )
        if self.msg_args:
            return False
        return True
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    def _check_dangerous_filter_wo_user(self):
        """Check dangerous filter without a user assigned.
        :return: False if exists errors and
        add list of errors in self.msg_args
        """
        xml_files = self.filter_files_ext('xml')
        for xml_file in xml_files:
            ir_filter_records = self.get_xml_records(os.path.join(self.
                module_path, xml_file), model='ir.filters')
            for ir_filter_record in ir_filter_records:
                ir_filter_fields = ir_filter_record.xpath(
                    "field[@name='name' or @name='user_id']")
                # Exactly one of (name, user_id): the filter lacks an
                # explicit user, so it would be applied globally.
                # NOTE: stops at the first offending record found.
                if ir_filter_fields and len(ir_filter_fields) == 1:
                    self.msg_args = '%s:%d' % (xml_file, ir_filter_record.
                        sourceline), ir_filter_record.get('id')
                    return False
        return True
<|reserved_special_token_0|>
@staticmethod
def _is_replaced_field(view):
try:
arch = view.xpath("field[@name='arch' and @type='xml'][1]")[0]
except IndexError:
return None
replaces = arch.xpath(
".//field[@name='name' and @position='replace'][1]") + arch.xpath(
".//xpath[@position='replace'][1]")
return bool(replaces)
    def _check_dangerous_view_replace_wo_priority(self):
        """Check dangerous view defined with low priority
        :return: False if exists errors and
        add list of errors in self.msg_args
        """
        self.msg_args = []
        xml_files = self.filter_files_ext('xml')
        for xml_file in xml_files:
            views = self.get_xml_records(os.path.join(self.module_path,
                xml_file), model='ir.ui.view')
            for view in views:
                priority = self._get_priority(view)
                is_replaced_field = self._is_replaced_field(view)
                # Replacing nodes from a low-priority view silently breaks
                # other modules that extend the same base view.
                if is_replaced_field and priority < self.config.min_priority:
                    self.msg_args.append(('%s:%s' % (xml_file, view.
                        sourceline), priority, self.config.min_priority))
        if self.msg_args:
            return False
        return True
    def _check_create_user_wo_reset_password(self):
        """Check xml records of user without the context
        'context="{'no_reset_password': True}"'
        This context avoid send email and mail log warning
        :return: False if exists errors and
        add list of errors in self.msg_args
        """
        self.msg_args = []
        xml_files = self.filter_files_ext('xml')
        for xml_file in xml_files:
            user_records = self.get_xml_records(os.path.join(self.
                module_path, xml_file), model='res.users')
            # Only records setting a 'name' field create new users (records
            # without it merely modify existing ones), and only those
            # missing 'no_reset_password' in their context are flagged.
            self.msg_args.extend([('%s:%s' % (xml_file, user_record.
                sourceline)) for user_record in user_records if user_record
                .xpath("field[@name='name']") and 'no_reset_password' not in
                (user_record.get('context') or '')])
        if self.msg_args:
            return False
        return True
def _check_javascript_lint(self):
"""Check javascript lint
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for js_file_rel in self.filter_files_ext('js', relpath=True):
js_file = os.path.join(self.module_path, js_file_rel)
errors = self.check_js_lint(js_file, self.config.jslintrc)
for error in errors:
self.msg_args.append((js_file_rel + error,))
if self.msg_args:
return False
return True
    def _check_deprecated_data_xml_node(self):
        """Check deprecated <data> xml node inside <odoo> xml node
        :return: False if found <data> xml node inside <odoo> xml node"""
        xml_files = self.filter_files_ext('xml')
        self.msg_args = []
        for xml_file in xml_files:
            doc = self.parse_xml(os.path.join(self.module_path, xml_file))
            # parse_xml returns a string on syntax errors; skip those files.
            odoo_nodes = doc.xpath('/odoo') if not isinstance(doc, string_types
                ) else []
            children, data_node = (odoo_nodes[0].getchildren(), odoo_nodes[
                0].findall('data')) if odoo_nodes else ([], [])
            # <odoo><data>...</data></odoo> with a single <data> child is
            # redundant: <odoo> alone is enough.
            if len(children) == 1 and len(data_node) == 1:
                lineno = odoo_nodes[0].sourceline
                self.msg_args.append('%s:%s' % (xml_file, lineno))
        if self.msg_args:
            return False
        return True
<|reserved_special_token_0|>
def _check_wrong_tabs_instead_of_spaces(self):
"""Check wrong tabs character instead of four spaces.
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for type_file in self.config.extfiles_to_lint:
for ext_file_rel in self.filter_files_ext(type_file, relpath=True):
ext_file = os.path.join(self.module_path, ext_file_rel)
countline = 0
with open(ext_file, 'rb') as fp:
for line in fp:
countline += 1
line_space_trip = line.lstrip(b' ')
if line_space_trip != line_space_trip.lstrip(b'\t'):
self.msg_args.append('%s:%d' % (ext_file_rel,
countline))
if self.msg_args:
return False
return True
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    def _get_xml_referenced_files(self):
        """Collect files referenced from the manifest's data-like XML files.

        Iterates every manifest data key and, for each listed .xml file,
        merges in the files it references (delegated to
        ``_get_xml_referenced_files_report``).
        """
        referenced_files = {}
        for data_type in DFTL_MANIFEST_DATA_KEYS:
            # The manifest key may be absent or None; treat both as empty.
            for fname in (self.manifest_dict.get(data_type) or []):
                if not fname.endswith('.xml'):
                    continue
                referenced_files.update(self.
                    _get_xml_referenced_files_report(fname, data_type))
        return referenced_files
<|reserved_special_token_0|>
def _get_module_files(self):
module_files = []
for type_file in self.config.extfiles_convert:
for ext_file_rel in self.filter_files_ext(type_file, relpath=True):
module_files.append(ext_file_rel)
return module_files
<|reserved_special_token_0|>
def _check_xml_attribute_translatable(self):
"""The xml attribute is missing the translation="off" tag
Example <attribute name="groups">sale.group</attribute>
"""
if self.linter._all_options['valid_odoo_versions'
].config.valid_odoo_versions != ['8.0']:
return True
self.msg_args = []
for xml_file in self.filter_files_ext('xml', relpath=True):
for record in self.get_xml_records(os.path.join(self.
module_path, xml_file), None,
'//attribute[not(@name="string") and not(@translation)]'):
self.msg_args.append(('%s:%d' % (xml_file, record.
sourceline), 'xml_id'))
if self.msg_args:
return False
return True
def _check_xml_deprecated_tree_attribute(self):
"""The tree-view declaration is using a deprecated attribute.
Example <tree string="Partners"></tree>
"""
checks = [{'attr': 'colors', 'skip_versions': {'4.2', '5.0', '6.0',
'6.1', '7.0', '8.0'}, 'xpath': './/tree[@colors]'}, {'attr':
'fonts', 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0',
'8.0'}, 'xpath': './/tree[@fonts]'}, {'attr': 'string',
'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0'}, 'xpath':
'.//tree[@string]'}]
valid_versions = set(self.linter._all_options['valid_odoo_versions'
].config.valid_odoo_versions)
applicable_checks = [check for check in checks if check['attr'] in
self.config.deprecated_tree_attributes and bool(valid_versions -
check['skip_versions'])]
self.msg_args = []
for xml_file in self.filter_files_ext('xml', relpath=True):
for record in self.get_xml_records(os.path.join(self.
module_path, xml_file), model='ir.ui.view'):
for check in applicable_checks:
if record.xpath(check['xpath']):
self.msg_args.append(('%s:%d' % (xml_file, record.
sourceline), check['attr']))
if self.msg_args:
return False
return True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ModuleChecker(misc.WrapperModuleChecker):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    @utils.check_messages('consider-merging-classes-inherited')
    def visit_assign(self, node):
        # Cache `_inherit`/`_name` class-attribute assignments so that
        # close() can report models inherited by more than one class of
        # the same project (consider-merging-classes-inherited).
        if not self.odoo_node:
            return
        if not self.linter.is_message_enabled(
            'consider-merging-classes-inherited', node.lineno):
            return
        node_left = node.targets[0]
        # Only plain `_inherit = "<const>"` / `_name = "<const>"`
        # assignments directly inside a class body are relevant.
        if not isinstance(node_left, astroid.node_classes.AssignName
            ) or node_left.name not in ('_inherit', '_name') or not isinstance(
            node.value, astroid.node_classes.Const) or not isinstance(node.
            parent, astroid.ClassDef):
            return
        if node_left.name == '_name':
            # Remember the declared model name on the class node itself.
            node.parent.odoo_attribute_name = node.value.value
            return
        _name = getattr(node.parent, 'odoo_attribute_name', None)
        _inherit = node.value.value
        if _name and _name != _inherit:
            # `_inherit` together with a different `_name` is not a
            # merge candidate.
            return
        key = self.odoo_node, _inherit
        node.file = self.linter.current_file
        self.inh_dup.setdefault(key, []).append(node)
    def _build_whitelist_module_patterns(self):
        """Compile the configured import-name whitelist globs into
        anchored regular expressions.

        ``*`` becomes ``.*`` and ``?`` becomes ``.?``.
        NOTE(review): glob ``?`` usually means "exactly one character"
        (``.`` in regex); ``.?`` also matches zero characters — confirm
        this looseness is intended.
        """
        known_patterns = []
        for known_pattern in self.config.import_name_whitelist:
            pattern = known_pattern.replace('*', '.*').replace('?', '.?')
            known_patterns.append(re.compile('^' + pattern + '$'))
        return known_patterns
def open(self):
"""Define variables to use cache"""
self.inh_dup = {}
patterns = self._build_whitelist_module_patterns()
self._whitelist_module_patterns = patterns
super(ModuleChecker, self).open()
def close(self):
"""Final process get all cached values and add messages"""
for (odoo_node, class_dup_name), nodes in self.inh_dup.items():
if len(nodes) == 1:
continue
path_nodes = []
for node in nodes[1:]:
relpath = os.path.relpath(node.file, os.path.dirname(
odoo_node.file))
path_nodes.append('%s:%d' % (relpath, node.lineno))
self.add_message('consider-merging-classes-inherited', node=
nodes[0], args=(class_dup_name, ', '.join(path_nodes)))
<|reserved_special_token_0|>
def check_odoo_relative_import(self, node):
if self.odoo_module_name in self._get_odoo_module_imported(node):
self.add_message('odoo-addons-relative-import', node=node, args
=self.odoo_module_name)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _is_module_name_in_whitelist(self, module_name):
parts = module_name.split('.')
module_names_to_check = ['.'.join(parts[:first_k]) for first_k in
range(len(parts), 0, -1)]
for module_name_to_check in module_names_to_check:
for pattern in self._whitelist_module_patterns:
if pattern.match(module_name_to_check):
return True
return False
<|reserved_special_token_0|>
@utils.check_messages('odoo-addons-relative-import',
'missing-import-error', 'missing-manifest-dependency')
def visit_importfrom(self, node):
self.check_odoo_relative_import(node)
if isinstance(node.scope(), astroid.Module):
package = node.modname
self._check_imported_packages(node, package)
@utils.check_messages('odoo-addons-relative-import',
'missing-import-error', 'missing-manifest-dependency')
def visit_import(self, node):
self.check_odoo_relative_import(node)
for name, _ in node.names:
if isinstance(node.scope(), astroid.Module):
self._check_imported_packages(node, name)
@utils.check_messages('except-pass')
def visit_tryexcept(self, node):
"""Visit block try except"""
for handler in node.handlers:
if not handler.name and len(handler.body) == 1 and isinstance(
handler.body[0], astroid.node_classes.Pass):
self.add_message('except-pass', node=handler)
def _check_rst_syntax_error(self):
"""Check if rst file there is syntax error
:return: False if exists errors and
add list of errors in self.msg_args
"""
rst_files = self.filter_files_ext('rst')
self.msg_args = []
for rst_file in rst_files:
errors = self.check_rst_syntax(os.path.join(self.module_path,
rst_file))
for error in errors:
msg = error.full_message
res = re.search(
'No directive entry for "([\\w|\\-]+)"|Unknown directive type "([\\w|\\-]+)"|No role entry for "([\\w|\\-]+)"|Unknown interpreted text role "([\\w|\\-]+)"'
, msg)
if res:
continue
self.msg_args.append(('%s:%d' % (rst_file, error.line or 0),
msg.strip('\n').replace('\n', '|')))
if self.msg_args:
return False
return True
<|reserved_special_token_0|>
def _check_xml_syntax_error(self):
"""Check if xml file there is syntax error
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for xml_file in self.filter_files_ext('xml', relpath=True):
result = self.parse_xml(os.path.join(self.module_path, xml_file))
if isinstance(result, string_types):
self.msg_args.append((xml_file, result.strip('\n').replace(
'\n', '|')))
if self.msg_args:
return False
return True
def _get_duplicate_xml_record_id(self, records):
"""Get duplicated records based on attribute id
:param records list: List of lxml.etree.Element "<record"
:return: Duplicated items.
e.g. {record.id: [record_node1, record_node2]}
:rtype: dict
"""
all_records = {}
for record in records:
record_id = '%s/%s_noupdate_%s' % (record.attrib.get('section',
''), record.attrib.get('id', ''), record.getparent().attrib
.get('noupdate', '0'))
all_records.setdefault(record_id, []).append(record)
records = {}
for key, items in all_records.items():
if not len(items) < 2:
records[key] = items
return records
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _check_redundant_modulename_xml(self):
"""Check redundant module name in xml file.
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for xml_file_rel in self.filter_files_ext('xml', relpath=True):
xml_file = os.path.join(self.module_path, xml_file_rel)
for xml_id, lineno in self.get_xml_redundant_module_name(xml_file,
self.module):
self.msg_args.append(('%s:%d' % (xml_file_rel, lineno), xml_id)
)
if self.msg_args:
return False
return True
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _check_duplicate_xml_fields(self):
"""Check duplicate field in all record of xml files of a odoo module.
Important note: this check does not work with inherited views.
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for xml_file in self.filter_files_ext('xml', relpath=True):
for record in self.get_xml_records(os.path.join(self.
module_path, xml_file)):
if record.xpath('field[@name="inherit_id"]'):
continue
for xpath in ['field', 'field/*/field',
'field/*/field/tree/field', 'field/*/field/form/field']:
for name, fobjs in self._get_duplicate_xml_fields(record
.xpath(xpath)).items():
self.msg_args.append(('%s:%d' % (xml_file, fobjs[0]
.sourceline), name[0], ', '.join([str(fobj.
sourceline) for fobj in fobjs[1:]])))
if self.msg_args:
return False
return True
    def _check_dangerous_filter_wo_user(self):
        """Check dangerous filter without a user assigned.

        Flags ``ir.filters`` records that define exactly one of the
        fields ``name``/``user_id``. The scan stops at the first
        offending record.
        :return: False if an error exists; self.msg_args then holds
            ("file:line", record_id)
        """
        xml_files = self.filter_files_ext('xml')
        for xml_file in xml_files:
            ir_filter_records = self.get_xml_records(os.path.join(self.
                module_path, xml_file), model='ir.filters')
            for ir_filter_record in ir_filter_records:
                ir_filter_fields = ir_filter_record.xpath(
                    "field[@name='name' or @name='user_id']")
                # A record defining both fields (or neither) is accepted.
                if ir_filter_fields and len(ir_filter_fields) == 1:
                    self.msg_args = '%s:%d' % (xml_file, ir_filter_record.
                        sourceline), ir_filter_record.get('id')
                    return False
        return True
<|reserved_special_token_0|>
@staticmethod
def _is_replaced_field(view):
try:
arch = view.xpath("field[@name='arch' and @type='xml'][1]")[0]
except IndexError:
return None
replaces = arch.xpath(
".//field[@name='name' and @position='replace'][1]") + arch.xpath(
".//xpath[@position='replace'][1]")
return bool(replaces)
def _check_dangerous_view_replace_wo_priority(self):
"""Check dangerous view defined with low priority
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
xml_files = self.filter_files_ext('xml')
for xml_file in xml_files:
views = self.get_xml_records(os.path.join(self.module_path,
xml_file), model='ir.ui.view')
for view in views:
priority = self._get_priority(view)
is_replaced_field = self._is_replaced_field(view)
if is_replaced_field and priority < self.config.min_priority:
self.msg_args.append(('%s:%s' % (xml_file, view.
sourceline), priority, self.config.min_priority))
if self.msg_args:
return False
return True
def _check_create_user_wo_reset_password(self):
"""Check xml records of user without the context
'context="{'no_reset_password': True}"'
This context avoid send email and mail log warning
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
xml_files = self.filter_files_ext('xml')
for xml_file in xml_files:
user_records = self.get_xml_records(os.path.join(self.
module_path, xml_file), model='res.users')
self.msg_args.extend([('%s:%s' % (xml_file, user_record.
sourceline)) for user_record in user_records if user_record
.xpath("field[@name='name']") and 'no_reset_password' not in
(user_record.get('context') or '')])
if self.msg_args:
return False
return True
def _check_javascript_lint(self):
"""Check javascript lint
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for js_file_rel in self.filter_files_ext('js', relpath=True):
js_file = os.path.join(self.module_path, js_file_rel)
errors = self.check_js_lint(js_file, self.config.jslintrc)
for error in errors:
self.msg_args.append((js_file_rel + error,))
if self.msg_args:
return False
return True
    def _check_deprecated_data_xml_node(self):
        """Check deprecated <data> xml node inside <odoo> xml node
        :return: False if found <data> xml node inside <odoo> xml node"""
        xml_files = self.filter_files_ext('xml')
        self.msg_args = []
        for xml_file in xml_files:
            doc = self.parse_xml(os.path.join(self.module_path, xml_file))
            # parse_xml returns an error string on failure; only real
            # documents are inspected.
            odoo_nodes = doc.xpath('/odoo') if not isinstance(doc, string_types
                ) else []
            children, data_node = (odoo_nodes[0].getchildren(), odoo_nodes[
                0].findall('data')) if odoo_nodes else ([], [])
            # Deprecated layout: <odoo> wrapping exactly one <data> child.
            if len(children) == 1 and len(data_node) == 1:
                lineno = odoo_nodes[0].sourceline
                self.msg_args.append('%s:%s' % (xml_file, lineno))
        if self.msg_args:
            return False
        return True
<|reserved_special_token_0|>
    def _check_wrong_tabs_instead_of_spaces(self):
        """Check wrong tabs character instead of four spaces.
        :return: False if exists errors and
        add list of errors in self.msg_args
        """
        self.msg_args = []
        for type_file in self.config.extfiles_to_lint:
            for ext_file_rel in self.filter_files_ext(type_file, relpath=True):
                ext_file = os.path.join(self.module_path, ext_file_rel)
                countline = 0
                with open(ext_file, 'rb') as fp:
                    for line in fp:
                        countline += 1
                        # After removing leading spaces, a remaining
                        # leading tab means tab-based indentation.
                        line_space_trip = line.lstrip(b' ')
                        if line_space_trip != line_space_trip.lstrip(b'\t'):
                            self.msg_args.append('%s:%d' % (ext_file_rel,
                                countline))
        if self.msg_args:
            return False
        return True
<|reserved_special_token_0|>
def _get_manifest_referenced_files(self):
referenced_files = {}
for data_type in DFTL_MANIFEST_DATA_KEYS:
for fname in (self.manifest_dict.get(data_type) or []):
referenced_files[fname] = data_type
return referenced_files
    def _get_xml_referenced_files(self):
        """Collect files referenced from <report> nodes of the XML data
        files declared in the manifest, mapped to their manifest
        section."""
        referenced_files = {}
        for data_type in DFTL_MANIFEST_DATA_KEYS:
            for fname in (self.manifest_dict.get(data_type) or []):
                if not fname.endswith('.xml'):
                    continue
                referenced_files.update(self.
                    _get_xml_referenced_files_report(fname, data_type))
        return referenced_files
    def _get_xml_referenced_files_report(self, fname, data_type):
        """Return {referenced_path: data_type} for the ``xml``/``xsl``
        attributes of every <report> node in `fname`.

        The first path component of each attribute value is dropped —
        presumably it is the module name; TODO confirm against callers.
        """
        return {os.path.join(*record.attrib[attribute].split(os.sep)[1:]):
            data_type for attribute in ['xml', 'xsl'] for record in self.
            parse_xml(os.path.join(self.module_path, fname)).xpath(
            '//report[@%s]' % attribute)}
    def _get_module_files(self):
        """List the module's files (relative paths) having one of the
        configured convertible extensions."""
        module_files = []
        for type_file in self.config.extfiles_convert:
            for ext_file_rel in self.filter_files_ext(type_file, relpath=True):
                module_files.append(ext_file_rel)
        return module_files
<|reserved_special_token_0|>
    def _check_xml_attribute_translatable(self):
        """The xml attribute is missing the translation="off" tag
        Example <attribute name="groups">sale.group</attribute>
        """
        # Only enforced when linting exclusively for Odoo 8.0.
        if self.linter._all_options['valid_odoo_versions'
            ].config.valid_odoo_versions != ['8.0']:
            return True
        self.msg_args = []
        for xml_file in self.filter_files_ext('xml', relpath=True):
            # Any <attribute> lacking both name="string" and a
            # translation attribute is reported.
            for record in self.get_xml_records(os.path.join(self.
                module_path, xml_file), None,
                '//attribute[not(@name="string") and not(@translation)]'):
                self.msg_args.append(('%s:%d' % (xml_file, record.
                    sourceline), 'xml_id'))
        if self.msg_args:
            return False
        return True
    def _check_xml_deprecated_tree_attribute(self):
        """The tree-view declaration is using a deprecated attribute.
        Example <tree string="Partners"></tree>
        """
        # Each entry: the attribute, the odoo versions in which it is
        # still valid ("skip"), and the xpath locating offenders.
        checks = [{'attr': 'colors', 'skip_versions': {'4.2', '5.0', '6.0',
            '6.1', '7.0', '8.0'}, 'xpath': './/tree[@colors]'}, {'attr':
            'fonts', 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0',
            '8.0'}, 'xpath': './/tree[@fonts]'}, {'attr': 'string',
            'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0'}, 'xpath':
            './/tree[@string]'}]
        valid_versions = set(self.linter._all_options['valid_odoo_versions'
            ].config.valid_odoo_versions)
        # A check applies when the user enabled it and at least one of
        # the targeted versions deprecates the attribute.
        applicable_checks = [check for check in checks if check['attr'] in
            self.config.deprecated_tree_attributes and bool(valid_versions -
            check['skip_versions'])]
        self.msg_args = []
        for xml_file in self.filter_files_ext('xml', relpath=True):
            for record in self.get_xml_records(os.path.join(self.
                module_path, xml_file), model='ir.ui.view'):
                for check in applicable_checks:
                    if record.xpath(check['xpath']):
                        self.msg_args.append(('%s:%d' % (xml_file, record.
                            sourceline), check['attr']))
        if self.msg_args:
            return False
        return True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ModuleChecker(misc.WrapperModuleChecker):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@utils.check_messages('consider-merging-classes-inherited')
def visit_assign(self, node):
if not self.odoo_node:
return
if not self.linter.is_message_enabled(
'consider-merging-classes-inherited', node.lineno):
return
node_left = node.targets[0]
if not isinstance(node_left, astroid.node_classes.AssignName
) or node_left.name not in ('_inherit', '_name') or not isinstance(
node.value, astroid.node_classes.Const) or not isinstance(node.
parent, astroid.ClassDef):
return
if node_left.name == '_name':
node.parent.odoo_attribute_name = node.value.value
return
_name = getattr(node.parent, 'odoo_attribute_name', None)
_inherit = node.value.value
if _name and _name != _inherit:
return
key = self.odoo_node, _inherit
node.file = self.linter.current_file
self.inh_dup.setdefault(key, []).append(node)
def _build_whitelist_module_patterns(self):
known_patterns = []
for known_pattern in self.config.import_name_whitelist:
pattern = known_pattern.replace('*', '.*').replace('?', '.?')
known_patterns.append(re.compile('^' + pattern + '$'))
return known_patterns
def open(self):
"""Define variables to use cache"""
self.inh_dup = {}
patterns = self._build_whitelist_module_patterns()
self._whitelist_module_patterns = patterns
super(ModuleChecker, self).open()
def close(self):
"""Final process get all cached values and add messages"""
for (odoo_node, class_dup_name), nodes in self.inh_dup.items():
if len(nodes) == 1:
continue
path_nodes = []
for node in nodes[1:]:
relpath = os.path.relpath(node.file, os.path.dirname(
odoo_node.file))
path_nodes.append('%s:%d' % (relpath, node.lineno))
self.add_message('consider-merging-classes-inherited', node=
nodes[0], args=(class_dup_name, ', '.join(path_nodes)))
<|reserved_special_token_0|>
def check_odoo_relative_import(self, node):
if self.odoo_module_name in self._get_odoo_module_imported(node):
self.add_message('odoo-addons-relative-import', node=node, args
=self.odoo_module_name)
    @staticmethod
    def _is_absolute_import(node, name):
        """Return True when `name`, imported at `node`, resolves to a
        module file distinct from the current module and registered
        under a different fully-qualified name."""
        modnode = node.root()
        importedmodnode = ModuleChecker._get_imported_module(node, name)
        if (importedmodnode and importedmodnode.file and modnode is not
            importedmodnode and importedmodnode.name != name):
            return True
        return False
<|reserved_special_token_0|>
def _is_module_name_in_whitelist(self, module_name):
parts = module_name.split('.')
module_names_to_check = ['.'.join(parts[:first_k]) for first_k in
range(len(parts), 0, -1)]
for module_name_to_check in module_names_to_check:
for pattern in self._whitelist_module_patterns:
if pattern.match(module_name_to_check):
return True
return False
    def _check_imported_packages(self, node, module_name):
        """Check if the import node is a external dependency to validate it"""
        if not module_name:
            # No module name to validate (e.g. bare relative imports).
            return
        if not self.manifest_dict:
            # Not inside an odoo module: no manifest to validate against.
            return
        if not isinstance(node.parent, astroid.Module):
            # Skip imports nested in functions/conditionals/try blocks.
            return
        if self._is_absolute_import(node, module_name):
            # Module resolves right now: not an external dependency.
            return
        if self._is_module_name_in_whitelist(module_name):
            return
        isort_obj = isort.SortImports(file_contents='')
        import_category = isort_obj.place_module(module_name)
        if import_category not in ('FIRSTPARTY', 'THIRDPARTY'):
            # Standard-library (or future) import: nothing to declare.
            return
        relpath = os.path.relpath(node.parent.file, os.path.dirname(self.
            manifest_file))
        if os.path.dirname(relpath) == 'tests':
            # Test-only dependencies are not enforced.
            return
        # NOTE(review): emitted unconditionally for every remaining
        # first/third-party import — confirm this is the intended
        # trigger for missing-import-error.
        self.add_message('missing-import-error', node=node, args=(module_name,)
            )
        ext_deps = self.manifest_dict.get('external_dependencies') or {}
        py_ext_deps = ext_deps.get('python') or []
        if isinstance(node, astroid.ImportFrom) and (node.level or 0) >= 1:
            return
        # Both the dotted name and its top-level package satisfy the
        # manifest declaration.
        if module_name not in py_ext_deps and module_name.split('.')[0
            ] not in py_ext_deps:
            self.add_message('missing-manifest-dependency', node=node, args
                =(module_name,))
@utils.check_messages('odoo-addons-relative-import',
'missing-import-error', 'missing-manifest-dependency')
def visit_importfrom(self, node):
self.check_odoo_relative_import(node)
if isinstance(node.scope(), astroid.Module):
package = node.modname
self._check_imported_packages(node, package)
@utils.check_messages('odoo-addons-relative-import',
'missing-import-error', 'missing-manifest-dependency')
def visit_import(self, node):
self.check_odoo_relative_import(node)
for name, _ in node.names:
if isinstance(node.scope(), astroid.Module):
self._check_imported_packages(node, name)
@utils.check_messages('except-pass')
def visit_tryexcept(self, node):
"""Visit block try except"""
for handler in node.handlers:
if not handler.name and len(handler.body) == 1 and isinstance(
handler.body[0], astroid.node_classes.Pass):
self.add_message('except-pass', node=handler)
def _check_rst_syntax_error(self):
"""Check if rst file there is syntax error
:return: False if exists errors and
add list of errors in self.msg_args
"""
rst_files = self.filter_files_ext('rst')
self.msg_args = []
for rst_file in rst_files:
errors = self.check_rst_syntax(os.path.join(self.module_path,
rst_file))
for error in errors:
msg = error.full_message
res = re.search(
'No directive entry for "([\\w|\\-]+)"|Unknown directive type "([\\w|\\-]+)"|No role entry for "([\\w|\\-]+)"|Unknown interpreted text role "([\\w|\\-]+)"'
, msg)
if res:
continue
self.msg_args.append(('%s:%d' % (rst_file, error.line or 0),
msg.strip('\n').replace('\n', '|')))
if self.msg_args:
return False
return True
<|reserved_special_token_0|>
def _check_xml_syntax_error(self):
"""Check if xml file there is syntax error
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for xml_file in self.filter_files_ext('xml', relpath=True):
result = self.parse_xml(os.path.join(self.module_path, xml_file))
if isinstance(result, string_types):
self.msg_args.append((xml_file, result.strip('\n').replace(
'\n', '|')))
if self.msg_args:
return False
return True
def _get_duplicate_xml_record_id(self, records):
"""Get duplicated records based on attribute id
:param records list: List of lxml.etree.Element "<record"
:return: Duplicated items.
e.g. {record.id: [record_node1, record_node2]}
:rtype: dict
"""
all_records = {}
for record in records:
record_id = '%s/%s_noupdate_%s' % (record.attrib.get('section',
''), record.attrib.get('id', ''), record.getparent().attrib
.get('noupdate', '0'))
all_records.setdefault(record_id, []).append(record)
records = {}
for key, items in all_records.items():
if not len(items) < 2:
records[key] = items
return records
    def _check_duplicate_xml_record_id(self):
        """Check duplicated XML-IDs inside of the files of
        each manifest-section treated them separately
        :return: False if exists errors and
        add list of errors in self.msg_args
        """
        self.msg_args = []
        xml_records = []
        for fname, section in self._get_manifest_referenced_files().items():
            if os.path.splitext(fname)[1].lower() != '.xml':
                continue
            fname = os.path.join(self.module_path, fname)
            for xml_record in self.get_xml_records(fname):
                # Tag each record with its manifest section so that the
                # duplicate key keeps data/demo/... sections apart.
                xml_record.attrib['section'] = section
                xml_records.append(xml_record)
        # Report at the first occurrence, listing all other locations.
        for name, fobjs in self._get_duplicate_xml_record_id(xml_records
            ).items():
            self.msg_args.append(('%s:%d' % (os.path.relpath(fobjs[0].base,
                self.module_path), fobjs[0].sourceline), name, ', '.join([(
                os.path.relpath(fobj.base, self.module_path) + ':' + str(
                fobj.sourceline)) for fobj in fobjs[1:]])))
        if self.msg_args:
            return False
        return True
<|reserved_special_token_0|>
def _check_redundant_modulename_xml(self):
"""Check redundant module name in xml file.
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for xml_file_rel in self.filter_files_ext('xml', relpath=True):
xml_file = os.path.join(self.module_path, xml_file_rel)
for xml_id, lineno in self.get_xml_redundant_module_name(xml_file,
self.module):
self.msg_args.append(('%s:%d' % (xml_file_rel, lineno), xml_id)
)
if self.msg_args:
return False
return True
<|reserved_special_token_0|>
    def _get_duplicate_xml_fields(self, fields):
        """Get duplicated xml fields based on attribute name.

        Fields only count as duplicates when they also share the same
        ``context``, ``filter_domain`` and parent node.

        :param fields list: List of lxml.etree.Element "<field"
        :return: Duplicated items keyed by
            (name, context, filter_domain, parent_node).
            e.g. {(name, ...): [field_node1, field_node2]}
        :rtype: dict
        """
        all_fields = {}
        for field in fields:
            field_xml = field.attrib.get('name')
            if not field_xml:
                continue
            all_fields.setdefault((field_xml, field.attrib.get('context'),
                field.attrib.get('filter_domain'), field.getparent()), []
                ).append(field)
        return dict(((name, context, filter_domain, parent_node), nodes) for
            (name, context, filter_domain, parent_node), nodes in
            all_fields.items() if len(nodes) >= 2)
def _check_duplicate_xml_fields(self):
"""Check duplicate field in all record of xml files of a odoo module.
Important note: this check does not work with inherited views.
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for xml_file in self.filter_files_ext('xml', relpath=True):
for record in self.get_xml_records(os.path.join(self.
module_path, xml_file)):
if record.xpath('field[@name="inherit_id"]'):
continue
for xpath in ['field', 'field/*/field',
'field/*/field/tree/field', 'field/*/field/form/field']:
for name, fobjs in self._get_duplicate_xml_fields(record
.xpath(xpath)).items():
self.msg_args.append(('%s:%d' % (xml_file, fobjs[0]
.sourceline), name[0], ', '.join([str(fobj.
sourceline) for fobj in fobjs[1:]])))
if self.msg_args:
return False
return True
def _check_dangerous_filter_wo_user(self):
"""Check dangerous filter without a user assigned.
:return: False if exists errors and
add list of errors in self.msg_args
"""
xml_files = self.filter_files_ext('xml')
for xml_file in xml_files:
ir_filter_records = self.get_xml_records(os.path.join(self.
module_path, xml_file), model='ir.filters')
for ir_filter_record in ir_filter_records:
ir_filter_fields = ir_filter_record.xpath(
"field[@name='name' or @name='user_id']")
if ir_filter_fields and len(ir_filter_fields) == 1:
self.msg_args = '%s:%d' % (xml_file, ir_filter_record.
sourceline), ir_filter_record.get('id')
return False
return True
<|reserved_special_token_0|>
@staticmethod
def _is_replaced_field(view):
try:
arch = view.xpath("field[@name='arch' and @type='xml'][1]")[0]
except IndexError:
return None
replaces = arch.xpath(
".//field[@name='name' and @position='replace'][1]") + arch.xpath(
".//xpath[@position='replace'][1]")
return bool(replaces)
def _check_dangerous_view_replace_wo_priority(self):
"""Check dangerous view defined with low priority
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
xml_files = self.filter_files_ext('xml')
for xml_file in xml_files:
views = self.get_xml_records(os.path.join(self.module_path,
xml_file), model='ir.ui.view')
for view in views:
priority = self._get_priority(view)
is_replaced_field = self._is_replaced_field(view)
if is_replaced_field and priority < self.config.min_priority:
self.msg_args.append(('%s:%s' % (xml_file, view.
sourceline), priority, self.config.min_priority))
if self.msg_args:
return False
return True
def _check_create_user_wo_reset_password(self):
"""Check xml records of user without the context
'context="{'no_reset_password': True}"'
This context avoid send email and mail log warning
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
xml_files = self.filter_files_ext('xml')
for xml_file in xml_files:
user_records = self.get_xml_records(os.path.join(self.
module_path, xml_file), model='res.users')
self.msg_args.extend([('%s:%s' % (xml_file, user_record.
sourceline)) for user_record in user_records if user_record
.xpath("field[@name='name']") and 'no_reset_password' not in
(user_record.get('context') or '')])
if self.msg_args:
return False
return True
def _check_javascript_lint(self):
"""Check javascript lint
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for js_file_rel in self.filter_files_ext('js', relpath=True):
js_file = os.path.join(self.module_path, js_file_rel)
errors = self.check_js_lint(js_file, self.config.jslintrc)
for error in errors:
self.msg_args.append((js_file_rel + error,))
if self.msg_args:
return False
return True
def _check_deprecated_data_xml_node(self):
"""Check deprecated <data> xml node inside <odoo> xml node
:return: False if found <data> xml node inside <odoo> xml node"""
xml_files = self.filter_files_ext('xml')
self.msg_args = []
for xml_file in xml_files:
doc = self.parse_xml(os.path.join(self.module_path, xml_file))
odoo_nodes = doc.xpath('/odoo') if not isinstance(doc, string_types
) else []
children, data_node = (odoo_nodes[0].getchildren(), odoo_nodes[
0].findall('data')) if odoo_nodes else ([], [])
if len(children) == 1 and len(data_node) == 1:
lineno = odoo_nodes[0].sourceline
self.msg_args.append('%s:%s' % (xml_file, lineno))
if self.msg_args:
return False
return True
<|reserved_special_token_0|>
def _check_wrong_tabs_instead_of_spaces(self):
"""Check wrong tabs character instead of four spaces.
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for type_file in self.config.extfiles_to_lint:
for ext_file_rel in self.filter_files_ext(type_file, relpath=True):
ext_file = os.path.join(self.module_path, ext_file_rel)
countline = 0
with open(ext_file, 'rb') as fp:
for line in fp:
countline += 1
line_space_trip = line.lstrip(b' ')
if line_space_trip != line_space_trip.lstrip(b'\t'):
self.msg_args.append('%s:%d' % (ext_file_rel,
countline))
if self.msg_args:
return False
return True
<|reserved_special_token_0|>
def _get_manifest_referenced_files(self):
referenced_files = {}
for data_type in DFTL_MANIFEST_DATA_KEYS:
for fname in (self.manifest_dict.get(data_type) or []):
referenced_files[fname] = data_type
return referenced_files
def _get_xml_referenced_files(self):
referenced_files = {}
for data_type in DFTL_MANIFEST_DATA_KEYS:
for fname in (self.manifest_dict.get(data_type) or []):
if not fname.endswith('.xml'):
continue
referenced_files.update(self.
_get_xml_referenced_files_report(fname, data_type))
return referenced_files
def _get_xml_referenced_files_report(self, fname, data_type):
return {os.path.join(*record.attrib[attribute].split(os.sep)[1:]):
data_type for attribute in ['xml', 'xsl'] for record in self.
parse_xml(os.path.join(self.module_path, fname)).xpath(
'//report[@%s]' % attribute)}
def _get_module_files(self):
module_files = []
for type_file in self.config.extfiles_convert:
for ext_file_rel in self.filter_files_ext(type_file, relpath=True):
module_files.append(ext_file_rel)
return module_files
def _check_file_not_used(self):
"""Check if a file is not used from manifest"""
module_files = set(self._get_module_files())
referenced_files = set(self._get_manifest_referenced_files()).union(set
(self._get_xml_referenced_files()))
excluded_dirs = ['static', 'test', 'tests', 'migrations']
no_referenced_files = [f for f in module_files - referenced_files if
f.split(os.path.sep)[0] not in excluded_dirs]
self.msg_args = no_referenced_files
return not no_referenced_files
def _check_xml_attribute_translatable(self):
"""The xml attribute is missing the translation="off" tag
Example <attribute name="groups">sale.group</attribute>
"""
if self.linter._all_options['valid_odoo_versions'
].config.valid_odoo_versions != ['8.0']:
return True
self.msg_args = []
for xml_file in self.filter_files_ext('xml', relpath=True):
for record in self.get_xml_records(os.path.join(self.
module_path, xml_file), None,
'//attribute[not(@name="string") and not(@translation)]'):
self.msg_args.append(('%s:%d' % (xml_file, record.
sourceline), 'xml_id'))
if self.msg_args:
return False
return True
def _check_xml_deprecated_tree_attribute(self):
"""The tree-view declaration is using a deprecated attribute.
Example <tree string="Partners"></tree>
"""
checks = [{'attr': 'colors', 'skip_versions': {'4.2', '5.0', '6.0',
'6.1', '7.0', '8.0'}, 'xpath': './/tree[@colors]'}, {'attr':
'fonts', 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0',
'8.0'}, 'xpath': './/tree[@fonts]'}, {'attr': 'string',
'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0'}, 'xpath':
'.//tree[@string]'}]
valid_versions = set(self.linter._all_options['valid_odoo_versions'
].config.valid_odoo_versions)
applicable_checks = [check for check in checks if check['attr'] in
self.config.deprecated_tree_attributes and bool(valid_versions -
check['skip_versions'])]
self.msg_args = []
for xml_file in self.filter_files_ext('xml', relpath=True):
for record in self.get_xml_records(os.path.join(self.
module_path, xml_file), model='ir.ui.view'):
for check in applicable_checks:
if record.xpath(check['xpath']):
self.msg_args.append(('%s:%d' % (xml_file, record.
sourceline), check['attr']))
if self.msg_args:
return False
return True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ModuleChecker(misc.WrapperModuleChecker):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    @utils.check_messages('consider-merging-classes-inherited')
    def visit_assign(self, node):
        """Cache ``_inherit``/``_name`` class assignments so that close()
        can report models inherited by several classes of the same addon.
        """
        if not self.odoo_node:
            return
        if not self.linter.is_message_enabled(
            'consider-merging-classes-inherited', node.lineno):
            return
        node_left = node.targets[0]
        # Only constant `_inherit = '...'` / `_name = '...'` assignments
        # placed directly in a class body are of interest.
        if not isinstance(node_left, astroid.node_classes.AssignName
            ) or node_left.name not in ('_inherit', '_name') or not isinstance(
            node.value, astroid.node_classes.Const) or not isinstance(node.
            parent, astroid.ClassDef):
            return
        if node_left.name == '_name':
            # Remember the declared model name on the class node; it is
            # compared against `_inherit` below.
            node.parent.odoo_attribute_name = node.value.value
            return
        _name = getattr(node.parent, 'odoo_attribute_name', None)
        _inherit = node.value.value
        if _name and _name != _inherit:
            # _name='model.a' with _inherit='model.b' defines a new model,
            # which is valid and not a merge candidate.
            return
        key = self.odoo_node, _inherit
        node.file = self.linter.current_file
        self.inh_dup.setdefault(key, []).append(node)
def _build_whitelist_module_patterns(self):
known_patterns = []
for known_pattern in self.config.import_name_whitelist:
pattern = known_pattern.replace('*', '.*').replace('?', '.?')
known_patterns.append(re.compile('^' + pattern + '$'))
return known_patterns
def open(self):
"""Define variables to use cache"""
self.inh_dup = {}
patterns = self._build_whitelist_module_patterns()
self._whitelist_module_patterns = patterns
super(ModuleChecker, self).open()
def close(self):
"""Final process get all cached values and add messages"""
for (odoo_node, class_dup_name), nodes in self.inh_dup.items():
if len(nodes) == 1:
continue
path_nodes = []
for node in nodes[1:]:
relpath = os.path.relpath(node.file, os.path.dirname(
odoo_node.file))
path_nodes.append('%s:%d' % (relpath, node.lineno))
self.add_message('consider-merging-classes-inherited', node=
nodes[0], args=(class_dup_name, ', '.join(path_nodes)))
    def _get_odoo_module_imported(self, node):
        """Return the Odoo addon names referenced by an import node.

        :param node: astroid Import or ImportFrom node.
        :return: list of addon names found in ``openerp.addons.*`` /
            ``odoo.addons.*`` imports (empty list when none).
        """
        odoo_module = []
        if isinstance(node, astroid.ImportFrom) and ('openerp.addons' in
            node.modname or 'odoo.addons' in node.modname):
            packages = node.modname.split('.')
            if len(packages) >= 3:
                # e.g. from openerp.addons.odoo_module import models
                odoo_module.append(packages[2])
            else:
                # e.g. from openerp.addons import odoo_module
                odoo_module.append(node.names[0][0])
        elif isinstance(node, astroid.Import):
            for name, _ in node.names:
                if 'openerp.addons' not in name and 'odoo.addons' not in name:
                    continue
                packages = name.split('.')
                if len(packages) >= 3:
                    # e.g. import openerp.addons.odoo_module
                    odoo_module.append(packages[2])
        return odoo_module
def check_odoo_relative_import(self, node):
if self.odoo_module_name in self._get_odoo_module_imported(node):
self.add_message('odoo-addons-relative-import', node=node, args
=self.odoo_module_name)
    @staticmethod
    def _is_absolute_import(node, name):
        """Tell whether importing *name* at *node* resolves to a module
        file distinct from the current module.

        NOTE(review): the ``importedmodnode.name != name`` clause appears
        to filter resolutions where astroid maps the requested name onto a
        differently named module -- confirm against astroid's
        do_import_module semantics.
        """
        modnode = node.root()
        importedmodnode = ModuleChecker._get_imported_module(node, name)
        if (importedmodnode and importedmodnode.file and modnode is not
            importedmodnode and importedmodnode.name != name):
            return True
        return False
@staticmethod
def _get_imported_module(importnode, modname):
try:
return importnode.do_import_module(modname)
except:
pass
def _is_module_name_in_whitelist(self, module_name):
parts = module_name.split('.')
module_names_to_check = ['.'.join(parts[:first_k]) for first_k in
range(len(parts), 0, -1)]
for module_name_to_check in module_names_to_check:
for pattern in self._whitelist_module_patterns:
if pattern.match(module_name_to_check):
return True
return False
    def _check_imported_packages(self, node, module_name):
        """Check if the import node is a external dependency to validate it"""
        if not module_name:
            # Skip local packages: not an external dependency.
            return
        if not self.manifest_dict:
            # Skip files that do not belong to an Odoo module.
            return
        if not isinstance(node.parent, astroid.Module):
            # Skip import statements nested below module level.
            return
        if self._is_absolute_import(node, module_name):
            # Skip imports astroid can resolve.
            return
        if self._is_module_name_in_whitelist(module_name):
            # Ignore whitelisted (known) modules.
            return
        isort_obj = isort.SortImports(file_contents='')
        import_category = isort_obj.place_module(module_name)
        if import_category not in ('FIRSTPARTY', 'THIRDPARTY'):
            # Only first/third-party libraries count as external deps.
            return
        relpath = os.path.relpath(node.parent.file, os.path.dirname(self.
            manifest_file))
        if os.path.dirname(relpath) == 'tests':
            # Test files are only loaded when tests run, i.e. when the
            # module and its external dependencies are installed.
            return
        self.add_message('missing-import-error', node=node, args=(module_name,)
            )
        ext_deps = self.manifest_dict.get('external_dependencies') or {}
        py_ext_deps = ext_deps.get('python') or []
        if isinstance(node, astroid.ImportFrom) and (node.level or 0) >= 1:
            # Relative imports (level >= 1) are local to the module.
            return
        if module_name not in py_ext_deps and module_name.split('.')[0
            ] not in py_ext_deps:
            self.add_message('missing-manifest-dependency', node=node, args
                =(module_name,))
@utils.check_messages('odoo-addons-relative-import',
'missing-import-error', 'missing-manifest-dependency')
def visit_importfrom(self, node):
self.check_odoo_relative_import(node)
if isinstance(node.scope(), astroid.Module):
package = node.modname
self._check_imported_packages(node, package)
@utils.check_messages('odoo-addons-relative-import',
'missing-import-error', 'missing-manifest-dependency')
def visit_import(self, node):
self.check_odoo_relative_import(node)
for name, _ in node.names:
if isinstance(node.scope(), astroid.Module):
self._check_imported_packages(node, name)
    @utils.check_messages('except-pass')
    def visit_tryexcept(self, node):
        """Report exception handlers whose whole body is a bare ``pass``.

        Handlers that bind the exception (``except X as err``) or contain
        more than the single ``pass`` statement are not flagged.
        """
        for handler in node.handlers:
            if not handler.name and len(handler.body) == 1 and isinstance(
                handler.body[0], astroid.node_classes.Pass):
                self.add_message('except-pass', node=handler)
def _check_rst_syntax_error(self):
"""Check if rst file there is syntax error
:return: False if exists errors and
add list of errors in self.msg_args
"""
rst_files = self.filter_files_ext('rst')
self.msg_args = []
for rst_file in rst_files:
errors = self.check_rst_syntax(os.path.join(self.module_path,
rst_file))
for error in errors:
msg = error.full_message
res = re.search(
'No directive entry for "([\\w|\\-]+)"|Unknown directive type "([\\w|\\-]+)"|No role entry for "([\\w|\\-]+)"|Unknown interpreted text role "([\\w|\\-]+)"'
, msg)
if res:
continue
self.msg_args.append(('%s:%d' % (rst_file, error.line or 0),
msg.strip('\n').replace('\n', '|')))
if self.msg_args:
return False
return True
def _check_missing_readme(self):
"""Check if exists ./README.{rst,md,txt} file
:return: If exists return True else False
"""
self.msg_args = self.config.readme_template_url,
for readme in DFTL_README_FILES:
if os.path.isfile(os.path.join(self.module_path, readme)):
return True
return False
def _check_xml_syntax_error(self):
"""Check if xml file there is syntax error
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for xml_file in self.filter_files_ext('xml', relpath=True):
result = self.parse_xml(os.path.join(self.module_path, xml_file))
if isinstance(result, string_types):
self.msg_args.append((xml_file, result.strip('\n').replace(
'\n', '|')))
if self.msg_args:
return False
return True
def _get_duplicate_xml_record_id(self, records):
"""Get duplicated records based on attribute id
:param records list: List of lxml.etree.Element "<record"
:return: Duplicated items.
e.g. {record.id: [record_node1, record_node2]}
:rtype: dict
"""
all_records = {}
for record in records:
record_id = '%s/%s_noupdate_%s' % (record.attrib.get('section',
''), record.attrib.get('id', ''), record.getparent().attrib
.get('noupdate', '0'))
all_records.setdefault(record_id, []).append(record)
records = {}
for key, items in all_records.items():
if not len(items) < 2:
records[key] = items
return records
def _check_duplicate_xml_record_id(self):
"""Check duplicated XML-IDs inside of the files of
each manifest-section treated them separately
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
xml_records = []
for fname, section in self._get_manifest_referenced_files().items():
if os.path.splitext(fname)[1].lower() != '.xml':
continue
fname = os.path.join(self.module_path, fname)
for xml_record in self.get_xml_records(fname):
xml_record.attrib['section'] = section
xml_records.append(xml_record)
for name, fobjs in self._get_duplicate_xml_record_id(xml_records
).items():
self.msg_args.append(('%s:%d' % (os.path.relpath(fobjs[0].base,
self.module_path), fobjs[0].sourceline), name, ', '.join([(
os.path.relpath(fobj.base, self.module_path) + ':' + str(
fobj.sourceline)) for fobj in fobjs[1:]])))
if self.msg_args:
return False
return True
def _check_duplicate_id_csv(self):
"""Check duplicate xml id in ir.model.access.csv files of a odoo module.
:return: False if exists errors and
add list of errors in self.msg_args
"""
all_csv_ids = []
self.msg_args = []
for csv_file_rel in self.filter_files_ext('csv', relpath=True):
csv_file = os.path.join(self.module_path, csv_file_rel)
if os.path.basename(csv_file) == 'ir.model.access.csv':
all_csv_ids.extend(self.get_field_csv(csv_file))
duplicated_ids_csv = self.get_duplicated_items(all_csv_ids)
for duplicated_id_csv in duplicated_ids_csv:
self.msg_args.append((csv_file_rel, duplicated_id_csv))
if duplicated_ids_csv:
return False
return True
def _check_redundant_modulename_xml(self):
"""Check redundant module name in xml file.
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for xml_file_rel in self.filter_files_ext('xml', relpath=True):
xml_file = os.path.join(self.module_path, xml_file_rel)
for xml_id, lineno in self.get_xml_redundant_module_name(xml_file,
self.module):
self.msg_args.append(('%s:%d' % (xml_file_rel, lineno), xml_id)
)
if self.msg_args:
return False
return True
def _check_character_not_valid_in_resource_link(self):
"""The resource in in src/href contains a not valid chararter"""
self.msg_args = []
for xml_file in self.filter_files_ext('xml'):
doc = self.parse_xml(os.path.join(self.module_path, xml_file))
for name, attr in (('link', 'href'), ('script', 'src')):
nodes = doc.xpath('.//%s[@%s]' % (name, attr)
) if not isinstance(doc, string_types) else []
for node in nodes:
resource = node.get(attr, '')
ext = os.path.splitext(os.path.basename(resource))[1]
if resource.startswith('/') and not re.search(
'^[.][a-zA-Z]+$', ext):
self.msg_args.append('%s:%s' % (xml_file, node.
sourceline))
if self.msg_args:
return False
return True
    def _get_duplicate_xml_fields(self, fields):
        """Get duplicated xml fields based on attribute name
        :param fields list: List of lxml.etree.Element "<field"
        :return: Duplicated items.
            e.g. {field.name: [field_node1, field_node2]}
        :rtype: dict
        """
        all_fields = {}
        for field in fields:
            field_xml = field.attrib.get('name')
            if not field_xml:
                continue
            # The grouping key includes context, filter_domain and the
            # parent node, so equally named fields with different contexts
            # or under different parents do not count as duplicates.
            all_fields.setdefault((field_xml, field.attrib.get('context'),
                field.attrib.get('filter_domain'), field.getparent()), []
                ).append(field)
        # Keep only the keys that collected two or more field nodes.
        return dict(((name, context, filter_domain, parent_node), nodes) for
            (name, context, filter_domain, parent_node), nodes in
            all_fields.items() if len(nodes) >= 2)
def _check_duplicate_xml_fields(self):
"""Check duplicate field in all record of xml files of a odoo module.
Important note: this check does not work with inherited views.
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for xml_file in self.filter_files_ext('xml', relpath=True):
for record in self.get_xml_records(os.path.join(self.
module_path, xml_file)):
if record.xpath('field[@name="inherit_id"]'):
continue
for xpath in ['field', 'field/*/field',
'field/*/field/tree/field', 'field/*/field/form/field']:
for name, fobjs in self._get_duplicate_xml_fields(record
.xpath(xpath)).items():
self.msg_args.append(('%s:%d' % (xml_file, fobjs[0]
.sourceline), name[0], ', '.join([str(fobj.
sourceline) for fobj in fobjs[1:]])))
if self.msg_args:
return False
return True
    def _check_dangerous_filter_wo_user(self):
        """Check dangerous filter without a user assigned.
        :return: False if exists errors and
            add list of errors in self.msg_args
        """
        xml_files = self.filter_files_ext('xml')
        for xml_file in xml_files:
            ir_filter_records = self.get_xml_records(os.path.join(self.
                module_path, xml_file), model='ir.filters')
            for ir_filter_record in ir_filter_records:
                ir_filter_fields = ir_filter_record.xpath(
                    "field[@name='name' or @name='user_id']")
                # Exactly one match means the record sets only one of
                # `name`/`user_id`: a named filter without a user.
                if ir_filter_fields and len(ir_filter_fields) == 1:
                    # Stops at the first offender; msg_args here is a
                    # single (location, xml_id) tuple, not a list.
                    self.msg_args = '%s:%d' % (xml_file, ir_filter_record.
                        sourceline), ir_filter_record.get('id')
                    return False
        return True
@staticmethod
def _get_priority(view):
try:
priority_node = view.xpath("field[@name='priority'][1]")[0]
return int(priority_node.get('eval', priority_node.text) or 0)
except (IndexError, ValueError):
pass
return 0
    @staticmethod
    def _is_replaced_field(view):
        """Tell whether a view's xml arch uses position="replace".

        :param view: lxml ``<record>`` node of an ir.ui.view.
        :return: True/False when the view has an xml arch, or None when no
            ``<field name="arch" type="xml">`` child exists.
        """
        try:
            arch = view.xpath("field[@name='arch' and @type='xml'][1]")[0]
        except IndexError:
            return None
        # Either a replaced <field name="name"> or any <xpath> node with
        # position="replace" counts.
        replaces = arch.xpath(
            ".//field[@name='name' and @position='replace'][1]") + arch.xpath(
            ".//xpath[@position='replace'][1]")
        return bool(replaces)
def _check_dangerous_view_replace_wo_priority(self):
"""Check dangerous view defined with low priority
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
xml_files = self.filter_files_ext('xml')
for xml_file in xml_files:
views = self.get_xml_records(os.path.join(self.module_path,
xml_file), model='ir.ui.view')
for view in views:
priority = self._get_priority(view)
is_replaced_field = self._is_replaced_field(view)
if is_replaced_field and priority < self.config.min_priority:
self.msg_args.append(('%s:%s' % (xml_file, view.
sourceline), priority, self.config.min_priority))
if self.msg_args:
return False
return True
def _check_create_user_wo_reset_password(self):
"""Check xml records of user without the context
'context="{'no_reset_password': True}"'
This context avoid send email and mail log warning
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
xml_files = self.filter_files_ext('xml')
for xml_file in xml_files:
user_records = self.get_xml_records(os.path.join(self.
module_path, xml_file), model='res.users')
self.msg_args.extend([('%s:%s' % (xml_file, user_record.
sourceline)) for user_record in user_records if user_record
.xpath("field[@name='name']") and 'no_reset_password' not in
(user_record.get('context') or '')])
if self.msg_args:
return False
return True
def _check_javascript_lint(self):
"""Check javascript lint
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for js_file_rel in self.filter_files_ext('js', relpath=True):
js_file = os.path.join(self.module_path, js_file_rel)
errors = self.check_js_lint(js_file, self.config.jslintrc)
for error in errors:
self.msg_args.append((js_file_rel + error,))
if self.msg_args:
return False
return True
def _check_deprecated_data_xml_node(self):
"""Check deprecated <data> xml node inside <odoo> xml node
:return: False if found <data> xml node inside <odoo> xml node"""
xml_files = self.filter_files_ext('xml')
self.msg_args = []
for xml_file in xml_files:
doc = self.parse_xml(os.path.join(self.module_path, xml_file))
odoo_nodes = doc.xpath('/odoo') if not isinstance(doc, string_types
) else []
children, data_node = (odoo_nodes[0].getchildren(), odoo_nodes[
0].findall('data')) if odoo_nodes else ([], [])
if len(children) == 1 and len(data_node) == 1:
lineno = odoo_nodes[0].sourceline
self.msg_args.append('%s:%s' % (xml_file, lineno))
if self.msg_args:
return False
return True
def _check_deprecated_openerp_xml_node(self):
"""Check deprecated <openerp> xml node
:return: False if exists <openerp> node and
add list of xml files in self.msg_args
"""
xml_files = self.filter_files_ext('xml')
self.msg_args = []
for xml_file in xml_files:
doc = self.parse_xml(os.path.join(self.module_path, xml_file))
openerp_nodes = doc.xpath('/openerp') if not isinstance(doc,
string_types) else []
if openerp_nodes:
lineno = openerp_nodes[0].sourceline
self.msg_args.append('%s:%s' % (xml_file, lineno))
if self.msg_args:
return False
return True
def _check_wrong_tabs_instead_of_spaces(self):
"""Check wrong tabs character instead of four spaces.
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for type_file in self.config.extfiles_to_lint:
for ext_file_rel in self.filter_files_ext(type_file, relpath=True):
ext_file = os.path.join(self.module_path, ext_file_rel)
countline = 0
with open(ext_file, 'rb') as fp:
for line in fp:
countline += 1
line_space_trip = line.lstrip(b' ')
if line_space_trip != line_space_trip.lstrip(b'\t'):
self.msg_args.append('%s:%d' % (ext_file_rel,
countline))
if self.msg_args:
return False
return True
    def _check_missing_newline_extrafiles(self):
        """Check missing newline in other ext files (.xml, .csv, .po)
        :return: False if exists errors and
            add list of errors in self.msg_args
        """
        self.msg_args = []
        for type_file in self.config.extfiles_to_lint:
            for ext_file_rel in self.filter_files_ext(type_file, relpath=True):
                ext_file = os.path.join(self.module_path, ext_file_rel)
                last_line = ''
                with open(ext_file, 'rb') as fp:
                    # Files of 0 or 1 byte are skipped: there is nothing
                    # meaningful to terminate with a newline.
                    if os.stat(ext_file).st_size > 1:
                        # Seek to the last two bytes so only the file tail
                        # is read and inspected.
                        fp.seek(-2, os.SEEK_END)
                        last_line = fp.readline()
                        if not (last_line.endswith(b'\n') or last_line.
                            endswith(b'\r')):
                            self.msg_args.append((ext_file_rel,))
        if self.msg_args:
            return False
        return True
def _get_manifest_referenced_files(self):
referenced_files = {}
for data_type in DFTL_MANIFEST_DATA_KEYS:
for fname in (self.manifest_dict.get(data_type) or []):
referenced_files[fname] = data_type
return referenced_files
def _get_xml_referenced_files(self):
referenced_files = {}
for data_type in DFTL_MANIFEST_DATA_KEYS:
for fname in (self.manifest_dict.get(data_type) or []):
if not fname.endswith('.xml'):
continue
referenced_files.update(self.
_get_xml_referenced_files_report(fname, data_type))
return referenced_files
def _get_xml_referenced_files_report(self, fname, data_type):
return {os.path.join(*record.attrib[attribute].split(os.sep)[1:]):
data_type for attribute in ['xml', 'xsl'] for record in self.
parse_xml(os.path.join(self.module_path, fname)).xpath(
'//report[@%s]' % attribute)}
def _get_module_files(self):
module_files = []
for type_file in self.config.extfiles_convert:
for ext_file_rel in self.filter_files_ext(type_file, relpath=True):
module_files.append(ext_file_rel)
return module_files
def _check_file_not_used(self):
"""Check if a file is not used from manifest"""
module_files = set(self._get_module_files())
referenced_files = set(self._get_manifest_referenced_files()).union(set
(self._get_xml_referenced_files()))
excluded_dirs = ['static', 'test', 'tests', 'migrations']
no_referenced_files = [f for f in module_files - referenced_files if
f.split(os.path.sep)[0] not in excluded_dirs]
self.msg_args = no_referenced_files
return not no_referenced_files
def _check_xml_attribute_translatable(self):
"""The xml attribute is missing the translation="off" tag
Example <attribute name="groups">sale.group</attribute>
"""
if self.linter._all_options['valid_odoo_versions'
].config.valid_odoo_versions != ['8.0']:
return True
self.msg_args = []
for xml_file in self.filter_files_ext('xml', relpath=True):
for record in self.get_xml_records(os.path.join(self.
module_path, xml_file), None,
'//attribute[not(@name="string") and not(@translation)]'):
self.msg_args.append(('%s:%d' % (xml_file, record.
sourceline), 'xml_id'))
if self.msg_args:
return False
return True
def _check_xml_deprecated_tree_attribute(self):
"""The tree-view declaration is using a deprecated attribute.
Example <tree string="Partners"></tree>
"""
checks = [{'attr': 'colors', 'skip_versions': {'4.2', '5.0', '6.0',
'6.1', '7.0', '8.0'}, 'xpath': './/tree[@colors]'}, {'attr':
'fonts', 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0',
'8.0'}, 'xpath': './/tree[@fonts]'}, {'attr': 'string',
'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0'}, 'xpath':
'.//tree[@string]'}]
valid_versions = set(self.linter._all_options['valid_odoo_versions'
].config.valid_odoo_versions)
applicable_checks = [check for check in checks if check['attr'] in
self.config.deprecated_tree_attributes and bool(valid_versions -
check['skip_versions'])]
self.msg_args = []
for xml_file in self.filter_files_ext('xml', relpath=True):
for record in self.get_xml_records(os.path.join(self.
module_path, xml_file), model='ir.ui.view'):
for check in applicable_checks:
if record.xpath(check['xpath']):
self.msg_args.append(('%s:%d' % (xml_file, record.
sourceline), check['attr']))
if self.msg_args:
return False
return True
    def _check_xml_deprecated_qweb_directive(self):
        """Check for use of deprecated QWeb directives t-*-options.
        :return: False if deprecated directives are found, in which case
            self.msg_args will contain the error messages.
        """
        valid_versions = set(self.linter._all_options['valid_odoo_versions'
            ].config.valid_odoo_versions)
        # The directives are only deprecated when linting 10.0 or 11.0.
        if not valid_versions & {'10.0', '11.0'}:
            return True
        deprecated_directives = {'t-esc-options', 't-field-options',
            't-raw-options'}
        directive_attrs = '|'.join('@%s' % d for d in deprecated_directives)
        # Match any element carrying one of the attributes inside a
        # <template> under either an <odoo> or an <openerp> root.
        xpath = '|'.join('/%s//template//*[%s]' % (tag, directive_attrs) for
            tag in ('odoo', 'openerp'))
        self.msg_args = []
        for xml_file in self.filter_files_ext('xml', relpath=False):
            doc = self.parse_xml(xml_file)
            # parse_xml returns an error string on invalid XML; syntax
            # errors are reported by a separate check.
            if isinstance(doc, string_types):
                continue
            for node in doc.xpath(xpath):
                directive = next(iter(set(node.attrib) & deprecated_directives)
                    )
                self.msg_args.append(('%s:%d' % (xml_file, node.sourceline),
                    directive))
        return not bool(self.msg_args)
<|reserved_special_token_1|>
"""Visit module to add odoo checks
"""
import os
import re
import astroid
import isort
from pylint.checkers import utils
from six import string_types
from .. import misc, settings
ODOO_MSGS = {
# C->convention R->refactor W->warning E->error F->fatal
# Visit odoo module with settings.BASE_OMODULE_ID
'C%d02' % settings.BASE_OMODULE_ID: (
'Missing ./README.rst file. Template here: %s',
'missing-readme',
settings.DESC_DFLT
),
'E%d01' % settings.BASE_OMODULE_ID: (
'%s %s',
'rst-syntax-error',
settings.DESC_DFLT
),
'E%d02' % settings.BASE_OMODULE_ID: (
'%s error: %s',
'xml-syntax-error',
settings.DESC_DFLT
),
'W%d01' % settings.BASE_OMODULE_ID: (
'%s Dangerous filter without explicit `user_id` in xml_id %s',
'dangerous-filter-wo-user',
settings.DESC_DFLT
),
'W%d02' % settings.BASE_OMODULE_ID: (
'%s Duplicate xml record id "%s" in %s',
'duplicate-xml-record-id',
settings.DESC_DFLT
),
'W%d03' % settings.BASE_OMODULE_ID: (
'%s',
'javascript-lint',
settings.DESC_DFLT
),
'W%d04' % settings.BASE_OMODULE_ID: (
'%s Deprecated <openerp> xml node',
'deprecated-openerp-xml-node',
settings.DESC_DFLT
),
'W%d05' % settings.BASE_OMODULE_ID: (
'%s record res.users without '
'context="{\'no_reset_password\': True}"',
'create-user-wo-reset-password',
settings.DESC_DFLT
),
'W%d06' % settings.BASE_OMODULE_ID: (
'%s Duplicate id "%s"',
'duplicate-id-csv',
settings.DESC_DFLT
),
'W%d07' % settings.BASE_OMODULE_ID: (
'%s Duplicate xml field "%s" in lines %s',
'duplicate-xml-fields',
settings.DESC_DFLT
),
'W%d08' % settings.BASE_OMODULE_ID: (
'%s Missing newline',
'missing-newline-extrafiles',
settings.DESC_DFLT
),
'W%d09' % settings.BASE_OMODULE_ID: (
'%s Redundant name module reference in xml_ids "%s".',
'redundant-modulename-xml',
settings.DESC_DFLT
),
'W%d10' % settings.BASE_OMODULE_ID: (
'%s Use wrong tabs indentation instead of four spaces',
'wrong-tabs-instead-of-spaces',
settings.DESC_DFLT
),
'R%d80' % settings.BASE_OMODULE_ID: (
'Consider merging classes inherited to "%s" from %s.',
'consider-merging-classes-inherited',
settings.DESC_DFLT
),
'W%d50' % settings.BASE_OMODULE_ID: (
'Same Odoo module absolute import. You should use '
'relative import with "." '
'instead of "openerp.addons.%s"',
'odoo-addons-relative-import',
settings.DESC_DFLT
),
'W%d40' % settings.BASE_OMODULE_ID: (
'%s Dangerous use of "replace" from view '
'with priority %s < %s. '
'Increase priority or don\'t use "replace". '
'For more information see https://odoo-development.readthedocs.io/en/latest/dev/xml/inherit.html#collisions-and-priority ',
'dangerous-view-replace-wo-priority',
settings.DESC_DFLT
),
'W%d30' % settings.BASE_OMODULE_ID: (
'%s not used from manifest',
'file-not-used',
settings.DESC_DFLT
),
'W%d35' % settings.BASE_OMODULE_ID: (
'External dependency "%s" without ImportError. More info: '
'https://odoo-development.readthedocs.io/en/latest/dev/py/external-imports.html'
'#external-dependencies',
'missing-import-error',
settings.DESC_DFLT
),
'W%d36' % settings.BASE_OMODULE_ID: (
'Missing external dependency "%s" from manifest. More info: '
'https://github.com/OCA/odoo-community.org/blob/master/website/'
'Contribution/CONTRIBUTING.rst'
'#external-dependencies',
'missing-manifest-dependency',
settings.DESC_DFLT
),
'W%d38' % settings.BASE_OMODULE_ID: (
'pass into block except. '
'If you really need to use the pass consider logging that exception',
'except-pass',
settings.DESC_DFLT
),
'W%d37' % settings.BASE_OMODULE_ID: (
'%s The xml attribute is missing the translation="off" tag %s',
'xml-attribute-translatable',
settings.DESC_DFLT
),
'W%d42' % settings.BASE_OMODULE_ID: (
'%s Deprecated <tree> xml attribute "%s"',
'xml-deprecated-tree-attribute',
settings.DESC_DFLT
),
'W%d43' % settings.BASE_OMODULE_ID: (
'%s Deprecated QWeb directive "%s". Use "t-options" instead',
'xml-deprecated-qweb-directive',
settings.DESC_DFLT
),
'W%d39' % settings.BASE_OMODULE_ID: (
'%s Use <odoo> instead of <odoo><data> or use <odoo noupdate="1">'
'instead of <odoo><data noupdate="1">',
'deprecated-data-xml-node',
settings.DESC_DFLT
),
'W%d44' % settings.BASE_OMODULE_ID: (
'%s The resource in in src/href contains a not valid chararter',
'character-not-valid-in-resource-link',
settings.DESC_DFLT
),
}
DFTL_README_TMPL_URL = 'https://github.com/OCA/maintainer-tools' + \
'/blob/master/template/module/README.rst'
DFTL_README_FILES = ['README.rst', 'README.md', 'README.txt']
DFTL_MIN_PRIORITY = 99
# Files supported from manifest to convert
# Extracted from openerp/tools/convert.py:def convert_file
DFLT_EXTFILES_CONVERT = ['csv', 'sql', 'xml', 'yml']
DFLT_EXTFILES_TO_LINT = DFLT_EXTFILES_CONVERT + [
'po', 'js', 'mako', 'rst', 'md', 'markdown']
DFLT_IMPORT_NAME_WHITELIST = [
# self-odoo
'odoo', 'openerp',
# packages for unit tests only
'requests_mock',
# Known external packages of odoo
'PIL', 'anybox.testing.openerp', 'argparse', 'babel',
'dateutil', 'decorator', 'docutils', 'faces', 'feedparser',
'gdata', 'gevent', 'greenlet', 'jcconv', 'jinja2',
'ldap', 'lxml', 'mako', 'markupsafe', 'mock', 'odf',
'ofxparse', 'openid', 'passlib', 'pkg_resources',
'psutil', 'psycogreen', 'psycopg2', 'pyPdf', 'pychart',
'pydot', 'pyparsing', 'pytz', 'qrcode', 'reportlab',
'requests', 'serial', 'simplejson', 'six', 'suds',
'unittest2', 'usb', 'vatnumber', 'vobject', 'werkzeug',
'wsgiref', 'xlsxwriter', 'xlwt', 'yaml',
]
DFTL_JSLINTRC = os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
'examples', '.jslintrc'
)
DFLT_DEPRECATED_TREE_ATTRS = ['colors', 'fonts', 'string']
DFTL_MANIFEST_DATA_KEYS = ['data', 'demo', 'demo_xml', 'init_xml', 'test',
'update_xml']
class ModuleChecker(misc.WrapperModuleChecker):
name = settings.CFG_SECTION
msgs = ODOO_MSGS
options = (
('readme_template_url', {
'type': 'string',
'metavar': '<string>',
'default': DFTL_README_TMPL_URL,
'help': 'URL of README.rst template file',
}),
('extfiles_to_lint', {
'type': 'csv',
'metavar': '<comma separated values>',
'default': DFLT_EXTFILES_TO_LINT,
'help': 'List of extension files to check separated by a comma.'
}),
('min-priority', {
'type': 'int',
'metavar': '<int>',
'default': DFTL_MIN_PRIORITY,
'help': 'Minimum priority number of a view with replace of fields.'
}),
('extfiles_convert', {
'type': 'csv',
'metavar': '<comma separated values>',
'default': DFLT_EXTFILES_CONVERT,
'help': 'List of extension files supported to convert '
'from manifest separated by a comma.'
}),
('import_name_whitelist', {
'type': 'csv',
'metavar': '<comma separated values>',
'default': DFLT_IMPORT_NAME_WHITELIST,
'help': 'List of known import dependencies of odoo,'
' separated by a comma.'
}),
('jslintrc', {
'type': 'string',
'metavar': '<path to file>',
'default': os.environ.get('PYLINT_ODOO_JSLINTRC') or DFTL_JSLINTRC,
'help': ('A path to a file that contains a configuration file of '
'javascript lint. You can use the environment variable '
'"PYLINT_ODOO_JSLINTRC" too. Default: %s' % DFTL_JSLINTRC)
}),
('deprecated_tree_attributes', {
'type': 'multiple_choice',
'metavar': '<attributes>',
'default': DFLT_DEPRECATED_TREE_ATTRS,
'choices': DFLT_DEPRECATED_TREE_ATTRS,
'help': 'List of deprecated list view attributes,'
' separated by a comma. Valid values: %s' % ', '.join(
DFLT_DEPRECATED_TREE_ATTRS)
}),
)
odoo_check_versions = {
'missing-import-error': {
'max_odoo_version': '11.0',
},
}
class_inherit_names = []
@utils.check_messages('consider-merging-classes-inherited')
def visit_assign(self, node):
if not self.odoo_node:
return
if not self.linter.is_message_enabled(
'consider-merging-classes-inherited', node.lineno):
return
node_left = node.targets[0]
if not isinstance(node_left, astroid.node_classes.AssignName) or \
node_left.name not in ('_inherit', '_name') or \
not isinstance(node.value, astroid.node_classes.Const) or \
not isinstance(node.parent, astroid.ClassDef):
return
if node_left.name == '_name':
node.parent.odoo_attribute_name = node.value.value
return
_name = getattr(node.parent, 'odoo_attribute_name', None)
_inherit = node.value.value
if _name and _name != _inherit:
# Skip _name='model.name' _inherit='other.model' because is valid
return
key = (self.odoo_node, _inherit)
node.file = self.linter.current_file
self.inh_dup.setdefault(key, []).append(node)
def _build_whitelist_module_patterns(self):
known_patterns = []
for known_pattern in self.config.import_name_whitelist:
pattern = known_pattern.replace('*', '.*').replace('?', '.?')
known_patterns.append(re.compile('^' + pattern + '$'))
return known_patterns
def open(self):
"""Define variables to use cache"""
self.inh_dup = {}
patterns = self._build_whitelist_module_patterns()
self._whitelist_module_patterns = patterns
super(ModuleChecker, self).open()
def close(self):
"""Final process get all cached values and add messages"""
for (odoo_node, class_dup_name), nodes in self.inh_dup.items():
if len(nodes) == 1:
continue
path_nodes = []
for node in nodes[1:]:
relpath = os.path.relpath(node.file,
os.path.dirname(odoo_node.file))
path_nodes.append("%s:%d" % (relpath, node.lineno))
self.add_message('consider-merging-classes-inherited',
node=nodes[0],
args=(class_dup_name, ', '.join(path_nodes)))
def _get_odoo_module_imported(self, node):
odoo_module = []
if isinstance(node, astroid.ImportFrom) and \
('openerp.addons' in node.modname or
'odoo.addons' in node.modname):
packages = node.modname.split('.')
if len(packages) >= 3:
# from openerp.addons.odoo_module import models
odoo_module.append(packages[2])
else:
# from openerp.addons import odoo_module
odoo_module.append(node.names[0][0])
elif isinstance(node, astroid.Import):
for name, _ in node.names:
if 'openerp.addons' not in name and 'odoo.addons' not in name:
continue
packages = name.split('.')
if len(packages) >= 3:
# import openerp.addons.odoo_module
odoo_module.append(packages[2])
return odoo_module
def check_odoo_relative_import(self, node):
if self.odoo_module_name in self._get_odoo_module_imported(node):
self.add_message('odoo-addons-relative-import', node=node,
args=(self.odoo_module_name))
@staticmethod
def _is_absolute_import(node, name):
modnode = node.root()
importedmodnode = ModuleChecker._get_imported_module(node, name)
if importedmodnode and importedmodnode.file and \
modnode is not importedmodnode and \
importedmodnode.name != name:
return True
return False
@staticmethod
def _get_imported_module(importnode, modname):
try:
return importnode.do_import_module(modname)
except:
pass
def _is_module_name_in_whitelist(self, module_name):
# Try to find most specific placement instruction match (if any)
# (from isort place_module() method)
parts = module_name.split('.')
module_names_to_check = [
'.'.join(parts[:first_k])
for first_k in range(len(parts), 0, -1)
]
# Check if one of the module name is part of the whitelist.
# For an module name such as 'anybox.testing.openerp', the
# modules names to check will be:
# ['anybox.testing.openerp', 'anybox.testing', 'anybox']
# Only one of them has to be in the whitelist to be accepted.
for module_name_to_check in module_names_to_check:
for pattern in self._whitelist_module_patterns:
if pattern.match(module_name_to_check):
return True
return False
    def _check_imported_packages(self, node, module_name):
        """Check if the import node is a external dependency to validate it

        Emits ``missing-import-error`` for any top-level import of an
        external (FIRSTPARTY/THIRDPARTY per isort) package, and
        additionally ``missing-manifest-dependency`` when that package is
        not declared in the manifest's ``external_dependencies.python``.

        :param node: astroid Import/ImportFrom node being visited.
        :param module_name: dotted name of the imported package
            (empty string for relative imports).
        """
        if not module_name:
            # skip local packages because is not a external dependency.
            return
        if not self.manifest_dict:
            # skip if is not a module of odoo
            return
        if not isinstance(node.parent, astroid.Module):
            # skip nested import sentences
            return
        if self._is_absolute_import(node, module_name):
            # skip absolute imports
            return
        if self._is_module_name_in_whitelist(module_name):
            # ignore whitelisted modules
            return
        isort_obj = isort.SortImports(file_contents='')
        import_category = isort_obj.place_module(module_name)
        if import_category not in ('FIRSTPARTY', 'THIRDPARTY'):
            # skip if is not a external library or is a white list library
            return
        relpath = os.path.relpath(
            node.parent.file, os.path.dirname(self.manifest_file))
        if os.path.dirname(relpath) == 'tests':
            # import errors rules don't apply to the test files
            # since these files are loaded only when running tests
            # and in such a case your
            # module and their external dependencies are installed.
            return
        # NOTE(review): missing-import-error is reported for every
        # external import that reaches this point -- confirm this matches
        # the intended check semantics.
        self.add_message('missing-import-error', node=node,
                         args=(module_name,))
        ext_deps = self.manifest_dict.get('external_dependencies') or {}
        py_ext_deps = ext_deps.get('python') or []
        # Relative "from . import x" never needs a manifest entry.
        if isinstance(node, astroid.ImportFrom) and (node.level or 0) >= 1:
            return
        # Accept either the full dotted name or its top-level package.
        if module_name not in py_ext_deps and \
                module_name.split('.')[0] not in py_ext_deps:
            self.add_message('missing-manifest-dependency', node=node,
                             args=(module_name,))
@utils.check_messages('odoo-addons-relative-import',
'missing-import-error',
'missing-manifest-dependency')
def visit_importfrom(self, node):
self.check_odoo_relative_import(node)
if isinstance(node.scope(), astroid.Module):
package = node.modname
self._check_imported_packages(node, package)
@utils.check_messages('odoo-addons-relative-import',
'missing-import-error',
'missing-manifest-dependency')
def visit_import(self, node):
self.check_odoo_relative_import(node)
for name, _ in node.names:
if isinstance(node.scope(), astroid.Module):
self._check_imported_packages(node, name)
@utils.check_messages('except-pass')
def visit_tryexcept(self, node):
"""Visit block try except"""
for handler in node.handlers:
if (not handler.name and
len(handler.body) == 1 and
isinstance(handler.body[0], astroid.node_classes.Pass)):
self.add_message('except-pass', node=handler)
def _check_rst_syntax_error(self):
"""Check if rst file there is syntax error
:return: False if exists errors and
add list of errors in self.msg_args
"""
rst_files = self.filter_files_ext('rst')
self.msg_args = []
for rst_file in rst_files:
errors = self.check_rst_syntax(
os.path.join(self.module_path, rst_file))
for error in errors:
msg = error.full_message
res = re.search(
r'No directive entry for "([\w|\-]+)"|'
r'Unknown directive type "([\w|\-]+)"|'
r'No role entry for "([\w|\-]+)"|'
r'Unknown interpreted text role "([\w|\-]+)"', msg)
# TODO: Add support for sphinx directives after fix
# https://github.com/twolfson/restructuredtext-lint/issues/29
if res:
# Skip directive errors
continue
self.msg_args.append((
"%s:%d" % (rst_file, error.line or 0),
msg.strip('\n').replace('\n', '|')))
if self.msg_args:
return False
return True
def _check_missing_readme(self):
"""Check if exists ./README.{rst,md,txt} file
:return: If exists return True else False
"""
self.msg_args = (self.config.readme_template_url,)
for readme in DFTL_README_FILES:
if os.path.isfile(os.path.join(self.module_path, readme)):
return True
return False
def _check_xml_syntax_error(self):
"""Check if xml file there is syntax error
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for xml_file in self.filter_files_ext('xml', relpath=True):
result = self.parse_xml(os.path.join(self.module_path, xml_file))
if isinstance(result, string_types):
self.msg_args.append((
xml_file, result.strip('\n').replace('\n', '|')))
if self.msg_args:
return False
return True
def _get_duplicate_xml_record_id(self, records):
"""Get duplicated records based on attribute id
:param records list: List of lxml.etree.Element "<record"
:return: Duplicated items.
e.g. {record.id: [record_node1, record_node2]}
:rtype: dict
"""
all_records = {}
for record in records:
record_id = "%s/%s_noupdate_%s" % (
record.attrib.get('section', ''),
record.attrib.get('id', ''),
record.getparent().attrib.get('noupdate', '0'),
)
all_records.setdefault(record_id, []).append(record)
# Remove all keys which not duplicated
records = {}
for key, items in all_records.items():
if not len(items) < 2:
records[key] = items
return records
    def _check_duplicate_xml_record_id(self):
        """Check duplicated XML-IDs inside of the files of
        each manifest-section treated them separately
        :return: False if exists errors and
            add list of errors in self.msg_args
        """
        self.msg_args = []
        xml_records = []
        for fname, section in self._get_manifest_referenced_files().items():
            if os.path.splitext(fname)[1].lower() != '.xml':
                continue
            fname = os.path.join(self.module_path, fname)
            for xml_record in self.get_xml_records(fname):
                # Tag each record with its manifest section ('data',
                # 'demo', ...) so that records from different sections
                # never collide: the section is part of the key built
                # by _get_duplicate_xml_record_id.
                xml_record.attrib['section'] = section
                xml_records.append(xml_record)
        for name, fobjs in \
                self._get_duplicate_xml_record_id(xml_records).items():
            # Report the first occurrence as "file:line" and list the
            # remaining duplicates in the same format.
            self.msg_args.append((
                "%s:%d" % (os.path.relpath(fobjs[0].base, self.module_path),
                           fobjs[0].sourceline),
                name,
                ', '.join([os.path.relpath(fobj.base, self.module_path) +
                           ':' + str(fobj.sourceline)
                           for fobj in fobjs[1:]]),
            ))
        if self.msg_args:
            return False
        return True
def _check_duplicate_id_csv(self):
"""Check duplicate xml id in ir.model.access.csv files of a odoo module.
:return: False if exists errors and
add list of errors in self.msg_args
"""
all_csv_ids = []
self.msg_args = []
for csv_file_rel in self.filter_files_ext('csv', relpath=True):
csv_file = os.path.join(self.module_path, csv_file_rel)
if os.path.basename(csv_file) == 'ir.model.access.csv':
all_csv_ids.extend(self.get_field_csv(csv_file))
duplicated_ids_csv = self.get_duplicated_items(all_csv_ids)
for duplicated_id_csv in duplicated_ids_csv:
self.msg_args.append((csv_file_rel, duplicated_id_csv))
if duplicated_ids_csv:
return False
return True
def _check_redundant_modulename_xml(self):
"""Check redundant module name in xml file.
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for xml_file_rel in self.filter_files_ext('xml', relpath=True):
xml_file = os.path.join(self.module_path, xml_file_rel)
for xml_id, lineno in self.get_xml_redundant_module_name(
xml_file, self.module):
self.msg_args.append(
("%s:%d" % (xml_file_rel, lineno), xml_id))
if self.msg_args:
return False
return True
def _check_character_not_valid_in_resource_link(self):
"""The resource in in src/href contains a not valid chararter"""
self.msg_args = []
for xml_file in self.filter_files_ext('xml'):
doc = self.parse_xml(os.path.join(self.module_path, xml_file))
for name, attr in (('link', 'href'), ('script', 'src')):
nodes = (doc.xpath('.//%s[@%s]' % (name, attr))
if not isinstance(doc, string_types) else [])
for node in nodes:
resource = node.get(attr, '')
ext = os.path.splitext(os.path.basename(resource))[1]
if (resource.startswith('/') and not
re.search('^[.][a-zA-Z]+$', ext)):
self.msg_args.append(("%s:%s" % (xml_file,
node.sourceline)))
if self.msg_args:
return False
return True
def _get_duplicate_xml_fields(self, fields):
"""Get duplicated xml fields based on attribute name
:param fields list: List of lxml.etree.Element "<field"
:return: Duplicated items.
e.g. {field.name: [field_node1, field_node2]}
:rtype: dict
"""
all_fields = {}
for field in fields:
field_xml = field.attrib.get('name')
if not field_xml:
continue
all_fields.setdefault(
(field_xml, field.attrib.get('context'),
field.attrib.get('filter_domain'),
field.getparent()), []).append(field)
# Remove all keys which not duplicated by excluding them from the
return dict(((name, context, filter_domain, parent_node), nodes) for
(name, context, filter_domain, parent_node), nodes in
all_fields.items() if len(nodes) >= 2)
def _check_duplicate_xml_fields(self):
"""Check duplicate field in all record of xml files of a odoo module.
Important note: this check does not work with inherited views.
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for xml_file in self.filter_files_ext('xml', relpath=True):
for record in self.get_xml_records(
os.path.join(self.module_path, xml_file)):
if record.xpath('field[@name="inherit_id"]'):
continue
for xpath in ['field', 'field/*/field',
'field/*/field/tree/field',
'field/*/field/form/field']:
for name, fobjs in self._get_duplicate_xml_fields(
record.xpath(xpath)).items():
self.msg_args.append((
"%s:%d" % (xml_file, fobjs[0].sourceline), name[0],
', '.join([str(fobj.sourceline)
for fobj in fobjs[1:]]),
))
if self.msg_args:
return False
return True
def _check_dangerous_filter_wo_user(self):
"""Check dangerous filter without a user assigned.
:return: False if exists errors and
add list of errors in self.msg_args
"""
xml_files = self.filter_files_ext('xml')
for xml_file in xml_files:
ir_filter_records = self.get_xml_records(
os.path.join(self.module_path, xml_file), model='ir.filters')
for ir_filter_record in ir_filter_records:
ir_filter_fields = ir_filter_record.xpath(
"field[@name='name' or @name='user_id']")
# if exists field="name" then is a new record
# then should be field="user_id" too
if ir_filter_fields and len(ir_filter_fields) == 1:
# TODO: Add a list of msg_args before of return
# TODO: Add source lineno in all xml checks
self.msg_args = (
"%s:%d" % (xml_file, ir_filter_record.sourceline),
ir_filter_record.get('id'),)
return False
return True
@staticmethod
def _get_priority(view):
try:
priority_node = view.xpath("field[@name='priority'][1]")[0]
return int(priority_node.get('eval', priority_node.text) or 0)
except (IndexError, ValueError):
# IndexError: If the field is not found
# ValueError: If the value found is not valid integer
pass
return 0
@staticmethod
def _is_replaced_field(view):
try:
arch = view.xpath("field[@name='arch' and @type='xml'][1]")[0]
except IndexError:
return None
replaces = \
arch.xpath(".//field[@name='name' and @position='replace'][1]") + \
arch.xpath(".//xpath[@position='replace'][1]")
return bool(replaces)
def _check_dangerous_view_replace_wo_priority(self):
"""Check dangerous view defined with low priority
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
xml_files = self.filter_files_ext('xml')
for xml_file in xml_files:
views = self.get_xml_records(
os.path.join(self.module_path, xml_file), model='ir.ui.view')
for view in views:
priority = self._get_priority(view)
is_replaced_field = self._is_replaced_field(view)
if is_replaced_field and priority < self.config.min_priority:
self.msg_args.append((
"%s:%s" % (xml_file, view.sourceline), priority,
self.config.min_priority))
if self.msg_args:
return False
return True
def _check_create_user_wo_reset_password(self):
"""Check xml records of user without the context
'context="{'no_reset_password': True}"'
This context avoid send email and mail log warning
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
xml_files = self.filter_files_ext('xml')
for xml_file in xml_files:
user_records = self.get_xml_records(
os.path.join(self.module_path, xml_file), model='res.users')
# if exists field="name" then is a new record
# then should be context
self.msg_args.extend([
("%s:%s" % (xml_file, user_record.sourceline))
for user_record in user_records
if user_record.xpath("field[@name='name']") and
'no_reset_password' not in (user_record.get('context') or '')])
if self.msg_args:
return False
return True
def _check_javascript_lint(self):
"""Check javascript lint
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for js_file_rel in self.filter_files_ext('js', relpath=True):
js_file = os.path.join(self.module_path, js_file_rel)
errors = self.check_js_lint(js_file, self.config.jslintrc)
for error in errors:
self.msg_args.append((js_file_rel + error,))
if self.msg_args:
return False
return True
def _check_deprecated_data_xml_node(self):
"""Check deprecated <data> xml node inside <odoo> xml node
:return: False if found <data> xml node inside <odoo> xml node"""
xml_files = self.filter_files_ext('xml')
self.msg_args = []
for xml_file in xml_files:
doc = self.parse_xml(os.path.join(self.module_path, xml_file))
odoo_nodes = doc.xpath("/odoo") \
if not isinstance(doc, string_types) else []
children, data_node = ((odoo_nodes[0].getchildren(),
odoo_nodes[0].findall('data'))
if odoo_nodes else ([], []))
if len(children) == 1 and len(data_node) == 1:
lineno = odoo_nodes[0].sourceline
self.msg_args.append(("%s:%s" % (xml_file, lineno)))
if self.msg_args:
return False
return True
def _check_deprecated_openerp_xml_node(self):
"""Check deprecated <openerp> xml node
:return: False if exists <openerp> node and
add list of xml files in self.msg_args
"""
xml_files = self.filter_files_ext('xml')
self.msg_args = []
for xml_file in xml_files:
doc = self.parse_xml(os.path.join(self.module_path, xml_file))
openerp_nodes = doc.xpath("/openerp") \
if not isinstance(doc, string_types) else []
if openerp_nodes:
lineno = openerp_nodes[0].sourceline
self.msg_args.append(("%s:%s" % (xml_file, lineno)))
if self.msg_args:
return False
return True
def _check_wrong_tabs_instead_of_spaces(self):
"""Check wrong tabs character instead of four spaces.
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for type_file in self.config.extfiles_to_lint:
for ext_file_rel in self.filter_files_ext(type_file, relpath=True):
ext_file = os.path.join(self.module_path, ext_file_rel)
countline = 0
with open(ext_file, 'rb') as fp:
for line in fp:
countline += 1
line_space_trip = line.lstrip(b' ')
if line_space_trip != line_space_trip.lstrip(b'\t'):
self.msg_args.append(
("%s:%d" % (ext_file_rel, countline)))
if self.msg_args:
return False
return True
def _check_missing_newline_extrafiles(self):
"""Check missing newline in other ext files (.xml, .csv, .po)
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for type_file in self.config.extfiles_to_lint:
for ext_file_rel in self.filter_files_ext(type_file, relpath=True):
ext_file = os.path.join(self.module_path, ext_file_rel)
last_line = ''
# NOTE: SEEK_END just is supported with 'rb' mode for py3
with open(ext_file, 'rb') as fp:
if os.stat(ext_file).st_size > 1:
fp.seek(-2, os.SEEK_END)
last_line = fp.readline()
if not (last_line.endswith(b'\n') or
last_line.endswith(b'\r')):
self.msg_args.append((ext_file_rel,))
if self.msg_args:
return False
return True
def _get_manifest_referenced_files(self):
referenced_files = {}
for data_type in DFTL_MANIFEST_DATA_KEYS:
for fname in self.manifest_dict.get(data_type) or []:
referenced_files[fname] = data_type
return referenced_files
def _get_xml_referenced_files(self):
referenced_files = {}
for data_type in DFTL_MANIFEST_DATA_KEYS:
for fname in self.manifest_dict.get(data_type) or []:
if not fname.endswith('.xml'):
continue
referenced_files.update(
self._get_xml_referenced_files_report(fname, data_type)
)
return referenced_files
def _get_xml_referenced_files_report(self, fname, data_type):
return {
# those files are relative to the addon path
os.path.join(
*record.attrib[attribute].split(os.sep)[1:]
): data_type
for attribute in ['xml', 'xsl']
for record in self.parse_xml(
os.path.join(self.module_path, fname)
)
.xpath('//report[@%s]' % attribute)
}
def _get_module_files(self):
module_files = []
for type_file in self.config.extfiles_convert:
for ext_file_rel in self.filter_files_ext(type_file, relpath=True):
module_files.append(ext_file_rel)
return module_files
def _check_file_not_used(self):
"""Check if a file is not used from manifest"""
module_files = set(self._get_module_files())
referenced_files = set(self._get_manifest_referenced_files()).union(
set(self._get_xml_referenced_files())
)
excluded_dirs = ['static', 'test', 'tests', 'migrations']
no_referenced_files = [
f for f in (module_files - referenced_files)
if f.split(os.path.sep)[0] not in excluded_dirs
]
self.msg_args = no_referenced_files
return not no_referenced_files
def _check_xml_attribute_translatable(self):
"""The xml attribute is missing the translation="off" tag
Example <attribute name="groups">sale.group</attribute>
"""
if (self.linter._all_options['valid_odoo_versions'].config
.valid_odoo_versions != ['8.0']):
return True
self.msg_args = []
for xml_file in self.filter_files_ext('xml', relpath=True):
for record in self.get_xml_records(
os.path.join(self.module_path, xml_file), None,
'//attribute[not(@name="string") and not(@translation)]'):
self.msg_args.append(
("%s:%d" % (xml_file, record.sourceline), 'xml_id'))
if self.msg_args:
return False
return True
    def _check_xml_deprecated_tree_attribute(self):
        """The tree-view declaration is using a deprecated attribute.
        Example <tree string="Partners"></tree>

        Each entry in ``checks`` pairs a deprecated attribute with the
        Odoo versions in which it is still valid; a check applies only
        when it is enabled via ``config.deprecated_tree_attributes`` and
        at least one configured version is newer than its skip set.

        :return: False if deprecated attributes are found, in which case
            self.msg_args will contain the error messages.
        """
        checks = [
            {
                'attr': 'colors',
                'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0', '8.0'},
                'xpath': './/tree[@colors]',
            },
            {
                'attr': 'fonts',
                'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0', '8.0'},
                'xpath': './/tree[@fonts]',
            },
            {
                'attr': 'string',
                'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0'},
                'xpath': './/tree[@string]',
            },
        ]
        valid_versions = set(
            self.linter._all_options['valid_odoo_versions'].config
            .valid_odoo_versions)
        # Keep only the checks enabled by config AND relevant for at
        # least one of the configured target versions.
        applicable_checks = [check for check in checks if (
            check['attr'] in self.config.deprecated_tree_attributes and
            bool(valid_versions - check['skip_versions']))]
        self.msg_args = []
        for xml_file in self.filter_files_ext('xml', relpath=True):
            for record in self.get_xml_records(
                    os.path.join(self.module_path, xml_file),
                    model='ir.ui.view'):
                for check in applicable_checks:
                    if record.xpath(check['xpath']):
                        self.msg_args.append((
                            '%s:%d' % (xml_file, record.sourceline),
                            check['attr']))
        if self.msg_args:
            return False
        return True
def _check_xml_deprecated_qweb_directive(self):
"""Check for use of deprecated QWeb directives t-*-options.
:return: False if deprecated directives are found, in which case
self.msg_args will contain the error messages.
"""
valid_versions = set(self.linter._all_options[
'valid_odoo_versions'].config.valid_odoo_versions)
if not valid_versions & {'10.0', '11.0'}:
return True
deprecated_directives = {
't-esc-options',
't-field-options',
't-raw-options',
}
directive_attrs = '|'.join('@%s' % d for d in deprecated_directives)
xpath = '|'.join(
'/%s//template//*[%s]' % (tag, directive_attrs)
for tag in ('odoo', 'openerp')
)
self.msg_args = []
for xml_file in self.filter_files_ext('xml', relpath=False):
doc = self.parse_xml(xml_file)
if isinstance(doc, string_types):
continue
for node in doc.xpath(xpath):
# Find which directive was used exactly.
directive = next(
iter(set(node.attrib) & deprecated_directives))
self.msg_args.append((
'%s:%d' % (xml_file, node.sourceline), directive))
return not bool(self.msg_args)
|
flexible
|
{
"blob_id": "9f34f94422f4847859e9111f34ade2e1274cb543",
"index": 8775,
"step-1": "<mask token>\n\n\nclass ModuleChecker(misc.WrapperModuleChecker):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @utils.check_messages('consider-merging-classes-inherited')\n def visit_assign(self, node):\n if not self.odoo_node:\n return\n if not self.linter.is_message_enabled(\n 'consider-merging-classes-inherited', node.lineno):\n return\n node_left = node.targets[0]\n if not isinstance(node_left, astroid.node_classes.AssignName\n ) or node_left.name not in ('_inherit', '_name') or not isinstance(\n node.value, astroid.node_classes.Const) or not isinstance(node.\n parent, astroid.ClassDef):\n return\n if node_left.name == '_name':\n node.parent.odoo_attribute_name = node.value.value\n return\n _name = getattr(node.parent, 'odoo_attribute_name', None)\n _inherit = node.value.value\n if _name and _name != _inherit:\n return\n key = self.odoo_node, _inherit\n node.file = self.linter.current_file\n self.inh_dup.setdefault(key, []).append(node)\n\n def _build_whitelist_module_patterns(self):\n known_patterns = []\n for known_pattern in self.config.import_name_whitelist:\n pattern = known_pattern.replace('*', '.*').replace('?', '.?')\n known_patterns.append(re.compile('^' + pattern + '$'))\n return known_patterns\n\n def open(self):\n \"\"\"Define variables to use cache\"\"\"\n self.inh_dup = {}\n patterns = self._build_whitelist_module_patterns()\n self._whitelist_module_patterns = patterns\n super(ModuleChecker, self).open()\n\n def close(self):\n \"\"\"Final process get all cached values and add messages\"\"\"\n for (odoo_node, class_dup_name), nodes in self.inh_dup.items():\n if len(nodes) == 1:\n continue\n path_nodes = []\n for node in nodes[1:]:\n relpath = os.path.relpath(node.file, os.path.dirname(\n odoo_node.file))\n path_nodes.append('%s:%d' % (relpath, node.lineno))\n self.add_message('consider-merging-classes-inherited', node=\n nodes[0], args=(class_dup_name, ', '.join(path_nodes)))\n <mask token>\n\n def 
check_odoo_relative_import(self, node):\n if self.odoo_module_name in self._get_odoo_module_imported(node):\n self.add_message('odoo-addons-relative-import', node=node, args\n =self.odoo_module_name)\n <mask token>\n <mask token>\n\n def _is_module_name_in_whitelist(self, module_name):\n parts = module_name.split('.')\n module_names_to_check = ['.'.join(parts[:first_k]) for first_k in\n range(len(parts), 0, -1)]\n for module_name_to_check in module_names_to_check:\n for pattern in self._whitelist_module_patterns:\n if pattern.match(module_name_to_check):\n return True\n return False\n <mask token>\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error', 'missing-manifest-dependency')\n def visit_importfrom(self, node):\n self.check_odoo_relative_import(node)\n if isinstance(node.scope(), astroid.Module):\n package = node.modname\n self._check_imported_packages(node, package)\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error', 'missing-manifest-dependency')\n def visit_import(self, node):\n self.check_odoo_relative_import(node)\n for name, _ in node.names:\n if isinstance(node.scope(), astroid.Module):\n self._check_imported_packages(node, name)\n\n @utils.check_messages('except-pass')\n def visit_tryexcept(self, node):\n \"\"\"Visit block try except\"\"\"\n for handler in node.handlers:\n if not handler.name and len(handler.body) == 1 and isinstance(\n handler.body[0], astroid.node_classes.Pass):\n self.add_message('except-pass', node=handler)\n\n def _check_rst_syntax_error(self):\n \"\"\"Check if rst file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n rst_files = self.filter_files_ext('rst')\n self.msg_args = []\n for rst_file in rst_files:\n errors = self.check_rst_syntax(os.path.join(self.module_path,\n rst_file))\n for error in errors:\n msg = error.full_message\n res = re.search(\n 'No directive entry for \"([\\\\w|\\\\-]+)\"|Unknown 
directive type \"([\\\\w|\\\\-]+)\"|No role entry for \"([\\\\w|\\\\-]+)\"|Unknown interpreted text role \"([\\\\w|\\\\-]+)\"'\n , msg)\n if res:\n continue\n self.msg_args.append(('%s:%d' % (rst_file, error.line or 0),\n msg.strip('\\n').replace('\\n', '|')))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _check_xml_syntax_error(self):\n \"\"\"Check if xml file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n result = self.parse_xml(os.path.join(self.module_path, xml_file))\n if isinstance(result, string_types):\n self.msg_args.append((xml_file, result.strip('\\n').replace(\n '\\n', '|')))\n if self.msg_args:\n return False\n return True\n <mask token>\n <mask token>\n <mask token>\n\n def _check_redundant_modulename_xml(self):\n \"\"\"Check redundant module name in xml file.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file_rel in self.filter_files_ext('xml', relpath=True):\n xml_file = os.path.join(self.module_path, xml_file_rel)\n for xml_id, lineno in self.get_xml_redundant_module_name(xml_file,\n self.module):\n self.msg_args.append(('%s:%d' % (xml_file_rel, lineno), xml_id)\n )\n if self.msg_args:\n return False\n return True\n <mask token>\n <mask token>\n <mask token>\n\n def _check_dangerous_filter_wo_user(self):\n \"\"\"Check dangerous filter without a user assigned.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n ir_filter_records = self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='ir.filters')\n for ir_filter_record in ir_filter_records:\n ir_filter_fields = ir_filter_record.xpath(\n \"field[@name='name' or @name='user_id']\")\n if ir_filter_fields and len(ir_filter_fields) == 1:\n 
self.msg_args = '%s:%d' % (xml_file, ir_filter_record.\n sourceline), ir_filter_record.get('id')\n return False\n return True\n <mask token>\n\n @staticmethod\n def _is_replaced_field(view):\n try:\n arch = view.xpath(\"field[@name='arch' and @type='xml'][1]\")[0]\n except IndexError:\n return None\n replaces = arch.xpath(\n \".//field[@name='name' and @position='replace'][1]\") + arch.xpath(\n \".//xpath[@position='replace'][1]\")\n return bool(replaces)\n\n def _check_dangerous_view_replace_wo_priority(self):\n \"\"\"Check dangerous view defined with low priority\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n views = self.get_xml_records(os.path.join(self.module_path,\n xml_file), model='ir.ui.view')\n for view in views:\n priority = self._get_priority(view)\n is_replaced_field = self._is_replaced_field(view)\n if is_replaced_field and priority < self.config.min_priority:\n self.msg_args.append(('%s:%s' % (xml_file, view.\n sourceline), priority, self.config.min_priority))\n if self.msg_args:\n return False\n return True\n\n def _check_create_user_wo_reset_password(self):\n \"\"\"Check xml records of user without the context\n 'context=\"{'no_reset_password': True}\"'\n This context avoid send email and mail log warning\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n user_records = self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='res.users')\n self.msg_args.extend([('%s:%s' % (xml_file, user_record.\n sourceline)) for user_record in user_records if user_record\n .xpath(\"field[@name='name']\") and 'no_reset_password' not in\n (user_record.get('context') or '')])\n if self.msg_args:\n return False\n return True\n\n def _check_javascript_lint(self):\n \"\"\"Check javascript lint\n 
:return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for js_file_rel in self.filter_files_ext('js', relpath=True):\n js_file = os.path.join(self.module_path, js_file_rel)\n errors = self.check_js_lint(js_file, self.config.jslintrc)\n for error in errors:\n self.msg_args.append((js_file_rel + error,))\n if self.msg_args:\n return False\n return True\n\n def _check_deprecated_data_xml_node(self):\n \"\"\"Check deprecated <data> xml node inside <odoo> xml node\n :return: False if found <data> xml node inside <odoo> xml node\"\"\"\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n odoo_nodes = doc.xpath('/odoo') if not isinstance(doc, string_types\n ) else []\n children, data_node = (odoo_nodes[0].getchildren(), odoo_nodes[\n 0].findall('data')) if odoo_nodes else ([], [])\n if len(children) == 1 and len(data_node) == 1:\n lineno = odoo_nodes[0].sourceline\n self.msg_args.append('%s:%s' % (xml_file, lineno))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _check_wrong_tabs_instead_of_spaces(self):\n \"\"\"Check wrong tabs character instead of four spaces.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for type_file in self.config.extfiles_to_lint:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n ext_file = os.path.join(self.module_path, ext_file_rel)\n countline = 0\n with open(ext_file, 'rb') as fp:\n for line in fp:\n countline += 1\n line_space_trip = line.lstrip(b' ')\n if line_space_trip != line_space_trip.lstrip(b'\\t'):\n self.msg_args.append('%s:%d' % (ext_file_rel,\n countline))\n if self.msg_args:\n return False\n return True\n <mask token>\n <mask token>\n\n def _get_xml_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in 
(self.manifest_dict.get(data_type) or []):\n if not fname.endswith('.xml'):\n continue\n referenced_files.update(self.\n _get_xml_referenced_files_report(fname, data_type))\n return referenced_files\n <mask token>\n\n def _get_module_files(self):\n module_files = []\n for type_file in self.config.extfiles_convert:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n module_files.append(ext_file_rel)\n return module_files\n <mask token>\n\n def _check_xml_attribute_translatable(self):\n \"\"\"The xml attribute is missing the translation=\"off\" tag\n Example <attribute name=\"groups\">sale.group</attribute>\n \"\"\"\n if self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions != ['8.0']:\n return True\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file), None,\n '//attribute[not(@name=\"string\") and not(@translation)]'):\n self.msg_args.append(('%s:%d' % (xml_file, record.\n sourceline), 'xml_id'))\n if self.msg_args:\n return False\n return True\n\n def _check_xml_deprecated_tree_attribute(self):\n \"\"\"The tree-view declaration is using a deprecated attribute.\n Example <tree string=\"Partners\"></tree>\n \"\"\"\n checks = [{'attr': 'colors', 'skip_versions': {'4.2', '5.0', '6.0',\n '6.1', '7.0', '8.0'}, 'xpath': './/tree[@colors]'}, {'attr':\n 'fonts', 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0',\n '8.0'}, 'xpath': './/tree[@fonts]'}, {'attr': 'string',\n 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0'}, 'xpath':\n './/tree[@string]'}]\n valid_versions = set(self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions)\n applicable_checks = [check for check in checks if check['attr'] in\n self.config.deprecated_tree_attributes and bool(valid_versions -\n check['skip_versions'])]\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in 
self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='ir.ui.view'):\n for check in applicable_checks:\n if record.xpath(check['xpath']):\n self.msg_args.append(('%s:%d' % (xml_file, record.\n sourceline), check['attr']))\n if self.msg_args:\n return False\n return True\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ModuleChecker(misc.WrapperModuleChecker):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @utils.check_messages('consider-merging-classes-inherited')\n def visit_assign(self, node):\n if not self.odoo_node:\n return\n if not self.linter.is_message_enabled(\n 'consider-merging-classes-inherited', node.lineno):\n return\n node_left = node.targets[0]\n if not isinstance(node_left, astroid.node_classes.AssignName\n ) or node_left.name not in ('_inherit', '_name') or not isinstance(\n node.value, astroid.node_classes.Const) or not isinstance(node.\n parent, astroid.ClassDef):\n return\n if node_left.name == '_name':\n node.parent.odoo_attribute_name = node.value.value\n return\n _name = getattr(node.parent, 'odoo_attribute_name', None)\n _inherit = node.value.value\n if _name and _name != _inherit:\n return\n key = self.odoo_node, _inherit\n node.file = self.linter.current_file\n self.inh_dup.setdefault(key, []).append(node)\n\n def _build_whitelist_module_patterns(self):\n known_patterns = []\n for known_pattern in self.config.import_name_whitelist:\n pattern = known_pattern.replace('*', '.*').replace('?', '.?')\n known_patterns.append(re.compile('^' + pattern + '$'))\n return known_patterns\n\n def open(self):\n \"\"\"Define variables to use cache\"\"\"\n self.inh_dup = {}\n patterns = self._build_whitelist_module_patterns()\n self._whitelist_module_patterns = patterns\n super(ModuleChecker, self).open()\n\n def close(self):\n \"\"\"Final process get all cached values and add messages\"\"\"\n for (odoo_node, class_dup_name), nodes in self.inh_dup.items():\n if len(nodes) == 1:\n continue\n path_nodes = []\n for node in nodes[1:]:\n relpath = os.path.relpath(node.file, os.path.dirname(\n odoo_node.file))\n path_nodes.append('%s:%d' % (relpath, node.lineno))\n self.add_message('consider-merging-classes-inherited', node=\n nodes[0], args=(class_dup_name, ', '.join(path_nodes)))\n <mask token>\n\n def 
check_odoo_relative_import(self, node):\n if self.odoo_module_name in self._get_odoo_module_imported(node):\n self.add_message('odoo-addons-relative-import', node=node, args\n =self.odoo_module_name)\n <mask token>\n <mask token>\n\n def _is_module_name_in_whitelist(self, module_name):\n parts = module_name.split('.')\n module_names_to_check = ['.'.join(parts[:first_k]) for first_k in\n range(len(parts), 0, -1)]\n for module_name_to_check in module_names_to_check:\n for pattern in self._whitelist_module_patterns:\n if pattern.match(module_name_to_check):\n return True\n return False\n <mask token>\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error', 'missing-manifest-dependency')\n def visit_importfrom(self, node):\n self.check_odoo_relative_import(node)\n if isinstance(node.scope(), astroid.Module):\n package = node.modname\n self._check_imported_packages(node, package)\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error', 'missing-manifest-dependency')\n def visit_import(self, node):\n self.check_odoo_relative_import(node)\n for name, _ in node.names:\n if isinstance(node.scope(), astroid.Module):\n self._check_imported_packages(node, name)\n\n @utils.check_messages('except-pass')\n def visit_tryexcept(self, node):\n \"\"\"Visit block try except\"\"\"\n for handler in node.handlers:\n if not handler.name and len(handler.body) == 1 and isinstance(\n handler.body[0], astroid.node_classes.Pass):\n self.add_message('except-pass', node=handler)\n\n def _check_rst_syntax_error(self):\n \"\"\"Check if rst file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n rst_files = self.filter_files_ext('rst')\n self.msg_args = []\n for rst_file in rst_files:\n errors = self.check_rst_syntax(os.path.join(self.module_path,\n rst_file))\n for error in errors:\n msg = error.full_message\n res = re.search(\n 'No directive entry for \"([\\\\w|\\\\-]+)\"|Unknown 
directive type \"([\\\\w|\\\\-]+)\"|No role entry for \"([\\\\w|\\\\-]+)\"|Unknown interpreted text role \"([\\\\w|\\\\-]+)\"'\n , msg)\n if res:\n continue\n self.msg_args.append(('%s:%d' % (rst_file, error.line or 0),\n msg.strip('\\n').replace('\\n', '|')))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _check_xml_syntax_error(self):\n \"\"\"Check if xml file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n result = self.parse_xml(os.path.join(self.module_path, xml_file))\n if isinstance(result, string_types):\n self.msg_args.append((xml_file, result.strip('\\n').replace(\n '\\n', '|')))\n if self.msg_args:\n return False\n return True\n\n def _get_duplicate_xml_record_id(self, records):\n \"\"\"Get duplicated records based on attribute id\n :param records list: List of lxml.etree.Element \"<record\"\n :return: Duplicated items.\n e.g. 
{record.id: [record_node1, record_node2]}\n :rtype: dict\n \"\"\"\n all_records = {}\n for record in records:\n record_id = '%s/%s_noupdate_%s' % (record.attrib.get('section',\n ''), record.attrib.get('id', ''), record.getparent().attrib\n .get('noupdate', '0'))\n all_records.setdefault(record_id, []).append(record)\n records = {}\n for key, items in all_records.items():\n if not len(items) < 2:\n records[key] = items\n return records\n <mask token>\n <mask token>\n\n def _check_redundant_modulename_xml(self):\n \"\"\"Check redundant module name in xml file.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file_rel in self.filter_files_ext('xml', relpath=True):\n xml_file = os.path.join(self.module_path, xml_file_rel)\n for xml_id, lineno in self.get_xml_redundant_module_name(xml_file,\n self.module):\n self.msg_args.append(('%s:%d' % (xml_file_rel, lineno), xml_id)\n )\n if self.msg_args:\n return False\n return True\n <mask token>\n <mask token>\n\n def _check_duplicate_xml_fields(self):\n \"\"\"Check duplicate field in all record of xml files of a odoo module.\n Important note: this check does not work with inherited views.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file)):\n if record.xpath('field[@name=\"inherit_id\"]'):\n continue\n for xpath in ['field', 'field/*/field',\n 'field/*/field/tree/field', 'field/*/field/form/field']:\n for name, fobjs in self._get_duplicate_xml_fields(record\n .xpath(xpath)).items():\n self.msg_args.append(('%s:%d' % (xml_file, fobjs[0]\n .sourceline), name[0], ', '.join([str(fobj.\n sourceline) for fobj in fobjs[1:]])))\n if self.msg_args:\n return False\n return True\n\n def _check_dangerous_filter_wo_user(self):\n \"\"\"Check dangerous filter without a user 
assigned.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n ir_filter_records = self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='ir.filters')\n for ir_filter_record in ir_filter_records:\n ir_filter_fields = ir_filter_record.xpath(\n \"field[@name='name' or @name='user_id']\")\n if ir_filter_fields and len(ir_filter_fields) == 1:\n self.msg_args = '%s:%d' % (xml_file, ir_filter_record.\n sourceline), ir_filter_record.get('id')\n return False\n return True\n <mask token>\n\n @staticmethod\n def _is_replaced_field(view):\n try:\n arch = view.xpath(\"field[@name='arch' and @type='xml'][1]\")[0]\n except IndexError:\n return None\n replaces = arch.xpath(\n \".//field[@name='name' and @position='replace'][1]\") + arch.xpath(\n \".//xpath[@position='replace'][1]\")\n return bool(replaces)\n\n def _check_dangerous_view_replace_wo_priority(self):\n \"\"\"Check dangerous view defined with low priority\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n views = self.get_xml_records(os.path.join(self.module_path,\n xml_file), model='ir.ui.view')\n for view in views:\n priority = self._get_priority(view)\n is_replaced_field = self._is_replaced_field(view)\n if is_replaced_field and priority < self.config.min_priority:\n self.msg_args.append(('%s:%s' % (xml_file, view.\n sourceline), priority, self.config.min_priority))\n if self.msg_args:\n return False\n return True\n\n def _check_create_user_wo_reset_password(self):\n \"\"\"Check xml records of user without the context\n 'context=\"{'no_reset_password': True}\"'\n This context avoid send email and mail log warning\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for 
xml_file in xml_files:\n user_records = self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='res.users')\n self.msg_args.extend([('%s:%s' % (xml_file, user_record.\n sourceline)) for user_record in user_records if user_record\n .xpath(\"field[@name='name']\") and 'no_reset_password' not in\n (user_record.get('context') or '')])\n if self.msg_args:\n return False\n return True\n\n def _check_javascript_lint(self):\n \"\"\"Check javascript lint\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for js_file_rel in self.filter_files_ext('js', relpath=True):\n js_file = os.path.join(self.module_path, js_file_rel)\n errors = self.check_js_lint(js_file, self.config.jslintrc)\n for error in errors:\n self.msg_args.append((js_file_rel + error,))\n if self.msg_args:\n return False\n return True\n\n def _check_deprecated_data_xml_node(self):\n \"\"\"Check deprecated <data> xml node inside <odoo> xml node\n :return: False if found <data> xml node inside <odoo> xml node\"\"\"\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n odoo_nodes = doc.xpath('/odoo') if not isinstance(doc, string_types\n ) else []\n children, data_node = (odoo_nodes[0].getchildren(), odoo_nodes[\n 0].findall('data')) if odoo_nodes else ([], [])\n if len(children) == 1 and len(data_node) == 1:\n lineno = odoo_nodes[0].sourceline\n self.msg_args.append('%s:%s' % (xml_file, lineno))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _check_wrong_tabs_instead_of_spaces(self):\n \"\"\"Check wrong tabs character instead of four spaces.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for type_file in self.config.extfiles_to_lint:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n ext_file = os.path.join(self.module_path, 
ext_file_rel)\n countline = 0\n with open(ext_file, 'rb') as fp:\n for line in fp:\n countline += 1\n line_space_trip = line.lstrip(b' ')\n if line_space_trip != line_space_trip.lstrip(b'\\t'):\n self.msg_args.append('%s:%d' % (ext_file_rel,\n countline))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _get_manifest_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in (self.manifest_dict.get(data_type) or []):\n referenced_files[fname] = data_type\n return referenced_files\n\n def _get_xml_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in (self.manifest_dict.get(data_type) or []):\n if not fname.endswith('.xml'):\n continue\n referenced_files.update(self.\n _get_xml_referenced_files_report(fname, data_type))\n return referenced_files\n\n def _get_xml_referenced_files_report(self, fname, data_type):\n return {os.path.join(*record.attrib[attribute].split(os.sep)[1:]):\n data_type for attribute in ['xml', 'xsl'] for record in self.\n parse_xml(os.path.join(self.module_path, fname)).xpath(\n '//report[@%s]' % attribute)}\n\n def _get_module_files(self):\n module_files = []\n for type_file in self.config.extfiles_convert:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n module_files.append(ext_file_rel)\n return module_files\n <mask token>\n\n def _check_xml_attribute_translatable(self):\n \"\"\"The xml attribute is missing the translation=\"off\" tag\n Example <attribute name=\"groups\">sale.group</attribute>\n \"\"\"\n if self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions != ['8.0']:\n return True\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file), None,\n '//attribute[not(@name=\"string\") and not(@translation)]'):\n self.msg_args.append(('%s:%d' % (xml_file, record.\n 
sourceline), 'xml_id'))\n if self.msg_args:\n return False\n return True\n\n def _check_xml_deprecated_tree_attribute(self):\n \"\"\"The tree-view declaration is using a deprecated attribute.\n Example <tree string=\"Partners\"></tree>\n \"\"\"\n checks = [{'attr': 'colors', 'skip_versions': {'4.2', '5.0', '6.0',\n '6.1', '7.0', '8.0'}, 'xpath': './/tree[@colors]'}, {'attr':\n 'fonts', 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0',\n '8.0'}, 'xpath': './/tree[@fonts]'}, {'attr': 'string',\n 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0'}, 'xpath':\n './/tree[@string]'}]\n valid_versions = set(self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions)\n applicable_checks = [check for check in checks if check['attr'] in\n self.config.deprecated_tree_attributes and bool(valid_versions -\n check['skip_versions'])]\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='ir.ui.view'):\n for check in applicable_checks:\n if record.xpath(check['xpath']):\n self.msg_args.append(('%s:%d' % (xml_file, record.\n sourceline), check['attr']))\n if self.msg_args:\n return False\n return True\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ModuleChecker(misc.WrapperModuleChecker):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @utils.check_messages('consider-merging-classes-inherited')\n def visit_assign(self, node):\n if not self.odoo_node:\n return\n if not self.linter.is_message_enabled(\n 'consider-merging-classes-inherited', node.lineno):\n return\n node_left = node.targets[0]\n if not isinstance(node_left, astroid.node_classes.AssignName\n ) or node_left.name not in ('_inherit', '_name') or not isinstance(\n node.value, astroid.node_classes.Const) or not isinstance(node.\n parent, astroid.ClassDef):\n return\n if node_left.name == '_name':\n node.parent.odoo_attribute_name = node.value.value\n return\n _name = getattr(node.parent, 'odoo_attribute_name', None)\n _inherit = node.value.value\n if _name and _name != _inherit:\n return\n key = self.odoo_node, _inherit\n node.file = self.linter.current_file\n self.inh_dup.setdefault(key, []).append(node)\n\n def _build_whitelist_module_patterns(self):\n known_patterns = []\n for known_pattern in self.config.import_name_whitelist:\n pattern = known_pattern.replace('*', '.*').replace('?', '.?')\n known_patterns.append(re.compile('^' + pattern + '$'))\n return known_patterns\n\n def open(self):\n \"\"\"Define variables to use cache\"\"\"\n self.inh_dup = {}\n patterns = self._build_whitelist_module_patterns()\n self._whitelist_module_patterns = patterns\n super(ModuleChecker, self).open()\n\n def close(self):\n \"\"\"Final process get all cached values and add messages\"\"\"\n for (odoo_node, class_dup_name), nodes in self.inh_dup.items():\n if len(nodes) == 1:\n continue\n path_nodes = []\n for node in nodes[1:]:\n relpath = os.path.relpath(node.file, os.path.dirname(\n odoo_node.file))\n path_nodes.append('%s:%d' % (relpath, node.lineno))\n self.add_message('consider-merging-classes-inherited', node=\n nodes[0], args=(class_dup_name, ', '.join(path_nodes)))\n <mask token>\n\n def 
check_odoo_relative_import(self, node):\n if self.odoo_module_name in self._get_odoo_module_imported(node):\n self.add_message('odoo-addons-relative-import', node=node, args\n =self.odoo_module_name)\n\n @staticmethod\n def _is_absolute_import(node, name):\n modnode = node.root()\n importedmodnode = ModuleChecker._get_imported_module(node, name)\n if (importedmodnode and importedmodnode.file and modnode is not\n importedmodnode and importedmodnode.name != name):\n return True\n return False\n <mask token>\n\n def _is_module_name_in_whitelist(self, module_name):\n parts = module_name.split('.')\n module_names_to_check = ['.'.join(parts[:first_k]) for first_k in\n range(len(parts), 0, -1)]\n for module_name_to_check in module_names_to_check:\n for pattern in self._whitelist_module_patterns:\n if pattern.match(module_name_to_check):\n return True\n return False\n\n def _check_imported_packages(self, node, module_name):\n \"\"\"Check if the import node is a external dependency to validate it\"\"\"\n if not module_name:\n return\n if not self.manifest_dict:\n return\n if not isinstance(node.parent, astroid.Module):\n return\n if self._is_absolute_import(node, module_name):\n return\n if self._is_module_name_in_whitelist(module_name):\n return\n isort_obj = isort.SortImports(file_contents='')\n import_category = isort_obj.place_module(module_name)\n if import_category not in ('FIRSTPARTY', 'THIRDPARTY'):\n return\n relpath = os.path.relpath(node.parent.file, os.path.dirname(self.\n manifest_file))\n if os.path.dirname(relpath) == 'tests':\n return\n self.add_message('missing-import-error', node=node, args=(module_name,)\n )\n ext_deps = self.manifest_dict.get('external_dependencies') or {}\n py_ext_deps = ext_deps.get('python') or []\n if isinstance(node, astroid.ImportFrom) and (node.level or 0) >= 1:\n return\n if module_name not in py_ext_deps and module_name.split('.')[0\n ] not in py_ext_deps:\n self.add_message('missing-manifest-dependency', node=node, args\n 
=(module_name,))\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error', 'missing-manifest-dependency')\n def visit_importfrom(self, node):\n self.check_odoo_relative_import(node)\n if isinstance(node.scope(), astroid.Module):\n package = node.modname\n self._check_imported_packages(node, package)\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error', 'missing-manifest-dependency')\n def visit_import(self, node):\n self.check_odoo_relative_import(node)\n for name, _ in node.names:\n if isinstance(node.scope(), astroid.Module):\n self._check_imported_packages(node, name)\n\n @utils.check_messages('except-pass')\n def visit_tryexcept(self, node):\n \"\"\"Visit block try except\"\"\"\n for handler in node.handlers:\n if not handler.name and len(handler.body) == 1 and isinstance(\n handler.body[0], astroid.node_classes.Pass):\n self.add_message('except-pass', node=handler)\n\n def _check_rst_syntax_error(self):\n \"\"\"Check if rst file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n rst_files = self.filter_files_ext('rst')\n self.msg_args = []\n for rst_file in rst_files:\n errors = self.check_rst_syntax(os.path.join(self.module_path,\n rst_file))\n for error in errors:\n msg = error.full_message\n res = re.search(\n 'No directive entry for \"([\\\\w|\\\\-]+)\"|Unknown directive type \"([\\\\w|\\\\-]+)\"|No role entry for \"([\\\\w|\\\\-]+)\"|Unknown interpreted text role \"([\\\\w|\\\\-]+)\"'\n , msg)\n if res:\n continue\n self.msg_args.append(('%s:%d' % (rst_file, error.line or 0),\n msg.strip('\\n').replace('\\n', '|')))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _check_xml_syntax_error(self):\n \"\"\"Check if xml file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n result = 
self.parse_xml(os.path.join(self.module_path, xml_file))\n if isinstance(result, string_types):\n self.msg_args.append((xml_file, result.strip('\\n').replace(\n '\\n', '|')))\n if self.msg_args:\n return False\n return True\n\n def _get_duplicate_xml_record_id(self, records):\n \"\"\"Get duplicated records based on attribute id\n :param records list: List of lxml.etree.Element \"<record\"\n :return: Duplicated items.\n e.g. {record.id: [record_node1, record_node2]}\n :rtype: dict\n \"\"\"\n all_records = {}\n for record in records:\n record_id = '%s/%s_noupdate_%s' % (record.attrib.get('section',\n ''), record.attrib.get('id', ''), record.getparent().attrib\n .get('noupdate', '0'))\n all_records.setdefault(record_id, []).append(record)\n records = {}\n for key, items in all_records.items():\n if not len(items) < 2:\n records[key] = items\n return records\n\n def _check_duplicate_xml_record_id(self):\n \"\"\"Check duplicated XML-IDs inside of the files of\n each manifest-section treated them separately\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_records = []\n for fname, section in self._get_manifest_referenced_files().items():\n if os.path.splitext(fname)[1].lower() != '.xml':\n continue\n fname = os.path.join(self.module_path, fname)\n for xml_record in self.get_xml_records(fname):\n xml_record.attrib['section'] = section\n xml_records.append(xml_record)\n for name, fobjs in self._get_duplicate_xml_record_id(xml_records\n ).items():\n self.msg_args.append(('%s:%d' % (os.path.relpath(fobjs[0].base,\n self.module_path), fobjs[0].sourceline), name, ', '.join([(\n os.path.relpath(fobj.base, self.module_path) + ':' + str(\n fobj.sourceline)) for fobj in fobjs[1:]])))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _check_redundant_modulename_xml(self):\n \"\"\"Check redundant module name in xml file.\n :return: False if exists errors and\n add list of errors in self.msg_args\n 
\"\"\"\n self.msg_args = []\n for xml_file_rel in self.filter_files_ext('xml', relpath=True):\n xml_file = os.path.join(self.module_path, xml_file_rel)\n for xml_id, lineno in self.get_xml_redundant_module_name(xml_file,\n self.module):\n self.msg_args.append(('%s:%d' % (xml_file_rel, lineno), xml_id)\n )\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _get_duplicate_xml_fields(self, fields):\n \"\"\"Get duplicated xml fields based on attribute name\n :param fields list: List of lxml.etree.Element \"<field\"\n :return: Duplicated items.\n e.g. {field.name: [field_node1, field_node2]}\n :rtype: dict\n \"\"\"\n all_fields = {}\n for field in fields:\n field_xml = field.attrib.get('name')\n if not field_xml:\n continue\n all_fields.setdefault((field_xml, field.attrib.get('context'),\n field.attrib.get('filter_domain'), field.getparent()), []\n ).append(field)\n return dict(((name, context, filter_domain, parent_node), nodes) for\n (name, context, filter_domain, parent_node), nodes in\n all_fields.items() if len(nodes) >= 2)\n\n def _check_duplicate_xml_fields(self):\n \"\"\"Check duplicate field in all record of xml files of a odoo module.\n Important note: this check does not work with inherited views.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file)):\n if record.xpath('field[@name=\"inherit_id\"]'):\n continue\n for xpath in ['field', 'field/*/field',\n 'field/*/field/tree/field', 'field/*/field/form/field']:\n for name, fobjs in self._get_duplicate_xml_fields(record\n .xpath(xpath)).items():\n self.msg_args.append(('%s:%d' % (xml_file, fobjs[0]\n .sourceline), name[0], ', '.join([str(fobj.\n sourceline) for fobj in fobjs[1:]])))\n if self.msg_args:\n return False\n return True\n\n def _check_dangerous_filter_wo_user(self):\n \"\"\"Check 
dangerous filter without a user assigned.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n ir_filter_records = self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='ir.filters')\n for ir_filter_record in ir_filter_records:\n ir_filter_fields = ir_filter_record.xpath(\n \"field[@name='name' or @name='user_id']\")\n if ir_filter_fields and len(ir_filter_fields) == 1:\n self.msg_args = '%s:%d' % (xml_file, ir_filter_record.\n sourceline), ir_filter_record.get('id')\n return False\n return True\n <mask token>\n\n @staticmethod\n def _is_replaced_field(view):\n try:\n arch = view.xpath(\"field[@name='arch' and @type='xml'][1]\")[0]\n except IndexError:\n return None\n replaces = arch.xpath(\n \".//field[@name='name' and @position='replace'][1]\") + arch.xpath(\n \".//xpath[@position='replace'][1]\")\n return bool(replaces)\n\n def _check_dangerous_view_replace_wo_priority(self):\n \"\"\"Check dangerous view defined with low priority\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n views = self.get_xml_records(os.path.join(self.module_path,\n xml_file), model='ir.ui.view')\n for view in views:\n priority = self._get_priority(view)\n is_replaced_field = self._is_replaced_field(view)\n if is_replaced_field and priority < self.config.min_priority:\n self.msg_args.append(('%s:%s' % (xml_file, view.\n sourceline), priority, self.config.min_priority))\n if self.msg_args:\n return False\n return True\n\n def _check_create_user_wo_reset_password(self):\n \"\"\"Check xml records of user without the context\n 'context=\"{'no_reset_password': True}\"'\n This context avoid send email and mail log warning\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = 
self.filter_files_ext('xml')\n for xml_file in xml_files:\n user_records = self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='res.users')\n self.msg_args.extend([('%s:%s' % (xml_file, user_record.\n sourceline)) for user_record in user_records if user_record\n .xpath(\"field[@name='name']\") and 'no_reset_password' not in\n (user_record.get('context') or '')])\n if self.msg_args:\n return False\n return True\n\n def _check_javascript_lint(self):\n \"\"\"Check javascript lint\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for js_file_rel in self.filter_files_ext('js', relpath=True):\n js_file = os.path.join(self.module_path, js_file_rel)\n errors = self.check_js_lint(js_file, self.config.jslintrc)\n for error in errors:\n self.msg_args.append((js_file_rel + error,))\n if self.msg_args:\n return False\n return True\n\n def _check_deprecated_data_xml_node(self):\n \"\"\"Check deprecated <data> xml node inside <odoo> xml node\n :return: False if found <data> xml node inside <odoo> xml node\"\"\"\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n odoo_nodes = doc.xpath('/odoo') if not isinstance(doc, string_types\n ) else []\n children, data_node = (odoo_nodes[0].getchildren(), odoo_nodes[\n 0].findall('data')) if odoo_nodes else ([], [])\n if len(children) == 1 and len(data_node) == 1:\n lineno = odoo_nodes[0].sourceline\n self.msg_args.append('%s:%s' % (xml_file, lineno))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _check_wrong_tabs_instead_of_spaces(self):\n \"\"\"Check wrong tabs character instead of four spaces.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for type_file in self.config.extfiles_to_lint:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n ext_file = 
os.path.join(self.module_path, ext_file_rel)\n countline = 0\n with open(ext_file, 'rb') as fp:\n for line in fp:\n countline += 1\n line_space_trip = line.lstrip(b' ')\n if line_space_trip != line_space_trip.lstrip(b'\\t'):\n self.msg_args.append('%s:%d' % (ext_file_rel,\n countline))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _get_manifest_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in (self.manifest_dict.get(data_type) or []):\n referenced_files[fname] = data_type\n return referenced_files\n\n def _get_xml_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in (self.manifest_dict.get(data_type) or []):\n if not fname.endswith('.xml'):\n continue\n referenced_files.update(self.\n _get_xml_referenced_files_report(fname, data_type))\n return referenced_files\n\n def _get_xml_referenced_files_report(self, fname, data_type):\n return {os.path.join(*record.attrib[attribute].split(os.sep)[1:]):\n data_type for attribute in ['xml', 'xsl'] for record in self.\n parse_xml(os.path.join(self.module_path, fname)).xpath(\n '//report[@%s]' % attribute)}\n\n def _get_module_files(self):\n module_files = []\n for type_file in self.config.extfiles_convert:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n module_files.append(ext_file_rel)\n return module_files\n\n def _check_file_not_used(self):\n \"\"\"Check if a file is not used from manifest\"\"\"\n module_files = set(self._get_module_files())\n referenced_files = set(self._get_manifest_referenced_files()).union(set\n (self._get_xml_referenced_files()))\n excluded_dirs = ['static', 'test', 'tests', 'migrations']\n no_referenced_files = [f for f in module_files - referenced_files if\n f.split(os.path.sep)[0] not in excluded_dirs]\n self.msg_args = no_referenced_files\n return not no_referenced_files\n\n def _check_xml_attribute_translatable(self):\n \"\"\"The xml 
attribute is missing the translation=\"off\" tag\n Example <attribute name=\"groups\">sale.group</attribute>\n \"\"\"\n if self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions != ['8.0']:\n return True\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file), None,\n '//attribute[not(@name=\"string\") and not(@translation)]'):\n self.msg_args.append(('%s:%d' % (xml_file, record.\n sourceline), 'xml_id'))\n if self.msg_args:\n return False\n return True\n\n def _check_xml_deprecated_tree_attribute(self):\n \"\"\"The tree-view declaration is using a deprecated attribute.\n Example <tree string=\"Partners\"></tree>\n \"\"\"\n checks = [{'attr': 'colors', 'skip_versions': {'4.2', '5.0', '6.0',\n '6.1', '7.0', '8.0'}, 'xpath': './/tree[@colors]'}, {'attr':\n 'fonts', 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0',\n '8.0'}, 'xpath': './/tree[@fonts]'}, {'attr': 'string',\n 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0'}, 'xpath':\n './/tree[@string]'}]\n valid_versions = set(self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions)\n applicable_checks = [check for check in checks if check['attr'] in\n self.config.deprecated_tree_attributes and bool(valid_versions -\n check['skip_versions'])]\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='ir.ui.view'):\n for check in applicable_checks:\n if record.xpath(check['xpath']):\n self.msg_args.append(('%s:%d' % (xml_file, record.\n sourceline), check['attr']))\n if self.msg_args:\n return False\n return True\n <mask token>\n",
"step-4": "<mask token>\n\n\nclass ModuleChecker(misc.WrapperModuleChecker):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @utils.check_messages('consider-merging-classes-inherited')\n def visit_assign(self, node):\n if not self.odoo_node:\n return\n if not self.linter.is_message_enabled(\n 'consider-merging-classes-inherited', node.lineno):\n return\n node_left = node.targets[0]\n if not isinstance(node_left, astroid.node_classes.AssignName\n ) or node_left.name not in ('_inherit', '_name') or not isinstance(\n node.value, astroid.node_classes.Const) or not isinstance(node.\n parent, astroid.ClassDef):\n return\n if node_left.name == '_name':\n node.parent.odoo_attribute_name = node.value.value\n return\n _name = getattr(node.parent, 'odoo_attribute_name', None)\n _inherit = node.value.value\n if _name and _name != _inherit:\n return\n key = self.odoo_node, _inherit\n node.file = self.linter.current_file\n self.inh_dup.setdefault(key, []).append(node)\n\n def _build_whitelist_module_patterns(self):\n known_patterns = []\n for known_pattern in self.config.import_name_whitelist:\n pattern = known_pattern.replace('*', '.*').replace('?', '.?')\n known_patterns.append(re.compile('^' + pattern + '$'))\n return known_patterns\n\n def open(self):\n \"\"\"Define variables to use cache\"\"\"\n self.inh_dup = {}\n patterns = self._build_whitelist_module_patterns()\n self._whitelist_module_patterns = patterns\n super(ModuleChecker, self).open()\n\n def close(self):\n \"\"\"Final process get all cached values and add messages\"\"\"\n for (odoo_node, class_dup_name), nodes in self.inh_dup.items():\n if len(nodes) == 1:\n continue\n path_nodes = []\n for node in nodes[1:]:\n relpath = os.path.relpath(node.file, os.path.dirname(\n odoo_node.file))\n path_nodes.append('%s:%d' % (relpath, node.lineno))\n self.add_message('consider-merging-classes-inherited', node=\n nodes[0], args=(class_dup_name, ', '.join(path_nodes)))\n\n def 
_get_odoo_module_imported(self, node):\n odoo_module = []\n if isinstance(node, astroid.ImportFrom) and ('openerp.addons' in\n node.modname or 'odoo.addons' in node.modname):\n packages = node.modname.split('.')\n if len(packages) >= 3:\n odoo_module.append(packages[2])\n else:\n odoo_module.append(node.names[0][0])\n elif isinstance(node, astroid.Import):\n for name, _ in node.names:\n if 'openerp.addons' not in name and 'odoo.addons' not in name:\n continue\n packages = name.split('.')\n if len(packages) >= 3:\n odoo_module.append(packages[2])\n return odoo_module\n\n def check_odoo_relative_import(self, node):\n if self.odoo_module_name in self._get_odoo_module_imported(node):\n self.add_message('odoo-addons-relative-import', node=node, args\n =self.odoo_module_name)\n\n @staticmethod\n def _is_absolute_import(node, name):\n modnode = node.root()\n importedmodnode = ModuleChecker._get_imported_module(node, name)\n if (importedmodnode and importedmodnode.file and modnode is not\n importedmodnode and importedmodnode.name != name):\n return True\n return False\n\n @staticmethod\n def _get_imported_module(importnode, modname):\n try:\n return importnode.do_import_module(modname)\n except:\n pass\n\n def _is_module_name_in_whitelist(self, module_name):\n parts = module_name.split('.')\n module_names_to_check = ['.'.join(parts[:first_k]) for first_k in\n range(len(parts), 0, -1)]\n for module_name_to_check in module_names_to_check:\n for pattern in self._whitelist_module_patterns:\n if pattern.match(module_name_to_check):\n return True\n return False\n\n def _check_imported_packages(self, node, module_name):\n \"\"\"Check if the import node is a external dependency to validate it\"\"\"\n if not module_name:\n return\n if not self.manifest_dict:\n return\n if not isinstance(node.parent, astroid.Module):\n return\n if self._is_absolute_import(node, module_name):\n return\n if self._is_module_name_in_whitelist(module_name):\n return\n isort_obj = 
isort.SortImports(file_contents='')\n import_category = isort_obj.place_module(module_name)\n if import_category not in ('FIRSTPARTY', 'THIRDPARTY'):\n return\n relpath = os.path.relpath(node.parent.file, os.path.dirname(self.\n manifest_file))\n if os.path.dirname(relpath) == 'tests':\n return\n self.add_message('missing-import-error', node=node, args=(module_name,)\n )\n ext_deps = self.manifest_dict.get('external_dependencies') or {}\n py_ext_deps = ext_deps.get('python') or []\n if isinstance(node, astroid.ImportFrom) and (node.level or 0) >= 1:\n return\n if module_name not in py_ext_deps and module_name.split('.')[0\n ] not in py_ext_deps:\n self.add_message('missing-manifest-dependency', node=node, args\n =(module_name,))\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error', 'missing-manifest-dependency')\n def visit_importfrom(self, node):\n self.check_odoo_relative_import(node)\n if isinstance(node.scope(), astroid.Module):\n package = node.modname\n self._check_imported_packages(node, package)\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error', 'missing-manifest-dependency')\n def visit_import(self, node):\n self.check_odoo_relative_import(node)\n for name, _ in node.names:\n if isinstance(node.scope(), astroid.Module):\n self._check_imported_packages(node, name)\n\n @utils.check_messages('except-pass')\n def visit_tryexcept(self, node):\n \"\"\"Visit block try except\"\"\"\n for handler in node.handlers:\n if not handler.name and len(handler.body) == 1 and isinstance(\n handler.body[0], astroid.node_classes.Pass):\n self.add_message('except-pass', node=handler)\n\n def _check_rst_syntax_error(self):\n \"\"\"Check if rst file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n rst_files = self.filter_files_ext('rst')\n self.msg_args = []\n for rst_file in rst_files:\n errors = self.check_rst_syntax(os.path.join(self.module_path,\n 
rst_file))\n for error in errors:\n msg = error.full_message\n res = re.search(\n 'No directive entry for \"([\\\\w|\\\\-]+)\"|Unknown directive type \"([\\\\w|\\\\-]+)\"|No role entry for \"([\\\\w|\\\\-]+)\"|Unknown interpreted text role \"([\\\\w|\\\\-]+)\"'\n , msg)\n if res:\n continue\n self.msg_args.append(('%s:%d' % (rst_file, error.line or 0),\n msg.strip('\\n').replace('\\n', '|')))\n if self.msg_args:\n return False\n return True\n\n def _check_missing_readme(self):\n \"\"\"Check if exists ./README.{rst,md,txt} file\n :return: If exists return True else False\n \"\"\"\n self.msg_args = self.config.readme_template_url,\n for readme in DFTL_README_FILES:\n if os.path.isfile(os.path.join(self.module_path, readme)):\n return True\n return False\n\n def _check_xml_syntax_error(self):\n \"\"\"Check if xml file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n result = self.parse_xml(os.path.join(self.module_path, xml_file))\n if isinstance(result, string_types):\n self.msg_args.append((xml_file, result.strip('\\n').replace(\n '\\n', '|')))\n if self.msg_args:\n return False\n return True\n\n def _get_duplicate_xml_record_id(self, records):\n \"\"\"Get duplicated records based on attribute id\n :param records list: List of lxml.etree.Element \"<record\"\n :return: Duplicated items.\n e.g. 
{record.id: [record_node1, record_node2]}\n :rtype: dict\n \"\"\"\n all_records = {}\n for record in records:\n record_id = '%s/%s_noupdate_%s' % (record.attrib.get('section',\n ''), record.attrib.get('id', ''), record.getparent().attrib\n .get('noupdate', '0'))\n all_records.setdefault(record_id, []).append(record)\n records = {}\n for key, items in all_records.items():\n if not len(items) < 2:\n records[key] = items\n return records\n\n def _check_duplicate_xml_record_id(self):\n \"\"\"Check duplicated XML-IDs inside of the files of\n each manifest-section treated them separately\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_records = []\n for fname, section in self._get_manifest_referenced_files().items():\n if os.path.splitext(fname)[1].lower() != '.xml':\n continue\n fname = os.path.join(self.module_path, fname)\n for xml_record in self.get_xml_records(fname):\n xml_record.attrib['section'] = section\n xml_records.append(xml_record)\n for name, fobjs in self._get_duplicate_xml_record_id(xml_records\n ).items():\n self.msg_args.append(('%s:%d' % (os.path.relpath(fobjs[0].base,\n self.module_path), fobjs[0].sourceline), name, ', '.join([(\n os.path.relpath(fobj.base, self.module_path) + ':' + str(\n fobj.sourceline)) for fobj in fobjs[1:]])))\n if self.msg_args:\n return False\n return True\n\n def _check_duplicate_id_csv(self):\n \"\"\"Check duplicate xml id in ir.model.access.csv files of a odoo module.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n all_csv_ids = []\n self.msg_args = []\n for csv_file_rel in self.filter_files_ext('csv', relpath=True):\n csv_file = os.path.join(self.module_path, csv_file_rel)\n if os.path.basename(csv_file) == 'ir.model.access.csv':\n all_csv_ids.extend(self.get_field_csv(csv_file))\n duplicated_ids_csv = self.get_duplicated_items(all_csv_ids)\n for duplicated_id_csv in duplicated_ids_csv:\n 
self.msg_args.append((csv_file_rel, duplicated_id_csv))\n if duplicated_ids_csv:\n return False\n return True\n\n def _check_redundant_modulename_xml(self):\n \"\"\"Check redundant module name in xml file.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file_rel in self.filter_files_ext('xml', relpath=True):\n xml_file = os.path.join(self.module_path, xml_file_rel)\n for xml_id, lineno in self.get_xml_redundant_module_name(xml_file,\n self.module):\n self.msg_args.append(('%s:%d' % (xml_file_rel, lineno), xml_id)\n )\n if self.msg_args:\n return False\n return True\n\n def _check_character_not_valid_in_resource_link(self):\n \"\"\"The resource in in src/href contains a not valid chararter\"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml'):\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n for name, attr in (('link', 'href'), ('script', 'src')):\n nodes = doc.xpath('.//%s[@%s]' % (name, attr)\n ) if not isinstance(doc, string_types) else []\n for node in nodes:\n resource = node.get(attr, '')\n ext = os.path.splitext(os.path.basename(resource))[1]\n if resource.startswith('/') and not re.search(\n '^[.][a-zA-Z]+$', ext):\n self.msg_args.append('%s:%s' % (xml_file, node.\n sourceline))\n if self.msg_args:\n return False\n return True\n\n def _get_duplicate_xml_fields(self, fields):\n \"\"\"Get duplicated xml fields based on attribute name\n :param fields list: List of lxml.etree.Element \"<field\"\n :return: Duplicated items.\n e.g. 
{field.name: [field_node1, field_node2]}\n :rtype: dict\n \"\"\"\n all_fields = {}\n for field in fields:\n field_xml = field.attrib.get('name')\n if not field_xml:\n continue\n all_fields.setdefault((field_xml, field.attrib.get('context'),\n field.attrib.get('filter_domain'), field.getparent()), []\n ).append(field)\n return dict(((name, context, filter_domain, parent_node), nodes) for\n (name, context, filter_domain, parent_node), nodes in\n all_fields.items() if len(nodes) >= 2)\n\n def _check_duplicate_xml_fields(self):\n \"\"\"Check duplicate field in all record of xml files of a odoo module.\n Important note: this check does not work with inherited views.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file)):\n if record.xpath('field[@name=\"inherit_id\"]'):\n continue\n for xpath in ['field', 'field/*/field',\n 'field/*/field/tree/field', 'field/*/field/form/field']:\n for name, fobjs in self._get_duplicate_xml_fields(record\n .xpath(xpath)).items():\n self.msg_args.append(('%s:%d' % (xml_file, fobjs[0]\n .sourceline), name[0], ', '.join([str(fobj.\n sourceline) for fobj in fobjs[1:]])))\n if self.msg_args:\n return False\n return True\n\n def _check_dangerous_filter_wo_user(self):\n \"\"\"Check dangerous filter without a user assigned.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n ir_filter_records = self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='ir.filters')\n for ir_filter_record in ir_filter_records:\n ir_filter_fields = ir_filter_record.xpath(\n \"field[@name='name' or @name='user_id']\")\n if ir_filter_fields and len(ir_filter_fields) == 1:\n self.msg_args = '%s:%d' % (xml_file, ir_filter_record.\n sourceline), 
ir_filter_record.get('id')\n return False\n return True\n\n @staticmethod\n def _get_priority(view):\n try:\n priority_node = view.xpath(\"field[@name='priority'][1]\")[0]\n return int(priority_node.get('eval', priority_node.text) or 0)\n except (IndexError, ValueError):\n pass\n return 0\n\n @staticmethod\n def _is_replaced_field(view):\n try:\n arch = view.xpath(\"field[@name='arch' and @type='xml'][1]\")[0]\n except IndexError:\n return None\n replaces = arch.xpath(\n \".//field[@name='name' and @position='replace'][1]\") + arch.xpath(\n \".//xpath[@position='replace'][1]\")\n return bool(replaces)\n\n def _check_dangerous_view_replace_wo_priority(self):\n \"\"\"Check dangerous view defined with low priority\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n views = self.get_xml_records(os.path.join(self.module_path,\n xml_file), model='ir.ui.view')\n for view in views:\n priority = self._get_priority(view)\n is_replaced_field = self._is_replaced_field(view)\n if is_replaced_field and priority < self.config.min_priority:\n self.msg_args.append(('%s:%s' % (xml_file, view.\n sourceline), priority, self.config.min_priority))\n if self.msg_args:\n return False\n return True\n\n def _check_create_user_wo_reset_password(self):\n \"\"\"Check xml records of user without the context\n 'context=\"{'no_reset_password': True}\"'\n This context avoid send email and mail log warning\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n user_records = self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='res.users')\n self.msg_args.extend([('%s:%s' % (xml_file, user_record.\n sourceline)) for user_record in user_records if user_record\n .xpath(\"field[@name='name']\") and 'no_reset_password' not in\n 
(user_record.get('context') or '')])\n if self.msg_args:\n return False\n return True\n\n def _check_javascript_lint(self):\n \"\"\"Check javascript lint\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for js_file_rel in self.filter_files_ext('js', relpath=True):\n js_file = os.path.join(self.module_path, js_file_rel)\n errors = self.check_js_lint(js_file, self.config.jslintrc)\n for error in errors:\n self.msg_args.append((js_file_rel + error,))\n if self.msg_args:\n return False\n return True\n\n def _check_deprecated_data_xml_node(self):\n \"\"\"Check deprecated <data> xml node inside <odoo> xml node\n :return: False if found <data> xml node inside <odoo> xml node\"\"\"\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n odoo_nodes = doc.xpath('/odoo') if not isinstance(doc, string_types\n ) else []\n children, data_node = (odoo_nodes[0].getchildren(), odoo_nodes[\n 0].findall('data')) if odoo_nodes else ([], [])\n if len(children) == 1 and len(data_node) == 1:\n lineno = odoo_nodes[0].sourceline\n self.msg_args.append('%s:%s' % (xml_file, lineno))\n if self.msg_args:\n return False\n return True\n\n def _check_deprecated_openerp_xml_node(self):\n \"\"\"Check deprecated <openerp> xml node\n :return: False if exists <openerp> node and\n add list of xml files in self.msg_args\n \"\"\"\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n openerp_nodes = doc.xpath('/openerp') if not isinstance(doc,\n string_types) else []\n if openerp_nodes:\n lineno = openerp_nodes[0].sourceline\n self.msg_args.append('%s:%s' % (xml_file, lineno))\n if self.msg_args:\n return False\n return True\n\n def _check_wrong_tabs_instead_of_spaces(self):\n \"\"\"Check wrong tabs character instead of four spaces.\n 
:return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for type_file in self.config.extfiles_to_lint:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n ext_file = os.path.join(self.module_path, ext_file_rel)\n countline = 0\n with open(ext_file, 'rb') as fp:\n for line in fp:\n countline += 1\n line_space_trip = line.lstrip(b' ')\n if line_space_trip != line_space_trip.lstrip(b'\\t'):\n self.msg_args.append('%s:%d' % (ext_file_rel,\n countline))\n if self.msg_args:\n return False\n return True\n\n def _check_missing_newline_extrafiles(self):\n \"\"\"Check missing newline in other ext files (.xml, .csv, .po)\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for type_file in self.config.extfiles_to_lint:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n ext_file = os.path.join(self.module_path, ext_file_rel)\n last_line = ''\n with open(ext_file, 'rb') as fp:\n if os.stat(ext_file).st_size > 1:\n fp.seek(-2, os.SEEK_END)\n last_line = fp.readline()\n if not (last_line.endswith(b'\\n') or last_line.\n endswith(b'\\r')):\n self.msg_args.append((ext_file_rel,))\n if self.msg_args:\n return False\n return True\n\n def _get_manifest_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in (self.manifest_dict.get(data_type) or []):\n referenced_files[fname] = data_type\n return referenced_files\n\n def _get_xml_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in (self.manifest_dict.get(data_type) or []):\n if not fname.endswith('.xml'):\n continue\n referenced_files.update(self.\n _get_xml_referenced_files_report(fname, data_type))\n return referenced_files\n\n def _get_xml_referenced_files_report(self, fname, data_type):\n return {os.path.join(*record.attrib[attribute].split(os.sep)[1:]):\n data_type for attribute in 
['xml', 'xsl'] for record in self.\n parse_xml(os.path.join(self.module_path, fname)).xpath(\n '//report[@%s]' % attribute)}\n\n def _get_module_files(self):\n module_files = []\n for type_file in self.config.extfiles_convert:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n module_files.append(ext_file_rel)\n return module_files\n\n def _check_file_not_used(self):\n \"\"\"Check if a file is not used from manifest\"\"\"\n module_files = set(self._get_module_files())\n referenced_files = set(self._get_manifest_referenced_files()).union(set\n (self._get_xml_referenced_files()))\n excluded_dirs = ['static', 'test', 'tests', 'migrations']\n no_referenced_files = [f for f in module_files - referenced_files if\n f.split(os.path.sep)[0] not in excluded_dirs]\n self.msg_args = no_referenced_files\n return not no_referenced_files\n\n def _check_xml_attribute_translatable(self):\n \"\"\"The xml attribute is missing the translation=\"off\" tag\n Example <attribute name=\"groups\">sale.group</attribute>\n \"\"\"\n if self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions != ['8.0']:\n return True\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file), None,\n '//attribute[not(@name=\"string\") and not(@translation)]'):\n self.msg_args.append(('%s:%d' % (xml_file, record.\n sourceline), 'xml_id'))\n if self.msg_args:\n return False\n return True\n\n def _check_xml_deprecated_tree_attribute(self):\n \"\"\"The tree-view declaration is using a deprecated attribute.\n Example <tree string=\"Partners\"></tree>\n \"\"\"\n checks = [{'attr': 'colors', 'skip_versions': {'4.2', '5.0', '6.0',\n '6.1', '7.0', '8.0'}, 'xpath': './/tree[@colors]'}, {'attr':\n 'fonts', 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0',\n '8.0'}, 'xpath': './/tree[@fonts]'}, {'attr': 'string',\n 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0'}, 
'xpath':\n './/tree[@string]'}]\n valid_versions = set(self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions)\n applicable_checks = [check for check in checks if check['attr'] in\n self.config.deprecated_tree_attributes and bool(valid_versions -\n check['skip_versions'])]\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='ir.ui.view'):\n for check in applicable_checks:\n if record.xpath(check['xpath']):\n self.msg_args.append(('%s:%d' % (xml_file, record.\n sourceline), check['attr']))\n if self.msg_args:\n return False\n return True\n\n def _check_xml_deprecated_qweb_directive(self):\n \"\"\"Check for use of deprecated QWeb directives t-*-options.\n :return: False if deprecated directives are found, in which case\n self.msg_args will contain the error messages.\n \"\"\"\n valid_versions = set(self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions)\n if not valid_versions & {'10.0', '11.0'}:\n return True\n deprecated_directives = {'t-esc-options', 't-field-options',\n 't-raw-options'}\n directive_attrs = '|'.join('@%s' % d for d in deprecated_directives)\n xpath = '|'.join('/%s//template//*[%s]' % (tag, directive_attrs) for\n tag in ('odoo', 'openerp'))\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=False):\n doc = self.parse_xml(xml_file)\n if isinstance(doc, string_types):\n continue\n for node in doc.xpath(xpath):\n directive = next(iter(set(node.attrib) & deprecated_directives)\n )\n self.msg_args.append(('%s:%d' % (xml_file, node.sourceline),\n directive))\n return not bool(self.msg_args)\n",
"step-5": "\"\"\"Visit module to add odoo checks\n\"\"\"\n\nimport os\nimport re\n\nimport astroid\nimport isort\nfrom pylint.checkers import utils\nfrom six import string_types\n\nfrom .. import misc, settings\n\nODOO_MSGS = {\n # C->convention R->refactor W->warning E->error F->fatal\n\n # Visit odoo module with settings.BASE_OMODULE_ID\n 'C%d02' % settings.BASE_OMODULE_ID: (\n 'Missing ./README.rst file. Template here: %s',\n 'missing-readme',\n settings.DESC_DFLT\n ),\n 'E%d01' % settings.BASE_OMODULE_ID: (\n '%s %s',\n 'rst-syntax-error',\n settings.DESC_DFLT\n ),\n 'E%d02' % settings.BASE_OMODULE_ID: (\n '%s error: %s',\n 'xml-syntax-error',\n settings.DESC_DFLT\n ),\n 'W%d01' % settings.BASE_OMODULE_ID: (\n '%s Dangerous filter without explicit `user_id` in xml_id %s',\n 'dangerous-filter-wo-user',\n settings.DESC_DFLT\n ),\n 'W%d02' % settings.BASE_OMODULE_ID: (\n '%s Duplicate xml record id \"%s\" in %s',\n 'duplicate-xml-record-id',\n settings.DESC_DFLT\n ),\n 'W%d03' % settings.BASE_OMODULE_ID: (\n '%s',\n 'javascript-lint',\n settings.DESC_DFLT\n ),\n 'W%d04' % settings.BASE_OMODULE_ID: (\n '%s Deprecated <openerp> xml node',\n 'deprecated-openerp-xml-node',\n settings.DESC_DFLT\n ),\n 'W%d05' % settings.BASE_OMODULE_ID: (\n '%s record res.users without '\n 'context=\"{\\'no_reset_password\\': True}\"',\n 'create-user-wo-reset-password',\n settings.DESC_DFLT\n ),\n 'W%d06' % settings.BASE_OMODULE_ID: (\n '%s Duplicate id \"%s\"',\n 'duplicate-id-csv',\n settings.DESC_DFLT\n ),\n 'W%d07' % settings.BASE_OMODULE_ID: (\n '%s Duplicate xml field \"%s\" in lines %s',\n 'duplicate-xml-fields',\n settings.DESC_DFLT\n ),\n 'W%d08' % settings.BASE_OMODULE_ID: (\n '%s Missing newline',\n 'missing-newline-extrafiles',\n settings.DESC_DFLT\n ),\n 'W%d09' % settings.BASE_OMODULE_ID: (\n '%s Redundant name module reference in xml_ids \"%s\".',\n 'redundant-modulename-xml',\n settings.DESC_DFLT\n ),\n 'W%d10' % settings.BASE_OMODULE_ID: (\n '%s Use wrong tabs 
indentation instead of four spaces',\n 'wrong-tabs-instead-of-spaces',\n settings.DESC_DFLT\n ),\n 'R%d80' % settings.BASE_OMODULE_ID: (\n 'Consider merging classes inherited to \"%s\" from %s.',\n 'consider-merging-classes-inherited',\n settings.DESC_DFLT\n ),\n 'W%d50' % settings.BASE_OMODULE_ID: (\n 'Same Odoo module absolute import. You should use '\n 'relative import with \".\" '\n 'instead of \"openerp.addons.%s\"',\n 'odoo-addons-relative-import',\n settings.DESC_DFLT\n ),\n 'W%d40' % settings.BASE_OMODULE_ID: (\n '%s Dangerous use of \"replace\" from view '\n 'with priority %s < %s. '\n 'Increase priority or don\\'t use \"replace\". '\n 'For more information see https://odoo-development.readthedocs.io/en/latest/dev/xml/inherit.html#collisions-and-priority ',\n 'dangerous-view-replace-wo-priority',\n settings.DESC_DFLT\n ),\n 'W%d30' % settings.BASE_OMODULE_ID: (\n '%s not used from manifest',\n 'file-not-used',\n settings.DESC_DFLT\n ),\n 'W%d35' % settings.BASE_OMODULE_ID: (\n 'External dependency \"%s\" without ImportError. More info: '\n 'https://odoo-development.readthedocs.io/en/latest/dev/py/external-imports.html'\n '#external-dependencies',\n 'missing-import-error',\n settings.DESC_DFLT\n ),\n 'W%d36' % settings.BASE_OMODULE_ID: (\n 'Missing external dependency \"%s\" from manifest. More info: '\n 'https://github.com/OCA/odoo-community.org/blob/master/website/'\n 'Contribution/CONTRIBUTING.rst'\n '#external-dependencies',\n 'missing-manifest-dependency',\n settings.DESC_DFLT\n ),\n 'W%d38' % settings.BASE_OMODULE_ID: (\n 'pass into block except. 
'\n 'If you really need to use the pass consider logging that exception',\n 'except-pass',\n settings.DESC_DFLT\n ),\n 'W%d37' % settings.BASE_OMODULE_ID: (\n '%s The xml attribute is missing the translation=\"off\" tag %s',\n 'xml-attribute-translatable',\n settings.DESC_DFLT\n ),\n 'W%d42' % settings.BASE_OMODULE_ID: (\n '%s Deprecated <tree> xml attribute \"%s\"',\n 'xml-deprecated-tree-attribute',\n settings.DESC_DFLT\n ),\n 'W%d43' % settings.BASE_OMODULE_ID: (\n '%s Deprecated QWeb directive \"%s\". Use \"t-options\" instead',\n 'xml-deprecated-qweb-directive',\n settings.DESC_DFLT\n ),\n 'W%d39' % settings.BASE_OMODULE_ID: (\n '%s Use <odoo> instead of <odoo><data> or use <odoo noupdate=\"1\">'\n 'instead of <odoo><data noupdate=\"1\">',\n 'deprecated-data-xml-node',\n settings.DESC_DFLT\n ),\n 'W%d44' % settings.BASE_OMODULE_ID: (\n '%s The resource in in src/href contains a not valid chararter',\n 'character-not-valid-in-resource-link',\n settings.DESC_DFLT\n ),\n}\n\n\nDFTL_README_TMPL_URL = 'https://github.com/OCA/maintainer-tools' + \\\n '/blob/master/template/module/README.rst'\nDFTL_README_FILES = ['README.rst', 'README.md', 'README.txt']\nDFTL_MIN_PRIORITY = 99\n# Files supported from manifest to convert\n# Extracted from openerp/tools/convert.py:def convert_file\nDFLT_EXTFILES_CONVERT = ['csv', 'sql', 'xml', 'yml']\nDFLT_EXTFILES_TO_LINT = DFLT_EXTFILES_CONVERT + [\n 'po', 'js', 'mako', 'rst', 'md', 'markdown']\nDFLT_IMPORT_NAME_WHITELIST = [\n # self-odoo\n 'odoo', 'openerp',\n # packages for unit tests only\n 'requests_mock',\n # Known external packages of odoo\n 'PIL', 'anybox.testing.openerp', 'argparse', 'babel',\n 'dateutil', 'decorator', 'docutils', 'faces', 'feedparser',\n 'gdata', 'gevent', 'greenlet', 'jcconv', 'jinja2',\n 'ldap', 'lxml', 'mako', 'markupsafe', 'mock', 'odf',\n 'ofxparse', 'openid', 'passlib', 'pkg_resources',\n 'psutil', 'psycogreen', 'psycopg2', 'pyPdf', 'pychart',\n 'pydot', 'pyparsing', 'pytz', 'qrcode', 'reportlab',\n 
'requests', 'serial', 'simplejson', 'six', 'suds',\n 'unittest2', 'usb', 'vatnumber', 'vobject', 'werkzeug',\n 'wsgiref', 'xlsxwriter', 'xlwt', 'yaml',\n]\nDFTL_JSLINTRC = os.path.join(\n os.path.dirname(os.path.dirname(os.path.realpath(__file__))),\n 'examples', '.jslintrc'\n)\nDFLT_DEPRECATED_TREE_ATTRS = ['colors', 'fonts', 'string']\nDFTL_MANIFEST_DATA_KEYS = ['data', 'demo', 'demo_xml', 'init_xml', 'test',\n 'update_xml']\n\n\nclass ModuleChecker(misc.WrapperModuleChecker):\n name = settings.CFG_SECTION\n msgs = ODOO_MSGS\n options = (\n ('readme_template_url', {\n 'type': 'string',\n 'metavar': '<string>',\n 'default': DFTL_README_TMPL_URL,\n 'help': 'URL of README.rst template file',\n }),\n ('extfiles_to_lint', {\n 'type': 'csv',\n 'metavar': '<comma separated values>',\n 'default': DFLT_EXTFILES_TO_LINT,\n 'help': 'List of extension files to check separated by a comma.'\n }),\n ('min-priority', {\n 'type': 'int',\n 'metavar': '<int>',\n 'default': DFTL_MIN_PRIORITY,\n 'help': 'Minimum priority number of a view with replace of fields.'\n }),\n ('extfiles_convert', {\n 'type': 'csv',\n 'metavar': '<comma separated values>',\n 'default': DFLT_EXTFILES_CONVERT,\n 'help': 'List of extension files supported to convert '\n 'from manifest separated by a comma.'\n }),\n ('import_name_whitelist', {\n 'type': 'csv',\n 'metavar': '<comma separated values>',\n 'default': DFLT_IMPORT_NAME_WHITELIST,\n 'help': 'List of known import dependencies of odoo,'\n ' separated by a comma.'\n }),\n ('jslintrc', {\n 'type': 'string',\n 'metavar': '<path to file>',\n 'default': os.environ.get('PYLINT_ODOO_JSLINTRC') or DFTL_JSLINTRC,\n 'help': ('A path to a file that contains a configuration file of '\n 'javascript lint. You can use the environment variable '\n '\"PYLINT_ODOO_JSLINTRC\" too. 
Default: %s' % DFTL_JSLINTRC)\n }),\n ('deprecated_tree_attributes', {\n 'type': 'multiple_choice',\n 'metavar': '<attributes>',\n 'default': DFLT_DEPRECATED_TREE_ATTRS,\n 'choices': DFLT_DEPRECATED_TREE_ATTRS,\n 'help': 'List of deprecated list view attributes,'\n ' separated by a comma. Valid values: %s' % ', '.join(\n DFLT_DEPRECATED_TREE_ATTRS)\n }),\n )\n\n odoo_check_versions = {\n 'missing-import-error': {\n 'max_odoo_version': '11.0',\n },\n }\n\n class_inherit_names = []\n\n @utils.check_messages('consider-merging-classes-inherited')\n def visit_assign(self, node):\n if not self.odoo_node:\n return\n if not self.linter.is_message_enabled(\n 'consider-merging-classes-inherited', node.lineno):\n return\n node_left = node.targets[0]\n if not isinstance(node_left, astroid.node_classes.AssignName) or \\\n node_left.name not in ('_inherit', '_name') or \\\n not isinstance(node.value, astroid.node_classes.Const) or \\\n not isinstance(node.parent, astroid.ClassDef):\n return\n if node_left.name == '_name':\n node.parent.odoo_attribute_name = node.value.value\n return\n _name = getattr(node.parent, 'odoo_attribute_name', None)\n _inherit = node.value.value\n if _name and _name != _inherit:\n # Skip _name='model.name' _inherit='other.model' because is valid\n return\n key = (self.odoo_node, _inherit)\n node.file = self.linter.current_file\n self.inh_dup.setdefault(key, []).append(node)\n\n def _build_whitelist_module_patterns(self):\n known_patterns = []\n for known_pattern in self.config.import_name_whitelist:\n pattern = known_pattern.replace('*', '.*').replace('?', '.?')\n known_patterns.append(re.compile('^' + pattern + '$'))\n return known_patterns\n\n def open(self):\n \"\"\"Define variables to use cache\"\"\"\n self.inh_dup = {}\n patterns = self._build_whitelist_module_patterns()\n self._whitelist_module_patterns = patterns\n super(ModuleChecker, self).open()\n\n def close(self):\n \"\"\"Final process get all cached values and add messages\"\"\"\n for 
(odoo_node, class_dup_name), nodes in self.inh_dup.items():\n if len(nodes) == 1:\n continue\n path_nodes = []\n for node in nodes[1:]:\n relpath = os.path.relpath(node.file,\n os.path.dirname(odoo_node.file))\n path_nodes.append(\"%s:%d\" % (relpath, node.lineno))\n self.add_message('consider-merging-classes-inherited',\n node=nodes[0],\n args=(class_dup_name, ', '.join(path_nodes)))\n\n def _get_odoo_module_imported(self, node):\n odoo_module = []\n if isinstance(node, astroid.ImportFrom) and \\\n ('openerp.addons' in node.modname or\n 'odoo.addons' in node.modname):\n packages = node.modname.split('.')\n if len(packages) >= 3:\n # from openerp.addons.odoo_module import models\n odoo_module.append(packages[2])\n else:\n # from openerp.addons import odoo_module\n odoo_module.append(node.names[0][0])\n elif isinstance(node, astroid.Import):\n for name, _ in node.names:\n if 'openerp.addons' not in name and 'odoo.addons' not in name:\n continue\n packages = name.split('.')\n if len(packages) >= 3:\n # import openerp.addons.odoo_module\n odoo_module.append(packages[2])\n return odoo_module\n\n def check_odoo_relative_import(self, node):\n if self.odoo_module_name in self._get_odoo_module_imported(node):\n self.add_message('odoo-addons-relative-import', node=node,\n args=(self.odoo_module_name))\n\n @staticmethod\n def _is_absolute_import(node, name):\n modnode = node.root()\n importedmodnode = ModuleChecker._get_imported_module(node, name)\n if importedmodnode and importedmodnode.file and \\\n modnode is not importedmodnode and \\\n importedmodnode.name != name:\n return True\n return False\n\n @staticmethod\n def _get_imported_module(importnode, modname):\n try:\n return importnode.do_import_module(modname)\n except:\n pass\n\n def _is_module_name_in_whitelist(self, module_name):\n # Try to find most specific placement instruction match (if any)\n # (from isort place_module() method)\n parts = module_name.split('.')\n module_names_to_check = [\n 
'.'.join(parts[:first_k])\n for first_k in range(len(parts), 0, -1)\n ]\n # Check if one of the module name is part of the whitelist.\n # For an module name such as 'anybox.testing.openerp', the\n # modules names to check will be:\n # ['anybox.testing.openerp', 'anybox.testing', 'anybox']\n # Only one of them has to be in the whitelist to be accepted.\n for module_name_to_check in module_names_to_check:\n for pattern in self._whitelist_module_patterns:\n if pattern.match(module_name_to_check):\n return True\n return False\n\n def _check_imported_packages(self, node, module_name):\n \"\"\"Check if the import node is a external dependency to validate it\"\"\"\n if not module_name:\n # skip local packages because is not a external dependency.\n return\n if not self.manifest_dict:\n # skip if is not a module of odoo\n return\n if not isinstance(node.parent, astroid.Module):\n # skip nested import sentences\n return\n if self._is_absolute_import(node, module_name):\n # skip absolute imports\n return\n if self._is_module_name_in_whitelist(module_name):\n # ignore whitelisted modules\n return\n isort_obj = isort.SortImports(file_contents='')\n import_category = isort_obj.place_module(module_name)\n if import_category not in ('FIRSTPARTY', 'THIRDPARTY'):\n # skip if is not a external library or is a white list library\n return\n relpath = os.path.relpath(\n node.parent.file, os.path.dirname(self.manifest_file))\n if os.path.dirname(relpath) == 'tests':\n # import errors rules don't apply to the test files\n # since these files are loaded only when running tests\n # and in such a case your\n # module and their external dependencies are installed.\n return\n self.add_message('missing-import-error', node=node,\n args=(module_name,))\n\n ext_deps = self.manifest_dict.get('external_dependencies') or {}\n py_ext_deps = ext_deps.get('python') or []\n if isinstance(node, astroid.ImportFrom) and (node.level or 0) >= 1:\n return\n if module_name not in py_ext_deps and \\\n 
module_name.split('.')[0] not in py_ext_deps:\n self.add_message('missing-manifest-dependency', node=node,\n args=(module_name,))\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error',\n 'missing-manifest-dependency')\n def visit_importfrom(self, node):\n self.check_odoo_relative_import(node)\n if isinstance(node.scope(), astroid.Module):\n package = node.modname\n self._check_imported_packages(node, package)\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error',\n 'missing-manifest-dependency')\n def visit_import(self, node):\n self.check_odoo_relative_import(node)\n for name, _ in node.names:\n if isinstance(node.scope(), astroid.Module):\n self._check_imported_packages(node, name)\n\n @utils.check_messages('except-pass')\n def visit_tryexcept(self, node):\n \"\"\"Visit block try except\"\"\"\n for handler in node.handlers:\n if (not handler.name and\n len(handler.body) == 1 and\n isinstance(handler.body[0], astroid.node_classes.Pass)):\n self.add_message('except-pass', node=handler)\n\n def _check_rst_syntax_error(self):\n \"\"\"Check if rst file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n rst_files = self.filter_files_ext('rst')\n self.msg_args = []\n for rst_file in rst_files:\n errors = self.check_rst_syntax(\n os.path.join(self.module_path, rst_file))\n for error in errors:\n msg = error.full_message\n res = re.search(\n r'No directive entry for \"([\\w|\\-]+)\"|'\n r'Unknown directive type \"([\\w|\\-]+)\"|'\n r'No role entry for \"([\\w|\\-]+)\"|'\n r'Unknown interpreted text role \"([\\w|\\-]+)\"', msg)\n # TODO: Add support for sphinx directives after fix\n # https://github.com/twolfson/restructuredtext-lint/issues/29\n if res:\n # Skip directive errors\n continue\n self.msg_args.append((\n \"%s:%d\" % (rst_file, error.line or 0),\n msg.strip('\\n').replace('\\n', '|')))\n if self.msg_args:\n return False\n return True\n\n def 
_check_missing_readme(self):\n \"\"\"Check if exists ./README.{rst,md,txt} file\n :return: If exists return True else False\n \"\"\"\n self.msg_args = (self.config.readme_template_url,)\n for readme in DFTL_README_FILES:\n if os.path.isfile(os.path.join(self.module_path, readme)):\n return True\n return False\n\n def _check_xml_syntax_error(self):\n \"\"\"Check if xml file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n result = self.parse_xml(os.path.join(self.module_path, xml_file))\n if isinstance(result, string_types):\n self.msg_args.append((\n xml_file, result.strip('\\n').replace('\\n', '|')))\n if self.msg_args:\n return False\n return True\n\n def _get_duplicate_xml_record_id(self, records):\n \"\"\"Get duplicated records based on attribute id\n :param records list: List of lxml.etree.Element \"<record\"\n :return: Duplicated items.\n e.g. 
{record.id: [record_node1, record_node2]}\n :rtype: dict\n \"\"\"\n all_records = {}\n for record in records:\n record_id = \"%s/%s_noupdate_%s\" % (\n record.attrib.get('section', ''),\n record.attrib.get('id', ''),\n record.getparent().attrib.get('noupdate', '0'),\n )\n all_records.setdefault(record_id, []).append(record)\n # Remove all keys which not duplicated\n records = {}\n for key, items in all_records.items():\n if not len(items) < 2:\n records[key] = items\n return records\n\n def _check_duplicate_xml_record_id(self):\n \"\"\"Check duplicated XML-IDs inside of the files of\n each manifest-section treated them separately\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_records = []\n for fname, section in self._get_manifest_referenced_files().items():\n if os.path.splitext(fname)[1].lower() != '.xml':\n continue\n fname = os.path.join(self.module_path, fname)\n for xml_record in self.get_xml_records(fname):\n xml_record.attrib['section'] = section\n xml_records.append(xml_record)\n for name, fobjs in \\\n self._get_duplicate_xml_record_id(xml_records).items():\n self.msg_args.append((\n \"%s:%d\" % (os.path.relpath(fobjs[0].base, self.module_path),\n fobjs[0].sourceline),\n name,\n ', '.join([os.path.relpath(fobj.base, self.module_path) +\n ':' + str(fobj.sourceline)\n for fobj in fobjs[1:]]),\n ))\n if self.msg_args:\n return False\n return True\n\n def _check_duplicate_id_csv(self):\n \"\"\"Check duplicate xml id in ir.model.access.csv files of a odoo module.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n all_csv_ids = []\n self.msg_args = []\n for csv_file_rel in self.filter_files_ext('csv', relpath=True):\n csv_file = os.path.join(self.module_path, csv_file_rel)\n if os.path.basename(csv_file) == 'ir.model.access.csv':\n all_csv_ids.extend(self.get_field_csv(csv_file))\n duplicated_ids_csv = self.get_duplicated_items(all_csv_ids)\n for 
duplicated_id_csv in duplicated_ids_csv:\n self.msg_args.append((csv_file_rel, duplicated_id_csv))\n if duplicated_ids_csv:\n return False\n return True\n\n def _check_redundant_modulename_xml(self):\n \"\"\"Check redundant module name in xml file.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file_rel in self.filter_files_ext('xml', relpath=True):\n xml_file = os.path.join(self.module_path, xml_file_rel)\n for xml_id, lineno in self.get_xml_redundant_module_name(\n xml_file, self.module):\n self.msg_args.append(\n (\"%s:%d\" % (xml_file_rel, lineno), xml_id))\n if self.msg_args:\n return False\n return True\n\n def _check_character_not_valid_in_resource_link(self):\n \"\"\"The resource in in src/href contains a not valid chararter\"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml'):\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n for name, attr in (('link', 'href'), ('script', 'src')):\n nodes = (doc.xpath('.//%s[@%s]' % (name, attr))\n if not isinstance(doc, string_types) else [])\n for node in nodes:\n resource = node.get(attr, '')\n ext = os.path.splitext(os.path.basename(resource))[1]\n if (resource.startswith('/') and not\n re.search('^[.][a-zA-Z]+$', ext)):\n self.msg_args.append((\"%s:%s\" % (xml_file,\n node.sourceline)))\n if self.msg_args:\n return False\n return True\n\n def _get_duplicate_xml_fields(self, fields):\n \"\"\"Get duplicated xml fields based on attribute name\n :param fields list: List of lxml.etree.Element \"<field\"\n :return: Duplicated items.\n e.g. 
{field.name: [field_node1, field_node2]}\n :rtype: dict\n \"\"\"\n all_fields = {}\n for field in fields:\n field_xml = field.attrib.get('name')\n if not field_xml:\n continue\n all_fields.setdefault(\n (field_xml, field.attrib.get('context'),\n field.attrib.get('filter_domain'),\n field.getparent()), []).append(field)\n # Remove all keys which not duplicated by excluding them from the\n return dict(((name, context, filter_domain, parent_node), nodes) for\n (name, context, filter_domain, parent_node), nodes in\n all_fields.items() if len(nodes) >= 2)\n\n def _check_duplicate_xml_fields(self):\n \"\"\"Check duplicate field in all record of xml files of a odoo module.\n Important note: this check does not work with inherited views.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(\n os.path.join(self.module_path, xml_file)):\n if record.xpath('field[@name=\"inherit_id\"]'):\n continue\n for xpath in ['field', 'field/*/field',\n 'field/*/field/tree/field',\n 'field/*/field/form/field']:\n for name, fobjs in self._get_duplicate_xml_fields(\n record.xpath(xpath)).items():\n self.msg_args.append((\n \"%s:%d\" % (xml_file, fobjs[0].sourceline), name[0],\n ', '.join([str(fobj.sourceline)\n for fobj in fobjs[1:]]),\n ))\n if self.msg_args:\n return False\n return True\n\n def _check_dangerous_filter_wo_user(self):\n \"\"\"Check dangerous filter without a user assigned.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n ir_filter_records = self.get_xml_records(\n os.path.join(self.module_path, xml_file), model='ir.filters')\n for ir_filter_record in ir_filter_records:\n ir_filter_fields = ir_filter_record.xpath(\n \"field[@name='name' or @name='user_id']\")\n # if exists field=\"name\" then is a new record\n # then 
should be field=\"user_id\" too\n if ir_filter_fields and len(ir_filter_fields) == 1:\n # TODO: Add a list of msg_args before of return\n # TODO: Add source lineno in all xml checks\n self.msg_args = (\n \"%s:%d\" % (xml_file, ir_filter_record.sourceline),\n ir_filter_record.get('id'),)\n return False\n return True\n\n @staticmethod\n def _get_priority(view):\n try:\n priority_node = view.xpath(\"field[@name='priority'][1]\")[0]\n return int(priority_node.get('eval', priority_node.text) or 0)\n except (IndexError, ValueError):\n # IndexError: If the field is not found\n # ValueError: If the value found is not valid integer\n pass\n return 0\n\n @staticmethod\n def _is_replaced_field(view):\n try:\n arch = view.xpath(\"field[@name='arch' and @type='xml'][1]\")[0]\n except IndexError:\n return None\n replaces = \\\n arch.xpath(\".//field[@name='name' and @position='replace'][1]\") + \\\n arch.xpath(\".//xpath[@position='replace'][1]\")\n return bool(replaces)\n\n def _check_dangerous_view_replace_wo_priority(self):\n \"\"\"Check dangerous view defined with low priority\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n views = self.get_xml_records(\n os.path.join(self.module_path, xml_file), model='ir.ui.view')\n for view in views:\n priority = self._get_priority(view)\n is_replaced_field = self._is_replaced_field(view)\n if is_replaced_field and priority < self.config.min_priority:\n self.msg_args.append((\n \"%s:%s\" % (xml_file, view.sourceline), priority,\n self.config.min_priority))\n if self.msg_args:\n return False\n return True\n\n def _check_create_user_wo_reset_password(self):\n \"\"\"Check xml records of user without the context\n 'context=\"{'no_reset_password': True}\"'\n This context avoid send email and mail log warning\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n 
xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n user_records = self.get_xml_records(\n os.path.join(self.module_path, xml_file), model='res.users')\n # if exists field=\"name\" then is a new record\n # then should be context\n self.msg_args.extend([\n (\"%s:%s\" % (xml_file, user_record.sourceline))\n for user_record in user_records\n if user_record.xpath(\"field[@name='name']\") and\n 'no_reset_password' not in (user_record.get('context') or '')])\n if self.msg_args:\n return False\n return True\n\n def _check_javascript_lint(self):\n \"\"\"Check javascript lint\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for js_file_rel in self.filter_files_ext('js', relpath=True):\n js_file = os.path.join(self.module_path, js_file_rel)\n errors = self.check_js_lint(js_file, self.config.jslintrc)\n for error in errors:\n self.msg_args.append((js_file_rel + error,))\n if self.msg_args:\n return False\n return True\n\n def _check_deprecated_data_xml_node(self):\n \"\"\"Check deprecated <data> xml node inside <odoo> xml node\n :return: False if found <data> xml node inside <odoo> xml node\"\"\"\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n odoo_nodes = doc.xpath(\"/odoo\") \\\n if not isinstance(doc, string_types) else []\n children, data_node = ((odoo_nodes[0].getchildren(),\n odoo_nodes[0].findall('data'))\n if odoo_nodes else ([], []))\n if len(children) == 1 and len(data_node) == 1:\n lineno = odoo_nodes[0].sourceline\n self.msg_args.append((\"%s:%s\" % (xml_file, lineno)))\n if self.msg_args:\n return False\n return True\n\n def _check_deprecated_openerp_xml_node(self):\n \"\"\"Check deprecated <openerp> xml node\n :return: False if exists <openerp> node and\n add list of xml files in self.msg_args\n \"\"\"\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for 
xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n openerp_nodes = doc.xpath(\"/openerp\") \\\n if not isinstance(doc, string_types) else []\n if openerp_nodes:\n lineno = openerp_nodes[0].sourceline\n self.msg_args.append((\"%s:%s\" % (xml_file, lineno)))\n if self.msg_args:\n return False\n return True\n\n def _check_wrong_tabs_instead_of_spaces(self):\n \"\"\"Check wrong tabs character instead of four spaces.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for type_file in self.config.extfiles_to_lint:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n ext_file = os.path.join(self.module_path, ext_file_rel)\n countline = 0\n with open(ext_file, 'rb') as fp:\n for line in fp:\n countline += 1\n line_space_trip = line.lstrip(b' ')\n if line_space_trip != line_space_trip.lstrip(b'\\t'):\n self.msg_args.append(\n (\"%s:%d\" % (ext_file_rel, countline)))\n if self.msg_args:\n return False\n return True\n\n def _check_missing_newline_extrafiles(self):\n \"\"\"Check missing newline in other ext files (.xml, .csv, .po)\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for type_file in self.config.extfiles_to_lint:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n ext_file = os.path.join(self.module_path, ext_file_rel)\n last_line = ''\n # NOTE: SEEK_END just is supported with 'rb' mode for py3\n with open(ext_file, 'rb') as fp:\n if os.stat(ext_file).st_size > 1:\n fp.seek(-2, os.SEEK_END)\n last_line = fp.readline()\n if not (last_line.endswith(b'\\n') or\n last_line.endswith(b'\\r')):\n self.msg_args.append((ext_file_rel,))\n if self.msg_args:\n return False\n return True\n\n def _get_manifest_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in self.manifest_dict.get(data_type) or []:\n referenced_files[fname] = 
data_type\n return referenced_files\n\n def _get_xml_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in self.manifest_dict.get(data_type) or []:\n if not fname.endswith('.xml'):\n continue\n referenced_files.update(\n self._get_xml_referenced_files_report(fname, data_type)\n )\n return referenced_files\n\n def _get_xml_referenced_files_report(self, fname, data_type):\n return {\n # those files are relative to the addon path\n os.path.join(\n *record.attrib[attribute].split(os.sep)[1:]\n ): data_type\n for attribute in ['xml', 'xsl']\n for record in self.parse_xml(\n os.path.join(self.module_path, fname)\n )\n .xpath('//report[@%s]' % attribute)\n }\n\n def _get_module_files(self):\n module_files = []\n for type_file in self.config.extfiles_convert:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n module_files.append(ext_file_rel)\n return module_files\n\n def _check_file_not_used(self):\n \"\"\"Check if a file is not used from manifest\"\"\"\n module_files = set(self._get_module_files())\n referenced_files = set(self._get_manifest_referenced_files()).union(\n set(self._get_xml_referenced_files())\n )\n excluded_dirs = ['static', 'test', 'tests', 'migrations']\n no_referenced_files = [\n f for f in (module_files - referenced_files)\n if f.split(os.path.sep)[0] not in excluded_dirs\n ]\n self.msg_args = no_referenced_files\n return not no_referenced_files\n\n def _check_xml_attribute_translatable(self):\n \"\"\"The xml attribute is missing the translation=\"off\" tag\n Example <attribute name=\"groups\">sale.group</attribute>\n \"\"\"\n if (self.linter._all_options['valid_odoo_versions'].config\n .valid_odoo_versions != ['8.0']):\n return True\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(\n os.path.join(self.module_path, xml_file), None,\n '//attribute[not(@name=\"string\") and not(@translation)]'):\n 
self.msg_args.append(\n (\"%s:%d\" % (xml_file, record.sourceline), 'xml_id'))\n if self.msg_args:\n return False\n return True\n\n def _check_xml_deprecated_tree_attribute(self):\n \"\"\"The tree-view declaration is using a deprecated attribute.\n Example <tree string=\"Partners\"></tree>\n \"\"\"\n checks = [\n {\n 'attr': 'colors',\n 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0', '8.0'},\n 'xpath': './/tree[@colors]',\n },\n {\n 'attr': 'fonts',\n 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0', '8.0'},\n 'xpath': './/tree[@fonts]',\n },\n {\n 'attr': 'string',\n 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0'},\n 'xpath': './/tree[@string]',\n },\n ]\n valid_versions = set(\n self.linter._all_options['valid_odoo_versions'].config\n .valid_odoo_versions)\n\n applicable_checks = [check for check in checks if (\n check['attr'] in self.config.deprecated_tree_attributes and\n bool(valid_versions - check['skip_versions']))]\n\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(\n os.path.join(self.module_path, xml_file),\n model='ir.ui.view'):\n\n for check in applicable_checks:\n if record.xpath(check['xpath']):\n self.msg_args.append((\n '%s:%d' % (xml_file, record.sourceline),\n check['attr']))\n if self.msg_args:\n return False\n return True\n\n def _check_xml_deprecated_qweb_directive(self):\n \"\"\"Check for use of deprecated QWeb directives t-*-options.\n :return: False if deprecated directives are found, in which case\n self.msg_args will contain the error messages.\n \"\"\"\n valid_versions = set(self.linter._all_options[\n 'valid_odoo_versions'].config.valid_odoo_versions)\n if not valid_versions & {'10.0', '11.0'}:\n return True\n\n deprecated_directives = {\n 't-esc-options',\n 't-field-options',\n 't-raw-options',\n }\n directive_attrs = '|'.join('@%s' % d for d in deprecated_directives)\n xpath = '|'.join(\n '/%s//template//*[%s]' % (tag, directive_attrs)\n for tag in 
('odoo', 'openerp')\n )\n\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=False):\n doc = self.parse_xml(xml_file)\n if isinstance(doc, string_types):\n continue\n for node in doc.xpath(xpath):\n # Find which directive was used exactly.\n directive = next(\n iter(set(node.attrib) & deprecated_directives))\n self.msg_args.append((\n '%s:%d' % (xml_file, node.sourceline), directive))\n return not bool(self.msg_args)\n",
"step-ids": [
24,
28,
33,
42,
46
]
}
|
[
24,
28,
33,
42,
46
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : Konstantinos Anastasakis
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
import os
import glob
from ipywidgets import (Text, Label, HBox, VBox, Layout, Dropdown,
ToggleButtons, Output, HTML, Button,
FileUpload, IntText, RadioButtons)
from cbm.utils import config
from cbm.ipycbm.utils import settings_ds, cbm_widgets
from cbm.ipycbm.ipy_ext import ext_func
from cbm.foi import foi_v1
from cbm.datas import db
# Optional import: the FOI v2 procedure pulls in extra dependencies that may
# not be installed; degrade gracefully so the v1 tab still works, and surface
# the import error to the notebook user instead of crashing the module import.
try:
    from cbm.foi import foi_v2
except Exception as err:
    print(err)
def foi_tab_v1():
    """Build the notebook UI for FOI analysis, version 1 (database-backed).

    Assembles an ipywidgets form that walks the user through the seven FOI v1
    steps: database/object-storage connection, spatial table selection,
    thematic raster upload, YAML class file selection, database function
    installation, parameter entry and, finally, running ``foi_v1.main``.

    Returns:
        VBox: the fully wired widget tree, ready for display in a notebook.
    """
    path_foi = f"{config.get_value(['paths', 'temp'])}/foi/"
    path_foi_func = foi_v1.path_foi_func
    progress = Output()

    def outlog(*text):
        # Route all status messages into the progress Output widget so they
        # appear below the form instead of wherever stdout happens to go.
        with progress:
            print(*text)

    foi_info = HTML("""FOI procedures version 1 (requires access to a database).
        """, placeholder='FOI Information')

    # --- Step 1: database / object storage connection -----------------------
    config_info = HTML(value="""1. Connect to database and object storage.<br>
        FOI procedures need direct access to the database. In case there no
        image is provided, access to object storage will be needed as well
        to generate the base image from sentinel images.
        """, placeholder='FOI Information')
    config_conn = Button(
        value=False,
        button_style='info',
        tooltip='Configure db connection.',
        icon='cogs',
        layout=Layout(width='40px')
    )
    config_conn_box = HBox([])

    @config_conn.on_click
    def config_conn_on_click(b):
        # Toggle the connection-settings panel open/closed on each click.
        if config_conn_box.children == ():
            config_conn_box.children = [settings_ds.direct_conn()]
        else:
            config_conn_box.children = ()

    config_box = VBox([config_info, config_conn, config_conn_box])

    # --- Step 2: spatial data (parcels) to be tested -------------------------
    spatial_info = HTML(
        """2. Select the spatial data to be tested - parcels that will be
        checked for heterogeneity and cardinality.<br>
        - Select a table from the database""")
    db_tables = Dropdown(
        options=[],
        description='db Tables:'
    )
    refresh_db_tables = Button(
        value=False,
        button_style='info',
        tooltip='Get db tables.',
        icon='refresh',
        layout=Layout(width='40px')
    )

    @refresh_db_tables.on_click
    def refresh_db_tables_on_click(b):
        # Repopulate the dropdown from the currently configured connection.
        db_tables.options = db.tables(config.get_value(['set', 'db_conn']))

    db_tables_box = HBox([db_tables, refresh_db_tables])
    upload_shp = Button(
        description='Create new table',
        value=False,
        button_style='info',
        tooltip='upload_shp.',
        icon='up'
    )
    upload_box = VBox([])

    @upload_shp.on_click
    def upload_shp_on_click(b):
        # Toggle the shapefile-upload panel open/closed on each click.
        if upload_box.children == ():
            upload_box.children = [ext_func.upload_shp(path_foi, True)]
        else:
            upload_box.children = ()

    spatial_box = VBox([spatial_info, upload_shp, upload_box, db_tables_box])

    # --- Step 3: thematic raster ---------------------------------------------
    img_info = HTML(
        """3. Thematic raster - classification raster, or raster from other
        source that will be used for testing heterogeneity and cardinality.<br>
        - Upload or generate raster base image.
        (Only upload is currently available)""")
    img_option = ToggleButtons(
        options=['Upload', 'Generate'],
        value=None,
        disabled=True,  # 'Generate' is not implemented yet; only upload works
        button_style='info',  # 'success', 'info', 'warning', 'danger' or ''
        tooltips=['Upnload your base image', 'Get from object storage']
    )

    def on_img_option_change(change):
        # Show the raster file picker only in 'Upload' mode.
        if img_option.value == 'Upload':
            img_box.children = [HBox([img_info, img_option, img_file])]
        else:
            img_box.children = ()

    img_option.observe(on_img_option_change, 'value')
    img_file = cbm_widgets.get_files_dropdown(
        f'{path_foi}raster', '.tif, .tiff', 'Select Raster')
    img_box = VBox([img_info, img_option, img_file])

    # --- Step 4: YAML class definitions --------------------------------------
    yml_info = HTML(
        """4. YAML file that holds the classes form the thematic raster.<br>
        - This can be also a simple list of values in the notebook
        corespondence between pixel values and names for the classes""")
    yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',
                                              'Select YML')
    yml_box = VBox([yml_info, yml_file])

    # --- Step 5: install FOI helper functions into the database --------------
    dbf_info = HTML("""5. Create database functions.<br>
        - Import required database functions for FOI analysis to the database""")
    dbf_insert = Button(
        value=False,
        button_style='info',
        tooltip='Create functions.',
        icon='fa-share-square'
    )

    @dbf_insert.on_click
    def dbf_insert_on_click(b):
        outlog('path_foi_func :', path_foi_func)
        progress.clear_output()
        try:
            functions = glob.glob(f"{path_foi_func}*.func")
            # NOTE(fix): this local used to be named 'db', shadowing the
            # imported 'cbm.datas.db' module with a plain string and breaking
            # the 'db.insert_function' call below. Keep the connection *name*
            # under a distinct identifier so the module stays reachable.
            db_conn = config.get_value(['set', 'db_conn'])
            sche = config.get_value(['db', db_conn, 'sche'])
            user = config.get_value(['db', db_conn, 'user'])
            for f in functions:
                # Each .func file is a SQL template parameterized by schema
                # and owner.
                db.insert_function(open(f).read().format(
                    schema=sche, owner=user))
                outlog(f"The '{f}' Was imported to the database.")
            finc_list = [
                f"ipycbm_{f.split('/')[-1].split('.')[0]}, " for f in functions]
            outlog(
                f"The functions: {('').join(finc_list)} where added to the database")
        except Exception as err:
            outlog("Could not add functions to dattabase.", err)

    dbf_box = VBox(
        [dbf_info, dbf_insert])

    # --- Step 6: FOI v1 parameters -------------------------------------------
    param_info = HTML(
        """6. Set FOI v1 Parameters""")
    # Heterogeneity thresholds: a parcel whose dominant-class pixel percentage
    # falls between MIN and MAX is flagged as heterogeneous.
    param_heto_info = HTML("""
        Minimum and maximum thresholds for heterogeneity checks. In the example,
        any parcel with percentage of pixels for one class between 30 and 70 from
        the total, will be considered heterogenous.
        """)
    param_min_het = IntText(
        value=30,
        description='MIN:',
        tooltip="Minimum threshold for heterogeneity checks",
        layout=Layout(width='150px')
    )
    param_max_het = IntText(
        value=70,
        description='MAX:',
        tooltip="Maximum threshold for heterogeneity checks",
        layout=Layout(width='150px')
    )
    param_area_info = HTML("""Minimum area for clusters selection -
        only clusters bigger from this threshold will be counted.
        """)
    param_area = IntText(
        value=2000,
        description='area:',
        tooltip="Minimum area for clusters selection.",
        layout=Layout(width='200px')
    )
    param_box = VBox([param_info,
                      param_heto_info, HBox([param_min_het, param_max_het]),
                      param_area_info, param_area
                      ])

    # --- Step 7: run the analysis --------------------------------------------
    run_info = Label("7. Run the FOI analysis.")
    run_analysis = Button(
        description='Run FOI v1',
        value=False,
        button_style='info',
        tooltip='Run FOI analysis version 1',
        icon='play',
    )
    run_box = VBox([run_info, run_analysis])

    @run_analysis.on_click
    def run_analysis_on_click(b):
        # Run inside the progress context so foi_v1's prints land in the UI.
        with progress:
            foi_v1.main(
                db_tables.value,
                f"{path_foi}raster/{img_file.children[1].children[0].value}",
                f"{path_foi}{yml_file.children[1].children[0].value}",
                param_min_het.value, param_max_het.value, param_area.value)

    wbox = VBox([foi_info,
                 config_box,
                 spatial_box,
                 img_box,
                 yml_box,
                 dbf_box,
                 param_box,
                 run_box,
                 progress])

    return wbox
def foi_tab_v2():
    """Build the notebook UI for FOI analysis, version 2 (database-free).

    Lays out the five FOI v2 steps as ipywidgets: shapefile selection,
    thematic raster upload, YAML class file selection, parameter entry and a
    run button that invokes ``foi_v2.main`` with the chosen inputs.

    Returns:
        VBox: the assembled widget tree, ready for display in a notebook.
    """
    path_foi = f"{config.get_value(['paths', 'temp'])}/foi/"
    progress = Output()

    def outlog(*text):
        # Print into the progress Output widget so messages show in the form.
        with progress:
            print(*text)

    foi_info = HTML("""FOI procedures version 2 (does not require access to a database).
        """, placeholder='FOI Information')

    # Step 1: vector data (parcels) to be tested.
    shp_info = HTML(
        """1. Spatial data to be tested -
        parcels that will be checked for heterogeneity and cardinality.""")
    shp_file = cbm_widgets.get_files_dropdown(
        f'{path_foi}vector', '', 'Select .shp', True, True)
    shp_box = VBox([shp_info, shp_file])

    # Step 2: thematic raster (only the Upload path is functional for now).
    img_info = HTML(
        """2. Thematic raster - classification raster, or raster from other
        source that will be used for testing heterogeneity and cardinality.<br>
        - Upload or generate raster base image.
        (Only upload is currently available)""")
    img_option = ToggleButtons(
        options=['Upload', 'Generate'],
        value=None,
        disabled=True,  # 'Generate' is not implemented; toggle stays inert
        button_style='',  # 'success', 'info', 'warning', 'danger' or ''
        tooltips=['Upnload your base image', 'Get from object storage']
    )

    def on_img_option_change(change):
        # Only the 'Upload' mode exposes the raster file picker.
        if img_option.value == 'Upload':
            img_box.children = [HBox([img_info, img_option, img_file])]
        else:
            img_box.children = ()

    img_option.observe(on_img_option_change, 'value')
    img_file = cbm_widgets.get_files_dropdown(
        f'{path_foi}raster', '.tif, .tiff', 'Select Raster')
    img_box = VBox([img_info, img_option, img_file])

    # Step 3: YAML file mapping raster pixel values to class names.
    yml_info = HTML(
        """3. YAML file that holds the classes form the thematic raster.<br>
        - This can be also a simple list of values in the notebook
        corespondence between pixel values and names for the classes""")
    yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',
                                              'Select YML')
    yml_box = VBox([yml_info, yml_file])

    # Step 4: analysis parameters.
    pre_info = Label("4. Set FOI v2 Parameters.")
    # Heterogeneity window: parcels whose single-class pixel share falls
    # between MIN and MAX are treated as heterogeneous.
    pre_het_note = HTML("""
        Minimum and maximum thresholds for heterogeneity checks. In the example,
        any parcel with percentage of pixels for one class between 30 and 70 from
        the total, will be considered heterogenous.
        """)
    pre_min_het = IntText(
        value=30,
        description='MIN:',
        tooltip="Minimum threshold for heterogeneity checks",
        disabled=False,
        layout=Layout(width='150px')
    )
    pre_max_het = IntText(
        value=70,
        description='MAX:',
        tooltip="Maximum threshold for heterogeneity checks",
        disabled=False,
        layout=Layout(width='150px')
    )
    pre_het_row = HBox([pre_min_het, pre_max_het])
    pre_min_cluster_size = IntText(
        value=20,
        description='pixels:',
        tooltip="Minimum area for clusters selection.",
        disabled=False,
        layout=Layout(width='200px')
    )
    pre_pixel_connectivity = IntText(
        value=8,
        description='connectivity type:',
        tooltip="Type of pixel connectivity in analysis. Accepted values: 4 or 8.",
        disabled=False,
        layout=Layout(width='200px')
    )
    pre_negative_buffer = IntText(
        value=-10,
        description='negative buffer:',
        tooltip="Negative buffer to be applied on the FOI",
        disabled=False,
        layout=Layout(width='200px')
    )
    pre_box = VBox([
        pre_info, pre_het_note, pre_het_row,
        pre_pixel_connectivity, pre_negative_buffer,
        HBox([pre_min_cluster_size,
              HTML("Minimum area for clusters selection - only clusters bigger from this threshold will be counted.")])
    ])

    # Step 5: run button wired to foi_v2.main.
    run_info = Label("5. Run the FOI analysis.")
    run_analysis = Button(
        description='Run FOI v2',
        value=False,
        disabled=False,
        button_style='info',
        tooltip='Run FOI analysis version 2',
        icon='play',
    )
    run_box = HBox([run_analysis])

    @run_analysis.on_click
    def run_analysis_on_click(b):
        # Execute inside the progress context so the procedure's output is
        # captured in the form.
        with progress:
            foi_v2.main(
                f"{path_foi}vector/{shp_file.children[1].children[0].value}",
                f"{path_foi}raster/{img_file.children[1].children[0].value}",
                f"{path_foi}{yml_file.children[1].children[0].value}",
                pre_negative_buffer.value,
                pre_min_het.value,
                pre_max_het.value,
                pre_pixel_connectivity.value,
                pre_min_cluster_size.value)

    wbox_v2 = VBox([foi_info,
                    shp_box,
                    img_box,
                    yml_box,
                    pre_box,
                    run_info,
                    run_box,
                    progress])

    return wbox_v2
|
normal
|
{
"blob_id": "2f9a081845685a4748c8b028ae4ee3a056a10284",
"index": 9779,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef foi_tab_v1():\n path_foi = f\"{config.get_value(['paths', 'temp'])}/foi/\"\n path_foi_func = foi_v1.path_foi_func\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n foi_info = HTML(\n 'FOI procedures version 1 (requires access to a database).\\n ',\n placeholder='FOI Information')\n config_info = HTML(value=\n \"\"\"1. Connect to database and object storage.<br>\n FOI procedures need direct access to the database. In case there no\n image is provided, access to object storage will be needed as well\n to generate the base image from sentinel images.\n \"\"\"\n , placeholder='FOI Information')\n config_conn = Button(value=False, button_style='info', tooltip=\n 'Configure db connection.', icon='cogs', layout=Layout(width='40px'))\n config_conn_box = HBox([])\n\n @config_conn.on_click\n def config_conn_on_click(b):\n if config_conn_box.children == ():\n config_conn_box.children = [settings_ds.direct_conn()]\n else:\n config_conn_box.children = ()\n config_box = VBox([config_info, config_conn, config_conn_box])\n spatial_info = HTML(\n \"\"\"2. 
Select the spatial data to be tested - parcels that will be\n checked for heterogeneity and cardinality.<br>\n - Select a table from the database\"\"\"\n )\n db_tables = Dropdown(options=[], description='db Tables:')\n refresh_db_tables = Button(value=False, button_style='info', tooltip=\n 'Get db tables.', icon='refresh', layout=Layout(width='40px'))\n\n @refresh_db_tables.on_click\n def refresh_db_tables_on_click(b):\n db_tables.options = db.tables(config.get_value(['set', 'db_conn']))\n db_tables_box = HBox([db_tables, refresh_db_tables])\n upload_shp = Button(description='Create new table', value=False,\n button_style='info', tooltip='upload_shp.', icon='up')\n upload_box = VBox([])\n\n @upload_shp.on_click\n def upload_shp_on_click(b):\n if upload_box.children == ():\n upload_box.children = [ext_func.upload_shp(path_foi, True)]\n else:\n upload_box.children = ()\n spatial_box = VBox([spatial_info, upload_shp, upload_box, db_tables_box])\n img_info = HTML(\n \"\"\"3. Thematic raster - classification raster, or raster from other\n source that will be used for testing heterogeneity and cardinality.<br>\n - Upload or generate raster base image.\n (Only upload is currently available)\"\"\"\n )\n img_option = ToggleButtons(options=['Upload', 'Generate'], value=None,\n disabled=True, button_style='info', tooltips=[\n 'Upnload your base image', 'Get from object storage'])\n\n def on_img_option_change(change):\n if img_option.value == 'Upload':\n img_box.children = [HBox([img_info, img_option, img_file])]\n else:\n img_box.children = ()\n img_option.observe(on_img_option_change, 'value')\n img_file = cbm_widgets.get_files_dropdown(f'{path_foi}raster',\n '.tif, .tiff', 'Select Raster')\n img_box = VBox([img_info, img_option, img_file])\n yml_info = HTML(\n \"\"\"4. 
YAML file that holds the classes form the thematic raster.<br>\n - This can be also a simple list of values in the notebook\n corespondence between pixel values and names for the classes\"\"\"\n )\n yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',\n 'Select YML')\n yml_box = VBox([yml_info, yml_file])\n dbf_info = HTML(\n \"\"\"5. Create database functions.<br>\n - Import required database functions for FOI analysis to the database\"\"\"\n )\n dbf_insert = Button(value=False, button_style='info', tooltip=\n 'Create functions.', icon='fa-share-square')\n\n @dbf_insert.on_click\n def dbf_insert_on_click(b):\n outlog('path_foi_func :', path_foi_func)\n progress.clear_output()\n try:\n functions = glob.glob(f'{path_foi_func}*.func')\n db = config.get_value(['set', 'db_conn'])\n sche = config.get_value(['db', db, 'sche'])\n user = config.get_value(['db', db, 'user'])\n for f in functions:\n db.insert_function(open(f).read().format(schema=sche, owner\n =user))\n outlog(f\"The '{f}' Was imported to the database.\")\n finc_list = [f\"ipycbm_{f.split('/')[-1].split('.')[0]}, \" for f in\n functions]\n outlog(\n f\"The functions: {''.join(finc_list)} where added to the database\"\n )\n except Exception as err:\n outlog('Could not add functions to dattabase.', err)\n dbf_box = VBox([dbf_info, dbf_insert])\n param_info = HTML('6. Set FOI v1 Parameters')\n param_heto_info = HTML(\n \"\"\"\n Minimum and maximum thresholds for heterogeneity checks. 
In the example,\n any parcel with percentage of pixels for one class between 30 and 70 from\n the total, will be considered heterogenous.\n \"\"\"\n )\n param_min_het = IntText(value=30, description='MIN:', tooltip=\n 'Minimum threshold for heterogeneity checks', layout=Layout(width=\n '150px'))\n param_max_het = IntText(value=70, description='MAX:', tooltip=\n 'Maximum threshold for heterogeneity checks', layout=Layout(width=\n '150px'))\n param_area_info = HTML(\n \"\"\"Minimum area for clusters selection -\n only clusters bigger from this threshold will be counted.\n \"\"\"\n )\n param_area = IntText(value=2000, description='area:', tooltip=\n 'Minimum area for clusters selection.', layout=Layout(width='200px'))\n param_box = VBox([param_info, param_heto_info, HBox([param_min_het,\n param_max_het]), param_area_info, param_area])\n run_info = Label('7. Run the FOI analysis.')\n run_analysis = Button(description='Run FOI v1', value=False,\n button_style='info', tooltip='Run FOI analysis version 1', icon='play')\n run_box = VBox([run_info, run_analysis])\n\n @run_analysis.on_click\n def run_analysis_on_click(b):\n with progress:\n foi_v1.main(db_tables.value,\n f'{path_foi}raster/{img_file.children[1].children[0].value}',\n f'{path_foi}{yml_file.children[1].children[0].value}',\n param_min_het.value, param_max_het.value, param_area.value)\n wbox = VBox([foi_info, config_box, spatial_box, img_box, yml_box,\n dbf_box, param_box, run_box, progress])\n return wbox\n\n\ndef foi_tab_v2():\n path_foi = f\"{config.get_value(['paths', 'temp'])}/foi/\"\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n foi_info = HTML(\n 'FOI procedures version 2 (does not require access to a database).\\n '\n , placeholder='FOI Information')\n shp_info = HTML(\n \"\"\"1. 
Spatial data to be tested -\n parcels that will be checked for heterogeneity and cardinality.\"\"\"\n )\n shp_file = cbm_widgets.get_files_dropdown(f'{path_foi}vector', '',\n 'Select .shp', True, True)\n shp_box = VBox([shp_info, shp_file])\n img_info = HTML(\n \"\"\"2. Thematic raster - classification raster, or raster from other\n source that will be used for testing heterogeneity and cardinality.<br>\n - Upload or generate raster base image.\n (Only upload is currently available)\"\"\"\n )\n img_option = ToggleButtons(options=['Upload', 'Generate'], value=None,\n disabled=True, button_style='', tooltips=['Upnload your base image',\n 'Get from object storage'])\n\n def on_img_option_change(change):\n if img_option.value == 'Upload':\n img_box.children = [HBox([img_info, img_option, img_file])]\n else:\n img_box.children = ()\n img_option.observe(on_img_option_change, 'value')\n img_file = cbm_widgets.get_files_dropdown(f'{path_foi}raster',\n '.tif, .tiff', 'Select Raster')\n img_box = VBox([img_info, img_option, img_file])\n yml_info = HTML(\n \"\"\"3. YAML file that holds the classes form the thematic raster.<br>\n - This can be also a simple list of values in the notebook\n corespondence between pixel values and names for the classes\"\"\"\n )\n yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',\n 'Select YML')\n yml_box = VBox([yml_info, yml_file])\n pre_info = Label('4. Set FOI v2 Parameters.')\n pre_heto_chec = HTML(\n \"\"\"\n Minimum and maximum thresholds for heterogeneity checks. 
In the example,\n any parcel with percentage of pixels for one class between 30 and 70 from\n the total, will be considered heterogenous.\n \"\"\"\n )\n pre_min_het = IntText(value=30, description='MIN:', tooltip=\n 'Minimum threshold for heterogeneity checks', disabled=False,\n layout=Layout(width='150px'))\n pre_max_het = IntText(value=70, description='MAX:', tooltip=\n 'Maximum threshold for heterogeneity checks', disabled=False,\n layout=Layout(width='150px'))\n pre_heto_chec_box = HBox([pre_min_het, pre_max_het])\n pre_min_cluster_size = IntText(value=20, description='pixels:', tooltip\n ='Minimum area for clusters selection.', disabled=False, layout=\n Layout(width='200px'))\n pre_pixel_connectivity = IntText(value=8, description=\n 'connectivity type:', tooltip=\n 'Type of pixel connectivity in analysis. Accepted values: 4 or 8.',\n disabled=False, layout=Layout(width='200px'))\n pre_negative_buffer = IntText(value=-10, description='negative buffer:',\n tooltip='Negative buffer to be applied on the FOI', disabled=False,\n layout=Layout(width='200px'))\n pre_box = VBox([pre_info, pre_heto_chec, pre_heto_chec_box,\n pre_pixel_connectivity, pre_negative_buffer, HBox([\n pre_min_cluster_size, HTML(\n 'Minimum area for clusters selection - only clusters bigger from this threshold will be counted.'\n )])])\n run_info = Label('5. 
Run the FOI analysis.')\n run_analysis = Button(description='Run FOI v2', value=False, disabled=\n False, button_style='info', tooltip='Run FOI analysis version 2',\n icon='play')\n run_box = HBox([run_analysis])\n\n @run_analysis.on_click\n def run_analysis_on_click(b):\n with progress:\n foi_v2.main(\n f'{path_foi}vector/{shp_file.children[1].children[0].value}',\n f'{path_foi}raster/{img_file.children[1].children[0].value}',\n f'{path_foi}{yml_file.children[1].children[0].value}',\n pre_negative_buffer.value, pre_min_het.value, pre_max_het.\n value, pre_pixel_connectivity.value, pre_min_cluster_size.value\n )\n wbox_v2 = VBox([foi_info, shp_box, img_box, yml_box, pre_box, run_info,\n run_box, progress])\n return wbox_v2\n",
"step-3": "<mask token>\ntry:\n from cbm.foi import foi_v2\nexcept Exception as err:\n print(err)\n\n\ndef foi_tab_v1():\n path_foi = f\"{config.get_value(['paths', 'temp'])}/foi/\"\n path_foi_func = foi_v1.path_foi_func\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n foi_info = HTML(\n 'FOI procedures version 1 (requires access to a database).\\n ',\n placeholder='FOI Information')\n config_info = HTML(value=\n \"\"\"1. Connect to database and object storage.<br>\n FOI procedures need direct access to the database. In case there no\n image is provided, access to object storage will be needed as well\n to generate the base image from sentinel images.\n \"\"\"\n , placeholder='FOI Information')\n config_conn = Button(value=False, button_style='info', tooltip=\n 'Configure db connection.', icon='cogs', layout=Layout(width='40px'))\n config_conn_box = HBox([])\n\n @config_conn.on_click\n def config_conn_on_click(b):\n if config_conn_box.children == ():\n config_conn_box.children = [settings_ds.direct_conn()]\n else:\n config_conn_box.children = ()\n config_box = VBox([config_info, config_conn, config_conn_box])\n spatial_info = HTML(\n \"\"\"2. 
Select the spatial data to be tested - parcels that will be\n checked for heterogeneity and cardinality.<br>\n - Select a table from the database\"\"\"\n )\n db_tables = Dropdown(options=[], description='db Tables:')\n refresh_db_tables = Button(value=False, button_style='info', tooltip=\n 'Get db tables.', icon='refresh', layout=Layout(width='40px'))\n\n @refresh_db_tables.on_click\n def refresh_db_tables_on_click(b):\n db_tables.options = db.tables(config.get_value(['set', 'db_conn']))\n db_tables_box = HBox([db_tables, refresh_db_tables])\n upload_shp = Button(description='Create new table', value=False,\n button_style='info', tooltip='upload_shp.', icon='up')\n upload_box = VBox([])\n\n @upload_shp.on_click\n def upload_shp_on_click(b):\n if upload_box.children == ():\n upload_box.children = [ext_func.upload_shp(path_foi, True)]\n else:\n upload_box.children = ()\n spatial_box = VBox([spatial_info, upload_shp, upload_box, db_tables_box])\n img_info = HTML(\n \"\"\"3. Thematic raster - classification raster, or raster from other\n source that will be used for testing heterogeneity and cardinality.<br>\n - Upload or generate raster base image.\n (Only upload is currently available)\"\"\"\n )\n img_option = ToggleButtons(options=['Upload', 'Generate'], value=None,\n disabled=True, button_style='info', tooltips=[\n 'Upnload your base image', 'Get from object storage'])\n\n def on_img_option_change(change):\n if img_option.value == 'Upload':\n img_box.children = [HBox([img_info, img_option, img_file])]\n else:\n img_box.children = ()\n img_option.observe(on_img_option_change, 'value')\n img_file = cbm_widgets.get_files_dropdown(f'{path_foi}raster',\n '.tif, .tiff', 'Select Raster')\n img_box = VBox([img_info, img_option, img_file])\n yml_info = HTML(\n \"\"\"4. 
YAML file that holds the classes form the thematic raster.<br>\n - This can be also a simple list of values in the notebook\n corespondence between pixel values and names for the classes\"\"\"\n )\n yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',\n 'Select YML')\n yml_box = VBox([yml_info, yml_file])\n dbf_info = HTML(\n \"\"\"5. Create database functions.<br>\n - Import required database functions for FOI analysis to the database\"\"\"\n )\n dbf_insert = Button(value=False, button_style='info', tooltip=\n 'Create functions.', icon='fa-share-square')\n\n @dbf_insert.on_click\n def dbf_insert_on_click(b):\n outlog('path_foi_func :', path_foi_func)\n progress.clear_output()\n try:\n functions = glob.glob(f'{path_foi_func}*.func')\n db = config.get_value(['set', 'db_conn'])\n sche = config.get_value(['db', db, 'sche'])\n user = config.get_value(['db', db, 'user'])\n for f in functions:\n db.insert_function(open(f).read().format(schema=sche, owner\n =user))\n outlog(f\"The '{f}' Was imported to the database.\")\n finc_list = [f\"ipycbm_{f.split('/')[-1].split('.')[0]}, \" for f in\n functions]\n outlog(\n f\"The functions: {''.join(finc_list)} where added to the database\"\n )\n except Exception as err:\n outlog('Could not add functions to dattabase.', err)\n dbf_box = VBox([dbf_info, dbf_insert])\n param_info = HTML('6. Set FOI v1 Parameters')\n param_heto_info = HTML(\n \"\"\"\n Minimum and maximum thresholds for heterogeneity checks. 
In the example,\n any parcel with percentage of pixels for one class between 30 and 70 from\n the total, will be considered heterogenous.\n \"\"\"\n )\n param_min_het = IntText(value=30, description='MIN:', tooltip=\n 'Minimum threshold for heterogeneity checks', layout=Layout(width=\n '150px'))\n param_max_het = IntText(value=70, description='MAX:', tooltip=\n 'Maximum threshold for heterogeneity checks', layout=Layout(width=\n '150px'))\n param_area_info = HTML(\n \"\"\"Minimum area for clusters selection -\n only clusters bigger from this threshold will be counted.\n \"\"\"\n )\n param_area = IntText(value=2000, description='area:', tooltip=\n 'Minimum area for clusters selection.', layout=Layout(width='200px'))\n param_box = VBox([param_info, param_heto_info, HBox([param_min_het,\n param_max_het]), param_area_info, param_area])\n run_info = Label('7. Run the FOI analysis.')\n run_analysis = Button(description='Run FOI v1', value=False,\n button_style='info', tooltip='Run FOI analysis version 1', icon='play')\n run_box = VBox([run_info, run_analysis])\n\n @run_analysis.on_click\n def run_analysis_on_click(b):\n with progress:\n foi_v1.main(db_tables.value,\n f'{path_foi}raster/{img_file.children[1].children[0].value}',\n f'{path_foi}{yml_file.children[1].children[0].value}',\n param_min_het.value, param_max_het.value, param_area.value)\n wbox = VBox([foi_info, config_box, spatial_box, img_box, yml_box,\n dbf_box, param_box, run_box, progress])\n return wbox\n\n\ndef foi_tab_v2():\n path_foi = f\"{config.get_value(['paths', 'temp'])}/foi/\"\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n foi_info = HTML(\n 'FOI procedures version 2 (does not require access to a database).\\n '\n , placeholder='FOI Information')\n shp_info = HTML(\n \"\"\"1. 
Spatial data to be tested -\n parcels that will be checked for heterogeneity and cardinality.\"\"\"\n )\n shp_file = cbm_widgets.get_files_dropdown(f'{path_foi}vector', '',\n 'Select .shp', True, True)\n shp_box = VBox([shp_info, shp_file])\n img_info = HTML(\n \"\"\"2. Thematic raster - classification raster, or raster from other\n source that will be used for testing heterogeneity and cardinality.<br>\n - Upload or generate raster base image.\n (Only upload is currently available)\"\"\"\n )\n img_option = ToggleButtons(options=['Upload', 'Generate'], value=None,\n disabled=True, button_style='', tooltips=['Upnload your base image',\n 'Get from object storage'])\n\n def on_img_option_change(change):\n if img_option.value == 'Upload':\n img_box.children = [HBox([img_info, img_option, img_file])]\n else:\n img_box.children = ()\n img_option.observe(on_img_option_change, 'value')\n img_file = cbm_widgets.get_files_dropdown(f'{path_foi}raster',\n '.tif, .tiff', 'Select Raster')\n img_box = VBox([img_info, img_option, img_file])\n yml_info = HTML(\n \"\"\"3. YAML file that holds the classes form the thematic raster.<br>\n - This can be also a simple list of values in the notebook\n corespondence between pixel values and names for the classes\"\"\"\n )\n yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',\n 'Select YML')\n yml_box = VBox([yml_info, yml_file])\n pre_info = Label('4. Set FOI v2 Parameters.')\n pre_heto_chec = HTML(\n \"\"\"\n Minimum and maximum thresholds for heterogeneity checks. 
In the example,\n any parcel with percentage of pixels for one class between 30 and 70 from\n the total, will be considered heterogenous.\n \"\"\"\n )\n pre_min_het = IntText(value=30, description='MIN:', tooltip=\n 'Minimum threshold for heterogeneity checks', disabled=False,\n layout=Layout(width='150px'))\n pre_max_het = IntText(value=70, description='MAX:', tooltip=\n 'Maximum threshold for heterogeneity checks', disabled=False,\n layout=Layout(width='150px'))\n pre_heto_chec_box = HBox([pre_min_het, pre_max_het])\n pre_min_cluster_size = IntText(value=20, description='pixels:', tooltip\n ='Minimum area for clusters selection.', disabled=False, layout=\n Layout(width='200px'))\n pre_pixel_connectivity = IntText(value=8, description=\n 'connectivity type:', tooltip=\n 'Type of pixel connectivity in analysis. Accepted values: 4 or 8.',\n disabled=False, layout=Layout(width='200px'))\n pre_negative_buffer = IntText(value=-10, description='negative buffer:',\n tooltip='Negative buffer to be applied on the FOI', disabled=False,\n layout=Layout(width='200px'))\n pre_box = VBox([pre_info, pre_heto_chec, pre_heto_chec_box,\n pre_pixel_connectivity, pre_negative_buffer, HBox([\n pre_min_cluster_size, HTML(\n 'Minimum area for clusters selection - only clusters bigger from this threshold will be counted.'\n )])])\n run_info = Label('5. 
Run the FOI analysis.')\n run_analysis = Button(description='Run FOI v2', value=False, disabled=\n False, button_style='info', tooltip='Run FOI analysis version 2',\n icon='play')\n run_box = HBox([run_analysis])\n\n @run_analysis.on_click\n def run_analysis_on_click(b):\n with progress:\n foi_v2.main(\n f'{path_foi}vector/{shp_file.children[1].children[0].value}',\n f'{path_foi}raster/{img_file.children[1].children[0].value}',\n f'{path_foi}{yml_file.children[1].children[0].value}',\n pre_negative_buffer.value, pre_min_het.value, pre_max_het.\n value, pre_pixel_connectivity.value, pre_min_cluster_size.value\n )\n wbox_v2 = VBox([foi_info, shp_box, img_box, yml_box, pre_box, run_info,\n run_box, progress])\n return wbox_v2\n",
"step-4": "import os\nimport glob\nfrom ipywidgets import Text, Label, HBox, VBox, Layout, Dropdown, ToggleButtons, Output, HTML, Button, FileUpload, IntText, RadioButtons\nfrom cbm.utils import config\nfrom cbm.ipycbm.utils import settings_ds, cbm_widgets\nfrom cbm.ipycbm.ipy_ext import ext_func\nfrom cbm.foi import foi_v1\nfrom cbm.datas import db\ntry:\n from cbm.foi import foi_v2\nexcept Exception as err:\n print(err)\n\n\ndef foi_tab_v1():\n path_foi = f\"{config.get_value(['paths', 'temp'])}/foi/\"\n path_foi_func = foi_v1.path_foi_func\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n foi_info = HTML(\n 'FOI procedures version 1 (requires access to a database).\\n ',\n placeholder='FOI Information')\n config_info = HTML(value=\n \"\"\"1. Connect to database and object storage.<br>\n FOI procedures need direct access to the database. In case there no\n image is provided, access to object storage will be needed as well\n to generate the base image from sentinel images.\n \"\"\"\n , placeholder='FOI Information')\n config_conn = Button(value=False, button_style='info', tooltip=\n 'Configure db connection.', icon='cogs', layout=Layout(width='40px'))\n config_conn_box = HBox([])\n\n @config_conn.on_click\n def config_conn_on_click(b):\n if config_conn_box.children == ():\n config_conn_box.children = [settings_ds.direct_conn()]\n else:\n config_conn_box.children = ()\n config_box = VBox([config_info, config_conn, config_conn_box])\n spatial_info = HTML(\n \"\"\"2. 
Select the spatial data to be tested - parcels that will be\n checked for heterogeneity and cardinality.<br>\n - Select a table from the database\"\"\"\n )\n db_tables = Dropdown(options=[], description='db Tables:')\n refresh_db_tables = Button(value=False, button_style='info', tooltip=\n 'Get db tables.', icon='refresh', layout=Layout(width='40px'))\n\n @refresh_db_tables.on_click\n def refresh_db_tables_on_click(b):\n db_tables.options = db.tables(config.get_value(['set', 'db_conn']))\n db_tables_box = HBox([db_tables, refresh_db_tables])\n upload_shp = Button(description='Create new table', value=False,\n button_style='info', tooltip='upload_shp.', icon='up')\n upload_box = VBox([])\n\n @upload_shp.on_click\n def upload_shp_on_click(b):\n if upload_box.children == ():\n upload_box.children = [ext_func.upload_shp(path_foi, True)]\n else:\n upload_box.children = ()\n spatial_box = VBox([spatial_info, upload_shp, upload_box, db_tables_box])\n img_info = HTML(\n \"\"\"3. Thematic raster - classification raster, or raster from other\n source that will be used for testing heterogeneity and cardinality.<br>\n - Upload or generate raster base image.\n (Only upload is currently available)\"\"\"\n )\n img_option = ToggleButtons(options=['Upload', 'Generate'], value=None,\n disabled=True, button_style='info', tooltips=[\n 'Upnload your base image', 'Get from object storage'])\n\n def on_img_option_change(change):\n if img_option.value == 'Upload':\n img_box.children = [HBox([img_info, img_option, img_file])]\n else:\n img_box.children = ()\n img_option.observe(on_img_option_change, 'value')\n img_file = cbm_widgets.get_files_dropdown(f'{path_foi}raster',\n '.tif, .tiff', 'Select Raster')\n img_box = VBox([img_info, img_option, img_file])\n yml_info = HTML(\n \"\"\"4. 
YAML file that holds the classes form the thematic raster.<br>\n - This can be also a simple list of values in the notebook\n corespondence between pixel values and names for the classes\"\"\"\n )\n yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',\n 'Select YML')\n yml_box = VBox([yml_info, yml_file])\n dbf_info = HTML(\n \"\"\"5. Create database functions.<br>\n - Import required database functions for FOI analysis to the database\"\"\"\n )\n dbf_insert = Button(value=False, button_style='info', tooltip=\n 'Create functions.', icon='fa-share-square')\n\n @dbf_insert.on_click\n def dbf_insert_on_click(b):\n outlog('path_foi_func :', path_foi_func)\n progress.clear_output()\n try:\n functions = glob.glob(f'{path_foi_func}*.func')\n db = config.get_value(['set', 'db_conn'])\n sche = config.get_value(['db', db, 'sche'])\n user = config.get_value(['db', db, 'user'])\n for f in functions:\n db.insert_function(open(f).read().format(schema=sche, owner\n =user))\n outlog(f\"The '{f}' Was imported to the database.\")\n finc_list = [f\"ipycbm_{f.split('/')[-1].split('.')[0]}, \" for f in\n functions]\n outlog(\n f\"The functions: {''.join(finc_list)} where added to the database\"\n )\n except Exception as err:\n outlog('Could not add functions to dattabase.', err)\n dbf_box = VBox([dbf_info, dbf_insert])\n param_info = HTML('6. Set FOI v1 Parameters')\n param_heto_info = HTML(\n \"\"\"\n Minimum and maximum thresholds for heterogeneity checks. 
In the example,\n any parcel with percentage of pixels for one class between 30 and 70 from\n the total, will be considered heterogenous.\n \"\"\"\n )\n param_min_het = IntText(value=30, description='MIN:', tooltip=\n 'Minimum threshold for heterogeneity checks', layout=Layout(width=\n '150px'))\n param_max_het = IntText(value=70, description='MAX:', tooltip=\n 'Maximum threshold for heterogeneity checks', layout=Layout(width=\n '150px'))\n param_area_info = HTML(\n \"\"\"Minimum area for clusters selection -\n only clusters bigger from this threshold will be counted.\n \"\"\"\n )\n param_area = IntText(value=2000, description='area:', tooltip=\n 'Minimum area for clusters selection.', layout=Layout(width='200px'))\n param_box = VBox([param_info, param_heto_info, HBox([param_min_het,\n param_max_het]), param_area_info, param_area])\n run_info = Label('7. Run the FOI analysis.')\n run_analysis = Button(description='Run FOI v1', value=False,\n button_style='info', tooltip='Run FOI analysis version 1', icon='play')\n run_box = VBox([run_info, run_analysis])\n\n @run_analysis.on_click\n def run_analysis_on_click(b):\n with progress:\n foi_v1.main(db_tables.value,\n f'{path_foi}raster/{img_file.children[1].children[0].value}',\n f'{path_foi}{yml_file.children[1].children[0].value}',\n param_min_het.value, param_max_het.value, param_area.value)\n wbox = VBox([foi_info, config_box, spatial_box, img_box, yml_box,\n dbf_box, param_box, run_box, progress])\n return wbox\n\n\ndef foi_tab_v2():\n path_foi = f\"{config.get_value(['paths', 'temp'])}/foi/\"\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n foi_info = HTML(\n 'FOI procedures version 2 (does not require access to a database).\\n '\n , placeholder='FOI Information')\n shp_info = HTML(\n \"\"\"1. 
Spatial data to be tested -\n parcels that will be checked for heterogeneity and cardinality.\"\"\"\n )\n shp_file = cbm_widgets.get_files_dropdown(f'{path_foi}vector', '',\n 'Select .shp', True, True)\n shp_box = VBox([shp_info, shp_file])\n img_info = HTML(\n \"\"\"2. Thematic raster - classification raster, or raster from other\n source that will be used for testing heterogeneity and cardinality.<br>\n - Upload or generate raster base image.\n (Only upload is currently available)\"\"\"\n )\n img_option = ToggleButtons(options=['Upload', 'Generate'], value=None,\n disabled=True, button_style='', tooltips=['Upnload your base image',\n 'Get from object storage'])\n\n def on_img_option_change(change):\n if img_option.value == 'Upload':\n img_box.children = [HBox([img_info, img_option, img_file])]\n else:\n img_box.children = ()\n img_option.observe(on_img_option_change, 'value')\n img_file = cbm_widgets.get_files_dropdown(f'{path_foi}raster',\n '.tif, .tiff', 'Select Raster')\n img_box = VBox([img_info, img_option, img_file])\n yml_info = HTML(\n \"\"\"3. YAML file that holds the classes form the thematic raster.<br>\n - This can be also a simple list of values in the notebook\n corespondence between pixel values and names for the classes\"\"\"\n )\n yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',\n 'Select YML')\n yml_box = VBox([yml_info, yml_file])\n pre_info = Label('4. Set FOI v2 Parameters.')\n pre_heto_chec = HTML(\n \"\"\"\n Minimum and maximum thresholds for heterogeneity checks. 
In the example,\n any parcel with percentage of pixels for one class between 30 and 70 from\n the total, will be considered heterogenous.\n \"\"\"\n )\n pre_min_het = IntText(value=30, description='MIN:', tooltip=\n 'Minimum threshold for heterogeneity checks', disabled=False,\n layout=Layout(width='150px'))\n pre_max_het = IntText(value=70, description='MAX:', tooltip=\n 'Maximum threshold for heterogeneity checks', disabled=False,\n layout=Layout(width='150px'))\n pre_heto_chec_box = HBox([pre_min_het, pre_max_het])\n pre_min_cluster_size = IntText(value=20, description='pixels:', tooltip\n ='Minimum area for clusters selection.', disabled=False, layout=\n Layout(width='200px'))\n pre_pixel_connectivity = IntText(value=8, description=\n 'connectivity type:', tooltip=\n 'Type of pixel connectivity in analysis. Accepted values: 4 or 8.',\n disabled=False, layout=Layout(width='200px'))\n pre_negative_buffer = IntText(value=-10, description='negative buffer:',\n tooltip='Negative buffer to be applied on the FOI', disabled=False,\n layout=Layout(width='200px'))\n pre_box = VBox([pre_info, pre_heto_chec, pre_heto_chec_box,\n pre_pixel_connectivity, pre_negative_buffer, HBox([\n pre_min_cluster_size, HTML(\n 'Minimum area for clusters selection - only clusters bigger from this threshold will be counted.'\n )])])\n run_info = Label('5. 
Run the FOI analysis.')\n run_analysis = Button(description='Run FOI v2', value=False, disabled=\n False, button_style='info', tooltip='Run FOI analysis version 2',\n icon='play')\n run_box = HBox([run_analysis])\n\n @run_analysis.on_click\n def run_analysis_on_click(b):\n with progress:\n foi_v2.main(\n f'{path_foi}vector/{shp_file.children[1].children[0].value}',\n f'{path_foi}raster/{img_file.children[1].children[0].value}',\n f'{path_foi}{yml_file.children[1].children[0].value}',\n pre_negative_buffer.value, pre_min_het.value, pre_max_het.\n value, pre_pixel_connectivity.value, pre_min_cluster_size.value\n )\n wbox_v2 = VBox([foi_info, shp_box, img_box, yml_box, pre_box, run_info,\n run_box, progress])\n return wbox_v2\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# This file is part of CbM (https://github.com/ec-jrc/cbm).\n# Author : Konstantinos Anastasakis\n# Credits : GTCAP Team\n# Copyright : 2021 European Commission, Joint Research Centre\n# License : 3-Clause BSD\n\n\nimport os\nimport glob\nfrom ipywidgets import (Text, Label, HBox, VBox, Layout, Dropdown,\n ToggleButtons, Output, HTML, Button,\n FileUpload, IntText, RadioButtons)\n\nfrom cbm.utils import config\nfrom cbm.ipycbm.utils import settings_ds, cbm_widgets\nfrom cbm.ipycbm.ipy_ext import ext_func\nfrom cbm.foi import foi_v1\nfrom cbm.datas import db\ntry:\n from cbm.foi import foi_v2\nexcept Exception as err:\n print(err)\n\n\ndef foi_tab_v1():\n path_foi = f\"{config.get_value(['paths', 'temp'])}/foi/\"\n path_foi_func = foi_v1.path_foi_func\n\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n foi_info = HTML(\"\"\"FOI procedures version 1 (requires access to a database).\n \"\"\", placeholder='FOI Information')\n\n # Connect to database\n\n config_info = HTML(value=\"\"\"1. Connect to database and object storage.<br>\n FOI procedures need direct access to the database. In case there no\n image is provided, access to object storage will be needed as well\n to generate the base image from sentinel images.\n \"\"\", placeholder='FOI Information')\n config_conn = Button(\n value=False,\n button_style='info',\n tooltip='Configure db connection.',\n icon='cogs',\n layout=Layout(width='40px')\n )\n\n config_conn_box = HBox([])\n\n @config_conn.on_click\n def config_conn_on_click(b):\n if config_conn_box.children == ():\n config_conn_box.children = [settings_ds.direct_conn()]\n else:\n config_conn_box.children = ()\n\n config_box = VBox([config_info, config_conn,\n config_conn_box])\n\n # Spatial data to be tested\n spatial_info = HTML(\n \"\"\"2. 
Select the spatial data to be tested - parcels that will be\n checked for heterogeneity and cardinality.<br>\n - Select a table from the database\"\"\")\n\n db_tables = Dropdown(\n options=[],\n description='db Tables:'\n )\n refresh_db_tables = Button(\n value=False,\n button_style='info',\n tooltip='Get db tables.',\n icon='refresh',\n layout=Layout(width='40px')\n )\n\n @refresh_db_tables.on_click\n def refresh_db_tables_on_click(b):\n db_tables.options = db.tables(config.get_value(['set', 'db_conn']))\n\n db_tables_box = HBox([db_tables, refresh_db_tables])\n\n upload_shp = Button(\n description='Create new table',\n value=False,\n button_style='info',\n tooltip='upload_shp.',\n icon='up'\n )\n\n upload_box = VBox([])\n\n @upload_shp.on_click\n def upload_shp_on_click(b):\n if upload_box.children == ():\n upload_box.children = [ext_func.upload_shp(path_foi, True)]\n else:\n upload_box.children = ()\n spatial_box = VBox([spatial_info, upload_shp, upload_box, db_tables_box])\n\n # Thematic raster.\n img_info = HTML(\n \"\"\"3. Thematic raster - classification raster, or raster from other\n source that will be used for testing heterogeneity and cardinality.<br>\n - Upload or generate raster base image.\n (Only upload is currently available)\"\"\")\n img_option = ToggleButtons(\n options=['Upload', 'Generate'],\n value=None,\n disabled=True,\n button_style='info', # 'success', 'info', 'warning', 'danger' or ''\n tooltips=['Upnload your base image', 'Get from object storage']\n )\n\n def on_img_option_change(change):\n if img_option.value == 'Upload':\n img_box.children = [HBox([img_info, img_option, img_file])]\n else:\n img_box.children = ()\n img_option.observe(on_img_option_change, 'value')\n\n img_file = cbm_widgets.get_files_dropdown(\n f'{path_foi}raster', '.tif, .tiff', 'Select Raster')\n img_box = VBox([img_info, img_option, img_file])\n\n # YAML File upload\n yml_info = HTML(\n \"\"\"4. 
YAML file that holds the classes form the thematic raster.<br>\n - This can be also a simple list of values in the notebook\n corespondence between pixel values and names for the classes\"\"\")\n\n yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',\n 'Select YML')\n yml_box = VBox([yml_info, yml_file])\n\n # Database functions\n dbf_info = HTML(\"\"\"5. Create database functions.<br>\n - Import required database functions for FOI analysis to the database\"\"\")\n\n dbf_insert = Button(\n value=False,\n button_style='info',\n tooltip='Create functions.',\n icon='fa-share-square'\n )\n\n @dbf_insert.on_click\n def dbf_insert_on_click(b):\n outlog('path_foi_func :', path_foi_func)\n progress.clear_output()\n try:\n functions = glob.glob(f\"{path_foi_func}*.func\")\n db = config.get_value(['set', 'db_conn'])\n sche = config.get_value(['db', db, 'sche'])\n user = config.get_value(['db', db, 'user'])\n\n for f in functions:\n db.insert_function(open(f).read().format(\n schema=sche, owner=user))\n outlog(f\"The '{f}' Was imported to the database.\")\n finc_list = [\n f\"ipycbm_{f.split('/')[-1].split('.')[0]}, \" for f in functions]\n outlog(\n f\"The functions: {('').join(finc_list)} where added to the database\")\n except Exception as err:\n outlog(\"Could not add functions to dattabase.\", err)\n\n dbf_box = VBox(\n [dbf_info, dbf_insert])\n\n # FOI Parameters\n param_info = HTML(\n \"\"\"6. Set FOI v1 Parameters\"\"\")\n\n # heterogeneity_threshold\n param_heto_info = HTML(\"\"\"\n Minimum and maximum thresholds for heterogeneity checks. 
In the example,\n any parcel with percentage of pixels for one class between 30 and 70 from\n the total, will be considered heterogenous.\n \"\"\")\n param_min_het = IntText(\n value=30,\n description='MIN:',\n tooltip=\"Minimum threshold for heterogeneity checks\",\n layout=Layout(width='150px')\n )\n param_max_het = IntText(\n value=70,\n description='MAX:',\n tooltip=\"Maximum threshold for heterogeneity checks\",\n layout=Layout(width='150px')\n )\n\n param_area_info = HTML(\"\"\"Minimum area for clusters selection -\n only clusters bigger from this threshold will be counted.\n \"\"\")\n param_area = IntText(\n value=2000,\n description='area:',\n tooltip=\"Minimum area for clusters selection.\",\n layout=Layout(width='200px')\n )\n\n param_box = VBox([param_info,\n param_heto_info, HBox([param_min_het, param_max_het]),\n param_area_info, param_area\n ])\n\n # Run FOI analysis\n run_info = Label(\"7. Run the FOI analysis.\")\n run_analysis = Button(\n description='Run FOI v1',\n value=False,\n button_style='info',\n tooltip='Run FOI analysis version 1',\n icon='play',\n )\n run_box = VBox([run_info, run_analysis])\n\n @run_analysis.on_click\n def run_analysis_on_click(b):\n with progress:\n foi_v1.main(\n db_tables.value,\n f\"{path_foi}raster/{img_file.children[1].children[0].value}\",\n f\"{path_foi}{yml_file.children[1].children[0].value}\",\n param_min_het.value, param_max_het.value, param_area.value)\n\n wbox = VBox([foi_info,\n config_box,\n spatial_box,\n img_box,\n yml_box,\n dbf_box,\n param_box,\n run_box,\n progress])\n\n return wbox\n\n\ndef foi_tab_v2():\n path_foi = f\"{config.get_value(['paths', 'temp'])}/foi/\"\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n foi_info = HTML(\"\"\"FOI procedures version 2 (does not require access to a database).\n \"\"\", placeholder='FOI Information')\n\n # Vector file\n shp_info = HTML(\n \"\"\"1. 
Spatial data to be tested -\n parcels that will be checked for heterogeneity and cardinality.\"\"\")\n shp_file = cbm_widgets.get_files_dropdown(\n f'{path_foi}vector', '', 'Select .shp', True, True)\n shp_box = VBox([shp_info, shp_file])\n\n # Thematic raster.\n img_info = HTML(\n \"\"\"2. Thematic raster - classification raster, or raster from other\n source that will be used for testing heterogeneity and cardinality.<br>\n - Upload or generate raster base image.\n (Only upload is currently available)\"\"\")\n img_option = ToggleButtons(\n options=['Upload', 'Generate'],\n value=None,\n disabled=True,\n button_style='', # 'success', 'info', 'warning', 'danger' or ''\n tooltips=['Upnload your base image', 'Get from object storage']\n )\n\n def on_img_option_change(change):\n if img_option.value == 'Upload':\n img_box.children = [HBox([img_info, img_option, img_file])]\n else:\n img_box.children = ()\n img_option.observe(on_img_option_change, 'value')\n img_file = cbm_widgets.get_files_dropdown(\n f'{path_foi}raster', '.tif, .tiff', 'Select Raster')\n img_box = VBox([img_info, img_option, img_file])\n\n # YAML File upload\n yml_info = HTML(\n \"\"\"3. YAML file that holds the classes form the thematic raster.<br>\n - This can be also a simple list of values in the notebook\n corespondence between pixel values and names for the classes\"\"\")\n yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',\n 'Select YML')\n yml_box = VBox([yml_info, yml_file])\n\n # FOI Prerequisites\n pre_info = Label(\"4. Set FOI v2 Parameters.\")\n\n # heterogeneity_threshold\n pre_heto_chec = HTML(\"\"\"\n Minimum and maximum thresholds for heterogeneity checks. 
In the example,\n any parcel with percentage of pixels for one class between 30 and 70 from\n the total, will be considered heterogenous.\n \"\"\")\n pre_min_het = IntText(\n value=30,\n description='MIN:',\n tooltip=\"Minimum threshold for heterogeneity checks\",\n disabled=False,\n layout=Layout(width='150px')\n )\n pre_max_het = IntText(\n value=70,\n description='MAX:',\n tooltip=\"Maximum threshold for heterogeneity checks\",\n disabled=False,\n layout=Layout(width='150px')\n )\n pre_heto_chec_box = HBox([pre_min_het, pre_max_het])\n pre_min_cluster_size = IntText(\n value=20,\n description='pixels:',\n tooltip=\"Minimum area for clusters selection.\",\n disabled=False,\n layout=Layout(width='200px')\n )\n pre_pixel_connectivity = IntText(\n value=8,\n description='connectivity type:',\n tooltip=\"Type of pixel connectivity in analysis. Accepted values: 4 or 8.\",\n disabled=False,\n layout=Layout(width='200px')\n )\n pre_negative_buffer = IntText(\n value=-10,\n description='negative buffer:',\n tooltip=\"Negative buffer to be applied on the FOI\",\n disabled=False,\n layout=Layout(width='200px')\n )\n\n pre_box = VBox([\n pre_info, pre_heto_chec, pre_heto_chec_box,\n pre_pixel_connectivity, pre_negative_buffer,\n HBox([pre_min_cluster_size,\n HTML(\"Minimum area for clusters selection - only clusters bigger from this threshold will be counted.\")])\n ])\n\n # Run FOI analysis\n run_info = Label(\"5. 
Run the FOI analysis.\")\n run_analysis = Button(\n description='Run FOI v2',\n value=False,\n disabled=False,\n button_style='info',\n tooltip='Run FOI analysis version 2',\n icon='play',\n )\n run_box = HBox([run_analysis])\n\n @run_analysis.on_click\n def run_analysis_on_click(b):\n with progress:\n foi_v2.main(\n f\"{path_foi}vector/{shp_file.children[1].children[0].value}\",\n f\"{path_foi}raster/{img_file.children[1].children[0].value}\",\n f\"{path_foi}{yml_file.children[1].children[0].value}\",\n pre_negative_buffer.value,\n pre_min_het.value,\n pre_max_het.value,\n pre_pixel_connectivity.value,\n pre_min_cluster_size.value)\n\n wbox_v2 = VBox([foi_info,\n shp_box,\n img_box,\n yml_box,\n pre_box,\n run_info,\n run_box,\n progress])\n\n return wbox_v2\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
import pytest
from flaat.issuers import IssuerConfig, is_url
from flaat.test_env import FLAAT_AT, FLAAT_ISS, environment
class TestURLs:
    """Unit tests for the ``is_url`` helper."""

    def test_url_1(self):
        assert is_url("http://heise.de")

    def test_valid_url_http(self):
        assert is_url("http://heise.de")

    def test_valid_url_https(self):
        # Was asserting an http:// URL (copy-paste); test an https:// URL
        # as the test name intends.
        assert is_url("https://heise.de")

    def test_valid_url_ftp(self):
        # Was asserting an http:// URL (copy-paste); test an ftp:// URL as
        # the test name intends.
        # NOTE(review): confirm is_url is scheme-agnostic and accepts ftp.
        assert is_url("ftp://heise.de")

    def test_valid_url_https_path(self):
        assert is_url("https://heise.de/thi_s&is=difficult")

    def test_invalid_url(self):
        assert not is_url("htp://heise.de")
def test_token_introspection():
    """Introspect FLAAT_AT against the configured issuer.

    Skipped unless client credentials are provided via the environment.
    """
    cid = environment.get("FLAAT_CLIENT_ID")
    secret = environment.get("FLAAT_CLIENT_SECRET")
    if cid is None or secret is None:  # pragma: no cover
        pytest.skip("FLAAT_CLIENT_ID and FLAAT_CLIENT_SECRET are not set")

    cfg = IssuerConfig.get_from_string(FLAAT_ISS)
    assert cfg is not None

    cfg.client_id = cid
    cfg.client_secret = secret
    introspected = cfg._get_introspected_token_info(FLAAT_AT)
    assert introspected is not None
|
normal
|
{
"blob_id": "021f224d031477bd305644261ad4d79d9eca98b3",
"index": 5474,
"step-1": "<mask token>\n\n\nclass TestURLs:\n\n def test_url_1(self):\n assert is_url('http://heise.de')\n <mask token>\n <mask token>\n <mask token>\n\n def test_valid_url_https_path(self):\n assert is_url('https://heise.de/thi_s&is=difficult')\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestURLs:\n\n def test_url_1(self):\n assert is_url('http://heise.de')\n <mask token>\n <mask token>\n <mask token>\n\n def test_valid_url_https_path(self):\n assert is_url('https://heise.de/thi_s&is=difficult')\n\n def test_invalid_url(self):\n assert not is_url('htp://heise.de')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestURLs:\n\n def test_url_1(self):\n assert is_url('http://heise.de')\n\n def test_valid_url_http(self):\n assert is_url('http://heise.de')\n\n def test_valid_url_https(self):\n assert is_url('http://heise.de')\n\n def test_valid_url_ftp(self):\n assert is_url('http://heise.de')\n\n def test_valid_url_https_path(self):\n assert is_url('https://heise.de/thi_s&is=difficult')\n\n def test_invalid_url(self):\n assert not is_url('htp://heise.de')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass TestURLs:\n\n def test_url_1(self):\n assert is_url('http://heise.de')\n\n def test_valid_url_http(self):\n assert is_url('http://heise.de')\n\n def test_valid_url_https(self):\n assert is_url('http://heise.de')\n\n def test_valid_url_ftp(self):\n assert is_url('http://heise.de')\n\n def test_valid_url_https_path(self):\n assert is_url('https://heise.de/thi_s&is=difficult')\n\n def test_invalid_url(self):\n assert not is_url('htp://heise.de')\n\n\ndef test_token_introspection():\n client_id = environment.get('FLAAT_CLIENT_ID')\n client_secret = environment.get('FLAAT_CLIENT_SECRET')\n if client_id is None or client_secret is None:\n pytest.skip('FLAAT_CLIENT_ID and FLAAT_CLIENT_SECRET are not set')\n issuer_config = IssuerConfig.get_from_string(FLAAT_ISS)\n assert issuer_config is not None\n issuer_config.client_id = client_id\n issuer_config.client_secret = client_secret\n introspection_info = issuer_config._get_introspected_token_info(FLAAT_AT)\n assert introspection_info is not None\n",
"step-5": "import pytest\n\nfrom flaat.issuers import IssuerConfig, is_url\nfrom flaat.test_env import FLAAT_AT, FLAAT_ISS, environment\n\n\nclass TestURLs:\n def test_url_1(self):\n assert is_url(\"http://heise.de\")\n\n def test_valid_url_http(self):\n assert is_url(\"http://heise.de\")\n\n def test_valid_url_https(self):\n assert is_url(\"http://heise.de\")\n\n def test_valid_url_ftp(self):\n assert is_url(\"http://heise.de\")\n\n def test_valid_url_https_path(self):\n assert is_url(\"https://heise.de/thi_s&is=difficult\")\n\n def test_invalid_url(self):\n assert not is_url(\"htp://heise.de\")\n\n\ndef test_token_introspection():\n client_id = environment.get(\"FLAAT_CLIENT_ID\")\n client_secret = environment.get(\"FLAAT_CLIENT_SECRET\")\n if client_id is None or client_secret is None: # pragma: no cover\n pytest.skip(\"FLAAT_CLIENT_ID and FLAAT_CLIENT_SECRET are not set\")\n\n issuer_config = IssuerConfig.get_from_string(FLAAT_ISS)\n assert issuer_config is not None\n issuer_config.client_id = client_id\n issuer_config.client_secret = client_secret\n introspection_info = issuer_config._get_introspected_token_info(FLAAT_AT)\n assert introspection_info is not None\n",
"step-ids": [
3,
4,
7,
8,
10
]
}
|
[
3,
4,
7,
8,
10
] |
import flask
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
import pickle
from recent_earnings_tickers import ok_tickers
import re
#---------- Model ----------------#
#with open('/Users/samfunk/ds/metis/project_mcnulty/code/REPLACE_WITH_MODEL_PICKLE', 'rb') as f:
#PREDICTOR = pickle.load(f)
'''Have final model in the pickle file
Should be prefit to main data
Simply ask for a company/list of companies
Input the ticker into model (which will scrape web for current features)
Pray some of them are right'''
#---------- URLS AND WEB PAGES -------------#
# Flask application instance; the route handlers below register on it.
app = flask.Flask(__name__)
@app.route('/')
def home_page():
    """Serve the static stock-lookup page as the site root."""
    page_path = "/Users/samfunk/ds/metis/project_mcnulty/stock_page.html"
    with open(page_path, 'r') as page_file:
        html = page_file.read()
    return html
@app.route("/stock", methods=["POST"])
def stock(ok_tickers=None):
    """Look up the latest earnings surprise for a POSTed ticker.

    Expects a JSON body like ``{"ticker": "aapl"}``. Scrapes the Yahoo
    Finance analysts page for the most recent surprise percentage and
    returns JSON ``{"surprise": <string>, "score": <0|1|'null'>}`` where
    score 1 means |surprise| >= 5%. Unknown tickers, or pages where no
    percentage can be parsed, yield 'null' for both fields.
    """
    # BUG FIX: the original signature was `ok_tickers=ok_tickers()`, which
    # evaluated the ticker list once at import time — the list could never
    # refresh for the lifetime of the server. Resolve it lazily instead
    # (the parameter shadows the imported function, hence the local import).
    if ok_tickers is None:
        import recent_earnings_tickers
        ok_tickers = recent_earnings_tickers.ok_tickers()

    data = flask.request.json
    ticker = str(data["ticker"]).upper()

    surprise_string = 'null'
    score = 'null'
    if ticker in ok_tickers:
        earnings_soup = BeautifulSoup(
            requests.get(
                "https://finance.yahoo.com/quote/%s/analysts?p=%s"
                % (ticker, ticker)
            ).text,
            'html.parser',
        )
        surprise_string = (
            earnings_soup.find_all('table')[2]
            .tbody.find_all('tr')[3]
            .find_all('td')[4]
            .text
        )
        # BUG FIX: re.search returns None when the cell holds no "%" value
        # (e.g. Yahoo layout change); the original then crashed with
        # TypeError on the subscript. Fall back to 'null' instead.
        match = re.search(r'(.*)%', surprise_string)
        if match is None:
            surprise_string = 'null'
        else:
            surprise = float(match[1])
            # Placeholder for the pickled model: large surprises score 1.
            score = 0 if abs(surprise) < 5.0 else 1

    results = {"surprise": surprise_string, "score": score}
    print(ticker, results)
    return flask.jsonify(results)
if __name__ == '__main__':
    # Run the Flask development server when executed directly.
    app.run()
|
normal
|
{
"blob_id": "3be1947ead65f8e8a9bf73cc8cae2c7d69d8b756",
"index": 1641,
"step-1": "<mask token>\n\n\n@app.route('/')\ndef home_page():\n with open('/Users/samfunk/ds/metis/project_mcnulty/stock_page.html', 'r'\n ) as viz_file:\n return viz_file.read()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef home_page():\n with open('/Users/samfunk/ds/metis/project_mcnulty/stock_page.html', 'r'\n ) as viz_file:\n return viz_file.read()\n\n\n@app.route('/stock', methods=['POST'])\ndef stock(ok_tickers=ok_tickers()):\n data = flask.request.json\n ticker = str(data['ticker']).upper()\n if ticker in ok_tickers:\n earnings_soup = BeautifulSoup(requests.get(\n 'https://finance.yahoo.com/quote/%s/analysts?p=%s' % (ticker,\n ticker)).text, 'html.parser')\n surprise_string = earnings_soup.find_all('table')[2].tbody.find_all(\n 'tr')[3].find_all('td')[4].text\n surprise = float(re.search('(.*)%', surprise_string)[1])\n if abs(surprise) < 5.0:\n score = 0\n else:\n score = 1\n else:\n surprise_string = 'null'\n score = 'null'\n results = {'surprise': surprise_string, 'score': score}\n print(ticker, results)\n return flask.jsonify(results)\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-3": "<mask token>\napp = flask.Flask(__name__)\n\n\n@app.route('/')\ndef home_page():\n with open('/Users/samfunk/ds/metis/project_mcnulty/stock_page.html', 'r'\n ) as viz_file:\n return viz_file.read()\n\n\n@app.route('/stock', methods=['POST'])\ndef stock(ok_tickers=ok_tickers()):\n data = flask.request.json\n ticker = str(data['ticker']).upper()\n if ticker in ok_tickers:\n earnings_soup = BeautifulSoup(requests.get(\n 'https://finance.yahoo.com/quote/%s/analysts?p=%s' % (ticker,\n ticker)).text, 'html.parser')\n surprise_string = earnings_soup.find_all('table')[2].tbody.find_all(\n 'tr')[3].find_all('td')[4].text\n surprise = float(re.search('(.*)%', surprise_string)[1])\n if abs(surprise) < 5.0:\n score = 0\n else:\n score = 1\n else:\n surprise_string = 'null'\n score = 'null'\n results = {'surprise': surprise_string, 'score': score}\n print(ticker, results)\n return flask.jsonify(results)\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-4": "import flask\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\nimport pickle\nfrom recent_earnings_tickers import ok_tickers\nimport re\n<mask token>\napp = flask.Flask(__name__)\n\n\n@app.route('/')\ndef home_page():\n with open('/Users/samfunk/ds/metis/project_mcnulty/stock_page.html', 'r'\n ) as viz_file:\n return viz_file.read()\n\n\n@app.route('/stock', methods=['POST'])\ndef stock(ok_tickers=ok_tickers()):\n data = flask.request.json\n ticker = str(data['ticker']).upper()\n if ticker in ok_tickers:\n earnings_soup = BeautifulSoup(requests.get(\n 'https://finance.yahoo.com/quote/%s/analysts?p=%s' % (ticker,\n ticker)).text, 'html.parser')\n surprise_string = earnings_soup.find_all('table')[2].tbody.find_all(\n 'tr')[3].find_all('td')[4].text\n surprise = float(re.search('(.*)%', surprise_string)[1])\n if abs(surprise) < 5.0:\n score = 0\n else:\n score = 1\n else:\n surprise_string = 'null'\n score = 'null'\n results = {'surprise': surprise_string, 'score': score}\n print(ticker, results)\n return flask.jsonify(results)\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-5": "import flask\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\nimport pickle\nfrom recent_earnings_tickers import ok_tickers\nimport re\n\n#---------- Model ----------------#\n\n#with open('/Users/samfunk/ds/metis/project_mcnulty/code/REPLACE_WITH_MODEL_PICKLE', 'rb') as f:\n #PREDICTOR = pickle.load(f)\n\n\n'''Have final model in the pickle file\nShould be prefit to main data\nSimply ask for a company/list of companies\nInput the ticker into model (which will scrape web for current features)\nPray some of them are right'''\n\n\n\n#---------- URLS AND WEB PAGES -------------#\napp = flask.Flask(__name__)\n\n@app.route('/')\ndef home_page():\n with open(\"/Users/samfunk/ds/metis/project_mcnulty/stock_page.html\",'r') as viz_file:\n return viz_file.read()\n\n\n@app.route(\"/stock\", methods=[\"POST\"])\ndef stock(ok_tickers=ok_tickers()):\n\n data = flask.request.json\n ticker = str(data[\"ticker\"]).upper()\n if ticker in ok_tickers:\n earnings_soup = BeautifulSoup(requests.get(\"https://finance.yahoo.com/quote/%s/analysts?p=%s\" % (ticker, ticker)).text, 'html.parser')\n surprise_string = earnings_soup.find_all('table')[2].tbody.find_all('tr')[3].find_all('td')[4].text\n surprise = float(re.search(r'(.*)%', surprise_string)[1])\n\n\n #score = PREDICTOR.predict_proba(x)\n\n if abs(surprise) < 5.0:\n score = 0\n else:\n score = 1\n else:\n surprise_string = 'null'\n score = 'null'\n #score = PREDICTOR.predict_proba(x)\n results = {\"surprise\": surprise_string, \"score\": score}\n\n print(ticker, results)\n return flask.jsonify(results)\n\nif __name__ == '__main__':\n app.run()\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
from __future__ import division
import abc
import re
import numpy as np
class NGram(object):
    """An n-gram language model over a sequence of tokens.

    Subclasses implement ``load_text`` (must set ``self.text`` to a token
    sequence — e.g. a string for character tokens or a list for word
    tokens) and ``cols`` (the token vocabulary). Counts are stored as
    ``counts[token][context]`` where *context* is the n preceding tokens
    joined with ``'^'``.
    """

    # Separator used when joining sampled tokens into a sentence.
    SEP = ''

    def __init__(self, n, text):
        self.n = n
        self.load_text(text)
        self.load_ngram()

    @abc.abstractmethod
    def load_text(self, text):
        """Set ``self.text`` to the token sequence derived from ``text``."""
        pass

    def load_ngram(self):
        """Tally every token in ``self.text`` against its n-token context."""
        counts = self.empty_count()
        c = self.n
        while c < len(self.text):
            token = self.text[c]
            context = '^'.join(self.prev_n(c))
            if token:
                counts[token][context] = counts[token].get(context, 0) + 1
            c += 1
        self.counts = counts

    def get_count(self, x, y=''):
        """Return how often token ``x`` follows the token sequence ``y``.

        ``y`` may be any sequence of tokens (a plain string works for
        single-character tokens). With fewer than n tokens, counts are
        summed over every stored context whose token-suffix equals ``y``;
        an empty ``y`` gives the total count of ``x``.
        """
        if len(y) > self.n:
            return 0
        if x not in self.counts:
            return 0
        if len(y) == self.n:
            return self.counts[x].get('^'.join(y), 0)
        suffix = list(y)
        if not suffix:
            # BUG FIX: the original compared x_prev[-0:] (i.e. the whole
            # key) to '' and therefore always returned 0 for an empty
            # context. Sum all contexts instead.
            return sum(self.counts[x].values())
        count = 0
        for context in self.counts[x]:
            # Compare token-wise so multi-character tokens cannot match
            # accidentally across the '^' separator.
            if context.split('^')[-len(suffix):] == suffix:
                count += self.counts[x][context]
        return count

    def prev_n(self, i):
        """Return the n tokens preceding position ``i``."""
        return self.text[i - self.n: i]

    def empty_count(self):
        """Return a fresh ``{token: {context: count}}`` table over the vocabulary."""
        # (Dropped the original's unused `s = {}` local.)
        return {c: dict() for c in self.cols()}

    def generate_sentence(self, length):
        """Sample ``length`` tokens, each conditioned on up to n predecessors."""
        tokens = []
        while len(tokens) < length:
            context = tokens[-self.n:] if len(tokens) >= self.n else tokens
            tokens.append(self.sample(context))
        return self.SEP.join(tokens)

    def sample(self, previous):
        """Draw one token conditioned on the token sequence ``previous``."""
        assert len(previous) <= self.n
        # BUG FIX: the original joined `previous` with '^' here and
        # `get_count` joined it again, so every context of n >= 2 tokens
        # failed the length check and sampling silently degenerated to the
        # uniform fallback. Pass the token sequence through unchanged.
        tokens, weights = self.distribution(previous)
        i = np.nonzero(np.random.multinomial(1, weights))[0][0]
        return tokens[i]

    def distribution(self, previous):
        """Return ``(tokens, probabilities)`` conditioned on the sequence ``previous``.

        Falls back to a uniform distribution when the context was never seen.
        """
        tokens = list(self.counts.keys())
        counts = [self.get_count(token, previous) for token in tokens]
        total = sum(counts)
        if total:
            # BUG FIX: return a realized list, not a lazy map object —
            # np.random.multinomial needs a proper sequence on Python 3.
            return tokens, [c / total for c in counts]
        return tokens, [1 / len(counts)] * len(counts)

    @abc.abstractmethod
    def cols(self):
        """Return the iterable vocabulary of tokens."""
        pass

    @staticmethod
    def clean(text):
        """Lowercase ``text`` and collapse everything but a-z into spaces."""
        s = text.lower()
        s = re.sub(r'\n', ' ', s)
        s = re.sub(r'[^a-z ]+', ' ', s)
        return s
|
normal
|
{
"blob_id": "41e3c18b02f9d80f987d09227da1fbc6bde0ed1d",
"index": 4812,
"step-1": "<mask token>\n\n\nclass NGram(object):\n <mask token>\n <mask token>\n\n @abc.abstractmethod\n def load_text(self, text):\n pass\n\n def load_ngram(self):\n counts = self.empty_count()\n c = self.n\n while c < len(self.text):\n l = self.text[c]\n p = '^'.join(self.prev_n(c))\n if l:\n if p not in counts[l]:\n counts[l][p] = 1\n else:\n counts[l][p] += 1\n c += 1\n self.counts = counts\n <mask token>\n\n def prev_n(self, i):\n return self.text[i - self.n:i]\n <mask token>\n\n def generate_sentence(self, length):\n c = length\n s = []\n while c > 0:\n if len(s) < self.n:\n sampling = self.sample(s)\n else:\n sampling = self.sample(s[len(s) - self.n:])\n s.append(sampling)\n c -= 1\n return self.SEP.join(s)\n <mask token>\n\n def distribution(self, previous):\n tokens = []\n counts = []\n for token in self.counts.keys():\n count = self.get_count(token, previous)\n tokens.append(token)\n counts.append(count)\n s = sum(counts)\n probability = s and (lambda c: c / s) or (lambda c: 1 / len(counts))\n return tokens, map(probability, counts)\n\n @abc.abstractmethod\n def cols(self):\n pass\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass NGram(object):\n <mask token>\n\n def __init__(self, n, text):\n self.n = n\n self.load_text(text)\n self.load_ngram()\n\n @abc.abstractmethod\n def load_text(self, text):\n pass\n\n def load_ngram(self):\n counts = self.empty_count()\n c = self.n\n while c < len(self.text):\n l = self.text[c]\n p = '^'.join(self.prev_n(c))\n if l:\n if p not in counts[l]:\n counts[l][p] = 1\n else:\n counts[l][p] += 1\n c += 1\n self.counts = counts\n <mask token>\n\n def prev_n(self, i):\n return self.text[i - self.n:i]\n\n def empty_count(self):\n s = {}\n return {c: dict() for c in self.cols()}\n\n def generate_sentence(self, length):\n c = length\n s = []\n while c > 0:\n if len(s) < self.n:\n sampling = self.sample(s)\n else:\n sampling = self.sample(s[len(s) - self.n:])\n s.append(sampling)\n c -= 1\n return self.SEP.join(s)\n <mask token>\n\n def distribution(self, previous):\n tokens = []\n counts = []\n for token in self.counts.keys():\n count = self.get_count(token, previous)\n tokens.append(token)\n counts.append(count)\n s = sum(counts)\n probability = s and (lambda c: c / s) or (lambda c: 1 / len(counts))\n return tokens, map(probability, counts)\n\n @abc.abstractmethod\n def cols(self):\n pass\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass NGram(object):\n <mask token>\n\n def __init__(self, n, text):\n self.n = n\n self.load_text(text)\n self.load_ngram()\n\n @abc.abstractmethod\n def load_text(self, text):\n pass\n\n def load_ngram(self):\n counts = self.empty_count()\n c = self.n\n while c < len(self.text):\n l = self.text[c]\n p = '^'.join(self.prev_n(c))\n if l:\n if p not in counts[l]:\n counts[l][p] = 1\n else:\n counts[l][p] += 1\n c += 1\n self.counts = counts\n <mask token>\n\n def prev_n(self, i):\n return self.text[i - self.n:i]\n\n def empty_count(self):\n s = {}\n return {c: dict() for c in self.cols()}\n\n def generate_sentence(self, length):\n c = length\n s = []\n while c > 0:\n if len(s) < self.n:\n sampling = self.sample(s)\n else:\n sampling = self.sample(s[len(s) - self.n:])\n s.append(sampling)\n c -= 1\n return self.SEP.join(s)\n\n def sample(self, previous):\n assert len(previous) <= self.n\n tokens, distribution = self.distribution('^'.join(previous))\n i = np.nonzero(np.random.multinomial(1, distribution))[0][0]\n return tokens[i]\n\n def distribution(self, previous):\n tokens = []\n counts = []\n for token in self.counts.keys():\n count = self.get_count(token, previous)\n tokens.append(token)\n counts.append(count)\n s = sum(counts)\n probability = s and (lambda c: c / s) or (lambda c: 1 / len(counts))\n return tokens, map(probability, counts)\n\n @abc.abstractmethod\n def cols(self):\n pass\n\n @staticmethod\n def clean(text):\n s = text.lower()\n s = re.sub('\\\\n', ' ', s)\n s = re.sub('[^a-z ]+', ' ', s)\n return s\n",
"step-4": "<mask token>\n\n\nclass NGram(object):\n SEP = ''\n\n def __init__(self, n, text):\n self.n = n\n self.load_text(text)\n self.load_ngram()\n\n @abc.abstractmethod\n def load_text(self, text):\n pass\n\n def load_ngram(self):\n counts = self.empty_count()\n c = self.n\n while c < len(self.text):\n l = self.text[c]\n p = '^'.join(self.prev_n(c))\n if l:\n if p not in counts[l]:\n counts[l][p] = 1\n else:\n counts[l][p] += 1\n c += 1\n self.counts = counts\n\n def get_count(self, x, y=''):\n if len(y) > self.n:\n return 0\n elif len(y) == self.n:\n p = '^'.join(y)\n if x in self.counts and p in self.counts[x]:\n return self.counts[x][p]\n else:\n return 0\n else:\n p = '^'.join(y)\n count = 0\n if x in self.counts:\n for x_prev in self.counts[x].keys():\n if x_prev[-len(p):] == p:\n count += self.counts[x][x_prev]\n return count\n\n def prev_n(self, i):\n return self.text[i - self.n:i]\n\n def empty_count(self):\n s = {}\n return {c: dict() for c in self.cols()}\n\n def generate_sentence(self, length):\n c = length\n s = []\n while c > 0:\n if len(s) < self.n:\n sampling = self.sample(s)\n else:\n sampling = self.sample(s[len(s) - self.n:])\n s.append(sampling)\n c -= 1\n return self.SEP.join(s)\n\n def sample(self, previous):\n assert len(previous) <= self.n\n tokens, distribution = self.distribution('^'.join(previous))\n i = np.nonzero(np.random.multinomial(1, distribution))[0][0]\n return tokens[i]\n\n def distribution(self, previous):\n tokens = []\n counts = []\n for token in self.counts.keys():\n count = self.get_count(token, previous)\n tokens.append(token)\n counts.append(count)\n s = sum(counts)\n probability = s and (lambda c: c / s) or (lambda c: 1 / len(counts))\n return tokens, map(probability, counts)\n\n @abc.abstractmethod\n def cols(self):\n pass\n\n @staticmethod\n def clean(text):\n s = text.lower()\n s = re.sub('\\\\n', ' ', s)\n s = re.sub('[^a-z ]+', ' ', s)\n return s\n",
"step-5": "from __future__ import division\nimport abc\nimport re\nimport numpy as np\n\nclass NGram(object):\n SEP = ''\n\n def __init__(self, n, text):\n self.n = n\n self.load_text(text)\n self.load_ngram()\n\n @abc.abstractmethod\n def load_text(self, text):\n pass\n\n def load_ngram(self):\n counts = self.empty_count()\n\n c = self.n\n while c < len(self.text):\n l = self.text[c]\n p = '^'.join(self.prev_n(c))\n\n if l:\n if p not in counts[l]:\n counts[l][p] = 1\n else:\n counts[l][p] += 1\n c += 1\n\n self.counts = counts\n\n def get_count(self, x, y=''):\n if len(y) > self.n:\n # raise RuntimeError('Invalid n-gram')\n return 0\n elif len(y) == self.n:\n p = '^'.join(y)\n if x in self.counts and p in self.counts[x]:\n return self.counts[x][p]\n else:\n return 0\n else:\n p = '^'.join(y)\n count = 0\n if x in self.counts:\n for x_prev in self.counts[x].keys():\n if x_prev[-len(p):] == p:\n count += self.counts[x][x_prev]\n return count\n\n def prev_n(self, i):\n return self.text[i - self.n: i]\n\n def empty_count(self):\n s = {}\n return { c: dict() for c in self.cols() }\n\n def generate_sentence(self, length):\n c = length\n s = []\n while c > 0:\n if len(s) < self.n:\n sampling = self.sample(s)\n else:\n sampling = self.sample(s[(len(s) - self.n):])\n s.append(sampling)\n c -= 1\n\n return self.SEP.join(s)\n\n def sample(self, previous):\n assert len(previous) <= self.n\n tokens, distribution = self.distribution('^'.join(previous))\n i = np.nonzero(np.random.multinomial(1, distribution))[0][0]\n return tokens[i]\n\n def distribution(self, previous):\n tokens = []\n counts = []\n for token in self.counts.keys():\n count = self.get_count(token, previous)\n tokens.append(token)\n counts.append(count)\n\n s = sum(counts)\n probability = s and (lambda c: c / s) or (lambda c: 1/len(counts))\n return (tokens, map(probability, counts))\n\n @abc.abstractmethod\n def cols(self):\n pass\n\n @staticmethod\n def clean(text):\n s = text.lower()\n s = re.sub(r'\\n', ' ', 
s)\n s = re.sub(r'[^a-z ]+', ' ', s)\n return s\n",
"step-ids": [
7,
9,
11,
13,
15
]
}
|
[
7,
9,
11,
13,
15
] |
"""
"""
import json
import logging
import re
import asyncio
from typing import Optional
import discord
from discord.ext import commands
import utils
logging.basicConfig(level=logging.INFO, format="[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s")
log = logging.getLogger("YTEmbedFixer")
client = commands.Bot(command_prefix="yt!",
max_messages=5000,
description="A bot for fixing what Discord can't.\n",
owner_id=389590659335716867,
case_insensitive=True)
@client.event
async def on_ready():
log.info('Connected using discord.py {}!'.format(discord.__version__))
log.info('Username: {0.name}, ID: {0.id}'.format(client.user))
log.info("Connected to {} servers.".format(len(client.guilds)))
activity = discord.Game("Fixing what Discord can't since 12/5/2019.".format(client.command_prefix))
await client.change_presence(status=discord.Status.online, activity=activity)
log.info('------')
async def fix_yt_embed(message: discord.Message) -> Optional[discord.Embed]:
regex_search_string = r'(?:https?://)?(?:www[.])?youtu(?:[.]be/|be[.]com/watch[?]v=)([^ ]*)'
if len(message.embeds) == 1:
matches = re.findall(regex_search_string, message.content)
if len(matches) > 0:
# We have a valid youtube link with Embed! Check if it broken.
# We are lazy and trying to get this done quickly, so for the time being ignore all other embeds other than the first one.
if message.embeds[0].type == "link": # description == 'Enjoy the videos and music you love, upload original content, and share it all with friends, family, and the world on YouTube.':
# We have a broken embed!
await asyncio.sleep(2) # Sleep for a bit to let PK delete the message if it a proxy message
msg_check = discord.utils.get(client.cached_messages, id=message.id) # Check if message was deleted by PK.
if msg_check is not None:
html = await utils.get_video_webpage(matches[0])
video_url = "https://www.youtube.com/watch?v={}".format(matches[0])
video_image = await utils.get_video_image_url(html)
video_title = await utils.get_video_title(html)
author_name = await utils.get_author_name(html)
author_url = await utils.get_author_url(html)
if video_title is None and video_image is None and author_name is None and author_url is None:
#We got no info from the video. Prehaps the video is dead on youtube or the DOM has totally changed.
return None # Don't post empty embed.
embed = build_embed(video_url, video_image, video_title, author_name, author_url)
await send_new_embed(message, embed)
return None
async def send_new_embed(original_msg: discord.Message, embed: discord.Embed):
webhook: discord.Webhook = await utils.get_webhook(client, original_msg.channel)
try:
if original_msg.guild.me.permissions_in(original_msg.channel).manage_messages:
await original_msg.delete()
await webhook.send(content=original_msg.content, embed=embed, username=original_msg.author.display_name,
avatar_url=original_msg.author.avatar_url)
else:
await webhook.send(embed=embed, username=client.user.display_name,
avatar_url=client.user.avatar_url)
except discord.errors.NotFound:
pass # SHOULD never get here because we check before deleting, but just in case... Don't post replacement.
def build_embed(_video_url: str, _video_image_url: Optional[str], _video_title: Optional[str],
_author_name: Optional[str], _author_url: Optional[str]) -> discord.Embed:
embed = discord.Embed(type="video", colour=discord.Colour.from_rgb(255, 0, 0))
if _video_image_url is not None:
embed.set_image(url=_video_image_url)
if _author_name is not None:
if _author_url is not None:
embed.set_author(name=_author_name, url=_author_url)
else:
embed.set_author(name=_author_name)
if _video_title is not None:
embed.title = _video_title
embed.url = _video_url
return embed
# ---- Command Error Handling ----- #
@client.event
async def on_command_error(ctx, error):
if type(error) == discord.ext.commands.NoPrivateMessage:
await ctx.send("⚠ This command can not be used in DMs!!!")
return
elif type(error) == discord.ext.commands.CommandNotFound:
await ctx.send("⚠ Invalid Command!!!")
return
elif type(error) == discord.ext.commands.MissingPermissions:
await ctx.send("⚠ You need the **Manage Messages** permission to use this command".format(error.missing_perms))
return
elif type(error) == discord.ext.commands.MissingRequiredArgument:
await ctx.send("⚠ {}".format(error))
elif type(error) == discord.ext.commands.BadArgument:
await ctx.send("⚠ {}".format(error))
else:
await ctx.send("⚠ {}".format(error))
raise error
@client.event
async def on_message(message: discord.Message):
await fix_yt_embed(message)
await client.process_commands(message)
@client.event
async def on_message_edit(before: discord.Message, after: discord.Message):
await fix_yt_embed(after)
@client.command(name="invite", brief="Sends the invite link")
async def send_invite_link(ctx: commands.Context):
# link = "https://discordapp.com/oauth2/authorize?client_id=500711320497160199&scope=bot&permissions=536882176"
link = "https://discordapp.com/oauth2/authorize?client_id={}&scope=bot&permissions=536882176".format(client.user.id)
await ctx.send(link)
if __name__ == '__main__':
with open('config.json') as json_data_file:
config = json.load(json_data_file)
client.command_prefix = config['bot_prefix']
client.run(config['token'])
log.info("cleaning Up and shutting down")
|
normal
|
{
"blob_id": "d73832d3f0adf22085a207ab223854e11fffa2e8",
"index": 6948,
"step-1": "<mask token>\n\n\ndef build_embed(_video_url: str, _video_image_url: Optional[str],\n _video_title: Optional[str], _author_name: Optional[str], _author_url:\n Optional[str]) ->discord.Embed:\n embed = discord.Embed(type='video', colour=discord.Colour.from_rgb(255,\n 0, 0))\n if _video_image_url is not None:\n embed.set_image(url=_video_image_url)\n if _author_name is not None:\n if _author_url is not None:\n embed.set_author(name=_author_name, url=_author_url)\n else:\n embed.set_author(name=_author_name)\n if _video_title is not None:\n embed.title = _video_title\n embed.url = _video_url\n return embed\n\n\n<mask token>\n",
"step-2": "<mask token>\nlogging.basicConfig(level=logging.INFO, format=\n '[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')\n<mask token>\n\n\n@client.event\nasync def on_ready():\n log.info('Connected using discord.py {}!'.format(discord.__version__))\n log.info('Username: {0.name}, ID: {0.id}'.format(client.user))\n log.info('Connected to {} servers.'.format(len(client.guilds)))\n activity = discord.Game(\"Fixing what Discord can't since 12/5/2019.\".\n format(client.command_prefix))\n await client.change_presence(status=discord.Status.online, activity=\n activity)\n log.info('------')\n\n\nasync def fix_yt_embed(message: discord.Message) ->Optional[discord.Embed]:\n regex_search_string = (\n '(?:https?://)?(?:www[.])?youtu(?:[.]be/|be[.]com/watch[?]v=)([^ ]*)')\n if len(message.embeds) == 1:\n matches = re.findall(regex_search_string, message.content)\n if len(matches) > 0:\n if message.embeds[0].type == 'link':\n await asyncio.sleep(2)\n msg_check = discord.utils.get(client.cached_messages, id=\n message.id)\n if msg_check is not None:\n html = await utils.get_video_webpage(matches[0])\n video_url = 'https://www.youtube.com/watch?v={}'.format(\n matches[0])\n video_image = await utils.get_video_image_url(html)\n video_title = await utils.get_video_title(html)\n author_name = await utils.get_author_name(html)\n author_url = await utils.get_author_url(html)\n if (video_title is None and video_image is None and \n author_name is None and author_url is None):\n return None\n embed = build_embed(video_url, video_image, video_title,\n author_name, author_url)\n await send_new_embed(message, embed)\n return None\n\n\nasync def send_new_embed(original_msg: discord.Message, embed: discord.Embed):\n webhook: discord.Webhook = await utils.get_webhook(client, original_msg\n .channel)\n try:\n if original_msg.guild.me.permissions_in(original_msg.channel\n ).manage_messages:\n await original_msg.delete()\n await webhook.send(content=original_msg.content, 
embed=embed,\n username=original_msg.author.display_name, avatar_url=\n original_msg.author.avatar_url)\n else:\n await webhook.send(embed=embed, username=client.user.\n display_name, avatar_url=client.user.avatar_url)\n except discord.errors.NotFound:\n pass\n\n\ndef build_embed(_video_url: str, _video_image_url: Optional[str],\n _video_title: Optional[str], _author_name: Optional[str], _author_url:\n Optional[str]) ->discord.Embed:\n embed = discord.Embed(type='video', colour=discord.Colour.from_rgb(255,\n 0, 0))\n if _video_image_url is not None:\n embed.set_image(url=_video_image_url)\n if _author_name is not None:\n if _author_url is not None:\n embed.set_author(name=_author_name, url=_author_url)\n else:\n embed.set_author(name=_author_name)\n if _video_title is not None:\n embed.title = _video_title\n embed.url = _video_url\n return embed\n\n\n@client.event\nasync def on_command_error(ctx, error):\n if type(error) == discord.ext.commands.NoPrivateMessage:\n await ctx.send('⚠ This command can not be used in DMs!!!')\n return\n elif type(error) == discord.ext.commands.CommandNotFound:\n await ctx.send('⚠ Invalid Command!!!')\n return\n elif type(error) == discord.ext.commands.MissingPermissions:\n await ctx.send(\n '⚠ You need the **Manage Messages** permission to use this command'\n .format(error.missing_perms))\n return\n elif type(error) == discord.ext.commands.MissingRequiredArgument:\n await ctx.send('⚠ {}'.format(error))\n elif type(error) == discord.ext.commands.BadArgument:\n await ctx.send('⚠ {}'.format(error))\n else:\n await ctx.send('⚠ {}'.format(error))\n raise error\n\n\n@client.event\nasync def on_message(message: discord.Message):\n await fix_yt_embed(message)\n await client.process_commands(message)\n\n\n@client.event\nasync def on_message_edit(before: discord.Message, after: discord.Message):\n await fix_yt_embed(after)\n\n\n@client.command(name='invite', brief='Sends the invite link')\nasync def send_invite_link(ctx: commands.Context):\n 
link = (\n 'https://discordapp.com/oauth2/authorize?client_id={}&scope=bot&permissions=536882176'\n .format(client.user.id))\n await ctx.send(link)\n\n\nif __name__ == '__main__':\n with open('config.json') as json_data_file:\n config = json.load(json_data_file)\n client.command_prefix = config['bot_prefix']\n client.run(config['token'])\n log.info('cleaning Up and shutting down')\n",
"step-3": "<mask token>\nlogging.basicConfig(level=logging.INFO, format=\n '[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')\nlog = logging.getLogger('YTEmbedFixer')\nclient = commands.Bot(command_prefix='yt!', max_messages=5000, description=\n \"\"\"A bot for fixing what Discord can't.\n\"\"\", owner_id=\n 389590659335716867, case_insensitive=True)\n\n\n@client.event\nasync def on_ready():\n log.info('Connected using discord.py {}!'.format(discord.__version__))\n log.info('Username: {0.name}, ID: {0.id}'.format(client.user))\n log.info('Connected to {} servers.'.format(len(client.guilds)))\n activity = discord.Game(\"Fixing what Discord can't since 12/5/2019.\".\n format(client.command_prefix))\n await client.change_presence(status=discord.Status.online, activity=\n activity)\n log.info('------')\n\n\nasync def fix_yt_embed(message: discord.Message) ->Optional[discord.Embed]:\n regex_search_string = (\n '(?:https?://)?(?:www[.])?youtu(?:[.]be/|be[.]com/watch[?]v=)([^ ]*)')\n if len(message.embeds) == 1:\n matches = re.findall(regex_search_string, message.content)\n if len(matches) > 0:\n if message.embeds[0].type == 'link':\n await asyncio.sleep(2)\n msg_check = discord.utils.get(client.cached_messages, id=\n message.id)\n if msg_check is not None:\n html = await utils.get_video_webpage(matches[0])\n video_url = 'https://www.youtube.com/watch?v={}'.format(\n matches[0])\n video_image = await utils.get_video_image_url(html)\n video_title = await utils.get_video_title(html)\n author_name = await utils.get_author_name(html)\n author_url = await utils.get_author_url(html)\n if (video_title is None and video_image is None and \n author_name is None and author_url is None):\n return None\n embed = build_embed(video_url, video_image, video_title,\n author_name, author_url)\n await send_new_embed(message, embed)\n return None\n\n\nasync def send_new_embed(original_msg: discord.Message, embed: discord.Embed):\n webhook: discord.Webhook = await 
utils.get_webhook(client, original_msg\n .channel)\n try:\n if original_msg.guild.me.permissions_in(original_msg.channel\n ).manage_messages:\n await original_msg.delete()\n await webhook.send(content=original_msg.content, embed=embed,\n username=original_msg.author.display_name, avatar_url=\n original_msg.author.avatar_url)\n else:\n await webhook.send(embed=embed, username=client.user.\n display_name, avatar_url=client.user.avatar_url)\n except discord.errors.NotFound:\n pass\n\n\ndef build_embed(_video_url: str, _video_image_url: Optional[str],\n _video_title: Optional[str], _author_name: Optional[str], _author_url:\n Optional[str]) ->discord.Embed:\n embed = discord.Embed(type='video', colour=discord.Colour.from_rgb(255,\n 0, 0))\n if _video_image_url is not None:\n embed.set_image(url=_video_image_url)\n if _author_name is not None:\n if _author_url is not None:\n embed.set_author(name=_author_name, url=_author_url)\n else:\n embed.set_author(name=_author_name)\n if _video_title is not None:\n embed.title = _video_title\n embed.url = _video_url\n return embed\n\n\n@client.event\nasync def on_command_error(ctx, error):\n if type(error) == discord.ext.commands.NoPrivateMessage:\n await ctx.send('⚠ This command can not be used in DMs!!!')\n return\n elif type(error) == discord.ext.commands.CommandNotFound:\n await ctx.send('⚠ Invalid Command!!!')\n return\n elif type(error) == discord.ext.commands.MissingPermissions:\n await ctx.send(\n '⚠ You need the **Manage Messages** permission to use this command'\n .format(error.missing_perms))\n return\n elif type(error) == discord.ext.commands.MissingRequiredArgument:\n await ctx.send('⚠ {}'.format(error))\n elif type(error) == discord.ext.commands.BadArgument:\n await ctx.send('⚠ {}'.format(error))\n else:\n await ctx.send('⚠ {}'.format(error))\n raise error\n\n\n@client.event\nasync def on_message(message: discord.Message):\n await fix_yt_embed(message)\n await client.process_commands(message)\n\n\n@client.event\nasync 
def on_message_edit(before: discord.Message, after: discord.Message):\n await fix_yt_embed(after)\n\n\n@client.command(name='invite', brief='Sends the invite link')\nasync def send_invite_link(ctx: commands.Context):\n link = (\n 'https://discordapp.com/oauth2/authorize?client_id={}&scope=bot&permissions=536882176'\n .format(client.user.id))\n await ctx.send(link)\n\n\nif __name__ == '__main__':\n with open('config.json') as json_data_file:\n config = json.load(json_data_file)\n client.command_prefix = config['bot_prefix']\n client.run(config['token'])\n log.info('cleaning Up and shutting down')\n",
"step-4": "<mask token>\nimport json\nimport logging\nimport re\nimport asyncio\nfrom typing import Optional\nimport discord\nfrom discord.ext import commands\nimport utils\nlogging.basicConfig(level=logging.INFO, format=\n '[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')\nlog = logging.getLogger('YTEmbedFixer')\nclient = commands.Bot(command_prefix='yt!', max_messages=5000, description=\n \"\"\"A bot for fixing what Discord can't.\n\"\"\", owner_id=\n 389590659335716867, case_insensitive=True)\n\n\n@client.event\nasync def on_ready():\n log.info('Connected using discord.py {}!'.format(discord.__version__))\n log.info('Username: {0.name}, ID: {0.id}'.format(client.user))\n log.info('Connected to {} servers.'.format(len(client.guilds)))\n activity = discord.Game(\"Fixing what Discord can't since 12/5/2019.\".\n format(client.command_prefix))\n await client.change_presence(status=discord.Status.online, activity=\n activity)\n log.info('------')\n\n\nasync def fix_yt_embed(message: discord.Message) ->Optional[discord.Embed]:\n regex_search_string = (\n '(?:https?://)?(?:www[.])?youtu(?:[.]be/|be[.]com/watch[?]v=)([^ ]*)')\n if len(message.embeds) == 1:\n matches = re.findall(regex_search_string, message.content)\n if len(matches) > 0:\n if message.embeds[0].type == 'link':\n await asyncio.sleep(2)\n msg_check = discord.utils.get(client.cached_messages, id=\n message.id)\n if msg_check is not None:\n html = await utils.get_video_webpage(matches[0])\n video_url = 'https://www.youtube.com/watch?v={}'.format(\n matches[0])\n video_image = await utils.get_video_image_url(html)\n video_title = await utils.get_video_title(html)\n author_name = await utils.get_author_name(html)\n author_url = await utils.get_author_url(html)\n if (video_title is None and video_image is None and \n author_name is None and author_url is None):\n return None\n embed = build_embed(video_url, video_image, video_title,\n author_name, author_url)\n await send_new_embed(message, embed)\n 
return None\n\n\nasync def send_new_embed(original_msg: discord.Message, embed: discord.Embed):\n webhook: discord.Webhook = await utils.get_webhook(client, original_msg\n .channel)\n try:\n if original_msg.guild.me.permissions_in(original_msg.channel\n ).manage_messages:\n await original_msg.delete()\n await webhook.send(content=original_msg.content, embed=embed,\n username=original_msg.author.display_name, avatar_url=\n original_msg.author.avatar_url)\n else:\n await webhook.send(embed=embed, username=client.user.\n display_name, avatar_url=client.user.avatar_url)\n except discord.errors.NotFound:\n pass\n\n\ndef build_embed(_video_url: str, _video_image_url: Optional[str],\n _video_title: Optional[str], _author_name: Optional[str], _author_url:\n Optional[str]) ->discord.Embed:\n embed = discord.Embed(type='video', colour=discord.Colour.from_rgb(255,\n 0, 0))\n if _video_image_url is not None:\n embed.set_image(url=_video_image_url)\n if _author_name is not None:\n if _author_url is not None:\n embed.set_author(name=_author_name, url=_author_url)\n else:\n embed.set_author(name=_author_name)\n if _video_title is not None:\n embed.title = _video_title\n embed.url = _video_url\n return embed\n\n\n@client.event\nasync def on_command_error(ctx, error):\n if type(error) == discord.ext.commands.NoPrivateMessage:\n await ctx.send('⚠ This command can not be used in DMs!!!')\n return\n elif type(error) == discord.ext.commands.CommandNotFound:\n await ctx.send('⚠ Invalid Command!!!')\n return\n elif type(error) == discord.ext.commands.MissingPermissions:\n await ctx.send(\n '⚠ You need the **Manage Messages** permission to use this command'\n .format(error.missing_perms))\n return\n elif type(error) == discord.ext.commands.MissingRequiredArgument:\n await ctx.send('⚠ {}'.format(error))\n elif type(error) == discord.ext.commands.BadArgument:\n await ctx.send('⚠ {}'.format(error))\n else:\n await ctx.send('⚠ {}'.format(error))\n raise error\n\n\n@client.event\nasync def 
on_message(message: discord.Message):\n await fix_yt_embed(message)\n await client.process_commands(message)\n\n\n@client.event\nasync def on_message_edit(before: discord.Message, after: discord.Message):\n await fix_yt_embed(after)\n\n\n@client.command(name='invite', brief='Sends the invite link')\nasync def send_invite_link(ctx: commands.Context):\n link = (\n 'https://discordapp.com/oauth2/authorize?client_id={}&scope=bot&permissions=536882176'\n .format(client.user.id))\n await ctx.send(link)\n\n\nif __name__ == '__main__':\n with open('config.json') as json_data_file:\n config = json.load(json_data_file)\n client.command_prefix = config['bot_prefix']\n client.run(config['token'])\n log.info('cleaning Up and shutting down')\n",
"step-5": "\"\"\"\n\n\"\"\"\n\nimport json\nimport logging\nimport re\nimport asyncio\nfrom typing import Optional\n\nimport discord\nfrom discord.ext import commands\nimport utils\n\n\nlogging.basicConfig(level=logging.INFO, format=\"[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s\")\nlog = logging.getLogger(\"YTEmbedFixer\")\n\n\nclient = commands.Bot(command_prefix=\"yt!\",\n max_messages=5000,\n description=\"A bot for fixing what Discord can't.\\n\",\n owner_id=389590659335716867,\n case_insensitive=True)\n\n\n@client.event\nasync def on_ready():\n log.info('Connected using discord.py {}!'.format(discord.__version__))\n log.info('Username: {0.name}, ID: {0.id}'.format(client.user))\n log.info(\"Connected to {} servers.\".format(len(client.guilds)))\n activity = discord.Game(\"Fixing what Discord can't since 12/5/2019.\".format(client.command_prefix))\n await client.change_presence(status=discord.Status.online, activity=activity)\n\n log.info('------')\n\n\nasync def fix_yt_embed(message: discord.Message) -> Optional[discord.Embed]:\n regex_search_string = r'(?:https?://)?(?:www[.])?youtu(?:[.]be/|be[.]com/watch[?]v=)([^ ]*)'\n if len(message.embeds) == 1:\n matches = re.findall(regex_search_string, message.content)\n if len(matches) > 0:\n # We have a valid youtube link with Embed! 
Check if it broken.\n # We are lazy and trying to get this done quickly, so for the time being ignore all other embeds other than the first one.\n if message.embeds[0].type == \"link\": # description == 'Enjoy the videos and music you love, upload original content, and share it all with friends, family, and the world on YouTube.':\n # We have a broken embed!\n\n await asyncio.sleep(2) # Sleep for a bit to let PK delete the message if it a proxy message\n\n msg_check = discord.utils.get(client.cached_messages, id=message.id) # Check if message was deleted by PK.\n if msg_check is not None:\n\n html = await utils.get_video_webpage(matches[0])\n\n video_url = \"https://www.youtube.com/watch?v={}\".format(matches[0])\n\n video_image = await utils.get_video_image_url(html)\n video_title = await utils.get_video_title(html)\n author_name = await utils.get_author_name(html)\n author_url = await utils.get_author_url(html)\n\n if video_title is None and video_image is None and author_name is None and author_url is None:\n #We got no info from the video. Prehaps the video is dead on youtube or the DOM has totally changed.\n return None # Don't post empty embed.\n embed = build_embed(video_url, video_image, video_title, author_name, author_url)\n await send_new_embed(message, embed)\n return None\n\n\nasync def send_new_embed(original_msg: discord.Message, embed: discord.Embed):\n webhook: discord.Webhook = await utils.get_webhook(client, original_msg.channel)\n\n try:\n if original_msg.guild.me.permissions_in(original_msg.channel).manage_messages:\n await original_msg.delete()\n await webhook.send(content=original_msg.content, embed=embed, username=original_msg.author.display_name,\n avatar_url=original_msg.author.avatar_url)\n else:\n await webhook.send(embed=embed, username=client.user.display_name,\n avatar_url=client.user.avatar_url)\n except discord.errors.NotFound:\n pass # SHOULD never get here because we check before deleting, but just in case... 
Don't post replacement.\n\n\ndef build_embed(_video_url: str, _video_image_url: Optional[str], _video_title: Optional[str],\n _author_name: Optional[str], _author_url: Optional[str]) -> discord.Embed:\n embed = discord.Embed(type=\"video\", colour=discord.Colour.from_rgb(255, 0, 0))\n\n if _video_image_url is not None:\n embed.set_image(url=_video_image_url)\n\n if _author_name is not None:\n if _author_url is not None:\n embed.set_author(name=_author_name, url=_author_url)\n else:\n embed.set_author(name=_author_name)\n\n if _video_title is not None:\n embed.title = _video_title\n embed.url = _video_url\n return embed\n\n\n# ---- Command Error Handling ----- #\n@client.event\nasync def on_command_error(ctx, error):\n if type(error) == discord.ext.commands.NoPrivateMessage:\n await ctx.send(\"⚠ This command can not be used in DMs!!!\")\n return\n elif type(error) == discord.ext.commands.CommandNotFound:\n await ctx.send(\"⚠ Invalid Command!!!\")\n return\n elif type(error) == discord.ext.commands.MissingPermissions:\n await ctx.send(\"⚠ You need the **Manage Messages** permission to use this command\".format(error.missing_perms))\n return\n elif type(error) == discord.ext.commands.MissingRequiredArgument:\n await ctx.send(\"⚠ {}\".format(error))\n elif type(error) == discord.ext.commands.BadArgument:\n await ctx.send(\"⚠ {}\".format(error))\n else:\n await ctx.send(\"⚠ {}\".format(error))\n raise error\n\n\n@client.event\nasync def on_message(message: discord.Message):\n await fix_yt_embed(message)\n await client.process_commands(message)\n\n\n@client.event\nasync def on_message_edit(before: discord.Message, after: discord.Message):\n await fix_yt_embed(after)\n\n\n@client.command(name=\"invite\", brief=\"Sends the invite link\")\nasync def send_invite_link(ctx: commands.Context):\n # link = \"https://discordapp.com/oauth2/authorize?client_id=500711320497160199&scope=bot&permissions=536882176\"\n link = 
\"https://discordapp.com/oauth2/authorize?client_id={}&scope=bot&permissions=536882176\".format(client.user.id)\n await ctx.send(link)\n\n\nif __name__ == '__main__':\n\n with open('config.json') as json_data_file:\n config = json.load(json_data_file)\n\n client.command_prefix = config['bot_prefix']\n client.run(config['token'])\n\n log.info(\"cleaning Up and shutting down\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
ALPHABET = 'abcdefghijklmnopqrstuvwxyz'
# Convert the ALPHABET to list
ALPHABET = [i for i in ALPHABET]
output_string = ''
input_string = input('Enter a String : ')
key = int(input('Enter the key: '))
for letter in input_string:
if letter in input_string:
# ALPHABET.index(letter) returns the index of that letter in the ALPHABET list
# then we can add the key to that index to get the letter
# then we take the mod of that so if the letter is x and 10 it cycle back to the beginning of the list
output_string += ALPHABET[(ALPHABET.index(letter)+key) % 26]
else:
output_string += letter
print(f'Encoded String is {output_string}')
|
normal
|
{
"blob_id": "b2db622596d0dff970e44759d25360a62f5fea83",
"index": 4725,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor letter in input_string:\n if letter in input_string:\n output_string += ALPHABET[(ALPHABET.index(letter) + key) % 26]\n else:\n output_string += letter\nprint(f'Encoded String is {output_string}')\n",
"step-3": "ALPHABET = 'abcdefghijklmnopqrstuvwxyz'\nALPHABET = [i for i in ALPHABET]\noutput_string = ''\ninput_string = input('Enter a String : ')\nkey = int(input('Enter the key: '))\nfor letter in input_string:\n if letter in input_string:\n output_string += ALPHABET[(ALPHABET.index(letter) + key) % 26]\n else:\n output_string += letter\nprint(f'Encoded String is {output_string}')\n",
"step-4": "ALPHABET = 'abcdefghijklmnopqrstuvwxyz'\n# Convert the ALPHABET to list\nALPHABET = [i for i in ALPHABET]\noutput_string = ''\ninput_string = input('Enter a String : ')\n\nkey = int(input('Enter the key: '))\n\nfor letter in input_string:\n if letter in input_string:\n # ALPHABET.index(letter) returns the index of that letter in the ALPHABET list\n # then we can add the key to that index to get the letter\n # then we take the mod of that so if the letter is x and 10 it cycle back to the beginning of the list\n output_string += ALPHABET[(ALPHABET.index(letter)+key) % 26]\n else:\n output_string += letter\n\nprint(f'Encoded String is {output_string}')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class LoginRegistrationAction(LoginRegistration):
def check_welcome_xunyou(self):
return self.welcome_xunyou().text
<|reserved_special_token_0|>
def logged_in_random(self):
self.phone_id().send_keys('1831111{}'.format(random.randint(1000,
9999)))
return self
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def logged_in_appoint_183(self):
self.phone_id().send_keys('18333334444')
return self
def click_verification_code(self):
self.verification_code().click()
return VerificationCodeAction(self._driver)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def number_quantity(self):
return len(self.phone_id().text)
def click_privacy_agreement(self):
self.privacy_agreement().click()
return self
def click_service_agreement(self):
self.service_agreement().click()
return self
def click_exit_privacy_agreement(self):
self.exit_privacy_agreement().click()
return self
def click_exit_service_agreement(self):
self.exit_service_agreement().click()
return self
def check_keyboard_Delete(self):
return self.keyboard_Delete().text
<|reserved_special_token_0|>
def click_exit_logged_in(self):
self.exit_logged_in().click()
from page.test_accelerate_page import AccelerateHomeAction
return AccelerateHomeAction(self._driver)
def click_default_area_code(self):
self.default_area_code().click()
return self
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def check_switch_area_code(self):
return self.switch_area_code().text
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LoginRegistrationAction(LoginRegistration):
def check_welcome_xunyou(self):
return self.welcome_xunyou().text
def click_welcome_xunyou(self):
self.welcome_xunyou().click()
return self
def logged_in_random(self):
self.phone_id().send_keys('1831111{}'.format(random.randint(1000,
9999)))
return self
def logged_in_appoint(self):
self.phone_id().send_keys(str(random.sample(public_number_vip, 1)))
return self
<|reserved_special_token_0|>
def logged_in_appoint_183(self):
self.phone_id().send_keys('18333334444')
return self
def click_verification_code(self):
self.verification_code().click()
return VerificationCodeAction(self._driver)
def check_verification_code_enabled(self):
return self.verification_code().is_enabled()
<|reserved_special_token_0|>
def number_quantity(self):
return len(self.phone_id().text)
def click_privacy_agreement(self):
self.privacy_agreement().click()
return self
def click_service_agreement(self):
self.service_agreement().click()
return self
def click_exit_privacy_agreement(self):
self.exit_privacy_agreement().click()
return self
def click_exit_service_agreement(self):
self.exit_service_agreement().click()
return self
def check_keyboard_Delete(self):
return self.keyboard_Delete().text
<|reserved_special_token_0|>
def click_exit_logged_in(self):
self.exit_logged_in().click()
from page.test_accelerate_page import AccelerateHomeAction
return AccelerateHomeAction(self._driver)
def click_default_area_code(self):
self.default_area_code().click()
return self
<|reserved_special_token_0|>
def click_switch_area_code(self):
self.switch_area_code().click()
return self
def check_switch_area_code(self):
return self.switch_area_code().text
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LoginRegistrationAction(LoginRegistration):
def check_welcome_xunyou(self):
return self.welcome_xunyou().text
def click_welcome_xunyou(self):
self.welcome_xunyou().click()
return self
def logged_in_random(self):
self.phone_id().send_keys('1831111{}'.format(random.randint(1000,
9999)))
return self
def logged_in_appoint(self):
self.phone_id().send_keys(str(random.sample(public_number_vip, 1)))
return self
def logged_in_not_vip_appoint(self):
self.phone_id().send_keys(str(random.sample(public_number_not_vip, 1)))
return self
def logged_in_appoint_183(self):
self.phone_id().send_keys('18333334444')
return self
def click_verification_code(self):
self.verification_code().click()
return VerificationCodeAction(self._driver)
def check_verification_code_enabled(self):
return self.verification_code().is_enabled()
def write_in_error_quantity(self):
self.phone_id().send_keys('1399999219392s我!3')
return self
def number_quantity(self):
return len(self.phone_id().text)
def click_privacy_agreement(self):
self.privacy_agreement().click()
return self
def click_service_agreement(self):
self.service_agreement().click()
return self
def click_exit_privacy_agreement(self):
self.exit_privacy_agreement().click()
return self
def click_exit_service_agreement(self):
self.exit_service_agreement().click()
return self
def check_keyboard_Delete(self):
return self.keyboard_Delete().text
<|reserved_special_token_0|>
def click_exit_logged_in(self):
self.exit_logged_in().click()
from page.test_accelerate_page import AccelerateHomeAction
return AccelerateHomeAction(self._driver)
def click_default_area_code(self):
self.default_area_code().click()
return self
<|reserved_special_token_0|>
def click_switch_area_code(self):
self.switch_area_code().click()
return self
def check_switch_area_code(self):
return self.switch_area_code().text
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import random
from elment.login_registration_element import LoginRegistration
from page.test_verification_code_page import VerificationCodeAction
public_number_vip = ['17800000000', '17800000001', '17800000002',
'17800000003', '17800000004', '17800000005', '17800000006',
'17800000007', '17800000008', '17800000009']
public_number_not_vip = ['18381939440', '18381939441', '18381939445',
'18381939446']
class LoginRegistrationAction(LoginRegistration):
def check_welcome_xunyou(self):
return self.welcome_xunyou().text
def click_welcome_xunyou(self):
self.welcome_xunyou().click()
return self
def logged_in_random(self):
self.phone_id().send_keys('1831111{}'.format(random.randint(1000,
9999)))
return self
def logged_in_appoint(self):
self.phone_id().send_keys(str(random.sample(public_number_vip, 1)))
return self
def logged_in_not_vip_appoint(self):
self.phone_id().send_keys(str(random.sample(public_number_not_vip, 1)))
return self
def logged_in_appoint_183(self):
self.phone_id().send_keys('18333334444')
return self
def click_verification_code(self):
self.verification_code().click()
return VerificationCodeAction(self._driver)
def check_verification_code_enabled(self):
return self.verification_code().is_enabled()
def write_in_error_quantity(self):
self.phone_id().send_keys('1399999219392s我!3')
return self
def number_quantity(self):
return len(self.phone_id().text)
def click_privacy_agreement(self):
self.privacy_agreement().click()
return self
def click_service_agreement(self):
self.service_agreement().click()
return self
def click_exit_privacy_agreement(self):
self.exit_privacy_agreement().click()
return self
def click_exit_service_agreement(self):
self.exit_service_agreement().click()
return self
def check_keyboard_Delete(self):
return self.keyboard_Delete().text
def logged_in_assert(self):
assert '欢迎登录迅游' in self.check_welcome_xunyou()
return self
def click_exit_logged_in(self):
self.exit_logged_in().click()
from page.test_accelerate_page import AccelerateHomeAction
return AccelerateHomeAction(self._driver)
def click_default_area_code(self):
self.default_area_code().click()
return self
def click_exit_area_code(self):
self.exit_area_code().click()
return self
def click_switch_area_code(self):
self.switch_area_code().click()
return self
def check_switch_area_code(self):
return self.switch_area_code().text
def check_memory_logged_in_number(self):
return self.memory_logged_in_number().text
<|reserved_special_token_1|>
import random
from elment.login_registration_element import LoginRegistration
from page.test_verification_code_page import VerificationCodeAction
public_number_vip = ['17800000000','17800000001','17800000002','17800000003','17800000004','17800000005','17800000006',
'17800000007','17800000008','17800000009']
public_number_not_vip = ['18381939440', '18381939441', '18381939445', '18381939446']
class LoginRegistrationAction(LoginRegistration): # 登录页操作
def check_welcome_xunyou(self): # 欢迎登陆迅游text
return self.welcome_xunyou().text
def click_welcome_xunyou(self): # 点击欢迎登录迅游(可以将键盘降下去)
self.welcome_xunyou().click()
return self
def logged_in_random(self): # 点击号码栏输入随机账号
self.phone_id().send_keys('1831111{}'.format(random.randint(1000,9999)))
return self
def logged_in_appoint(self): # 登录随机vip
self.phone_id().send_keys(str(random.sample(public_number_vip,1)))
return self
def logged_in_not_vip_appoint(self): # 登录随机非会员账号
self.phone_id().send_keys(str(random.sample(public_number_not_vip,1)))
return self
def logged_in_appoint_183(self): # 登录18333334444
self.phone_id().send_keys('18333334444')
return self
# def check_logged_in_title(self): # 查看更多页已登录账号元素展示
def click_verification_code(self): # 点击获取验证码
self.verification_code().click()
return VerificationCodeAction(self._driver)
def check_verification_code_enabled(self): # 获取验证码按钮是否可点击
return self.verification_code().is_enabled()
def write_in_error_quantity(self): # 输入多位手机号
self.phone_id().send_keys('1399999219392s我!3')
return self
def number_quantity(self): # 判断手机号位数
return len(self.phone_id().text)
def click_privacy_agreement(self): # 点击登录页隐私协议入口
self.privacy_agreement().click()
return self
def click_service_agreement(self): # 点击登录页服务协议入口
self.service_agreement().click()
return self
def click_exit_privacy_agreement(self): # 点击隐私协议详情页左上角<
self.exit_privacy_agreement().click()
return self
def click_exit_service_agreement(self): # 点击服务协议详情页左上角<
self.exit_service_agreement().click()
return self
def check_keyboard_Delete(self): # 检查键盘Delete文本,可用来判断键盘是否存在
return self.keyboard_Delete().text
def logged_in_assert(self): # 判断是否进入了登录页
assert "欢迎登录迅游" in self.check_welcome_xunyou()
return self
def click_exit_logged_in(self): # 点击登录页左上角<点击,在加速首页触发的登录,返回加速页
self.exit_logged_in().click()
from page.test_accelerate_page import AccelerateHomeAction
return AccelerateHomeAction(self._driver)
def click_default_area_code(self): # 点击区号按钮
self.default_area_code().click()
return self
def click_exit_area_code(self): # 点击区号页左上角<,返回登录页
self.exit_area_code().click()
return self
def click_switch_area_code(self): # 点击区号页面阿富汗区号
self.switch_area_code().click()
return self
def check_switch_area_code(self): # 查看修改后的区号
return self.switch_area_code().text
def check_memory_logged_in_number(self):
    """Return the remembered-account text shown on the login page."""
    remembered = self.memory_logged_in_number()
    return remembered.text
|
flexible
|
{
"blob_id": "e5a698979bc84fe733a9bf5cd51e2f078956d468",
"index": 2461,
"step-1": "<mask token>\n\n\nclass LoginRegistrationAction(LoginRegistration):\n\n def check_welcome_xunyou(self):\n return self.welcome_xunyou().text\n <mask token>\n\n def logged_in_random(self):\n self.phone_id().send_keys('1831111{}'.format(random.randint(1000, \n 9999)))\n return self\n <mask token>\n <mask token>\n\n def logged_in_appoint_183(self):\n self.phone_id().send_keys('18333334444')\n return self\n\n def click_verification_code(self):\n self.verification_code().click()\n return VerificationCodeAction(self._driver)\n <mask token>\n <mask token>\n\n def number_quantity(self):\n return len(self.phone_id().text)\n\n def click_privacy_agreement(self):\n self.privacy_agreement().click()\n return self\n\n def click_service_agreement(self):\n self.service_agreement().click()\n return self\n\n def click_exit_privacy_agreement(self):\n self.exit_privacy_agreement().click()\n return self\n\n def click_exit_service_agreement(self):\n self.exit_service_agreement().click()\n return self\n\n def check_keyboard_Delete(self):\n return self.keyboard_Delete().text\n <mask token>\n\n def click_exit_logged_in(self):\n self.exit_logged_in().click()\n from page.test_accelerate_page import AccelerateHomeAction\n return AccelerateHomeAction(self._driver)\n\n def click_default_area_code(self):\n self.default_area_code().click()\n return self\n <mask token>\n <mask token>\n\n def check_switch_area_code(self):\n return self.switch_area_code().text\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass LoginRegistrationAction(LoginRegistration):\n\n def check_welcome_xunyou(self):\n return self.welcome_xunyou().text\n\n def click_welcome_xunyou(self):\n self.welcome_xunyou().click()\n return self\n\n def logged_in_random(self):\n self.phone_id().send_keys('1831111{}'.format(random.randint(1000, \n 9999)))\n return self\n\n def logged_in_appoint(self):\n self.phone_id().send_keys(str(random.sample(public_number_vip, 1)))\n return self\n <mask token>\n\n def logged_in_appoint_183(self):\n self.phone_id().send_keys('18333334444')\n return self\n\n def click_verification_code(self):\n self.verification_code().click()\n return VerificationCodeAction(self._driver)\n\n def check_verification_code_enabled(self):\n return self.verification_code().is_enabled()\n <mask token>\n\n def number_quantity(self):\n return len(self.phone_id().text)\n\n def click_privacy_agreement(self):\n self.privacy_agreement().click()\n return self\n\n def click_service_agreement(self):\n self.service_agreement().click()\n return self\n\n def click_exit_privacy_agreement(self):\n self.exit_privacy_agreement().click()\n return self\n\n def click_exit_service_agreement(self):\n self.exit_service_agreement().click()\n return self\n\n def check_keyboard_Delete(self):\n return self.keyboard_Delete().text\n <mask token>\n\n def click_exit_logged_in(self):\n self.exit_logged_in().click()\n from page.test_accelerate_page import AccelerateHomeAction\n return AccelerateHomeAction(self._driver)\n\n def click_default_area_code(self):\n self.default_area_code().click()\n return self\n <mask token>\n\n def click_switch_area_code(self):\n self.switch_area_code().click()\n return self\n\n def check_switch_area_code(self):\n return self.switch_area_code().text\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass LoginRegistrationAction(LoginRegistration):\n\n def check_welcome_xunyou(self):\n return self.welcome_xunyou().text\n\n def click_welcome_xunyou(self):\n self.welcome_xunyou().click()\n return self\n\n def logged_in_random(self):\n self.phone_id().send_keys('1831111{}'.format(random.randint(1000, \n 9999)))\n return self\n\n def logged_in_appoint(self):\n self.phone_id().send_keys(str(random.sample(public_number_vip, 1)))\n return self\n\n def logged_in_not_vip_appoint(self):\n self.phone_id().send_keys(str(random.sample(public_number_not_vip, 1)))\n return self\n\n def logged_in_appoint_183(self):\n self.phone_id().send_keys('18333334444')\n return self\n\n def click_verification_code(self):\n self.verification_code().click()\n return VerificationCodeAction(self._driver)\n\n def check_verification_code_enabled(self):\n return self.verification_code().is_enabled()\n\n def write_in_error_quantity(self):\n self.phone_id().send_keys('1399999219392s我!3')\n return self\n\n def number_quantity(self):\n return len(self.phone_id().text)\n\n def click_privacy_agreement(self):\n self.privacy_agreement().click()\n return self\n\n def click_service_agreement(self):\n self.service_agreement().click()\n return self\n\n def click_exit_privacy_agreement(self):\n self.exit_privacy_agreement().click()\n return self\n\n def click_exit_service_agreement(self):\n self.exit_service_agreement().click()\n return self\n\n def check_keyboard_Delete(self):\n return self.keyboard_Delete().text\n <mask token>\n\n def click_exit_logged_in(self):\n self.exit_logged_in().click()\n from page.test_accelerate_page import AccelerateHomeAction\n return AccelerateHomeAction(self._driver)\n\n def click_default_area_code(self):\n self.default_area_code().click()\n return self\n <mask token>\n\n def click_switch_area_code(self):\n self.switch_area_code().click()\n return self\n\n def check_switch_area_code(self):\n return self.switch_area_code().text\n <mask token>\n",
"step-4": "import random\nfrom elment.login_registration_element import LoginRegistration\nfrom page.test_verification_code_page import VerificationCodeAction\npublic_number_vip = ['17800000000', '17800000001', '17800000002',\n '17800000003', '17800000004', '17800000005', '17800000006',\n '17800000007', '17800000008', '17800000009']\npublic_number_not_vip = ['18381939440', '18381939441', '18381939445',\n '18381939446']\n\n\nclass LoginRegistrationAction(LoginRegistration):\n\n def check_welcome_xunyou(self):\n return self.welcome_xunyou().text\n\n def click_welcome_xunyou(self):\n self.welcome_xunyou().click()\n return self\n\n def logged_in_random(self):\n self.phone_id().send_keys('1831111{}'.format(random.randint(1000, \n 9999)))\n return self\n\n def logged_in_appoint(self):\n self.phone_id().send_keys(str(random.sample(public_number_vip, 1)))\n return self\n\n def logged_in_not_vip_appoint(self):\n self.phone_id().send_keys(str(random.sample(public_number_not_vip, 1)))\n return self\n\n def logged_in_appoint_183(self):\n self.phone_id().send_keys('18333334444')\n return self\n\n def click_verification_code(self):\n self.verification_code().click()\n return VerificationCodeAction(self._driver)\n\n def check_verification_code_enabled(self):\n return self.verification_code().is_enabled()\n\n def write_in_error_quantity(self):\n self.phone_id().send_keys('1399999219392s我!3')\n return self\n\n def number_quantity(self):\n return len(self.phone_id().text)\n\n def click_privacy_agreement(self):\n self.privacy_agreement().click()\n return self\n\n def click_service_agreement(self):\n self.service_agreement().click()\n return self\n\n def click_exit_privacy_agreement(self):\n self.exit_privacy_agreement().click()\n return self\n\n def click_exit_service_agreement(self):\n self.exit_service_agreement().click()\n return self\n\n def check_keyboard_Delete(self):\n return self.keyboard_Delete().text\n\n def logged_in_assert(self):\n assert '欢迎登录迅游' in 
self.check_welcome_xunyou()\n return self\n\n def click_exit_logged_in(self):\n self.exit_logged_in().click()\n from page.test_accelerate_page import AccelerateHomeAction\n return AccelerateHomeAction(self._driver)\n\n def click_default_area_code(self):\n self.default_area_code().click()\n return self\n\n def click_exit_area_code(self):\n self.exit_area_code().click()\n return self\n\n def click_switch_area_code(self):\n self.switch_area_code().click()\n return self\n\n def check_switch_area_code(self):\n return self.switch_area_code().text\n\n def check_memory_logged_in_number(self):\n return self.memory_logged_in_number().text\n",
"step-5": "import random\n\nfrom elment.login_registration_element import LoginRegistration\nfrom page.test_verification_code_page import VerificationCodeAction\npublic_number_vip = ['17800000000','17800000001','17800000002','17800000003','17800000004','17800000005','17800000006',\n '17800000007','17800000008','17800000009']\n\npublic_number_not_vip = ['18381939440', '18381939441', '18381939445', '18381939446']\n\nclass LoginRegistrationAction(LoginRegistration): # 登录页操作\n\n def check_welcome_xunyou(self): # 欢迎登陆迅游text\n return self.welcome_xunyou().text\n\n def click_welcome_xunyou(self): # 点击欢迎登录迅游(可以将键盘降下去)\n self.welcome_xunyou().click()\n return self\n\n def logged_in_random(self): # 点击号码栏输入随机账号\n self.phone_id().send_keys('1831111{}'.format(random.randint(1000,9999)))\n return self\n\n def logged_in_appoint(self): # 登录随机vip\n self.phone_id().send_keys(str(random.sample(public_number_vip,1)))\n return self\n\n def logged_in_not_vip_appoint(self): # 登录随机非会员账号\n self.phone_id().send_keys(str(random.sample(public_number_not_vip,1)))\n return self\n\n def logged_in_appoint_183(self): # 登录18333334444\n self.phone_id().send_keys('18333334444')\n return self\n\n # def check_logged_in_title(self): # 查看更多页已登录账号元素展示\n\n def click_verification_code(self): # 点击获取验证码\n self.verification_code().click()\n return VerificationCodeAction(self._driver)\n\n def check_verification_code_enabled(self): # 获取验证码按钮是否可点击\n return self.verification_code().is_enabled()\n\n def write_in_error_quantity(self): # 输入多位手机号\n self.phone_id().send_keys('1399999219392s我!3')\n return self\n\n def number_quantity(self): # 判断手机号位数\n return len(self.phone_id().text)\n\n def click_privacy_agreement(self): # 点击登录页隐私协议入口\n self.privacy_agreement().click()\n return self\n\n def click_service_agreement(self): # 点击登录页服务协议入口\n self.service_agreement().click()\n return self\n\n def click_exit_privacy_agreement(self): # 点击隐私协议详情页左上角<\n self.exit_privacy_agreement().click()\n return self\n\n def 
click_exit_service_agreement(self): # 点击服务协议详情页左上角<\n self.exit_service_agreement().click()\n return self\n\n def check_keyboard_Delete(self): # 检查键盘Delete文本,可用来判断键盘是否存在\n return self.keyboard_Delete().text\n\n def logged_in_assert(self): # 判断是否进入了登录页\n assert \"欢迎登录迅游\" in self.check_welcome_xunyou()\n return self\n\n def click_exit_logged_in(self): # 点击登录页左上角<点击,在加速首页触发的登录,返回加速页\n self.exit_logged_in().click()\n from page.test_accelerate_page import AccelerateHomeAction\n return AccelerateHomeAction(self._driver)\n\n def click_default_area_code(self): # 点击区号按钮\n self.default_area_code().click()\n return self\n\n def click_exit_area_code(self): # 点击区号页左上角<,返回登录页\n self.exit_area_code().click()\n return self\n\n def click_switch_area_code(self): # 点击区号页面阿富汗区号\n self.switch_area_code().click()\n return self\n\n def check_switch_area_code(self): # 查看修改后的区号\n return self.switch_area_code().text\n\n def check_memory_logged_in_number(self): # 查看账号记忆功能文本\n return self.memory_logged_in_number().text",
"step-ids": [
14,
18,
20,
25,
26
]
}
|
[
14,
18,
20,
25,
26
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.