code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
#!/usr/bin/env python3
import sys
import os
import math
import tempfile
import zlib
import lzma
import struct
import bitstruct
# a swf file unpacker and analyzer
# majority of information taken from https://www.adobe.com/devnet/swf.html (version 19)
# some additional information taken from https://github.com/claus/as3swf/wiki/SWF-tag-support-chart
class SWFFileUnpackingException(Exception):
    '''Generic exception raised while unpacking a SWF file, typically due to
    incorrect structure or unexpected values (e.g. an unknown file signature).'''
class SWFRect(object):
    '''Axis-aligned bounding box decoded from a SWF RECT record.

    Coordinates are stored exactly as unpacked (the SWF format expresses
    them in twips, 1/20 of a pixel).
    '''
    def __init__(self, xmin, xmax, ymin, ymax):
        self.xmin = xmin
        self.xmax = xmax
        self.ymin = ymin
        self.ymax = ymax

    def __str__(self):
        # Same textual form as repr-style output used elsewhere in this module.
        return 'SWFRect({},{},{},{})'.format(self.xmin, self.xmax, self.ymin, self.ymax)
# Maps a SWF tag type code to its human-readable tag name.
# Codes come from the Adobe SWF File Format Specification (version 19);
# gaps in the numbering are codes that are reserved or undocumented.
tagCodeTranslation = {
    0:'End',
    1:'ShowFrame',
    2:'DefineShape',
    4:'PlaceObject',
    5:'RemoveObject',
    6:'DefineBits',
    7:'DefineButton',
    8:'JPEGTables',
    9:'SetBackgroundColor',
    10:'DefineFont',
    11:'DefineText',
    12:'DoAction',
    13:'DefineFontInfo',
    14:'DefineSound',
    15:'StartSound',
    17:'DefineButtonSound',
    18:'SoundStreamHead',
    19:'SoundStreamBlock',
    20:'DefineBitsLossless',
    21:'DefineBitsJPEG2',
    22:'DefineShape2',
    23:'DefineButtonCxform',
    24:'Protect',
    26:'PlaceObject2',
    28:'RemoveObject2',
    32:'DefineShape3',
    33:'DefineText2',
    34:'DefineButton2',
    35:'DefineBitsJPEG3',
    36:'DefineBitsLossless2',
    37:'DefineEditText',
    39:'DefineSprite',
    41:'ProductInfo', # taken from https://github.com/claus/as3swf/wiki/SWF-tag-support-chart
    43:'FrameLabel',
    45:'SoundStreamHead2',
    46:'DefineMorphShape',
    48:'DefineFont2',
    56:'ExportAssets',
    57:'ImportAssets',
    58:'EnableDebugger',
    59:'DoInitAction',
    60:'DefineVideoStream',
    61:'VideoFrame',
    62:'DefineFontInfo2',
    63:'DebugID', # taken from https://github.com/claus/as3swf/wiki/SWF-tag-support-chart
    64:'EnableDebugger2',
    65:'ScriptLimits',
    66:'SetTabIndex',
    69:'FileAttributes',
    70:'PlaceObject3',
    71:'ImportAssets2',
    73:'DefineFontAlignZones',
    74:'CSMTextSettings',
    75:'DefineFont3',
    76:'SymbolClass',
    77:'Metadata',
    78:'DefineScalingGrid',
    82:'DoABC',
    83:'DefineShape4',
    84:'DefineMorphShape2',
    86:'DefineSceneAndFrameLabelData',
    87:'DefineBinaryData',
    88:'DefineFontName',
    89:'StartSound2',
    90:'DefineBitsJPEG4',
    91:'DefineFont4',
    93:'EnableTelemetry',
}
class SWFTag(object):
    '''Header record for a single SWF tag: a type code plus payload length.

    The type name is resolved from tagCodeTranslation; unknown codes are
    reported on stdout and given the sentinel name '!UNKNOWN!'.
    '''
    def __init__(self, code, length):
        self.code = code
        self.length = length
        self.typeName = tagCodeTranslation.get(code, '!UNKNOWN!')
        if self.typeName == '!UNKNOWN!':
            print('warning: unknown swf tag code: {}'.format(code))

    def isEndTag(self):
        '''Return True when this is the End tag that terminates a tag stream.'''
        return self.typeName == 'End'

    def __str__(self):
        return 'SWFTag(code={} "{}", length={})'.format(self.code, self.typeName, self.length)
class SWFFile(object):
    '''Unpacks the header and tag stream of a SWF (Shockwave Flash) file.

    Loading happens eagerly in __init__: the header is parsed, the body is
    decompressed to a temp file if needed, and every tag header is collected
    into self.tags (tag payloads are skipped, not parsed).
    '''
    def __init__(self, filepath):
        self.filepath = filepath

        self.compression = None
        self.version = None
        self.fileLength = None
        self.frameSize = None
        self.frameRate = None
        self.frameCount = None

        self.tags = []

        # stream the (de)compression in 64 KiB chunks
        self.chunkSize = 16 * 4096

        self.load()

    def load(self):
        '''loads the swf file at the filepath'''
        self.handle = open(self.filepath, 'rb')

        self.unpackHeader1()
        print('signature:', self.signature)
        print('version:', self.version)
        print('fileLength:', self.fileLength)

        if self.compression != 'none':
            self.decompress()

        self.unpackHeader2()

        print('frameSize:', self.frameSize)
        print('frameRate:', self.frameRate)
        print('frameCount:', self.frameCount)

        self.unpackTags()
        for tag in self.tags:
            print(tag)
            if tag.typeName == '!UNKNOWN!':
                print('warning: unknown tag!')

    def decompress(self):
        '''replaces the handle with a tempfile handle with all content decompressed'''
        temp = tempfile.TemporaryFile('w+b')
        if self.compression == 'zlib':
            decompressor = zlib.decompressobj()
        elif self.compression == 'lzma':
            # ZWS layout (SWF spec v19): a uint32 compressed-payload length,
            # then 5 LZMA property bytes, then the raw LZMA stream. Python's
            # lzma module has no format for that exact layout, so synthesize
            # an LZMA_ALONE header: the property bytes plus an 8-byte
            # unknown-size marker, then stream the remaining data through.
            self.handle.read(4)  # compressed payload length; unused when streaming
            properties = self.handle.read(5)
            decompressor = lzma.LZMADecompressor(lzma.FORMAT_ALONE)
            temp.write(decompressor.decompress(properties + b'\xff' * 8))
        else:
            # unreachable via unpackHeader1, but kept as a guard; use the
            # module's own exception type for consistency
            raise SWFFileUnpackingException("unknown compression algorithm: " + str(self.compression))
        chunk = self.handle.read(self.chunkSize)
        while len(chunk) > 0:
            temp.write(decompressor.decompress(chunk))
            chunk = self.handle.read(self.chunkSize)
        temp.seek(0)
        # fix: close the original file handle instead of leaking it
        self.handle.close()
        self.handle = temp

    def unpackHeader1(self):
        '''unpacks the first 8 bytes of the header and figures out what compression there is'''
        header = self.handle.read(8)
        signature, self.version, self.fileLength = struct.unpack('<3sBI', header)

        signature = signature.decode('ascii')
        if signature == 'FWS':
            self.compression = 'none'
        elif signature == 'CWS':
            self.compression = 'zlib'
        elif signature == 'ZWS':
            self.compression = 'lzma'
        else:
            raise SWFFileUnpackingException('unknown file signature: "' + signature + '"')

        self.signature = signature

    def unpackHeader2(self):
        '''unpacks the rest of the header data that might have been compressed'''
        self.frameSize = self.unpackRect()
        rawRate, self.frameCount = struct.unpack('<HH', self.handle.read(4))
        # The frame rate is a little-endian 8.8 fixed-point value (SWF spec
        # v19 header): high byte = integer part, low byte = fraction, so the
        # raw uint16 divided by 256 yields frames per second.
        self.frameRate = rawRate / 256.0

    def unpackRect(self):
        '''Unpack a RECT record: a 5-bit field width followed by four signed
        fields of that many bits each (values are in twips).'''
        data = self.handle.read(1)
        size, = bitstruct.unpack('u5', data)
        # 5 bits already consumed from the first byte; read the rest
        data += self.handle.read(math.ceil((size * 4 - 3) / 8))
        xmin, xmax, ymin, ymax = bitstruct.unpack('p5' + ('s' + str(size)) * 4, data)
        return SWFRect(xmin, xmax, ymin, ymax)

    def unpackTags(self):
        '''Read tag headers until EOF, warning if tags follow an End tag.'''
        sample = self.handle.read(2)
        tag = None
        while len(sample) > 0:
            if tag is not None and tag.isEndTag():
                print('warning: swf has tags after an end tag!')
            self.handle.seek(-2, os.SEEK_CUR)  # rewind the 2-byte EOF probe
            tag = self.unpackTag()
            self.tags.append(tag)

            sample = self.handle.read(2)

    def unpackTag(self):
        '''Read one tag header and skip over its payload.'''
        tag = self.unpackTagHeader()
        self.handle.read(tag.length)
        return tag

    def unpackTagHeader(self):
        '''Decode the packed tag header: upper 10 bits = code, lower 6 bits =
        length; a length of 0x3f means a 32-bit length follows (long form).'''
        data, = struct.unpack('<H', self.handle.read(2))
        tagCode = data >> 6
        tagLength = data & 0x3f
        if tagLength == 0x3f:
            tagLength, = struct.unpack('<I', self.handle.read(4))
        return SWFTag(tagCode, tagLength)
def main():
    '''CLI entry point: unpack and dump the SWF file named on the command line.'''
    if len(sys.argv) < 2:
        print('filepath required')
        return
    SWFFile(sys.argv[1])


if __name__ == '__main__':
    main()
|
normal
|
{
"blob_id": "4556febd5fddf390f370a8e24871eacf08d34c9f",
"index": 7087,
"step-1": "<mask token>\n\n\nclass SWFRect(object):\n\n def __init__(self, xmin, xmax, ymin, ymax):\n self.xmin = xmin\n self.xmax = xmax\n self.ymin = ymin\n self.ymax = ymax\n\n def __str__(self):\n return 'SWFRect(' + str(self.xmin) + ',' + str(self.xmax) + ',' + str(\n self.ymin) + ',' + str(self.ymax) + ')'\n\n\n<mask token>\n\n\nclass SWFTag(object):\n\n def __init__(self, code, length):\n self.code = code\n self.length = length\n self.typeName = tagCodeTranslation.get(self.code, '!UNKNOWN!')\n if self.typeName == '!UNKNOWN!':\n print('warning: unknown swf tag code: ' + str(self.code))\n\n def isEndTag(self):\n return self.typeName == 'End'\n\n def __str__(self):\n return 'SWFTag(code=' + str(self.code\n ) + ' \"' + self.typeName + '\", length=' + str(self.length) + ')'\n\n\nclass SWFFile(object):\n\n def __init__(self, filepath):\n self.filepath = filepath\n self.compression = None\n self.version = None\n self.fileLength = None\n self.frameSize = None\n self.frameRate = None\n self.frameCount = None\n self.tags = []\n self.chunkSize = 16 * 4096\n self.load()\n\n def load(self):\n \"\"\"loads the swf file at the filepath\"\"\"\n self.handle = open(self.filepath, 'rb')\n self.unpackHeader1()\n print('signature:', self.signature)\n print('version:', self.version)\n print('fileLength:', self.fileLength)\n if self.compression != 'none':\n self.decompress()\n self.unpackHeader2()\n print('frameSize:', self.frameSize)\n print('frameRate:', self.frameRate)\n print('frameCount:', self.frameCount)\n self.unpackTags()\n for tag in self.tags:\n print(tag)\n if tag.typeName == '!UNKNOWN!':\n print('warning: unknown tag!')\n\n def decompress(self):\n \"\"\"replaces the handle with a tempfile handle with all content decompressed\"\"\"\n temp = tempfile.TemporaryFile('w+b')\n if self.compression == 'zlib':\n decompressor = zlib.decompressobj()\n elif self.compression == 'lzma':\n decompressor = lzma.LZMADecompressor()\n else:\n raise Exception('unknown compression 
algorithm: ' + self.\n compression)\n chunk = self.handle.read(self.chunkSize)\n while len(chunk) > 0:\n temp.write(decompressor.decompress(chunk))\n chunk = self.handle.read(self.chunkSize)\n temp.seek(0)\n self.handle = temp\n\n def unpackHeader1(self):\n \"\"\"unpacks the first 8 bytes of the header and figures out what compression there is\"\"\"\n header = self.handle.read(8)\n signature, self.version, self.fileLength = struct.unpack('<3sBI',\n header)\n signature = signature.decode('ascii')\n if signature == 'FWS':\n self.compression = 'none'\n elif signature == 'CWS':\n self.compression = 'zlib'\n elif signature == 'ZWS':\n self.compression = 'lzma'\n else:\n raise SWFFileUnpackingException('unknown file signature: \"' +\n signature + '\"')\n self.signature = signature\n\n def unpackHeader2(self):\n \"\"\"unpacks the rest of the header data that might have been compressed\"\"\"\n self.frameSize = self.unpackRect()\n self.frameRate, self.frameCount = struct.unpack('<HH', self.handle.\n read(4))\n\n def unpackRect(self):\n data = self.handle.read(1)\n size, = bitstruct.unpack('u5', data)\n data += self.handle.read(math.ceil((size * 4 - 3) / 8))\n xmin, xmax, ymin, ymax = bitstruct.unpack('p5' + ('s' + str(size)) *\n 4, data)\n return SWFRect(xmin, xmax, ymin, ymax)\n\n def unpackTags(self):\n sample = self.handle.read(2)\n tag = None\n while len(sample) > 0:\n if tag is not None and tag.isEndTag():\n print('warning: swf has tags after an end tag!')\n self.handle.seek(-2, os.SEEK_CUR)\n tag = self.unpackTag()\n self.tags.append(tag)\n sample = self.handle.read(2)\n\n def unpackTag(self):\n tag = self.unpackTagHeader()\n self.handle.read(tag.length)\n return tag\n\n def unpackTagHeader(self):\n data, = struct.unpack('<H', self.handle.read(2))\n tagCode = data >> 6\n tagLength = data & 63\n if tagLength == 63:\n tagLength, = struct.unpack('<I', self.handle.read(4))\n return SWFTag(tagCode, tagLength)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SWFFileUnpackingException(Exception):\n <mask token>\n\n\nclass SWFRect(object):\n\n def __init__(self, xmin, xmax, ymin, ymax):\n self.xmin = xmin\n self.xmax = xmax\n self.ymin = ymin\n self.ymax = ymax\n\n def __str__(self):\n return 'SWFRect(' + str(self.xmin) + ',' + str(self.xmax) + ',' + str(\n self.ymin) + ',' + str(self.ymax) + ')'\n\n\n<mask token>\n\n\nclass SWFTag(object):\n\n def __init__(self, code, length):\n self.code = code\n self.length = length\n self.typeName = tagCodeTranslation.get(self.code, '!UNKNOWN!')\n if self.typeName == '!UNKNOWN!':\n print('warning: unknown swf tag code: ' + str(self.code))\n\n def isEndTag(self):\n return self.typeName == 'End'\n\n def __str__(self):\n return 'SWFTag(code=' + str(self.code\n ) + ' \"' + self.typeName + '\", length=' + str(self.length) + ')'\n\n\nclass SWFFile(object):\n\n def __init__(self, filepath):\n self.filepath = filepath\n self.compression = None\n self.version = None\n self.fileLength = None\n self.frameSize = None\n self.frameRate = None\n self.frameCount = None\n self.tags = []\n self.chunkSize = 16 * 4096\n self.load()\n\n def load(self):\n \"\"\"loads the swf file at the filepath\"\"\"\n self.handle = open(self.filepath, 'rb')\n self.unpackHeader1()\n print('signature:', self.signature)\n print('version:', self.version)\n print('fileLength:', self.fileLength)\n if self.compression != 'none':\n self.decompress()\n self.unpackHeader2()\n print('frameSize:', self.frameSize)\n print('frameRate:', self.frameRate)\n print('frameCount:', self.frameCount)\n self.unpackTags()\n for tag in self.tags:\n print(tag)\n if tag.typeName == '!UNKNOWN!':\n print('warning: unknown tag!')\n\n def decompress(self):\n \"\"\"replaces the handle with a tempfile handle with all content decompressed\"\"\"\n temp = tempfile.TemporaryFile('w+b')\n if self.compression == 'zlib':\n decompressor = zlib.decompressobj()\n elif self.compression == 'lzma':\n decompressor = 
lzma.LZMADecompressor()\n else:\n raise Exception('unknown compression algorithm: ' + self.\n compression)\n chunk = self.handle.read(self.chunkSize)\n while len(chunk) > 0:\n temp.write(decompressor.decompress(chunk))\n chunk = self.handle.read(self.chunkSize)\n temp.seek(0)\n self.handle = temp\n\n def unpackHeader1(self):\n \"\"\"unpacks the first 8 bytes of the header and figures out what compression there is\"\"\"\n header = self.handle.read(8)\n signature, self.version, self.fileLength = struct.unpack('<3sBI',\n header)\n signature = signature.decode('ascii')\n if signature == 'FWS':\n self.compression = 'none'\n elif signature == 'CWS':\n self.compression = 'zlib'\n elif signature == 'ZWS':\n self.compression = 'lzma'\n else:\n raise SWFFileUnpackingException('unknown file signature: \"' +\n signature + '\"')\n self.signature = signature\n\n def unpackHeader2(self):\n \"\"\"unpacks the rest of the header data that might have been compressed\"\"\"\n self.frameSize = self.unpackRect()\n self.frameRate, self.frameCount = struct.unpack('<HH', self.handle.\n read(4))\n\n def unpackRect(self):\n data = self.handle.read(1)\n size, = bitstruct.unpack('u5', data)\n data += self.handle.read(math.ceil((size * 4 - 3) / 8))\n xmin, xmax, ymin, ymax = bitstruct.unpack('p5' + ('s' + str(size)) *\n 4, data)\n return SWFRect(xmin, xmax, ymin, ymax)\n\n def unpackTags(self):\n sample = self.handle.read(2)\n tag = None\n while len(sample) > 0:\n if tag is not None and tag.isEndTag():\n print('warning: swf has tags after an end tag!')\n self.handle.seek(-2, os.SEEK_CUR)\n tag = self.unpackTag()\n self.tags.append(tag)\n sample = self.handle.read(2)\n\n def unpackTag(self):\n tag = self.unpackTagHeader()\n self.handle.read(tag.length)\n return tag\n\n def unpackTagHeader(self):\n data, = struct.unpack('<H', self.handle.read(2))\n tagCode = data >> 6\n tagLength = data & 63\n if tagLength == 63:\n tagLength, = struct.unpack('<I', self.handle.read(4))\n return SWFTag(tagCode, 
tagLength)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SWFFileUnpackingException(Exception):\n \"\"\"generic exception during unpacking of a swf file typically due to incorrect structure or unexpected values\"\"\"\n\n\nclass SWFRect(object):\n\n def __init__(self, xmin, xmax, ymin, ymax):\n self.xmin = xmin\n self.xmax = xmax\n self.ymin = ymin\n self.ymax = ymax\n\n def __str__(self):\n return 'SWFRect(' + str(self.xmin) + ',' + str(self.xmax) + ',' + str(\n self.ymin) + ',' + str(self.ymax) + ')'\n\n\n<mask token>\n\n\nclass SWFTag(object):\n\n def __init__(self, code, length):\n self.code = code\n self.length = length\n self.typeName = tagCodeTranslation.get(self.code, '!UNKNOWN!')\n if self.typeName == '!UNKNOWN!':\n print('warning: unknown swf tag code: ' + str(self.code))\n\n def isEndTag(self):\n return self.typeName == 'End'\n\n def __str__(self):\n return 'SWFTag(code=' + str(self.code\n ) + ' \"' + self.typeName + '\", length=' + str(self.length) + ')'\n\n\nclass SWFFile(object):\n\n def __init__(self, filepath):\n self.filepath = filepath\n self.compression = None\n self.version = None\n self.fileLength = None\n self.frameSize = None\n self.frameRate = None\n self.frameCount = None\n self.tags = []\n self.chunkSize = 16 * 4096\n self.load()\n\n def load(self):\n \"\"\"loads the swf file at the filepath\"\"\"\n self.handle = open(self.filepath, 'rb')\n self.unpackHeader1()\n print('signature:', self.signature)\n print('version:', self.version)\n print('fileLength:', self.fileLength)\n if self.compression != 'none':\n self.decompress()\n self.unpackHeader2()\n print('frameSize:', self.frameSize)\n print('frameRate:', self.frameRate)\n print('frameCount:', self.frameCount)\n self.unpackTags()\n for tag in self.tags:\n print(tag)\n if tag.typeName == '!UNKNOWN!':\n print('warning: unknown tag!')\n\n def decompress(self):\n \"\"\"replaces the handle with a tempfile handle with all content decompressed\"\"\"\n temp = tempfile.TemporaryFile('w+b')\n if self.compression == 
'zlib':\n decompressor = zlib.decompressobj()\n elif self.compression == 'lzma':\n decompressor = lzma.LZMADecompressor()\n else:\n raise Exception('unknown compression algorithm: ' + self.\n compression)\n chunk = self.handle.read(self.chunkSize)\n while len(chunk) > 0:\n temp.write(decompressor.decompress(chunk))\n chunk = self.handle.read(self.chunkSize)\n temp.seek(0)\n self.handle = temp\n\n def unpackHeader1(self):\n \"\"\"unpacks the first 8 bytes of the header and figures out what compression there is\"\"\"\n header = self.handle.read(8)\n signature, self.version, self.fileLength = struct.unpack('<3sBI',\n header)\n signature = signature.decode('ascii')\n if signature == 'FWS':\n self.compression = 'none'\n elif signature == 'CWS':\n self.compression = 'zlib'\n elif signature == 'ZWS':\n self.compression = 'lzma'\n else:\n raise SWFFileUnpackingException('unknown file signature: \"' +\n signature + '\"')\n self.signature = signature\n\n def unpackHeader2(self):\n \"\"\"unpacks the rest of the header data that might have been compressed\"\"\"\n self.frameSize = self.unpackRect()\n self.frameRate, self.frameCount = struct.unpack('<HH', self.handle.\n read(4))\n\n def unpackRect(self):\n data = self.handle.read(1)\n size, = bitstruct.unpack('u5', data)\n data += self.handle.read(math.ceil((size * 4 - 3) / 8))\n xmin, xmax, ymin, ymax = bitstruct.unpack('p5' + ('s' + str(size)) *\n 4, data)\n return SWFRect(xmin, xmax, ymin, ymax)\n\n def unpackTags(self):\n sample = self.handle.read(2)\n tag = None\n while len(sample) > 0:\n if tag is not None and tag.isEndTag():\n print('warning: swf has tags after an end tag!')\n self.handle.seek(-2, os.SEEK_CUR)\n tag = self.unpackTag()\n self.tags.append(tag)\n sample = self.handle.read(2)\n\n def unpackTag(self):\n tag = self.unpackTagHeader()\n self.handle.read(tag.length)\n return tag\n\n def unpackTagHeader(self):\n data, = struct.unpack('<H', self.handle.read(2))\n tagCode = data >> 6\n tagLength = data & 63\n if 
tagLength == 63:\n tagLength, = struct.unpack('<I', self.handle.read(4))\n return SWFTag(tagCode, tagLength)\n\n\ndef main():\n if len(sys.argv) < 2:\n print('filepath required')\n else:\n file = SWFFile(sys.argv[1])\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass SWFFileUnpackingException(Exception):\n \"\"\"generic exception during unpacking of a swf file typically due to incorrect structure or unexpected values\"\"\"\n\n\nclass SWFRect(object):\n\n def __init__(self, xmin, xmax, ymin, ymax):\n self.xmin = xmin\n self.xmax = xmax\n self.ymin = ymin\n self.ymax = ymax\n\n def __str__(self):\n return 'SWFRect(' + str(self.xmin) + ',' + str(self.xmax) + ',' + str(\n self.ymin) + ',' + str(self.ymax) + ')'\n\n\ntagCodeTranslation = {(0): 'End', (1): 'ShowFrame', (2): 'DefineShape', (4):\n 'PlaceObject', (5): 'RemoveObject', (6): 'DefineBits', (7):\n 'DefineButton', (8): 'JPEGTables', (9): 'SetBackgroundColor', (10):\n 'DefineFont', (11): 'DefineText', (12): 'DoAction', (13):\n 'DefineFontInfo', (14): 'DefineSound', (15): 'StartSound', (17):\n 'DefineButtonSound', (18): 'SoundStreamHead', (19): 'SoundStreamBlock',\n (20): 'DefineBitsLossless', (21): 'DefineBitsJPEG2', (22):\n 'DefineShape2', (23): 'DefineButtonCxform', (24): 'Protect', (26):\n 'PlaceObject2', (28): 'RemoveObject2', (32): 'DefineShape3', (33):\n 'DefineText2', (34): 'DefineButton2', (35): 'DefineBitsJPEG3', (36):\n 'DefineBitsLossless2', (37): 'DefineEditText', (39): 'DefineSprite', (\n 41): 'ProductInfo', (43): 'FrameLabel', (45): 'SoundStreamHead2', (46):\n 'DefineMorphShape', (48): 'DefineFont2', (56): 'ExportAssets', (57):\n 'ImportAssets', (58): 'EnableDebugger', (59): 'DoInitAction', (60):\n 'DefineVideoStream', (61): 'VideoFrame', (62): 'DefineFontInfo2', (63):\n 'DebugID', (64): 'EnableDebugger2', (65): 'ScriptLimits', (66):\n 'SetTabIndex', (69): 'FileAttributes', (70): 'PlaceObject3', (71):\n 'ImportAssets2', (73): 'DefineFontAlignZones', (74): 'CSMTextSettings',\n (75): 'DefineFont3', (76): 'SymbolClass', (77): 'Metadata', (78):\n 'DefineScalingGrid', (82): 'DoABC', (83): 'DefineShape4', (84):\n 'DefineMorphShape2', (86): 'DefineSceneAndFrameLabelData', (87):\n 'DefineBinaryData', (88): 'DefineFontName', (89): 
'StartSound2', (90):\n 'DefineBitsJPEG4', (91): 'DefineFont4', (93): 'EnableTelemetry'}\n\n\nclass SWFTag(object):\n\n def __init__(self, code, length):\n self.code = code\n self.length = length\n self.typeName = tagCodeTranslation.get(self.code, '!UNKNOWN!')\n if self.typeName == '!UNKNOWN!':\n print('warning: unknown swf tag code: ' + str(self.code))\n\n def isEndTag(self):\n return self.typeName == 'End'\n\n def __str__(self):\n return 'SWFTag(code=' + str(self.code\n ) + ' \"' + self.typeName + '\", length=' + str(self.length) + ')'\n\n\nclass SWFFile(object):\n\n def __init__(self, filepath):\n self.filepath = filepath\n self.compression = None\n self.version = None\n self.fileLength = None\n self.frameSize = None\n self.frameRate = None\n self.frameCount = None\n self.tags = []\n self.chunkSize = 16 * 4096\n self.load()\n\n def load(self):\n \"\"\"loads the swf file at the filepath\"\"\"\n self.handle = open(self.filepath, 'rb')\n self.unpackHeader1()\n print('signature:', self.signature)\n print('version:', self.version)\n print('fileLength:', self.fileLength)\n if self.compression != 'none':\n self.decompress()\n self.unpackHeader2()\n print('frameSize:', self.frameSize)\n print('frameRate:', self.frameRate)\n print('frameCount:', self.frameCount)\n self.unpackTags()\n for tag in self.tags:\n print(tag)\n if tag.typeName == '!UNKNOWN!':\n print('warning: unknown tag!')\n\n def decompress(self):\n \"\"\"replaces the handle with a tempfile handle with all content decompressed\"\"\"\n temp = tempfile.TemporaryFile('w+b')\n if self.compression == 'zlib':\n decompressor = zlib.decompressobj()\n elif self.compression == 'lzma':\n decompressor = lzma.LZMADecompressor()\n else:\n raise Exception('unknown compression algorithm: ' + self.\n compression)\n chunk = self.handle.read(self.chunkSize)\n while len(chunk) > 0:\n temp.write(decompressor.decompress(chunk))\n chunk = self.handle.read(self.chunkSize)\n temp.seek(0)\n self.handle = temp\n\n def 
unpackHeader1(self):\n \"\"\"unpacks the first 8 bytes of the header and figures out what compression there is\"\"\"\n header = self.handle.read(8)\n signature, self.version, self.fileLength = struct.unpack('<3sBI',\n header)\n signature = signature.decode('ascii')\n if signature == 'FWS':\n self.compression = 'none'\n elif signature == 'CWS':\n self.compression = 'zlib'\n elif signature == 'ZWS':\n self.compression = 'lzma'\n else:\n raise SWFFileUnpackingException('unknown file signature: \"' +\n signature + '\"')\n self.signature = signature\n\n def unpackHeader2(self):\n \"\"\"unpacks the rest of the header data that might have been compressed\"\"\"\n self.frameSize = self.unpackRect()\n self.frameRate, self.frameCount = struct.unpack('<HH', self.handle.\n read(4))\n\n def unpackRect(self):\n data = self.handle.read(1)\n size, = bitstruct.unpack('u5', data)\n data += self.handle.read(math.ceil((size * 4 - 3) / 8))\n xmin, xmax, ymin, ymax = bitstruct.unpack('p5' + ('s' + str(size)) *\n 4, data)\n return SWFRect(xmin, xmax, ymin, ymax)\n\n def unpackTags(self):\n sample = self.handle.read(2)\n tag = None\n while len(sample) > 0:\n if tag is not None and tag.isEndTag():\n print('warning: swf has tags after an end tag!')\n self.handle.seek(-2, os.SEEK_CUR)\n tag = self.unpackTag()\n self.tags.append(tag)\n sample = self.handle.read(2)\n\n def unpackTag(self):\n tag = self.unpackTagHeader()\n self.handle.read(tag.length)\n return tag\n\n def unpackTagHeader(self):\n data, = struct.unpack('<H', self.handle.read(2))\n tagCode = data >> 6\n tagLength = data & 63\n if tagLength == 63:\n tagLength, = struct.unpack('<I', self.handle.read(4))\n return SWFTag(tagCode, tagLength)\n\n\ndef main():\n if len(sys.argv) < 2:\n print('filepath required')\n else:\n file = SWFFile(sys.argv[1])\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python3\n\n\nimport sys\nimport os\nimport math\n\nimport tempfile\n\nimport zlib\nimport lzma\nimport struct\nimport bitstruct\n\n\n\n# a swf file unpacker and analyzer\n# majority of information taken from https://www.adobe.com/devnet/swf.html (version 19)\n# some additional information taken from https://github.com/claus/as3swf/wiki/SWF-tag-support-chart\n\n\n\nclass SWFFileUnpackingException(Exception):\n\t'''generic exception during unpacking of a swf file typically due to incorrect structure or unexpected values'''\n\nclass SWFRect(object):\n\tdef __init__(self, xmin, xmax, ymin, ymax):\n\t\tself.xmin = xmin\n\t\tself.xmax = xmax\n\t\tself.ymin = ymin\n\t\tself.ymax = ymax\n\tdef __str__(self):\n\t\treturn 'SWFRect('+str(self.xmin)+','+str(self.xmax)+','+str(self.ymin)+','+str(self.ymax)+')'\n\n\ntagCodeTranslation = {\n\t0:'End',\n\t1:'ShowFrame',\n\t2:'DefineShape',\n\t4:'PlaceObject',\n\t5:'RemoveObject',\n\t6:'DefineBits',\n\t7:'DefineButton',\n\t8:'JPEGTables',\n\t9:'SetBackgroundColor',\n\t10:'DefineFont',\n\t11:'DefineText',\n\t12:'DoAction',\n\t13:'DefineFontInfo',\n\t14:'DefineSound',\n\t15:'StartSound',\n\t17:'DefineButtonSound',\n\t18:'SoundStreamHead',\n\t19:'SoundStreamBlock',\n\t20:'DefineBitsLossless',\n\t21:'DefineBitsJPEG2',\n\t22:'DefineShape2',\n\t23:'DefineButtonCxform',\n\t24:'Protect',\n\t26:'PlaceObject2',\n\t28:'RemoveObject2',\n\t32:'DefineShape3',\n\t33:'DefineText2',\n\t34:'DefineButton2',\n\t35:'DefineBitsJPEG3',\n\t36:'DefineBitsLossless2',\n\t37:'DefineEditText',\n\t39:'DefineSprite',\n\t41:'ProductInfo', # taken from https://github.com/claus/as3swf/wiki/SWF-tag-support-chart\n\t43:'FrameLabel',\n\t45:'SoundStreamHead2',\n\t46:'DefineMorphShape',\n\t48:'DefineFont2',\n\t56:'ExportAssets',\n\t57:'ImportAssets',\n\t58:'EnableDebugger',\n\t59:'DoInitAction',\n\t60:'DefineVideoStream',\n\t61:'VideoFrame',\n\t62:'DefineFontInfo2',\n\t63:'DebugID', # taken from 
https://github.com/claus/as3swf/wiki/SWF-tag-support-chart\n\t64:'EnableDebugger2',\n\t65:'ScriptLimits',\n\t66:'SetTabIndex',\n\t69:'FileAttributes',\n\t70:'PlaceObject3',\n\t71:'ImportAssets2',\n\t73:'DefineFontAlignZones',\n\t74:'CSMTextSettings',\n\t75:'DefineFont3',\n\t76:'SymbolClass',\n\t77:'Metadata',\n\t78:'DefineScalingGrid',\n\t82:'DoABC',\n\t83:'DefineShape4',\n\t84:'DefineMorphShape2',\n\t86:'DefineSceneAndFrameLabelData',\n\t87:'DefineBinaryData',\n\t88:'DefineFontName',\n\t89:'StartSound2',\n\t90:'DefineBitsJPEG4',\n\t91:'DefineFont4',\n\t93:'EnableTelemetry',\n}\n\n\nclass SWFTag(object):\n\tdef __init__(self, code, length):\n\t\tself.code = code\n\t\tself.length = length\n\n\t\tself.typeName = tagCodeTranslation.get(self.code, '!UNKNOWN!')\n\t\tif self.typeName == '!UNKNOWN!':\n\t\t\tprint('warning: unknown swf tag code: '+str(self.code))\n\tdef isEndTag(self):\n\t\treturn self.typeName == 'End'\n\tdef __str__(self):\n\t\treturn 'SWFTag(code='+str(self.code)+' \"'+self.typeName+'\", length='+str(self.length)+')'\n\n\nclass SWFFile(object):\n\tdef __init__(self, filepath):\n\t\tself.filepath = filepath\n\n\t\tself.compression = None\n\t\tself.version = None\n\t\tself.fileLength = None\n\t\tself.frameSize = None\n\t\tself.frameRate = None\n\t\tself.frameCount = None\n\n\t\tself.tags = []\n\n\t\tself.chunkSize = 16 * 4096\n\n\t\tself.load()\n\n\tdef load(self):\n\t\t'''loads the swf file at the filepath'''\n\t\tself.handle = open(self.filepath, 'rb')\n\n\t\tself.unpackHeader1()\n\t\tprint('signature:', self.signature)\n\t\tprint('version:', self.version)\n\t\tprint('fileLength:', self.fileLength)\n\n\t\tif self.compression != 'none':\n\t\t\tself.decompress()\n\n\t\tself.unpackHeader2()\n\n\t\tprint('frameSize:', self.frameSize)\n\t\tprint('frameRate:', self.frameRate)\n\t\tprint('frameCount:', self.frameCount)\n\n\t\tself.unpackTags()\n\t\tfor tag in self.tags:\n\t\t\tprint(tag)\n\t\t\tif tag.typeName == '!UNKNOWN!':\n\t\t\t\tprint('warning: unknown 
tag!')\n\n\n\tdef decompress(self):\n\t\t'''replaces the handle with a tempfile handle with all content decompressed'''\n\t\ttemp = tempfile.TemporaryFile('w+b')\n\t\tif self.compression == 'zlib':\n\t\t\tdecompressor = zlib.decompressobj()\n\t\telif self.compression == 'lzma':\n\t\t\tdecompressor = lzma.LZMADecompressor()\n\t\telse:\n\t\t\traise Exception(\"unknown compression algorithm: \"+self.compression)\n\t\tchunk = self.handle.read(self.chunkSize)\n\t\twhile len(chunk) > 0:\n\t\t\ttemp.write(decompressor.decompress(chunk))\n\t\t\tchunk = self.handle.read(self.chunkSize)\n\t\ttemp.seek(0)\n\t\tself.handle = temp\n\n\tdef unpackHeader1(self):\n\t\t'''unpacks the first 8 bytes of the header and figures out what compression there is'''\n\t\theader = self.handle.read(8)\n\t\tsignature, self.version, self.fileLength = struct.unpack('<3sBI', header)\n\n\t\tsignature = signature.decode('ascii')\n\t\tif signature == 'FWS':\n\t\t\tself.compression = 'none'\n\t\telif signature == 'CWS':\n\t\t\tself.compression = 'zlib'\n\t\telif signature == 'ZWS':\n\t\t\tself.compression = 'lzma'\n\t\telse:\n\t\t\traise SWFFileUnpackingException('unknown file signature: \"'+signature+'\"')\n\n\t\tself.signature = signature\n\n\tdef unpackHeader2(self):\n\t\t'''unpacks the rest of the header data that might have been compressed'''\n\t\tself.frameSize = self.unpackRect()\n\t\tself.frameRate, self.frameCount = struct.unpack('<HH', self.handle.read(4))\n\t\t# frameRate is an 8.8 float actually, but i'm not sure how to unpack that...\n\n\tdef unpackRect(self):\n\t\tdata = self.handle.read(1)\n\t\tsize, = bitstruct.unpack('u5', data)\n\t\tdata += self.handle.read(math.ceil((size * 4 - 3) / 8))\n\t\txmin, xmax, ymin, ymax = bitstruct.unpack('p5'+('s'+str(size))*4, data)\n\t\treturn SWFRect(xmin, xmax, ymin, ymax)\n\n\tdef unpackTags(self):\n\t\tsample = self.handle.read(2)\n\t\ttag = None\n\t\twhile len(sample) > 0:\n\t\t\tif tag is not None and tag.isEndTag():\n\t\t\t\tprint('warning: swf 
has tags after an end tag!')\n\t\t\tself.handle.seek(-2, os.SEEK_CUR)\n\t\t\ttag = self.unpackTag()\n\t\t\tself.tags.append(tag)\n\n\t\t\tsample = self.handle.read(2)\n\n\tdef unpackTag(self):\n\t\ttag = self.unpackTagHeader()\n\t\tself.handle.read(tag.length)\n\t\treturn tag\n\tdef unpackTagHeader(self):\n\t\tdata, = struct.unpack('<H', self.handle.read(2))\n\t\ttagCode = data >> 6\n\t\ttagLength = data & 0x3f\n\t\tif tagLength == 0x3f:\n\t\t\ttagLength, = struct.unpack('<I', self.handle.read(4))\n\t\treturn SWFTag(tagCode, tagLength)\n\n\n\n\ndef main():\n\tif len(sys.argv) < 2:\n\t\tprint('filepath required')\n\telse:\n\t\tfile = SWFFile(sys.argv[1])\n\n\nif __name__ == '__main__':\n\tmain()\n",
"step-ids": [
17,
18,
20,
22,
24
]
}
|
[
17,
18,
20,
22,
24
] |
# Generated by Django 2.2.7 on 2019-11-15 23:43
from django.db import migrations
class Migration(migrations.Migration):
    '''Drops the obsolete "titre" field from the Question model.'''

    dependencies = [('quizzapp', '0005_auto_20191115_2339')]

    operations = [
        migrations.RemoveField(model_name='question', name='titre'),
    ]
|
normal
|
{
"blob_id": "b2fa6104f03dc76522a51f352101cef199ddc665",
"index": 675,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('quizzapp', '0005_auto_20191115_2339')]\n operations = [migrations.RemoveField(model_name='question', name='titre')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('quizzapp', '0005_auto_20191115_2339')]\n operations = [migrations.RemoveField(model_name='question', name='titre')]\n",
"step-5": "# Generated by Django 2.2.7 on 2019-11-15 23:43\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('quizzapp', '0005_auto_20191115_2339'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='question',\n name='titre',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-08-24 22:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0026_auto_20160712_1541'),
]
operations = [
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=50, null=True)),
('addr1', models.CharField(blank=True, max_length=50, null=True)),
('addr2', models.CharField(blank=True, max_length=50, null=True)),
('city', models.CharField(blank=True, max_length=50, null=True)),
('state', models.CharField(blank=True, max_length=50, null=True)),
('zip_code', models.CharField(blank=True, max_length=20, null=True)),
('phone_main', models.CharField(blank=True, max_length=20, null=True)),
('phone_other', models.CharField(blank=True, max_length=20, null=True)),
('notes', models.TextField(blank=True, null=True)),
],
),
migrations.RemoveField(
model_name='user',
name='location',
),
migrations.AddField(
model_name='user',
name='location',
field=models.ManyToManyField(blank=True, null=True, related_name='user_location', to='users.Location'),
),
]
|
normal
|
{
"blob_id": "04c1765e6c2302098be2a7f3242dfd536683f742",
"index": 6138,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('users', '0026_auto_20160712_1541')]\n operations = [migrations.CreateModel(name='Location', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(blank=True,\n max_length=50, null=True)), ('addr1', models.CharField(blank=True,\n max_length=50, null=True)), ('addr2', models.CharField(blank=True,\n max_length=50, null=True)), ('city', models.CharField(blank=True,\n max_length=50, null=True)), ('state', models.CharField(blank=True,\n max_length=50, null=True)), ('zip_code', models.CharField(blank=\n True, max_length=20, null=True)), ('phone_main', models.CharField(\n blank=True, max_length=20, null=True)), ('phone_other', models.\n CharField(blank=True, max_length=20, null=True)), ('notes', models.\n TextField(blank=True, null=True))]), migrations.RemoveField(\n model_name='user', name='location'), migrations.AddField(model_name\n ='user', name='location', field=models.ManyToManyField(blank=True,\n null=True, related_name='user_location', to='users.Location'))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('users', '0026_auto_20160712_1541')]\n operations = [migrations.CreateModel(name='Location', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(blank=True,\n max_length=50, null=True)), ('addr1', models.CharField(blank=True,\n max_length=50, null=True)), ('addr2', models.CharField(blank=True,\n max_length=50, null=True)), ('city', models.CharField(blank=True,\n max_length=50, null=True)), ('state', models.CharField(blank=True,\n max_length=50, null=True)), ('zip_code', models.CharField(blank=\n True, max_length=20, null=True)), ('phone_main', models.CharField(\n blank=True, max_length=20, null=True)), ('phone_other', models.\n CharField(blank=True, max_length=20, null=True)), ('notes', models.\n TextField(blank=True, null=True))]), migrations.RemoveField(\n model_name='user', name='location'), migrations.AddField(model_name\n ='user', name='location', field=models.ManyToManyField(blank=True,\n null=True, related_name='user_location', to='users.Location'))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.5 on 2016-08-24 22:13\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0026_auto_20160712_1541'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Location',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(blank=True, max_length=50, null=True)),\n ('addr1', models.CharField(blank=True, max_length=50, null=True)),\n ('addr2', models.CharField(blank=True, max_length=50, null=True)),\n ('city', models.CharField(blank=True, max_length=50, null=True)),\n ('state', models.CharField(blank=True, max_length=50, null=True)),\n ('zip_code', models.CharField(blank=True, max_length=20, null=True)),\n ('phone_main', models.CharField(blank=True, max_length=20, null=True)),\n ('phone_other', models.CharField(blank=True, max_length=20, null=True)),\n ('notes', models.TextField(blank=True, null=True)),\n ],\n ),\n migrations.RemoveField(\n model_name='user',\n name='location',\n ),\n migrations.AddField(\n model_name='user',\n name='location',\n field=models.ManyToManyField(blank=True, null=True, related_name='user_location', to='users.Location'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
sys.path.append("..")
from packages import bitso as BS
from packages import account as ACCOUNT
from packages import currency_pair as CP
account=ACCOUNT.Account('577e4a03-540f9610-f686d434-qz5c4v5b6n','dd7b02f5-c286e9d4-f2cc78c3-bfab3')
bs=BS.Bitso(account)
currency_pair=CP.CurrencyPair('btc','xmn')
depth=bs.depth(currency_pair)
a=1
|
normal
|
{
"blob_id": "03147de944c4f75417006a5087e75354dba644ec",
"index": 6339,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.append('..')\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append('..')\n<mask token>\naccount = ACCOUNT.Account('577e4a03-540f9610-f686d434-qz5c4v5b6n',\n 'dd7b02f5-c286e9d4-f2cc78c3-bfab3')\nbs = BS.Bitso(account)\ncurrency_pair = CP.CurrencyPair('btc', 'xmn')\ndepth = bs.depth(currency_pair)\na = 1\n",
"step-4": "import sys\nsys.path.append('..')\nfrom packages import bitso as BS\nfrom packages import account as ACCOUNT\nfrom packages import currency_pair as CP\naccount = ACCOUNT.Account('577e4a03-540f9610-f686d434-qz5c4v5b6n',\n 'dd7b02f5-c286e9d4-f2cc78c3-bfab3')\nbs = BS.Bitso(account)\ncurrency_pair = CP.CurrencyPair('btc', 'xmn')\ndepth = bs.depth(currency_pair)\na = 1\n",
"step-5": "import sys\nsys.path.append(\"..\")\nfrom packages import bitso as BS\nfrom packages import account as ACCOUNT\nfrom packages import currency_pair as CP\n\naccount=ACCOUNT.Account('577e4a03-540f9610-f686d434-qz5c4v5b6n','dd7b02f5-c286e9d4-f2cc78c3-bfab3')\nbs=BS.Bitso(account)\n\ncurrency_pair=CP.CurrencyPair('btc','xmn')\ndepth=bs.depth(currency_pair)\na=1\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
admin.autodiscover()
<|reserved_special_token_0|>
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
admin.autodiscover()
urlpatterns = patterns('', ('', include(application.urls)), url('^admin/',
include(admin.site.urls)), url('^logout$', logout, name='logout'))
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
<|reserved_special_token_1|>
from django.conf.urls import patterns, include, url
from django.contrib.auth.views import login, logout
from django.contrib import admin
from magmag_core.app import application
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls.static import static
from magmag import settings
admin.autodiscover()
urlpatterns = patterns('', ('', include(application.urls)), url('^admin/',
include(admin.site.urls)), url('^logout$', logout, name='logout'))
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from django.contrib.auth.views import login, logout
from django.contrib import admin
from magmag_core.app import application
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls.static import static
from magmag import settings
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'magmag.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
(r'', include(application.urls)),
url(r'^admin/', include(admin.site.urls)),
url(r'^logout$', logout,name='logout' ),
)
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
flexible
|
{
"blob_id": "538e582df7bfcf281973a5296adc14ca067be0a5",
"index": 2581,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.autodiscover()\n<mask token>\nurlpatterns += staticfiles_urlpatterns()\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"step-3": "<mask token>\nadmin.autodiscover()\nurlpatterns = patterns('', ('', include(application.urls)), url('^admin/',\n include(admin.site.urls)), url('^logout$', logout, name='logout'))\nurlpatterns += staticfiles_urlpatterns()\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"step-4": "from django.conf.urls import patterns, include, url\nfrom django.contrib.auth.views import login, logout\nfrom django.contrib import admin\nfrom magmag_core.app import application\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom django.conf.urls.static import static\nfrom magmag import settings\nadmin.autodiscover()\nurlpatterns = patterns('', ('', include(application.urls)), url('^admin/',\n include(admin.site.urls)), url('^logout$', logout, name='logout'))\nurlpatterns += staticfiles_urlpatterns()\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom django.conf.urls import patterns, include, url\nfrom django.contrib.auth.views import login, logout\nfrom django.contrib import admin\nfrom magmag_core.app import application\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom django.conf.urls.static import static\nfrom magmag import settings\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'magmag.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n (r'', include(application.urls)),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^logout$', logout,name='logout' ),\n)\nurlpatterns += staticfiles_urlpatterns()\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#! /usr/bin/python
import math
import sys
import os
import subprocess
#PTYPES = [ "eth_ip_udp_head_t", "ip_udp_head_t", "eth_32ip_udp_head_t", "eth_64ip_udp_head_t", "eth64_64ip64_64udp_head_t", "eth6464_64ip64_64udp_head_t" ]
#PTYPES = [ "eth_ip_udp_head_t", "eth_32ip_udp_head_t", "eth_64ip_udp_head_t", "eth64_64ip64_64udp_head_t", "eth6464_64ip64_64udp_head_t" ]
PTYPE = "volatile eth_ip_udp_head_t"
#PTYPE = "volatile eth6464_64ip64_64udp_head_t"
def log_out(out):
print(out[:-1])
def run_proc(p, wait):
if not wait:
pid = os.fork()
if pid != 0:
return
proc = subprocess.Popen(p, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.wait()
log_out("STDERR -- %s\n" % p)
for line in proc.stderr:
log_out(line)
log_out("STDOUT -- %s\n" % p)
for line in proc.stdout:
log_out(line)
if not wait:
sys.exit(0)
args = []
for i in [1,10] + \
range(100,1000,200) + \
range(1000,10 *1000, 1000) + \
range(10 * 1000,100 * 1000, 20 * 1000) + \
range(100 * 1000, 1000 * 1000, 200 * 1000) + \
range(1000 * 1000, 5 * 1000 * 1000, 2000 * 1000):
packet_count = i
outdir = "experiments/baseline"
test_id = "%010i" % (packet_count)
args.append( "%s/%s.stats %4.2fMB" % (outdir, test_id, i * 2048 / 1024.0 / 1024.0))
cmd = "./plot_fast_net.py RD %s baseline-rd.pdf" % (" ".join(args) )
print cmd
run_proc(cmd,False)
cmd = "./plot_fast_net.py WR %s baseline-wr.pdf" % (" ".join(args) )
print cmd
run_proc(cmd,True)
cmd = "./plot_fast_net.py APRD %s baseline-aprd.pdf" % (" ".join(args) )
print cmd
run_proc(cmd,False)
cmd = "./plot_fast_net.py APWR %s baseline-apwr.pdf" % (" ".join(args) )
print cmd
run_proc(cmd,True)
|
normal
|
{
"blob_id": "9101fc5b8ba04a1b72e0c79d5bf3e4118e1bad75",
"index": 5676,
"step-1": "#! /usr/bin/python\n\nimport math\nimport sys\nimport os\nimport subprocess\n\n\n#PTYPES = [ \"eth_ip_udp_head_t\", \"ip_udp_head_t\", \"eth_32ip_udp_head_t\", \"eth_64ip_udp_head_t\", \"eth64_64ip64_64udp_head_t\", \"eth6464_64ip64_64udp_head_t\" ]\n#PTYPES = [ \"eth_ip_udp_head_t\", \"eth_32ip_udp_head_t\", \"eth_64ip_udp_head_t\", \"eth64_64ip64_64udp_head_t\", \"eth6464_64ip64_64udp_head_t\" ]\nPTYPE = \"volatile eth_ip_udp_head_t\" \n#PTYPE = \"volatile eth6464_64ip64_64udp_head_t\" \n\ndef log_out(out): \n print(out[:-1])\n\n\ndef run_proc(p, wait):\n if not wait: \n pid = os.fork()\n if pid != 0:\n return\n\n \n proc = subprocess.Popen(p, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) \n proc.wait()\n\n log_out(\"STDERR -- %s\\n\" % p)\n for line in proc.stderr:\n log_out(line)\n\n log_out(\"STDOUT -- %s\\n\" % p)\n for line in proc.stdout:\n log_out(line)\n\n if not wait:\n\t sys.exit(0)\n\n\nargs = []\nfor i in [1,10] + \\\n range(100,1000,200) + \\\n range(1000,10 *1000, 1000) + \\\n range(10 * 1000,100 * 1000, 20 * 1000) + \\\n range(100 * 1000, 1000 * 1000, 200 * 1000) + \\\n range(1000 * 1000, 5 * 1000 * 1000, 2000 * 1000): \n packet_count = i \n\n outdir = \"experiments/baseline\"\n\n test_id = \"%010i\" % (packet_count)\n\n args.append( \"%s/%s.stats %4.2fMB\" % (outdir, test_id, i * 2048 / 1024.0 / 1024.0)) \n\n\ncmd = \"./plot_fast_net.py RD %s baseline-rd.pdf\" % (\" \".join(args) )\nprint cmd\nrun_proc(cmd,False)\n\ncmd = \"./plot_fast_net.py WR %s baseline-wr.pdf\" % (\" \".join(args) )\nprint cmd\nrun_proc(cmd,True)\n\ncmd = \"./plot_fast_net.py APRD %s baseline-aprd.pdf\" % (\" \".join(args) )\nprint cmd\nrun_proc(cmd,False)\n\ncmd = \"./plot_fast_net.py APWR %s baseline-apwr.pdf\" % (\" \".join(args) )\nprint cmd\nrun_proc(cmd,True)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def test_playable_hts_in_hand():
assert playable_hts_in_hand(hand_3playable_hts) == 3
assert playable_hts_in_hand(hand_2playable_hts) == 2
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_playable_hts_in_hand():
assert playable_hts_in_hand(hand_3playable_hts) == 3
assert playable_hts_in_hand(hand_2playable_hts) == 2
def test_cards_of_set_in_hand():
assert cards_of_set_in_hand(hand_3lvl3vw, vw_lvl3) == 3
<|reserved_special_token_1|>
<|reserved_special_token_0|>
hand_3playable_hts = ['Nibiru, the Primal Being', 'Effect Veiler',
'Fantastical Dragon Phantazmay', 'Dragon Buster Destruction Sword',
'Dragon Buster Destruction Sword']
hand_2playable_hts = ['Nibiru, the Primal Being',
'Nibiru, the Primal Being', 'Fantastical Dragon Phantazmay',
'Fantastical Dragon Phantazmay', 'Dragon Buster Destruction Sword']
hand_3lvl3vw = ['Virtual World Mai-Hime - Lulu',
'Virtual World Xiezhi - Jiji', 'Virtual World Xiezhi - Jiji',
'Virtual World Kirin - Lili', 'Virtual World Roshi - Laolao']
def test_playable_hts_in_hand():
assert playable_hts_in_hand(hand_3playable_hts) == 3
assert playable_hts_in_hand(hand_2playable_hts) == 2
def test_cards_of_set_in_hand():
assert cards_of_set_in_hand(hand_3lvl3vw, vw_lvl3) == 3
<|reserved_special_token_1|>
import sys
from Decks.Virtual_World.vw_sets import *
from tools import *
hand_3playable_hts = ['Nibiru, the Primal Being', 'Effect Veiler',
'Fantastical Dragon Phantazmay', 'Dragon Buster Destruction Sword',
'Dragon Buster Destruction Sword']
hand_2playable_hts = ['Nibiru, the Primal Being',
'Nibiru, the Primal Being', 'Fantastical Dragon Phantazmay',
'Fantastical Dragon Phantazmay', 'Dragon Buster Destruction Sword']
hand_3lvl3vw = ['Virtual World Mai-Hime - Lulu',
'Virtual World Xiezhi - Jiji', 'Virtual World Xiezhi - Jiji',
'Virtual World Kirin - Lili', 'Virtual World Roshi - Laolao']
def test_playable_hts_in_hand():
assert playable_hts_in_hand(hand_3playable_hts) == 3
assert playable_hts_in_hand(hand_2playable_hts) == 2
def test_cards_of_set_in_hand():
assert cards_of_set_in_hand(hand_3lvl3vw, vw_lvl3) == 3
<|reserved_special_token_1|>
import sys
from Decks.Virtual_World.vw_sets import *
from tools import *
hand_3playable_hts = ["Nibiru, the Primal Being", "Effect Veiler", "Fantastical Dragon Phantazmay", "Dragon Buster Destruction Sword", "Dragon Buster Destruction Sword"]
hand_2playable_hts = ["Nibiru, the Primal Being", "Nibiru, the Primal Being", "Fantastical Dragon Phantazmay", "Fantastical Dragon Phantazmay", "Dragon Buster Destruction Sword"]
hand_3lvl3vw = ["Virtual World Mai-Hime - Lulu", "Virtual World Xiezhi - Jiji", "Virtual World Xiezhi - Jiji", "Virtual World Kirin - Lili", "Virtual World Roshi - Laolao"]
def test_playable_hts_in_hand():
assert playable_hts_in_hand(hand_3playable_hts) == 3
assert playable_hts_in_hand(hand_2playable_hts) == 2
def test_cards_of_set_in_hand():
assert cards_of_set_in_hand(hand_3lvl3vw, vw_lvl3) == 3
|
flexible
|
{
"blob_id": "43179b8b096836758271a791b4aacb7bbe398ea9",
"index": 1807,
"step-1": "<mask token>\n\n\ndef test_playable_hts_in_hand():\n assert playable_hts_in_hand(hand_3playable_hts) == 3\n assert playable_hts_in_hand(hand_2playable_hts) == 2\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_playable_hts_in_hand():\n assert playable_hts_in_hand(hand_3playable_hts) == 3\n assert playable_hts_in_hand(hand_2playable_hts) == 2\n\n\ndef test_cards_of_set_in_hand():\n assert cards_of_set_in_hand(hand_3lvl3vw, vw_lvl3) == 3\n",
"step-3": "<mask token>\nhand_3playable_hts = ['Nibiru, the Primal Being', 'Effect Veiler',\n 'Fantastical Dragon Phantazmay', 'Dragon Buster Destruction Sword',\n 'Dragon Buster Destruction Sword']\nhand_2playable_hts = ['Nibiru, the Primal Being',\n 'Nibiru, the Primal Being', 'Fantastical Dragon Phantazmay',\n 'Fantastical Dragon Phantazmay', 'Dragon Buster Destruction Sword']\nhand_3lvl3vw = ['Virtual World Mai-Hime - Lulu',\n 'Virtual World Xiezhi - Jiji', 'Virtual World Xiezhi - Jiji',\n 'Virtual World Kirin - Lili', 'Virtual World Roshi - Laolao']\n\n\ndef test_playable_hts_in_hand():\n assert playable_hts_in_hand(hand_3playable_hts) == 3\n assert playable_hts_in_hand(hand_2playable_hts) == 2\n\n\ndef test_cards_of_set_in_hand():\n assert cards_of_set_in_hand(hand_3lvl3vw, vw_lvl3) == 3\n",
"step-4": "import sys\nfrom Decks.Virtual_World.vw_sets import *\nfrom tools import *\nhand_3playable_hts = ['Nibiru, the Primal Being', 'Effect Veiler',\n 'Fantastical Dragon Phantazmay', 'Dragon Buster Destruction Sword',\n 'Dragon Buster Destruction Sword']\nhand_2playable_hts = ['Nibiru, the Primal Being',\n 'Nibiru, the Primal Being', 'Fantastical Dragon Phantazmay',\n 'Fantastical Dragon Phantazmay', 'Dragon Buster Destruction Sword']\nhand_3lvl3vw = ['Virtual World Mai-Hime - Lulu',\n 'Virtual World Xiezhi - Jiji', 'Virtual World Xiezhi - Jiji',\n 'Virtual World Kirin - Lili', 'Virtual World Roshi - Laolao']\n\n\ndef test_playable_hts_in_hand():\n assert playable_hts_in_hand(hand_3playable_hts) == 3\n assert playable_hts_in_hand(hand_2playable_hts) == 2\n\n\ndef test_cards_of_set_in_hand():\n assert cards_of_set_in_hand(hand_3lvl3vw, vw_lvl3) == 3\n",
"step-5": "import sys\nfrom Decks.Virtual_World.vw_sets import *\nfrom tools import *\n\nhand_3playable_hts = [\"Nibiru, the Primal Being\", \"Effect Veiler\", \"Fantastical Dragon Phantazmay\", \"Dragon Buster Destruction Sword\", \"Dragon Buster Destruction Sword\"]\nhand_2playable_hts = [\"Nibiru, the Primal Being\", \"Nibiru, the Primal Being\", \"Fantastical Dragon Phantazmay\", \"Fantastical Dragon Phantazmay\", \"Dragon Buster Destruction Sword\"]\nhand_3lvl3vw = [\"Virtual World Mai-Hime - Lulu\", \"Virtual World Xiezhi - Jiji\", \"Virtual World Xiezhi - Jiji\", \"Virtual World Kirin - Lili\", \"Virtual World Roshi - Laolao\"]\ndef test_playable_hts_in_hand():\n assert playable_hts_in_hand(hand_3playable_hts) == 3\n assert playable_hts_in_hand(hand_2playable_hts) == 2\n\ndef test_cards_of_set_in_hand():\n assert cards_of_set_in_hand(hand_3lvl3vw, vw_lvl3) == 3\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('training_area', '0006_remove_event_day')]
operations = [migrations.CreateModel(name='Comment', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('created_at', models.DateTimeField(
auto_now_add=True, null=True)), ('updated_at', models.DateTimeField
(auto_now=True)), ('content', models.TextField(verbose_name=
'Content'))]), migrations.CreateModel(name='Notifications', fields=
[('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('created_at', models.
DateTimeField(auto_now_add=True, null=True)), ('title', models.
CharField(max_length=150, verbose_name='Title')), ('content',
models.TextField(verbose_name='Content'))]), migrations.AlterField(
model_name='user', name='email', field=models.EmailField(blank=True,
max_length=255)), migrations.AddField(model_name='notifications',
name='reciever', field=models.ForeignKey(null=True, on_delete=
django.db.models.deletion.CASCADE, related_name='notif_recieve', to
=settings.AUTH_USER_MODEL)), migrations.AddField(model_name=
'notifications', name='sender', field=models.ForeignKey(null=True,
on_delete=django.db.models.deletion.CASCADE, related_name=
'notif_send', to=settings.AUTH_USER_MODEL)), migrations.AddField(
model_name='comment', name='creator', field=models.ForeignKey(null=
True, on_delete=django.db.models.deletion.CASCADE, related_name=
'user_comments', to=settings.AUTH_USER_MODEL)), migrations.AddField
(model_name='comment', name='workout', field=models.ForeignKey(null
=True, on_delete=django.db.models.deletion.CASCADE, related_name=
'comments', to='training_area.Workout'))]
<|reserved_special_token_1|>
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [('training_area', '0006_remove_event_day')]
operations = [migrations.CreateModel(name='Comment', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('created_at', models.DateTimeField(
auto_now_add=True, null=True)), ('updated_at', models.DateTimeField
(auto_now=True)), ('content', models.TextField(verbose_name=
'Content'))]), migrations.CreateModel(name='Notifications', fields=
[('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('created_at', models.
DateTimeField(auto_now_add=True, null=True)), ('title', models.
CharField(max_length=150, verbose_name='Title')), ('content',
models.TextField(verbose_name='Content'))]), migrations.AlterField(
model_name='user', name='email', field=models.EmailField(blank=True,
max_length=255)), migrations.AddField(model_name='notifications',
name='reciever', field=models.ForeignKey(null=True, on_delete=
django.db.models.deletion.CASCADE, related_name='notif_recieve', to
=settings.AUTH_USER_MODEL)), migrations.AddField(model_name=
'notifications', name='sender', field=models.ForeignKey(null=True,
on_delete=django.db.models.deletion.CASCADE, related_name=
'notif_send', to=settings.AUTH_USER_MODEL)), migrations.AddField(
model_name='comment', name='creator', field=models.ForeignKey(null=
True, on_delete=django.db.models.deletion.CASCADE, related_name=
'user_comments', to=settings.AUTH_USER_MODEL)), migrations.AddField
(model_name='comment', name='workout', field=models.ForeignKey(null
=True, on_delete=django.db.models.deletion.CASCADE, related_name=
'comments', to='training_area.Workout'))]
<|reserved_special_token_1|>
# Generated by Django 2.1.7 on 2019-03-18 02:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('training_area', '0006_remove_event_day'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('content', models.TextField(verbose_name='Content')),
],
),
migrations.CreateModel(
name='Notifications',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('title', models.CharField(max_length=150, verbose_name='Title')),
('content', models.TextField(verbose_name='Content')),
],
),
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(blank=True, max_length=255),
),
migrations.AddField(
model_name='notifications',
name='reciever',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='notif_recieve', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='notifications',
name='sender',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='notif_send', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='comment',
name='creator',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user_comments', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='comment',
name='workout',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='training_area.Workout'),
),
]
|
flexible
|
{
"blob_id": "9905559909f10831373e659cde0f275dc5d71e0d",
"index": 7041,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('training_area', '0006_remove_event_day')]\n operations = [migrations.CreateModel(name='Comment', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('created_at', models.DateTimeField(\n auto_now_add=True, null=True)), ('updated_at', models.DateTimeField\n (auto_now=True)), ('content', models.TextField(verbose_name=\n 'Content'))]), migrations.CreateModel(name='Notifications', fields=\n [('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('created_at', models.\n DateTimeField(auto_now_add=True, null=True)), ('title', models.\n CharField(max_length=150, verbose_name='Title')), ('content',\n models.TextField(verbose_name='Content'))]), migrations.AlterField(\n model_name='user', name='email', field=models.EmailField(blank=True,\n max_length=255)), migrations.AddField(model_name='notifications',\n name='reciever', field=models.ForeignKey(null=True, on_delete=\n django.db.models.deletion.CASCADE, related_name='notif_recieve', to\n =settings.AUTH_USER_MODEL)), migrations.AddField(model_name=\n 'notifications', name='sender', field=models.ForeignKey(null=True,\n on_delete=django.db.models.deletion.CASCADE, related_name=\n 'notif_send', to=settings.AUTH_USER_MODEL)), migrations.AddField(\n model_name='comment', name='creator', field=models.ForeignKey(null=\n True, on_delete=django.db.models.deletion.CASCADE, related_name=\n 'user_comments', to=settings.AUTH_USER_MODEL)), migrations.AddField\n (model_name='comment', name='workout', field=models.ForeignKey(null\n =True, on_delete=django.db.models.deletion.CASCADE, related_name=\n 'comments', to='training_area.Workout'))]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('training_area', '0006_remove_event_day')]\n operations = [migrations.CreateModel(name='Comment', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('created_at', models.DateTimeField(\n auto_now_add=True, null=True)), ('updated_at', models.DateTimeField\n (auto_now=True)), ('content', models.TextField(verbose_name=\n 'Content'))]), migrations.CreateModel(name='Notifications', fields=\n [('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('created_at', models.\n DateTimeField(auto_now_add=True, null=True)), ('title', models.\n CharField(max_length=150, verbose_name='Title')), ('content',\n models.TextField(verbose_name='Content'))]), migrations.AlterField(\n model_name='user', name='email', field=models.EmailField(blank=True,\n max_length=255)), migrations.AddField(model_name='notifications',\n name='reciever', field=models.ForeignKey(null=True, on_delete=\n django.db.models.deletion.CASCADE, related_name='notif_recieve', to\n =settings.AUTH_USER_MODEL)), migrations.AddField(model_name=\n 'notifications', name='sender', field=models.ForeignKey(null=True,\n on_delete=django.db.models.deletion.CASCADE, related_name=\n 'notif_send', to=settings.AUTH_USER_MODEL)), migrations.AddField(\n model_name='comment', name='creator', field=models.ForeignKey(null=\n True, on_delete=django.db.models.deletion.CASCADE, related_name=\n 'user_comments', to=settings.AUTH_USER_MODEL)), migrations.AddField\n (model_name='comment', name='workout', field=models.ForeignKey(null\n =True, on_delete=django.db.models.deletion.CASCADE, related_name=\n 'comments', to='training_area.Workout'))]\n",
"step-5": "# Generated by Django 2.1.7 on 2019-03-18 02:25\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('training_area', '0006_remove_event_day'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True, null=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('content', models.TextField(verbose_name='Content')),\n ],\n ),\n migrations.CreateModel(\n name='Notifications',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True, null=True)),\n ('title', models.CharField(max_length=150, verbose_name='Title')),\n ('content', models.TextField(verbose_name='Content')),\n ],\n ),\n migrations.AlterField(\n model_name='user',\n name='email',\n field=models.EmailField(blank=True, max_length=255),\n ),\n migrations.AddField(\n model_name='notifications',\n name='reciever',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='notif_recieve', to=settings.AUTH_USER_MODEL),\n ),\n migrations.AddField(\n model_name='notifications',\n name='sender',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='notif_send', to=settings.AUTH_USER_MODEL),\n ),\n migrations.AddField(\n model_name='comment',\n name='creator',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user_comments', to=settings.AUTH_USER_MODEL),\n ),\n migrations.AddField(\n model_name='comment',\n name='workout',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='training_area.Workout'),\n ),\n 
]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
n, a, b = map(int, input().split())
print(tort(n, a, b))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def tort(n, a, b):
return min(n * a, b)
def main():
n, a, b = map(int, input().split())
print(tort(n, a, b))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def tort(n, a, b):
return min(n * a, b)
def main():
n, a, b = map(int, input().split())
print(tort(n, a, b))
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "7c06bd52c924d3e401f50625109c5b8b489df157",
"index": 7434,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n n, a, b = map(int, input().split())\n print(tort(n, a, b))\n\n\n<mask token>\n",
"step-3": "def tort(n, a, b):\n return min(n * a, b)\n\n\ndef main():\n n, a, b = map(int, input().split())\n print(tort(n, a, b))\n\n\n<mask token>\n",
"step-4": "def tort(n, a, b):\n return min(n * a, b)\n\n\ndef main():\n n, a, b = map(int, input().split())\n print(tort(n, a, b))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
'''
Author: Dustin Spicuzza
Date: 3/22/2012
Description:
This mode only feeds another robot, does not move or anything
'''
class FeedOnlyAutonomousMode(object):
# this name should be descriptive and unique. This will be shown to the user
# on the SmartDashboard
MODE_NAME = "Feed Only"
DEFAULT = False
def __init__(self, drive, ramp_arm, ball_handler, robot_manager):
'''Constructor: store components locally here'''
self.drive = drive
self.ramp_arm = ramp_arm
self.ball_handler = ball_handler
self.robot_manager = robot_manager
def OnEnable(self):
'''
This function is called when Autonomous mode is enabled. You should
initialize things needed for your mode here
'''
pass
def OnDisable(self):
'''
This function is called when Autonomous mode is exiting. You should
clean anything up here that needs to be cleaned up
'''
pass
def Update(self, time_elapsed):
'''
This function is called every 10ms or so. This is where you should
make decisions about what needs to happen in your autonomous mode.
You do not need to call the 'Update' functions of any components
here, as they will be called for you automatically.
time_elapsed is the number of seconds that autonomous mode has been
active, in case your mode finds that useful.
'''
self.ball_handler.chamber.Remove()
self.ball_handler.feeder.Expel()
|
normal
|
{
"blob_id": "3596ef12ce407a8d84319daa38a27a99ed0de763",
"index": 5208,
"step-1": "<mask token>\n\n\nclass FeedOnlyAutonomousMode(object):\n <mask token>\n <mask token>\n <mask token>\n\n def OnEnable(self):\n \"\"\"\n This function is called when Autonomous mode is enabled. You should\n initialize things needed for your mode here\n \"\"\"\n pass\n\n def OnDisable(self):\n \"\"\"\n This function is called when Autonomous mode is exiting. You should\n clean anything up here that needs to be cleaned up\n \"\"\"\n pass\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass FeedOnlyAutonomousMode(object):\n <mask token>\n <mask token>\n\n def __init__(self, drive, ramp_arm, ball_handler, robot_manager):\n \"\"\"Constructor: store components locally here\"\"\"\n self.drive = drive\n self.ramp_arm = ramp_arm\n self.ball_handler = ball_handler\n self.robot_manager = robot_manager\n\n def OnEnable(self):\n \"\"\"\n This function is called when Autonomous mode is enabled. You should\n initialize things needed for your mode here\n \"\"\"\n pass\n\n def OnDisable(self):\n \"\"\"\n This function is called when Autonomous mode is exiting. You should\n clean anything up here that needs to be cleaned up\n \"\"\"\n pass\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass FeedOnlyAutonomousMode(object):\n <mask token>\n <mask token>\n\n def __init__(self, drive, ramp_arm, ball_handler, robot_manager):\n \"\"\"Constructor: store components locally here\"\"\"\n self.drive = drive\n self.ramp_arm = ramp_arm\n self.ball_handler = ball_handler\n self.robot_manager = robot_manager\n\n def OnEnable(self):\n \"\"\"\n This function is called when Autonomous mode is enabled. You should\n initialize things needed for your mode here\n \"\"\"\n pass\n\n def OnDisable(self):\n \"\"\"\n This function is called when Autonomous mode is exiting. You should\n clean anything up here that needs to be cleaned up\n \"\"\"\n pass\n\n def Update(self, time_elapsed):\n \"\"\"\n This function is called every 10ms or so. This is where you should\n make decisions about what needs to happen in your autonomous mode.\n You do not need to call the 'Update' functions of any components\n here, as they will be called for you automatically.\n \n time_elapsed is the number of seconds that autonomous mode has been\n active, in case your mode finds that useful. \n \"\"\"\n self.ball_handler.chamber.Remove()\n self.ball_handler.feeder.Expel()\n",
"step-4": "<mask token>\n\n\nclass FeedOnlyAutonomousMode(object):\n MODE_NAME = 'Feed Only'\n DEFAULT = False\n\n def __init__(self, drive, ramp_arm, ball_handler, robot_manager):\n \"\"\"Constructor: store components locally here\"\"\"\n self.drive = drive\n self.ramp_arm = ramp_arm\n self.ball_handler = ball_handler\n self.robot_manager = robot_manager\n\n def OnEnable(self):\n \"\"\"\n This function is called when Autonomous mode is enabled. You should\n initialize things needed for your mode here\n \"\"\"\n pass\n\n def OnDisable(self):\n \"\"\"\n This function is called when Autonomous mode is exiting. You should\n clean anything up here that needs to be cleaned up\n \"\"\"\n pass\n\n def Update(self, time_elapsed):\n \"\"\"\n This function is called every 10ms or so. This is where you should\n make decisions about what needs to happen in your autonomous mode.\n You do not need to call the 'Update' functions of any components\n here, as they will be called for you automatically.\n \n time_elapsed is the number of seconds that autonomous mode has been\n active, in case your mode finds that useful. \n \"\"\"\n self.ball_handler.chamber.Remove()\n self.ball_handler.feeder.Expel()\n",
"step-5": "'''\n Author: Dustin Spicuzza\n Date: 3/22/2012\n \n Description:\n \n This mode only feeds another robot, does not move or anything\n'''\n\nclass FeedOnlyAutonomousMode(object):\n\n # this name should be descriptive and unique. This will be shown to the user\n # on the SmartDashboard\n MODE_NAME = \"Feed Only\"\n DEFAULT = False\n\n\n def __init__(self, drive, ramp_arm, ball_handler, robot_manager):\n '''Constructor: store components locally here'''\n self.drive = drive\n self.ramp_arm = ramp_arm\n self.ball_handler = ball_handler\n self.robot_manager = robot_manager\n \n def OnEnable(self):\n '''\n This function is called when Autonomous mode is enabled. You should\n initialize things needed for your mode here\n '''\n \n pass\n \n def OnDisable(self):\n '''\n This function is called when Autonomous mode is exiting. You should\n clean anything up here that needs to be cleaned up\n '''\n \n pass\n \n def Update(self, time_elapsed):\n '''\n This function is called every 10ms or so. This is where you should\n make decisions about what needs to happen in your autonomous mode.\n You do not need to call the 'Update' functions of any components\n here, as they will be called for you automatically.\n \n time_elapsed is the number of seconds that autonomous mode has been\n active, in case your mode finds that useful. \n '''\n \n self.ball_handler.chamber.Remove()\n self.ball_handler.feeder.Expel()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
""" GetState
Usage:
get_state.py <pem-file> <ip-file> [options]
Options:
-h, --help print help message and exit
--output DIR set the output directory [default: logs]
"""
from docopt import docopt
import paramiko
import os
def get_logs(ip_addr, pem_file, log_dir):
pem = paramiko.RSAKey.from_private_key_file(pem_file)
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname=ip_addr, username="ec2-user", pkey=pem)
ftp = client.open_sftp()
logs = sorted(ftp.listdir('/home/ec2-user/logs/'))
for l in logs:
if l.endswith('.txt'):
print(l)
client.exec_command(f'cat /home/ec2-user/logs/{l} > /home/ec2-user/logs/tmp')
ftp.get(f'/home/ec2-user/logs/tmp', f"{log_dir}/{l}")
client.exec_command('rm /home/ec2-user/logs/tmp')
ftp.close()
client.close()
if __name__ == '__main__':
args = docopt(__doc__)
for ip in open(args['<ip-file>']):
os.system(f"scp -i {args['<pem-file>']} ec2-user@{ip.strip()}:~/logs/*.txt {args['--output']}")
#get_logs(ip.strip(), args['<pem-file>'], args['--output'])
|
normal
|
{
"blob_id": "a1df804325a074ed980ec864c72fe231e2968997",
"index": 4024,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_logs(ip_addr, pem_file, log_dir):\n pem = paramiko.RSAKey.from_private_key_file(pem_file)\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(hostname=ip_addr, username='ec2-user', pkey=pem)\n ftp = client.open_sftp()\n logs = sorted(ftp.listdir('/home/ec2-user/logs/'))\n for l in logs:\n if l.endswith('.txt'):\n print(l)\n client.exec_command(\n f'cat /home/ec2-user/logs/{l} > /home/ec2-user/logs/tmp')\n ftp.get(f'/home/ec2-user/logs/tmp', f'{log_dir}/{l}')\n client.exec_command('rm /home/ec2-user/logs/tmp')\n ftp.close()\n client.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_logs(ip_addr, pem_file, log_dir):\n pem = paramiko.RSAKey.from_private_key_file(pem_file)\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(hostname=ip_addr, username='ec2-user', pkey=pem)\n ftp = client.open_sftp()\n logs = sorted(ftp.listdir('/home/ec2-user/logs/'))\n for l in logs:\n if l.endswith('.txt'):\n print(l)\n client.exec_command(\n f'cat /home/ec2-user/logs/{l} > /home/ec2-user/logs/tmp')\n ftp.get(f'/home/ec2-user/logs/tmp', f'{log_dir}/{l}')\n client.exec_command('rm /home/ec2-user/logs/tmp')\n ftp.close()\n client.close()\n\n\nif __name__ == '__main__':\n args = docopt(__doc__)\n for ip in open(args['<ip-file>']):\n os.system(\n f\"scp -i {args['<pem-file>']} ec2-user@{ip.strip()}:~/logs/*.txt {args['--output']}\"\n )\n",
"step-4": "<mask token>\nfrom docopt import docopt\nimport paramiko\nimport os\n\n\ndef get_logs(ip_addr, pem_file, log_dir):\n pem = paramiko.RSAKey.from_private_key_file(pem_file)\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(hostname=ip_addr, username='ec2-user', pkey=pem)\n ftp = client.open_sftp()\n logs = sorted(ftp.listdir('/home/ec2-user/logs/'))\n for l in logs:\n if l.endswith('.txt'):\n print(l)\n client.exec_command(\n f'cat /home/ec2-user/logs/{l} > /home/ec2-user/logs/tmp')\n ftp.get(f'/home/ec2-user/logs/tmp', f'{log_dir}/{l}')\n client.exec_command('rm /home/ec2-user/logs/tmp')\n ftp.close()\n client.close()\n\n\nif __name__ == '__main__':\n args = docopt(__doc__)\n for ip in open(args['<ip-file>']):\n os.system(\n f\"scp -i {args['<pem-file>']} ec2-user@{ip.strip()}:~/logs/*.txt {args['--output']}\"\n )\n",
"step-5": "\"\"\" GetState\nUsage:\n get_state.py <pem-file> <ip-file> [options]\n\nOptions:\n -h, --help print help message and exit\n --output DIR set the output directory [default: logs]\n\"\"\"\n\nfrom docopt import docopt\nimport paramiko\nimport os\n\ndef get_logs(ip_addr, pem_file, log_dir):\n pem = paramiko.RSAKey.from_private_key_file(pem_file)\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(hostname=ip_addr, username=\"ec2-user\", pkey=pem)\n ftp = client.open_sftp()\n logs = sorted(ftp.listdir('/home/ec2-user/logs/'))\n for l in logs:\n if l.endswith('.txt'):\n print(l)\n client.exec_command(f'cat /home/ec2-user/logs/{l} > /home/ec2-user/logs/tmp')\n ftp.get(f'/home/ec2-user/logs/tmp', f\"{log_dir}/{l}\")\n client.exec_command('rm /home/ec2-user/logs/tmp')\n ftp.close()\n client.close()\n\nif __name__ == '__main__':\n args = docopt(__doc__)\n\n for ip in open(args['<ip-file>']):\n os.system(f\"scp -i {args['<pem-file>']} ec2-user@{ip.strip()}:~/logs/*.txt {args['--output']}\")\n #get_logs(ip.strip(), args['<pem-file>'], args['--output'])\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
{'variables': {'node_shared_openssl%': 'true'}, 'targets': [{'target_name':
'keypair', 'sources': ['secp256k1/keypair.cc'], 'conditions': [[
'OS=="win"', {'conditions': [['target_arch=="x64"', {'variables': {
'openssl_root%': 'C:/OpenSSL-Win64'}}, {'variables': {'openssl_root%':
'C:/OpenSSL-Win32'}}]], 'libraries': [
'-l<(openssl_root)/lib/libeay32.lib'], 'include_dirs': [
'<(openssl_root)/include']}, {'conditions': [[
'node_shared_openssl=="false"', {'include_dirs': [
'<(node_root_dir)/deps/openssl/openssl/include'], 'conditions': [[
"target_arch=='ia32'", {'include_dirs': [
'<(node_root_dir)/deps/openssl/config/piii']}], ["target_arch=='x64'",
{'include_dirs': ['<(node_root_dir)/deps/openssl/config/k8']}], [
"target_arch=='arm'", {'include_dirs': [
'<(node_root_dir)/deps/openssl/config/arm']}]]}]]}]]}]}
<|reserved_special_token_1|>
{
'variables': {
'node_shared_openssl%': 'true'
},
'targets': [
{
'target_name': 'keypair',
'sources': [
'secp256k1/keypair.cc'
],
'conditions': [
# For Windows, require either a 32-bit or 64-bit
# separately-compiled OpenSSL library.
# Currently set up to use with the following OpenSSL distro:
#
# http://slproweb.com/products/Win32OpenSSL.html
[
'OS=="win"',
{
'conditions':
[
[
'target_arch=="x64"',
{
'variables': {
'openssl_root%': 'C:/OpenSSL-Win64'
},
}, {
'variables': {
'openssl_root%': 'C:/OpenSSL-Win32'
}
}
]
],
'libraries': [
'-l<(openssl_root)/lib/libeay32.lib',
],
'include_dirs': [
'<(openssl_root)/include',
],
},
# Otherwise, if not Windows, link against the exposed OpenSSL
# in Node.
{
"conditions": [
['node_shared_openssl=="false"', {
# so when "node_shared_openssl" is "false", then OpenSSL has been
# bundled into the node executable. So we need to include the same
# header files that were used when building node.
'include_dirs': [
'<(node_root_dir)/deps/openssl/openssl/include'
],
"conditions" : [
["target_arch=='ia32'", {
"include_dirs": [ "<(node_root_dir)/deps/openssl/config/piii" ]
}],
["target_arch=='x64'", {
"include_dirs": [ "<(node_root_dir)/deps/openssl/config/k8" ]
}],
["target_arch=='arm'", {
"include_dirs": [ "<(node_root_dir)/deps/openssl/config/arm" ]
}]
]
}]
]}
]]
}
]
}
|
flexible
|
{
"blob_id": "e7b30353fd25beb9d5cdeee688e4ffa6955d4221",
"index": 8437,
"step-1": "<mask token>\n",
"step-2": "{'variables': {'node_shared_openssl%': 'true'}, 'targets': [{'target_name':\n 'keypair', 'sources': ['secp256k1/keypair.cc'], 'conditions': [[\n 'OS==\"win\"', {'conditions': [['target_arch==\"x64\"', {'variables': {\n 'openssl_root%': 'C:/OpenSSL-Win64'}}, {'variables': {'openssl_root%':\n 'C:/OpenSSL-Win32'}}]], 'libraries': [\n '-l<(openssl_root)/lib/libeay32.lib'], 'include_dirs': [\n '<(openssl_root)/include']}, {'conditions': [[\n 'node_shared_openssl==\"false\"', {'include_dirs': [\n '<(node_root_dir)/deps/openssl/openssl/include'], 'conditions': [[\n \"target_arch=='ia32'\", {'include_dirs': [\n '<(node_root_dir)/deps/openssl/config/piii']}], [\"target_arch=='x64'\",\n {'include_dirs': ['<(node_root_dir)/deps/openssl/config/k8']}], [\n \"target_arch=='arm'\", {'include_dirs': [\n '<(node_root_dir)/deps/openssl/config/arm']}]]}]]}]]}]}\n",
"step-3": "{\n 'variables': {\n 'node_shared_openssl%': 'true'\n },\n 'targets': [\n {\n 'target_name': 'keypair',\n 'sources': [\n 'secp256k1/keypair.cc'\n ],\n 'conditions': [\n # For Windows, require either a 32-bit or 64-bit\n # separately-compiled OpenSSL library.\n\t# Currently set up to use with the following OpenSSL distro:\n\t#\n\t# http://slproweb.com/products/Win32OpenSSL.html\n [\n\t 'OS==\"win\"', \n\t {\n 'conditions':\n\t [\n [\n\t 'target_arch==\"x64\"',\n\t {\n\t 'variables': {\n 'openssl_root%': 'C:/OpenSSL-Win64'\n },\n }, {\n 'variables': {\n 'openssl_root%': 'C:/OpenSSL-Win32'\n }\n\t\t}\n\t ]\n ],\n 'libraries': [ \n '-l<(openssl_root)/lib/libeay32.lib',\n ],\n 'include_dirs': [\n '<(openssl_root)/include',\n ],\n },\n\n\n # Otherwise, if not Windows, link against the exposed OpenSSL\n\t # in Node.\n {\n \"conditions\": [\n ['node_shared_openssl==\"false\"', {\n # so when \"node_shared_openssl\" is \"false\", then OpenSSL has been\n # bundled into the node executable. So we need to include the same\n # header files that were used when building node.\n 'include_dirs': [\n '<(node_root_dir)/deps/openssl/openssl/include'\n ],\n \"conditions\" : [\n [\"target_arch=='ia32'\", {\n \"include_dirs\": [ \"<(node_root_dir)/deps/openssl/config/piii\" ]\n }],\n [\"target_arch=='x64'\", {\n \"include_dirs\": [ \"<(node_root_dir)/deps/openssl/config/k8\" ]\n }],\n [\"target_arch=='arm'\", {\n \"include_dirs\": [ \"<(node_root_dir)/deps/openssl/config/arm\" ]\n }]\n ]\n }]\n ]}\n ]]\n }\n ]\n}\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from tdm.lib.device import DddDevice, DeviceAction, DeviceWHQuery, Validity
class CallJohnDevice(DddDevice):
class MakeCall(DeviceAction):
def perform(self, select_contact, select_number):
contact = self.device.CONTACTS.get(select_contact)
number_type = self.device.CONTACTS.get(select_number)
return True
class contact_lookup(DeviceWHQuery):
def perform(self, select_contact, select_number):
#print("Looking up {}".format(select_contact))
number = self.device.PHONE_NUMBERS.get(select_contact).get(select_number)
#print(number)
return [number]
class PhoneNumberAvailable(Validity):
def is_valid(self, select_contact):
#print(self.device.CONTACTS.values())
if self.device.PHONE_NUMBERS.get(select_contact) == None:
#print("{} is not in contacts".format(select_contact))
return False
else:
#print("{} is in contacts".format(select_contact))
return True
JOHN = "contact_john"
LISA = "contact_lisa"
MARY = "contact_mary"
ANDY = "contact_andy"
MOBILE = "mobile"
WORK = "work"
HOME = "home"
PHONE_NUMBERS = {
JOHN: {
MOBILE: "0701234567",
WORK: "0736582934",
HOME: "031122363"
},
LISA: {
MOBILE: "0709876543",
WORK: "0763559230",
HOME: "031749205"
},
MARY: {
MOBILE: "0706574839",
WORK: "0784736475",
HOME: "031847528"
},
ANDY: None
}
CONTACTS = {
"John": JOHN,
"Lisa": LISA,
"Mary": MARY,
"Andy": ANDY,
}
|
normal
|
{
"blob_id": "1dd235ecfe577b508d0777e8c70026114aeb154f",
"index": 6648,
"step-1": "from tdm.lib.device import DddDevice, DeviceAction, DeviceWHQuery, Validity\r\n\r\n\r\nclass CallJohnDevice(DddDevice):\r\n\r\n class MakeCall(DeviceAction):\r\n def perform(self, select_contact, select_number):\r\n contact = self.device.CONTACTS.get(select_contact)\r\n number_type = self.device.CONTACTS.get(select_number)\r\n return True\r\n\r\n class contact_lookup(DeviceWHQuery):\r\n def perform(self, select_contact, select_number):\r\n #print(\"Looking up {}\".format(select_contact))\r\n number = self.device.PHONE_NUMBERS.get(select_contact).get(select_number)\r\n #print(number)\r\n\t return [number]\r\n\r\n class PhoneNumberAvailable(Validity):\r\n def is_valid(self, select_contact):\r\n #print(self.device.CONTACTS.values())\r\n if self.device.PHONE_NUMBERS.get(select_contact) == None:\r\n #print(\"{} is not in contacts\".format(select_contact))\r\n\t\treturn False\r\n else:\r\n #print(\"{} is in contacts\".format(select_contact))\r\n return True\r\n\r\n JOHN = \"contact_john\"\r\n LISA = \"contact_lisa\"\r\n MARY = \"contact_mary\"\r\n ANDY = \"contact_andy\"\r\n\r\n MOBILE = \"mobile\"\r\n WORK = \"work\"\r\n HOME = \"home\"\r\n\r\n PHONE_NUMBERS = {\r\n JOHN: {\r\n MOBILE: \"0701234567\",\r\n WORK: \"0736582934\",\r\n HOME: \"031122363\"\r\n },\r\n LISA: {\r\n MOBILE: \"0709876543\",\r\n WORK: \"0763559230\",\r\n HOME: \"031749205\"\r\n },\r\n MARY: {\r\n MOBILE: \"0706574839\",\r\n WORK: \"0784736475\",\r\n HOME: \"031847528\"\r\n },\r\n ANDY: None\r\n }\r\n\r\n CONTACTS = {\r\n \"John\": JOHN,\r\n \"Lisa\": LISA,\r\n \"Mary\": MARY,\r\n \"Andy\": ANDY,\r\n }\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2015, MuChu Hsu
Contributed by Muchu Hsu (muchu1983@gmail.com)
This file is part of BSD license
<https://opensource.org/licenses/BSD-3-Clause>
"""
import unittest
import logging
from cameo.spiderForCROWDCUBE import SpiderForCROWDCUBE
"""
測試 抓取 CROWDCUBE
"""
class SpiderForCROWDCUBETest(unittest.TestCase):
#準備
def setUp(self):
logging.basicConfig(level=logging.INFO)
self.spider = SpiderForCROWDCUBE()
self.spider.initDriver()
#收尾
def tearDown(self):
self.spider.quitDriver()
"""
#測試註冊帳號
def test_registerAccount(self):
logging.info("SpiderForCROWDCUBETest.test_registerAccount")
self.spider.registerAccount()
#測試登入帳號
def test_loginAccount(self):
logging.info("SpiderForCROWDCUBETest.test_loginAccount")
self.spider.loginAccount()
#測試抓取 companies page
def test_downloadCompaniesPage(self):
logging.info("SpiderForCROWDCUBETest.test_downloadCompaniesPage")
self.spider.downloadCompaniesPage()
"""
#測試抓取 company page
def test_downloadCompanyPage(self):
logging.info("SpiderForCROWDCUBETest.test_downloadCompanyPage")
self.spider.downloadCompanyPage()
#測試開始
if __name__ == "__main__":
unittest.main(exit=False)
|
normal
|
{
"blob_id": "45856b4c5cbf1d3b414ad769135b2d974bc0a22b",
"index": 7120,
"step-1": "<mask token>\n\n\nclass SpiderForCROWDCUBETest(unittest.TestCase):\n\n def setUp(self):\n logging.basicConfig(level=logging.INFO)\n self.spider = SpiderForCROWDCUBE()\n self.spider.initDriver()\n <mask token>\n <mask token>\n\n def test_downloadCompanyPage(self):\n logging.info('SpiderForCROWDCUBETest.test_downloadCompanyPage')\n self.spider.downloadCompanyPage()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SpiderForCROWDCUBETest(unittest.TestCase):\n\n def setUp(self):\n logging.basicConfig(level=logging.INFO)\n self.spider = SpiderForCROWDCUBE()\n self.spider.initDriver()\n\n def tearDown(self):\n self.spider.quitDriver()\n <mask token>\n\n def test_downloadCompanyPage(self):\n logging.info('SpiderForCROWDCUBETest.test_downloadCompanyPage')\n self.spider.downloadCompanyPage()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SpiderForCROWDCUBETest(unittest.TestCase):\n\n def setUp(self):\n logging.basicConfig(level=logging.INFO)\n self.spider = SpiderForCROWDCUBE()\n self.spider.initDriver()\n\n def tearDown(self):\n self.spider.quitDriver()\n \"\"\"\n #測試註冊帳號\n def test_registerAccount(self):\n logging.info(\"SpiderForCROWDCUBETest.test_registerAccount\")\n self.spider.registerAccount()\n \n #測試登入帳號\n def test_loginAccount(self):\n logging.info(\"SpiderForCROWDCUBETest.test_loginAccount\")\n self.spider.loginAccount()\n \n #測試抓取 companies page\n def test_downloadCompaniesPage(self):\n logging.info(\"SpiderForCROWDCUBETest.test_downloadCompaniesPage\")\n self.spider.downloadCompaniesPage()\n \"\"\"\n\n def test_downloadCompanyPage(self):\n logging.info('SpiderForCROWDCUBETest.test_downloadCompanyPage')\n self.spider.downloadCompanyPage()\n\n\nif __name__ == '__main__':\n unittest.main(exit=False)\n",
"step-4": "<mask token>\nimport unittest\nimport logging\nfrom cameo.spiderForCROWDCUBE import SpiderForCROWDCUBE\n<mask token>\n\n\nclass SpiderForCROWDCUBETest(unittest.TestCase):\n\n def setUp(self):\n logging.basicConfig(level=logging.INFO)\n self.spider = SpiderForCROWDCUBE()\n self.spider.initDriver()\n\n def tearDown(self):\n self.spider.quitDriver()\n \"\"\"\n #測試註冊帳號\n def test_registerAccount(self):\n logging.info(\"SpiderForCROWDCUBETest.test_registerAccount\")\n self.spider.registerAccount()\n \n #測試登入帳號\n def test_loginAccount(self):\n logging.info(\"SpiderForCROWDCUBETest.test_loginAccount\")\n self.spider.loginAccount()\n \n #測試抓取 companies page\n def test_downloadCompaniesPage(self):\n logging.info(\"SpiderForCROWDCUBETest.test_downloadCompaniesPage\")\n self.spider.downloadCompaniesPage()\n \"\"\"\n\n def test_downloadCompanyPage(self):\n logging.info('SpiderForCROWDCUBETest.test_downloadCompanyPage')\n self.spider.downloadCompanyPage()\n\n\nif __name__ == '__main__':\n unittest.main(exit=False)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright (C) 2015, MuChu Hsu\nContributed by Muchu Hsu (muchu1983@gmail.com)\nThis file is part of BSD license\n\n<https://opensource.org/licenses/BSD-3-Clause>\n\"\"\"\nimport unittest\nimport logging\nfrom cameo.spiderForCROWDCUBE import SpiderForCROWDCUBE\n\"\"\"\n測試 抓取 CROWDCUBE\n\"\"\"\n\nclass SpiderForCROWDCUBETest(unittest.TestCase):\n\n #準備\n def setUp(self):\n logging.basicConfig(level=logging.INFO)\n self.spider = SpiderForCROWDCUBE()\n self.spider.initDriver()\n \n #收尾\n def tearDown(self):\n self.spider.quitDriver()\n \"\"\"\n #測試註冊帳號\n def test_registerAccount(self):\n logging.info(\"SpiderForCROWDCUBETest.test_registerAccount\")\n self.spider.registerAccount()\n \n #測試登入帳號\n def test_loginAccount(self):\n logging.info(\"SpiderForCROWDCUBETest.test_loginAccount\")\n self.spider.loginAccount()\n \n #測試抓取 companies page\n def test_downloadCompaniesPage(self):\n logging.info(\"SpiderForCROWDCUBETest.test_downloadCompaniesPage\")\n self.spider.downloadCompaniesPage()\n \"\"\"\n #測試抓取 company page\n def test_downloadCompanyPage(self):\n logging.info(\"SpiderForCROWDCUBETest.test_downloadCompanyPage\")\n self.spider.downloadCompanyPage()\n \n#測試開始\nif __name__ == \"__main__\":\n unittest.main(exit=False)\n\n\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
# ------------------------------------#
# Title: Mailroom Part 1
# Dev: SChang
# Date: Feb 2nd, 2019
# ChangeLog: (Who, When, What)
# SChang,02/02/2019, Created Script
# ------------------------------------#
import os
import sys
import math
donor_list = {"William Gates": [1010, 2020, 3030],
"Mark Zuckerberg": [5500, 4400],
"Jeff Bezos": [6745, 2345, 3845],
"Paul Allen": [9999, 8888, 7777]
}
# function for sending either adding new donor or checking against donor list
def send_ty():
DonorName = "list"
while DonorName == "list":
DonorName = input(""""Provide Donor Full Name, or type: "List" to display a list of all donors => """)
if DonorName.lower().strip() == "list":
view_donors()
continue
if DonorName[:1].lower() == "e":
return None
DonorName = DonorName.strip()
donor_amount = ask_donation_amount(DonorName)
if donor_amount is None:
return None
append_donation(DonorName, donor_amount)
print(ty_letter(DonorName, donor_amount), end='\n\n')
# function that recognizes name and donation amount which is passed through the send_ty function for print
def ty_letter(name,amount):
return f"""
Thank you, {name} for donating ${amount:.2f}"""
# function that is passed through send_ty function defined by donor_amount
def ask_donation_amount(name):
response = input(f"How much did {name} donate? ")
if response [:1].lower() == 'e':
return None
return float(response)
# function appending name/amount to the donor list if new
def append_donation(name, amount):
donor_list.setdefault(name, []).append(amount)
# viewing list of donors if "List" is entered from menu
def view_donors():
for donor in donor_list:
print(f"{donor}")
def report_sort(item):
return item[1]
# function for report that is formatted with donor information
def create_report():
print()
print("{:<20}| Total Given | Num Gifts | Average Gift".format("Donor Name"))
print("-" * 60)
for d, v in sorted(donor_list.items(), key=report_sort, reverse=True):
print("{:<21}${:>11.2f}{:>12} ${:>12.2f}".format(d, sum(v), len(v),
sum(v) / len(v)))
# function for exit option off menu
def exit_program ():
print("Program Exited!")
sys.exit()
def main():
menu_dict = {
"1": send_ty,
"2": create_report,
"3": exit_program
}
prompt_menu = "\n".join(("",
"Charity Management Application",
"Please choose from below options:",
"",
"1 - Send a Thank You",
"2 - Create a Report",
"3 - Exit",
">>> "))
while True:
response = input(prompt_menu)
menu_dict[response]()
if __name__ == "__main__":
# Guards against code running automatically if module is imported
main()
|
normal
|
{
"blob_id": "f2292d1816699392663bdbf7a06c334de3b2022c",
"index": 7118,
"step-1": "<mask token>\n\n\ndef send_ty():\n DonorName = 'list'\n while DonorName == 'list':\n DonorName = input(\n '\"Provide Donor Full Name, or type: \"List\" to display a list of all donors => '\n )\n if DonorName.lower().strip() == 'list':\n view_donors()\n continue\n if DonorName[:1].lower() == 'e':\n return None\n DonorName = DonorName.strip()\n donor_amount = ask_donation_amount(DonorName)\n if donor_amount is None:\n return None\n append_donation(DonorName, donor_amount)\n print(ty_letter(DonorName, donor_amount), end='\\n\\n')\n\n\ndef ty_letter(name, amount):\n return f\"\"\"\n Thank you, {name} for donating ${amount:.2f}\"\"\"\n\n\ndef ask_donation_amount(name):\n response = input(f'How much did {name} donate? ')\n if response[:1].lower() == 'e':\n return None\n return float(response)\n\n\ndef append_donation(name, amount):\n donor_list.setdefault(name, []).append(amount)\n\n\n<mask token>\n\n\ndef report_sort(item):\n return item[1]\n\n\ndef create_report():\n print()\n print('{:<20}| Total Given | Num Gifts | Average Gift'.format('Donor Name')\n )\n print('-' * 60)\n for d, v in sorted(donor_list.items(), key=report_sort, reverse=True):\n print('{:<21}${:>11.2f}{:>12} ${:>12.2f}'.format(d, sum(v), len(v),\n sum(v) / len(v)))\n\n\ndef exit_program():\n print('Program Exited!')\n sys.exit()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef send_ty():\n DonorName = 'list'\n while DonorName == 'list':\n DonorName = input(\n '\"Provide Donor Full Name, or type: \"List\" to display a list of all donors => '\n )\n if DonorName.lower().strip() == 'list':\n view_donors()\n continue\n if DonorName[:1].lower() == 'e':\n return None\n DonorName = DonorName.strip()\n donor_amount = ask_donation_amount(DonorName)\n if donor_amount is None:\n return None\n append_donation(DonorName, donor_amount)\n print(ty_letter(DonorName, donor_amount), end='\\n\\n')\n\n\ndef ty_letter(name, amount):\n return f\"\"\"\n Thank you, {name} for donating ${amount:.2f}\"\"\"\n\n\ndef ask_donation_amount(name):\n response = input(f'How much did {name} donate? ')\n if response[:1].lower() == 'e':\n return None\n return float(response)\n\n\ndef append_donation(name, amount):\n donor_list.setdefault(name, []).append(amount)\n\n\ndef view_donors():\n for donor in donor_list:\n print(f'{donor}')\n\n\ndef report_sort(item):\n return item[1]\n\n\ndef create_report():\n print()\n print('{:<20}| Total Given | Num Gifts | Average Gift'.format('Donor Name')\n )\n print('-' * 60)\n for d, v in sorted(donor_list.items(), key=report_sort, reverse=True):\n print('{:<21}${:>11.2f}{:>12} ${:>12.2f}'.format(d, sum(v), len(v),\n sum(v) / len(v)))\n\n\ndef exit_program():\n print('Program Exited!')\n sys.exit()\n\n\ndef main():\n menu_dict = {'1': send_ty, '2': create_report, '3': exit_program}\n prompt_menu = '\\n'.join(('', 'Charity Management Application',\n 'Please choose from below options:', '', '1 - Send a Thank You',\n '2 - Create a Report', '3 - Exit', '>>> '))\n while True:\n response = input(prompt_menu)\n menu_dict[response]()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef send_ty():\n DonorName = 'list'\n while DonorName == 'list':\n DonorName = input(\n '\"Provide Donor Full Name, or type: \"List\" to display a list of all donors => '\n )\n if DonorName.lower().strip() == 'list':\n view_donors()\n continue\n if DonorName[:1].lower() == 'e':\n return None\n DonorName = DonorName.strip()\n donor_amount = ask_donation_amount(DonorName)\n if donor_amount is None:\n return None\n append_donation(DonorName, donor_amount)\n print(ty_letter(DonorName, donor_amount), end='\\n\\n')\n\n\ndef ty_letter(name, amount):\n return f\"\"\"\n Thank you, {name} for donating ${amount:.2f}\"\"\"\n\n\ndef ask_donation_amount(name):\n response = input(f'How much did {name} donate? ')\n if response[:1].lower() == 'e':\n return None\n return float(response)\n\n\ndef append_donation(name, amount):\n donor_list.setdefault(name, []).append(amount)\n\n\ndef view_donors():\n for donor in donor_list:\n print(f'{donor}')\n\n\ndef report_sort(item):\n return item[1]\n\n\ndef create_report():\n print()\n print('{:<20}| Total Given | Num Gifts | Average Gift'.format('Donor Name')\n )\n print('-' * 60)\n for d, v in sorted(donor_list.items(), key=report_sort, reverse=True):\n print('{:<21}${:>11.2f}{:>12} ${:>12.2f}'.format(d, sum(v), len(v),\n sum(v) / len(v)))\n\n\ndef exit_program():\n print('Program Exited!')\n sys.exit()\n\n\ndef main():\n menu_dict = {'1': send_ty, '2': create_report, '3': exit_program}\n prompt_menu = '\\n'.join(('', 'Charity Management Application',\n 'Please choose from below options:', '', '1 - Send a Thank You',\n '2 - Create a Report', '3 - Exit', '>>> '))\n while True:\n response = input(prompt_menu)\n menu_dict[response]()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import os\nimport sys\nimport math\ndonor_list = {'William Gates': [1010, 2020, 3030], 'Mark Zuckerberg': [5500,\n 4400], 'Jeff Bezos': [6745, 2345, 3845], 'Paul Allen': [9999, 8888, 7777]}\n\n\ndef send_ty():\n DonorName = 'list'\n while DonorName == 'list':\n DonorName = input(\n '\"Provide Donor Full Name, or type: \"List\" to display a list of all donors => '\n )\n if DonorName.lower().strip() == 'list':\n view_donors()\n continue\n if DonorName[:1].lower() == 'e':\n return None\n DonorName = DonorName.strip()\n donor_amount = ask_donation_amount(DonorName)\n if donor_amount is None:\n return None\n append_donation(DonorName, donor_amount)\n print(ty_letter(DonorName, donor_amount), end='\\n\\n')\n\n\ndef ty_letter(name, amount):\n return f\"\"\"\n Thank you, {name} for donating ${amount:.2f}\"\"\"\n\n\ndef ask_donation_amount(name):\n response = input(f'How much did {name} donate? ')\n if response[:1].lower() == 'e':\n return None\n return float(response)\n\n\ndef append_donation(name, amount):\n donor_list.setdefault(name, []).append(amount)\n\n\ndef view_donors():\n for donor in donor_list:\n print(f'{donor}')\n\n\ndef report_sort(item):\n return item[1]\n\n\ndef create_report():\n print()\n print('{:<20}| Total Given | Num Gifts | Average Gift'.format('Donor Name')\n )\n print('-' * 60)\n for d, v in sorted(donor_list.items(), key=report_sort, reverse=True):\n print('{:<21}${:>11.2f}{:>12} ${:>12.2f}'.format(d, sum(v), len(v),\n sum(v) / len(v)))\n\n\ndef exit_program():\n print('Program Exited!')\n sys.exit()\n\n\ndef main():\n menu_dict = {'1': send_ty, '2': create_report, '3': exit_program}\n prompt_menu = '\\n'.join(('', 'Charity Management Application',\n 'Please choose from below options:', '', '1 - Send a Thank You',\n '2 - Create a Report', '3 - Exit', '>>> '))\n while True:\n response = input(prompt_menu)\n menu_dict[response]()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# ------------------------------------#\n# Title: Mailroom Part 1\n# Dev: SChang\n# Date: Feb 2nd, 2019\n# ChangeLog: (Who, When, What)\n# SChang,02/02/2019, Created Script\n# ------------------------------------#\nimport os\nimport sys\nimport math\n\ndonor_list = {\"William Gates\": [1010, 2020, 3030],\n \"Mark Zuckerberg\": [5500, 4400],\n \"Jeff Bezos\": [6745, 2345, 3845],\n \"Paul Allen\": [9999, 8888, 7777]\n }\n\n\n# function for sending either adding new donor or checking against donor list\ndef send_ty():\n DonorName = \"list\"\n while DonorName == \"list\":\n DonorName = input(\"\"\"\"Provide Donor Full Name, or type: \"List\" to display a list of all donors => \"\"\")\n if DonorName.lower().strip() == \"list\":\n view_donors()\n continue\n if DonorName[:1].lower() == \"e\":\n return None\n\n DonorName = DonorName.strip()\n donor_amount = ask_donation_amount(DonorName)\n if donor_amount is None:\n return None\n append_donation(DonorName, donor_amount)\n\n print(ty_letter(DonorName, donor_amount), end='\\n\\n')\n\n\n# function that recognizes name and donation amount which is passed through the send_ty function for print\ndef ty_letter(name,amount):\n return f\"\"\"\n Thank you, {name} for donating ${amount:.2f}\"\"\"\n\n\n# function that is passed through send_ty function defined by donor_amount\ndef ask_donation_amount(name):\n response = input(f\"How much did {name} donate? 
\")\n if response [:1].lower() == 'e':\n return None\n return float(response)\n\n\n# function appending name/amount to the donor list if new\ndef append_donation(name, amount):\n donor_list.setdefault(name, []).append(amount)\n\n\n# viewing list of donors if \"List\" is entered from menu\ndef view_donors():\n for donor in donor_list:\n print(f\"{donor}\")\n\n\ndef report_sort(item):\n return item[1]\n\n\n# function for report that is formatted with donor information\ndef create_report():\n print()\n print(\"{:<20}| Total Given | Num Gifts | Average Gift\".format(\"Donor Name\"))\n print(\"-\" * 60)\n\n for d, v in sorted(donor_list.items(), key=report_sort, reverse=True):\n print(\"{:<21}${:>11.2f}{:>12} ${:>12.2f}\".format(d, sum(v), len(v),\n\n sum(v) / len(v)))\n\n\n# function for exit option off menu\ndef exit_program ():\n print(\"Program Exited!\")\n sys.exit()\n\n\ndef main():\n menu_dict = {\n \"1\": send_ty,\n \"2\": create_report,\n \"3\": exit_program\n }\n\n prompt_menu = \"\\n\".join((\"\",\n \"Charity Management Application\",\n \"Please choose from below options:\",\n \"\",\n \"1 - Send a Thank You\",\n \"2 - Create a Report\",\n \"3 - Exit\",\n \">>> \"))\n\n while True:\n response = input(prompt_menu)\n menu_dict[response]()\n\n\nif __name__ == \"__main__\":\n # Guards against code running automatically if module is imported\n main()\n",
"step-ids": [
7,
9,
10,
12,
13
]
}
|
[
7,
9,
10,
12,
13
] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import csv
file_open = open("C:/Users/DI_Lab/Desktop/20년도 Kisti 과제/HMM/HMM(Up,Down).csv", 'r', encoding='UTF8')
save_file = open("C:/Users/DI_Lab/Desktop/20년도 Kisti 과제/HMM/HMM사후확률.csv", 'w', encoding='UTF8',newline='')
write = csv.writer(save_file)
hidden_states = ['up', 'down']
pi = [0.5044, 0.4956]
state_space = pd.Series(pi, index=hidden_states, name='states')
print(state_space)
print('\n', state_space.sum())
stack = 0
x_a = ""
x_b = ""
y_a = ""
y_b = ""
before_application = ""
add = []
def count(a,b):
a = int(a)
b = int(b)
if a == 0 and b == 0:
return 0
elif a == 0 and b == 1:
return 1
elif a == 1 and b == 0:
return 2
elif a == 1 and b == 1:
return 3
while True:
line = file_open.readline()
if not line: break
result_x = []
result_y = []
add = []
if stack == 0:
a = line.split(',')[0]
a = a.strip()
add.append(a)
a = line.split(',')[1]
a = a.strip()
add.append(a)
a = line.split(',')[2]
a = a.strip()
add.append(a)
write.writerow(add)
stack = 1
elif stack == 1:
before_application = line.split(',')[0]
x_a = line.split(',')[1]
x_a = x_a.strip()
y_a = line.split(',')[2]
y_a = y_a.strip()
stack = 2
elif stack == 2:
if before_application == line.split(',')[0]:
x_b = line.split(',')[1]
x_b = x_b.strip()
y_b = line.split(',')[2]
y_b = y_b.strip()
result_x.append(x_a)
result_x.append(x_b)
result_y.append(y_a)
result_y.append(y_b)
tol = count(result_x[0],result_x[1])
add.append(tol)
tol = count(result_y[0], result_y[1])
add.append(tol)
write.writerow(add)
stack = 3
else:
pass
before_application = line.split(',')[0]
elif stack == 3:
if before_application == line.split(',')[0]:
x_a = line.split(',')[1]
x_a = x_a.strip()
y_a = line.split(',')[2]
y_a = y_a.strip()
result_x.append(x_b)
result_x.append(x_a)
result_y.append(y_b)
result_y.append(y_a)
tol = count(result_x[0],result_x[1])
add.append(tol)
tol = count(result_y[0], result_y[1])
add.append(tol)
write.writerow(add)
stack = 2
else:
pass
before_application = line.split(',')[0]
|
normal
|
{
"blob_id": "55977a673bb36900e1d797cb9ec330ce6d9aa717",
"index": 8232,
"step-1": "<mask token>\n\n\ndef count(a, b):\n a = int(a)\n b = int(b)\n if a == 0 and b == 0:\n return 0\n elif a == 0 and b == 1:\n return 1\n elif a == 1 and b == 0:\n return 2\n elif a == 1 and b == 1:\n return 3\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint(state_space)\nprint('\\n', state_space.sum())\n<mask token>\n\n\ndef count(a, b):\n a = int(a)\n b = int(b)\n if a == 0 and b == 0:\n return 0\n elif a == 0 and b == 1:\n return 1\n elif a == 1 and b == 0:\n return 2\n elif a == 1 and b == 1:\n return 3\n\n\nwhile True:\n line = file_open.readline()\n if not line:\n break\n result_x = []\n result_y = []\n add = []\n if stack == 0:\n a = line.split(',')[0]\n a = a.strip()\n add.append(a)\n a = line.split(',')[1]\n a = a.strip()\n add.append(a)\n a = line.split(',')[2]\n a = a.strip()\n add.append(a)\n write.writerow(add)\n stack = 1\n elif stack == 1:\n before_application = line.split(',')[0]\n x_a = line.split(',')[1]\n x_a = x_a.strip()\n y_a = line.split(',')[2]\n y_a = y_a.strip()\n stack = 2\n elif stack == 2:\n if before_application == line.split(',')[0]:\n x_b = line.split(',')[1]\n x_b = x_b.strip()\n y_b = line.split(',')[2]\n y_b = y_b.strip()\n result_x.append(x_a)\n result_x.append(x_b)\n result_y.append(y_a)\n result_y.append(y_b)\n tol = count(result_x[0], result_x[1])\n add.append(tol)\n tol = count(result_y[0], result_y[1])\n add.append(tol)\n write.writerow(add)\n stack = 3\n else:\n pass\n before_application = line.split(',')[0]\n elif stack == 3:\n if before_application == line.split(',')[0]:\n x_a = line.split(',')[1]\n x_a = x_a.strip()\n y_a = line.split(',')[2]\n y_a = y_a.strip()\n result_x.append(x_b)\n result_x.append(x_a)\n result_y.append(y_b)\n result_y.append(y_a)\n tol = count(result_x[0], result_x[1])\n add.append(tol)\n tol = count(result_y[0], result_y[1])\n add.append(tol)\n write.writerow(add)\n stack = 2\n else:\n pass\n before_application = line.split(',')[0]\n",
"step-3": "<mask token>\nfile_open = open('C:/Users/DI_Lab/Desktop/20년도 Kisti 과제/HMM/HMM(Up,Down).csv',\n 'r', encoding='UTF8')\nsave_file = open('C:/Users/DI_Lab/Desktop/20년도 Kisti 과제/HMM/HMM사후확률.csv',\n 'w', encoding='UTF8', newline='')\nwrite = csv.writer(save_file)\nhidden_states = ['up', 'down']\npi = [0.5044, 0.4956]\nstate_space = pd.Series(pi, index=hidden_states, name='states')\nprint(state_space)\nprint('\\n', state_space.sum())\nstack = 0\nx_a = ''\nx_b = ''\ny_a = ''\ny_b = ''\nbefore_application = ''\nadd = []\n\n\ndef count(a, b):\n a = int(a)\n b = int(b)\n if a == 0 and b == 0:\n return 0\n elif a == 0 and b == 1:\n return 1\n elif a == 1 and b == 0:\n return 2\n elif a == 1 and b == 1:\n return 3\n\n\nwhile True:\n line = file_open.readline()\n if not line:\n break\n result_x = []\n result_y = []\n add = []\n if stack == 0:\n a = line.split(',')[0]\n a = a.strip()\n add.append(a)\n a = line.split(',')[1]\n a = a.strip()\n add.append(a)\n a = line.split(',')[2]\n a = a.strip()\n add.append(a)\n write.writerow(add)\n stack = 1\n elif stack == 1:\n before_application = line.split(',')[0]\n x_a = line.split(',')[1]\n x_a = x_a.strip()\n y_a = line.split(',')[2]\n y_a = y_a.strip()\n stack = 2\n elif stack == 2:\n if before_application == line.split(',')[0]:\n x_b = line.split(',')[1]\n x_b = x_b.strip()\n y_b = line.split(',')[2]\n y_b = y_b.strip()\n result_x.append(x_a)\n result_x.append(x_b)\n result_y.append(y_a)\n result_y.append(y_b)\n tol = count(result_x[0], result_x[1])\n add.append(tol)\n tol = count(result_y[0], result_y[1])\n add.append(tol)\n write.writerow(add)\n stack = 3\n else:\n pass\n before_application = line.split(',')[0]\n elif stack == 3:\n if before_application == line.split(',')[0]:\n x_a = line.split(',')[1]\n x_a = x_a.strip()\n y_a = line.split(',')[2]\n y_a = y_a.strip()\n result_x.append(x_b)\n result_x.append(x_a)\n result_y.append(y_b)\n result_y.append(y_a)\n tol = count(result_x[0], result_x[1])\n add.append(tol)\n tol 
= count(result_y[0], result_y[1])\n add.append(tol)\n write.writerow(add)\n stack = 2\n else:\n pass\n before_application = line.split(',')[0]\n",
"step-4": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport csv\nfile_open = open('C:/Users/DI_Lab/Desktop/20년도 Kisti 과제/HMM/HMM(Up,Down).csv',\n 'r', encoding='UTF8')\nsave_file = open('C:/Users/DI_Lab/Desktop/20년도 Kisti 과제/HMM/HMM사후확률.csv',\n 'w', encoding='UTF8', newline='')\nwrite = csv.writer(save_file)\nhidden_states = ['up', 'down']\npi = [0.5044, 0.4956]\nstate_space = pd.Series(pi, index=hidden_states, name='states')\nprint(state_space)\nprint('\\n', state_space.sum())\nstack = 0\nx_a = ''\nx_b = ''\ny_a = ''\ny_b = ''\nbefore_application = ''\nadd = []\n\n\ndef count(a, b):\n a = int(a)\n b = int(b)\n if a == 0 and b == 0:\n return 0\n elif a == 0 and b == 1:\n return 1\n elif a == 1 and b == 0:\n return 2\n elif a == 1 and b == 1:\n return 3\n\n\nwhile True:\n line = file_open.readline()\n if not line:\n break\n result_x = []\n result_y = []\n add = []\n if stack == 0:\n a = line.split(',')[0]\n a = a.strip()\n add.append(a)\n a = line.split(',')[1]\n a = a.strip()\n add.append(a)\n a = line.split(',')[2]\n a = a.strip()\n add.append(a)\n write.writerow(add)\n stack = 1\n elif stack == 1:\n before_application = line.split(',')[0]\n x_a = line.split(',')[1]\n x_a = x_a.strip()\n y_a = line.split(',')[2]\n y_a = y_a.strip()\n stack = 2\n elif stack == 2:\n if before_application == line.split(',')[0]:\n x_b = line.split(',')[1]\n x_b = x_b.strip()\n y_b = line.split(',')[2]\n y_b = y_b.strip()\n result_x.append(x_a)\n result_x.append(x_b)\n result_y.append(y_a)\n result_y.append(y_b)\n tol = count(result_x[0], result_x[1])\n add.append(tol)\n tol = count(result_y[0], result_y[1])\n add.append(tol)\n write.writerow(add)\n stack = 3\n else:\n pass\n before_application = line.split(',')[0]\n elif stack == 3:\n if before_application == line.split(',')[0]:\n x_a = line.split(',')[1]\n x_a = x_a.strip()\n y_a = line.split(',')[2]\n y_a = y_a.strip()\n result_x.append(x_b)\n result_x.append(x_a)\n result_y.append(y_b)\n 
result_y.append(y_a)\n tol = count(result_x[0], result_x[1])\n add.append(tol)\n tol = count(result_y[0], result_y[1])\n add.append(tol)\n write.writerow(add)\n stack = 2\n else:\n pass\n before_application = line.split(',')[0]\n",
"step-5": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport csv\n\nfile_open = open(\"C:/Users/DI_Lab/Desktop/20년도 Kisti 과제/HMM/HMM(Up,Down).csv\", 'r', encoding='UTF8')\nsave_file = open(\"C:/Users/DI_Lab/Desktop/20년도 Kisti 과제/HMM/HMM사후확률.csv\", 'w', encoding='UTF8',newline='')\nwrite = csv.writer(save_file)\n\nhidden_states = ['up', 'down']\npi = [0.5044, 0.4956]\nstate_space = pd.Series(pi, index=hidden_states, name='states')\nprint(state_space)\nprint('\\n', state_space.sum())\n\nstack = 0\nx_a = \"\"\nx_b = \"\"\n\ny_a = \"\"\ny_b = \"\"\nbefore_application = \"\"\nadd = []\ndef count(a,b):\n a = int(a)\n b = int(b)\n if a == 0 and b == 0:\n return 0\n elif a == 0 and b == 1:\n return 1\n elif a == 1 and b == 0:\n return 2\n elif a == 1 and b == 1:\n return 3\n\nwhile True:\n line = file_open.readline()\n if not line: break\n result_x = []\n result_y = []\n add = []\n if stack == 0:\n a = line.split(',')[0]\n a = a.strip()\n add.append(a)\n a = line.split(',')[1]\n a = a.strip()\n add.append(a)\n a = line.split(',')[2]\n a = a.strip()\n add.append(a)\n write.writerow(add)\n\n stack = 1\n elif stack == 1:\n before_application = line.split(',')[0]\n x_a = line.split(',')[1]\n x_a = x_a.strip()\n y_a = line.split(',')[2]\n y_a = y_a.strip()\n stack = 2\n\n elif stack == 2:\n if before_application == line.split(',')[0]:\n x_b = line.split(',')[1]\n x_b = x_b.strip()\n y_b = line.split(',')[2]\n y_b = y_b.strip()\n result_x.append(x_a)\n result_x.append(x_b)\n result_y.append(y_a)\n result_y.append(y_b)\n tol = count(result_x[0],result_x[1])\n add.append(tol)\n tol = count(result_y[0], result_y[1])\n add.append(tol)\n write.writerow(add)\n stack = 3\n else:\n pass\n before_application = line.split(',')[0]\n\n elif stack == 3:\n if before_application == line.split(',')[0]:\n x_a = line.split(',')[1]\n x_a = x_a.strip()\n y_a = line.split(',')[2]\n y_a = y_a.strip()\n result_x.append(x_b)\n result_x.append(x_a)\n result_y.append(y_b)\n 
result_y.append(y_a)\n\n tol = count(result_x[0],result_x[1])\n add.append(tol)\n tol = count(result_y[0], result_y[1])\n add.append(tol)\n write.writerow(add)\n stack = 2\n else:\n pass\n before_application = line.split(',')[0]\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django.conf.urls.defaults import *
#from wiki.feeds import *
from django.conf import settings
from django.conf.urls.defaults import *
# feeds for wikiPages and wikiNews
"""
feeds = {
'latestpages': LatestPages,
}
sitemaps = {
'wiki': Wiki,
}
"""
urlpatterns = patterns('',
# Example:
# (r'^goimcommunity/', include('goimcommunity.apps.foo.urls.foo')),
# Uncomment this for admin:
(r'^admin/', include('django.contrib.admin.urls')),
(r'^polls/', include('goimcommunity.polls.urls')),
(r'^league/', include('goimcommunity.leaguesystem.urls')),
(r'^board/', include('sphene.sphboard.urls')),
(r'^rewrite/(?P<groupName>\w+)/board/', include('sphene.sphboard.urls'), {'urlPrefix': '' }),
(r'^rewrite/(?P<groupName>\w+)/wiki/', include('sphene.sphwiki.urls'), {'urlPrefix': '' }),
(r'^rewrite/\w+/accounts/login/$', 'django.contrib.auth.views.login'),
(r'^rewrite/\w+/accounts/logout/$', 'django.contrib.auth.views.logout' ),
(r'^(?P<urlPrefix>test/(?P<groupName>\w+))/board/', include('sphene.sphboard.urls')),
(r'^(?P<urlPrefix>test/(?P<groupName>\w+))/wiki/', include('sphene.sphwiki.urls')),
(r'^wiki/', include('sphene.sphwiki.urls'), { 'urlPrefix': 'wiki', 'groupName': 'Sphene' }),
(r'^static/sphene/(.*)$', 'django.views.static.serve', {'document_root': settings.ROOT_PATH + '/../../communitytools/static/sphene' }),
(r'^static/(.*)$', 'django.views.static.serve', {'document_root': settings.ROOT_PATH + '/../static' }),
(r'^site_media/(.*)$', 'django.views.static.serve', {'document_root': '/home/kahless/dev/python/diamanda/media'}), # change it or remove if not on dev server
(r'^accounts/login/$', 'django.contrib.auth.views.login'),
(r'^accounts/logout/$','django.contrib.auth.views.logout'),
(r'^accounts/register/$', 'sphene.community.views.register' ),
# (r'^forum/', include('myghtyboard.URLconf')), # forum
# (r'^muh/', 'wiki.views.show_page'), # wiki main page under /
# (r'^wiki/', include('wiki.URLconf')), # wiki
# (r'^wiki/feeds/(?P<url>.*)/$', 'django.contrib.syndication.views.feed', {'feed_dict': feeds}), # wiki feeds
# (r'^wiki/sitemap.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps}), # wikiPages sitemap
)
|
normal
|
{
"blob_id": "f44ff7488ae8fc64bc1785fb6cbe80c4cc011fbe",
"index": 6808,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = patterns('', ('^admin/', include('django.contrib.admin.urls')\n ), ('^polls/', include('goimcommunity.polls.urls')), ('^league/',\n include('goimcommunity.leaguesystem.urls')), ('^board/', include(\n 'sphene.sphboard.urls')), ('^rewrite/(?P<groupName>\\\\w+)/board/',\n include('sphene.sphboard.urls'), {'urlPrefix': ''}), (\n '^rewrite/(?P<groupName>\\\\w+)/wiki/', include('sphene.sphwiki.urls'), {\n 'urlPrefix': ''}), ('^rewrite/\\\\w+/accounts/login/$',\n 'django.contrib.auth.views.login'), ('^rewrite/\\\\w+/accounts/logout/$',\n 'django.contrib.auth.views.logout'), (\n '^(?P<urlPrefix>test/(?P<groupName>\\\\w+))/board/', include(\n 'sphene.sphboard.urls')), (\n '^(?P<urlPrefix>test/(?P<groupName>\\\\w+))/wiki/', include(\n 'sphene.sphwiki.urls')), ('^wiki/', include('sphene.sphwiki.urls'), {\n 'urlPrefix': 'wiki', 'groupName': 'Sphene'}), ('^static/sphene/(.*)$',\n 'django.views.static.serve', {'document_root': settings.ROOT_PATH +\n '/../../communitytools/static/sphene'}), ('^static/(.*)$',\n 'django.views.static.serve', {'document_root': settings.ROOT_PATH +\n '/../static'}), ('^site_media/(.*)$', 'django.views.static.serve', {\n 'document_root': '/home/kahless/dev/python/diamanda/media'}), (\n '^accounts/login/$', 'django.contrib.auth.views.login'), (\n '^accounts/logout/$', 'django.contrib.auth.views.logout'), (\n '^accounts/register/$', 'sphene.community.views.register'))\n",
"step-3": "from django.conf.urls.defaults import *\nfrom django.conf import settings\nfrom django.conf.urls.defaults import *\n<mask token>\nurlpatterns = patterns('', ('^admin/', include('django.contrib.admin.urls')\n ), ('^polls/', include('goimcommunity.polls.urls')), ('^league/',\n include('goimcommunity.leaguesystem.urls')), ('^board/', include(\n 'sphene.sphboard.urls')), ('^rewrite/(?P<groupName>\\\\w+)/board/',\n include('sphene.sphboard.urls'), {'urlPrefix': ''}), (\n '^rewrite/(?P<groupName>\\\\w+)/wiki/', include('sphene.sphwiki.urls'), {\n 'urlPrefix': ''}), ('^rewrite/\\\\w+/accounts/login/$',\n 'django.contrib.auth.views.login'), ('^rewrite/\\\\w+/accounts/logout/$',\n 'django.contrib.auth.views.logout'), (\n '^(?P<urlPrefix>test/(?P<groupName>\\\\w+))/board/', include(\n 'sphene.sphboard.urls')), (\n '^(?P<urlPrefix>test/(?P<groupName>\\\\w+))/wiki/', include(\n 'sphene.sphwiki.urls')), ('^wiki/', include('sphene.sphwiki.urls'), {\n 'urlPrefix': 'wiki', 'groupName': 'Sphene'}), ('^static/sphene/(.*)$',\n 'django.views.static.serve', {'document_root': settings.ROOT_PATH +\n '/../../communitytools/static/sphene'}), ('^static/(.*)$',\n 'django.views.static.serve', {'document_root': settings.ROOT_PATH +\n '/../static'}), ('^site_media/(.*)$', 'django.views.static.serve', {\n 'document_root': '/home/kahless/dev/python/diamanda/media'}), (\n '^accounts/login/$', 'django.contrib.auth.views.login'), (\n '^accounts/logout/$', 'django.contrib.auth.views.logout'), (\n '^accounts/register/$', 'sphene.community.views.register'))\n",
"step-4": "from django.conf.urls.defaults import *\n#from wiki.feeds import *\nfrom django.conf import settings\n\nfrom django.conf.urls.defaults import *\n# feeds for wikiPages and wikiNews\n\"\"\"\nfeeds = {\n 'latestpages': LatestPages,\n}\n\nsitemaps = {\n\t'wiki': Wiki,\n\t}\n\"\"\"\nurlpatterns = patterns('',\n # Example:\n # (r'^goimcommunity/', include('goimcommunity.apps.foo.urls.foo')),\n\n # Uncomment this for admin:\n (r'^admin/', include('django.contrib.admin.urls')),\n\n (r'^polls/', include('goimcommunity.polls.urls')),\n\t\t (r'^league/', include('goimcommunity.leaguesystem.urls')),\n\n (r'^board/', include('sphene.sphboard.urls')),\n (r'^rewrite/(?P<groupName>\\w+)/board/', include('sphene.sphboard.urls'), {'urlPrefix': '' }),\n (r'^rewrite/(?P<groupName>\\w+)/wiki/', include('sphene.sphwiki.urls'), {'urlPrefix': '' }),\n\t\t (r'^rewrite/\\w+/accounts/login/$', 'django.contrib.auth.views.login'),\n\t\t (r'^rewrite/\\w+/accounts/logout/$', 'django.contrib.auth.views.logout' ),\n (r'^(?P<urlPrefix>test/(?P<groupName>\\w+))/board/', include('sphene.sphboard.urls')),\n (r'^(?P<urlPrefix>test/(?P<groupName>\\w+))/wiki/', include('sphene.sphwiki.urls')),\n\n (r'^wiki/', include('sphene.sphwiki.urls'), { 'urlPrefix': 'wiki', 'groupName': 'Sphene' }),\n\n\n (r'^static/sphene/(.*)$', 'django.views.static.serve', {'document_root': settings.ROOT_PATH + '/../../communitytools/static/sphene' }),\n\t\t (r'^static/(.*)$', 'django.views.static.serve', {'document_root': settings.ROOT_PATH + '/../static' }),\n\n\n\n (r'^site_media/(.*)$', 'django.views.static.serve', {'document_root': '/home/kahless/dev/python/diamanda/media'}), # change it or remove if not on dev server\n\n (r'^accounts/login/$', 'django.contrib.auth.views.login'),\n (r'^accounts/logout/$','django.contrib.auth.views.logout'),\n (r'^accounts/register/$', 'sphene.community.views.register' ),\n \n\n# (r'^forum/', include('myghtyboard.URLconf')), # forum\n# (r'^muh/', 'wiki.views.show_page'), # wiki 
main page under /\n# (r'^wiki/', include('wiki.URLconf')), # wiki\n# (r'^wiki/feeds/(?P<url>.*)/$', 'django.contrib.syndication.views.feed', {'feed_dict': feeds}), # wiki feeds\n# (r'^wiki/sitemap.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps}), # wikiPages sitemap\n\n\n \n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(b)
print(c)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
a = hashlib.pbkdf2_hmac('sha256', b'hallo', b'salt', 1)
b = hashlib.pbkdf2_hmac('sha256', a, b'salt', 1)
c = hashlib.pbkdf2_hmac('sha256', b'hallo', b'salt', 2)
print(b)
print(c)
<|reserved_special_token_1|>
import hashlib
a = hashlib.pbkdf2_hmac('sha256', b'hallo', b'salt', 1)
b = hashlib.pbkdf2_hmac('sha256', a, b'salt', 1)
c = hashlib.pbkdf2_hmac('sha256', b'hallo', b'salt', 2)
print(b)
print(c)
<|reserved_special_token_1|>
import hashlib
a = hashlib.pbkdf2_hmac("sha256", b"hallo", b"salt", 1)
b = hashlib.pbkdf2_hmac("sha256", a, b"salt", 1)
c = hashlib.pbkdf2_hmac("sha256", b"hallo", b"salt", 2)
print(b)
print(c)
|
flexible
|
{
"blob_id": "20ac73789fa7297a9230a6a2b814349d2b7da5fb",
"index": 1851,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(b)\nprint(c)\n",
"step-3": "<mask token>\na = hashlib.pbkdf2_hmac('sha256', b'hallo', b'salt', 1)\nb = hashlib.pbkdf2_hmac('sha256', a, b'salt', 1)\nc = hashlib.pbkdf2_hmac('sha256', b'hallo', b'salt', 2)\nprint(b)\nprint(c)\n",
"step-4": "import hashlib\na = hashlib.pbkdf2_hmac('sha256', b'hallo', b'salt', 1)\nb = hashlib.pbkdf2_hmac('sha256', a, b'salt', 1)\nc = hashlib.pbkdf2_hmac('sha256', b'hallo', b'salt', 2)\nprint(b)\nprint(c)\n",
"step-5": "import hashlib\r\na = hashlib.pbkdf2_hmac(\"sha256\", b\"hallo\", b\"salt\", 1)\r\nb = hashlib.pbkdf2_hmac(\"sha256\", a, b\"salt\", 1)\r\nc = hashlib.pbkdf2_hmac(\"sha256\", b\"hallo\", b\"salt\", 2)\r\nprint(b)\r\nprint(c)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from unittest import TestCase
from spiral.spiral_matrix import SpiralMatrix
class TestOutwardCounterClockwise(TestCase):
def test_traverse_empty(self):
matrix = []
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
self.assertEqual([], actual)
def test_traverse_empty_vector(self):
matrix = [[]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
self.assertEqual([], actual)
def test_traverse_single_element(self):
matrix = [[1]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
self.assertEqual([1], actual)
def test_traverse_row_vector(self):
matrix = [[1, 2, 3]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
self.assertEqual([3, 2, 1], actual)
def test_traverse_column_vector(self):
matrix = [
[1],
[2],
[3]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
self.assertEqual([3, 2, 1], actual)
def test_traverse_even_square(self):
matrix = [
[1, 2],
[3, 4]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
self.assertEqual([3, 4, 2, 1], actual)
def test_traverse_odd_square(self):
matrix = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
self.assertEqual([5, 4, 7, 8, 9, 6, 3, 2, 1], actual)
def test_traverse_wide_odd_height_rectangle(self):
matrix = [
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
actual = [
i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
self.assertEqual(
[7, 6, 5, 9, 10, 11, 12, 8, 4, 3, 2, 1], actual)
def test_traverse_wide_even_height_rectangle(self):
matrix = [
[1, 2, 3, 4],
[5, 6, 7, 8]]
actual = [
i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
self.assertEqual(
[5, 6, 7, 8, 4, 3, 2, 1], actual)
def test_traverse_tall_even_width_rectangle(self):
matrix = [
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
[17, 18, 19, 20]]
actual = [
i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
self.assertEqual(
[10, 14, 15, 11, 7, 6, 5, 9, 13, 17, 18, 19, 20, 16, 12, 8, 4, 3, 2, 1],
actual)
def test_traverse_tall_odd_width_rectangle(self):
matrix = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[10, 11, 12]]
actual = [
i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
self.assertEqual(
[8, 5, 4, 7, 10, 11, 12,9, 6, 3, 2, 1], actual)
def test_traverse_large_matrix(self):
matrix = [[i * 1000 + j for j in range(0, 1000)]
for i in range(0, 1000)]
actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
self.assertEqual([500499, 500500, 499500, 499499], actual[0:4])
self.assertEqual([3, 2, 1, 0],
actual[-4:])
|
normal
|
{
"blob_id": "84f6336261e1c276f029822754842514715791df",
"index": 3604,
"step-1": "<mask token>\n\n\nclass TestOutwardCounterClockwise(TestCase):\n <mask token>\n <mask token>\n\n def test_traverse_single_element(self):\n matrix = [[1]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([1], actual)\n <mask token>\n\n def test_traverse_column_vector(self):\n matrix = [[1], [2], [3]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([3, 2, 1], actual)\n <mask token>\n <mask token>\n <mask token>\n\n def test_traverse_wide_even_height_rectangle(self):\n matrix = [[1, 2, 3, 4], [5, 6, 7, 8]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([5, 6, 7, 8, 4, 3, 2, 1], actual)\n <mask token>\n\n def test_traverse_tall_odd_width_rectangle(self):\n matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([8, 5, 4, 7, 10, 11, 12, 9, 6, 3, 2, 1], actual)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestOutwardCounterClockwise(TestCase):\n <mask token>\n <mask token>\n\n def test_traverse_single_element(self):\n matrix = [[1]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([1], actual)\n\n def test_traverse_row_vector(self):\n matrix = [[1, 2, 3]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([3, 2, 1], actual)\n\n def test_traverse_column_vector(self):\n matrix = [[1], [2], [3]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([3, 2, 1], actual)\n\n def test_traverse_even_square(self):\n matrix = [[1, 2], [3, 4]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([3, 4, 2, 1], actual)\n <mask token>\n <mask token>\n\n def test_traverse_wide_even_height_rectangle(self):\n matrix = [[1, 2, 3, 4], [5, 6, 7, 8]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([5, 6, 7, 8, 4, 3, 2, 1], actual)\n <mask token>\n\n def test_traverse_tall_odd_width_rectangle(self):\n matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([8, 5, 4, 7, 10, 11, 12, 9, 6, 3, 2, 1], actual)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestOutwardCounterClockwise(TestCase):\n\n def test_traverse_empty(self):\n matrix = []\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([], actual)\n <mask token>\n\n def test_traverse_single_element(self):\n matrix = [[1]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([1], actual)\n\n def test_traverse_row_vector(self):\n matrix = [[1, 2, 3]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([3, 2, 1], actual)\n\n def test_traverse_column_vector(self):\n matrix = [[1], [2], [3]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([3, 2, 1], actual)\n\n def test_traverse_even_square(self):\n matrix = [[1, 2], [3, 4]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([3, 4, 2, 1], actual)\n <mask token>\n\n def test_traverse_wide_odd_height_rectangle(self):\n matrix = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([7, 6, 5, 9, 10, 11, 12, 8, 4, 3, 2, 1], actual)\n\n def test_traverse_wide_even_height_rectangle(self):\n matrix = [[1, 2, 3, 4], [5, 6, 7, 8]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([5, 6, 7, 8, 4, 3, 2, 1], actual)\n\n def test_traverse_tall_even_width_rectangle(self):\n matrix = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15,\n 16], [17, 18, 19, 20]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([10, 14, 15, 11, 7, 6, 5, 9, 13, 17, 18, 19, 20, \n 16, 12, 8, 4, 3, 2, 1], actual)\n\n def test_traverse_tall_odd_width_rectangle(self):\n matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n 
False)]\n self.assertEqual([8, 5, 4, 7, 10, 11, 12, 9, 6, 3, 2, 1], actual)\n\n def test_traverse_large_matrix(self):\n matrix = [[(i * 1000 + j) for j in range(0, 1000)] for i in range(0,\n 1000)]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([500499, 500500, 499500, 499499], actual[0:4])\n self.assertEqual([3, 2, 1, 0], actual[-4:])\n",
"step-4": "from unittest import TestCase\nfrom spiral.spiral_matrix import SpiralMatrix\n\n\nclass TestOutwardCounterClockwise(TestCase):\n\n def test_traverse_empty(self):\n matrix = []\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([], actual)\n\n def test_traverse_empty_vector(self):\n matrix = [[]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([], actual)\n\n def test_traverse_single_element(self):\n matrix = [[1]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([1], actual)\n\n def test_traverse_row_vector(self):\n matrix = [[1, 2, 3]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([3, 2, 1], actual)\n\n def test_traverse_column_vector(self):\n matrix = [[1], [2], [3]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([3, 2, 1], actual)\n\n def test_traverse_even_square(self):\n matrix = [[1, 2], [3, 4]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([3, 4, 2, 1], actual)\n\n def test_traverse_odd_square(self):\n matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([5, 4, 7, 8, 9, 6, 3, 2, 1], actual)\n\n def test_traverse_wide_odd_height_rectangle(self):\n matrix = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([7, 6, 5, 9, 10, 11, 12, 8, 4, 3, 2, 1], actual)\n\n def test_traverse_wide_even_height_rectangle(self):\n matrix = [[1, 2, 3, 4], [5, 6, 7, 8]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([5, 6, 7, 8, 4, 3, 2, 1], actual)\n\n def test_traverse_tall_even_width_rectangle(self):\n matrix = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 
10, 11, 12], [13, 14, 15,\n 16], [17, 18, 19, 20]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([10, 14, 15, 11, 7, 6, 5, 9, 13, 17, 18, 19, 20, \n 16, 12, 8, 4, 3, 2, 1], actual)\n\n def test_traverse_tall_odd_width_rectangle(self):\n matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([8, 5, 4, 7, 10, 11, 12, 9, 6, 3, 2, 1], actual)\n\n def test_traverse_large_matrix(self):\n matrix = [[(i * 1000 + j) for j in range(0, 1000)] for i in range(0,\n 1000)]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=\n False)]\n self.assertEqual([500499, 500500, 499500, 499499], actual[0:4])\n self.assertEqual([3, 2, 1, 0], actual[-4:])\n",
"step-5": "from unittest import TestCase\nfrom spiral.spiral_matrix import SpiralMatrix\n\n\nclass TestOutwardCounterClockwise(TestCase):\n def test_traverse_empty(self):\n matrix = []\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]\n self.assertEqual([], actual)\n\n def test_traverse_empty_vector(self):\n matrix = [[]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]\n self.assertEqual([], actual)\n\n def test_traverse_single_element(self):\n matrix = [[1]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]\n self.assertEqual([1], actual)\n\n def test_traverse_row_vector(self):\n matrix = [[1, 2, 3]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]\n self.assertEqual([3, 2, 1], actual)\n\n def test_traverse_column_vector(self):\n matrix = [\n [1],\n [2],\n [3]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]\n self.assertEqual([3, 2, 1], actual)\n\n def test_traverse_even_square(self):\n matrix = [\n [1, 2],\n [3, 4]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]\n self.assertEqual([3, 4, 2, 1], actual)\n\n def test_traverse_odd_square(self):\n matrix = [\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]\n self.assertEqual([5, 4, 7, 8, 9, 6, 3, 2, 1], actual)\n\n def test_traverse_wide_odd_height_rectangle(self):\n matrix = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12]]\n actual = [\n i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]\n self.assertEqual(\n [7, 6, 5, 9, 10, 11, 12, 8, 4, 3, 2, 1], actual)\n\n def test_traverse_wide_even_height_rectangle(self):\n matrix = [\n [1, 2, 3, 4],\n [5, 6, 7, 8]]\n actual = [\n i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]\n self.assertEqual(\n [5, 6, 7, 8, 4, 3, 2, 1], actual)\n\n def test_traverse_tall_even_width_rectangle(self):\n matrix = [\n [1, 2, 3, 4],\n 
[5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n [17, 18, 19, 20]]\n actual = [\n i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]\n self.assertEqual(\n [10, 14, 15, 11, 7, 6, 5, 9, 13, 17, 18, 19, 20, 16, 12, 8, 4, 3, 2, 1],\n actual)\n\n def test_traverse_tall_odd_width_rectangle(self):\n matrix = [\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9],\n [10, 11, 12]]\n actual = [\n i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]\n self.assertEqual(\n [8, 5, 4, 7, 10, 11, 12,9, 6, 3, 2, 1], actual)\n\n def test_traverse_large_matrix(self):\n matrix = [[i * 1000 + j for j in range(0, 1000)]\n for i in range(0, 1000)]\n actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]\n self.assertEqual([500499, 500500, 499500, 499499], actual[0:4])\n self.assertEqual([3, 2, 1, 0],\n actual[-4:])\n",
"step-ids": [
5,
7,
11,
14,
15
]
}
|
[
5,
7,
11,
14,
15
] |
import pymysql
import pymssql
import socket
import threading
from time import sleep
address = ('127.0.0.1', 20176)
usermode = {1: 'Wangcz_Students',
2: 'Wangcz_Teachers',
3: 'Wangcz_Admin'
}
def checkuser(username, password, cursor, user_db):
cursor.execute('''select * from %s WHERE username = %d AND password = %d''' % (user_db, int(username), int(password)))
return cursor.fetchall()
def tcplink(sock, addr):
conn = pymysql.connect()
cursor = conn.cursor()
while True:
bytedata = sock.recv(1024)
data = eval(bytedata.decode())
sleep(1)
if data:
if 'username' and 'password' and 'login_mode' in data.keys():
if checkuser(data['username'],data['password'],cursor=cursor, user_db=usermode[data[login_mode]]):
sock.send(b'Login success')#登陆成功
else:
sock.send(b'Error')#发送错误消息
else:
break
sock.close()
if __name__ == '__main__':
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(address)
s.listen(10)
while True:
sock,addr = s.accept()
t = threading.Thread(target=tcplink,args=(sock,addr))
|
normal
|
{
"blob_id": "758e5b9a65132c4bdee4600e79c27f9c0f272312",
"index": 8308,
"step-1": "<mask token>\n\n\ndef checkuser(username, password, cursor, user_db):\n cursor.execute('select * from %s WHERE username = %d AND password = %d' %\n (user_db, int(username), int(password)))\n return cursor.fetchall()\n\n\ndef tcplink(sock, addr):\n conn = pymysql.connect()\n cursor = conn.cursor()\n while True:\n bytedata = sock.recv(1024)\n data = eval(bytedata.decode())\n sleep(1)\n if data:\n if 'username' and 'password' and 'login_mode' in data.keys():\n if checkuser(data['username'], data['password'], cursor=\n cursor, user_db=usermode[data[login_mode]]):\n sock.send(b'Login success')\n else:\n sock.send(b'Error')\n else:\n break\n sock.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef checkuser(username, password, cursor, user_db):\n cursor.execute('select * from %s WHERE username = %d AND password = %d' %\n (user_db, int(username), int(password)))\n return cursor.fetchall()\n\n\ndef tcplink(sock, addr):\n conn = pymysql.connect()\n cursor = conn.cursor()\n while True:\n bytedata = sock.recv(1024)\n data = eval(bytedata.decode())\n sleep(1)\n if data:\n if 'username' and 'password' and 'login_mode' in data.keys():\n if checkuser(data['username'], data['password'], cursor=\n cursor, user_db=usermode[data[login_mode]]):\n sock.send(b'Login success')\n else:\n sock.send(b'Error')\n else:\n break\n sock.close()\n\n\nif __name__ == '__main__':\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(address)\n s.listen(10)\n while True:\n sock, addr = s.accept()\n t = threading.Thread(target=tcplink, args=(sock, addr))\n",
"step-3": "<mask token>\naddress = '127.0.0.1', 20176\nusermode = {(1): 'Wangcz_Students', (2): 'Wangcz_Teachers', (3): 'Wangcz_Admin'\n }\n\n\ndef checkuser(username, password, cursor, user_db):\n cursor.execute('select * from %s WHERE username = %d AND password = %d' %\n (user_db, int(username), int(password)))\n return cursor.fetchall()\n\n\ndef tcplink(sock, addr):\n conn = pymysql.connect()\n cursor = conn.cursor()\n while True:\n bytedata = sock.recv(1024)\n data = eval(bytedata.decode())\n sleep(1)\n if data:\n if 'username' and 'password' and 'login_mode' in data.keys():\n if checkuser(data['username'], data['password'], cursor=\n cursor, user_db=usermode[data[login_mode]]):\n sock.send(b'Login success')\n else:\n sock.send(b'Error')\n else:\n break\n sock.close()\n\n\nif __name__ == '__main__':\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(address)\n s.listen(10)\n while True:\n sock, addr = s.accept()\n t = threading.Thread(target=tcplink, args=(sock, addr))\n",
"step-4": "import pymysql\nimport pymssql\nimport socket\nimport threading\nfrom time import sleep\naddress = '127.0.0.1', 20176\nusermode = {(1): 'Wangcz_Students', (2): 'Wangcz_Teachers', (3): 'Wangcz_Admin'\n }\n\n\ndef checkuser(username, password, cursor, user_db):\n cursor.execute('select * from %s WHERE username = %d AND password = %d' %\n (user_db, int(username), int(password)))\n return cursor.fetchall()\n\n\ndef tcplink(sock, addr):\n conn = pymysql.connect()\n cursor = conn.cursor()\n while True:\n bytedata = sock.recv(1024)\n data = eval(bytedata.decode())\n sleep(1)\n if data:\n if 'username' and 'password' and 'login_mode' in data.keys():\n if checkuser(data['username'], data['password'], cursor=\n cursor, user_db=usermode[data[login_mode]]):\n sock.send(b'Login success')\n else:\n sock.send(b'Error')\n else:\n break\n sock.close()\n\n\nif __name__ == '__main__':\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(address)\n s.listen(10)\n while True:\n sock, addr = s.accept()\n t = threading.Thread(target=tcplink, args=(sock, addr))\n",
"step-5": "import pymysql\nimport pymssql\nimport socket\nimport threading\nfrom time import sleep\n\naddress = ('127.0.0.1', 20176)\nusermode = {1: 'Wangcz_Students',\n 2: 'Wangcz_Teachers',\n 3: 'Wangcz_Admin'\n }\n\ndef checkuser(username, password, cursor, user_db):\n\n cursor.execute('''select * from %s WHERE username = %d AND password = %d''' % (user_db, int(username), int(password)))\n return cursor.fetchall()\n\n\ndef tcplink(sock, addr):\n conn = pymysql.connect()\n cursor = conn.cursor()\n while True:\n bytedata = sock.recv(1024)\n data = eval(bytedata.decode())\n sleep(1)\n if data:\n if 'username' and 'password' and 'login_mode' in data.keys():\n if checkuser(data['username'],data['password'],cursor=cursor, user_db=usermode[data[login_mode]]):\n sock.send(b'Login success')#登陆成功\n else:\n sock.send(b'Error')#发送错误消息\n else:\n break\n\n sock.close()\n\nif __name__ == '__main__':\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(address)\n s.listen(10)\n while True:\n sock,addr = s.accept()\n t = threading.Thread(target=tcplink,args=(sock,addr))",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/env python3
import cgitb
import sys
from auth import is_admin
cgitb.enable()
sys.stdout.write('Content-Type: application/octet-stream\n\n')
sys.stdout.write('yes' if is_admin() else 'no')
sys.stdout.flush()
|
normal
|
{
"blob_id": "be9972d899a167a8ca2728960e55cda538793cc5",
"index": 1576,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncgitb.enable()\nsys.stdout.write('Content-Type: application/octet-stream\\n\\n')\nsys.stdout.write('yes' if is_admin() else 'no')\nsys.stdout.flush()\n",
"step-3": "import cgitb\nimport sys\nfrom auth import is_admin\ncgitb.enable()\nsys.stdout.write('Content-Type: application/octet-stream\\n\\n')\nsys.stdout.write('yes' if is_admin() else 'no')\nsys.stdout.flush()\n",
"step-4": "#!/usr/bin/env python3\nimport cgitb\nimport sys\n\nfrom auth import is_admin\n\ncgitb.enable()\nsys.stdout.write('Content-Type: application/octet-stream\\n\\n')\nsys.stdout.write('yes' if is_admin() else 'no')\nsys.stdout.flush()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def randomizer(n, garrafa_vidro, lata_metal, copo_plastico, bola_papel,
maça_organico):
lixos = [garrafa_vidro, lata_metal, copo_plastico, bola_papel,
maça_organico]
return lixos[n]
|
flexible
|
{
"blob_id": "71a9c9b8f47dcfbecc154c44d5a72ddbd852145a",
"index": 328,
"step-1": "<mask token>\n",
"step-2": "def randomizer(n, garrafa_vidro, lata_metal, copo_plastico, bola_papel,\n maça_organico):\n lixos = [garrafa_vidro, lata_metal, copo_plastico, bola_papel,\n maça_organico]\n return lixos[n]\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from PyQt4 import QtGui
from PyQt4.QtCore import pyqtSignal, pyqtSlot, QObject, Qt
from twisted.internet.defer import inlineCallbacks
import numpy as np
from connection import connection
import pyqtgraph as pg
from pyqtgraph.SignalProxy import SignalProxy
import sys
import time
global harwareConfiguration
class graphingwidget(QtGui.QWidget):
SIGNALID = 104692
update_signal = pyqtSignal(list)
def __init__(self,reactor, configpath):
super(graphingwidget,self).__init__()
self.reactor = reactor
self.configpath = configpath
self.initialize()
self.timeoffset = 200
def mouseMoved(self,evt):
pos = evt
if self.figure.sceneBoundingRect().contains(pos):
mousePoint = self.figure.plotItem.vb.mapSceneToView(pos)
index = int(mousePoint.x())
self.label.setPos(mousePoint)
self.label.setText("{:d}".format(int(mousePoint.x())))
def initialize(self):
sys.path.append(self.configpath)
global hardwareConfiguration
from hardwareConfiguration import hardwareConfiguration
self.ddslist = hardwareConfiguration.ddsDict
self.do_layout()
self.figure.scene().sigMouseMoved.connect(self.mouseMoved)
def do_layout(self):
yaxis = pg.AxisItem(orientation='left')
ticks = []
sorteddict = sorted(self.ddslist.items(),key =lambda x: x[1].channelnumber)
for i in range(0,17):
if i < len(sorteddict):
string = sorteddict[i][0]
else:
string = ""
ticks.append((i+0.5,string))
yaxis.setTicks([ticks])
self.figure = pg.PlotWidget(axisItems ={'left':yaxis})
self.layoutVertical = QtGui.QVBoxLayout(self)
self.layoutVertical.addWidget(self.figure)
for adds,config in self.ddslist.iteritems():
self.figure.addItem(pg.PlotCurveItem(range(10),[1]*10,pen='w'))
self.figure.setYRange(0,17)
self.figure.setMouseEnabled(y=False)
self.figure.showGrid(x=True,y=True,alpha=0.4)
self.label = pg.TextItem(anchor=(0,1))
self.figure.plotItem.addItem(self.label)
@pyqtSlot(list,int,list)
def do_sequence(self,sequence,timelength,steadystatenames):
xdatalist = []
ydatalist = []
for achannelname, adds in self.ddslist.iteritems():
channelpulses = [i for i in sequence if i[0] == achannelname]
channelpulses.sort(key= lambda name: name[1]['ms'])
starttimes = []
endtimes = []
frequencies = []
amplitudes = []
if achannelname in steadystatenames:
starttimes.append(-50)
endtimes.append(0)
for apulse in channelpulses:
starttimes.append(apulse[1]['ms'])
endtimes.append((apulse[1]+ apulse[2])['ms'])
yhigh = 0.75+adds.channelnumber
ylow = 0.25+adds.channelnumber
if len(starttimes) < 0:
xdata = [starttimes[0]+self.timeoffset]
ydata = [yhigh]
else:
xdata = [self.timeoffset]
ydata = [ylow]
for i in range(len(starttimes)):
xdata += [starttimes[i]+self.timeoffset]*2 + [endtimes[i]+self.timeoffset]*2
if ydata[-1] == ylow:
ydata += [ylow,yhigh,yhigh,ylow]
else:
ydata += [yhigh,ylow,ylow,yhigh]
xdata.append(timelength)
ydata.append(ylow)
xdatalist.append(xdata)
ydatalist.append(ydata)
self.plot(xdatalist,ydatalist)
def plot(self,xlist,ylist):
self.figure.clear()
self.figure.addItem(self.label)
for i in range(len(xlist)):
xdata = xlist[i]
ydata = ylist[i]
if len(xdata)>1:
self.figure.addItem(pg.PlotCurveItem(xdata,ydata,pen='w'))
self.figure.addItem(pg.InfiniteLine(self.timeoffset,pen=pg.mkPen('r',style=Qt.DashLine)))
|
normal
|
{
"blob_id": "8173afbd82b8da04db4625ac686c0d052e65a21c",
"index": 9470,
"step-1": "<mask token>\n\n\nclass graphingwidget(QtGui.QWidget):\n <mask token>\n <mask token>\n\n def __init__(self, reactor, configpath):\n super(graphingwidget, self).__init__()\n self.reactor = reactor\n self.configpath = configpath\n self.initialize()\n self.timeoffset = 200\n\n def mouseMoved(self, evt):\n pos = evt\n if self.figure.sceneBoundingRect().contains(pos):\n mousePoint = self.figure.plotItem.vb.mapSceneToView(pos)\n index = int(mousePoint.x())\n self.label.setPos(mousePoint)\n self.label.setText('{:d}'.format(int(mousePoint.x())))\n\n def initialize(self):\n sys.path.append(self.configpath)\n global hardwareConfiguration\n from hardwareConfiguration import hardwareConfiguration\n self.ddslist = hardwareConfiguration.ddsDict\n self.do_layout()\n self.figure.scene().sigMouseMoved.connect(self.mouseMoved)\n\n def do_layout(self):\n yaxis = pg.AxisItem(orientation='left')\n ticks = []\n sorteddict = sorted(self.ddslist.items(), key=lambda x: x[1].\n channelnumber)\n for i in range(0, 17):\n if i < len(sorteddict):\n string = sorteddict[i][0]\n else:\n string = ''\n ticks.append((i + 0.5, string))\n yaxis.setTicks([ticks])\n self.figure = pg.PlotWidget(axisItems={'left': yaxis})\n self.layoutVertical = QtGui.QVBoxLayout(self)\n self.layoutVertical.addWidget(self.figure)\n for adds, config in self.ddslist.iteritems():\n self.figure.addItem(pg.PlotCurveItem(range(10), [1] * 10, pen='w'))\n self.figure.setYRange(0, 17)\n self.figure.setMouseEnabled(y=False)\n self.figure.showGrid(x=True, y=True, alpha=0.4)\n self.label = pg.TextItem(anchor=(0, 1))\n self.figure.plotItem.addItem(self.label)\n\n @pyqtSlot(list, int, list)\n def do_sequence(self, sequence, timelength, steadystatenames):\n xdatalist = []\n ydatalist = []\n for achannelname, adds in self.ddslist.iteritems():\n channelpulses = [i for i in sequence if i[0] == achannelname]\n channelpulses.sort(key=lambda name: name[1]['ms'])\n starttimes = []\n endtimes = []\n frequencies = []\n amplitudes = 
[]\n if achannelname in steadystatenames:\n starttimes.append(-50)\n endtimes.append(0)\n for apulse in channelpulses:\n starttimes.append(apulse[1]['ms'])\n endtimes.append((apulse[1] + apulse[2])['ms'])\n yhigh = 0.75 + adds.channelnumber\n ylow = 0.25 + adds.channelnumber\n if len(starttimes) < 0:\n xdata = [starttimes[0] + self.timeoffset]\n ydata = [yhigh]\n else:\n xdata = [self.timeoffset]\n ydata = [ylow]\n for i in range(len(starttimes)):\n xdata += [starttimes[i] + self.timeoffset] * 2 + [endtimes[\n i] + self.timeoffset] * 2\n if ydata[-1] == ylow:\n ydata += [ylow, yhigh, yhigh, ylow]\n else:\n ydata += [yhigh, ylow, ylow, yhigh]\n xdata.append(timelength)\n ydata.append(ylow)\n xdatalist.append(xdata)\n ydatalist.append(ydata)\n self.plot(xdatalist, ydatalist)\n\n def plot(self, xlist, ylist):\n self.figure.clear()\n self.figure.addItem(self.label)\n for i in range(len(xlist)):\n xdata = xlist[i]\n ydata = ylist[i]\n if len(xdata) > 1:\n self.figure.addItem(pg.PlotCurveItem(xdata, ydata, pen='w'))\n self.figure.addItem(pg.InfiniteLine(self.timeoffset, pen=pg.mkPen(\n 'r', style=Qt.DashLine)))\n",
"step-2": "<mask token>\n\n\nclass graphingwidget(QtGui.QWidget):\n SIGNALID = 104692\n update_signal = pyqtSignal(list)\n\n def __init__(self, reactor, configpath):\n super(graphingwidget, self).__init__()\n self.reactor = reactor\n self.configpath = configpath\n self.initialize()\n self.timeoffset = 200\n\n def mouseMoved(self, evt):\n pos = evt\n if self.figure.sceneBoundingRect().contains(pos):\n mousePoint = self.figure.plotItem.vb.mapSceneToView(pos)\n index = int(mousePoint.x())\n self.label.setPos(mousePoint)\n self.label.setText('{:d}'.format(int(mousePoint.x())))\n\n def initialize(self):\n sys.path.append(self.configpath)\n global hardwareConfiguration\n from hardwareConfiguration import hardwareConfiguration\n self.ddslist = hardwareConfiguration.ddsDict\n self.do_layout()\n self.figure.scene().sigMouseMoved.connect(self.mouseMoved)\n\n def do_layout(self):\n yaxis = pg.AxisItem(orientation='left')\n ticks = []\n sorteddict = sorted(self.ddslist.items(), key=lambda x: x[1].\n channelnumber)\n for i in range(0, 17):\n if i < len(sorteddict):\n string = sorteddict[i][0]\n else:\n string = ''\n ticks.append((i + 0.5, string))\n yaxis.setTicks([ticks])\n self.figure = pg.PlotWidget(axisItems={'left': yaxis})\n self.layoutVertical = QtGui.QVBoxLayout(self)\n self.layoutVertical.addWidget(self.figure)\n for adds, config in self.ddslist.iteritems():\n self.figure.addItem(pg.PlotCurveItem(range(10), [1] * 10, pen='w'))\n self.figure.setYRange(0, 17)\n self.figure.setMouseEnabled(y=False)\n self.figure.showGrid(x=True, y=True, alpha=0.4)\n self.label = pg.TextItem(anchor=(0, 1))\n self.figure.plotItem.addItem(self.label)\n\n @pyqtSlot(list, int, list)\n def do_sequence(self, sequence, timelength, steadystatenames):\n xdatalist = []\n ydatalist = []\n for achannelname, adds in self.ddslist.iteritems():\n channelpulses = [i for i in sequence if i[0] == achannelname]\n channelpulses.sort(key=lambda name: name[1]['ms'])\n starttimes = []\n endtimes = []\n 
frequencies = []\n amplitudes = []\n if achannelname in steadystatenames:\n starttimes.append(-50)\n endtimes.append(0)\n for apulse in channelpulses:\n starttimes.append(apulse[1]['ms'])\n endtimes.append((apulse[1] + apulse[2])['ms'])\n yhigh = 0.75 + adds.channelnumber\n ylow = 0.25 + adds.channelnumber\n if len(starttimes) < 0:\n xdata = [starttimes[0] + self.timeoffset]\n ydata = [yhigh]\n else:\n xdata = [self.timeoffset]\n ydata = [ylow]\n for i in range(len(starttimes)):\n xdata += [starttimes[i] + self.timeoffset] * 2 + [endtimes[\n i] + self.timeoffset] * 2\n if ydata[-1] == ylow:\n ydata += [ylow, yhigh, yhigh, ylow]\n else:\n ydata += [yhigh, ylow, ylow, yhigh]\n xdata.append(timelength)\n ydata.append(ylow)\n xdatalist.append(xdata)\n ydatalist.append(ydata)\n self.plot(xdatalist, ydatalist)\n\n def plot(self, xlist, ylist):\n self.figure.clear()\n self.figure.addItem(self.label)\n for i in range(len(xlist)):\n xdata = xlist[i]\n ydata = ylist[i]\n if len(xdata) > 1:\n self.figure.addItem(pg.PlotCurveItem(xdata, ydata, pen='w'))\n self.figure.addItem(pg.InfiniteLine(self.timeoffset, pen=pg.mkPen(\n 'r', style=Qt.DashLine)))\n",
"step-3": "<mask token>\nglobal harwareConfiguration\n\n\nclass graphingwidget(QtGui.QWidget):\n SIGNALID = 104692\n update_signal = pyqtSignal(list)\n\n def __init__(self, reactor, configpath):\n super(graphingwidget, self).__init__()\n self.reactor = reactor\n self.configpath = configpath\n self.initialize()\n self.timeoffset = 200\n\n def mouseMoved(self, evt):\n pos = evt\n if self.figure.sceneBoundingRect().contains(pos):\n mousePoint = self.figure.plotItem.vb.mapSceneToView(pos)\n index = int(mousePoint.x())\n self.label.setPos(mousePoint)\n self.label.setText('{:d}'.format(int(mousePoint.x())))\n\n def initialize(self):\n sys.path.append(self.configpath)\n global hardwareConfiguration\n from hardwareConfiguration import hardwareConfiguration\n self.ddslist = hardwareConfiguration.ddsDict\n self.do_layout()\n self.figure.scene().sigMouseMoved.connect(self.mouseMoved)\n\n def do_layout(self):\n yaxis = pg.AxisItem(orientation='left')\n ticks = []\n sorteddict = sorted(self.ddslist.items(), key=lambda x: x[1].\n channelnumber)\n for i in range(0, 17):\n if i < len(sorteddict):\n string = sorteddict[i][0]\n else:\n string = ''\n ticks.append((i + 0.5, string))\n yaxis.setTicks([ticks])\n self.figure = pg.PlotWidget(axisItems={'left': yaxis})\n self.layoutVertical = QtGui.QVBoxLayout(self)\n self.layoutVertical.addWidget(self.figure)\n for adds, config in self.ddslist.iteritems():\n self.figure.addItem(pg.PlotCurveItem(range(10), [1] * 10, pen='w'))\n self.figure.setYRange(0, 17)\n self.figure.setMouseEnabled(y=False)\n self.figure.showGrid(x=True, y=True, alpha=0.4)\n self.label = pg.TextItem(anchor=(0, 1))\n self.figure.plotItem.addItem(self.label)\n\n @pyqtSlot(list, int, list)\n def do_sequence(self, sequence, timelength, steadystatenames):\n xdatalist = []\n ydatalist = []\n for achannelname, adds in self.ddslist.iteritems():\n channelpulses = [i for i in sequence if i[0] == achannelname]\n channelpulses.sort(key=lambda name: name[1]['ms'])\n starttimes = 
[]\n endtimes = []\n frequencies = []\n amplitudes = []\n if achannelname in steadystatenames:\n starttimes.append(-50)\n endtimes.append(0)\n for apulse in channelpulses:\n starttimes.append(apulse[1]['ms'])\n endtimes.append((apulse[1] + apulse[2])['ms'])\n yhigh = 0.75 + adds.channelnumber\n ylow = 0.25 + adds.channelnumber\n if len(starttimes) < 0:\n xdata = [starttimes[0] + self.timeoffset]\n ydata = [yhigh]\n else:\n xdata = [self.timeoffset]\n ydata = [ylow]\n for i in range(len(starttimes)):\n xdata += [starttimes[i] + self.timeoffset] * 2 + [endtimes[\n i] + self.timeoffset] * 2\n if ydata[-1] == ylow:\n ydata += [ylow, yhigh, yhigh, ylow]\n else:\n ydata += [yhigh, ylow, ylow, yhigh]\n xdata.append(timelength)\n ydata.append(ylow)\n xdatalist.append(xdata)\n ydatalist.append(ydata)\n self.plot(xdatalist, ydatalist)\n\n def plot(self, xlist, ylist):\n self.figure.clear()\n self.figure.addItem(self.label)\n for i in range(len(xlist)):\n xdata = xlist[i]\n ydata = ylist[i]\n if len(xdata) > 1:\n self.figure.addItem(pg.PlotCurveItem(xdata, ydata, pen='w'))\n self.figure.addItem(pg.InfiniteLine(self.timeoffset, pen=pg.mkPen(\n 'r', style=Qt.DashLine)))\n",
"step-4": "from PyQt4 import QtGui\nfrom PyQt4.QtCore import pyqtSignal, pyqtSlot, QObject, Qt\nfrom twisted.internet.defer import inlineCallbacks\nimport numpy as np\nfrom connection import connection\nimport pyqtgraph as pg\nfrom pyqtgraph.SignalProxy import SignalProxy\nimport sys\nimport time\nglobal harwareConfiguration\n\n\nclass graphingwidget(QtGui.QWidget):\n SIGNALID = 104692\n update_signal = pyqtSignal(list)\n\n def __init__(self, reactor, configpath):\n super(graphingwidget, self).__init__()\n self.reactor = reactor\n self.configpath = configpath\n self.initialize()\n self.timeoffset = 200\n\n def mouseMoved(self, evt):\n pos = evt\n if self.figure.sceneBoundingRect().contains(pos):\n mousePoint = self.figure.plotItem.vb.mapSceneToView(pos)\n index = int(mousePoint.x())\n self.label.setPos(mousePoint)\n self.label.setText('{:d}'.format(int(mousePoint.x())))\n\n def initialize(self):\n sys.path.append(self.configpath)\n global hardwareConfiguration\n from hardwareConfiguration import hardwareConfiguration\n self.ddslist = hardwareConfiguration.ddsDict\n self.do_layout()\n self.figure.scene().sigMouseMoved.connect(self.mouseMoved)\n\n def do_layout(self):\n yaxis = pg.AxisItem(orientation='left')\n ticks = []\n sorteddict = sorted(self.ddslist.items(), key=lambda x: x[1].\n channelnumber)\n for i in range(0, 17):\n if i < len(sorteddict):\n string = sorteddict[i][0]\n else:\n string = ''\n ticks.append((i + 0.5, string))\n yaxis.setTicks([ticks])\n self.figure = pg.PlotWidget(axisItems={'left': yaxis})\n self.layoutVertical = QtGui.QVBoxLayout(self)\n self.layoutVertical.addWidget(self.figure)\n for adds, config in self.ddslist.iteritems():\n self.figure.addItem(pg.PlotCurveItem(range(10), [1] * 10, pen='w'))\n self.figure.setYRange(0, 17)\n self.figure.setMouseEnabled(y=False)\n self.figure.showGrid(x=True, y=True, alpha=0.4)\n self.label = pg.TextItem(anchor=(0, 1))\n self.figure.plotItem.addItem(self.label)\n\n @pyqtSlot(list, int, list)\n def 
do_sequence(self, sequence, timelength, steadystatenames):\n xdatalist = []\n ydatalist = []\n for achannelname, adds in self.ddslist.iteritems():\n channelpulses = [i for i in sequence if i[0] == achannelname]\n channelpulses.sort(key=lambda name: name[1]['ms'])\n starttimes = []\n endtimes = []\n frequencies = []\n amplitudes = []\n if achannelname in steadystatenames:\n starttimes.append(-50)\n endtimes.append(0)\n for apulse in channelpulses:\n starttimes.append(apulse[1]['ms'])\n endtimes.append((apulse[1] + apulse[2])['ms'])\n yhigh = 0.75 + adds.channelnumber\n ylow = 0.25 + adds.channelnumber\n if len(starttimes) < 0:\n xdata = [starttimes[0] + self.timeoffset]\n ydata = [yhigh]\n else:\n xdata = [self.timeoffset]\n ydata = [ylow]\n for i in range(len(starttimes)):\n xdata += [starttimes[i] + self.timeoffset] * 2 + [endtimes[\n i] + self.timeoffset] * 2\n if ydata[-1] == ylow:\n ydata += [ylow, yhigh, yhigh, ylow]\n else:\n ydata += [yhigh, ylow, ylow, yhigh]\n xdata.append(timelength)\n ydata.append(ylow)\n xdatalist.append(xdata)\n ydatalist.append(ydata)\n self.plot(xdatalist, ydatalist)\n\n def plot(self, xlist, ylist):\n self.figure.clear()\n self.figure.addItem(self.label)\n for i in range(len(xlist)):\n xdata = xlist[i]\n ydata = ylist[i]\n if len(xdata) > 1:\n self.figure.addItem(pg.PlotCurveItem(xdata, ydata, pen='w'))\n self.figure.addItem(pg.InfiniteLine(self.timeoffset, pen=pg.mkPen(\n 'r', style=Qt.DashLine)))\n",
"step-5": "from PyQt4 import QtGui\nfrom PyQt4.QtCore import pyqtSignal, pyqtSlot, QObject, Qt\nfrom twisted.internet.defer import inlineCallbacks\nimport numpy as np\nfrom connection import connection\nimport pyqtgraph as pg\nfrom pyqtgraph.SignalProxy import SignalProxy\nimport sys\nimport time\n\nglobal harwareConfiguration\n\n\nclass graphingwidget(QtGui.QWidget):\n\n SIGNALID = 104692\n update_signal = pyqtSignal(list)\n def __init__(self,reactor, configpath):\n super(graphingwidget,self).__init__()\n self.reactor = reactor\n self.configpath = configpath\n self.initialize()\n self.timeoffset = 200\n\n \n def mouseMoved(self,evt):\n pos = evt\n if self.figure.sceneBoundingRect().contains(pos):\n mousePoint = self.figure.plotItem.vb.mapSceneToView(pos)\n index = int(mousePoint.x())\n self.label.setPos(mousePoint)\n self.label.setText(\"{:d}\".format(int(mousePoint.x())))\n \n def initialize(self):\n sys.path.append(self.configpath)\n global hardwareConfiguration\n from hardwareConfiguration import hardwareConfiguration\n self.ddslist = hardwareConfiguration.ddsDict\n self.do_layout()\n self.figure.scene().sigMouseMoved.connect(self.mouseMoved)\n \n\n\n def do_layout(self):\n yaxis = pg.AxisItem(orientation='left')\n ticks = []\n sorteddict = sorted(self.ddslist.items(),key =lambda x: x[1].channelnumber)\n for i in range(0,17):\n if i < len(sorteddict):\n string = sorteddict[i][0]\n else:\n string = \"\"\n ticks.append((i+0.5,string))\n yaxis.setTicks([ticks])\n self.figure = pg.PlotWidget(axisItems ={'left':yaxis})\n self.layoutVertical = QtGui.QVBoxLayout(self)\n self.layoutVertical.addWidget(self.figure)\n \n for adds,config in self.ddslist.iteritems():\n self.figure.addItem(pg.PlotCurveItem(range(10),[1]*10,pen='w'))\n self.figure.setYRange(0,17)\n self.figure.setMouseEnabled(y=False)\n self.figure.showGrid(x=True,y=True,alpha=0.4)\n self.label = pg.TextItem(anchor=(0,1))\n self.figure.plotItem.addItem(self.label)\n\n @pyqtSlot(list,int,list) \n def 
do_sequence(self,sequence,timelength,steadystatenames):\n xdatalist = []\n ydatalist = []\n for achannelname, adds in self.ddslist.iteritems():\n channelpulses = [i for i in sequence if i[0] == achannelname]\n channelpulses.sort(key= lambda name: name[1]['ms'])\n starttimes = []\n endtimes = []\n frequencies = []\n amplitudes = []\n if achannelname in steadystatenames:\n starttimes.append(-50)\n endtimes.append(0)\n for apulse in channelpulses:\n starttimes.append(apulse[1]['ms'])\n endtimes.append((apulse[1]+ apulse[2])['ms'])\n yhigh = 0.75+adds.channelnumber\n ylow = 0.25+adds.channelnumber\n \n if len(starttimes) < 0:\n xdata = [starttimes[0]+self.timeoffset]\n ydata = [yhigh]\n else:\n xdata = [self.timeoffset]\n ydata = [ylow]\n for i in range(len(starttimes)):\n xdata += [starttimes[i]+self.timeoffset]*2 + [endtimes[i]+self.timeoffset]*2\n \n if ydata[-1] == ylow:\n ydata += [ylow,yhigh,yhigh,ylow]\n else:\n ydata += [yhigh,ylow,ylow,yhigh]\n xdata.append(timelength)\n ydata.append(ylow)\n xdatalist.append(xdata)\n ydatalist.append(ydata)\n self.plot(xdatalist,ydatalist)\n \n \n def plot(self,xlist,ylist):\n self.figure.clear()\n self.figure.addItem(self.label)\n for i in range(len(xlist)):\n xdata = xlist[i]\n ydata = ylist[i]\n if len(xdata)>1:\n self.figure.addItem(pg.PlotCurveItem(xdata,ydata,pen='w'))\n self.figure.addItem(pg.InfiniteLine(self.timeoffset,pen=pg.mkPen('r',style=Qt.DashLine)))\n\n ",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
n=int(input("Enter any int number:\n"))
x=1
while(x<13):
print(n ," x ", x ," = ", n*x)
x=x+1
|
normal
|
{
"blob_id": "a6c07146f1cbc766cd464dab620d1fb075759c12",
"index": 4213,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile x < 13:\n print(n, ' x ', x, ' = ', n * x)\n x = x + 1\n",
"step-3": "n = int(input('Enter any int number:\\n'))\nx = 1\nwhile x < 13:\n print(n, ' x ', x, ' = ', n * x)\n x = x + 1\n",
"step-4": "n=int(input(\"Enter any int number:\\n\"))\n\nx=1\nwhile(x<13):\n print(n ,\" x \", x ,\" = \", n*x)\n x=x+1\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
@user.route('/users/add', methods=['GET', 'POST'])
def add_user():
"""
load form page and add to the database
"""
if request.method == 'POST':
user = User(username=request.form['username'], password=request.
form['password'], is_admin=request.form.getlist('is_admin'))
try:
db.session.add(user)
db.session.commit()
flash('Utilizador adicionado com sucesso.', 'success')
return redirect(url_for('user.list_users'))
except:
flash('Erro: username já existe.', 'danger')
return redirect(url_for('user.add_user'))
return render_template('user_add.html')
<|reserved_special_token_0|>
@user.route('/users/delete/<int:id>', methods=['GET', 'POST'])
@login_required
def delete_user(id):
"""
Delete from database
"""
check_admin()
user = User.query.get_or_404(id)
db.session.delete(user)
db.session.commit()
flash('Utilizador removido com sucesso.', 'success')
return redirect(url_for('user.list_users'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def check_admin():
"""
Prevent non-admins from accessing the page
"""
if not current_user.is_admin:
abort(403)
<|reserved_special_token_0|>
@user.route('/users/add', methods=['GET', 'POST'])
def add_user():
"""
load form page and add to the database
"""
if request.method == 'POST':
user = User(username=request.form['username'], password=request.
form['password'], is_admin=request.form.getlist('is_admin'))
try:
db.session.add(user)
db.session.commit()
flash('Utilizador adicionado com sucesso.', 'success')
return redirect(url_for('user.list_users'))
except:
flash('Erro: username já existe.', 'danger')
return redirect(url_for('user.add_user'))
return render_template('user_add.html')
@user.route('/users/edit/<int:id>', methods=['GET', 'POST'])
@login_required
def edit_user(id):
check_admin()
user = User.query.get_or_404(id)
if request.method == 'POST':
user.username = request.form['username']
user.password = request.form['password']
user.is_admin = request.form.getlist('is_admin')
db.session.commit()
flash('Utilizador alterado com sucesso.', 'success')
return redirect(url_for('user.list_users'))
return render_template('user_edit.html', user=user)
@user.route('/users/delete/<int:id>', methods=['GET', 'POST'])
@login_required
def delete_user(id):
"""
Delete from database
"""
check_admin()
user = User.query.get_or_404(id)
db.session.delete(user)
db.session.commit()
flash('Utilizador removido com sucesso.', 'success')
return redirect(url_for('user.list_users'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def check_admin():
"""
Prevent non-admins from accessing the page
"""
if not current_user.is_admin:
abort(403)
@user.route('/users')
@login_required
def list_users():
"""
List all users ordered by latest
"""
check_admin()
results = User.query.order_by(-User.id)
return render_template('user_list.html', users=results)
@user.route('/users/add', methods=['GET', 'POST'])
def add_user():
"""
load form page and add to the database
"""
if request.method == 'POST':
user = User(username=request.form['username'], password=request.
form['password'], is_admin=request.form.getlist('is_admin'))
try:
db.session.add(user)
db.session.commit()
flash('Utilizador adicionado com sucesso.', 'success')
return redirect(url_for('user.list_users'))
except:
flash('Erro: username já existe.', 'danger')
return redirect(url_for('user.add_user'))
return render_template('user_add.html')
@user.route('/users/edit/<int:id>', methods=['GET', 'POST'])
@login_required
def edit_user(id):
check_admin()
user = User.query.get_or_404(id)
if request.method == 'POST':
user.username = request.form['username']
user.password = request.form['password']
user.is_admin = request.form.getlist('is_admin')
db.session.commit()
flash('Utilizador alterado com sucesso.', 'success')
return redirect(url_for('user.list_users'))
return render_template('user_edit.html', user=user)
@user.route('/users/delete/<int:id>', methods=['GET', 'POST'])
@login_required
def delete_user(id):
"""
Delete from database
"""
check_admin()
user = User.query.get_or_404(id)
db.session.delete(user)
db.session.commit()
flash('Utilizador removido com sucesso.', 'success')
return redirect(url_for('user.list_users'))
<|reserved_special_token_1|>
from flask import abort, flash, redirect, render_template, url_for, request
from flask_login import current_user, login_required
from . import user
from .. import db
from models import User
def check_admin():
"""
Prevent non-admins from accessing the page
"""
if not current_user.is_admin:
abort(403)
@user.route('/users')
@login_required
def list_users():
"""
List all users ordered by latest
"""
check_admin()
results = User.query.order_by(-User.id)
return render_template('user_list.html', users=results)
@user.route('/users/add', methods=['GET', 'POST'])
def add_user():
"""
load form page and add to the database
"""
if request.method == 'POST':
user = User(username=request.form['username'], password=request.
form['password'], is_admin=request.form.getlist('is_admin'))
try:
db.session.add(user)
db.session.commit()
flash('Utilizador adicionado com sucesso.', 'success')
return redirect(url_for('user.list_users'))
except:
flash('Erro: username já existe.', 'danger')
return redirect(url_for('user.add_user'))
return render_template('user_add.html')
@user.route('/users/edit/<int:id>', methods=['GET', 'POST'])
@login_required
def edit_user(id):
check_admin()
user = User.query.get_or_404(id)
if request.method == 'POST':
user.username = request.form['username']
user.password = request.form['password']
user.is_admin = request.form.getlist('is_admin')
db.session.commit()
flash('Utilizador alterado com sucesso.', 'success')
return redirect(url_for('user.list_users'))
return render_template('user_edit.html', user=user)
@user.route('/users/delete/<int:id>', methods=['GET', 'POST'])
@login_required
def delete_user(id):
"""
Delete from database
"""
check_admin()
user = User.query.get_or_404(id)
db.session.delete(user)
db.session.commit()
flash('Utilizador removido com sucesso.', 'success')
return redirect(url_for('user.list_users'))
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from flask import abort, flash, redirect, render_template, url_for, request
from flask_login import current_user, login_required
from . import user
from .. import db
from models import User
def check_admin():
"""
Prevent non-admins from accessing the page
"""
if not current_user.is_admin:
abort(403)
@user.route('/users')
@login_required
def list_users():
"""
List all users ordered by latest
"""
check_admin()
results = User.query.order_by(-User.id)
return render_template('user_list.html', users=results)
#@login_required
@user.route('/users/add', methods=['GET', 'POST'])
def add_user():
"""
load form page and add to the database
"""
#check_admin()
# if form submit
if request.method == 'POST':
# create new user with UI form data
user = User(username=request.form['username'],
password=request.form['password'],
is_admin=request.form.getlist('is_admin'))
try:
# add user to the database
db.session.add(user)
db.session.commit()
# message to the UI
flash('Utilizador adicionado com sucesso.', 'success')
# redirect to the users page
return redirect(url_for('user.list_users'))
except:
# in case user name already exists
flash('Erro: username já existe.', 'danger')
return redirect(url_for('user.add_user'))
# load add user form template
return render_template('user_add.html')
@user.route('/users/edit/<int:id>', methods=['GET', 'POST'])
@login_required
def edit_user(id):
check_admin()
# get user or error
user = User.query.get_or_404(id)
if request.method == 'POST':
# update user with UI form data
user.username = request.form['username']
user.password = request.form['password']
user.is_admin = request.form.getlist('is_admin')
# update user in database
db.session.commit()
# message to the UI
flash('Utilizador alterado com sucesso.', 'success')
# redirect to the users page
return redirect(url_for('user.list_users'))
return render_template('user_edit.html', user=user)
@user.route('/users/delete/<int:id>', methods=['GET', 'POST'])
@login_required
def delete_user(id):
"""
Delete from database
"""
check_admin()
# get user or error
user = User.query.get_or_404(id)
db.session.delete(user)
db.session.commit()
flash('Utilizador removido com sucesso.', 'success')
# redirect to the users page
return redirect(url_for('user.list_users'))
|
flexible
|
{
"blob_id": "9a6f4f0eac5d9e5b4b92fcb2d66d39df15b3b281",
"index": 6303,
"step-1": "<mask token>\n\n\n@user.route('/users/add', methods=['GET', 'POST'])\ndef add_user():\n \"\"\"\n load form page and add to the database\n \"\"\"\n if request.method == 'POST':\n user = User(username=request.form['username'], password=request.\n form['password'], is_admin=request.form.getlist('is_admin'))\n try:\n db.session.add(user)\n db.session.commit()\n flash('Utilizador adicionado com sucesso.', 'success')\n return redirect(url_for('user.list_users'))\n except:\n flash('Erro: username já existe.', 'danger')\n return redirect(url_for('user.add_user'))\n return render_template('user_add.html')\n\n\n<mask token>\n\n\n@user.route('/users/delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_user(id):\n \"\"\"\n Delete from database\n \"\"\"\n check_admin()\n user = User.query.get_or_404(id)\n db.session.delete(user)\n db.session.commit()\n flash('Utilizador removido com sucesso.', 'success')\n return redirect(url_for('user.list_users'))\n",
"step-2": "<mask token>\n\n\ndef check_admin():\n \"\"\"\n Prevent non-admins from accessing the page\n \"\"\"\n if not current_user.is_admin:\n abort(403)\n\n\n<mask token>\n\n\n@user.route('/users/add', methods=['GET', 'POST'])\ndef add_user():\n \"\"\"\n load form page and add to the database\n \"\"\"\n if request.method == 'POST':\n user = User(username=request.form['username'], password=request.\n form['password'], is_admin=request.form.getlist('is_admin'))\n try:\n db.session.add(user)\n db.session.commit()\n flash('Utilizador adicionado com sucesso.', 'success')\n return redirect(url_for('user.list_users'))\n except:\n flash('Erro: username já existe.', 'danger')\n return redirect(url_for('user.add_user'))\n return render_template('user_add.html')\n\n\n@user.route('/users/edit/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_user(id):\n check_admin()\n user = User.query.get_or_404(id)\n if request.method == 'POST':\n user.username = request.form['username']\n user.password = request.form['password']\n user.is_admin = request.form.getlist('is_admin')\n db.session.commit()\n flash('Utilizador alterado com sucesso.', 'success')\n return redirect(url_for('user.list_users'))\n return render_template('user_edit.html', user=user)\n\n\n@user.route('/users/delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_user(id):\n \"\"\"\n Delete from database\n \"\"\"\n check_admin()\n user = User.query.get_or_404(id)\n db.session.delete(user)\n db.session.commit()\n flash('Utilizador removido com sucesso.', 'success')\n return redirect(url_for('user.list_users'))\n",
"step-3": "<mask token>\n\n\ndef check_admin():\n \"\"\"\n Prevent non-admins from accessing the page\n \"\"\"\n if not current_user.is_admin:\n abort(403)\n\n\n@user.route('/users')\n@login_required\ndef list_users():\n \"\"\"\n List all users ordered by latest\n \"\"\"\n check_admin()\n results = User.query.order_by(-User.id)\n return render_template('user_list.html', users=results)\n\n\n@user.route('/users/add', methods=['GET', 'POST'])\ndef add_user():\n \"\"\"\n load form page and add to the database\n \"\"\"\n if request.method == 'POST':\n user = User(username=request.form['username'], password=request.\n form['password'], is_admin=request.form.getlist('is_admin'))\n try:\n db.session.add(user)\n db.session.commit()\n flash('Utilizador adicionado com sucesso.', 'success')\n return redirect(url_for('user.list_users'))\n except:\n flash('Erro: username já existe.', 'danger')\n return redirect(url_for('user.add_user'))\n return render_template('user_add.html')\n\n\n@user.route('/users/edit/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_user(id):\n check_admin()\n user = User.query.get_or_404(id)\n if request.method == 'POST':\n user.username = request.form['username']\n user.password = request.form['password']\n user.is_admin = request.form.getlist('is_admin')\n db.session.commit()\n flash('Utilizador alterado com sucesso.', 'success')\n return redirect(url_for('user.list_users'))\n return render_template('user_edit.html', user=user)\n\n\n@user.route('/users/delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_user(id):\n \"\"\"\n Delete from database\n \"\"\"\n check_admin()\n user = User.query.get_or_404(id)\n db.session.delete(user)\n db.session.commit()\n flash('Utilizador removido com sucesso.', 'success')\n return redirect(url_for('user.list_users'))\n",
"step-4": "from flask import abort, flash, redirect, render_template, url_for, request\nfrom flask_login import current_user, login_required\nfrom . import user\nfrom .. import db\nfrom models import User\n\n\ndef check_admin():\n \"\"\"\n Prevent non-admins from accessing the page\n \"\"\"\n if not current_user.is_admin:\n abort(403)\n\n\n@user.route('/users')\n@login_required\ndef list_users():\n \"\"\"\n List all users ordered by latest\n \"\"\"\n check_admin()\n results = User.query.order_by(-User.id)\n return render_template('user_list.html', users=results)\n\n\n@user.route('/users/add', methods=['GET', 'POST'])\ndef add_user():\n \"\"\"\n load form page and add to the database\n \"\"\"\n if request.method == 'POST':\n user = User(username=request.form['username'], password=request.\n form['password'], is_admin=request.form.getlist('is_admin'))\n try:\n db.session.add(user)\n db.session.commit()\n flash('Utilizador adicionado com sucesso.', 'success')\n return redirect(url_for('user.list_users'))\n except:\n flash('Erro: username já existe.', 'danger')\n return redirect(url_for('user.add_user'))\n return render_template('user_add.html')\n\n\n@user.route('/users/edit/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_user(id):\n check_admin()\n user = User.query.get_or_404(id)\n if request.method == 'POST':\n user.username = request.form['username']\n user.password = request.form['password']\n user.is_admin = request.form.getlist('is_admin')\n db.session.commit()\n flash('Utilizador alterado com sucesso.', 'success')\n return redirect(url_for('user.list_users'))\n return render_template('user_edit.html', user=user)\n\n\n@user.route('/users/delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_user(id):\n \"\"\"\n Delete from database\n \"\"\"\n check_admin()\n user = User.query.get_or_404(id)\n db.session.delete(user)\n db.session.commit()\n flash('Utilizador removido com sucesso.', 'success')\n return 
redirect(url_for('user.list_users'))\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom flask import abort, flash, redirect, render_template, url_for, request\nfrom flask_login import current_user, login_required\n\nfrom . import user\nfrom .. import db\nfrom models import User\n\n\ndef check_admin():\n \"\"\"\n Prevent non-admins from accessing the page\n \"\"\"\n if not current_user.is_admin:\n abort(403)\n\n@user.route('/users')\n@login_required\ndef list_users():\n \"\"\"\n List all users ordered by latest\n \"\"\"\n check_admin()\n results = User.query.order_by(-User.id)\n return render_template('user_list.html', users=results)\n\n\n#@login_required\n@user.route('/users/add', methods=['GET', 'POST'])\ndef add_user():\n \"\"\"\n load form page and add to the database\n \"\"\"\n #check_admin()\n\n # if form submit\n if request.method == 'POST':\n # create new user with UI form data\n user = User(username=request.form['username'],\n password=request.form['password'],\n is_admin=request.form.getlist('is_admin'))\n\n try:\n # add user to the database\n db.session.add(user)\n db.session.commit()\n # message to the UI\n flash('Utilizador adicionado com sucesso.', 'success')\n # redirect to the users page\n return redirect(url_for('user.list_users'))\n except:\n # in case user name already exists\n flash('Erro: username já existe.', 'danger')\n return redirect(url_for('user.add_user'))\n\n # load add user form template\n return render_template('user_add.html')\n\n\n@user.route('/users/edit/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_user(id):\n check_admin()\n\n # get user or error\n user = User.query.get_or_404(id)\n if request.method == 'POST':\n # update user with UI form data\n user.username = request.form['username']\n user.password = request.form['password']\n user.is_admin = request.form.getlist('is_admin')\n # update user in database\n db.session.commit()\n # message to the UI\n flash('Utilizador alterado com sucesso.', 'success')\n\n # redirect to the users page\n return 
redirect(url_for('user.list_users'))\n\n return render_template('user_edit.html', user=user)\n\n\n@user.route('/users/delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_user(id):\n \"\"\"\n Delete from database\n \"\"\"\n check_admin()\n\n # get user or error\n user = User.query.get_or_404(id)\n db.session.delete(user)\n db.session.commit()\n flash('Utilizador removido com sucesso.', 'success')\n\n # redirect to the users page\n return redirect(url_for('user.list_users'))\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
import cv2
img = cv2.imread('Chapter1/resources/jacuzi.jpg')
imgGrey = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
imgCanny = cv2.Canny(img,240,250)
cv2.imshow("output",imgCanny)
cv2.waitKey(0)
|
normal
|
{
"blob_id": "292cfecb701ecc179381d4453063aff532a0e877",
"index": 8961,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncv2.imshow('output', imgCanny)\ncv2.waitKey(0)\n",
"step-3": "<mask token>\nimg = cv2.imread('Chapter1/resources/jacuzi.jpg')\nimgGrey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nimgCanny = cv2.Canny(img, 240, 250)\ncv2.imshow('output', imgCanny)\ncv2.waitKey(0)\n",
"step-4": "import cv2\nimg = cv2.imread('Chapter1/resources/jacuzi.jpg')\nimgGrey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nimgCanny = cv2.Canny(img, 240, 250)\ncv2.imshow('output', imgCanny)\ncv2.waitKey(0)\n",
"step-5": "import cv2\n\nimg = cv2.imread('Chapter1/resources/jacuzi.jpg')\nimgGrey = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\nimgCanny = cv2.Canny(img,240,250)\ncv2.imshow(\"output\",imgCanny)\ncv2.waitKey(0)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class SyncRecv:
def __init__(self):
self._comport = '/dev/ttyUSB0'
self._baudrate = '115200'
self._epwm_sync_pin = 'GPIO2_23'
self._sync_in_pin = 'GPIO2_25'
self._sync_out_pin = 'GPIO1_14'
self.setup_pins()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SyncRecv:
def __init__(self):
self._comport = '/dev/ttyUSB0'
self._baudrate = '115200'
self._epwm_sync_pin = 'GPIO2_23'
self._sync_in_pin = 'GPIO2_25'
self._sync_out_pin = 'GPIO1_14'
self.setup_pins()
def setup_pins(self):
GPIO.setup(self._epwm_sync_pin, GPIO.IN)
GPIO.setup(self._sync_in_pin, GPIO.IN)
GPIO.setup(self._sync_out_pin, GPIO.OUT)
def do_syncrecv_test(self):
drs = SerialDRS()
conn = drs.Connect(self._comport, self._baudrate)
if not conn:
print('Erro conexao serial')
return False
print('Iniciando teste dos receptores de fibra - sync')
print('Desliga transmissor sync')
GPIO.output(self._sync_out_pin, GPIO.HIGH)
print('Le receptor sync (Esperado = 1)')
sts_sync_in = GPIO.input(self._sync_in_pin)
print('status: ' + str(sts_sync_in))
if sts_sync_in:
print('Liga transmissor sync')
GPIO.output(self._sync_out_pin, GPIO.LOW)
print('Le receptor sync (Esperado = 0)')
sts_sync_in = GPIO.input(self._sync_in_pin)
print('status: ' + str(sts_sync_in))
if not sts_sync_in:
print('DRS desligando todos os transmissores')
drs.ClearPof()
print('Lendo EPWM sync (Esperado = 1)')
sts_epwm_sync = GPIO.input(self._epwm_sync_pin)
print('status: ' + str(sts_epwm_sync))
if sts_epwm_sync:
print('DRS ligando todos os transmissores')
drs.SetPof()
print('Lendo EPWM sync (Esperado = 0)')
sts_epwm_sync = GPIO.input(self._epwm_sync_pin)
print('status: ' + str(sts_epwm_sync))
if not sts_epwm_sync:
drs.Disconnect()
return True
print('Falha receptores sync')
drs.Disconnect()
return False
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.dont_write_bytecode = True
class SyncRecv:
def __init__(self):
self._comport = '/dev/ttyUSB0'
self._baudrate = '115200'
self._epwm_sync_pin = 'GPIO2_23'
self._sync_in_pin = 'GPIO2_25'
self._sync_out_pin = 'GPIO1_14'
self.setup_pins()
def setup_pins(self):
GPIO.setup(self._epwm_sync_pin, GPIO.IN)
GPIO.setup(self._sync_in_pin, GPIO.IN)
GPIO.setup(self._sync_out_pin, GPIO.OUT)
def do_syncrecv_test(self):
drs = SerialDRS()
conn = drs.Connect(self._comport, self._baudrate)
if not conn:
print('Erro conexao serial')
return False
print('Iniciando teste dos receptores de fibra - sync')
print('Desliga transmissor sync')
GPIO.output(self._sync_out_pin, GPIO.HIGH)
print('Le receptor sync (Esperado = 1)')
sts_sync_in = GPIO.input(self._sync_in_pin)
print('status: ' + str(sts_sync_in))
if sts_sync_in:
print('Liga transmissor sync')
GPIO.output(self._sync_out_pin, GPIO.LOW)
print('Le receptor sync (Esperado = 0)')
sts_sync_in = GPIO.input(self._sync_in_pin)
print('status: ' + str(sts_sync_in))
if not sts_sync_in:
print('DRS desligando todos os transmissores')
drs.ClearPof()
print('Lendo EPWM sync (Esperado = 1)')
sts_epwm_sync = GPIO.input(self._epwm_sync_pin)
print('status: ' + str(sts_epwm_sync))
if sts_epwm_sync:
print('DRS ligando todos os transmissores')
drs.SetPof()
print('Lendo EPWM sync (Esperado = 0)')
sts_epwm_sync = GPIO.input(self._epwm_sync_pin)
print('status: ' + str(sts_epwm_sync))
if not sts_epwm_sync:
drs.Disconnect()
return True
print('Falha receptores sync')
drs.Disconnect()
return False
<|reserved_special_token_1|>
import Adafruit_BBIO.GPIO as GPIO
from pydrs import SerialDRS
import time
import sys
sys.dont_write_bytecode = True
class SyncRecv:
def __init__(self):
self._comport = '/dev/ttyUSB0'
self._baudrate = '115200'
self._epwm_sync_pin = 'GPIO2_23'
self._sync_in_pin = 'GPIO2_25'
self._sync_out_pin = 'GPIO1_14'
self.setup_pins()
def setup_pins(self):
GPIO.setup(self._epwm_sync_pin, GPIO.IN)
GPIO.setup(self._sync_in_pin, GPIO.IN)
GPIO.setup(self._sync_out_pin, GPIO.OUT)
def do_syncrecv_test(self):
drs = SerialDRS()
conn = drs.Connect(self._comport, self._baudrate)
if not conn:
print('Erro conexao serial')
return False
print('Iniciando teste dos receptores de fibra - sync')
print('Desliga transmissor sync')
GPIO.output(self._sync_out_pin, GPIO.HIGH)
print('Le receptor sync (Esperado = 1)')
sts_sync_in = GPIO.input(self._sync_in_pin)
print('status: ' + str(sts_sync_in))
if sts_sync_in:
print('Liga transmissor sync')
GPIO.output(self._sync_out_pin, GPIO.LOW)
print('Le receptor sync (Esperado = 0)')
sts_sync_in = GPIO.input(self._sync_in_pin)
print('status: ' + str(sts_sync_in))
if not sts_sync_in:
print('DRS desligando todos os transmissores')
drs.ClearPof()
print('Lendo EPWM sync (Esperado = 1)')
sts_epwm_sync = GPIO.input(self._epwm_sync_pin)
print('status: ' + str(sts_epwm_sync))
if sts_epwm_sync:
print('DRS ligando todos os transmissores')
drs.SetPof()
print('Lendo EPWM sync (Esperado = 0)')
sts_epwm_sync = GPIO.input(self._epwm_sync_pin)
print('status: ' + str(sts_epwm_sync))
if not sts_epwm_sync:
drs.Disconnect()
return True
print('Falha receptores sync')
drs.Disconnect()
return False
<|reserved_special_token_1|>
import Adafruit_BBIO.GPIO as GPIO
from pydrs import SerialDRS
import time
import sys
sys.dont_write_bytecode = True
class SyncRecv:
def __init__(self):
self._comport = '/dev/ttyUSB0'
self._baudrate = '115200'
self._epwm_sync_pin = 'GPIO2_23' # Input in BBB perspective
self._sync_in_pin = 'GPIO2_25' # Input in BBB perspective
self._sync_out_pin = 'GPIO1_14' # Output in BBB perspective
self.setup_pins()
def setup_pins(self):
GPIO.setup(self._epwm_sync_pin, GPIO.IN)
GPIO.setup(self._sync_in_pin, GPIO.IN)
GPIO.setup(self._sync_out_pin, GPIO.OUT)
def do_syncrecv_test(self):
drs = SerialDRS()
conn = drs.Connect(self._comport, self._baudrate)
if not conn:
print("Erro conexao serial")
return False
print("Iniciando teste dos receptores de fibra - sync")
print('Desliga transmissor sync')
GPIO.output(self._sync_out_pin, GPIO.HIGH) # Desliga transmissor
print('Le receptor sync (Esperado = 1)')
sts_sync_in = GPIO.input(self._sync_in_pin)
print('status: ' + str(sts_sync_in))
if sts_sync_in:
print('Liga transmissor sync')
GPIO.output(self._sync_out_pin, GPIO.LOW)
print('Le receptor sync (Esperado = 0)')
sts_sync_in = GPIO.input(self._sync_in_pin)
print('status: ' + str(sts_sync_in))
if not sts_sync_in:
print('DRS desligando todos os transmissores')
drs.ClearPof()
print('Lendo EPWM sync (Esperado = 1)')
sts_epwm_sync = GPIO.input(self._epwm_sync_pin)
print('status: ' + str(sts_epwm_sync))
if sts_epwm_sync:
print('DRS ligando todos os transmissores')
drs.SetPof()
print('Lendo EPWM sync (Esperado = 0)')
sts_epwm_sync = GPIO.input(self._epwm_sync_pin)
print('status: ' + str(sts_epwm_sync))
if not sts_epwm_sync:
drs.Disconnect()
return True
print("Falha receptores sync")
drs.Disconnect()
return False
|
flexible
|
{
"blob_id": "c716f43dbe62f662c60653f09be946a27c3fff66",
"index": 8069,
"step-1": "<mask token>\n\n\nclass SyncRecv:\n\n def __init__(self):\n self._comport = '/dev/ttyUSB0'\n self._baudrate = '115200'\n self._epwm_sync_pin = 'GPIO2_23'\n self._sync_in_pin = 'GPIO2_25'\n self._sync_out_pin = 'GPIO1_14'\n self.setup_pins()\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass SyncRecv:\n\n def __init__(self):\n self._comport = '/dev/ttyUSB0'\n self._baudrate = '115200'\n self._epwm_sync_pin = 'GPIO2_23'\n self._sync_in_pin = 'GPIO2_25'\n self._sync_out_pin = 'GPIO1_14'\n self.setup_pins()\n\n def setup_pins(self):\n GPIO.setup(self._epwm_sync_pin, GPIO.IN)\n GPIO.setup(self._sync_in_pin, GPIO.IN)\n GPIO.setup(self._sync_out_pin, GPIO.OUT)\n\n def do_syncrecv_test(self):\n drs = SerialDRS()\n conn = drs.Connect(self._comport, self._baudrate)\n if not conn:\n print('Erro conexao serial')\n return False\n print('Iniciando teste dos receptores de fibra - sync')\n print('Desliga transmissor sync')\n GPIO.output(self._sync_out_pin, GPIO.HIGH)\n print('Le receptor sync (Esperado = 1)')\n sts_sync_in = GPIO.input(self._sync_in_pin)\n print('status: ' + str(sts_sync_in))\n if sts_sync_in:\n print('Liga transmissor sync')\n GPIO.output(self._sync_out_pin, GPIO.LOW)\n print('Le receptor sync (Esperado = 0)')\n sts_sync_in = GPIO.input(self._sync_in_pin)\n print('status: ' + str(sts_sync_in))\n if not sts_sync_in:\n print('DRS desligando todos os transmissores')\n drs.ClearPof()\n print('Lendo EPWM sync (Esperado = 1)')\n sts_epwm_sync = GPIO.input(self._epwm_sync_pin)\n print('status: ' + str(sts_epwm_sync))\n if sts_epwm_sync:\n print('DRS ligando todos os transmissores')\n drs.SetPof()\n print('Lendo EPWM sync (Esperado = 0)')\n sts_epwm_sync = GPIO.input(self._epwm_sync_pin)\n print('status: ' + str(sts_epwm_sync))\n if not sts_epwm_sync:\n drs.Disconnect()\n return True\n print('Falha receptores sync')\n drs.Disconnect()\n return False\n",
"step-3": "<mask token>\nsys.dont_write_bytecode = True\n\n\nclass SyncRecv:\n\n def __init__(self):\n self._comport = '/dev/ttyUSB0'\n self._baudrate = '115200'\n self._epwm_sync_pin = 'GPIO2_23'\n self._sync_in_pin = 'GPIO2_25'\n self._sync_out_pin = 'GPIO1_14'\n self.setup_pins()\n\n def setup_pins(self):\n GPIO.setup(self._epwm_sync_pin, GPIO.IN)\n GPIO.setup(self._sync_in_pin, GPIO.IN)\n GPIO.setup(self._sync_out_pin, GPIO.OUT)\n\n def do_syncrecv_test(self):\n drs = SerialDRS()\n conn = drs.Connect(self._comport, self._baudrate)\n if not conn:\n print('Erro conexao serial')\n return False\n print('Iniciando teste dos receptores de fibra - sync')\n print('Desliga transmissor sync')\n GPIO.output(self._sync_out_pin, GPIO.HIGH)\n print('Le receptor sync (Esperado = 1)')\n sts_sync_in = GPIO.input(self._sync_in_pin)\n print('status: ' + str(sts_sync_in))\n if sts_sync_in:\n print('Liga transmissor sync')\n GPIO.output(self._sync_out_pin, GPIO.LOW)\n print('Le receptor sync (Esperado = 0)')\n sts_sync_in = GPIO.input(self._sync_in_pin)\n print('status: ' + str(sts_sync_in))\n if not sts_sync_in:\n print('DRS desligando todos os transmissores')\n drs.ClearPof()\n print('Lendo EPWM sync (Esperado = 1)')\n sts_epwm_sync = GPIO.input(self._epwm_sync_pin)\n print('status: ' + str(sts_epwm_sync))\n if sts_epwm_sync:\n print('DRS ligando todos os transmissores')\n drs.SetPof()\n print('Lendo EPWM sync (Esperado = 0)')\n sts_epwm_sync = GPIO.input(self._epwm_sync_pin)\n print('status: ' + str(sts_epwm_sync))\n if not sts_epwm_sync:\n drs.Disconnect()\n return True\n print('Falha receptores sync')\n drs.Disconnect()\n return False\n",
"step-4": "import Adafruit_BBIO.GPIO as GPIO\nfrom pydrs import SerialDRS\nimport time\nimport sys\nsys.dont_write_bytecode = True\n\n\nclass SyncRecv:\n\n def __init__(self):\n self._comport = '/dev/ttyUSB0'\n self._baudrate = '115200'\n self._epwm_sync_pin = 'GPIO2_23'\n self._sync_in_pin = 'GPIO2_25'\n self._sync_out_pin = 'GPIO1_14'\n self.setup_pins()\n\n def setup_pins(self):\n GPIO.setup(self._epwm_sync_pin, GPIO.IN)\n GPIO.setup(self._sync_in_pin, GPIO.IN)\n GPIO.setup(self._sync_out_pin, GPIO.OUT)\n\n def do_syncrecv_test(self):\n drs = SerialDRS()\n conn = drs.Connect(self._comport, self._baudrate)\n if not conn:\n print('Erro conexao serial')\n return False\n print('Iniciando teste dos receptores de fibra - sync')\n print('Desliga transmissor sync')\n GPIO.output(self._sync_out_pin, GPIO.HIGH)\n print('Le receptor sync (Esperado = 1)')\n sts_sync_in = GPIO.input(self._sync_in_pin)\n print('status: ' + str(sts_sync_in))\n if sts_sync_in:\n print('Liga transmissor sync')\n GPIO.output(self._sync_out_pin, GPIO.LOW)\n print('Le receptor sync (Esperado = 0)')\n sts_sync_in = GPIO.input(self._sync_in_pin)\n print('status: ' + str(sts_sync_in))\n if not sts_sync_in:\n print('DRS desligando todos os transmissores')\n drs.ClearPof()\n print('Lendo EPWM sync (Esperado = 1)')\n sts_epwm_sync = GPIO.input(self._epwm_sync_pin)\n print('status: ' + str(sts_epwm_sync))\n if sts_epwm_sync:\n print('DRS ligando todos os transmissores')\n drs.SetPof()\n print('Lendo EPWM sync (Esperado = 0)')\n sts_epwm_sync = GPIO.input(self._epwm_sync_pin)\n print('status: ' + str(sts_epwm_sync))\n if not sts_epwm_sync:\n drs.Disconnect()\n return True\n print('Falha receptores sync')\n drs.Disconnect()\n return False\n",
"step-5": "import Adafruit_BBIO.GPIO as GPIO\nfrom pydrs import SerialDRS\nimport time\nimport sys\n\nsys.dont_write_bytecode = True\n\nclass SyncRecv:\n\n def __init__(self):\n self._comport = '/dev/ttyUSB0'\n self._baudrate = '115200'\n self._epwm_sync_pin = 'GPIO2_23' # Input in BBB perspective\n self._sync_in_pin = 'GPIO2_25' # Input in BBB perspective\n self._sync_out_pin = 'GPIO1_14' # Output in BBB perspective\n\n self.setup_pins()\n\n def setup_pins(self):\n GPIO.setup(self._epwm_sync_pin, GPIO.IN)\n GPIO.setup(self._sync_in_pin, GPIO.IN)\n\n GPIO.setup(self._sync_out_pin, GPIO.OUT)\n\n def do_syncrecv_test(self):\n\n drs = SerialDRS()\n conn = drs.Connect(self._comport, self._baudrate)\n\n if not conn:\n print(\"Erro conexao serial\")\n return False\n\n print(\"Iniciando teste dos receptores de fibra - sync\")\n print('Desliga transmissor sync')\n GPIO.output(self._sync_out_pin, GPIO.HIGH) # Desliga transmissor\n\n print('Le receptor sync (Esperado = 1)')\n sts_sync_in = GPIO.input(self._sync_in_pin)\n print('status: ' + str(sts_sync_in))\n\n if sts_sync_in:\n\n print('Liga transmissor sync')\n GPIO.output(self._sync_out_pin, GPIO.LOW)\n print('Le receptor sync (Esperado = 0)')\n sts_sync_in = GPIO.input(self._sync_in_pin)\n print('status: ' + str(sts_sync_in))\n\n if not sts_sync_in:\n\n print('DRS desligando todos os transmissores')\n drs.ClearPof()\n\n print('Lendo EPWM sync (Esperado = 1)')\n sts_epwm_sync = GPIO.input(self._epwm_sync_pin)\n print('status: ' + str(sts_epwm_sync))\n if sts_epwm_sync:\n\n print('DRS ligando todos os transmissores')\n drs.SetPof()\n print('Lendo EPWM sync (Esperado = 0)')\n sts_epwm_sync = GPIO.input(self._epwm_sync_pin)\n print('status: ' + str(sts_epwm_sync))\n if not sts_epwm_sync:\n drs.Disconnect()\n return True\n print(\"Falha receptores sync\")\n drs.Disconnect()\n return False\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class retrieve_open_space(dml.Algorithm):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@staticmethod
def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None
):
"""
Create the provenance document describing everything happening
in this script. Each run of the script will generate a new
document describing that invocation event.
"""
client = dml.pymongo.MongoClient()
repo = client.repo
repo.authenticate('bmroach', 'bmroach')
doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')
doc.add_namespace('dat', 'http://datamechanics.io/data/')
doc.add_namespace('ont', 'http://datamechanics.io/ontology#')
doc.add_namespace('log', 'http://datamechanics.io/log/')
doc.add_namespace('ops',
'http://bostonopendata-boston.opendata.arcgis.com/datasets/')
this_script = doc.agent('alg:bmroach#open_space', {prov.model.
PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'}
)
resource = doc.entity('ops:2868d370c55d4d458d4ae2224ef8cddd_7', {
'prov:label': '311, Service Requests', prov.model.PROV_TYPE:
'ont:DataResource', 'ont:Extension': 'geojson'})
get_open_space = doc.activity('log:uuid' + str(uuid.uuid4()),
startTime, endTime)
doc.wasAssociatedWith(get_open_space, this_script)
doc.usage(get_open_space, resource, startTime, None, {prov.model.
PROV_TYPE: 'ont:Retrieval', 'ont:Query': ''})
open_space = doc.entity('dat:bmroach#open_space', {prov.model.
PROV_LABEL: 'open_space', prov.model.PROV_TYPE: 'ont:DataSet'})
doc.wasAttributedTo(open_space, this_script)
doc.wasGeneratedBy(open_space, get_open_space, endTime)
doc.wasDerivedFrom(open_space, resource, get_open_space,
get_open_space, get_open_space)
repo.logout()
return doc
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class retrieve_open_space(dml.Algorithm):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@staticmethod
def execute(trial=False, log=False):
"""Retrieves open spaces in Boston as geoJSON"""
startTime = datetime.datetime.now()
client = dml.pymongo.MongoClient()
repo = client.repo
repo.authenticate('bmroach', 'bmroach')
repo.dropCollection('open_space')
repo.createCollection('open_space')
url = (
'http://bostonopendata-boston.opendata.arcgis.com/datasets/2868d370c55d4d458d4ae2224ef8cddd_7.geojson'
)
response = urllib.request.urlopen(url).read().decode('utf-8')
gj = geojson.loads(response)
geoDict = dict(gj)
geoList = geoDict['features']
repo['bmroach.open_space'].insert_many(geoList)
repo['bmroach.open_space'].metadata({'complete': True})
repo.logout()
endTime = datetime.datetime.now()
return {'start': startTime, 'end': endTime}
@staticmethod
def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None
):
"""
Create the provenance document describing everything happening
in this script. Each run of the script will generate a new
document describing that invocation event.
"""
client = dml.pymongo.MongoClient()
repo = client.repo
repo.authenticate('bmroach', 'bmroach')
doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')
doc.add_namespace('dat', 'http://datamechanics.io/data/')
doc.add_namespace('ont', 'http://datamechanics.io/ontology#')
doc.add_namespace('log', 'http://datamechanics.io/log/')
doc.add_namespace('ops',
'http://bostonopendata-boston.opendata.arcgis.com/datasets/')
this_script = doc.agent('alg:bmroach#open_space', {prov.model.
PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'}
)
resource = doc.entity('ops:2868d370c55d4d458d4ae2224ef8cddd_7', {
'prov:label': '311, Service Requests', prov.model.PROV_TYPE:
'ont:DataResource', 'ont:Extension': 'geojson'})
get_open_space = doc.activity('log:uuid' + str(uuid.uuid4()),
startTime, endTime)
doc.wasAssociatedWith(get_open_space, this_script)
doc.usage(get_open_space, resource, startTime, None, {prov.model.
PROV_TYPE: 'ont:Retrieval', 'ont:Query': ''})
open_space = doc.entity('dat:bmroach#open_space', {prov.model.
PROV_LABEL: 'open_space', prov.model.PROV_TYPE: 'ont:DataSet'})
doc.wasAttributedTo(open_space, this_script)
doc.wasGeneratedBy(open_space, get_open_space, endTime)
doc.wasDerivedFrom(open_space, resource, get_open_space,
get_open_space, get_open_space)
repo.logout()
return doc
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class retrieve_open_space(dml.Algorithm):
contributor = 'bmroach'
reads = []
writes = ['bmroach.open_space']
@staticmethod
def execute(trial=False, log=False):
"""Retrieves open spaces in Boston as geoJSON"""
startTime = datetime.datetime.now()
client = dml.pymongo.MongoClient()
repo = client.repo
repo.authenticate('bmroach', 'bmroach')
repo.dropCollection('open_space')
repo.createCollection('open_space')
url = (
'http://bostonopendata-boston.opendata.arcgis.com/datasets/2868d370c55d4d458d4ae2224ef8cddd_7.geojson'
)
response = urllib.request.urlopen(url).read().decode('utf-8')
gj = geojson.loads(response)
geoDict = dict(gj)
geoList = geoDict['features']
repo['bmroach.open_space'].insert_many(geoList)
repo['bmroach.open_space'].metadata({'complete': True})
repo.logout()
endTime = datetime.datetime.now()
return {'start': startTime, 'end': endTime}
@staticmethod
def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None
):
"""
Create the provenance document describing everything happening
in this script. Each run of the script will generate a new
document describing that invocation event.
"""
client = dml.pymongo.MongoClient()
repo = client.repo
repo.authenticate('bmroach', 'bmroach')
doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')
doc.add_namespace('dat', 'http://datamechanics.io/data/')
doc.add_namespace('ont', 'http://datamechanics.io/ontology#')
doc.add_namespace('log', 'http://datamechanics.io/log/')
doc.add_namespace('ops',
'http://bostonopendata-boston.opendata.arcgis.com/datasets/')
this_script = doc.agent('alg:bmroach#open_space', {prov.model.
PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'}
)
resource = doc.entity('ops:2868d370c55d4d458d4ae2224ef8cddd_7', {
'prov:label': '311, Service Requests', prov.model.PROV_TYPE:
'ont:DataResource', 'ont:Extension': 'geojson'})
get_open_space = doc.activity('log:uuid' + str(uuid.uuid4()),
startTime, endTime)
doc.wasAssociatedWith(get_open_space, this_script)
doc.usage(get_open_space, resource, startTime, None, {prov.model.
PROV_TYPE: 'ont:Retrieval', 'ont:Query': ''})
open_space = doc.entity('dat:bmroach#open_space', {prov.model.
PROV_LABEL: 'open_space', prov.model.PROV_TYPE: 'ont:DataSet'})
doc.wasAttributedTo(open_space, this_script)
doc.wasGeneratedBy(open_space, get_open_space, endTime)
doc.wasDerivedFrom(open_space, resource, get_open_space,
get_open_space, get_open_space)
repo.logout()
return doc
<|reserved_special_token_1|>
import urllib.request
import json
import dml, prov.model
import datetime, uuid
import geojson
<|reserved_special_token_0|>
class retrieve_open_space(dml.Algorithm):
contributor = 'bmroach'
reads = []
writes = ['bmroach.open_space']
@staticmethod
def execute(trial=False, log=False):
"""Retrieves open spaces in Boston as geoJSON"""
startTime = datetime.datetime.now()
client = dml.pymongo.MongoClient()
repo = client.repo
repo.authenticate('bmroach', 'bmroach')
repo.dropCollection('open_space')
repo.createCollection('open_space')
url = (
'http://bostonopendata-boston.opendata.arcgis.com/datasets/2868d370c55d4d458d4ae2224ef8cddd_7.geojson'
)
response = urllib.request.urlopen(url).read().decode('utf-8')
gj = geojson.loads(response)
geoDict = dict(gj)
geoList = geoDict['features']
repo['bmroach.open_space'].insert_many(geoList)
repo['bmroach.open_space'].metadata({'complete': True})
repo.logout()
endTime = datetime.datetime.now()
return {'start': startTime, 'end': endTime}
@staticmethod
def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None
):
"""
Create the provenance document describing everything happening
in this script. Each run of the script will generate a new
document describing that invocation event.
"""
client = dml.pymongo.MongoClient()
repo = client.repo
repo.authenticate('bmroach', 'bmroach')
doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')
doc.add_namespace('dat', 'http://datamechanics.io/data/')
doc.add_namespace('ont', 'http://datamechanics.io/ontology#')
doc.add_namespace('log', 'http://datamechanics.io/log/')
doc.add_namespace('ops',
'http://bostonopendata-boston.opendata.arcgis.com/datasets/')
this_script = doc.agent('alg:bmroach#open_space', {prov.model.
PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'}
)
resource = doc.entity('ops:2868d370c55d4d458d4ae2224ef8cddd_7', {
'prov:label': '311, Service Requests', prov.model.PROV_TYPE:
'ont:DataResource', 'ont:Extension': 'geojson'})
get_open_space = doc.activity('log:uuid' + str(uuid.uuid4()),
startTime, endTime)
doc.wasAssociatedWith(get_open_space, this_script)
doc.usage(get_open_space, resource, startTime, None, {prov.model.
PROV_TYPE: 'ont:Retrieval', 'ont:Query': ''})
open_space = doc.entity('dat:bmroach#open_space', {prov.model.
PROV_LABEL: 'open_space', prov.model.PROV_TYPE: 'ont:DataSet'})
doc.wasAttributedTo(open_space, this_script)
doc.wasGeneratedBy(open_space, get_open_space, endTime)
doc.wasDerivedFrom(open_space, resource, get_open_space,
get_open_space, get_open_space)
repo.logout()
return doc
<|reserved_special_token_1|>
import urllib.request
import json
import dml, prov.model
import datetime, uuid
import geojson
# import csv
"""
Skelton file provided by lapets@bu.edu
Heavily modified by bmroach@bu.edu
City of Boston Open Spaces (Like parks, etc)
Development notes:
"""
class retrieve_open_space(dml.Algorithm):
contributor = 'bmroach'
reads = []
writes = ['bmroach.open_space']
@staticmethod
def execute(trial = False, log=False):
'''Retrieves open spaces in Boston as geoJSON'''
startTime = datetime.datetime.now()
# Set up the database connection.
client = dml.pymongo.MongoClient()
repo = client.repo
repo.authenticate('bmroach', 'bmroach')
# Do retrieving of data
repo.dropCollection("open_space")
repo.createCollection("open_space")
url = 'http://bostonopendata-boston.opendata.arcgis.com/datasets/2868d370c55d4d458d4ae2224ef8cddd_7.geojson'
response = urllib.request.urlopen(url).read().decode("utf-8")
gj = geojson.loads(response)
geoDict = dict(gj)
geoList = geoDict['features']
repo['bmroach.open_space'].insert_many( geoList )
repo['bmroach.open_space'].metadata({'complete':True})
repo.logout()
endTime = datetime.datetime.now()
return {"start":startTime, "end":endTime}
@staticmethod
def provenance(doc = prov.model.ProvDocument(), startTime = None, endTime = None):
'''
Create the provenance document describing everything happening
in this script. Each run of the script will generate a new
document describing that invocation event.
'''
client = dml.pymongo.MongoClient()
repo = client.repo
repo.authenticate('bmroach', 'bmroach')
doc.add_namespace('alg', 'http://datamechanics.io/algorithm/') # The scripts are in <folder>#<filename> format.
doc.add_namespace('dat', 'http://datamechanics.io/data/') # The data sets are in <user>#<collection> format.
doc.add_namespace('ont', 'http://datamechanics.io/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.
doc.add_namespace('log', 'http://datamechanics.io/log/') # The event log.
doc.add_namespace('ops', 'http://bostonopendata-boston.opendata.arcgis.com/datasets/')
this_script = doc.agent('alg:bmroach#open_space', {prov.model.PROV_TYPE:prov.model.PROV['SoftwareAgent'], 'ont:Extension':'py'})
resource = doc.entity('ops:2868d370c55d4d458d4ae2224ef8cddd_7', {'prov:label':'311, Service Requests', prov.model.PROV_TYPE:'ont:DataResource', 'ont:Extension':'geojson'})
get_open_space = doc.activity('log:uuid'+str(uuid.uuid4()), startTime, endTime)
doc.wasAssociatedWith(get_open_space, this_script)
doc.usage(get_open_space,resource, startTime, None,
{prov.model.PROV_TYPE:'ont:Retrieval',
'ont:Query':''
}
)
open_space = doc.entity('dat:bmroach#open_space', {prov.model.PROV_LABEL:'open_space', prov.model.PROV_TYPE:'ont:DataSet'})
doc.wasAttributedTo(open_space, this_script)
doc.wasGeneratedBy(open_space, get_open_space, endTime)
doc.wasDerivedFrom(open_space, resource, get_open_space, get_open_space, get_open_space)
repo.logout()
return doc
# retrieve_open_space.execute()
# doc = retrieve_open_space.provenance()
# print(doc.get_provn())
# print(json.dumps(json.loads(doc.serialize()), indent=4))
## eof
|
flexible
|
{
"blob_id": "2c82dd33180a7442607e5cbedf8846bd72b37150",
"index": 9914,
"step-1": "<mask token>\n\n\nclass retrieve_open_space(dml.Algorithm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @staticmethod\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None\n ):\n \"\"\"\n Create the provenance document describing everything happening\n in this script. Each run of the script will generate a new\n document describing that invocation event.\n \"\"\"\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('bmroach', 'bmroach')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('ops',\n 'http://bostonopendata-boston.opendata.arcgis.com/datasets/')\n this_script = doc.agent('alg:bmroach#open_space', {prov.model.\n PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'}\n )\n resource = doc.entity('ops:2868d370c55d4d458d4ae2224ef8cddd_7', {\n 'prov:label': '311, Service Requests', prov.model.PROV_TYPE:\n 'ont:DataResource', 'ont:Extension': 'geojson'})\n get_open_space = doc.activity('log:uuid' + str(uuid.uuid4()),\n startTime, endTime)\n doc.wasAssociatedWith(get_open_space, this_script)\n doc.usage(get_open_space, resource, startTime, None, {prov.model.\n PROV_TYPE: 'ont:Retrieval', 'ont:Query': ''})\n open_space = doc.entity('dat:bmroach#open_space', {prov.model.\n PROV_LABEL: 'open_space', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(open_space, this_script)\n doc.wasGeneratedBy(open_space, get_open_space, endTime)\n doc.wasDerivedFrom(open_space, resource, get_open_space,\n get_open_space, get_open_space)\n repo.logout()\n return doc\n",
"step-2": "<mask token>\n\n\nclass retrieve_open_space(dml.Algorithm):\n <mask token>\n <mask token>\n <mask token>\n\n @staticmethod\n def execute(trial=False, log=False):\n \"\"\"Retrieves open spaces in Boston as geoJSON\"\"\"\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('bmroach', 'bmroach')\n repo.dropCollection('open_space')\n repo.createCollection('open_space')\n url = (\n 'http://bostonopendata-boston.opendata.arcgis.com/datasets/2868d370c55d4d458d4ae2224ef8cddd_7.geojson'\n )\n response = urllib.request.urlopen(url).read().decode('utf-8')\n gj = geojson.loads(response)\n geoDict = dict(gj)\n geoList = geoDict['features']\n repo['bmroach.open_space'].insert_many(geoList)\n repo['bmroach.open_space'].metadata({'complete': True})\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n\n @staticmethod\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None\n ):\n \"\"\"\n Create the provenance document describing everything happening\n in this script. 
Each run of the script will generate a new\n document describing that invocation event.\n \"\"\"\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('bmroach', 'bmroach')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('ops',\n 'http://bostonopendata-boston.opendata.arcgis.com/datasets/')\n this_script = doc.agent('alg:bmroach#open_space', {prov.model.\n PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'}\n )\n resource = doc.entity('ops:2868d370c55d4d458d4ae2224ef8cddd_7', {\n 'prov:label': '311, Service Requests', prov.model.PROV_TYPE:\n 'ont:DataResource', 'ont:Extension': 'geojson'})\n get_open_space = doc.activity('log:uuid' + str(uuid.uuid4()),\n startTime, endTime)\n doc.wasAssociatedWith(get_open_space, this_script)\n doc.usage(get_open_space, resource, startTime, None, {prov.model.\n PROV_TYPE: 'ont:Retrieval', 'ont:Query': ''})\n open_space = doc.entity('dat:bmroach#open_space', {prov.model.\n PROV_LABEL: 'open_space', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(open_space, this_script)\n doc.wasGeneratedBy(open_space, get_open_space, endTime)\n doc.wasDerivedFrom(open_space, resource, get_open_space,\n get_open_space, get_open_space)\n repo.logout()\n return doc\n",
"step-3": "<mask token>\n\n\nclass retrieve_open_space(dml.Algorithm):\n contributor = 'bmroach'\n reads = []\n writes = ['bmroach.open_space']\n\n @staticmethod\n def execute(trial=False, log=False):\n \"\"\"Retrieves open spaces in Boston as geoJSON\"\"\"\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('bmroach', 'bmroach')\n repo.dropCollection('open_space')\n repo.createCollection('open_space')\n url = (\n 'http://bostonopendata-boston.opendata.arcgis.com/datasets/2868d370c55d4d458d4ae2224ef8cddd_7.geojson'\n )\n response = urllib.request.urlopen(url).read().decode('utf-8')\n gj = geojson.loads(response)\n geoDict = dict(gj)\n geoList = geoDict['features']\n repo['bmroach.open_space'].insert_many(geoList)\n repo['bmroach.open_space'].metadata({'complete': True})\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n\n @staticmethod\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None\n ):\n \"\"\"\n Create the provenance document describing everything happening\n in this script. 
Each run of the script will generate a new\n document describing that invocation event.\n \"\"\"\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('bmroach', 'bmroach')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('ops',\n 'http://bostonopendata-boston.opendata.arcgis.com/datasets/')\n this_script = doc.agent('alg:bmroach#open_space', {prov.model.\n PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'}\n )\n resource = doc.entity('ops:2868d370c55d4d458d4ae2224ef8cddd_7', {\n 'prov:label': '311, Service Requests', prov.model.PROV_TYPE:\n 'ont:DataResource', 'ont:Extension': 'geojson'})\n get_open_space = doc.activity('log:uuid' + str(uuid.uuid4()),\n startTime, endTime)\n doc.wasAssociatedWith(get_open_space, this_script)\n doc.usage(get_open_space, resource, startTime, None, {prov.model.\n PROV_TYPE: 'ont:Retrieval', 'ont:Query': ''})\n open_space = doc.entity('dat:bmroach#open_space', {prov.model.\n PROV_LABEL: 'open_space', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(open_space, this_script)\n doc.wasGeneratedBy(open_space, get_open_space, endTime)\n doc.wasDerivedFrom(open_space, resource, get_open_space,\n get_open_space, get_open_space)\n repo.logout()\n return doc\n",
"step-4": "import urllib.request\nimport json\nimport dml, prov.model\nimport datetime, uuid\nimport geojson\n<mask token>\n\n\nclass retrieve_open_space(dml.Algorithm):\n contributor = 'bmroach'\n reads = []\n writes = ['bmroach.open_space']\n\n @staticmethod\n def execute(trial=False, log=False):\n \"\"\"Retrieves open spaces in Boston as geoJSON\"\"\"\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('bmroach', 'bmroach')\n repo.dropCollection('open_space')\n repo.createCollection('open_space')\n url = (\n 'http://bostonopendata-boston.opendata.arcgis.com/datasets/2868d370c55d4d458d4ae2224ef8cddd_7.geojson'\n )\n response = urllib.request.urlopen(url).read().decode('utf-8')\n gj = geojson.loads(response)\n geoDict = dict(gj)\n geoList = geoDict['features']\n repo['bmroach.open_space'].insert_many(geoList)\n repo['bmroach.open_space'].metadata({'complete': True})\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n\n @staticmethod\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None\n ):\n \"\"\"\n Create the provenance document describing everything happening\n in this script. 
Each run of the script will generate a new\n document describing that invocation event.\n \"\"\"\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('bmroach', 'bmroach')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('ops',\n 'http://bostonopendata-boston.opendata.arcgis.com/datasets/')\n this_script = doc.agent('alg:bmroach#open_space', {prov.model.\n PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'}\n )\n resource = doc.entity('ops:2868d370c55d4d458d4ae2224ef8cddd_7', {\n 'prov:label': '311, Service Requests', prov.model.PROV_TYPE:\n 'ont:DataResource', 'ont:Extension': 'geojson'})\n get_open_space = doc.activity('log:uuid' + str(uuid.uuid4()),\n startTime, endTime)\n doc.wasAssociatedWith(get_open_space, this_script)\n doc.usage(get_open_space, resource, startTime, None, {prov.model.\n PROV_TYPE: 'ont:Retrieval', 'ont:Query': ''})\n open_space = doc.entity('dat:bmroach#open_space', {prov.model.\n PROV_LABEL: 'open_space', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(open_space, this_script)\n doc.wasGeneratedBy(open_space, get_open_space, endTime)\n doc.wasDerivedFrom(open_space, resource, get_open_space,\n get_open_space, get_open_space)\n repo.logout()\n return doc\n",
"step-5": "import urllib.request\nimport json\nimport dml, prov.model\nimport datetime, uuid\nimport geojson\n# import csv\n\n\"\"\"\nSkelton file provided by lapets@bu.edu\nHeavily modified by bmroach@bu.edu\n\nCity of Boston Open Spaces (Like parks, etc)\n\nDevelopment notes:\n\n\n\"\"\"\n\nclass retrieve_open_space(dml.Algorithm):\n contributor = 'bmroach'\n reads = []\n writes = ['bmroach.open_space']\n\n @staticmethod\n def execute(trial = False, log=False):\n '''Retrieves open spaces in Boston as geoJSON'''\n startTime = datetime.datetime.now()\n\n # Set up the database connection.\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('bmroach', 'bmroach')\n \n # Do retrieving of data\n repo.dropCollection(\"open_space\")\n repo.createCollection(\"open_space\") \n \n url = 'http://bostonopendata-boston.opendata.arcgis.com/datasets/2868d370c55d4d458d4ae2224ef8cddd_7.geojson'\n response = urllib.request.urlopen(url).read().decode(\"utf-8\") \n gj = geojson.loads(response)\n geoDict = dict(gj)\n geoList = geoDict['features']\n repo['bmroach.open_space'].insert_many( geoList )\n repo['bmroach.open_space'].metadata({'complete':True}) \n repo.logout()\n endTime = datetime.datetime.now()\n return {\"start\":startTime, \"end\":endTime}\n \n @staticmethod\n def provenance(doc = prov.model.ProvDocument(), startTime = None, endTime = None):\n '''\n Create the provenance document describing everything happening\n in this script. 
Each run of the script will generate a new\n document describing that invocation event.\n '''\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('bmroach', 'bmroach')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/') # The scripts are in <folder>#<filename> format.\n doc.add_namespace('dat', 'http://datamechanics.io/data/') # The data sets are in <user>#<collection> format.\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.\n doc.add_namespace('log', 'http://datamechanics.io/log/') # The event log. \n doc.add_namespace('ops', 'http://bostonopendata-boston.opendata.arcgis.com/datasets/')\n\n this_script = doc.agent('alg:bmroach#open_space', {prov.model.PROV_TYPE:prov.model.PROV['SoftwareAgent'], 'ont:Extension':'py'})\n \n resource = doc.entity('ops:2868d370c55d4d458d4ae2224ef8cddd_7', {'prov:label':'311, Service Requests', prov.model.PROV_TYPE:'ont:DataResource', 'ont:Extension':'geojson'})\n \n get_open_space = doc.activity('log:uuid'+str(uuid.uuid4()), startTime, endTime)\n \n doc.wasAssociatedWith(get_open_space, this_script)\n \n doc.usage(get_open_space,resource, startTime, None,\n {prov.model.PROV_TYPE:'ont:Retrieval',\n 'ont:Query':'' \n }\n )\n \n \n\n open_space = doc.entity('dat:bmroach#open_space', {prov.model.PROV_LABEL:'open_space', prov.model.PROV_TYPE:'ont:DataSet'})\n doc.wasAttributedTo(open_space, this_script)\n doc.wasGeneratedBy(open_space, get_open_space, endTime)\n \n doc.wasDerivedFrom(open_space, resource, get_open_space, get_open_space, get_open_space)\n \n repo.logout() \n return doc\n\n\n\n\n\n\n# retrieve_open_space.execute()\n# doc = retrieve_open_space.provenance()\n# print(doc.get_provn())\n# print(json.dumps(json.loads(doc.serialize()), indent=4))\n\n## eof\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def create_statement(month=None):
    """Generate a monthly expense-statement PDF for every user and upload it.

    month: optional 'YYYY-MM' string (e.g. '2020-12'); defaults to the
    current month. Creates one Statement row per user as a side effect,
    skipping users that already have a statement for the month.
    """
    # Resolve the wkhtmltopdf binary: look it up on PATH in production,
    # otherwise fall back to pdfkit's default discovery.
    def _get_pdfkit_config():
        if os.getenv('FLASK_ENV') == 'production':
            WKHTMLTOPDF_CMD = subprocess.Popen(['which', os.environ.get(
                'WKHTMLTOPDF_BINARY', 'wkhtmltopdf-pack')], stdout=
                subprocess.PIPE).communicate()[0].strip()
            return pdfkit.configuration(wkhtmltopdf=WKHTMLTOPDF_CMD)
        else:
            return pdfkit.configuration()
    # Render the HTML string to an A4 PDF returned as a rewound temp file.
    def create_pdf(pdf_content, filename):
        options = {'margin-top': '10mm', 'margin-bottom': '10mm',
            'margin-left': '10mm', 'margin-right': '10mm', 'page-size':
            'A4', 'page-width': '210mm', 'page-height': '296mm'}
        pdf = pdfkit.from_string(pdf_content, False, configuration=
            _get_pdfkit_config(), options=options)
        temp_file = tempfile.TemporaryFile()
        temp_file.filename = filename
        temp_file.content_type = 'application/pdf'
        temp_file.write(pdf)
        temp_file.seek(0)
        return temp_file
    # No month argument: use today's month (e.g. 'December 2020' / 'Dec').
    if month == None:
        year = datetime.datetime.now().year
        full_month = datetime.date.today().strftime('%B %Y')
        short_month = datetime.date.today().strftime('%b')
    else:
        # 'YYYY-MM' -> year, short name ('Dec'), full name ('December 2020').
        year_month = month.split('-')
        year = int(year_month[0])
        short_month = datetime.datetime(year, int(year_month[1]), 1).strftime(
            '%b')
        full_month = datetime.datetime(year, int(year_month[1]), 1).strftime(
            '%B %Y')
    users = User.select()
    for user in users:
        # Skip users who already have a statement for this month.
        record = Statement.get_or_none(Statement.user == user.id, Statement
            .month == full_month)
        if not record:
            # NOTE(review): `Expense.cat in user.categories` is Python's `in`
            # operator, which evaluates to a plain bool rather than building
            # an SQL IN predicate — peewee requires `.in_()`; confirm intent.
            expenses = Expense.select().where(Expense.cat in user.
                categories, Expense.month == short_month, Expense.
                created_at.year == year).order_by(Expense.created_at.asc())
            total = 0
            for exp in expenses:
                total += exp.amount
            html = render_template('expenses/statement.html', expenses=
                expenses, total=total, month=str(full_month))
            # e.g. 'john-doe' + '-' + 'December-2020' (only username lowered).
            pdf_name = user.username.replace(' ', '-').lower() + '-' + str(
                full_month).replace(' ', '-')
            temp_file = create_pdf(html, pdf_name)
            statement_url = upload_image_to_s3(user.id, temp_file)
            print(statement_url)
            statement = Statement(user=user.id, exp_url=statement_url,
                month=full_month)
            statement.save()
            # Placeholder for the email-sending step (not implemented here).
            """
            Send monthly statement email
            """
        else:
            print('already exist!')
<|reserved_special_token_1|>
import os
from app_web import sg
from sendgrid.helpers.mail import *
import pdfkit
from models.user import User
from models.expense import Expense
from models.statement import Statement
from models.category import Category
import tempfile
import subprocess
from .aws_uploader import upload_image_to_s3
import datetime
from peewee import fn
from flask import render_template
def create_statement(month=None):
    """Generate a monthly expense-statement PDF for every user and upload it.

    month: optional 'YYYY-MM' string (e.g. '2020-12'); defaults to the
    current month. Creates one Statement row per user as a side effect,
    skipping users that already have a statement for the month.
    """
    # Resolve the wkhtmltopdf binary: look it up on PATH in production,
    # otherwise fall back to pdfkit's default discovery.
    def _get_pdfkit_config():
        if os.getenv('FLASK_ENV') == 'production':
            WKHTMLTOPDF_CMD = subprocess.Popen(['which', os.environ.get(
                'WKHTMLTOPDF_BINARY', 'wkhtmltopdf-pack')], stdout=
                subprocess.PIPE).communicate()[0].strip()
            return pdfkit.configuration(wkhtmltopdf=WKHTMLTOPDF_CMD)
        else:
            return pdfkit.configuration()
    # Render the HTML string to an A4 PDF returned as a rewound temp file.
    def create_pdf(pdf_content, filename):
        options = {'margin-top': '10mm', 'margin-bottom': '10mm',
            'margin-left': '10mm', 'margin-right': '10mm', 'page-size':
            'A4', 'page-width': '210mm', 'page-height': '296mm'}
        pdf = pdfkit.from_string(pdf_content, False, configuration=
            _get_pdfkit_config(), options=options)
        temp_file = tempfile.TemporaryFile()
        temp_file.filename = filename
        temp_file.content_type = 'application/pdf'
        temp_file.write(pdf)
        temp_file.seek(0)
        return temp_file
    # No month argument: use today's month (e.g. 'December 2020' / 'Dec').
    if month == None:
        year = datetime.datetime.now().year
        full_month = datetime.date.today().strftime('%B %Y')
        short_month = datetime.date.today().strftime('%b')
    else:
        # 'YYYY-MM' -> year, short name ('Dec'), full name ('December 2020').
        year_month = month.split('-')
        year = int(year_month[0])
        short_month = datetime.datetime(year, int(year_month[1]), 1).strftime(
            '%b')
        full_month = datetime.datetime(year, int(year_month[1]), 1).strftime(
            '%B %Y')
    users = User.select()
    for user in users:
        # Skip users who already have a statement for this month.
        record = Statement.get_or_none(Statement.user == user.id, Statement
            .month == full_month)
        if not record:
            # NOTE(review): `Expense.cat in user.categories` is Python's `in`
            # operator, which evaluates to a plain bool rather than building
            # an SQL IN predicate — peewee requires `.in_()`; confirm intent.
            expenses = Expense.select().where(Expense.cat in user.
                categories, Expense.month == short_month, Expense.
                created_at.year == year).order_by(Expense.created_at.asc())
            total = 0
            for exp in expenses:
                total += exp.amount
            html = render_template('expenses/statement.html', expenses=
                expenses, total=total, month=str(full_month))
            # e.g. 'john-doe' + '-' + 'December-2020' (only username lowered).
            pdf_name = user.username.replace(' ', '-').lower() + '-' + str(
                full_month).replace(' ', '-')
            temp_file = create_pdf(html, pdf_name)
            statement_url = upload_image_to_s3(user.id, temp_file)
            print(statement_url)
            statement = Statement(user=user.id, exp_url=statement_url,
                month=full_month)
            statement.save()
            # Placeholder for the email-sending step (not implemented here).
            """
            Send monthly statement email
            """
        else:
            print('already exist!')
<|reserved_special_token_1|>
import os
from app_web import sg
from sendgrid.helpers.mail import *
import pdfkit
from models.user import User
from models.expense import Expense
from models.statement import Statement
from models.category import Category
import tempfile
import subprocess
from .aws_uploader import upload_image_to_s3
import datetime
from peewee import fn
from flask import render_template
def create_statement(month=None):
    """Generate a monthly expense-statement PDF for every user and upload it.

    Args:
        month: optional 'YYYY-MM' string (e.g. '2020-12'); defaults to the
            current month.

    Side effects: for each user without an existing statement for the month,
    renders their expenses to an A4 PDF, uploads it to S3, saves a Statement
    row, and prints the uploaded URL.
    """
    def _get_pdfkit_config():
        # In production, resolve the packed wkhtmltopdf binary on PATH;
        # elsewhere rely on pdfkit's default discovery.
        if os.getenv('FLASK_ENV') == 'production':
            WKHTMLTOPDF_CMD = subprocess.Popen(
                ['which', os.environ.get(
                    'WKHTMLTOPDF_BINARY', 'wkhtmltopdf-pack')],
                stdout=subprocess.PIPE).communicate()[0].strip()
            return pdfkit.configuration(wkhtmltopdf=WKHTMLTOPDF_CMD)
        else:
            return pdfkit.configuration()

    def create_pdf(pdf_content, filename):
        # Render the HTML string to an A4 PDF, returned as a rewound
        # anonymous temp file ready for upload.
        options = {
            'margin-top': '10mm',
            'margin-bottom': '10mm',
            'margin-left': '10mm',
            'margin-right': '10mm',
            'page-size': 'A4',
            'page-width': '210mm',
            'page-height': '296mm'
        }
        pdf = pdfkit.from_string(
            pdf_content, False, configuration=_get_pdfkit_config(), options=options)
        temp_file = tempfile.TemporaryFile()
        temp_file.filename = filename
        temp_file.content_type = "application/pdf"
        temp_file.write(pdf)
        temp_file.seek(0)
        return temp_file

    if month is None:  # `is None` instead of `== None`
        year = datetime.datetime.now().year
        full_month = datetime.date.today().strftime("%B %Y")  # e.g. 'December 2020'
        short_month = datetime.date.today().strftime("%b")    # e.g. 'Dec'
    else:
        # '2020-12' -> year=2020, short_month='Dec', full_month='December 2020'
        year_part, month_part = month.split('-')
        year = int(year_part)
        first_day = datetime.datetime(year, int(month_part), 1)
        short_month = first_day.strftime("%b")
        full_month = first_day.strftime("%B %Y")

    users = User.select()
    for user in users:
        # Skip users who already have a statement for this month.
        record = Statement.get_or_none(Statement.user == user.id, Statement.month == full_month)
        if not record:
            # BUGFIX: the original used `Expense.cat in user.categories`,
            # which evaluates Python's `in` operator (a plain bool) instead
            # of building an SQL IN predicate; peewee requires `.in_()`.
            expenses = (Expense.select()
                        .where(Expense.cat.in_(user.categories),
                               Expense.month == short_month,
                               Expense.created_at.year == year)
                        .order_by(Expense.created_at.asc()))
            total = sum(exp.amount for exp in expenses)

            html = render_template('expenses/statement.html', expenses=expenses, total=total, month=str(full_month))
            # e.g. 'john-doe' + '-' + 'December-2020' (only the username is lowercased)
            pdf_name = user.username.replace(" ", "-").lower() + "-" + str(full_month).replace(" ", "-")
            temp_file = create_pdf(html, pdf_name)
            statement_url = upload_image_to_s3(user.id, temp_file)
            print(statement_url)

            statement = Statement(user=user.id, exp_url=statement_url, month=full_month)
            statement.save()
            # TODO: send the monthly statement email (SendGrid draft below).
            # message = Mail(
            #     from_email="leongjinqwen@gmail.com",
            #     to_emails=user.email,
            #     subject=f"{month} Expenses Statement",
            #     html_content=Content("text/html", f"<h1>Dear {user.username},</h1><br/>Here is your expenses statement PDF.<br/><a href={statement_url}>{month} Statement<a><br/><h1>Jw</h1>")
            # )
            # try:
            #     response = sg.send(message)
            #     print(response.body)
            # except Exception as e:
            #     print(str(e))
        else:
            print('already exist!')
|
flexible
|
{
"blob_id": "55df8d13ddf28f7b0477329bee743471a0780f24",
"index": 3253,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_statement(month=None):\n\n def _get_pdfkit_config():\n if os.getenv('FLASK_ENV') == 'production':\n WKHTMLTOPDF_CMD = subprocess.Popen(['which', os.environ.get(\n 'WKHTMLTOPDF_BINARY', 'wkhtmltopdf-pack')], stdout=\n subprocess.PIPE).communicate()[0].strip()\n return pdfkit.configuration(wkhtmltopdf=WKHTMLTOPDF_CMD)\n else:\n return pdfkit.configuration()\n\n def create_pdf(pdf_content, filename):\n options = {'margin-top': '10mm', 'margin-bottom': '10mm',\n 'margin-left': '10mm', 'margin-right': '10mm', 'page-size':\n 'A4', 'page-width': '210mm', 'page-height': '296mm'}\n pdf = pdfkit.from_string(pdf_content, False, configuration=\n _get_pdfkit_config(), options=options)\n temp_file = tempfile.TemporaryFile()\n temp_file.filename = filename\n temp_file.content_type = 'application/pdf'\n temp_file.write(pdf)\n temp_file.seek(0)\n return temp_file\n if month == None:\n year = datetime.datetime.now().year\n full_month = datetime.date.today().strftime('%B %Y')\n short_month = datetime.date.today().strftime('%b')\n else:\n year_month = month.split('-')\n year = int(year_month[0])\n short_month = datetime.datetime(year, int(year_month[1]), 1).strftime(\n '%b')\n full_month = datetime.datetime(year, int(year_month[1]), 1).strftime(\n '%B %Y')\n users = User.select()\n for user in users:\n record = Statement.get_or_none(Statement.user == user.id, Statement\n .month == full_month)\n if not record:\n expenses = Expense.select().where(Expense.cat in user.\n categories, Expense.month == short_month, Expense.\n created_at.year == year).order_by(Expense.created_at.asc())\n total = 0\n for exp in expenses:\n total += exp.amount\n html = render_template('expenses/statement.html', expenses=\n expenses, total=total, month=str(full_month))\n pdf_name = user.username.replace(' ', '-').lower() + '-' + str(\n full_month).replace(' ', '-')\n temp_file = create_pdf(html, pdf_name)\n statement_url = upload_image_to_s3(user.id, temp_file)\n 
print(statement_url)\n statement = Statement(user=user.id, exp_url=statement_url,\n month=full_month)\n statement.save()\n \"\"\"\n Send monthly statement email\n \"\"\"\n else:\n print('already exist!')\n",
"step-3": "import os\nfrom app_web import sg\nfrom sendgrid.helpers.mail import *\nimport pdfkit\nfrom models.user import User\nfrom models.expense import Expense\nfrom models.statement import Statement\nfrom models.category import Category\nimport tempfile\nimport subprocess\nfrom .aws_uploader import upload_image_to_s3\nimport datetime\nfrom peewee import fn\nfrom flask import render_template\n\n\ndef create_statement(month=None):\n\n def _get_pdfkit_config():\n if os.getenv('FLASK_ENV') == 'production':\n WKHTMLTOPDF_CMD = subprocess.Popen(['which', os.environ.get(\n 'WKHTMLTOPDF_BINARY', 'wkhtmltopdf-pack')], stdout=\n subprocess.PIPE).communicate()[0].strip()\n return pdfkit.configuration(wkhtmltopdf=WKHTMLTOPDF_CMD)\n else:\n return pdfkit.configuration()\n\n def create_pdf(pdf_content, filename):\n options = {'margin-top': '10mm', 'margin-bottom': '10mm',\n 'margin-left': '10mm', 'margin-right': '10mm', 'page-size':\n 'A4', 'page-width': '210mm', 'page-height': '296mm'}\n pdf = pdfkit.from_string(pdf_content, False, configuration=\n _get_pdfkit_config(), options=options)\n temp_file = tempfile.TemporaryFile()\n temp_file.filename = filename\n temp_file.content_type = 'application/pdf'\n temp_file.write(pdf)\n temp_file.seek(0)\n return temp_file\n if month == None:\n year = datetime.datetime.now().year\n full_month = datetime.date.today().strftime('%B %Y')\n short_month = datetime.date.today().strftime('%b')\n else:\n year_month = month.split('-')\n year = int(year_month[0])\n short_month = datetime.datetime(year, int(year_month[1]), 1).strftime(\n '%b')\n full_month = datetime.datetime(year, int(year_month[1]), 1).strftime(\n '%B %Y')\n users = User.select()\n for user in users:\n record = Statement.get_or_none(Statement.user == user.id, Statement\n .month == full_month)\n if not record:\n expenses = Expense.select().where(Expense.cat in user.\n categories, Expense.month == short_month, Expense.\n created_at.year == 
year).order_by(Expense.created_at.asc())\n total = 0\n for exp in expenses:\n total += exp.amount\n html = render_template('expenses/statement.html', expenses=\n expenses, total=total, month=str(full_month))\n pdf_name = user.username.replace(' ', '-').lower() + '-' + str(\n full_month).replace(' ', '-')\n temp_file = create_pdf(html, pdf_name)\n statement_url = upload_image_to_s3(user.id, temp_file)\n print(statement_url)\n statement = Statement(user=user.id, exp_url=statement_url,\n month=full_month)\n statement.save()\n \"\"\"\n Send monthly statement email\n \"\"\"\n else:\n print('already exist!')\n",
"step-4": "import os\nfrom app_web import sg\nfrom sendgrid.helpers.mail import *\nimport pdfkit\nfrom models.user import User\nfrom models.expense import Expense\nfrom models.statement import Statement\nfrom models.category import Category\nimport tempfile\nimport subprocess\nfrom .aws_uploader import upload_image_to_s3\nimport datetime\nfrom peewee import fn\nfrom flask import render_template\n\ndef create_statement(month=None):\n def _get_pdfkit_config():\n if os.getenv('FLASK_ENV') == 'production':\n WKHTMLTOPDF_CMD = subprocess.Popen(\n ['which', os.environ.get(\n 'WKHTMLTOPDF_BINARY', 'wkhtmltopdf-pack')],\n stdout=subprocess.PIPE).communicate()[0].strip()\n return pdfkit.configuration(wkhtmltopdf=WKHTMLTOPDF_CMD)\n else:\n return pdfkit.configuration()\n\n def create_pdf(pdf_content, filename):\n options = {\n 'margin-top': '10mm',\n 'margin-bottom': '10mm',\n 'margin-left': '10mm',\n 'margin-right': '10mm',\n 'page-size': 'A4',\n 'page-width': '210mm',\n 'page-height': '296mm'\n }\n pdf = pdfkit.from_string(\n pdf_content, False, configuration=_get_pdfkit_config(), options=options)\n temp_file = tempfile.TemporaryFile()\n temp_file.filename = filename\n temp_file.content_type = \"application/pdf\"\n temp_file.write(pdf)\n temp_file.seek(0)\n return temp_file\n\n if month == None :\n year = datetime.datetime.now().year\n full_month = datetime.date.today().strftime(\"%B %Y\") # current month\n short_month = datetime.date.today().strftime(\"%b\")\n else:\n # '2020-12' convert to 'December 2020'\n year_month = month.split('-') # ['2020','12']\n year = int(year_month[0])\n short_month = datetime.datetime(year, int(year_month[1]), 1).strftime(\"%b\")\n full_month = datetime.datetime(year, int(year_month[1]), 1).strftime(\"%B %Y\")\n\n # select all user from database\n users = User.select()\n # get all expenses to render in template\n for user in users:\n record = Statement.get_or_none(Statement.user==user.id, Statement.month==full_month)\n if not record:\n 
expenses = Expense.select().where(Expense.cat in user.categories, Expense.month == short_month, Expense.created_at.year == year).order_by(Expense.created_at.asc())\n # ttl = Expense.select(fn.SUM(Expense.amount).alias('total')).where(Expense.cat in user.categories, Expense.month == short_month, Expense.created_at.year == year)\n total = 0\n for exp in expenses:\n total += exp.amount\n\n html = render_template('expenses/statement.html', expenses=expenses, total=total, month=str(full_month))\n pdf_name = (user.username).replace(\" \", \"-\").lower() + \"-\" + str(full_month).replace(\" \", \"-\")\n temp_file = create_pdf(html, pdf_name)\n statement_url = upload_image_to_s3(user.id ,temp_file)\n print(statement_url)\n\n statement = Statement(user=user.id, exp_url=statement_url, month=full_month)\n statement.save()\n '''\n Send monthly statement email\n '''\n # message = Mail(\n # from_email=\"leongjinqwen@gmail.com\",\n # to_emails=user.email,\n # subject=f\"{month} Expenses Statement\",\n # html_content=Content(\"text/html\", f\"<h1>Dear {user.username},</h1><br/>Here is your expenses statement PDF.<br/><a href={statement_url}>{month} Statement<a><br/><h1>Jw</h1>\")\n # )\n # try:\n # response = sg.send(message)\n # print(response.body)\n # except Exception as e:\n # print(str(e))\n else:\n print('already exist!')\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# =============================================================================
# Created By : Mohsen Malmir
# Created Date: Fri Nov 09 8:10 PM EST 2018
# Purpose : this file implements the gui handling to interact with emulators
# =============================================================================
from AppKit import NSWorkspace,NSApplicationActivateIgnoringOtherApps
from Quartz import CGWindowListCopyWindowInfo,kCGWindowListOptionOnScreenOnly
from Quartz import kCGWindowListExcludeDesktopElements,kCGNullWindowID
# this is a list of pairs of (emulator, game) that is supported to interact with
# supported_games[i] is the game handled for supported_emus[i] (parallel lists;
# keep them the same length).
supported_emus = ["OpenEmu"]
supported_games = ["Mortal Kombat 3"]
def activate_emu():
    """
    Scan all open windows for the first supported (emulator, game) pair,
    bring that emulator's window to the foreground, and report its bounds.
    Args:
        None
    Returns:
        ([x, y, width, height], emulator_name, game_name) for the first
        supported pair found, or None if no supported emulator/game is open.
    """
    # BUGFIX: CGWindowList options are bit flags and must be combined with
    # bitwise OR; the original AND-ed them, which yields 0
    # (kCGWindowListOptionAll), silently disabling both filters.
    windows = CGWindowListCopyWindowInfo(
        kCGWindowListOptionOnScreenOnly | kCGWindowListExcludeDesktopElements,
        kCGNullWindowID)
    winname_list = [w.get("kCGWindowName", u"Unknown") for w in windows]
    winrect_list = [w["kCGWindowBounds"] for w in windows]
    # Find a running, supported emulator by its localized process name.
    ws = NSWorkspace.sharedWorkspace()
    runningApps = ws.runningApplications()
    ra_names = [ra.localizedName() for ra in runningApps]
    for ii, emu in enumerate(supported_emus):
        if emu in ra_names:  # a supported emulator is running
            if supported_games[ii] in winname_list:  # and its game window is open
                # Bring the emulator app to the front.
                emu_idx = ra_names.index(emu)
                runningApps[emu_idx].activateWithOptions_(NSApplicationActivateIgnoringOtherApps)
                # Extract the game window's coordinates as ints.
                idx = winname_list.index(supported_games[ii])
                rect = winrect_list[idx]
                rect = [rect.get("X"), rect.get("Y"), rect.get("Width"), rect.get("Height")]
                rect = list(map(int, rect))
                return rect, emu, supported_games[ii]
    return None
if __name__ == "__main__":
    # Manual smoke test: print the detected emulator window info, or None.
    print(activate_emu())
|
normal
|
{
"blob_id": "043ea0efd490522de4f6ee4913c8d66029b34ff5",
"index": 5136,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef activate_emu():\n \"\"\"\n This function scans all the open windows and returns a handle to the first known\n and supported emulator-game pair.\n Args:\n None\n Returns:\n \n \"\"\"\n windows = CGWindowListCopyWindowInfo(kCGWindowListOptionOnScreenOnly &\n kCGWindowListExcludeDesktopElements, kCGNullWindowID)\n winname_list = [w.get('kCGWindowName', u'Unknown') for w in windows]\n winrect_list = [w['kCGWindowBounds'] for w in windows]\n ws = NSWorkspace.sharedWorkspace()\n runningApps = ws.runningApplications()\n ra_names = [ra.localizedName() for ra in runningApps]\n for ii, emu in enumerate(supported_emus):\n if emu in ra_names:\n if supported_games[ii] in winname_list:\n emu_idx = ra_names.index(emu)\n runningApps[emu_idx].activateWithOptions_(\n NSApplicationActivateIgnoringOtherApps)\n idx = winname_list.index(supported_games[ii])\n rect = winrect_list[idx]\n rect = [rect.get('X'), rect.get('Y'), rect.get('Width'),\n rect.get('Height')]\n rect = list(map(int, rect))\n return rect, emu, supported_games[ii]\n return None\n\n\nif __name__ == '__main__':\n print(activate_emu())\n",
"step-3": "<mask token>\nsupported_emus = ['OpenEmu']\nsupported_games = ['Mortal Kombat 3']\n\n\ndef activate_emu():\n \"\"\"\n This function scans all the open windows and returns a handle to the first known\n and supported emulator-game pair.\n Args:\n None\n Returns:\n \n \"\"\"\n windows = CGWindowListCopyWindowInfo(kCGWindowListOptionOnScreenOnly &\n kCGWindowListExcludeDesktopElements, kCGNullWindowID)\n winname_list = [w.get('kCGWindowName', u'Unknown') for w in windows]\n winrect_list = [w['kCGWindowBounds'] for w in windows]\n ws = NSWorkspace.sharedWorkspace()\n runningApps = ws.runningApplications()\n ra_names = [ra.localizedName() for ra in runningApps]\n for ii, emu in enumerate(supported_emus):\n if emu in ra_names:\n if supported_games[ii] in winname_list:\n emu_idx = ra_names.index(emu)\n runningApps[emu_idx].activateWithOptions_(\n NSApplicationActivateIgnoringOtherApps)\n idx = winname_list.index(supported_games[ii])\n rect = winrect_list[idx]\n rect = [rect.get('X'), rect.get('Y'), rect.get('Width'),\n rect.get('Height')]\n rect = list(map(int, rect))\n return rect, emu, supported_games[ii]\n return None\n\n\nif __name__ == '__main__':\n print(activate_emu())\n",
"step-4": "from AppKit import NSWorkspace, NSApplicationActivateIgnoringOtherApps\nfrom Quartz import CGWindowListCopyWindowInfo, kCGWindowListOptionOnScreenOnly\nfrom Quartz import kCGWindowListExcludeDesktopElements, kCGNullWindowID\nsupported_emus = ['OpenEmu']\nsupported_games = ['Mortal Kombat 3']\n\n\ndef activate_emu():\n \"\"\"\n This function scans all the open windows and returns a handle to the first known\n and supported emulator-game pair.\n Args:\n None\n Returns:\n \n \"\"\"\n windows = CGWindowListCopyWindowInfo(kCGWindowListOptionOnScreenOnly &\n kCGWindowListExcludeDesktopElements, kCGNullWindowID)\n winname_list = [w.get('kCGWindowName', u'Unknown') for w in windows]\n winrect_list = [w['kCGWindowBounds'] for w in windows]\n ws = NSWorkspace.sharedWorkspace()\n runningApps = ws.runningApplications()\n ra_names = [ra.localizedName() for ra in runningApps]\n for ii, emu in enumerate(supported_emus):\n if emu in ra_names:\n if supported_games[ii] in winname_list:\n emu_idx = ra_names.index(emu)\n runningApps[emu_idx].activateWithOptions_(\n NSApplicationActivateIgnoringOtherApps)\n idx = winname_list.index(supported_games[ii])\n rect = winrect_list[idx]\n rect = [rect.get('X'), rect.get('Y'), rect.get('Width'),\n rect.get('Height')]\n rect = list(map(int, rect))\n return rect, emu, supported_games[ii]\n return None\n\n\nif __name__ == '__main__':\n print(activate_emu())\n",
"step-5": "# =============================================================================\n# Created By : Mohsen Malmir\n# Created Date: Fri Nov 09 8:10 PM EST 2018\n# Purpose : this file implements the gui handling to interact with emulators\n# =============================================================================\n\nfrom AppKit import NSWorkspace,NSApplicationActivateIgnoringOtherApps\nfrom Quartz import CGWindowListCopyWindowInfo,kCGWindowListOptionOnScreenOnly\nfrom Quartz import kCGWindowListExcludeDesktopElements,kCGNullWindowID\n\n# this is a list of pairs of (emulator, game) that is supported to interact with\nsupported_emus = [\"OpenEmu\"]\nsupported_games = [\"Mortal Kombat 3\"]\n\n\ndef activate_emu():\n \"\"\"\n This function scans all the open windows and returns a handle to the first known\n and supported emulator-game pair.\n Args:\n None\n Returns:\n \n \"\"\"\n # get a list of all open windows\n windows = CGWindowListCopyWindowInfo(kCGWindowListOptionOnScreenOnly&kCGWindowListExcludeDesktopElements,kCGNullWindowID)\n winname_list = [w.get(\"kCGWindowName\", u\"Unknown\") for w in windows]\n winrect_list = [w[\"kCGWindowBounds\"] for w in windows]\n # first find the Emulator\n ws = NSWorkspace.sharedWorkspace()\n runningApps = ws.runningApplications()\n # the running processes are checked by their localized name, e.g. 
\"OpenEmu\"\n ra_names = [ra.localizedName() for ra in runningApps] \n for ii, emu in enumerate(supported_emus):\n if emu in ra_names: # if a supported emu is found, check for corresponding games\n if supported_games[ii] in winname_list: # we foudn a supported game of the target emu\n # activate the emu window\n emu_idx = ra_names.index(emu)\n runningApps[emu_idx].activateWithOptions_(NSApplicationActivateIgnoringOtherApps)\n # get the window coordinates\n idx = winname_list.index(supported_games[ii])\n rect = winrect_list[idx]\n rect = [rect.get(\"X\"),rect.get(\"Y\"),rect.get(\"Width\"),rect.get(\"Height\")]\n rect = list(map(int,rect))\n return rect, emu, supported_games[ii]\n return None\n\nif __name__ == \"__main__\":\n print(activate_emu())\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
from PIL import Image, ImageDraw, ImageFont
import sys
### Create 1024,1024 pixel image with a white background.
img = Image.new("RGB", (1024, 1024), color = (255,255,255))
### Take text to be drawn on the image from the command terminal.
text = sys.argv[1]
### Chose favourite font and set size of the font.
fnt = ImageFont.truetype("/usr/share/fonts/truetype/freefont/FreeMono.ttf", 150, encoding="unic")
d = ImageDraw.Draw(img)
d.text(xy=(320,420), text = text , font = fnt, fill=(0,0,0))
### Save image as .png file.
img.save(text+'.png')
|
normal
|
{
"blob_id": "053fa80c80d40cd28acb7d6a8bf1b2c30be9b36e",
"index": 7786,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nd.text(xy=(320, 420), text=text, font=fnt, fill=(0, 0, 0))\nimg.save(text + '.png')\n",
"step-3": "<mask token>\nimg = Image.new('RGB', (1024, 1024), color=(255, 255, 255))\ntext = sys.argv[1]\nfnt = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeMono.ttf',\n 150, encoding='unic')\nd = ImageDraw.Draw(img)\nd.text(xy=(320, 420), text=text, font=fnt, fill=(0, 0, 0))\nimg.save(text + '.png')\n",
"step-4": "from PIL import Image, ImageDraw, ImageFont\nimport sys\nimg = Image.new('RGB', (1024, 1024), color=(255, 255, 255))\ntext = sys.argv[1]\nfnt = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeMono.ttf',\n 150, encoding='unic')\nd = ImageDraw.Draw(img)\nd.text(xy=(320, 420), text=text, font=fnt, fill=(0, 0, 0))\nimg.save(text + '.png')\n",
"step-5": "from PIL import Image, ImageDraw, ImageFont\nimport sys\n\n### Create 1024,1024 pixel image with a white background.\nimg = Image.new(\"RGB\", (1024, 1024), color = (255,255,255))\n\n### Take text to be drawn on the image from the command terminal.\ntext = sys.argv[1]\n\n### Chose favourite font and set size of the font.\nfnt = ImageFont.truetype(\"/usr/share/fonts/truetype/freefont/FreeMono.ttf\", 150, encoding=\"unic\")\nd = ImageDraw.Draw(img)\n\nd.text(xy=(320,420), text = text , font = fnt, fill=(0,0,0))\n\n### Save image as .png file.\nimg.save(text+'.png')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# 出现频率特别高的和频率特别低的词对于文本分析帮助不大,一般在预处理阶段会过滤掉。
# 在英文里,经典的停用词为 “The”, "an"....
# 方法1: 自己建立一个停用词词典
stop_words = ["the", "an", "is", "there"]
# 在使用时: 假设 word_list包含了文本里的单词
word_list = ["we", "are", "the", "students"]
filtered_words = [word for word in word_list if word not in stop_words]
print (filtered_words)
# 方法2:直接利用别人已经构建好的停用词库
from nltk.corpus import stopwords
cachedStopWords = stopwords.words("english")
from nltk.stem.porter import *
stemmer = PorterStemmer()
test_strs = ['caresses', 'flies', 'dies', 'mules', 'denied',
'died', 'agreed', 'owned', 'humbled', 'sized',
'meeting', 'stating', 'siezing', 'itemization',
'sensational', 'traditional', 'reference', 'colonizer',
'plotted']
singles = [stemmer.stem(word) for word in test_strs]
print(' '.join(singles)) # doctest: +NORMALIZE_WHITESPACE
|
normal
|
{
"blob_id": "d14937aaa7a80d6b95825afa2a2d6ff8202e5f5c",
"index": 2498,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(filtered_words)\n<mask token>\nprint(' '.join(singles))\n",
"step-3": "stop_words = ['the', 'an', 'is', 'there']\nword_list = ['we', 'are', 'the', 'students']\nfiltered_words = [word for word in word_list if word not in stop_words]\nprint(filtered_words)\n<mask token>\ncachedStopWords = stopwords.words('english')\n<mask token>\nstemmer = PorterStemmer()\ntest_strs = ['caresses', 'flies', 'dies', 'mules', 'denied', 'died',\n 'agreed', 'owned', 'humbled', 'sized', 'meeting', 'stating', 'siezing',\n 'itemization', 'sensational', 'traditional', 'reference', 'colonizer',\n 'plotted']\nsingles = [stemmer.stem(word) for word in test_strs]\nprint(' '.join(singles))\n",
"step-4": "stop_words = ['the', 'an', 'is', 'there']\nword_list = ['we', 'are', 'the', 'students']\nfiltered_words = [word for word in word_list if word not in stop_words]\nprint(filtered_words)\nfrom nltk.corpus import stopwords\ncachedStopWords = stopwords.words('english')\nfrom nltk.stem.porter import *\nstemmer = PorterStemmer()\ntest_strs = ['caresses', 'flies', 'dies', 'mules', 'denied', 'died',\n 'agreed', 'owned', 'humbled', 'sized', 'meeting', 'stating', 'siezing',\n 'itemization', 'sensational', 'traditional', 'reference', 'colonizer',\n 'plotted']\nsingles = [stemmer.stem(word) for word in test_strs]\nprint(' '.join(singles))\n",
"step-5": "# 出现频率特别高的和频率特别低的词对于文本分析帮助不大,一般在预处理阶段会过滤掉。 \r\n# 在英文里,经典的停用词为 “The”, \"an\"....\r\n\r\n# 方法1: 自己建立一个停用词词典\r\nstop_words = [\"the\", \"an\", \"is\", \"there\"]\r\n# 在使用时: 假设 word_list包含了文本里的单词\r\nword_list = [\"we\", \"are\", \"the\", \"students\"]\r\nfiltered_words = [word for word in word_list if word not in stop_words]\r\nprint (filtered_words)\r\n\r\n# 方法2:直接利用别人已经构建好的停用词库\r\nfrom nltk.corpus import stopwords\r\ncachedStopWords = stopwords.words(\"english\")\r\n\r\nfrom nltk.stem.porter import *\r\nstemmer = PorterStemmer()\r\n\r\ntest_strs = ['caresses', 'flies', 'dies', 'mules', 'denied',\r\n 'died', 'agreed', 'owned', 'humbled', 'sized',\r\n 'meeting', 'stating', 'siezing', 'itemization',\r\n 'sensational', 'traditional', 'reference', 'colonizer',\r\n 'plotted']\r\n\r\nsingles = [stemmer.stem(word) for word in test_strs]\r\nprint(' '.join(singles)) # doctest: +NORMALIZE_WHITESPACE",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
def caesar( plaintext, key ):
if int( key ) < 0:
return
plaintext_ascii = [ ( ord( char ) + int( key ) ) for char in plaintext ]
for ascii in plaintext_ascii:
if ( ascii < 97 and ascii > 90 ) or ascii > 122:
ascii -= 25
ciphertext = ''.join( [ chr( ascii ) for ascii in plaintext_ascii ] )
print( 'ciphertext: {}'.format( ciphertext ) )
if __name__ == '__main__':
if len( sys.argv ) is not 3:
print( 'Usage: python caesar.py plaintext key' )
else:
caesar( sys.argv[1], sys.argv[2] )
|
normal
|
{
"blob_id": "9a7c6998e9e486f0497d3684f9c7a422c8e13521",
"index": 7076,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef caesar(plaintext, key):\n if int(key) < 0:\n return\n plaintext_ascii = [(ord(char) + int(key)) for char in plaintext]\n for ascii in plaintext_ascii:\n if ascii < 97 and ascii > 90 or ascii > 122:\n ascii -= 25\n ciphertext = ''.join([chr(ascii) for ascii in plaintext_ascii])\n print('ciphertext: {}'.format(ciphertext))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef caesar(plaintext, key):\n if int(key) < 0:\n return\n plaintext_ascii = [(ord(char) + int(key)) for char in plaintext]\n for ascii in plaintext_ascii:\n if ascii < 97 and ascii > 90 or ascii > 122:\n ascii -= 25\n ciphertext = ''.join([chr(ascii) for ascii in plaintext_ascii])\n print('ciphertext: {}'.format(ciphertext))\n\n\nif __name__ == '__main__':\n if len(sys.argv) is not 3:\n print('Usage: python caesar.py plaintext key')\n else:\n caesar(sys.argv[1], sys.argv[2])\n",
"step-4": "import sys\n\n\ndef caesar(plaintext, key):\n if int(key) < 0:\n return\n plaintext_ascii = [(ord(char) + int(key)) for char in plaintext]\n for ascii in plaintext_ascii:\n if ascii < 97 and ascii > 90 or ascii > 122:\n ascii -= 25\n ciphertext = ''.join([chr(ascii) for ascii in plaintext_ascii])\n print('ciphertext: {}'.format(ciphertext))\n\n\nif __name__ == '__main__':\n if len(sys.argv) is not 3:\n print('Usage: python caesar.py plaintext key')\n else:\n caesar(sys.argv[1], sys.argv[2])\n",
"step-5": "import sys\n\ndef caesar( plaintext, key ):\n if int( key ) < 0:\n return\n\n plaintext_ascii = [ ( ord( char ) + int( key ) ) for char in plaintext ]\n for ascii in plaintext_ascii:\n if ( ascii < 97 and ascii > 90 ) or ascii > 122:\n ascii -= 25\n\n ciphertext = ''.join( [ chr( ascii ) for ascii in plaintext_ascii ] )\n print( 'ciphertext: {}'.format( ciphertext ) )\n\nif __name__ == '__main__':\n if len( sys.argv ) is not 3:\n print( 'Usage: python caesar.py plaintext key' )\n else:\n caesar( sys.argv[1], sys.argv[2] )",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from packer.utils import hello_world
|
normal
|
{
"blob_id": "d549303228e860ae278a5a9497a4a3a68989aeca",
"index": 6097,
"step-1": "<mask token>\n",
"step-2": "from packer.utils import hello_world\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
rc('font', **{'family': 'serif'})
<|reserved_special_token_0|>
for i, nafm in enumerate(nafms):
detuning = 6.44
a1, a2 = fetchdata.fetch_data_A1A2({'afmsize': nafm, 'ai': 0.0}, 'det',
datfile)
sunits = 9 * 6.71e-05 ** 2 / 16 / np.pi ** 2
a1[:, 1] = sunits * a1[:, 1]
a1[:, 2] = sunits * a1[:, 2]
a2[:, 1] = sunits * a2[:, 1]
a2[:, 2] = sunits * a2[:, 2]
i % len(nafms)
ax = plt.subplot(gs[i % rows, i / rows])
ax.set_title('AFM = %d sites' % nafm)
a1s = unumpy.uarray(a1[:, 1], a1[:, 2])
a2s = unumpy.uarray(a2[:, 1], a2[:, 2])
a2a1 = a2s / a1s
a2a1_mean = unumpy.nominal_values(a2a1)
a2a1_std = unumpy.std_devs(a2a1)
ax.errorbar(a2[:, 0], a2a1_mean, yerr=a2a1_std, capsize=0.0, elinewidth
=1.0, fmt='.', ecolor='blue', mec='blue', mew=1.0, ms=5.0, marker=
'o', mfc='lightblue', label='A2/A1')
ax.set_ylabel('A2/A1')
ax.grid()
ax.set_xlabel('Detuning from state 2 ($\\Gamma$)')
if nafm == 40:
ax.set_xlim(-10, 10)
figure.savefig('a2a1_detuning.png', dpi=140)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
rc('font', **{'family': 'serif'})
datfile = 'data001/a2a1_detuning_allelastic.dat'
nafms = [4, 6, 8, 10, 12, 16, 20, 24, 32, 34, 38, 40]
cols = 2
rows = len(nafms) / 2 + len(nafms) % 2
figure = plt.figure(figsize=(10.8, 3.6 * rows))
gs = matplotlib.gridspec.GridSpec(rows, cols, wspace=0.6, hspace=0.42)
<|reserved_special_token_0|>
for i, nafm in enumerate(nafms):
detuning = 6.44
a1, a2 = fetchdata.fetch_data_A1A2({'afmsize': nafm, 'ai': 0.0}, 'det',
datfile)
sunits = 9 * 6.71e-05 ** 2 / 16 / np.pi ** 2
a1[:, 1] = sunits * a1[:, 1]
a1[:, 2] = sunits * a1[:, 2]
a2[:, 1] = sunits * a2[:, 1]
a2[:, 2] = sunits * a2[:, 2]
i % len(nafms)
ax = plt.subplot(gs[i % rows, i / rows])
ax.set_title('AFM = %d sites' % nafm)
a1s = unumpy.uarray(a1[:, 1], a1[:, 2])
a2s = unumpy.uarray(a2[:, 1], a2[:, 2])
a2a1 = a2s / a1s
a2a1_mean = unumpy.nominal_values(a2a1)
a2a1_std = unumpy.std_devs(a2a1)
ax.errorbar(a2[:, 0], a2a1_mean, yerr=a2a1_std, capsize=0.0, elinewidth
=1.0, fmt='.', ecolor='blue', mec='blue', mew=1.0, ms=5.0, marker=
'o', mfc='lightblue', label='A2/A1')
ax.set_ylabel('A2/A1')
ax.grid()
ax.set_xlabel('Detuning from state 2 ($\\Gamma$)')
if nafm == 40:
ax.set_xlim(-10, 10)
figure.savefig('a2a1_detuning.png', dpi=140)
<|reserved_special_token_1|>
import numpy as np
from scipy import stats
from statarray import statdat
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import rc
rc('font', **{'family': 'serif'})
datfile = 'data001/a2a1_detuning_allelastic.dat'
nafms = [4, 6, 8, 10, 12, 16, 20, 24, 32, 34, 38, 40]
cols = 2
rows = len(nafms) / 2 + len(nafms) % 2
figure = plt.figure(figsize=(10.8, 3.6 * rows))
gs = matplotlib.gridspec.GridSpec(rows, cols, wspace=0.6, hspace=0.42)
import fetchdata
from uncertainties import unumpy
for i, nafm in enumerate(nafms):
detuning = 6.44
a1, a2 = fetchdata.fetch_data_A1A2({'afmsize': nafm, 'ai': 0.0}, 'det',
datfile)
sunits = 9 * 6.71e-05 ** 2 / 16 / np.pi ** 2
a1[:, 1] = sunits * a1[:, 1]
a1[:, 2] = sunits * a1[:, 2]
a2[:, 1] = sunits * a2[:, 1]
a2[:, 2] = sunits * a2[:, 2]
i % len(nafms)
ax = plt.subplot(gs[i % rows, i / rows])
ax.set_title('AFM = %d sites' % nafm)
a1s = unumpy.uarray(a1[:, 1], a1[:, 2])
a2s = unumpy.uarray(a2[:, 1], a2[:, 2])
a2a1 = a2s / a1s
a2a1_mean = unumpy.nominal_values(a2a1)
a2a1_std = unumpy.std_devs(a2a1)
ax.errorbar(a2[:, 0], a2a1_mean, yerr=a2a1_std, capsize=0.0, elinewidth
=1.0, fmt='.', ecolor='blue', mec='blue', mew=1.0, ms=5.0, marker=
'o', mfc='lightblue', label='A2/A1')
ax.set_ylabel('A2/A1')
ax.grid()
ax.set_xlabel('Detuning from state 2 ($\\Gamma$)')
if nafm == 40:
ax.set_xlim(-10, 10)
figure.savefig('a2a1_detuning.png', dpi=140)
<|reserved_special_token_1|>
import numpy as np
from scipy import stats
from statarray import statdat
#a2a1 = np.loadtxt('a2a1_130707_2300.dat')
#a2a1 = np.concatenate( (a2a1, np.loadtxt('a2a1_130708_1223.dat')), axis=0 )
#a2a1 = np.loadtxt('a2a1_130708_1654.dat')
#a2a1 = np.loadtxt('a2a1_130709_0030.dat')
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import rc
rc('font',**{'family':'serif'})
# Data file
datfile = 'data001/a2a1_detuning_allelastic.dat'
# Values of nafm for which plots will be shown
nafms = [4,6,8,10,12,16,20,24,32,34,38,40]
cols = 2
rows = len(nafms)/2+len(nafms)%2
figure = plt.figure(figsize=(10.8,3.6*rows))
#figure.suptitle('Bragg')
gs = matplotlib.gridspec.GridSpec( rows,cols, wspace=0.6, hspace=0.42)
import fetchdata
from uncertainties import unumpy
for i,nafm in enumerate(nafms):
detuning = 6.44
a1, a2 = fetchdata.fetch_data_A1A2( {'afmsize':nafm, 'ai':0.}, 'det', datfile )
# Put the units in the cross section
sunits = 9 * (671e-7**2) / 16 / ( np.pi**2)
a1[:,1] = sunits*a1[:,1]
a1[:,2] = sunits*a1[:,2]
a2[:,1] = sunits*a2[:,1]
a2[:,2] = sunits*a2[:,2]
i % len(nafms)
ax = plt.subplot( gs[ i%rows, i/rows] )
ax.set_title('AFM = %d sites' % nafm)
a1s = unumpy.uarray( a1[:,1] , a1[:,2] )
a2s = unumpy.uarray( a2[:,1] , a2[:,2] )
a2a1 = a2s/ a1s
a2a1_mean = unumpy.nominal_values( a2a1 )
a2a1_std = unumpy.std_devs( a2a1)
#ax.errorbar( a1[:,0], a1[:,1], yerr=a1[:,2], \
# capsize=0., elinewidth = 1. ,\
# fmt='.', ecolor='red', mec='red', \
# mew=1., ms=5.,\
# marker='o', mfc='pink', \
# label="A1")
#ax.errorbar( a2[:,0], a2[:,1], yerr=a2[:,2], \
# capsize=0., elinewidth = 1. ,\
# fmt='.', ecolor='green', mec='green', \
# mew=1., ms=5.,\
# marker='o', mfc='limegreen', \
# label="A2")
#ax2 = ax.twinx()
ax.errorbar( a2[:,0], a2a1_mean , yerr=a2a1_std, \
capsize=0., elinewidth = 1. ,\
fmt='.', ecolor='blue', mec='blue', \
mew=1., ms=5.,\
marker='o', mfc='lightblue', \
label="A2/A1")
#ax2.set_ylabel('A2/A1')
ax.set_ylabel('A2/A1')
ax.grid()
ax.set_xlabel('Detuning from state 2 ($\Gamma$)')
#ax.set_ylabel('Cross section (cm$^{2}$)')
if nafm == 40:
ax.set_xlim(-10,10)
#plt.show()
figure.savefig('a2a1_detuning.png', dpi=140)
#pylab.clf()
|
flexible
|
{
"blob_id": "feac1092d1aaf70eb4d4df919e434cdc1aa9c826",
"index": 9171,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nrc('font', **{'family': 'serif'})\n<mask token>\nfor i, nafm in enumerate(nafms):\n detuning = 6.44\n a1, a2 = fetchdata.fetch_data_A1A2({'afmsize': nafm, 'ai': 0.0}, 'det',\n datfile)\n sunits = 9 * 6.71e-05 ** 2 / 16 / np.pi ** 2\n a1[:, 1] = sunits * a1[:, 1]\n a1[:, 2] = sunits * a1[:, 2]\n a2[:, 1] = sunits * a2[:, 1]\n a2[:, 2] = sunits * a2[:, 2]\n i % len(nafms)\n ax = plt.subplot(gs[i % rows, i / rows])\n ax.set_title('AFM = %d sites' % nafm)\n a1s = unumpy.uarray(a1[:, 1], a1[:, 2])\n a2s = unumpy.uarray(a2[:, 1], a2[:, 2])\n a2a1 = a2s / a1s\n a2a1_mean = unumpy.nominal_values(a2a1)\n a2a1_std = unumpy.std_devs(a2a1)\n ax.errorbar(a2[:, 0], a2a1_mean, yerr=a2a1_std, capsize=0.0, elinewidth\n =1.0, fmt='.', ecolor='blue', mec='blue', mew=1.0, ms=5.0, marker=\n 'o', mfc='lightblue', label='A2/A1')\n ax.set_ylabel('A2/A1')\n ax.grid()\n ax.set_xlabel('Detuning from state 2 ($\\\\Gamma$)')\n if nafm == 40:\n ax.set_xlim(-10, 10)\nfigure.savefig('a2a1_detuning.png', dpi=140)\n",
"step-3": "<mask token>\nrc('font', **{'family': 'serif'})\ndatfile = 'data001/a2a1_detuning_allelastic.dat'\nnafms = [4, 6, 8, 10, 12, 16, 20, 24, 32, 34, 38, 40]\ncols = 2\nrows = len(nafms) / 2 + len(nafms) % 2\nfigure = plt.figure(figsize=(10.8, 3.6 * rows))\ngs = matplotlib.gridspec.GridSpec(rows, cols, wspace=0.6, hspace=0.42)\n<mask token>\nfor i, nafm in enumerate(nafms):\n detuning = 6.44\n a1, a2 = fetchdata.fetch_data_A1A2({'afmsize': nafm, 'ai': 0.0}, 'det',\n datfile)\n sunits = 9 * 6.71e-05 ** 2 / 16 / np.pi ** 2\n a1[:, 1] = sunits * a1[:, 1]\n a1[:, 2] = sunits * a1[:, 2]\n a2[:, 1] = sunits * a2[:, 1]\n a2[:, 2] = sunits * a2[:, 2]\n i % len(nafms)\n ax = plt.subplot(gs[i % rows, i / rows])\n ax.set_title('AFM = %d sites' % nafm)\n a1s = unumpy.uarray(a1[:, 1], a1[:, 2])\n a2s = unumpy.uarray(a2[:, 1], a2[:, 2])\n a2a1 = a2s / a1s\n a2a1_mean = unumpy.nominal_values(a2a1)\n a2a1_std = unumpy.std_devs(a2a1)\n ax.errorbar(a2[:, 0], a2a1_mean, yerr=a2a1_std, capsize=0.0, elinewidth\n =1.0, fmt='.', ecolor='blue', mec='blue', mew=1.0, ms=5.0, marker=\n 'o', mfc='lightblue', label='A2/A1')\n ax.set_ylabel('A2/A1')\n ax.grid()\n ax.set_xlabel('Detuning from state 2 ($\\\\Gamma$)')\n if nafm == 40:\n ax.set_xlim(-10, 10)\nfigure.savefig('a2a1_detuning.png', dpi=140)\n",
"step-4": "import numpy as np\nfrom scipy import stats\nfrom statarray import statdat\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom matplotlib import rc\nrc('font', **{'family': 'serif'})\ndatfile = 'data001/a2a1_detuning_allelastic.dat'\nnafms = [4, 6, 8, 10, 12, 16, 20, 24, 32, 34, 38, 40]\ncols = 2\nrows = len(nafms) / 2 + len(nafms) % 2\nfigure = plt.figure(figsize=(10.8, 3.6 * rows))\ngs = matplotlib.gridspec.GridSpec(rows, cols, wspace=0.6, hspace=0.42)\nimport fetchdata\nfrom uncertainties import unumpy\nfor i, nafm in enumerate(nafms):\n detuning = 6.44\n a1, a2 = fetchdata.fetch_data_A1A2({'afmsize': nafm, 'ai': 0.0}, 'det',\n datfile)\n sunits = 9 * 6.71e-05 ** 2 / 16 / np.pi ** 2\n a1[:, 1] = sunits * a1[:, 1]\n a1[:, 2] = sunits * a1[:, 2]\n a2[:, 1] = sunits * a2[:, 1]\n a2[:, 2] = sunits * a2[:, 2]\n i % len(nafms)\n ax = plt.subplot(gs[i % rows, i / rows])\n ax.set_title('AFM = %d sites' % nafm)\n a1s = unumpy.uarray(a1[:, 1], a1[:, 2])\n a2s = unumpy.uarray(a2[:, 1], a2[:, 2])\n a2a1 = a2s / a1s\n a2a1_mean = unumpy.nominal_values(a2a1)\n a2a1_std = unumpy.std_devs(a2a1)\n ax.errorbar(a2[:, 0], a2a1_mean, yerr=a2a1_std, capsize=0.0, elinewidth\n =1.0, fmt='.', ecolor='blue', mec='blue', mew=1.0, ms=5.0, marker=\n 'o', mfc='lightblue', label='A2/A1')\n ax.set_ylabel('A2/A1')\n ax.grid()\n ax.set_xlabel('Detuning from state 2 ($\\\\Gamma$)')\n if nafm == 40:\n ax.set_xlim(-10, 10)\nfigure.savefig('a2a1_detuning.png', dpi=140)\n",
"step-5": "\nimport numpy as np\nfrom scipy import stats\nfrom statarray import statdat\n\n#a2a1 = np.loadtxt('a2a1_130707_2300.dat')\n#a2a1 = np.concatenate( (a2a1, np.loadtxt('a2a1_130708_1223.dat')), axis=0 )\n\n#a2a1 = np.loadtxt('a2a1_130708_1654.dat')\n#a2a1 = np.loadtxt('a2a1_130709_0030.dat')\n\n\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nfrom matplotlib import rc\nrc('font',**{'family':'serif'})\n\n\n# Data file\ndatfile = 'data001/a2a1_detuning_allelastic.dat' \n\n# Values of nafm for which plots will be shown\nnafms = [4,6,8,10,12,16,20,24,32,34,38,40]\n\ncols = 2\nrows = len(nafms)/2+len(nafms)%2\n\nfigure = plt.figure(figsize=(10.8,3.6*rows))\n#figure.suptitle('Bragg')\ngs = matplotlib.gridspec.GridSpec( rows,cols, wspace=0.6, hspace=0.42) \n\nimport fetchdata\nfrom uncertainties import unumpy\n\nfor i,nafm in enumerate(nafms):\n detuning = 6.44\n a1, a2 = fetchdata.fetch_data_A1A2( {'afmsize':nafm, 'ai':0.}, 'det', datfile )\n\n # Put the units in the cross section\n sunits = 9 * (671e-7**2) / 16 / ( np.pi**2)\n a1[:,1] = sunits*a1[:,1]\n a1[:,2] = sunits*a1[:,2]\n a2[:,1] = sunits*a2[:,1]\n a2[:,2] = sunits*a2[:,2]\n \n i % len(nafms) \n ax = plt.subplot( gs[ i%rows, i/rows] )\n ax.set_title('AFM = %d sites' % nafm)\n\n a1s = unumpy.uarray( a1[:,1] , a1[:,2] ) \n a2s = unumpy.uarray( a2[:,1] , a2[:,2] )\n\n a2a1 = a2s/ a1s\n \n a2a1_mean = unumpy.nominal_values( a2a1 )\n a2a1_std = unumpy.std_devs( a2a1)\n \n \n #ax.errorbar( a1[:,0], a1[:,1], yerr=a1[:,2], \\\n # capsize=0., elinewidth = 1. ,\\\n # fmt='.', ecolor='red', mec='red', \\\n # mew=1., ms=5.,\\\n # marker='o', mfc='pink', \\\n # label=\"A1\") \n\n #ax.errorbar( a2[:,0], a2[:,1], yerr=a2[:,2], \\\n # capsize=0., elinewidth = 1. ,\\\n # fmt='.', ecolor='green', mec='green', \\\n # mew=1., ms=5.,\\\n # marker='o', mfc='limegreen', \\\n # label=\"A2\") \n\n #ax2 = ax.twinx() \n ax.errorbar( a2[:,0], a2a1_mean , yerr=a2a1_std, \\\n capsize=0., elinewidth = 1. 
,\\\n fmt='.', ecolor='blue', mec='blue', \\\n mew=1., ms=5.,\\\n marker='o', mfc='lightblue', \\\n label=\"A2/A1\") \n #ax2.set_ylabel('A2/A1') \n ax.set_ylabel('A2/A1') \n\n ax.grid()\n ax.set_xlabel('Detuning from state 2 ($\\Gamma$)')\n #ax.set_ylabel('Cross section (cm$^{2}$)')\n\n if nafm == 40:\n ax.set_xlim(-10,10)\n\n#plt.show()\nfigure.savefig('a2a1_detuning.png', dpi=140)\n#pylab.clf()\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def inception_2d_fields(img, fields, num_classes=30, is_training=True,
dropout_keep_prob=0.6, prediction_fn=layers_lib.softmax,
spatial_squeeze=True, reuse=None, scope='InceptionV1_Fields'):
with arg_scope([layers.conv2d, layers_lib.fully_connected],
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.constant_initializer(0.2),
weights_regularizer=regularizers.l2_regularizer(0.0002),
biases_regularizer=regularizers.l2_regularizer(0.0002)):
net, end_points = inception_2d.inception_v1_base(img, scope=scope,
final_endpoint='Mixed_4b')
with variable_scope.variable_scope('Logits'):
net = layers_lib.avg_pool2d(net, [5, 5], stride=3, scope=
'AvgPool_0a_5x5')
net = layers.conv2d(inputs=net, num_outputs=128, kernel_size=1)
net = tf.reshape(net, [-1, 1, 1, 4 * 4 * 128])
net = array_ops.squeeze(net, [1, 2], name='Squeeze4Fields')
net = tf.concat([net, fields], axis=1)
net = layers.fully_connected(inputs=net, num_outputs=1024)
net = layers_lib.dropout(net, dropout_keep_prob, scope='Dropout_0b'
)
logits = layers.fully_connected(inputs=net, num_outputs=
num_classes, activation_fn=None, weights_initializer=tf.
contrib.layers.xavier_initializer(), biases_initializer=tf.
constant_initializer(0.0), weights_regularizer=regularizers
.l2_regularizer(0.0002), biases_regularizer=regularizers.
l2_regularizer(0.0002), scope='InnerProduct')
if spatial_squeeze:
logits = array_ops.squeeze(logits, [1, 2], name=
'SpatialSqueeze')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope=
'Predictions')
return logits, end_points
<|reserved_special_token_1|>
import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
import inception_2d
def inception_2d_fields(img, fields, num_classes=30, is_training=True,
dropout_keep_prob=0.6, prediction_fn=layers_lib.softmax,
spatial_squeeze=True, reuse=None, scope='InceptionV1_Fields'):
with arg_scope([layers.conv2d, layers_lib.fully_connected],
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.constant_initializer(0.2),
weights_regularizer=regularizers.l2_regularizer(0.0002),
biases_regularizer=regularizers.l2_regularizer(0.0002)):
net, end_points = inception_2d.inception_v1_base(img, scope=scope,
final_endpoint='Mixed_4b')
with variable_scope.variable_scope('Logits'):
net = layers_lib.avg_pool2d(net, [5, 5], stride=3, scope=
'AvgPool_0a_5x5')
net = layers.conv2d(inputs=net, num_outputs=128, kernel_size=1)
net = tf.reshape(net, [-1, 1, 1, 4 * 4 * 128])
net = array_ops.squeeze(net, [1, 2], name='Squeeze4Fields')
net = tf.concat([net, fields], axis=1)
net = layers.fully_connected(inputs=net, num_outputs=1024)
net = layers_lib.dropout(net, dropout_keep_prob, scope='Dropout_0b'
)
logits = layers.fully_connected(inputs=net, num_outputs=
num_classes, activation_fn=None, weights_initializer=tf.
contrib.layers.xavier_initializer(), biases_initializer=tf.
constant_initializer(0.0), weights_regularizer=regularizers
.l2_regularizer(0.0002), biases_regularizer=regularizers.
l2_regularizer(0.0002), scope='InnerProduct')
if spatial_squeeze:
logits = array_ops.squeeze(logits, [1, 2], name=
'SpatialSqueeze')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope=
'Predictions')
return logits, end_points
<|reserved_special_token_1|>
import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
import inception_2d
def inception_2d_fields(img,
fields,
num_classes=30,
is_training=True,
dropout_keep_prob=0.6,
prediction_fn=layers_lib.softmax,
spatial_squeeze=True,
reuse=None,
scope='InceptionV1_Fields'
):
with arg_scope([layers.conv2d, layers_lib.fully_connected],
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.constant_initializer(0.2),
weights_regularizer=regularizers.l2_regularizer(0.0002),
biases_regularizer=regularizers.l2_regularizer(0.0002)):
net, end_points = inception_2d.inception_v1_base(img, scope=scope, final_endpoint='Mixed_4b')
with variable_scope.variable_scope('Logits'):
net = layers_lib.avg_pool2d(net, [5, 5], stride=3, scope='AvgPool_0a_5x5')
net = layers.conv2d(inputs=net, num_outputs=128, kernel_size=1)
net = tf.reshape(net, [-1, 1, 1, 4 * 4 * 128])
net = array_ops.squeeze(net,[1,2],name='Squeeze4Fields')
net = tf.concat([net,fields],axis=1)
net = layers.fully_connected(inputs=net, num_outputs=1024)
net = layers_lib.dropout(net, dropout_keep_prob, scope='Dropout_0b')
logits = layers.fully_connected(inputs=net,
num_outputs=num_classes,
activation_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.constant_initializer(0.0),
weights_regularizer=regularizers.l2_regularizer(0.0002),
biases_regularizer=regularizers.l2_regularizer(0.0002),
scope='InnerProduct')
# logits = layers.conv2d(
# net,
# num_classes, [1, 1],
# activation_fn=None,
# normalizer_fn=None,
# scope='Conv2d_0c_1x1')
if spatial_squeeze:
logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
|
flexible
|
{
"blob_id": "ca93f49fbdc1d64e0616bca035a6043b3cc80ddc",
"index": 1485,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef inception_2d_fields(img, fields, num_classes=30, is_training=True,\n dropout_keep_prob=0.6, prediction_fn=layers_lib.softmax,\n spatial_squeeze=True, reuse=None, scope='InceptionV1_Fields'):\n with arg_scope([layers.conv2d, layers_lib.fully_connected],\n weights_initializer=tf.contrib.layers.xavier_initializer(),\n biases_initializer=tf.constant_initializer(0.2),\n weights_regularizer=regularizers.l2_regularizer(0.0002),\n biases_regularizer=regularizers.l2_regularizer(0.0002)):\n net, end_points = inception_2d.inception_v1_base(img, scope=scope,\n final_endpoint='Mixed_4b')\n with variable_scope.variable_scope('Logits'):\n net = layers_lib.avg_pool2d(net, [5, 5], stride=3, scope=\n 'AvgPool_0a_5x5')\n net = layers.conv2d(inputs=net, num_outputs=128, kernel_size=1)\n net = tf.reshape(net, [-1, 1, 1, 4 * 4 * 128])\n net = array_ops.squeeze(net, [1, 2], name='Squeeze4Fields')\n net = tf.concat([net, fields], axis=1)\n net = layers.fully_connected(inputs=net, num_outputs=1024)\n net = layers_lib.dropout(net, dropout_keep_prob, scope='Dropout_0b'\n )\n logits = layers.fully_connected(inputs=net, num_outputs=\n num_classes, activation_fn=None, weights_initializer=tf.\n contrib.layers.xavier_initializer(), biases_initializer=tf.\n constant_initializer(0.0), weights_regularizer=regularizers\n .l2_regularizer(0.0002), biases_regularizer=regularizers.\n l2_regularizer(0.0002), scope='InnerProduct')\n if spatial_squeeze:\n logits = array_ops.squeeze(logits, [1, 2], name=\n 'SpatialSqueeze')\n end_points['Logits'] = logits\n end_points['Predictions'] = prediction_fn(logits, scope=\n 'Predictions')\n return logits, end_points\n",
"step-3": "import tensorflow as tf\nfrom tensorflow.contrib import layers\nfrom tensorflow.contrib.framework.python.ops import arg_scope\nfrom tensorflow.contrib.layers.python.layers import initializers\nfrom tensorflow.contrib.layers.python.layers import layers as layers_lib\nfrom tensorflow.contrib.layers.python.layers import regularizers\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import variable_scope\nimport inception_2d\n\n\ndef inception_2d_fields(img, fields, num_classes=30, is_training=True,\n dropout_keep_prob=0.6, prediction_fn=layers_lib.softmax,\n spatial_squeeze=True, reuse=None, scope='InceptionV1_Fields'):\n with arg_scope([layers.conv2d, layers_lib.fully_connected],\n weights_initializer=tf.contrib.layers.xavier_initializer(),\n biases_initializer=tf.constant_initializer(0.2),\n weights_regularizer=regularizers.l2_regularizer(0.0002),\n biases_regularizer=regularizers.l2_regularizer(0.0002)):\n net, end_points = inception_2d.inception_v1_base(img, scope=scope,\n final_endpoint='Mixed_4b')\n with variable_scope.variable_scope('Logits'):\n net = layers_lib.avg_pool2d(net, [5, 5], stride=3, scope=\n 'AvgPool_0a_5x5')\n net = layers.conv2d(inputs=net, num_outputs=128, kernel_size=1)\n net = tf.reshape(net, [-1, 1, 1, 4 * 4 * 128])\n net = array_ops.squeeze(net, [1, 2], name='Squeeze4Fields')\n net = tf.concat([net, fields], axis=1)\n net = layers.fully_connected(inputs=net, num_outputs=1024)\n net = layers_lib.dropout(net, dropout_keep_prob, scope='Dropout_0b'\n )\n logits = layers.fully_connected(inputs=net, num_outputs=\n num_classes, activation_fn=None, weights_initializer=tf.\n contrib.layers.xavier_initializer(), biases_initializer=tf.\n constant_initializer(0.0), weights_regularizer=regularizers\n .l2_regularizer(0.0002), biases_regularizer=regularizers.\n l2_regularizer(0.0002), 
scope='InnerProduct')\n if spatial_squeeze:\n logits = array_ops.squeeze(logits, [1, 2], name=\n 'SpatialSqueeze')\n end_points['Logits'] = logits\n end_points['Predictions'] = prediction_fn(logits, scope=\n 'Predictions')\n return logits, end_points\n",
"step-4": "import tensorflow as tf\nfrom tensorflow.contrib import layers\nfrom tensorflow.contrib.framework.python.ops import arg_scope\nfrom tensorflow.contrib.layers.python.layers import initializers\nfrom tensorflow.contrib.layers.python.layers import layers as layers_lib\nfrom tensorflow.contrib.layers.python.layers import regularizers\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import variable_scope\n\nimport inception_2d\n\ndef inception_2d_fields(img,\n fields,\n num_classes=30,\n is_training=True,\n dropout_keep_prob=0.6,\n prediction_fn=layers_lib.softmax,\n spatial_squeeze=True,\n reuse=None,\n scope='InceptionV1_Fields'\n ):\n with arg_scope([layers.conv2d, layers_lib.fully_connected],\n weights_initializer=tf.contrib.layers.xavier_initializer(),\n biases_initializer=tf.constant_initializer(0.2),\n weights_regularizer=regularizers.l2_regularizer(0.0002),\n biases_regularizer=regularizers.l2_regularizer(0.0002)):\n net, end_points = inception_2d.inception_v1_base(img, scope=scope, final_endpoint='Mixed_4b')\n with variable_scope.variable_scope('Logits'):\n net = layers_lib.avg_pool2d(net, [5, 5], stride=3, scope='AvgPool_0a_5x5')\n net = layers.conv2d(inputs=net, num_outputs=128, kernel_size=1)\n net = tf.reshape(net, [-1, 1, 1, 4 * 4 * 128])\n net = array_ops.squeeze(net,[1,2],name='Squeeze4Fields')\n net = tf.concat([net,fields],axis=1)\n net = layers.fully_connected(inputs=net, num_outputs=1024)\n net = layers_lib.dropout(net, dropout_keep_prob, scope='Dropout_0b')\n logits = layers.fully_connected(inputs=net,\n num_outputs=num_classes,\n activation_fn=None,\n weights_initializer=tf.contrib.layers.xavier_initializer(),\n biases_initializer=tf.constant_initializer(0.0),\n weights_regularizer=regularizers.l2_regularizer(0.0002),\n biases_regularizer=regularizers.l2_regularizer(0.0002),\n 
scope='InnerProduct')\n # logits = layers.conv2d(\n # net,\n # num_classes, [1, 1],\n # activation_fn=None,\n # normalizer_fn=None,\n # scope='Conv2d_0c_1x1')\n if spatial_squeeze:\n logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')\n\n end_points['Logits'] = logits\n end_points['Predictions'] = prediction_fn(logits, scope='Predictions')\n\n\n return logits, end_points\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import time

import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
import paho.mqtt.client as mqtt

# Fetch the service account key JSON file contents.
cred = credentials.Certificate('iot_mikro.json')
# Initialize the app with a service account, granting admin privileges.
firebase_admin.initialize_app(cred, {
    'databaseURL': 'https://mikro-b4844.firebaseio.com/'
})

# Reference to the 'lampu' (lamp) node; its value is the string "On" or "Off".
ref = db.reference('lampu')
print(ref.get())

# Create one MQTT connection up front and reuse it, instead of building a
# brand-new client on every state change and never disconnecting it.
client = mqtt.Client()
client.connect("127.0.0.1", 1883, 60)

# i tracks the last forwarded state so we only publish on transitions:
# i == 0 -> last published "On" (or initial), i == 1 -> last published "Off".
i = 0
while True:
    # Read the state once per iteration so print and both checks agree.
    state = ref.get()
    print(state)
    if state == "Off" and i == 0:
        i = 1
        client.publish("building/lampu", "Off")
    if state == "On" and i == 1:
        i = 0
        client.publish("building/lampu", "On")
    # Poll once per second instead of busy-looping against Firebase.
    time.sleep(1)
|
normal
|
{
"blob_id": "acff8618754658104ac36214901d346447a0134f",
"index": 811,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfirebase_admin.initialize_app(cred, {'databaseURL':\n 'https://mikro-b4844.firebaseio.com/'})\n<mask token>\nprint(ref.get())\n<mask token>\nwhile True:\n print(ref.get())\n if ref.get() == 'Off' and i == 0:\n i = 1\n client = mqtt.Client()\n client.connect('127.0.0.1', 1883, 60)\n client.publish('building/lampu', 'Off')\n if ref.get() == 'On' and i == 1:\n i = 0\n client = mqtt.Client()\n client.connect('127.0.0.1', 1883, 60)\n client.publish('building/lampu', 'On')\n",
"step-3": "<mask token>\ncred = credentials.Certificate('iot_mikro.json')\nfirebase_admin.initialize_app(cred, {'databaseURL':\n 'https://mikro-b4844.firebaseio.com/'})\nref = db.reference('lampu')\nprint(ref.get())\ni = 0\nwhile True:\n print(ref.get())\n if ref.get() == 'Off' and i == 0:\n i = 1\n client = mqtt.Client()\n client.connect('127.0.0.1', 1883, 60)\n client.publish('building/lampu', 'Off')\n if ref.get() == 'On' and i == 1:\n i = 0\n client = mqtt.Client()\n client.connect('127.0.0.1', 1883, 60)\n client.publish('building/lampu', 'On')\n",
"step-4": "import firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import db\nimport paho.mqtt.client as mqtt\ncred = credentials.Certificate('iot_mikro.json')\nfirebase_admin.initialize_app(cred, {'databaseURL':\n 'https://mikro-b4844.firebaseio.com/'})\nref = db.reference('lampu')\nprint(ref.get())\ni = 0\nwhile True:\n print(ref.get())\n if ref.get() == 'Off' and i == 0:\n i = 1\n client = mqtt.Client()\n client.connect('127.0.0.1', 1883, 60)\n client.publish('building/lampu', 'Off')\n if ref.get() == 'On' and i == 1:\n i = 0\n client = mqtt.Client()\n client.connect('127.0.0.1', 1883, 60)\n client.publish('building/lampu', 'On')\n",
"step-5": "import firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import db\nimport paho.mqtt.client as mqtt\n\n# Fetch the service account key JSON file contents\ncred = credentials.Certificate('iot_mikro.json')\n# Initialize the app with a service account, granting admin privileges\nfirebase_admin.initialize_app(cred, {\n 'databaseURL': 'https://mikro-b4844.firebaseio.com/'\n})\n\nref = db.reference('lampu')\nprint(ref.get())\ni=0\nwhile True:\n print(ref.get())\n if ref.get()==\"Off\" and i==0 :\n i=1\n client = mqtt.Client()\n client.connect(\"127.0.0.1\",1883,60)\n client.publish(\"building/lampu\", \"Off\")\n if ref.get()==\"On\" and i==1 :\n i=0\n client = mqtt.Client()\n client.connect(\"127.0.0.1\",1883,60)\n client.publish(\"building/lampu\", \"On\")\n# client.disconnect();\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
from __future__ import print_function
import weechat
import sys
import pickle
import json
import math
import os.path
from datetime import datetime
from datetime import date
from datetime import timedelta
from dateutil.parser import parse as datetime_parse
from os.path import expanduser
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# TODO: Add settings
# minutes_remaining = [5, 10, 15]
# notify_enabled = yes/no
# time_format = '%H:%M' ???
# Script registration metadata passed to weechat.register() below.
SCRIPT_NAME = 'weechat-gcal'
SCRIPT_AUTHOR = 'Dave Mulford'
SCRIPT_VERSION = '0.1'
SCRIPT_LICENSE = 'GPL2'
SCRIPT_DESC = 'A Google Calendar integration script that provides notifications of upcoming events.'
SCRIPT_SHUTDOWN_FN = ''
SCRIPT_CHARSET = ''
# Timeout (ms) for the hook_process child that fetches calendar events.
TIMEOUT_MS = 3000
# Values passed as the 'data' argument to get_calendar_callback so it can
# tell whether it was triggered by the /gcal command or by the timer.
CALLED_FROM_CMD = '100'
CALLED_FROM_TIMER = '200'
# Minutes-before-start values at which a highlight notification is emitted.
NOTIFICATION_THRESHOLDS = [5,15]
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/calendar.readonly']
# Where the weechat-gcal-token.pickle file is located
CACHE_DIR = os.path.join(expanduser('~'), '.cache', 'weechat-gcal')
# =============================
# GOOGLE CALENDAR FUNCTIONS
# =============================
def _load_credentials(creds_file=None):
    """Load Google API credentials, running the OAuth flow if necessary.

    Looks for a client-secrets ``credentials.json`` (explicit argument,
    then the current directory, then the home directory) and caches the
    resulting token in CACHE_DIR so later runs do not re-prompt.

    Args:
        creds_file: Optional path to a client-secrets JSON file.

    Returns:
        A credentials object to be used by the Google Calendar API.

    Raises:
        SystemExit: If no credentials.json file can be located.
    """
    creds = None

    # Validate the credentials file
    if not creds_file:
        creds_file = 'credentials.json'
        if not os.path.exists(creds_file):
            creds_file = os.path.join(expanduser('~'), 'credentials.json')
            if not os.path.exists(creds_file):
                raise SystemExit('Could not find a credentials.json file. ' \
                    'Either pass one as argument or make sure credentials.json exists in ' \
                    'the current directory or ' + expanduser('~'))

    # Create CACHE_DIR and any missing parents (e.g. ~/.cache itself).
    # The previous os.mkdir(CACHE_DIR) raised FileNotFoundError when the
    # parent directory was absent; exist_ok also avoids a race if two
    # instances start at once.
    os.makedirs(CACHE_DIR, exist_ok=True)

    pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')

    # The token pickle stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow first completes.
    if os.path.exists(pickle_filename):
        with open(pickle_filename, 'rb') as token:
            creds = pickle.load(token)

    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES)
            creds = flow.run_local_server(port=0)

        # Save the credentials for the next run
        with open(pickle_filename, 'wb') as token:
            pickle.dump(creds, token)

    return creds
def gc_get_events(num_events=50):
    """Return up to ``num_events`` upcoming events from the primary calendar.

    The query window runs from now until midnight at the start of the day
    after tomorrow; results are single (expanded) events ordered by start.
    """
    service = build('calendar', 'v3', credentials=_load_credentials())

    # The Calendar API expects RFC3339 timestamps; 'Z' indicates UTC time.
    window_start = datetime.utcnow().isoformat() + 'Z'
    cutoff_day = date.today() + timedelta(days=2)
    window_end = datetime.combine(cutoff_day, datetime.min.time()).isoformat() + 'Z'

    response = service.events().list(
        calendarId='primary',
        timeMin=window_start,
        timeMax=window_end,
        maxResults=num_events,
        singleEvents=True,
        orderBy='startTime',
    ).execute()
    return response.get('items', [])
# =============================
# WEECHAT HELPER FUNCTIONS
# =============================
def buffer_get():
    """Return a pointer to the script's output buffer, creating it if absent."""
    buf = weechat.buffer_search('python', SCRIPT_NAME)
    if buf:
        return buf

    # First use: create the buffer and configure its display properties.
    buf = weechat.buffer_new(SCRIPT_NAME, 'buffer_input', '', '', '')
    for prop, value in (('time_for_each_line', '0'),
                        ('nicklist', '0'),
                        ('title', 'Google Calendar'),
                        ('localvar_set_no_log', '1')):
        weechat.buffer_set(buf, prop, value)
    return buf
def buffer_input(data, buffer, input_data):
    """Ignore free-form (non-command) text typed into the script buffer.

    Registered as the buffer's input callback purely so weechat does not
    report an error when text is entered; it performs no action.
    """
    return weechat.WEECHAT_RC_OK
def update_gcal_buffer(buffer, events):
    """Clear the buffer and reprint the given events grouped by day."""
    weechat.buffer_clear(buffer)

    if events == []:
        weechat.prnt(buffer, 'No events for now. YAY!!!')

    # Bucket events under a printable day header, keeping HH:MM per event.
    by_day = {}
    for event in events:
        when = datetime_parse(event['date'])
        day_key = when.strftime('%a %Y-%m-%d')
        by_day.setdefault(day_key, []).append({
            'time': when.strftime('%H:%M'),
            'summary': event['summary'],
        })

    for day_key in by_day.keys():
        weechat.prnt(buffer, day_key)
        for entry in by_day[day_key]:
            weechat.prnt(buffer, '{} {}'.format(entry['time'], entry['summary']))
# =============================
# MAIN SCRIPT FUNCTIONS
# =============================
def get_calendar(*args):
    """Fetch upcoming events and return them as a JSON string.

    Runs inside a weechat child process (hook_process), so the result must
    travel back to the plugin as text. Each item in the returned list is a
    dict with 'date' (event start, dateTime or all-day date) and 'summary'.

    On any failure an empty JSON list ('[]') is returned: the previous code
    assigned the Exception object itself to ``result``, which json.dumps()
    cannot serialize (TypeError), crashing the child and leaving the
    callback to fail on json.loads('').
    """
    result = []
    try:
        for event in gc_get_events():
            start = event['start'].get('dateTime', event['start'].get('date'))
            result.append({
                'date': start,
                'summary': event['summary']
            })
    except Exception:
        # Deliberately best-effort: hook_process gives us no error channel
        # back to the plugin; an empty list keeps the callback functional.
        result = []
    return json.dumps(result)
def get_calendar_callback(data, command, return_code, out, err):
    """hook_process callback: render fetched events and raise imminent alerts.

    ``data`` is CALLED_FROM_CMD or CALLED_FROM_TIMER; highlight
    notifications are emitted only for timer-driven invocations.
    """
    events = json.loads(out)

    buffer = buffer_get()
    update_gcal_buffer(buffer, events)

    if data != CALLED_FROM_TIMER:
        return weechat.WEECHAT_RC_OK

    # Notify when an event's start time hits one of the thresholds.
    for event in events:
        starts_at = datetime_parse(event['date'])
        remaining = starts_at - datetime.now(tz=starts_at.tzinfo)
        minutes_left = math.ceil(remaining.total_seconds() / 60)

        # TODO Make minutes_remaining threshold configurable
        if minutes_left in NOTIFICATION_THRESHOLDS:
            alert = '[{}m] {}'.format(minutes_left, event['summary'])
            weechat.prnt_date_tags(buffer, 0, 'notify_highlight', alert)

    return weechat.WEECHAT_RC_OK
def gcal_command(data, buffer, args):
    """Handler for the /gcal command."""
    buffer = buffer_get()

    if args != 'init':
        # Fetch events in a child process so weechat's single thread is
        # never blocked by network I/O.
        weechat.hook_process(
            'func:get_calendar', TIMEOUT_MS,
            'get_calendar_callback', CALLED_FROM_CMD)
    # TODO Implement the 'init' subcommand.
    return weechat.WEECHAT_RC_OK
def script_main(data, remaining_calls):
    """Timer callback that refreshes calendar events.

    Weechat is single-threaded, so a new process is spawned to keep the
    client responsive if fetching Google Calendar events is slow.
    https://weechat.org/files/doc/stable/weechat_scripting.en.html#weechat_architecture
    """
    weechat.hook_process(
        'func:get_calendar', TIMEOUT_MS,
        'get_calendar_callback', CALLED_FROM_TIMER)
    return weechat.WEECHAT_RC_OK
# Register the script on /script load.
# This needs to happen first! Other weechat.* calls require registration.
weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, \
    SCRIPT_LICENSE, SCRIPT_DESC, SCRIPT_SHUTDOWN_FN, SCRIPT_CHARSET)
# Set up the /gcal command to initialize the Google Calendar authentication
# and show events in a buffer (dispatched to gcal_command above).
weechat.hook_command(
    'gcal',
    'Displays events for today and tomorrow in a new buffer.',
    '[init]',
    ' || init - Initializes the items needed for this plugin to work.',
    '',
    'gcal_command',
    ''
)
# Check once per minute (60000 ms) whether we should notify of imminent
# events. NOTE(review): second argument 60 presumably aligns calls to the
# minute boundary — confirm against the weechat hook_timer API.
weechat.hook_timer(60000, 60, 0, 'script_main', '')
|
normal
|
{
"blob_id": "0ed0fb6f9bcc768bb005222c9ae9b454f6d962ec",
"index": 9148,
"step-1": "<mask token>\n\n\ndef _load_credentials(creds_file=None):\n \"\"\"Loads the credentials from a credentials.json file or by prompting for authentication.\n Returns a credentials object to be used by the Google Sheets API.\n \"\"\"\n creds = None\n if not creds_file:\n creds_file = 'credentials.json'\n if not os.path.exists(creds_file):\n creds_file = os.path.join(expanduser('~'), 'credentials.json')\n if not os.path.exists(creds_file):\n raise SystemExit(\n 'Could not find a credentials.json file. Either pass one as argument or make sure credentials.json exists in the current directory or '\n + expanduser('~'))\n if not os.path.exists(CACHE_DIR):\n os.mkdir(CACHE_DIR)\n pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')\n if os.path.exists(pickle_filename):\n with open(pickle_filename, 'rb') as token:\n creds = pickle.load(token)\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES\n )\n creds = flow.run_local_server(port=0)\n with open(pickle_filename, 'wb') as token:\n pickle.dump(creds, token)\n return creds\n\n\ndef gc_get_events(num_events=50):\n creds = _load_credentials()\n service = build('calendar', 'v3', credentials=creds)\n now = datetime.utcnow().isoformat() + 'Z'\n tomorrow = datetime.combine(date.today() + timedelta(days=2), datetime.\n min.time()).isoformat() + 'Z'\n events_result = service.events().list(calendarId='primary', timeMin=now,\n timeMax=tomorrow, maxResults=num_events, singleEvents=True, orderBy\n ='startTime').execute()\n events = events_result.get('items', [])\n return events\n\n\n<mask token>\n\n\ndef update_gcal_buffer(buffer, events):\n weechat.buffer_clear(buffer)\n if events == []:\n weechat.prnt(buffer, 'No events for now. 
YAY!!!')\n dates = {}\n for event in events:\n dt = datetime_parse(event['date'])\n datestr = dt.strftime('%a %Y-%m-%d')\n timestr = dt.strftime('%H:%M')\n if datestr not in dates:\n dates[datestr] = []\n dates[datestr].append({'time': timestr, 'summary': event['summary']})\n for datestr in dates.keys():\n weechat.prnt(buffer, datestr)\n dt_events = dates[datestr]\n for event in dt_events:\n weechat.prnt(buffer, '{} {}'.format(event['time'], event[\n 'summary']))\n\n\ndef get_calendar(*args):\n result = []\n try:\n events = gc_get_events()\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n result.append({'date': start, 'summary': event['summary']})\n except Exception as err:\n result = err\n return json.dumps(result)\n\n\ndef get_calendar_callback(data, command, return_code, out, err):\n result = json.loads(out)\n buffer = buffer_get()\n update_gcal_buffer(buffer, result)\n if data == CALLED_FROM_TIMER:\n for event in result:\n dt = datetime_parse(event['date'])\n now = datetime.now(tz=dt.tzinfo)\n timediff = dt - now\n minutes_remaining = math.ceil(timediff.total_seconds() / 60)\n if minutes_remaining in NOTIFICATION_THRESHOLDS:\n msg = '[{}m] {}'.format(minutes_remaining, event['summary'])\n weechat.prnt_date_tags(buffer, 0, 'notify_highlight', msg)\n return weechat.WEECHAT_RC_OK\n\n\ndef gcal_command(data, buffer, args):\n buffer = buffer_get()\n if args == 'init':\n pass\n else:\n weechat.hook_process('func:get_calendar', TIMEOUT_MS,\n 'get_calendar_callback', CALLED_FROM_CMD)\n return weechat.WEECHAT_RC_OK\n\n\ndef script_main(data, remaining_calls):\n weechat.hook_process('func:get_calendar', TIMEOUT_MS,\n 'get_calendar_callback', CALLED_FROM_TIMER)\n return weechat.WEECHAT_RC_OK\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _load_credentials(creds_file=None):\n \"\"\"Loads the credentials from a credentials.json file or by prompting for authentication.\n Returns a credentials object to be used by the Google Sheets API.\n \"\"\"\n creds = None\n if not creds_file:\n creds_file = 'credentials.json'\n if not os.path.exists(creds_file):\n creds_file = os.path.join(expanduser('~'), 'credentials.json')\n if not os.path.exists(creds_file):\n raise SystemExit(\n 'Could not find a credentials.json file. Either pass one as argument or make sure credentials.json exists in the current directory or '\n + expanduser('~'))\n if not os.path.exists(CACHE_DIR):\n os.mkdir(CACHE_DIR)\n pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')\n if os.path.exists(pickle_filename):\n with open(pickle_filename, 'rb') as token:\n creds = pickle.load(token)\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES\n )\n creds = flow.run_local_server(port=0)\n with open(pickle_filename, 'wb') as token:\n pickle.dump(creds, token)\n return creds\n\n\ndef gc_get_events(num_events=50):\n creds = _load_credentials()\n service = build('calendar', 'v3', credentials=creds)\n now = datetime.utcnow().isoformat() + 'Z'\n tomorrow = datetime.combine(date.today() + timedelta(days=2), datetime.\n min.time()).isoformat() + 'Z'\n events_result = service.events().list(calendarId='primary', timeMin=now,\n timeMax=tomorrow, maxResults=num_events, singleEvents=True, orderBy\n ='startTime').execute()\n events = events_result.get('items', [])\n return events\n\n\ndef buffer_get():\n \"\"\"Finds or creates a buffer to use for script output.\n Returns a buffer pointer.\n \"\"\"\n buffer = weechat.buffer_search('python', SCRIPT_NAME)\n if not buffer:\n buffer = weechat.buffer_new(SCRIPT_NAME, 'buffer_input', '', '', '')\n weechat.buffer_set(buffer, 
'time_for_each_line', '0')\n weechat.buffer_set(buffer, 'nicklist', '0')\n weechat.buffer_set(buffer, 'title', 'Google Calendar')\n weechat.buffer_set(buffer, 'localvar_set_no_log', '1')\n return buffer\n\n\ndef buffer_input(data, buffer, input_data):\n \"\"\"A function called when text, that is not a command, is entered\n in the weechat-gcal buffer. This function exists to prevent\n errors from being shown, there is no functionality.\n \"\"\"\n return weechat.WEECHAT_RC_OK\n\n\ndef update_gcal_buffer(buffer, events):\n weechat.buffer_clear(buffer)\n if events == []:\n weechat.prnt(buffer, 'No events for now. YAY!!!')\n dates = {}\n for event in events:\n dt = datetime_parse(event['date'])\n datestr = dt.strftime('%a %Y-%m-%d')\n timestr = dt.strftime('%H:%M')\n if datestr not in dates:\n dates[datestr] = []\n dates[datestr].append({'time': timestr, 'summary': event['summary']})\n for datestr in dates.keys():\n weechat.prnt(buffer, datestr)\n dt_events = dates[datestr]\n for event in dt_events:\n weechat.prnt(buffer, '{} {}'.format(event['time'], event[\n 'summary']))\n\n\ndef get_calendar(*args):\n result = []\n try:\n events = gc_get_events()\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n result.append({'date': start, 'summary': event['summary']})\n except Exception as err:\n result = err\n return json.dumps(result)\n\n\ndef get_calendar_callback(data, command, return_code, out, err):\n result = json.loads(out)\n buffer = buffer_get()\n update_gcal_buffer(buffer, result)\n if data == CALLED_FROM_TIMER:\n for event in result:\n dt = datetime_parse(event['date'])\n now = datetime.now(tz=dt.tzinfo)\n timediff = dt - now\n minutes_remaining = math.ceil(timediff.total_seconds() / 60)\n if minutes_remaining in NOTIFICATION_THRESHOLDS:\n msg = '[{}m] {}'.format(minutes_remaining, event['summary'])\n weechat.prnt_date_tags(buffer, 0, 'notify_highlight', msg)\n return weechat.WEECHAT_RC_OK\n\n\ndef gcal_command(data, buffer, 
args):\n buffer = buffer_get()\n if args == 'init':\n pass\n else:\n weechat.hook_process('func:get_calendar', TIMEOUT_MS,\n 'get_calendar_callback', CALLED_FROM_CMD)\n return weechat.WEECHAT_RC_OK\n\n\ndef script_main(data, remaining_calls):\n weechat.hook_process('func:get_calendar', TIMEOUT_MS,\n 'get_calendar_callback', CALLED_FROM_TIMER)\n return weechat.WEECHAT_RC_OK\n\n\nweechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,\n SCRIPT_DESC, SCRIPT_SHUTDOWN_FN, SCRIPT_CHARSET)\nweechat.hook_command('gcal',\n 'Displays events for today and tomorrow in a new buffer.', '[init]',\n ' || init - Initializes the items needed for this plugin to work.', '',\n 'gcal_command', '')\nweechat.hook_timer(60000, 60, 0, 'script_main', '')\n",
"step-3": "<mask token>\nSCRIPT_NAME = 'weechat-gcal'\nSCRIPT_AUTHOR = 'Dave Mulford'\nSCRIPT_VERSION = '0.1'\nSCRIPT_LICENSE = 'GPL2'\nSCRIPT_DESC = (\n 'A Google Calendar integration script that provides notifications of upcoming events.'\n )\nSCRIPT_SHUTDOWN_FN = ''\nSCRIPT_CHARSET = ''\nTIMEOUT_MS = 3000\nCALLED_FROM_CMD = '100'\nCALLED_FROM_TIMER = '200'\nNOTIFICATION_THRESHOLDS = [5, 15]\nSCOPES = ['https://www.googleapis.com/auth/calendar.readonly']\nCACHE_DIR = os.path.join(expanduser('~'), '.cache', 'weechat-gcal')\n\n\ndef _load_credentials(creds_file=None):\n \"\"\"Loads the credentials from a credentials.json file or by prompting for authentication.\n Returns a credentials object to be used by the Google Sheets API.\n \"\"\"\n creds = None\n if not creds_file:\n creds_file = 'credentials.json'\n if not os.path.exists(creds_file):\n creds_file = os.path.join(expanduser('~'), 'credentials.json')\n if not os.path.exists(creds_file):\n raise SystemExit(\n 'Could not find a credentials.json file. 
Either pass one as argument or make sure credentials.json exists in the current directory or '\n + expanduser('~'))\n if not os.path.exists(CACHE_DIR):\n os.mkdir(CACHE_DIR)\n pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')\n if os.path.exists(pickle_filename):\n with open(pickle_filename, 'rb') as token:\n creds = pickle.load(token)\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES\n )\n creds = flow.run_local_server(port=0)\n with open(pickle_filename, 'wb') as token:\n pickle.dump(creds, token)\n return creds\n\n\ndef gc_get_events(num_events=50):\n creds = _load_credentials()\n service = build('calendar', 'v3', credentials=creds)\n now = datetime.utcnow().isoformat() + 'Z'\n tomorrow = datetime.combine(date.today() + timedelta(days=2), datetime.\n min.time()).isoformat() + 'Z'\n events_result = service.events().list(calendarId='primary', timeMin=now,\n timeMax=tomorrow, maxResults=num_events, singleEvents=True, orderBy\n ='startTime').execute()\n events = events_result.get('items', [])\n return events\n\n\ndef buffer_get():\n \"\"\"Finds or creates a buffer to use for script output.\n Returns a buffer pointer.\n \"\"\"\n buffer = weechat.buffer_search('python', SCRIPT_NAME)\n if not buffer:\n buffer = weechat.buffer_new(SCRIPT_NAME, 'buffer_input', '', '', '')\n weechat.buffer_set(buffer, 'time_for_each_line', '0')\n weechat.buffer_set(buffer, 'nicklist', '0')\n weechat.buffer_set(buffer, 'title', 'Google Calendar')\n weechat.buffer_set(buffer, 'localvar_set_no_log', '1')\n return buffer\n\n\ndef buffer_input(data, buffer, input_data):\n \"\"\"A function called when text, that is not a command, is entered\n in the weechat-gcal buffer. 
This function exists to prevent\n errors from being shown, there is no functionality.\n \"\"\"\n return weechat.WEECHAT_RC_OK\n\n\ndef update_gcal_buffer(buffer, events):\n weechat.buffer_clear(buffer)\n if events == []:\n weechat.prnt(buffer, 'No events for now. YAY!!!')\n dates = {}\n for event in events:\n dt = datetime_parse(event['date'])\n datestr = dt.strftime('%a %Y-%m-%d')\n timestr = dt.strftime('%H:%M')\n if datestr not in dates:\n dates[datestr] = []\n dates[datestr].append({'time': timestr, 'summary': event['summary']})\n for datestr in dates.keys():\n weechat.prnt(buffer, datestr)\n dt_events = dates[datestr]\n for event in dt_events:\n weechat.prnt(buffer, '{} {}'.format(event['time'], event[\n 'summary']))\n\n\ndef get_calendar(*args):\n result = []\n try:\n events = gc_get_events()\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n result.append({'date': start, 'summary': event['summary']})\n except Exception as err:\n result = err\n return json.dumps(result)\n\n\ndef get_calendar_callback(data, command, return_code, out, err):\n result = json.loads(out)\n buffer = buffer_get()\n update_gcal_buffer(buffer, result)\n if data == CALLED_FROM_TIMER:\n for event in result:\n dt = datetime_parse(event['date'])\n now = datetime.now(tz=dt.tzinfo)\n timediff = dt - now\n minutes_remaining = math.ceil(timediff.total_seconds() / 60)\n if minutes_remaining in NOTIFICATION_THRESHOLDS:\n msg = '[{}m] {}'.format(minutes_remaining, event['summary'])\n weechat.prnt_date_tags(buffer, 0, 'notify_highlight', msg)\n return weechat.WEECHAT_RC_OK\n\n\ndef gcal_command(data, buffer, args):\n buffer = buffer_get()\n if args == 'init':\n pass\n else:\n weechat.hook_process('func:get_calendar', TIMEOUT_MS,\n 'get_calendar_callback', CALLED_FROM_CMD)\n return weechat.WEECHAT_RC_OK\n\n\ndef script_main(data, remaining_calls):\n weechat.hook_process('func:get_calendar', TIMEOUT_MS,\n 'get_calendar_callback', CALLED_FROM_TIMER)\n return 
weechat.WEECHAT_RC_OK\n\n\nweechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,\n SCRIPT_DESC, SCRIPT_SHUTDOWN_FN, SCRIPT_CHARSET)\nweechat.hook_command('gcal',\n 'Displays events for today and tomorrow in a new buffer.', '[init]',\n ' || init - Initializes the items needed for this plugin to work.', '',\n 'gcal_command', '')\nweechat.hook_timer(60000, 60, 0, 'script_main', '')\n",
"step-4": "from __future__ import print_function\nimport weechat\nimport sys\nimport pickle\nimport json\nimport math\nimport os.path\nfrom datetime import datetime\nfrom datetime import date\nfrom datetime import timedelta\nfrom dateutil.parser import parse as datetime_parse\nfrom os.path import expanduser\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\nSCRIPT_NAME = 'weechat-gcal'\nSCRIPT_AUTHOR = 'Dave Mulford'\nSCRIPT_VERSION = '0.1'\nSCRIPT_LICENSE = 'GPL2'\nSCRIPT_DESC = (\n 'A Google Calendar integration script that provides notifications of upcoming events.'\n )\nSCRIPT_SHUTDOWN_FN = ''\nSCRIPT_CHARSET = ''\nTIMEOUT_MS = 3000\nCALLED_FROM_CMD = '100'\nCALLED_FROM_TIMER = '200'\nNOTIFICATION_THRESHOLDS = [5, 15]\nSCOPES = ['https://www.googleapis.com/auth/calendar.readonly']\nCACHE_DIR = os.path.join(expanduser('~'), '.cache', 'weechat-gcal')\n\n\ndef _load_credentials(creds_file=None):\n \"\"\"Loads the credentials from a credentials.json file or by prompting for authentication.\n Returns a credentials object to be used by the Google Sheets API.\n \"\"\"\n creds = None\n if not creds_file:\n creds_file = 'credentials.json'\n if not os.path.exists(creds_file):\n creds_file = os.path.join(expanduser('~'), 'credentials.json')\n if not os.path.exists(creds_file):\n raise SystemExit(\n 'Could not find a credentials.json file. 
Either pass one as argument or make sure credentials.json exists in the current directory or '\n + expanduser('~'))\n if not os.path.exists(CACHE_DIR):\n os.mkdir(CACHE_DIR)\n pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')\n if os.path.exists(pickle_filename):\n with open(pickle_filename, 'rb') as token:\n creds = pickle.load(token)\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES\n )\n creds = flow.run_local_server(port=0)\n with open(pickle_filename, 'wb') as token:\n pickle.dump(creds, token)\n return creds\n\n\ndef gc_get_events(num_events=50):\n creds = _load_credentials()\n service = build('calendar', 'v3', credentials=creds)\n now = datetime.utcnow().isoformat() + 'Z'\n tomorrow = datetime.combine(date.today() + timedelta(days=2), datetime.\n min.time()).isoformat() + 'Z'\n events_result = service.events().list(calendarId='primary', timeMin=now,\n timeMax=tomorrow, maxResults=num_events, singleEvents=True, orderBy\n ='startTime').execute()\n events = events_result.get('items', [])\n return events\n\n\ndef buffer_get():\n \"\"\"Finds or creates a buffer to use for script output.\n Returns a buffer pointer.\n \"\"\"\n buffer = weechat.buffer_search('python', SCRIPT_NAME)\n if not buffer:\n buffer = weechat.buffer_new(SCRIPT_NAME, 'buffer_input', '', '', '')\n weechat.buffer_set(buffer, 'time_for_each_line', '0')\n weechat.buffer_set(buffer, 'nicklist', '0')\n weechat.buffer_set(buffer, 'title', 'Google Calendar')\n weechat.buffer_set(buffer, 'localvar_set_no_log', '1')\n return buffer\n\n\ndef buffer_input(data, buffer, input_data):\n \"\"\"A function called when text, that is not a command, is entered\n in the weechat-gcal buffer. 
This function exists to prevent\n errors from being shown, there is no functionality.\n \"\"\"\n return weechat.WEECHAT_RC_OK\n\n\ndef update_gcal_buffer(buffer, events):\n weechat.buffer_clear(buffer)\n if events == []:\n weechat.prnt(buffer, 'No events for now. YAY!!!')\n dates = {}\n for event in events:\n dt = datetime_parse(event['date'])\n datestr = dt.strftime('%a %Y-%m-%d')\n timestr = dt.strftime('%H:%M')\n if datestr not in dates:\n dates[datestr] = []\n dates[datestr].append({'time': timestr, 'summary': event['summary']})\n for datestr in dates.keys():\n weechat.prnt(buffer, datestr)\n dt_events = dates[datestr]\n for event in dt_events:\n weechat.prnt(buffer, '{} {}'.format(event['time'], event[\n 'summary']))\n\n\ndef get_calendar(*args):\n result = []\n try:\n events = gc_get_events()\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n result.append({'date': start, 'summary': event['summary']})\n except Exception as err:\n result = err\n return json.dumps(result)\n\n\ndef get_calendar_callback(data, command, return_code, out, err):\n result = json.loads(out)\n buffer = buffer_get()\n update_gcal_buffer(buffer, result)\n if data == CALLED_FROM_TIMER:\n for event in result:\n dt = datetime_parse(event['date'])\n now = datetime.now(tz=dt.tzinfo)\n timediff = dt - now\n minutes_remaining = math.ceil(timediff.total_seconds() / 60)\n if minutes_remaining in NOTIFICATION_THRESHOLDS:\n msg = '[{}m] {}'.format(minutes_remaining, event['summary'])\n weechat.prnt_date_tags(buffer, 0, 'notify_highlight', msg)\n return weechat.WEECHAT_RC_OK\n\n\ndef gcal_command(data, buffer, args):\n buffer = buffer_get()\n if args == 'init':\n pass\n else:\n weechat.hook_process('func:get_calendar', TIMEOUT_MS,\n 'get_calendar_callback', CALLED_FROM_CMD)\n return weechat.WEECHAT_RC_OK\n\n\ndef script_main(data, remaining_calls):\n weechat.hook_process('func:get_calendar', TIMEOUT_MS,\n 'get_calendar_callback', CALLED_FROM_TIMER)\n return 
weechat.WEECHAT_RC_OK\n\n\nweechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,\n SCRIPT_DESC, SCRIPT_SHUTDOWN_FN, SCRIPT_CHARSET)\nweechat.hook_command('gcal',\n 'Displays events for today and tomorrow in a new buffer.', '[init]',\n ' || init - Initializes the items needed for this plugin to work.', '',\n 'gcal_command', '')\nweechat.hook_timer(60000, 60, 0, 'script_main', '')\n",
"step-5": "#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport weechat\nimport sys\nimport pickle\nimport json\nimport math\nimport os.path\nfrom datetime import datetime\nfrom datetime import date\nfrom datetime import timedelta\nfrom dateutil.parser import parse as datetime_parse\nfrom os.path import expanduser\n\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\n\n# TODO: Add settings\n# minutes_remaining = [5, 10, 15]\n# notify_enabled = yes/no\n# time_format = '%H:%M' ???\n\nSCRIPT_NAME = 'weechat-gcal'\nSCRIPT_AUTHOR = 'Dave Mulford'\nSCRIPT_VERSION = '0.1'\nSCRIPT_LICENSE = 'GPL2'\nSCRIPT_DESC = 'A Google Calendar integration script that provides notifications of upcoming events.'\nSCRIPT_SHUTDOWN_FN = ''\nSCRIPT_CHARSET = ''\n\nTIMEOUT_MS = 3000\n\nCALLED_FROM_CMD = '100'\nCALLED_FROM_TIMER = '200'\n\nNOTIFICATION_THRESHOLDS = [5,15]\n\n# If modifying these scopes, delete the file token.pickle.\nSCOPES = ['https://www.googleapis.com/auth/calendar.readonly']\n\n# Where the weechat-gcal-token.pickle file is located\nCACHE_DIR = os.path.join(expanduser('~'), '.cache', 'weechat-gcal')\n\n# =============================\n# GOOGLE CALENDAR FUNCTIONS\n# =============================\n\ndef _load_credentials(creds_file=None):\n \"\"\"Loads the credentials from a credentials.json file or by prompting for authentication.\n Returns a credentials object to be used by the Google Sheets API.\n \"\"\"\n\n creds = None\n\n # Validate the credentials file\n if not creds_file:\n creds_file = 'credentials.json'\n if not os.path.exists(creds_file):\n creds_file = os.path.join(expanduser('~'), 'credentials.json')\n if not os.path.exists(creds_file):\n raise SystemExit('Could not find a credentials.json file. 
' \\\n 'Either pass one as argument or make sure credentials.json exists in ' \\\n 'the current directory or ' + expanduser('~'))\n\n # Creates CACHE_DIR if it does not exist\n # mode 0x777 (the default) is used because the system's umask value is masked out first\n if not os.path.exists(CACHE_DIR):\n os.mkdir(CACHE_DIR)\n\n pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')\n\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first time.\n if os.path.exists(pickle_filename):\n with open(pickle_filename, 'rb') as token:\n creds = pickle.load(token)\n\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES)\n creds = flow.run_local_server(port=0)\n\n # Save the credentials for the next run\n with open(pickle_filename, 'wb') as token:\n pickle.dump(creds, token)\n\n return creds\n\ndef gc_get_events(num_events=50):\n creds = _load_credentials()\n service = build('calendar', 'v3', credentials=creds)\n\n # Call the Calendar API\n now = datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n tomorrow = datetime.combine( \\\n date.today() + timedelta(days=2), \\\n datetime.min.time()) \\\n .isoformat() + 'Z'\n\n #print('Getting the upcoming {} events between {} and {}'.format(num_events, now, tomorrow))\n events_result = service.events().list(calendarId='primary', timeMin=now, timeMax=tomorrow,\n maxResults=num_events, singleEvents=True,\n orderBy='startTime').execute()\n events = events_result.get('items', [])\n return events\n\n# =============================\n# WEECHAT HELPER FUNCTIONS\n# =============================\n\ndef buffer_get():\n \"\"\"Finds or creates a buffer to use for script output.\n Returns a buffer pointer.\n \"\"\"\n 
buffer = weechat.buffer_search('python', SCRIPT_NAME)\n\n if not buffer:\n buffer = weechat.buffer_new(SCRIPT_NAME, 'buffer_input', '', '', '')\n weechat.buffer_set(buffer, 'time_for_each_line', '0')\n weechat.buffer_set(buffer, 'nicklist', '0')\n weechat.buffer_set(buffer, 'title', 'Google Calendar')\n weechat.buffer_set(buffer, 'localvar_set_no_log', '1')\n\n return buffer\n\ndef buffer_input(data, buffer, input_data):\n \"\"\"A function called when text, that is not a command, is entered\n in the weechat-gcal buffer. This function exists to prevent\n errors from being shown, there is no functionality.\n \"\"\"\n return weechat.WEECHAT_RC_OK\n\ndef update_gcal_buffer(buffer, events):\n weechat.buffer_clear(buffer)\n\n if events == []:\n weechat.prnt(buffer, 'No events for now. YAY!!!')\n\n dates = {}\n for event in events:\n dt = datetime_parse(event['date'])\n datestr = dt.strftime('%a %Y-%m-%d')\n timestr = dt.strftime('%H:%M')\n\n if datestr not in dates:\n dates[datestr] = []\n\n dates[datestr].append({\n 'time': timestr,\n 'summary': event['summary']\n })\n\n for datestr in dates.keys():\n weechat.prnt(buffer, datestr)\n\n dt_events = dates[datestr]\n for event in dt_events:\n weechat.prnt(buffer, '{} {}'.format(event['time'], event['summary']))\n\n# =============================\n# MAIN SCRIPT FUNCTIONS\n# =============================\n\ndef get_calendar(*args):\n result = []\n\n try:\n events = gc_get_events()\n\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n result.append({\n 'date': start,\n 'summary': event['summary']\n })\n except Exception as err:\n result = err\n\n return json.dumps(result)\n\ndef get_calendar_callback(data, command, return_code, out, err):\n result = json.loads(out)\n\n buffer = buffer_get()\n update_gcal_buffer(buffer, result)\n\n # Notify if any events are happening in 10 minutes!\n if data == CALLED_FROM_TIMER:\n for event in result:\n #weechat.prnt(buffer, 'Handling event!')\n dt = 
datetime_parse(event['date'])\n now = datetime.now(tz=dt.tzinfo)\n timediff = dt - now\n minutes_remaining = math.ceil(timediff.total_seconds() / 60)\n\n #weechat.prnt(buffer, '{} - {} = {} ({} mins)'.format(dt, now, timediff, minutes_remaining))\n\n # TODO Make minutes_remaining threshold configurable\n if minutes_remaining in NOTIFICATION_THRESHOLDS:\n msg = '[{}m] {}'.format(minutes_remaining, event['summary'])\n weechat.prnt_date_tags(buffer, 0, 'notify_highlight', msg)\n\n return weechat.WEECHAT_RC_OK\n\ndef gcal_command(data, buffer, args):\n buffer = buffer_get()\n\n # TODO Implement init\n if args == 'init':\n pass\n else:\n weechat.hook_process(\n 'func:get_calendar',\n TIMEOUT_MS,\n 'get_calendar_callback',\n CALLED_FROM_CMD\n )\n\n return weechat.WEECHAT_RC_OK\n\ndef script_main(data, remaining_calls):\n # Weechat is single-threaded so a new process is created so other things aren't held up\n # if retrieving Google Calendar events doesn't return in a timely manner.\n # https://weechat.org/files/doc/stable/weechat_scripting.en.html#weechat_architecture\n weechat.hook_process(\n 'func:get_calendar',\n TIMEOUT_MS,\n 'get_calendar_callback',\n CALLED_FROM_TIMER\n )\n\n return weechat.WEECHAT_RC_OK\n\n# Register the script on /script load\n# This needs to happen first!\nweechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, \\\n SCRIPT_LICENSE, SCRIPT_DESC, SCRIPT_SHUTDOWN_FN, SCRIPT_CHARSET)\n\n# Setup a command to initialize the Google Calendar authentication and show events in a buffer.\nweechat.hook_command(\n 'gcal',\n 'Displays events for today and tomorrow in a new buffer.',\n '[init]',\n ' || init - Initializes the items needed for this plugin to work.',\n '',\n 'gcal_command',\n ''\n)\n\n# Check once per minute whether we should notify of imminent events\nweechat.hook_timer(60000, 60, 0, 'script_main', '')\n",
"step-ids": [
7,
10,
11,
12,
13
]
}
|
[
7,
10,
11,
12,
13
] |
from django.shortcuts import render
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from .models import Document, Organization, UserProfile, Shop
#from .forms import DocUploadForm, ShopEditForm
from django.shortcuts import render_to_response, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login
from django.shortcuts import get_object_or_404
from django.contrib.auth.decorators import login_required
from django.forms import ModelForm
from django.utils.translation import ugettext_lazy as _
from django import forms
from .models import *
class DocUploadForm(forms.ModelForm):
    """ModelForm for creating/editing a Document, with multi-select tags."""
    # All tags are offered; the autocomplete widget variants below were abandoned.
    tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all())
    class Meta:
        model = Document
        # widgets = {'tags' : autocomplete_light.MultipleChoiceWidget('TagAutocomplete')}
        # autocomplete_fields = ('tags','topic','university',)
        # These fields are set by the views (e.g. docUpload), not by the user.
        exclude = ['organization','private_user','is_public','is_user_private','display']
class ShopForm(forms.Form):
    """Combined shop-details + employee-account form used by shopCreate.

    Collects the shop's contact/location information together with the
    credentials (username/password/password1) for the shop employee's
    new user account.
    """
    shopName = forms.CharField(max_length=100)
    email = forms.EmailField(widget=forms.TextInput(attrs={'class': 'mandatory', 'placeholder': 'Email'}),
        label=_(u'email address'), required=False)
    address = forms.CharField(widget= forms.Textarea())
    pincode = forms.IntegerField()
    nearest_college = forms.CharField(max_length=200, required=False)
    nearest_town = forms.CharField(max_length=200, required=False)
    telephone = forms.CharField(max_length=14)
    longitude = forms.DecimalField(max_digits=11, decimal_places=7)
    latitude = forms.DecimalField(max_digits=11, decimal_places=7)
    username = forms.CharField(widget=forms.TextInput(attrs={'class': 'mandatory', 'placeholder': 'User Name'}),
        label=_(u'Username'))
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': 'Password'}, render_value=False),
        label=_(u'Password'))
    password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': ' Password Again'}, render_value=False),
        label=_(u'Password Again'))
    services = forms.ModelMultipleChoiceField(queryset=Service.objects.all())
    def clean(self):
        """
        Verify that the values entered into the two password fields
        match. Note that an error here will end up in
        ``non_field_errors()`` because it doesn't apply to a single
        field.
        """
        if 'password1' in self.cleaned_data and 'password' in self.cleaned_data:
            if self.cleaned_data['password1'] != self.cleaned_data['password']:
                raise forms.ValidationError(_(u'You must type the same password each time'))
        return self.cleaned_data
    # def clean_email(self):
    #     if 'email' in self.cleaned_data:
    #         try:
    #             user = User.objects.get(username= self.cleaned_data["username"])
    #             raise forms.ValidationError(_(u'Already this Username is Registered'))
    #         except User.DoesNotExist:
    #             pass
    #     return self.cleaned_data["email"]
class ShopEditForm(forms.ModelForm):
    """ModelForm for editing an existing Shop (used by shopProfile/shopEditSave)."""
    class Meta:
        model = Shop
        # Coordinates and activation state are managed elsewhere, not user-editable.
        exclude = ['latitude','longitude','is_active']
@login_required
def indexEmp(request, shopid=None):
    """Render the employee index page.

    Fixes a NameError in the original: ``shopid`` was referenced without
    being defined anywhere. It is now an optional parameter (default
    ``None``), which is backward-compatible with existing callers.

    NOTE(review): a later ``indexEmp`` definition in this module shadows
    this one at import time — confirm which is intended.
    """
    context = {'shop': shopid}
    return render(request, 'index.html', context)
@login_required
def docUpload(request):
    """Handle document upload for owners (userType 1) and employees (userType 2).

    GET: render the upload form appropriate to the user type.
    POST: validate and save the document, attach it to the user's
    organization, then redirect to the matching document list.

    Fixes over the original: the form is now validated before ``save()``
    (previously an invalid POST raised ValueError → HTTP 500), and an
    unknown userType no longer leaves ``org`` unbound.
    """
    user = UserProfile.objects.get(user=request.user)
    if request.method == 'POST':
        if user.userType == 1:
            org = Organization.objects.get(owner=request.user)
        elif user.userType == 2:
            org = Organization.objects.get(employee=request.user)
        else:
            # No organization to attach the document to.
            return HttpResponse("You don't have permission")
        data = DocUploadForm(request.POST, request.FILES)
        if data.is_valid():
            new_doc = data.save(commit=False)
            new_doc.organization = org
            new_doc.is_public = True
            new_doc.save()
            data.save_m2m()  # required after commit=False to persist tags
            if user.userType == 1:
                return HttpResponseRedirect(reverse('documentListOwner'))
            return HttpResponseRedirect(reverse('documentListEmp'))
        form = data  # invalid: fall through and re-render with errors
    else:
        form = DocUploadForm()
    if user.userType == 1:
        context = {"docUploadForm": form}
        return render(request, 'printo_app/docUpload-owner.html', context)
    if user.userType == 2:
        shopRate = Shop.objects.get(employee=request.user).rate
        context = {"docUploadForm": form, "rate": shopRate}
        return render(request, 'printo_app/docUpload-emp.html', context)
@login_required
def docList(request):
    """List public documents for the current user's organization.

    Owners (userType 1) and employees (userType 2) each get their own
    template; employees see newest uploads first.
    """
    profile = UserProfile.objects.get(user=request.user)
    if profile.userType == 1:
        organization = Organization.objects.get(owner=request.user)
        documents = Document.objects.filter(is_public=True).filter(organization=organization)
        return render(request, 'printo_app/docList-owner.html', {"docs": documents})
    elif profile.userType == 2:
        organization = Organization.objects.get(employee=request.user)
        documents = (Document.objects.filter(is_public=True)
                             .filter(organization=organization)
                             .order_by('-uploadedDate'))
        return render(request, 'printo_app/docList-emp.html', {"docs": documents})
@login_required
def docListOwner(request):
    """List public documents for an owner's (userType 1) organization.

    NOTE(review): a later ``def docListOwner(request): pass`` in this
    module appears to shadow this implementation at import time — the
    duplicate stub should be removed or merged.
    """
    user = UserProfile.objects.get(user=request.user)
    if(user.userType == 1 ):
        org = Organization.objects.get(owner = request.user)
        docList = Document.objects.filter(is_public=True).filter(organization=org)
        context = {"docs":docList}
        return render(request,'printo_app/docList-owner.html',context)
@login_required
def docDetail(request, docid):
    """Show a single document together with an edit form pre-bound to it."""
    document = Document.objects.get(id=docid)
    edit_form = DocUploadForm(instance=document)
    return render(request, 'printo_app/docDetail.html',
                  {"docEditForm": edit_form, "doc": document})
@login_required
def docEditSave(request, docid):
    """Persist edits to an existing document, then return to the list.

    Fixes over the original: ``save()`` is only called after
    ``is_valid()`` (previously an invalid POST raised ValueError →
    HTTP 500), and the never-used ``context`` dict is removed.
    """
    currentDoc = Document.objects.get(id=docid)
    docDetail = DocUploadForm(request.POST, request.FILES, instance=currentDoc)
    if docDetail.is_valid():
        docDetail.save()
    return HttpResponseRedirect(reverse('documentList'))
@login_required
def shopProfile(request, shopid=None):
    """Render the shop profile page.

    Employees (userType 2) see their shop's details plus an edit form;
    owners currently get an empty context, matching the original.
    """
    profile = UserProfile.objects.get(user=request.user)
    context = {}
    if profile.userType == 2:
        employee_shop = Shop.objects.get(employee=request.user)
        context = {'shopForm': ShopEditForm(), 'details': employee_shop}
    return render(request, 'printo_app/shopProfile.html', context)
@login_required
def shopEditSave(request):
    """Save edits to the employee's shop profile, then redirect back.

    Fixes over the original: the form is validated before ``save()`` —
    previously an invalid POST raised ValueError (HTTP 500). Invalid
    input now simply redirects back to the profile page.
    """
    shop = Shop.objects.get(employee=request.user)
    shopForm = ShopEditForm(request.POST, instance=shop)
    if shopForm.is_valid():
        shopForm.save()
    return HttpResponseRedirect(reverse('shopProfile'))
@login_required
def indexEmp(request, shopid=None):
    """Redirect the logged-in user to their order list.

    Fixes over the original: the ``is_owner`` flag and ``context`` dict
    were computed and then discarded by the redirect — that dead code is
    removed. The profile lookup is kept for its side effect: it raises
    ``DoesNotExist`` for users without a profile, exactly as before.
    """
    UserProfile.objects.get(user=request.user)
    return HttpResponseRedirect(reverse('orderList'))
@login_required
def orderList(request, shopid=None):
    """Render the employee's shop orders with per-status counts."""
    shop = Shop.objects.get(employee=request.user)
    orders = Order.objects.filter(shop=shop)
    context = {
        "orders": orders,
        "new_count": orders.filter(is_new=True).count(),
        "pending_count": orders.filter(is_accepted=True).count(),
        "completed_count": orders.filter(is_printed=True).count(),
        "delivered_count": orders.filter(is_delivered=True).count(),
    }
    return render(request, 'printo_app/ordersList.html', context)
@login_required
def shopList(request):
    """List every shop belonging to the owner's organization."""
    organization = Organization.objects.get(owner=request.user)
    context = {'shops': Shop.objects.filter(owner=organization)}
    return render(request, 'printo_app/shopList.html', context)
@login_required
def shopCreate(request):
    """Create a shop plus its employee login account (owner-only).

    Fixes over the original:
    * removed a leftover ``import ipdb; ipdb.set_trace()`` debugger
      breakpoint that would hang every POST in production;
    * on an invalid POST the view no longer crashes with a NameError
      (``userform`` was only bound in the GET branch) — it re-renders
      the bound form so validation errors are shown.
    """
    uprofile = get_object_or_404(UserProfile, user=request.user)
    if uprofile.userType != 1:
        return HttpResponse("You don't have permission")
    userform = 'this form is to be deleted'  # kept: template still receives it
    if request.method == 'POST':
        form = ShopForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data.get("username", None)
            password = form.cleaned_data.get("password", None)
            telephone = form.cleaned_data.get("telephone", None)
            email = request.user.email
            if username != None:
                user = User.objects.create_user(username=username, email=email, password=password)
                userprofile = UserProfile()
                userprofile.user = user
                userprofile.userType = 2
                if telephone != None:
                    userprofile.telephone = telephone
                userprofile.save()
                shopprofile = Shop()
                shopprofile.employee = user
                shopprofile.owner = Organization.objects.get(owner=request.user)
                shopprofile.email = email
                shopprofile.shopName = form.cleaned_data.get("shopName", None)
                shopprofile.pincode = form.cleaned_data.get("pincode", None)
                shopprofile.address = form.cleaned_data.get("address", None)
                shopprofile.latitude = form.cleaned_data.get("latitude", None)
                shopprofile.longitude = form.cleaned_data.get("longitude", None)
                shopprofile.telephone = form.cleaned_data.get("telephone", None)
                shopprofile.save()
                # M2M assignment must come after save() so the shop has a pk.
                shopprofile.services = form.cleaned_data.get("services", None)
            return HttpResponseRedirect(reverse('shopList'))
        shopform = form  # invalid: re-render the bound form with errors
    else:
        shopform = ShopForm()
    context = {'shopCreateForm': shopform, 'userForm': userform}
    return render(request, 'printo_app/shopCreate.html', context)
@login_required
def index(request):
    """Dispatch the logged-in user to the owner or employee landing page."""
    profile = UserProfile.objects.get(user=request.user)
    if profile.userType == 1:
        return HttpResponseRedirect(reverse('OwnerMain'))
    if profile.userType == 2:
        return HttpResponseRedirect(reverse('EmployeeMain'))
    return None
class RegistrationForm(forms.Form):
    """Owner sign-up form used by index_main.

    The email doubles as the username (see clean_email and the
    ``create_user`` call in index_main).
    """
    email = forms.EmailField(widget=forms.TextInput(attrs={'class': 'mandatory', 'placeholder': 'Email'}),
        label=_(u'email address'))
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': 'Password'}, render_value=False),
        label=_(u'Password'))
    password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': ' Password Again'}, render_value=False),
        label=_(u'Password Again'))
    mobile = forms.CharField(max_length=14)
    def clean(self):
        """
        Verify that the values entered into the two password fields
        match. Note that an error here will end up in
        ``non_field_errors()`` because it doesn't apply to a single
        field.
        """
        if 'password1' in self.cleaned_data and 'password' in self.cleaned_data:
            if self.cleaned_data['password1'] != self.cleaned_data['password']:
                raise forms.ValidationError(_(u'You must type the same password each time'))
        return self.cleaned_data
    def clean_email(self):
        """Reject emails already registered as usernames."""
        if 'email' in self.cleaned_data:
            try:
                user = User.objects.get(username= self.cleaned_data["email"])
                raise forms.ValidationError(_(u'Already Email Address is registered'))
            except User.DoesNotExist:
                pass
            return self.cleaned_data["email"]
def index_main(request):
    """Landing page: redirect authenticated users, otherwise handle sign-up.

    On a valid registration POST, creates the User (email doubles as
    username), a userType-1 UserProfile and an Organization owned by
    the new user, then returns a plain "Thanks" response.
    """
    if request.user.is_authenticated():
        return HttpResponseRedirect(reverse("main"))
    if request.method == "POST":
        form = RegistrationForm(request.POST)
        if form.is_valid():
            email = form.cleaned_data["email"]
            new_user = User.objects.create_user(email, email, form.cleaned_data["password"])
            # Send a mail with verification code
            profile = UserProfile()
            profile.user = new_user
            profile.userType = 1
            profile.mobile = form.cleaned_data["mobile"]
            profile.save()
            org = Organization()
            org.owner = new_user
            org.save()
            return HttpResponse("Thanks")
    else:
        form = RegistrationForm()
    return render(request, 'index_main.html', context={"form": form})
def docListOwner(request):
    # NOTE(review): placeholder that shadows the earlier, decorated
    # docListOwner implementation above (the later definition wins at
    # import time). Remove this stub or merge it with the real view.
    pass
def docUploadOwner(request):
    # Placeholder view — not implemented; docUpload currently renders the
    # owner upload template for userType 1.
    pass
@login_required
def indexOwner(request):
    """Render the owner landing page (no context data needed)."""
    return render(request, 'ownerMain.html', {})
# ====================================
# DATA PROVIDERS
# ====================================
import json
from django.core import serializers
def get_universitys(request):
    """JSON data provider: map university name -> (name, pk)."""
    payload = {u.name: (u.name, u.pk) for u in University.objects.all()}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def get_publishers(request):
    """JSON data provider: map publisher name -> (name, pk)."""
    payload = {pub.name: (pub.name, pub.pk) for pub in Publisher.objects.all()}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def get_courses(request):
    """JSON data provider: map course name -> (name, pk)."""
    payload = {course.name: (course.name, course.pk) for course in Course.objects.all()}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def get_topics(request):
    """JSON data provider: map topic name -> (name, pk)."""
    payload = {topic.name: (topic.name, topic.pk) for topic in Topic.objects.all()}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def get_tags(request):
    """JSON data provider: map tag name -> (name, id)."""
    payload = {tag.name: (tag.name, tag.id) for tag in Tag.objects.all()}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def get_services(request):
    """JSON data provider: map service name -> (name, id)."""
    payload = {svc.name: (svc.name, svc.id) for svc in Service.objects.all()}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def get_colleges(request):
    """JSON data provider: map college name -> (latitude, longitude) as strings."""
    payload = {col.name: (str(col.latitude), str(col.longitude))
               for col in College.objects.all()}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def get_cities(request):
    """Return every City as JSON: {name: [latitude, longitude], ...} (coords as strings)."""
    payload = {
        city.name: (str(city.latitude), str(city.longitude))
        for city in City.objects.all()
    }
    return HttpResponse(json.dumps(payload), content_type="application/json")
|
normal
|
{
"blob_id": "d2c5d306591216e100b5bd8e8822b24fd137d092",
"index": 9208,
"step-1": "<mask token>\n\n\nclass DocUploadForm(forms.ModelForm):\n tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all())\n\n\n class Meta:\n model = Document\n exclude = ['organization', 'private_user', 'is_public',\n 'is_user_private', 'display']\n\n\nclass ShopForm(forms.Form):\n shopName = forms.CharField(max_length=100)\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'Email'}), label=_(u'email address'),\n required=False)\n address = forms.CharField(widget=forms.Textarea())\n pincode = forms.IntegerField()\n nearest_college = forms.CharField(max_length=200, required=False)\n nearest_town = forms.CharField(max_length=200, required=False)\n telephone = forms.CharField(max_length=14)\n longitude = forms.DecimalField(max_digits=11, decimal_places=7)\n latitude = forms.DecimalField(max_digits=11, decimal_places=7)\n username = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'User Name'}), label=_(u'Username'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': 'Password'}, render_value=False), label\n =_(u'Password'))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n services = forms.ModelMultipleChoiceField(queryset=Service.objects.all())\n\n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. 
Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if ('password1' in self.cleaned_data and 'password' in self.\n cleaned_data):\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(\n u'You must type the same password each time'))\n return self.cleaned_data\n\n\nclass ShopEditForm(forms.ModelForm):\n\n\n class Meta:\n model = Shop\n exclude = ['latitude', 'longitude', 'is_active']\n\n\n<mask token>\n\n\n@login_required\ndef docUpload(request):\n user = UserProfile.objects.get(user=request.user)\n if request.method == 'POST':\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n elif user.userType == 2:\n org = Organization.objects.get(employee=request.user)\n data = DocUploadForm(request.POST, request.FILES)\n new_doc = data.save(commit=False)\n new_doc.organization = org\n new_doc.is_public = True\n new_doc.save()\n data.save_m2m()\n if user.userType == 1:\n return HttpResponseRedirect(reverse('documentListOwner'))\n elif user.userType == 2:\n return HttpResponseRedirect(reverse('documentListEmp'))\n else:\n form = DocUploadForm()\n if user.userType == 1:\n context = {'docUploadForm': form}\n return render(request, 'printo_app/docUpload-owner.html', context)\n if user.userType == 2:\n shopRate = Shop.objects.get(employee=request.user).rate\n context = {'docUploadForm': form, 'rate': shopRate}\n return render(request, 'printo_app/docUpload-emp.html', context)\n\n\n@login_required\ndef docList(request):\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n docList = Document.objects.filter(is_public=True).filter(organization\n =org)\n context = {'docs': docList}\n return render(request, 'printo_app/docList-owner.html', context)\n elif user.userType == 2:\n org = Organization.objects.get(employee=request.user)\n docList = 
Document.objects.filter(is_public=True).filter(organization=org\n ).order_by('-uploadedDate')\n context = {'docs': docList}\n return render(request, 'printo_app/docList-emp.html', context)\n\n\n<mask token>\n\n\n@login_required\ndef docDetail(request, docid):\n docDetail = Document.objects.get(id=docid)\n form = DocUploadForm(instance=docDetail)\n context = {'docEditForm': form, 'doc': docDetail}\n return render(request, 'printo_app/docDetail.html', context)\n\n\n@login_required\ndef docEditSave(request, docid):\n currentDoc = Document.objects.get(id=docid)\n docDetail = DocUploadForm(request.POST, request.FILES, instance=currentDoc)\n docDetail.save()\n context = {'msg': docDetail}\n return HttpResponseRedirect(reverse('documentList'))\n\n\n<mask token>\n\n\n@login_required\ndef shopEditSave(request):\n shop = Shop.objects.get(employee=request.user)\n shopForm = ShopEditForm(request.POST, instance=shop)\n shopForm.save()\n return HttpResponseRedirect(reverse('shopProfile'))\n\n\n<mask token>\n\n\n@login_required\ndef orderList(request, shopid=None):\n shop = Shop.objects.get(employee=request.user)\n orderList = Order.objects.filter(shop=shop)\n new_count = orderList.filter(is_new=True).count()\n pending_count = orderList.filter(is_accepted=True).count()\n completed_count = orderList.filter(is_printed=True).count()\n delivered_count = orderList.filter(is_delivered=True).count()\n context = {'orders': orderList, 'new_count': new_count, 'pending_count':\n pending_count, 'completed_count': completed_count,\n 'delivered_count': delivered_count}\n return render(request, 'printo_app/ordersList.html', context)\n\n\n<mask token>\n\n\n@login_required\ndef shopCreate(request):\n uprofile = get_object_or_404(UserProfile, user=request.user)\n if uprofile.userType == 1:\n pass\n else:\n return HttpResponse(\"You don't have permission\")\n if request.method == 'POST':\n form = ShopForm(request.POST)\n import ipdb\n ipdb.set_trace()\n if form.is_valid():\n username = 
form.cleaned_data.get('username', None)\n password = form.cleaned_data.get('password', None)\n telephone = form.cleaned_data.get('telephone', None)\n email = request.user.email\n if username != None:\n user = User.objects.create_user(username=username, email=\n email, password=password)\n userprofile = UserProfile()\n userprofile.user = user\n userprofile.userType = 2\n if telephone != None:\n userprofile.telephone = telephone\n userprofile.save()\n shopprofile = Shop()\n shopprofile.employee = user\n shopprofile.owner = Organization.objects.get(owner=request.user)\n shopprofile.email = email\n shopprofile.shopName = form.cleaned_data.get('shopName', None)\n shopprofile.pincode = form.cleaned_data.get('pincode', None)\n shopprofile.address = form.cleaned_data.get('address', None)\n shopprofile.latitude = form.cleaned_data.get('latitude', None)\n shopprofile.longitude = form.cleaned_data.get('longitude', None)\n shopprofile.telephone = form.cleaned_data.get('telephone', None)\n shopprofile.save()\n shopprofile.services = form.cleaned_data.get('services', None)\n return HttpResponseRedirect(reverse('shopList'))\n else:\n userform = 'this form is to be deleted'\n shopform = ShopForm()\n context = {'shopCreateForm': shopform, 'userForm': userform}\n return render(request, 'printo_app/shopCreate.html', context)\n\n\n<mask token>\n\n\nclass RegistrationForm(forms.Form):\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'Email'}), label=_(u'email address'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': 'Password'}, render_value=False), label\n =_(u'Password'))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n mobile = forms.CharField(max_length=14)\n\n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. 
Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if ('password1' in self.cleaned_data and 'password' in self.\n cleaned_data):\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(\n u'You must type the same password each time'))\n return self.cleaned_data\n\n def clean_email(self):\n if 'email' in self.cleaned_data:\n try:\n user = User.objects.get(username=self.cleaned_data['email'])\n raise forms.ValidationError(_(\n u'Already Email Address is registered'))\n except User.DoesNotExist:\n pass\n return self.cleaned_data['email']\n\n\ndef index_main(request):\n if request.user.is_authenticated() == True:\n return HttpResponseRedirect(reverse('main'))\n else:\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n u = User.objects.create_user(form.cleaned_data['email'],\n form.cleaned_data['email'], form.cleaned_data['password'])\n profile = UserProfile()\n profile.user = u\n profile.userType = 1\n profile.mobile = form.cleaned_data['mobile']\n profile.save()\n org = Organization()\n org.owner = u\n org.save()\n return HttpResponse('Thanks')\n else:\n form = RegistrationForm()\n return render(request, 'index_main.html', context={'form': form})\n\n\ndef docListOwner(request):\n pass\n\n\n<mask token>\n\n\ndef get_universitys(request):\n p = {}\n for c in University.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_publishers(request):\n p = {}\n for c in Publisher.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\n<mask token>\n\n\ndef get_topics(request):\n p = {}\n for c in Topic.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\n<mask token>\n\n\ndef get_cities(request):\n p = {}\n for c in 
City.objects.all():\n p[c.name] = str(c.latitude), str(c.longitude)\n return HttpResponse(json.dumps(p), content_type='application/json')\n",
"step-2": "<mask token>\n\n\nclass DocUploadForm(forms.ModelForm):\n tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all())\n\n\n class Meta:\n model = Document\n exclude = ['organization', 'private_user', 'is_public',\n 'is_user_private', 'display']\n\n\nclass ShopForm(forms.Form):\n shopName = forms.CharField(max_length=100)\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'Email'}), label=_(u'email address'),\n required=False)\n address = forms.CharField(widget=forms.Textarea())\n pincode = forms.IntegerField()\n nearest_college = forms.CharField(max_length=200, required=False)\n nearest_town = forms.CharField(max_length=200, required=False)\n telephone = forms.CharField(max_length=14)\n longitude = forms.DecimalField(max_digits=11, decimal_places=7)\n latitude = forms.DecimalField(max_digits=11, decimal_places=7)\n username = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'User Name'}), label=_(u'Username'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': 'Password'}, render_value=False), label\n =_(u'Password'))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n services = forms.ModelMultipleChoiceField(queryset=Service.objects.all())\n\n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. 
Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if ('password1' in self.cleaned_data and 'password' in self.\n cleaned_data):\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(\n u'You must type the same password each time'))\n return self.cleaned_data\n\n\nclass ShopEditForm(forms.ModelForm):\n\n\n class Meta:\n model = Shop\n exclude = ['latitude', 'longitude', 'is_active']\n\n\n<mask token>\n\n\n@login_required\ndef docUpload(request):\n user = UserProfile.objects.get(user=request.user)\n if request.method == 'POST':\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n elif user.userType == 2:\n org = Organization.objects.get(employee=request.user)\n data = DocUploadForm(request.POST, request.FILES)\n new_doc = data.save(commit=False)\n new_doc.organization = org\n new_doc.is_public = True\n new_doc.save()\n data.save_m2m()\n if user.userType == 1:\n return HttpResponseRedirect(reverse('documentListOwner'))\n elif user.userType == 2:\n return HttpResponseRedirect(reverse('documentListEmp'))\n else:\n form = DocUploadForm()\n if user.userType == 1:\n context = {'docUploadForm': form}\n return render(request, 'printo_app/docUpload-owner.html', context)\n if user.userType == 2:\n shopRate = Shop.objects.get(employee=request.user).rate\n context = {'docUploadForm': form, 'rate': shopRate}\n return render(request, 'printo_app/docUpload-emp.html', context)\n\n\n@login_required\ndef docList(request):\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n docList = Document.objects.filter(is_public=True).filter(organization\n =org)\n context = {'docs': docList}\n return render(request, 'printo_app/docList-owner.html', context)\n elif user.userType == 2:\n org = Organization.objects.get(employee=request.user)\n docList = 
Document.objects.filter(is_public=True).filter(organization=org\n ).order_by('-uploadedDate')\n context = {'docs': docList}\n return render(request, 'printo_app/docList-emp.html', context)\n\n\n<mask token>\n\n\n@login_required\ndef docDetail(request, docid):\n docDetail = Document.objects.get(id=docid)\n form = DocUploadForm(instance=docDetail)\n context = {'docEditForm': form, 'doc': docDetail}\n return render(request, 'printo_app/docDetail.html', context)\n\n\n@login_required\ndef docEditSave(request, docid):\n currentDoc = Document.objects.get(id=docid)\n docDetail = DocUploadForm(request.POST, request.FILES, instance=currentDoc)\n docDetail.save()\n context = {'msg': docDetail}\n return HttpResponseRedirect(reverse('documentList'))\n\n\n<mask token>\n\n\n@login_required\ndef shopEditSave(request):\n shop = Shop.objects.get(employee=request.user)\n shopForm = ShopEditForm(request.POST, instance=shop)\n shopForm.save()\n return HttpResponseRedirect(reverse('shopProfile'))\n\n\n<mask token>\n\n\n@login_required\ndef orderList(request, shopid=None):\n shop = Shop.objects.get(employee=request.user)\n orderList = Order.objects.filter(shop=shop)\n new_count = orderList.filter(is_new=True).count()\n pending_count = orderList.filter(is_accepted=True).count()\n completed_count = orderList.filter(is_printed=True).count()\n delivered_count = orderList.filter(is_delivered=True).count()\n context = {'orders': orderList, 'new_count': new_count, 'pending_count':\n pending_count, 'completed_count': completed_count,\n 'delivered_count': delivered_count}\n return render(request, 'printo_app/ordersList.html', context)\n\n\n@login_required\ndef shopList(request):\n org = Organization.objects.get(owner=request.user)\n shops = Shop.objects.filter(owner=org)\n context = {'shops': shops}\n return render(request, 'printo_app/shopList.html', context)\n\n\n@login_required\ndef shopCreate(request):\n uprofile = get_object_or_404(UserProfile, user=request.user)\n if uprofile.userType == 
1:\n pass\n else:\n return HttpResponse(\"You don't have permission\")\n if request.method == 'POST':\n form = ShopForm(request.POST)\n import ipdb\n ipdb.set_trace()\n if form.is_valid():\n username = form.cleaned_data.get('username', None)\n password = form.cleaned_data.get('password', None)\n telephone = form.cleaned_data.get('telephone', None)\n email = request.user.email\n if username != None:\n user = User.objects.create_user(username=username, email=\n email, password=password)\n userprofile = UserProfile()\n userprofile.user = user\n userprofile.userType = 2\n if telephone != None:\n userprofile.telephone = telephone\n userprofile.save()\n shopprofile = Shop()\n shopprofile.employee = user\n shopprofile.owner = Organization.objects.get(owner=request.user)\n shopprofile.email = email\n shopprofile.shopName = form.cleaned_data.get('shopName', None)\n shopprofile.pincode = form.cleaned_data.get('pincode', None)\n shopprofile.address = form.cleaned_data.get('address', None)\n shopprofile.latitude = form.cleaned_data.get('latitude', None)\n shopprofile.longitude = form.cleaned_data.get('longitude', None)\n shopprofile.telephone = form.cleaned_data.get('telephone', None)\n shopprofile.save()\n shopprofile.services = form.cleaned_data.get('services', None)\n return HttpResponseRedirect(reverse('shopList'))\n else:\n userform = 'this form is to be deleted'\n shopform = ShopForm()\n context = {'shopCreateForm': shopform, 'userForm': userform}\n return render(request, 'printo_app/shopCreate.html', context)\n\n\n<mask token>\n\n\nclass RegistrationForm(forms.Form):\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'Email'}), label=_(u'email address'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': 'Password'}, render_value=False), label\n =_(u'Password'))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': ' Password 
Again'}, render_value=False),\n label=_(u'Password Again'))\n mobile = forms.CharField(max_length=14)\n\n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if ('password1' in self.cleaned_data and 'password' in self.\n cleaned_data):\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(\n u'You must type the same password each time'))\n return self.cleaned_data\n\n def clean_email(self):\n if 'email' in self.cleaned_data:\n try:\n user = User.objects.get(username=self.cleaned_data['email'])\n raise forms.ValidationError(_(\n u'Already Email Address is registered'))\n except User.DoesNotExist:\n pass\n return self.cleaned_data['email']\n\n\ndef index_main(request):\n if request.user.is_authenticated() == True:\n return HttpResponseRedirect(reverse('main'))\n else:\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n u = User.objects.create_user(form.cleaned_data['email'],\n form.cleaned_data['email'], form.cleaned_data['password'])\n profile = UserProfile()\n profile.user = u\n profile.userType = 1\n profile.mobile = form.cleaned_data['mobile']\n profile.save()\n org = Organization()\n org.owner = u\n org.save()\n return HttpResponse('Thanks')\n else:\n form = RegistrationForm()\n return render(request, 'index_main.html', context={'form': form})\n\n\ndef docListOwner(request):\n pass\n\n\n<mask token>\n\n\ndef get_universitys(request):\n p = {}\n for c in University.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_publishers(request):\n p = {}\n for c in Publisher.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\n<mask token>\n\n\ndef get_topics(request):\n p = {}\n 
for c in Topic.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\n<mask token>\n\n\ndef get_cities(request):\n p = {}\n for c in City.objects.all():\n p[c.name] = str(c.latitude), str(c.longitude)\n return HttpResponse(json.dumps(p), content_type='application/json')\n",
"step-3": "<mask token>\n\n\nclass DocUploadForm(forms.ModelForm):\n tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all())\n\n\n class Meta:\n model = Document\n exclude = ['organization', 'private_user', 'is_public',\n 'is_user_private', 'display']\n\n\nclass ShopForm(forms.Form):\n shopName = forms.CharField(max_length=100)\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'Email'}), label=_(u'email address'),\n required=False)\n address = forms.CharField(widget=forms.Textarea())\n pincode = forms.IntegerField()\n nearest_college = forms.CharField(max_length=200, required=False)\n nearest_town = forms.CharField(max_length=200, required=False)\n telephone = forms.CharField(max_length=14)\n longitude = forms.DecimalField(max_digits=11, decimal_places=7)\n latitude = forms.DecimalField(max_digits=11, decimal_places=7)\n username = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'User Name'}), label=_(u'Username'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': 'Password'}, render_value=False), label\n =_(u'Password'))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n services = forms.ModelMultipleChoiceField(queryset=Service.objects.all())\n\n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. 
Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if ('password1' in self.cleaned_data and 'password' in self.\n cleaned_data):\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(\n u'You must type the same password each time'))\n return self.cleaned_data\n\n\nclass ShopEditForm(forms.ModelForm):\n\n\n class Meta:\n model = Shop\n exclude = ['latitude', 'longitude', 'is_active']\n\n\n@login_required\ndef indexEmp(request):\n context = {'shop': shopid}\n return render(request, 'index.html', context)\n\n\n@login_required\ndef docUpload(request):\n user = UserProfile.objects.get(user=request.user)\n if request.method == 'POST':\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n elif user.userType == 2:\n org = Organization.objects.get(employee=request.user)\n data = DocUploadForm(request.POST, request.FILES)\n new_doc = data.save(commit=False)\n new_doc.organization = org\n new_doc.is_public = True\n new_doc.save()\n data.save_m2m()\n if user.userType == 1:\n return HttpResponseRedirect(reverse('documentListOwner'))\n elif user.userType == 2:\n return HttpResponseRedirect(reverse('documentListEmp'))\n else:\n form = DocUploadForm()\n if user.userType == 1:\n context = {'docUploadForm': form}\n return render(request, 'printo_app/docUpload-owner.html', context)\n if user.userType == 2:\n shopRate = Shop.objects.get(employee=request.user).rate\n context = {'docUploadForm': form, 'rate': shopRate}\n return render(request, 'printo_app/docUpload-emp.html', context)\n\n\n@login_required\ndef docList(request):\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n docList = Document.objects.filter(is_public=True).filter(organization\n =org)\n context = {'docs': docList}\n return render(request, 'printo_app/docList-owner.html', context)\n elif 
user.userType == 2:\n org = Organization.objects.get(employee=request.user)\n docList = Document.objects.filter(is_public=True).filter(organization=org\n ).order_by('-uploadedDate')\n context = {'docs': docList}\n return render(request, 'printo_app/docList-emp.html', context)\n\n\n<mask token>\n\n\n@login_required\ndef docDetail(request, docid):\n docDetail = Document.objects.get(id=docid)\n form = DocUploadForm(instance=docDetail)\n context = {'docEditForm': form, 'doc': docDetail}\n return render(request, 'printo_app/docDetail.html', context)\n\n\n@login_required\ndef docEditSave(request, docid):\n currentDoc = Document.objects.get(id=docid)\n docDetail = DocUploadForm(request.POST, request.FILES, instance=currentDoc)\n docDetail.save()\n context = {'msg': docDetail}\n return HttpResponseRedirect(reverse('documentList'))\n\n\n@login_required\ndef shopProfile(request, shopid=None):\n context = {}\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n pass\n elif user.userType == 2:\n shop = Shop.objects.get(employee=request.user)\n shopForm = ShopEditForm()\n context = {'shopForm': shopForm, 'details': shop}\n return render(request, 'printo_app/shopProfile.html', context)\n\n\n@login_required\ndef shopEditSave(request):\n shop = Shop.objects.get(employee=request.user)\n shopForm = ShopEditForm(request.POST, instance=shop)\n shopForm.save()\n return HttpResponseRedirect(reverse('shopProfile'))\n\n\n@login_required\ndef indexEmp(request, shopid=None):\n user = UserProfile.objects.get(user=request.user)\n is_owner = False\n if user.userType == 1:\n is_owner = True\n elif user.userType == 2:\n is_owner = False\n context = {'is_owner': is_owner}\n return HttpResponseRedirect(reverse('orderList'))\n\n\n@login_required\ndef orderList(request, shopid=None):\n shop = Shop.objects.get(employee=request.user)\n orderList = Order.objects.filter(shop=shop)\n new_count = orderList.filter(is_new=True).count()\n pending_count = 
orderList.filter(is_accepted=True).count()\n completed_count = orderList.filter(is_printed=True).count()\n delivered_count = orderList.filter(is_delivered=True).count()\n context = {'orders': orderList, 'new_count': new_count, 'pending_count':\n pending_count, 'completed_count': completed_count,\n 'delivered_count': delivered_count}\n return render(request, 'printo_app/ordersList.html', context)\n\n\n@login_required\ndef shopList(request):\n org = Organization.objects.get(owner=request.user)\n shops = Shop.objects.filter(owner=org)\n context = {'shops': shops}\n return render(request, 'printo_app/shopList.html', context)\n\n\n@login_required\ndef shopCreate(request):\n uprofile = get_object_or_404(UserProfile, user=request.user)\n if uprofile.userType == 1:\n pass\n else:\n return HttpResponse(\"You don't have permission\")\n if request.method == 'POST':\n form = ShopForm(request.POST)\n import ipdb\n ipdb.set_trace()\n if form.is_valid():\n username = form.cleaned_data.get('username', None)\n password = form.cleaned_data.get('password', None)\n telephone = form.cleaned_data.get('telephone', None)\n email = request.user.email\n if username != None:\n user = User.objects.create_user(username=username, email=\n email, password=password)\n userprofile = UserProfile()\n userprofile.user = user\n userprofile.userType = 2\n if telephone != None:\n userprofile.telephone = telephone\n userprofile.save()\n shopprofile = Shop()\n shopprofile.employee = user\n shopprofile.owner = Organization.objects.get(owner=request.user)\n shopprofile.email = email\n shopprofile.shopName = form.cleaned_data.get('shopName', None)\n shopprofile.pincode = form.cleaned_data.get('pincode', None)\n shopprofile.address = form.cleaned_data.get('address', None)\n shopprofile.latitude = form.cleaned_data.get('latitude', None)\n shopprofile.longitude = form.cleaned_data.get('longitude', None)\n shopprofile.telephone = form.cleaned_data.get('telephone', None)\n shopprofile.save()\n 
shopprofile.services = form.cleaned_data.get('services', None)\n return HttpResponseRedirect(reverse('shopList'))\n else:\n userform = 'this form is to be deleted'\n shopform = ShopForm()\n context = {'shopCreateForm': shopform, 'userForm': userform}\n return render(request, 'printo_app/shopCreate.html', context)\n\n\n@login_required\ndef index(request):\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n return HttpResponseRedirect(reverse('OwnerMain'))\n elif user.userType == 2:\n return HttpResponseRedirect(reverse('EmployeeMain'))\n return None\n\n\nclass RegistrationForm(forms.Form):\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'Email'}), label=_(u'email address'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': 'Password'}, render_value=False), label\n =_(u'Password'))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n mobile = forms.CharField(max_length=14)\n\n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. 
Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if ('password1' in self.cleaned_data and 'password' in self.\n cleaned_data):\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(\n u'You must type the same password each time'))\n return self.cleaned_data\n\n def clean_email(self):\n if 'email' in self.cleaned_data:\n try:\n user = User.objects.get(username=self.cleaned_data['email'])\n raise forms.ValidationError(_(\n u'Already Email Address is registered'))\n except User.DoesNotExist:\n pass\n return self.cleaned_data['email']\n\n\ndef index_main(request):\n if request.user.is_authenticated() == True:\n return HttpResponseRedirect(reverse('main'))\n else:\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n u = User.objects.create_user(form.cleaned_data['email'],\n form.cleaned_data['email'], form.cleaned_data['password'])\n profile = UserProfile()\n profile.user = u\n profile.userType = 1\n profile.mobile = form.cleaned_data['mobile']\n profile.save()\n org = Organization()\n org.owner = u\n org.save()\n return HttpResponse('Thanks')\n else:\n form = RegistrationForm()\n return render(request, 'index_main.html', context={'form': form})\n\n\ndef docListOwner(request):\n pass\n\n\ndef docUploadOwner(request):\n pass\n\n\n@login_required\ndef indexOwner(request):\n context = {}\n return render(request, 'ownerMain.html', context)\n\n\n<mask token>\n\n\ndef get_universitys(request):\n p = {}\n for c in University.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_publishers(request):\n p = {}\n for c in Publisher.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_courses(request):\n p = {}\n for c in Course.objects.all():\n p[c.name] = c.name, c.pk\n return 
HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_topics(request):\n p = {}\n for c in Topic.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\n<mask token>\n\n\ndef get_services(request):\n p = {}\n for c in Service.objects.all():\n p[c.name] = c.name, c.id\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\n<mask token>\n\n\ndef get_cities(request):\n p = {}\n for c in City.objects.all():\n p[c.name] = str(c.latitude), str(c.longitude)\n return HttpResponse(json.dumps(p), content_type='application/json')\n",
"step-4": "<mask token>\n\n\nclass DocUploadForm(forms.ModelForm):\n tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all())\n\n\n class Meta:\n model = Document\n exclude = ['organization', 'private_user', 'is_public',\n 'is_user_private', 'display']\n\n\nclass ShopForm(forms.Form):\n shopName = forms.CharField(max_length=100)\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'Email'}), label=_(u'email address'),\n required=False)\n address = forms.CharField(widget=forms.Textarea())\n pincode = forms.IntegerField()\n nearest_college = forms.CharField(max_length=200, required=False)\n nearest_town = forms.CharField(max_length=200, required=False)\n telephone = forms.CharField(max_length=14)\n longitude = forms.DecimalField(max_digits=11, decimal_places=7)\n latitude = forms.DecimalField(max_digits=11, decimal_places=7)\n username = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'User Name'}), label=_(u'Username'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': 'Password'}, render_value=False), label\n =_(u'Password'))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n services = forms.ModelMultipleChoiceField(queryset=Service.objects.all())\n\n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. 
Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if ('password1' in self.cleaned_data and 'password' in self.\n cleaned_data):\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(\n u'You must type the same password each time'))\n return self.cleaned_data\n\n\nclass ShopEditForm(forms.ModelForm):\n\n\n class Meta:\n model = Shop\n exclude = ['latitude', 'longitude', 'is_active']\n\n\n@login_required\ndef indexEmp(request):\n context = {'shop': shopid}\n return render(request, 'index.html', context)\n\n\n@login_required\ndef docUpload(request):\n user = UserProfile.objects.get(user=request.user)\n if request.method == 'POST':\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n elif user.userType == 2:\n org = Organization.objects.get(employee=request.user)\n data = DocUploadForm(request.POST, request.FILES)\n new_doc = data.save(commit=False)\n new_doc.organization = org\n new_doc.is_public = True\n new_doc.save()\n data.save_m2m()\n if user.userType == 1:\n return HttpResponseRedirect(reverse('documentListOwner'))\n elif user.userType == 2:\n return HttpResponseRedirect(reverse('documentListEmp'))\n else:\n form = DocUploadForm()\n if user.userType == 1:\n context = {'docUploadForm': form}\n return render(request, 'printo_app/docUpload-owner.html', context)\n if user.userType == 2:\n shopRate = Shop.objects.get(employee=request.user).rate\n context = {'docUploadForm': form, 'rate': shopRate}\n return render(request, 'printo_app/docUpload-emp.html', context)\n\n\n@login_required\ndef docList(request):\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n docList = Document.objects.filter(is_public=True).filter(organization\n =org)\n context = {'docs': docList}\n return render(request, 'printo_app/docList-owner.html', context)\n elif 
user.userType == 2:\n org = Organization.objects.get(employee=request.user)\n docList = Document.objects.filter(is_public=True).filter(organization=org\n ).order_by('-uploadedDate')\n context = {'docs': docList}\n return render(request, 'printo_app/docList-emp.html', context)\n\n\n@login_required\ndef docListOwner(request):\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n docList = Document.objects.filter(is_public=True).filter(organization\n =org)\n context = {'docs': docList}\n return render(request, 'printo_app/docList-owner.html', context)\n\n\n@login_required\ndef docDetail(request, docid):\n docDetail = Document.objects.get(id=docid)\n form = DocUploadForm(instance=docDetail)\n context = {'docEditForm': form, 'doc': docDetail}\n return render(request, 'printo_app/docDetail.html', context)\n\n\n@login_required\ndef docEditSave(request, docid):\n currentDoc = Document.objects.get(id=docid)\n docDetail = DocUploadForm(request.POST, request.FILES, instance=currentDoc)\n docDetail.save()\n context = {'msg': docDetail}\n return HttpResponseRedirect(reverse('documentList'))\n\n\n@login_required\ndef shopProfile(request, shopid=None):\n context = {}\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n pass\n elif user.userType == 2:\n shop = Shop.objects.get(employee=request.user)\n shopForm = ShopEditForm()\n context = {'shopForm': shopForm, 'details': shop}\n return render(request, 'printo_app/shopProfile.html', context)\n\n\n@login_required\ndef shopEditSave(request):\n shop = Shop.objects.get(employee=request.user)\n shopForm = ShopEditForm(request.POST, instance=shop)\n shopForm.save()\n return HttpResponseRedirect(reverse('shopProfile'))\n\n\n@login_required\ndef indexEmp(request, shopid=None):\n user = UserProfile.objects.get(user=request.user)\n is_owner = False\n if user.userType == 1:\n is_owner = True\n elif user.userType == 2:\n is_owner = False\n 
context = {'is_owner': is_owner}\n return HttpResponseRedirect(reverse('orderList'))\n\n\n@login_required\ndef orderList(request, shopid=None):\n shop = Shop.objects.get(employee=request.user)\n orderList = Order.objects.filter(shop=shop)\n new_count = orderList.filter(is_new=True).count()\n pending_count = orderList.filter(is_accepted=True).count()\n completed_count = orderList.filter(is_printed=True).count()\n delivered_count = orderList.filter(is_delivered=True).count()\n context = {'orders': orderList, 'new_count': new_count, 'pending_count':\n pending_count, 'completed_count': completed_count,\n 'delivered_count': delivered_count}\n return render(request, 'printo_app/ordersList.html', context)\n\n\n@login_required\ndef shopList(request):\n org = Organization.objects.get(owner=request.user)\n shops = Shop.objects.filter(owner=org)\n context = {'shops': shops}\n return render(request, 'printo_app/shopList.html', context)\n\n\n@login_required\ndef shopCreate(request):\n uprofile = get_object_or_404(UserProfile, user=request.user)\n if uprofile.userType == 1:\n pass\n else:\n return HttpResponse(\"You don't have permission\")\n if request.method == 'POST':\n form = ShopForm(request.POST)\n import ipdb\n ipdb.set_trace()\n if form.is_valid():\n username = form.cleaned_data.get('username', None)\n password = form.cleaned_data.get('password', None)\n telephone = form.cleaned_data.get('telephone', None)\n email = request.user.email\n if username != None:\n user = User.objects.create_user(username=username, email=\n email, password=password)\n userprofile = UserProfile()\n userprofile.user = user\n userprofile.userType = 2\n if telephone != None:\n userprofile.telephone = telephone\n userprofile.save()\n shopprofile = Shop()\n shopprofile.employee = user\n shopprofile.owner = Organization.objects.get(owner=request.user)\n shopprofile.email = email\n shopprofile.shopName = form.cleaned_data.get('shopName', None)\n shopprofile.pincode = form.cleaned_data.get('pincode', 
None)\n shopprofile.address = form.cleaned_data.get('address', None)\n shopprofile.latitude = form.cleaned_data.get('latitude', None)\n shopprofile.longitude = form.cleaned_data.get('longitude', None)\n shopprofile.telephone = form.cleaned_data.get('telephone', None)\n shopprofile.save()\n shopprofile.services = form.cleaned_data.get('services', None)\n return HttpResponseRedirect(reverse('shopList'))\n else:\n userform = 'this form is to be deleted'\n shopform = ShopForm()\n context = {'shopCreateForm': shopform, 'userForm': userform}\n return render(request, 'printo_app/shopCreate.html', context)\n\n\n@login_required\ndef index(request):\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n return HttpResponseRedirect(reverse('OwnerMain'))\n elif user.userType == 2:\n return HttpResponseRedirect(reverse('EmployeeMain'))\n return None\n\n\nclass RegistrationForm(forms.Form):\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'Email'}), label=_(u'email address'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': 'Password'}, render_value=False), label\n =_(u'Password'))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n mobile = forms.CharField(max_length=14)\n\n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. 
Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if ('password1' in self.cleaned_data and 'password' in self.\n cleaned_data):\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(\n u'You must type the same password each time'))\n return self.cleaned_data\n\n def clean_email(self):\n if 'email' in self.cleaned_data:\n try:\n user = User.objects.get(username=self.cleaned_data['email'])\n raise forms.ValidationError(_(\n u'Already Email Address is registered'))\n except User.DoesNotExist:\n pass\n return self.cleaned_data['email']\n\n\ndef index_main(request):\n if request.user.is_authenticated() == True:\n return HttpResponseRedirect(reverse('main'))\n else:\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n u = User.objects.create_user(form.cleaned_data['email'],\n form.cleaned_data['email'], form.cleaned_data['password'])\n profile = UserProfile()\n profile.user = u\n profile.userType = 1\n profile.mobile = form.cleaned_data['mobile']\n profile.save()\n org = Organization()\n org.owner = u\n org.save()\n return HttpResponse('Thanks')\n else:\n form = RegistrationForm()\n return render(request, 'index_main.html', context={'form': form})\n\n\ndef docListOwner(request):\n pass\n\n\ndef docUploadOwner(request):\n pass\n\n\n@login_required\ndef indexOwner(request):\n context = {}\n return render(request, 'ownerMain.html', context)\n\n\n<mask token>\n\n\ndef get_universitys(request):\n p = {}\n for c in University.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_publishers(request):\n p = {}\n for c in Publisher.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_courses(request):\n p = {}\n for c in Course.objects.all():\n p[c.name] = c.name, c.pk\n return 
HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_topics(request):\n p = {}\n for c in Topic.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\n<mask token>\n\n\ndef get_services(request):\n p = {}\n for c in Service.objects.all():\n p[c.name] = c.name, c.id\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_colleges(request):\n p = {}\n for c in College.objects.all():\n p[c.name] = str(c.latitude), str(c.longitude)\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_cities(request):\n p = {}\n for c in City.objects.all():\n p[c.name] = str(c.latitude), str(c.longitude)\n return HttpResponse(json.dumps(p), content_type='application/json')\n",
"step-5": "from django.shortcuts import render\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom .models import Document, Organization, UserProfile, Shop\n#from .forms import DocUploadForm, ShopEditForm\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate, login\nfrom django.shortcuts import get_object_or_404\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.forms import ModelForm\nfrom django.utils.translation import ugettext_lazy as _\nfrom django import forms\nfrom .models import *\n\nclass DocUploadForm(forms.ModelForm):\n tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all())\n class Meta:\n model = Document\n # widgets = {'tags' : autocomplete_light.MultipleChoiceWidget('TagAutocomplete')}\n # autocomplete_fields = ('tags','topic','university',)\n exclude = ['organization','private_user','is_public','is_user_private','display']\n\nclass ShopForm(forms.Form):\n shopName = forms.CharField(max_length=100)\n email = forms.EmailField(widget=forms.TextInput(attrs={'class': 'mandatory', 'placeholder': 'Email'}),\n label=_(u'email address'), required=False)\n \n address = forms.CharField(widget= forms.Textarea())\n pincode = forms.IntegerField()\n \n nearest_college = forms.CharField(max_length=200, required=False)\n \n nearest_town = forms.CharField(max_length=200, required=False)\n \n telephone = forms.CharField(max_length=14)\n \n longitude = forms.DecimalField(max_digits=11, decimal_places=7)\n latitude = forms.DecimalField(max_digits=11, decimal_places=7)\n username = forms.CharField(widget=forms.TextInput(attrs={'class': 'mandatory', 'placeholder': 'User Name'}),\n label=_(u'Username'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': 'Password'}, render_value=False),\n label=_(u'Password'))\n \n 
password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n services = forms.ModelMultipleChoiceField(queryset=Service.objects.all())\n \n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if 'password1' in self.cleaned_data and 'password' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(u'You must type the same password each time'))\n return self.cleaned_data \n\n\n # def clean_email(self):\n # if 'email' in self.cleaned_data:\n\n # try:\n # user = User.objects.get(username= self.cleaned_data[\"username\"])\n # raise forms.ValidationError(_(u'Already this Username is Registered'))\n \n # except User.DoesNotExist:\n \n # pass\n # return self.cleaned_data[\"email\"]\n\nclass ShopEditForm(forms.ModelForm):\n class Meta:\n model = Shop\n exclude = ['latitude','longitude','is_active']\n\n@login_required\ndef indexEmp(request):\n context = {'shop':shopid}\n return render(request,'index.html',context)\n\n@login_required\ndef docUpload(request):\n user = UserProfile.objects.get(user=request.user)\n if(request.method=='POST'):\n # import ipdb; ipdb.set_trace();\n \n if(user.userType == 1 ):\n org = Organization.objects.get(owner = request.user)\n elif(user.userType == 2):\n org = Organization.objects.get(employee = request.user)\n\n data = DocUploadForm(request.POST,request.FILES)\n new_doc = data.save(commit=False)\n new_doc.organization = org\n new_doc.is_public = True\n new_doc.save()\n data.save_m2m() \n if(user.userType == 1 ):\n return HttpResponseRedirect(reverse('documentListOwner'))\n elif(user.userType == 2):\n return HttpResponseRedirect(reverse('documentListEmp'))\n else:\n form = DocUploadForm()\n 
if(user.userType == 1 ):\n context = { \"docUploadForm\" : form}\n return render(request,'printo_app/docUpload-owner.html',context)\n if(user.userType == 2 ):\n shopRate = Shop.objects.get(employee=request.user).rate\n context = { \"docUploadForm\" : form,\"rate\":shopRate }\n return render(request,'printo_app/docUpload-emp.html',context)\n\n@login_required\ndef docList(request):\n user = UserProfile.objects.get(user=request.user)\n if(user.userType == 1 ):\n org = Organization.objects.get(owner = request.user)\n docList = Document.objects.filter(is_public=True).filter(organization=org)\n context = {\"docs\":docList}\n return render(request,'printo_app/docList-owner.html',context)\n elif(user.userType == 2):\n org = Organization.objects.get(employee = request.user)\n docList = Document.objects.filter(is_public=True).filter(organization=org).order_by('-uploadedDate')\n \n context = {\"docs\":docList}\n return render(request,'printo_app/docList-emp.html',context)\n\n@login_required\ndef docListOwner(request):\n user = UserProfile.objects.get(user=request.user)\n if(user.userType == 1 ):\n org = Organization.objects.get(owner = request.user)\n docList = Document.objects.filter(is_public=True).filter(organization=org)\n context = {\"docs\":docList}\n return render(request,'printo_app/docList-owner.html',context)\n\n@login_required\ndef docDetail(request,docid):\n docDetail = Document.objects.get(id=docid)\n form = DocUploadForm(instance = docDetail)\n context = {\"docEditForm\":form,\"doc\":docDetail}\n return render(request,'printo_app/docDetail.html',context)\n\n@login_required\ndef docEditSave(request,docid):\n currentDoc = Document.objects.get(id=docid)\n docDetail = DocUploadForm(request.POST,request.FILES,instance=currentDoc)\n docDetail.save() \n context = { \"msg\":docDetail }\n return HttpResponseRedirect(reverse('documentList'))\n\n@login_required\ndef shopProfile(request,shopid=None):\n context = {}\n user = UserProfile.objects.get(user=request.user)\n 
if(user.userType == 1):\n pass\n elif(user.userType == 2):\n shop = Shop.objects.get(employee=request.user)\n shopForm = ShopEditForm()\n context = {'shopForm':shopForm,'details':shop}\n return render(request,'printo_app/shopProfile.html',context)\n\n@login_required\ndef shopEditSave(request):\n shop = Shop.objects.get(employee=request.user)\n shopForm = ShopEditForm(request.POST,instance=shop)\n shopForm.save()\n return HttpResponseRedirect(reverse('shopProfile'))\n\n@login_required\ndef indexEmp(request,shopid=None):\n user = UserProfile.objects.get(user=request.user)\n is_owner = False\n if(user.userType == 1):\n is_owner = True\n elif(user.userType == 2):\n is_owner = False\n context = {'is_owner':is_owner}\n return HttpResponseRedirect(reverse('orderList'))\n\n@login_required\ndef orderList(request,shopid=None):\n shop = Shop.objects.get(employee = request.user)\n orderList = Order.objects.filter(shop=shop)\n new_count = orderList.filter(is_new=True).count()\n pending_count = orderList.filter(is_accepted=True).count()\n completed_count = orderList.filter(is_printed=True).count()\n delivered_count = orderList.filter(is_delivered=True).count()\n context = {\"orders\":orderList,\"new_count\":new_count,\"pending_count\":pending_count,\"completed_count\":completed_count,\"delivered_count\":delivered_count}\n return render(request,'printo_app/ordersList.html',context)\n\n@login_required\ndef shopList(request):\n org = Organization.objects.get(owner = request.user)\n shops = Shop.objects.filter(owner = org )\n context={'shops' : shops}\n return render(request,'printo_app/shopList.html',context)\n\n@login_required\ndef shopCreate(request):\n uprofile =get_object_or_404(UserProfile, user=request.user)\n if uprofile.userType==1:\n pass\n else:\n return HttpResponse(\"You don't have permission\")\n \n if(request.method=='POST'):\n form = ShopForm(request.POST)\n import ipdb; ipdb.set_trace()\n if(form.is_valid()):\n username = form.cleaned_data.get(\"username\", None)\n 
password = form.cleaned_data.get(\"password\", None)\n telephone = form.cleaned_data.get(\"telephone\", None)\n email = request.user.email\n # email = form.cleaned_data.get(\"email\", None)\n # if email == None:\n # email = request.user.email\n if username != None:\n user = User.objects.create_user(username=username,email=email, password=password)\n \n userprofile = UserProfile()\n userprofile.user = user\n userprofile.userType = 2\n if telephone !=None:\n userprofile.telephone = telephone \n userprofile.save()\n \n # shop = Shop()\n shopprofile = Shop()\n shopprofile.employee = user\n shopprofile.owner = Organization.objects.get(owner = request.user)\n shopprofile.email = email\n shopprofile.shopName = form.cleaned_data.get(\"shopName\", None)\n shopprofile.pincode = form.cleaned_data.get(\"pincode\",None)\n shopprofile.address = form.cleaned_data.get(\"address\",None)\n shopprofile.latitude = form.cleaned_data.get(\"latitude\",None)\n shopprofile.longitude = form.cleaned_data.get(\"longitude\",None)\n shopprofile.telephone = form.cleaned_data.get(\"telephone\",None)\n \n shopprofile.save()\n shopprofile.services = form.cleaned_data.get(\"services\",None)\n # shop.save_m2m()\n\n return HttpResponseRedirect(reverse('shopList'))\n else:\n userform = 'this form is to be deleted'\n\n shopform = ShopForm()\n context = { 'shopCreateForm' : shopform, 'userForm' : userform }\n return render(request,'printo_app/shopCreate.html',context)\n\n@login_required\ndef index(request):\n user = UserProfile.objects.get(user=request.user)\n if(user.userType == 1):\n return HttpResponseRedirect(reverse('OwnerMain'))\n elif(user.userType == 2):\n return HttpResponseRedirect(reverse('EmployeeMain'))\n return None\n\nclass RegistrationForm(forms.Form):\n \n \n \n email = forms.EmailField(widget=forms.TextInput(attrs={'class': 'mandatory', 'placeholder': 'Email'}),\n label=_(u'email address'))\n \n password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 
'placeholder': 'Password'}, render_value=False),\n label=_(u'Password'))\n \n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n \n mobile = forms.CharField(max_length=14)\n \n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if 'password1' in self.cleaned_data and 'password' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(u'You must type the same password each time'))\n return self.cleaned_data \n\n\n def clean_email(self):\n if 'email' in self.cleaned_data:\n \n try:\n user = User.objects.get(username= self.cleaned_data[\"email\"])\n raise forms.ValidationError(_(u'Already Email Address is registered'))\n \n except User.DoesNotExist:\n pass\n return self.cleaned_data[\"email\"]\n\ndef index_main(request):\n if request.user.is_authenticated()==True:\n return HttpResponseRedirect(reverse(\"main\"))\n else:\n if request.method==\"POST\":\n form= RegistrationForm(request.POST)\n if form.is_valid():\n u = User.objects.create_user(form.cleaned_data[\"email\"], form.cleaned_data[\"email\"], form.cleaned_data[\"password\"],)\n # Send a mail with verification code\n profile = UserProfile()\n profile.user =u\n profile.userType =1\n profile.mobile = form.cleaned_data[\"mobile\"]\n profile.save()\n \n org= Organization()\n org.owner = u\n org.save()\n return HttpResponse(\"Thanks\") \n else:\n form =RegistrationForm()\n return render( request, 'index_main.html', context={\"form\":form},)\n\n \ndef docListOwner(request):\n pass\ndef docUploadOwner(request):\n pass\n\n@login_required\ndef indexOwner(request):\n context = {}\n return render(request,'ownerMain.html',context)\n\n# 
====================================\n# DATA PROVIDERS\n# ====================================\nimport json\nfrom django.core import serializers\n\ndef get_universitys(request):\n p={}\n # import ipdb; ipdb.set_trace()\n for c in University.objects.all():\n p[c.name] = (c.name,c.pk)\n return HttpResponse(json.dumps(p), content_type=\"application/json\")\n\ndef get_publishers(request):\n p={}\n # import ipdb; ipdb.set_tra ce()\n for c in Publisher.objects.all():\n p[c.name] = (c.name,c.pk)\n return HttpResponse(json.dumps(p), content_type=\"application/json\")\n\ndef get_courses(request):\n p={}\n # import ipdb; ipdb.set_tra ce()\n for c in Course.objects.all():\n p[c.name] = (c.name,c.pk)\n return HttpResponse(json.dumps(p), content_type=\"application/json\")\n\ndef get_topics(request):\n p={}\n # import ipdb; ipdb.set_tra ce()\n for c in Topic.objects.all():\n p[c.name] = (c.name,c.pk)\n return HttpResponse(json.dumps(p), content_type=\"application/json\")\n\ndef get_tags(request):\n p={}\n # import ipdb; ipdb.set_tra ce()\n for c in Tag.objects.all():\n p[c.name] = (c.name,c.id)\n return HttpResponse(json.dumps(p), content_type=\"application/json\")\n\ndef get_services(request):\n p={}\n # import ipdb; ipdb.set_trace()\n for c in Service.objects.all():\n p[c.name] = (c.name,c.id)\n return HttpResponse(json.dumps(p), content_type=\"application/json\")\n\ndef get_colleges(request):\n p={}\n for c in College.objects.all():\n p[c.name] =(str(c.latitude), str(c.longitude))\n return HttpResponse(json.dumps(p), content_type=\"application/json\")\n\ndef get_cities(request):\n p={}\n for c in City.objects.all():\n p[c.name] =(str(c.latitude), str(c.longitude))\n return HttpResponse(json.dumps(p), content_type=\"application/json\")\n",
"step-ids": [
23,
24,
32,
34,
37
]
}
|
[
23,
24,
32,
34,
37
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main(argv):
for com in argv:
with open(com, 'rb') as f:
txt = f.read()
if 'tzvp tzvpfit' in txt:
parts = txt.split('tzvp tzvpfit', 1)
new_txt = parts[0] + 'tzvp/tzvpfit' + parts[1]
with open(com, 'wb') as f:
f.write(new_txt)
elif 'tzvp\ntzvpfit' in txt:
parts = txt.split('tzvp\ntzvpfit', 1)
new_txt = parts[0] + 'tzvp/tzvpfit' + parts[1]
with open(com, 'wb') as f:
f.write(new_txt)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main(argv):
for com in argv:
with open(com, 'rb') as f:
txt = f.read()
if 'tzvp tzvpfit' in txt:
parts = txt.split('tzvp tzvpfit', 1)
new_txt = parts[0] + 'tzvp/tzvpfit' + parts[1]
with open(com, 'wb') as f:
f.write(new_txt)
elif 'tzvp\ntzvpfit' in txt:
parts = txt.split('tzvp\ntzvpfit', 1)
new_txt = parts[0] + 'tzvp/tzvpfit' + parts[1]
with open(com, 'wb') as f:
f.write(new_txt)
if __name__ == '__main__':
main(sys.argv[1:])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import sys
def main(argv):
for com in argv:
with open(com, 'rb') as f:
txt = f.read()
if 'tzvp tzvpfit' in txt:
parts = txt.split('tzvp tzvpfit', 1)
new_txt = parts[0] + 'tzvp/tzvpfit' + parts[1]
with open(com, 'wb') as f:
f.write(new_txt)
elif 'tzvp\ntzvpfit' in txt:
parts = txt.split('tzvp\ntzvpfit', 1)
new_txt = parts[0] + 'tzvp/tzvpfit' + parts[1]
with open(com, 'wb') as f:
f.write(new_txt)
if __name__ == '__main__':
main(sys.argv[1:])
<|reserved_special_token_1|>
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 12 16:38:15 2013
@author: a92549
Fixes lack of / between tzvp and tzvpfit
"""
import sys
def main(argv):
for com in argv:
with open(com, 'rb') as f:
txt = f.read()
if 'tzvp tzvpfit' in txt:
parts = txt.split('tzvp tzvpfit',1)
new_txt = parts[0] + 'tzvp/tzvpfit' + parts[1]
with open(com, 'wb') as f:
f.write(new_txt)
elif 'tzvp\ntzvpfit' in txt:
parts = txt.split('tzvp\ntzvpfit',1)
new_txt = parts[0] + 'tzvp/tzvpfit' + parts[1]
with open(com, 'wb') as f:
f.write(new_txt)
if __name__ == '__main__':
main(sys.argv[1:])
|
flexible
|
{
"blob_id": "85974e48c7eafdf39379559820ed7f0bdc07fb7a",
"index": 3680,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(argv):\n for com in argv:\n with open(com, 'rb') as f:\n txt = f.read()\n if 'tzvp tzvpfit' in txt:\n parts = txt.split('tzvp tzvpfit', 1)\n new_txt = parts[0] + 'tzvp/tzvpfit' + parts[1]\n with open(com, 'wb') as f:\n f.write(new_txt)\n elif 'tzvp\\ntzvpfit' in txt:\n parts = txt.split('tzvp\\ntzvpfit', 1)\n new_txt = parts[0] + 'tzvp/tzvpfit' + parts[1]\n with open(com, 'wb') as f:\n f.write(new_txt)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main(argv):\n for com in argv:\n with open(com, 'rb') as f:\n txt = f.read()\n if 'tzvp tzvpfit' in txt:\n parts = txt.split('tzvp tzvpfit', 1)\n new_txt = parts[0] + 'tzvp/tzvpfit' + parts[1]\n with open(com, 'wb') as f:\n f.write(new_txt)\n elif 'tzvp\\ntzvpfit' in txt:\n parts = txt.split('tzvp\\ntzvpfit', 1)\n new_txt = parts[0] + 'tzvp/tzvpfit' + parts[1]\n with open(com, 'wb') as f:\n f.write(new_txt)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n",
"step-4": "<mask token>\nimport sys\n\n\ndef main(argv):\n for com in argv:\n with open(com, 'rb') as f:\n txt = f.read()\n if 'tzvp tzvpfit' in txt:\n parts = txt.split('tzvp tzvpfit', 1)\n new_txt = parts[0] + 'tzvp/tzvpfit' + parts[1]\n with open(com, 'wb') as f:\n f.write(new_txt)\n elif 'tzvp\\ntzvpfit' in txt:\n parts = txt.split('tzvp\\ntzvpfit', 1)\n new_txt = parts[0] + 'tzvp/tzvpfit' + parts[1]\n with open(com, 'wb') as f:\n f.write(new_txt)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n",
"step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 12 16:38:15 2013\n\n@author: a92549\n\n\nFixes lack of / between tzvp and tzvpfit\n\n\"\"\"\n\nimport sys\n\ndef main(argv):\n for com in argv:\n with open(com, 'rb') as f:\n txt = f.read()\n if 'tzvp tzvpfit' in txt:\n parts = txt.split('tzvp tzvpfit',1)\n new_txt = parts[0] + 'tzvp/tzvpfit' + parts[1]\n with open(com, 'wb') as f:\n f.write(new_txt)\n elif 'tzvp\\ntzvpfit' in txt:\n parts = txt.split('tzvp\\ntzvpfit',1)\n new_txt = parts[0] + 'tzvp/tzvpfit' + parts[1]\n with open(com, 'wb') as f:\n f.write(new_txt)\n \n \nif __name__ == '__main__':\n main(sys.argv[1:])",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class IEMRunInfoReader:
"""
Illumina Experimental Manager RunInfo xml reader.
"""
def __init__(self, f):
self.xml_file = f
self.tree = ET.parse(self.xml_file)
self.root = self.tree.getroot()
def get_reads(self):
reads = [r.attrib for r in self.root.iter('Read')]
return reads
def get_indexed_reads(self):
reads = self.get_reads()
return filter(lambda item: item['IsIndexedRead'] == 'Y', reads)
def get_index_cycles(self):
indexed_reads = self.get_indexed_reads()
return dict(index=next((item['NumCycles'] for item in indexed_reads if
item['IsIndexedRead'] == 'Y' and item['Number'] == '2'), None),
index1=next((item['NumCycles'] for item in indexed_reads if
item['IsIndexedRead'] == 'Y' and item['Number'] != '2'), None))
@staticmethod
def get_default_index_cycles():
return DEFAULT_INDEX_CYCLES
def set_index_cycles(self, index_cycles, write=True):
for read in self.root.iter('Read'):
if read.attrib['IsIndexedRead'] == 'Y':
if read.attrib['Number'] == '2':
read.attrib.update(NumCycles=index_cycles.get('index',
DEFAULT_INDEX_CYCLES['index']))
else:
read.attrib.update(NumCycles=index_cycles.get('index',
DEFAULT_INDEX_CYCLES['index']))
if write:
self.tree.write(self.xml_file)
def is_paired_end_sequencing(self):
reads = self.get_reads()
reads = filter(lambda item: item['IsIndexedRead'] == 'N', reads)
if len(reads) == 1:
return False
return True
class LogBook:
"""
Logbook manager
"""
def __init__(self, filename):
self.filename = filename
self.logfile = None
self.logbook = dict()
def dump(self):
a = []
if not os.path.isfile(self.filename):
a.append(self.logbook)
with open(self.filename, mode='w') as f:
f.write(json.dumps(a, indent=4, sort_keys=True, default=str))
else:
with open(self.filename) as feedsjson:
feeds = json.load(feedsjson)
feeds.append(self.logbook)
with open(self.filename, mode='w') as f:
f.write(json.dumps(feeds, indent=4, sort_keys=True, default
=str))
def start(self, task_name, args=None):
self.logbook.update(task_name=task_name)
self.logbook.update(args=args)
self.logbook.update(start_time=datetime.datetime.now())
def end(self):
self.logbook.update(end_time=datetime.datetime.now())
execution_time = self.logbook.get('end_time') - self.logbook.get(
'start_time')
self.logbook.update(execution_time=execution_time)
self.dump()
class IEMSampleSheetReader(csv.DictReader):
"""
Illumina Experimental Manager SampleSheet reader.
"""
def __init__(self, f):
csv.DictReader.__init__(self, f, delimiter=',')
self.header = ''
self.data = ''
first_line = f.readline()
if not first_line.startswith('[Header]'):
raise ValueError('%s is not an IEM samplesheet'.format(f.name))
header = [first_line.strip()]
l = f.readline()
while not l.startswith('[Data]'):
header.append(l.strip())
l = f.readline()
else:
header.append(l.strip())
self.header = header
self.data = csv.DictReader(f.readlines(), delimiter=',')
def barcodes_have_the_same_size(self):
return False if self.get_barcode_mask() is None else True
def get_body(self, label='Sample_Name', new_value='', replace=True):
def sanitize(mystr):
"""
Sanitize string in accordance with Illumina's documentation
bcl2fastq2 Conversion Software v2.17 Guide
"""
retainlist = '_-'
return re.sub('[^\\w' + retainlist + ']', '_', mystr)
body = []
for i in self.header:
body.append(i)
body.append('\n')
body.append(string.join(self.data.fieldnames, ','))
body.append('\n')
to_be_sanitized = ['Sample_Project', 'Sample_Name']
for row in self.data:
for f in self.data.fieldnames:
if replace and f == label:
body.append(new_value)
elif f in to_be_sanitized and row[f]:
body.append(sanitize(row[f]))
else:
body.append(row[f])
body.append(',')
body.append('\n')
return body
def get_barcode_mask(self):
barcodes_mask = dict()
for row in self.data:
index = len(row['index']) if 'index' in row else None
index1 = None
if 'index1' in row or 'index2' in row:
index1 = len(row['index2']) if 'index2' in row else len(row
['index1'])
if row['Lane'] not in barcodes_mask:
barcodes_mask[row['Lane']] = dict(index=index, index1=index1)
elif index != barcodes_mask[row['Lane']]['index'
] or index1 != barcodes_mask[row['Lane']]['index1']:
return None
return barcodes_mask
class WeightedPath(object):
def __init__(self, path, weight):
self.path = path
self.weight = weight
def __repr__(self):
return '{}: {} {}'.format(self.__class__.__name__, self.path, self.
weight)
def __cmp__(self, other):
if hasattr(other, 'weight'):
return self.weight.__cmp__(other.weight)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class IEMRunInfoReader:
"""
Illumina Experimental Manager RunInfo xml reader.
"""
def __init__(self, f):
self.xml_file = f
self.tree = ET.parse(self.xml_file)
self.root = self.tree.getroot()
def get_reads(self):
reads = [r.attrib for r in self.root.iter('Read')]
return reads
def get_indexed_reads(self):
reads = self.get_reads()
return filter(lambda item: item['IsIndexedRead'] == 'Y', reads)
def get_index_cycles(self):
indexed_reads = self.get_indexed_reads()
return dict(index=next((item['NumCycles'] for item in indexed_reads if
item['IsIndexedRead'] == 'Y' and item['Number'] == '2'), None),
index1=next((item['NumCycles'] for item in indexed_reads if
item['IsIndexedRead'] == 'Y' and item['Number'] != '2'), None))
@staticmethod
def get_default_index_cycles():
return DEFAULT_INDEX_CYCLES
def set_index_cycles(self, index_cycles, write=True):
for read in self.root.iter('Read'):
if read.attrib['IsIndexedRead'] == 'Y':
if read.attrib['Number'] == '2':
read.attrib.update(NumCycles=index_cycles.get('index',
DEFAULT_INDEX_CYCLES['index']))
else:
read.attrib.update(NumCycles=index_cycles.get('index',
DEFAULT_INDEX_CYCLES['index']))
if write:
self.tree.write(self.xml_file)
def is_paired_end_sequencing(self):
reads = self.get_reads()
reads = filter(lambda item: item['IsIndexedRead'] == 'N', reads)
if len(reads) == 1:
return False
return True
class LogBook:
"""
Logbook manager
"""
def __init__(self, filename):
self.filename = filename
self.logfile = None
self.logbook = dict()
def dump(self):
a = []
if not os.path.isfile(self.filename):
a.append(self.logbook)
with open(self.filename, mode='w') as f:
f.write(json.dumps(a, indent=4, sort_keys=True, default=str))
else:
with open(self.filename) as feedsjson:
feeds = json.load(feedsjson)
feeds.append(self.logbook)
with open(self.filename, mode='w') as f:
f.write(json.dumps(feeds, indent=4, sort_keys=True, default
=str))
def start(self, task_name, args=None):
self.logbook.update(task_name=task_name)
self.logbook.update(args=args)
self.logbook.update(start_time=datetime.datetime.now())
def end(self):
self.logbook.update(end_time=datetime.datetime.now())
execution_time = self.logbook.get('end_time') - self.logbook.get(
'start_time')
self.logbook.update(execution_time=execution_time)
self.dump()
class IEMSampleSheetReader(csv.DictReader):
"""
Illumina Experimental Manager SampleSheet reader.
"""
def __init__(self, f):
csv.DictReader.__init__(self, f, delimiter=',')
self.header = ''
self.data = ''
first_line = f.readline()
if not first_line.startswith('[Header]'):
raise ValueError('%s is not an IEM samplesheet'.format(f.name))
header = [first_line.strip()]
l = f.readline()
while not l.startswith('[Data]'):
header.append(l.strip())
l = f.readline()
else:
header.append(l.strip())
self.header = header
self.data = csv.DictReader(f.readlines(), delimiter=',')
def barcodes_have_the_same_size(self):
return False if self.get_barcode_mask() is None else True
def get_body(self, label='Sample_Name', new_value='', replace=True):
def sanitize(mystr):
"""
Sanitize string in accordance with Illumina's documentation
bcl2fastq2 Conversion Software v2.17 Guide
"""
retainlist = '_-'
return re.sub('[^\\w' + retainlist + ']', '_', mystr)
body = []
for i in self.header:
body.append(i)
body.append('\n')
body.append(string.join(self.data.fieldnames, ','))
body.append('\n')
to_be_sanitized = ['Sample_Project', 'Sample_Name']
for row in self.data:
for f in self.data.fieldnames:
if replace and f == label:
body.append(new_value)
elif f in to_be_sanitized and row[f]:
body.append(sanitize(row[f]))
else:
body.append(row[f])
body.append(',')
body.append('\n')
return body
def get_barcode_mask(self):
barcodes_mask = dict()
for row in self.data:
index = len(row['index']) if 'index' in row else None
index1 = None
if 'index1' in row or 'index2' in row:
index1 = len(row['index2']) if 'index2' in row else len(row
['index1'])
if row['Lane'] not in barcodes_mask:
barcodes_mask[row['Lane']] = dict(index=index, index1=index1)
elif index != barcodes_mask[row['Lane']]['index'
] or index1 != barcodes_mask[row['Lane']]['index1']:
return None
return barcodes_mask
class WeightedPath(object):
def __init__(self, path, weight):
self.path = path
self.weight = weight
def __repr__(self):
return '{}: {} {}'.format(self.__class__.__name__, self.path, self.
weight)
def __cmp__(self, other):
if hasattr(other, 'weight'):
return self.weight.__cmp__(other.weight)
<|reserved_special_token_0|>
def format_dataset_filename(sample_label, lane=None, read=None, ext=None,
uid=False):
filename = sanitize_filename(sample_label)
if read:
filename = '_'.join([filename, lane, read]) if lane else '_'.join([
filename, read])
if uid:
filename = '.'.join([filename, str(uuid.uuid4())])
if ext:
filename = '.'.join([filename, ext])
return sanitize_filename(filename)
<|reserved_special_token_0|>
def touch(path, logger):
try:
with open(path, 'a'):
os.utime(path, None)
except IOError as e:
logger.error('While touching {} file: {}'.format(path, e.strerror))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class IEMRunInfoReader:
"""
Illumina Experimental Manager RunInfo xml reader.
"""
def __init__(self, f):
self.xml_file = f
self.tree = ET.parse(self.xml_file)
self.root = self.tree.getroot()
def get_reads(self):
reads = [r.attrib for r in self.root.iter('Read')]
return reads
def get_indexed_reads(self):
reads = self.get_reads()
return filter(lambda item: item['IsIndexedRead'] == 'Y', reads)
def get_index_cycles(self):
indexed_reads = self.get_indexed_reads()
return dict(index=next((item['NumCycles'] for item in indexed_reads if
item['IsIndexedRead'] == 'Y' and item['Number'] == '2'), None),
index1=next((item['NumCycles'] for item in indexed_reads if
item['IsIndexedRead'] == 'Y' and item['Number'] != '2'), None))
@staticmethod
def get_default_index_cycles():
return DEFAULT_INDEX_CYCLES
def set_index_cycles(self, index_cycles, write=True):
for read in self.root.iter('Read'):
if read.attrib['IsIndexedRead'] == 'Y':
if read.attrib['Number'] == '2':
read.attrib.update(NumCycles=index_cycles.get('index',
DEFAULT_INDEX_CYCLES['index']))
else:
read.attrib.update(NumCycles=index_cycles.get('index',
DEFAULT_INDEX_CYCLES['index']))
if write:
self.tree.write(self.xml_file)
def is_paired_end_sequencing(self):
reads = self.get_reads()
reads = filter(lambda item: item['IsIndexedRead'] == 'N', reads)
if len(reads) == 1:
return False
return True
class LogBook:
"""
Logbook manager
"""
def __init__(self, filename):
self.filename = filename
self.logfile = None
self.logbook = dict()
def dump(self):
a = []
if not os.path.isfile(self.filename):
a.append(self.logbook)
with open(self.filename, mode='w') as f:
f.write(json.dumps(a, indent=4, sort_keys=True, default=str))
else:
with open(self.filename) as feedsjson:
feeds = json.load(feedsjson)
feeds.append(self.logbook)
with open(self.filename, mode='w') as f:
f.write(json.dumps(feeds, indent=4, sort_keys=True, default
=str))
def start(self, task_name, args=None):
self.logbook.update(task_name=task_name)
self.logbook.update(args=args)
self.logbook.update(start_time=datetime.datetime.now())
def end(self):
self.logbook.update(end_time=datetime.datetime.now())
execution_time = self.logbook.get('end_time') - self.logbook.get(
'start_time')
self.logbook.update(execution_time=execution_time)
self.dump()
class IEMSampleSheetReader(csv.DictReader):
"""
Illumina Experimental Manager SampleSheet reader.
"""
def __init__(self, f):
csv.DictReader.__init__(self, f, delimiter=',')
self.header = ''
self.data = ''
first_line = f.readline()
if not first_line.startswith('[Header]'):
raise ValueError('%s is not an IEM samplesheet'.format(f.name))
header = [first_line.strip()]
l = f.readline()
while not l.startswith('[Data]'):
header.append(l.strip())
l = f.readline()
else:
header.append(l.strip())
self.header = header
self.data = csv.DictReader(f.readlines(), delimiter=',')
def barcodes_have_the_same_size(self):
return False if self.get_barcode_mask() is None else True
def get_body(self, label='Sample_Name', new_value='', replace=True):
def sanitize(mystr):
"""
Sanitize string in accordance with Illumina's documentation
bcl2fastq2 Conversion Software v2.17 Guide
"""
retainlist = '_-'
return re.sub('[^\\w' + retainlist + ']', '_', mystr)
body = []
for i in self.header:
body.append(i)
body.append('\n')
body.append(string.join(self.data.fieldnames, ','))
body.append('\n')
to_be_sanitized = ['Sample_Project', 'Sample_Name']
for row in self.data:
for f in self.data.fieldnames:
if replace and f == label:
body.append(new_value)
elif f in to_be_sanitized and row[f]:
body.append(sanitize(row[f]))
else:
body.append(row[f])
body.append(',')
body.append('\n')
return body
def get_barcode_mask(self):
barcodes_mask = dict()
for row in self.data:
index = len(row['index']) if 'index' in row else None
index1 = None
if 'index1' in row or 'index2' in row:
index1 = len(row['index2']) if 'index2' in row else len(row
['index1'])
if row['Lane'] not in barcodes_mask:
barcodes_mask[row['Lane']] = dict(index=index, index1=index1)
elif index != barcodes_mask[row['Lane']]['index'
] or index1 != barcodes_mask[row['Lane']]['index1']:
return None
return barcodes_mask
class WeightedPath(object):
def __init__(self, path, weight):
self.path = path
self.weight = weight
def __repr__(self):
return '{}: {} {}'.format(self.__class__.__name__, self.path, self.
weight)
def __cmp__(self, other):
if hasattr(other, 'weight'):
return self.weight.__cmp__(other.weight)
<|reserved_special_token_0|>
def format_dataset_filename(sample_label, lane=None, read=None, ext=None,
uid=False):
filename = sanitize_filename(sample_label)
if read:
filename = '_'.join([filename, lane, read]) if lane else '_'.join([
filename, read])
if uid:
filename = '.'.join([filename, str(uuid.uuid4())])
if ext:
filename = '.'.join([filename, ext])
return sanitize_filename(filename)
def config_file_setup(logger, cf_label, cf_from_cli=None):
"""
Create a config file if does not exists, copying it from the package
default into the user_config_dir.
Return a configuration file path from cli args if present, otherwise return
a path from the user_config_dir
:param logger: logger
:param cf_label: label of the configuration file (required)
:param cf_from_cli: path to configuration file from cli arg
:return: Path
"""
presta_config_dir = os.path.join(user_config_dir(__appname__))
config_file_from_home = os.path.join(presta_config_dir, cf_label)
if not path_exists(config_file_from_home, logger, force=False):
logger.info('Creating config path {}'.format(presta_config_dir))
ensure_dir(presta_config_dir)
config_file_path = '/'.join(['config', cf_label])
config_file_from_package = resource_filename(__appname__,
config_file_path)
copyfile(config_file_from_package, config_file_from_home)
config_file_paths = []
if cf_from_cli and path_exists(cf_from_cli, logger, force=False):
config_file_paths.append(WeightedPath(cf_from_cli, 0))
if path_exists(config_file_from_home, logger, force=False):
config_file_paths.append(WeightedPath(config_file_from_home, 1))
logger.debug('config file paths: {}'.format(config_file_paths))
config_file_path = sorted(config_file_paths)[0].path
logger.info('Reading configuration from {}'.format(config_file_path))
return config_file_path
def touch(path, logger):
try:
with open(path, 'a'):
os.utime(path, None)
except IOError as e:
logger.error('While touching {} file: {}'.format(path, e.strerror))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
SAMPLES_WITHOUT_BARCODES = [2, 8]
DEFAULT_INDEX_CYCLES = dict(index='8', index1='8')
PROGRESS_STATUS = dict(COMPLETED='completed', STARTED='started', TODO='todo')
class IEMRunInfoReader:
"""
Illumina Experimental Manager RunInfo xml reader.
"""
def __init__(self, f):
self.xml_file = f
self.tree = ET.parse(self.xml_file)
self.root = self.tree.getroot()
def get_reads(self):
reads = [r.attrib for r in self.root.iter('Read')]
return reads
def get_indexed_reads(self):
reads = self.get_reads()
return filter(lambda item: item['IsIndexedRead'] == 'Y', reads)
def get_index_cycles(self):
indexed_reads = self.get_indexed_reads()
return dict(index=next((item['NumCycles'] for item in indexed_reads if
item['IsIndexedRead'] == 'Y' and item['Number'] == '2'), None),
index1=next((item['NumCycles'] for item in indexed_reads if
item['IsIndexedRead'] == 'Y' and item['Number'] != '2'), None))
@staticmethod
def get_default_index_cycles():
return DEFAULT_INDEX_CYCLES
def set_index_cycles(self, index_cycles, write=True):
for read in self.root.iter('Read'):
if read.attrib['IsIndexedRead'] == 'Y':
if read.attrib['Number'] == '2':
read.attrib.update(NumCycles=index_cycles.get('index',
DEFAULT_INDEX_CYCLES['index']))
else:
read.attrib.update(NumCycles=index_cycles.get('index',
DEFAULT_INDEX_CYCLES['index']))
if write:
self.tree.write(self.xml_file)
def is_paired_end_sequencing(self):
reads = self.get_reads()
reads = filter(lambda item: item['IsIndexedRead'] == 'N', reads)
if len(reads) == 1:
return False
return True
class LogBook:
"""
Logbook manager
"""
def __init__(self, filename):
self.filename = filename
self.logfile = None
self.logbook = dict()
def dump(self):
a = []
if not os.path.isfile(self.filename):
a.append(self.logbook)
with open(self.filename, mode='w') as f:
f.write(json.dumps(a, indent=4, sort_keys=True, default=str))
else:
with open(self.filename) as feedsjson:
feeds = json.load(feedsjson)
feeds.append(self.logbook)
with open(self.filename, mode='w') as f:
f.write(json.dumps(feeds, indent=4, sort_keys=True, default
=str))
def start(self, task_name, args=None):
self.logbook.update(task_name=task_name)
self.logbook.update(args=args)
self.logbook.update(start_time=datetime.datetime.now())
def end(self):
self.logbook.update(end_time=datetime.datetime.now())
execution_time = self.logbook.get('end_time') - self.logbook.get(
'start_time')
self.logbook.update(execution_time=execution_time)
self.dump()
class IEMSampleSheetReader(csv.DictReader):
"""
Illumina Experimental Manager SampleSheet reader.
"""
def __init__(self, f):
csv.DictReader.__init__(self, f, delimiter=',')
self.header = ''
self.data = ''
first_line = f.readline()
if not first_line.startswith('[Header]'):
raise ValueError('%s is not an IEM samplesheet'.format(f.name))
header = [first_line.strip()]
l = f.readline()
while not l.startswith('[Data]'):
header.append(l.strip())
l = f.readline()
else:
header.append(l.strip())
self.header = header
self.data = csv.DictReader(f.readlines(), delimiter=',')
def barcodes_have_the_same_size(self):
return False if self.get_barcode_mask() is None else True
def get_body(self, label='Sample_Name', new_value='', replace=True):
def sanitize(mystr):
"""
Sanitize string in accordance with Illumina's documentation
bcl2fastq2 Conversion Software v2.17 Guide
"""
retainlist = '_-'
return re.sub('[^\\w' + retainlist + ']', '_', mystr)
body = []
for i in self.header:
body.append(i)
body.append('\n')
body.append(string.join(self.data.fieldnames, ','))
body.append('\n')
to_be_sanitized = ['Sample_Project', 'Sample_Name']
for row in self.data:
for f in self.data.fieldnames:
if replace and f == label:
body.append(new_value)
elif f in to_be_sanitized and row[f]:
body.append(sanitize(row[f]))
else:
body.append(row[f])
body.append(',')
body.append('\n')
return body
def get_barcode_mask(self):
barcodes_mask = dict()
for row in self.data:
index = len(row['index']) if 'index' in row else None
index1 = None
if 'index1' in row or 'index2' in row:
index1 = len(row['index2']) if 'index2' in row else len(row
['index1'])
if row['Lane'] not in barcodes_mask:
barcodes_mask[row['Lane']] = dict(index=index, index1=index1)
elif index != barcodes_mask[row['Lane']]['index'
] or index1 != barcodes_mask[row['Lane']]['index1']:
return None
return barcodes_mask
class WeightedPath(object):
def __init__(self, path, weight):
self.path = path
self.weight = weight
def __repr__(self):
return '{}: {} {}'.format(self.__class__.__name__, self.path, self.
weight)
def __cmp__(self, other):
if hasattr(other, 'weight'):
return self.weight.__cmp__(other.weight)
def get_conf(logger, config_file_from_cli=None, profile=None):
profiles = {'presta': 'presta_config.yml', 'celery': 'celery_config.yml'}
default_config_file_label = profiles.get(profile, profiles['presta'])
config_file_path = config_file_setup(logger, default_config_file_label,
cf_from_cli=config_file_from_cli)
return ConfigurationFromYamlFile(config_file_path)
def path_exists(path, logger, force=True):
def file_missing(path, logger, force):
msg = "path - {} - doesn't exists".format(path)
if force:
logger.error(msg)
sys.exit()
logger.warning(msg)
return False
return True if os.path.exists(os.path.expanduser(path)) else file_missing(
path, logger, force)
def sanitize_filename(filename):
valid_chars = '-_.%s%s' % (string.ascii_letters, string.digits)
return ''.join(c for c in filename if c in valid_chars)
def format_dataset_filename(sample_label, lane=None, read=None, ext=None,
uid=False):
filename = sanitize_filename(sample_label)
if read:
filename = '_'.join([filename, lane, read]) if lane else '_'.join([
filename, read])
if uid:
filename = '.'.join([filename, str(uuid.uuid4())])
if ext:
filename = '.'.join([filename, ext])
return sanitize_filename(filename)
def config_file_setup(logger, cf_label, cf_from_cli=None):
"""
Create a config file if does not exists, copying it from the package
default into the user_config_dir.
Return a configuration file path from cli args if present, otherwise return
a path from the user_config_dir
:param logger: logger
:param cf_label: label of the configuration file (required)
:param cf_from_cli: path to configuration file from cli arg
:return: Path
"""
presta_config_dir = os.path.join(user_config_dir(__appname__))
config_file_from_home = os.path.join(presta_config_dir, cf_label)
if not path_exists(config_file_from_home, logger, force=False):
logger.info('Creating config path {}'.format(presta_config_dir))
ensure_dir(presta_config_dir)
config_file_path = '/'.join(['config', cf_label])
config_file_from_package = resource_filename(__appname__,
config_file_path)
copyfile(config_file_from_package, config_file_from_home)
config_file_paths = []
if cf_from_cli and path_exists(cf_from_cli, logger, force=False):
config_file_paths.append(WeightedPath(cf_from_cli, 0))
if path_exists(config_file_from_home, logger, force=False):
config_file_paths.append(WeightedPath(config_file_from_home, 1))
logger.debug('config file paths: {}'.format(config_file_paths))
config_file_path = sorted(config_file_paths)[0].path
logger.info('Reading configuration from {}'.format(config_file_path))
return config_file_path
def touch(path, logger):
try:
with open(path, 'a'):
os.utime(path, None)
except IOError as e:
logger.error('While touching {} file: {}'.format(path, e.strerror))
def read_chunks(file_handle, chunk_size=8192):
while True:
data = file_handle.read(chunk_size)
if not data:
break
yield data
def get_md5(file_handle):
hasher = hashlib.md5()
for chunk in read_chunks(file_handle):
hasher.update(chunk)
return hasher.hexdigest()
def check_progress_status(root_path, started_file, completed_file):
localroot, dirnames, filenames = os.walk(root_path).next()
if started_file not in filenames:
return PROGRESS_STATUS.get('TODO')
elif completed_file not in filenames:
return PROGRESS_STATUS.get('STARTED')
else:
started_file = os.path.join(root_path, started_file)
completed_file = os.path.join(root_path, completed_file)
if os.path.getmtime(started_file) > os.path.getmtime(completed_file):
return PROGRESS_STATUS.get('STARTED')
return PROGRESS_STATUS.get('COMPLETED')
def runJob(cmd, logger):
try:
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=
subprocess.STDOUT)
output = process.communicate()[0]
ret = process.wait()
return True
except subprocess.CalledProcessError as e:
logger.info(e)
if e.output:
logger.info('command output: %s', e.output)
else:
logger.info('no command output available')
return False
<|reserved_special_token_1|>
"""
Utilities used by other modules.
"""
import csv
import datetime
import hashlib
import json
import re
import string
import subprocess
import uuid
import xml.etree.ElementTree as ET
from alta import ConfigurationFromYamlFile
from pkg_resources import resource_filename
from ..__details__ import __appname__
from appdirs import *
from comoda import ensure_dir
from shutil import copyfile
# NOTE(review): unused within this chunk; meaning inferred from the name
# (sample counts that imply a run without barcodes) — confirm at call sites
SAMPLES_WITHOUT_BARCODES = [2, 8]
# Fallback NumCycles values for the index reads (used by IEMRunInfoReader)
DEFAULT_INDEX_CYCLES = dict(index='8', index1='8')
# Canonical progress labels returned by check_progress_status
PROGRESS_STATUS = dict(COMPLETED='completed', STARTED='started', TODO='todo')
class IEMRunInfoReader:
    """
    Illumina Experimental Manager RunInfo xml reader.

    Wraps a RunInfo.xml file and exposes its <Read> elements
    (number of cycles, indexed or not).
    """

    def __init__(self, f):
        # f: path to the RunInfo.xml file
        self.xml_file = f
        self.tree = ET.parse(self.xml_file)
        self.root = self.tree.getroot()

    def get_reads(self):
        """Return the attribute dict of every <Read> element."""
        reads = [r.attrib for r in self.root.iter('Read')]
        return reads

    def get_indexed_reads(self):
        """Return the reads flagged as index reads (IsIndexedRead == "Y").

        Returns a list (not a lazy filter object) so the result can be
        iterated more than once, as get_index_cycles() does.
        """
        reads = self.get_reads()
        return [item for item in reads if item["IsIndexedRead"] == "Y"]

    def get_index_cycles(self):
        """Return dict(index=..., index1=...) with the NumCycles of the
        index reads; a value is None when that index read is absent.

        By convention here, read Number "2" is 'index' and any other
        indexed read is 'index1'.
        """
        indexed_reads = self.get_indexed_reads()
        return dict(
            index=next((item['NumCycles'] for item in indexed_reads
                        if item['Number'] == "2"), None),
            index1=next((item['NumCycles'] for item in indexed_reads
                         if item['Number'] != "2"), None))

    @staticmethod
    def get_default_index_cycles():
        """Return the module-level fallback index cycle values."""
        return DEFAULT_INDEX_CYCLES

    def set_index_cycles(self, index_cycles, write=True):
        """Update NumCycles of the indexed reads; optionally rewrite the file.

        Read Number "2" takes index_cycles['index']; any other indexed read
        takes index_cycles['index1'] (falling back to the defaults).
        """
        for read in self.root.iter('Read'):
            if read.attrib["IsIndexedRead"] == "Y":
                if read.attrib['Number'] == '2':
                    read.attrib.update(NumCycles=index_cycles.get(
                        'index', DEFAULT_INDEX_CYCLES['index']))
                else:
                    # bug fix: this branch previously used the 'index'
                    # entry instead of 'index1'
                    read.attrib.update(NumCycles=index_cycles.get(
                        'index1', DEFAULT_INDEX_CYCLES['index1']))
        if write:
            self.tree.write(self.xml_file)

    def is_paired_end_sequencing(self):
        """True when more than one non-index read is present."""
        # materialize a list: len() over a lazy filter fails on Python 3
        reads = [item for item in self.get_reads()
                 if item["IsIndexedRead"] == "N"]
        return len(reads) != 1
class LogBook:
    """
    Logbook manager: collects task metadata in a dict and appends it, as a
    JSON entry, to a logbook file on disk.
    """

    def __init__(self, filename):
        self.filename = filename
        self.logfile = None
        self.logbook = dict()

    def dump(self):
        """Append the current logbook entry to the JSON file on disk."""
        if os.path.isfile(self.filename):
            # File already present: load the existing entries first.
            with open(self.filename) as handle:
                entries = json.load(handle)
        else:
            entries = []
        entries.append(self.logbook)
        with open(self.filename, mode='w') as handle:
            handle.write(json.dumps(entries, indent=4, sort_keys=True,
                                    default=str))

    def start(self, task_name, args=None):
        """Record the task name, its arguments and the start timestamp."""
        self.logbook.update(task_name=task_name,
                            args=args,
                            start_time=datetime.datetime.now())

    def end(self):
        """Record the end timestamp, compute the duration and persist."""
        self.logbook.update(end_time=datetime.datetime.now())
        elapsed = self.logbook.get('end_time') - self.logbook.get('start_time')
        self.logbook.update(execution_time=elapsed)
        self.dump()
class IEMSampleSheetReader(csv.DictReader):
    """
    Illumina Experimental Manager SampleSheet reader.

    Splits the samplesheet into the raw ``[Header]`` section (every line up
    to and including the ``[Data]`` marker, stored in ``self.header``) and a
    ``csv.DictReader`` over the sample rows (``self.data``).
    """

    def __init__(self, f):
        # f: open file-like object positioned at the start of the samplesheet
        csv.DictReader.__init__(self, f, delimiter=',')
        self.header = ''
        self.data = ''
        first_line = f.readline()
        if not first_line.startswith('[Header]'):
            # bug fix: the original mixed %-style and str.format, so the
            # file name was never interpolated into the message
            raise ValueError('{} is not an IEM samplesheet'.format(f.name))
        header = [first_line.strip()]
        line = f.readline()
        while not line.startswith('[Data]'):
            if not line:
                # EOF before the [Data] marker: without this guard the loop
                # would spin forever on empty strings
                raise ValueError('[Data] section not found in samplesheet')
            header.append(line.strip())
            line = f.readline()
        header.append(line.strip())
        self.header = header
        self.data = csv.DictReader(f.readlines(), delimiter=',')

    def barcodes_have_the_same_size(self):
        """True when every lane uses a single (index, index1) length pair."""
        return False if self.get_barcode_mask() is None else True

    def get_body(self, label='Sample_Name', new_value='', replace=True):
        """Return the samplesheet as a list of text fragments.

        When ``replace`` is True, every value of column ``label`` is replaced
        with ``new_value``; Sample_Project/Sample_Name values are sanitized
        for bcl2fastq2. Note: this consumes ``self.data``.
        """

        def sanitize(mystr):
            """
            Sanitize string in accordance with Illumina's documentation
            bcl2fastq2 Conversion Software v2.17 Guide
            """
            retainlist = "_-"
            return re.sub(r'[^\w' + retainlist + ']', '_', mystr)

        body = []
        for i in self.header:
            body.append(i)
            body.append('\n')
        # bug fix: string.join() was removed in Python 3; ','.join works
        # on both Python 2 and 3
        body.append(','.join(self.data.fieldnames))
        body.append('\n')
        to_be_sanitized = ['Sample_Project', 'Sample_Name']
        for row in self.data:
            for f in self.data.fieldnames:
                if replace and f == label:
                    body.append(new_value)
                elif f in to_be_sanitized and row[f]:
                    body.append(sanitize(row[f]))
                else:
                    body.append(row[f])
                body.append(',')
            body.append('\n')
        return body

    def get_barcode_mask(self):
        """Map each lane to its (index, index1) barcode lengths.

        Returns None as soon as two rows of the same lane disagree on the
        barcode lengths. Note: this consumes ``self.data``.
        """
        barcodes_mask = dict()
        for row in self.data:
            index = len(row['index']) if 'index' in row else None
            index1 = None
            if 'index1' in row or 'index2' in row:
                index1 = len(row['index2']) if 'index2' in row else len(row['index1'])
            if row['Lane'] not in barcodes_mask:
                barcodes_mask[row['Lane']] = dict(index=index, index1=index1)
            elif index != barcodes_mask[row['Lane']]['index'] or \
                    index1 != barcodes_mask[row['Lane']]['index1']:
                return None
        return barcodes_mask
class WeightedPath(object):
    """A filesystem path with a sort weight (lower weight sorts first)."""

    def __init__(self, path, weight):
        self.path = path
        self.weight = weight

    def __repr__(self):
        return '{}: {} {}'.format(self.__class__.__name__,
                                  self.path,
                                  self.weight)

    def __cmp__(self, other):
        # Python 2 legacy ordering hook, kept for backward compatibility.
        if hasattr(other, 'weight'):
            return self.weight.__cmp__(other.weight)

    def __lt__(self, other):
        # bug fix: Python 3 ignores __cmp__, so sorted() over WeightedPath
        # instances (see config_file_setup) needs __lt__. __eq__ is NOT
        # defined on purpose, to keep instances hashable.
        return self.weight < other.weight
def get_conf(logger, config_file_from_cli=None, profile=None):
    """Load the YAML configuration for the given profile.

    Unknown (or None) profiles fall back to the 'presta' profile.
    """
    profile_files = {
        'presta': 'presta_config.yml',
        'celery': 'celery_config.yml',
    }
    label = profile_files.get(profile, profile_files['presta'])
    resolved_path = config_file_setup(logger, label,
                                      cf_from_cli=config_file_from_cli)
    # Parse the YAML configuration file resolved above
    return ConfigurationFromYamlFile(resolved_path)
def path_exists(path, logger, force=True):
    """Check that *path* exists.

    On a missing path: abort the process when force=True, otherwise log a
    warning and return False.
    """

    def _missing(missing_path, log, abort):
        message = "path - {} - doesn't exists".format(missing_path)
        if abort:
            log.error(message)
            sys.exit()
        log.warning(message)
        return False

    expanded = os.path.expanduser(path)
    if os.path.exists(expanded):
        return True
    return _missing(path, logger, force)
def sanitize_filename(filename):
    """Strip every character that is not alphanumeric, '-', '_' or '.'."""
    allowed = set(string.ascii_letters + string.digits + '-_.')
    return ''.join(ch for ch in filename if ch in allowed)
def format_dataset_filename(sample_label, lane=None, read=None, ext=None, uid=False):
    """Build a sanitized dataset filename from its components.

    Joins sample label, optional lane and read with '_', then appends an
    optional random uuid and extension with '.'; the result is sanitized.
    """
    parts = [sanitize_filename(sample_label)]
    if read:
        if lane:
            parts.append(lane)
        parts.append(read)
    filename = '_'.join(parts)
    if uid:
        filename = '.'.join([filename, str(uuid.uuid4())])
    if ext:
        filename = '.'.join([filename, ext])
    return sanitize_filename(filename)
def config_file_setup(logger, cf_label, cf_from_cli=None):
    """
    Create a config file if does not exists, copying it from the package
    default into the user_config_dir.
    Return a configuration file path from cli args if present, otherwise return
    a path from the user_config_dir
    :param logger: logger
    :param cf_label: label of the configuration file (required)
    :param cf_from_cli: path to configuration file from cli arg
    :return: Path
    """
    presta_config_dir = os.path.join(user_config_dir(__appname__))
    config_file_from_home = os.path.join(presta_config_dir, cf_label)
    # First run: copy the packaged default config into the user config dir
    if not path_exists(config_file_from_home, logger, force=False):
        logger.info('Creating config path {}'.format(presta_config_dir))
        ensure_dir(presta_config_dir)
        config_file_path = '/'.join(['config', cf_label])
        config_file_from_package = resource_filename(__appname__,
                                                     config_file_path)
        copyfile(config_file_from_package, config_file_from_home)
    # Collect candidates: the cli path (weight 0) wins over the home config
    # (weight 1) because sorted() puts the lowest weight first
    config_file_paths = []
    if cf_from_cli and path_exists(cf_from_cli, logger, force=False):
        config_file_paths.append(WeightedPath(cf_from_cli, 0))
    if path_exists(config_file_from_home, logger, force=False):
        config_file_paths.append(WeightedPath(config_file_from_home, 1))
    logger.debug("config file paths: {}".format(config_file_paths))
    config_file_path = sorted(config_file_paths)[0].path
    logger.info('Reading configuration from {}'.format(config_file_path))
    return config_file_path
def touch(path, logger):
    """Create *path* if it is missing and refresh its timestamps.

    Mirrors the unix ``touch`` command; IO failures are logged, not raised.
    """
    try:
        handle = open(path, 'a')
        try:
            os.utime(path, None)
        finally:
            handle.close()
    except IOError as e:
        logger.error("While touching {} file: {}".format(path, e.strerror))
def read_chunks(file_handle, chunk_size=8192):
    """Yield successive chunks of at most *chunk_size* from *file_handle*."""
    chunk = file_handle.read(chunk_size)
    while chunk:
        yield chunk
        chunk = file_handle.read(chunk_size)
def get_md5(file_handle):
    """Return the hexadecimal md5 digest of *file_handle*'s content,
    read incrementally to keep memory bounded."""
    digest = hashlib.md5()
    for block in read_chunks(file_handle):
        digest.update(block)
    return digest.hexdigest()
def check_progress_status(root_path, started_file, completed_file):
    """Classify the progress of *root_path* as todo/started/completed.

    Based on the presence (and relative mtimes) of the two marker files
    directly inside *root_path*.
    """
    # bug fix: generator.next() is Python 2 only; next(...) works on both
    localroot, dirnames, filenames = next(os.walk(root_path))
    if started_file not in filenames:
        return PROGRESS_STATUS.get('TODO')
    if completed_file not in filenames:
        return PROGRESS_STATUS.get('STARTED')
    started_path = os.path.join(root_path, started_file)
    completed_path = os.path.join(root_path, completed_file)
    # A started marker newer than the completed marker means a re-run is
    # currently in progress
    if os.path.getmtime(started_path) > os.path.getmtime(completed_path):
        return PROGRESS_STATUS.get('STARTED')
    return PROGRESS_STATUS.get('COMPLETED')
def runJob(cmd, logger):
    """Run *cmd* and report whether it succeeded.

    Uses ``subprocess.check_output`` so that a non-zero exit status raises
    ``CalledProcessError`` — the original Popen/communicate/wait sequence
    never raised it, which made the except branch dead code and caused
    failed commands to be reported as successful.

    :param cmd: command to execute, as an argument list
    :param logger: logger used to report failures and captured output
    :return: True when the command exits with status 0, False otherwise
    """
    try:
        # stderr is merged into stdout so failure output is captured in
        # e.output, matching the previous stderr=subprocess.STDOUT setup.
        subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        return True
    except subprocess.CalledProcessError as e:
        logger.info(e)
        if e.output:
            logger.info("command output: %s", e.output)
        else:
            logger.info("no command output available")
        return False
|
flexible
|
{
"blob_id": "b16c847912944e0563492d35768b5b5bf3a506c7",
"index": 1569,
"step-1": "<mask token>\n\n\nclass IEMRunInfoReader:\n \"\"\"\n Illumina Experimental Manager RunInfo xml reader.\n \"\"\"\n\n def __init__(self, f):\n self.xml_file = f\n self.tree = ET.parse(self.xml_file)\n self.root = self.tree.getroot()\n\n def get_reads(self):\n reads = [r.attrib for r in self.root.iter('Read')]\n return reads\n\n def get_indexed_reads(self):\n reads = self.get_reads()\n return filter(lambda item: item['IsIndexedRead'] == 'Y', reads)\n\n def get_index_cycles(self):\n indexed_reads = self.get_indexed_reads()\n return dict(index=next((item['NumCycles'] for item in indexed_reads if\n item['IsIndexedRead'] == 'Y' and item['Number'] == '2'), None),\n index1=next((item['NumCycles'] for item in indexed_reads if \n item['IsIndexedRead'] == 'Y' and item['Number'] != '2'), None))\n\n @staticmethod\n def get_default_index_cycles():\n return DEFAULT_INDEX_CYCLES\n\n def set_index_cycles(self, index_cycles, write=True):\n for read in self.root.iter('Read'):\n if read.attrib['IsIndexedRead'] == 'Y':\n if read.attrib['Number'] == '2':\n read.attrib.update(NumCycles=index_cycles.get('index',\n DEFAULT_INDEX_CYCLES['index']))\n else:\n read.attrib.update(NumCycles=index_cycles.get('index',\n DEFAULT_INDEX_CYCLES['index']))\n if write:\n self.tree.write(self.xml_file)\n\n def is_paired_end_sequencing(self):\n reads = self.get_reads()\n reads = filter(lambda item: item['IsIndexedRead'] == 'N', reads)\n if len(reads) == 1:\n return False\n return True\n\n\nclass LogBook:\n \"\"\"\n Logbook manager\n \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n self.logfile = None\n self.logbook = dict()\n\n def dump(self):\n a = []\n if not os.path.isfile(self.filename):\n a.append(self.logbook)\n with open(self.filename, mode='w') as f:\n f.write(json.dumps(a, indent=4, sort_keys=True, default=str))\n else:\n with open(self.filename) as feedsjson:\n feeds = json.load(feedsjson)\n feeds.append(self.logbook)\n with open(self.filename, mode='w') as f:\n 
f.write(json.dumps(feeds, indent=4, sort_keys=True, default\n =str))\n\n def start(self, task_name, args=None):\n self.logbook.update(task_name=task_name)\n self.logbook.update(args=args)\n self.logbook.update(start_time=datetime.datetime.now())\n\n def end(self):\n self.logbook.update(end_time=datetime.datetime.now())\n execution_time = self.logbook.get('end_time') - self.logbook.get(\n 'start_time')\n self.logbook.update(execution_time=execution_time)\n self.dump()\n\n\nclass IEMSampleSheetReader(csv.DictReader):\n \"\"\"\n Illumina Experimental Manager SampleSheet reader.\n \"\"\"\n\n def __init__(self, f):\n csv.DictReader.__init__(self, f, delimiter=',')\n self.header = ''\n self.data = ''\n first_line = f.readline()\n if not first_line.startswith('[Header]'):\n raise ValueError('%s is not an IEM samplesheet'.format(f.name))\n header = [first_line.strip()]\n l = f.readline()\n while not l.startswith('[Data]'):\n header.append(l.strip())\n l = f.readline()\n else:\n header.append(l.strip())\n self.header = header\n self.data = csv.DictReader(f.readlines(), delimiter=',')\n\n def barcodes_have_the_same_size(self):\n return False if self.get_barcode_mask() is None else True\n\n def get_body(self, label='Sample_Name', new_value='', replace=True):\n\n def sanitize(mystr):\n \"\"\"\n Sanitize string in accordance with Illumina's documentation\n bcl2fastq2 Conversion Software v2.17 Guide\n \"\"\"\n retainlist = '_-'\n return re.sub('[^\\\\w' + retainlist + ']', '_', mystr)\n body = []\n for i in self.header:\n body.append(i)\n body.append('\\n')\n body.append(string.join(self.data.fieldnames, ','))\n body.append('\\n')\n to_be_sanitized = ['Sample_Project', 'Sample_Name']\n for row in self.data:\n for f in self.data.fieldnames:\n if replace and f == label:\n body.append(new_value)\n elif f in to_be_sanitized and row[f]:\n body.append(sanitize(row[f]))\n else:\n body.append(row[f])\n body.append(',')\n body.append('\\n')\n return body\n\n def get_barcode_mask(self):\n 
barcodes_mask = dict()\n for row in self.data:\n index = len(row['index']) if 'index' in row else None\n index1 = None\n if 'index1' in row or 'index2' in row:\n index1 = len(row['index2']) if 'index2' in row else len(row\n ['index1'])\n if row['Lane'] not in barcodes_mask:\n barcodes_mask[row['Lane']] = dict(index=index, index1=index1)\n elif index != barcodes_mask[row['Lane']]['index'\n ] or index1 != barcodes_mask[row['Lane']]['index1']:\n return None\n return barcodes_mask\n\n\nclass WeightedPath(object):\n\n def __init__(self, path, weight):\n self.path = path\n self.weight = weight\n\n def __repr__(self):\n return '{}: {} {}'.format(self.__class__.__name__, self.path, self.\n weight)\n\n def __cmp__(self, other):\n if hasattr(other, 'weight'):\n return self.weight.__cmp__(other.weight)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass IEMRunInfoReader:\n \"\"\"\n Illumina Experimental Manager RunInfo xml reader.\n \"\"\"\n\n def __init__(self, f):\n self.xml_file = f\n self.tree = ET.parse(self.xml_file)\n self.root = self.tree.getroot()\n\n def get_reads(self):\n reads = [r.attrib for r in self.root.iter('Read')]\n return reads\n\n def get_indexed_reads(self):\n reads = self.get_reads()\n return filter(lambda item: item['IsIndexedRead'] == 'Y', reads)\n\n def get_index_cycles(self):\n indexed_reads = self.get_indexed_reads()\n return dict(index=next((item['NumCycles'] for item in indexed_reads if\n item['IsIndexedRead'] == 'Y' and item['Number'] == '2'), None),\n index1=next((item['NumCycles'] for item in indexed_reads if \n item['IsIndexedRead'] == 'Y' and item['Number'] != '2'), None))\n\n @staticmethod\n def get_default_index_cycles():\n return DEFAULT_INDEX_CYCLES\n\n def set_index_cycles(self, index_cycles, write=True):\n for read in self.root.iter('Read'):\n if read.attrib['IsIndexedRead'] == 'Y':\n if read.attrib['Number'] == '2':\n read.attrib.update(NumCycles=index_cycles.get('index',\n DEFAULT_INDEX_CYCLES['index']))\n else:\n read.attrib.update(NumCycles=index_cycles.get('index',\n DEFAULT_INDEX_CYCLES['index']))\n if write:\n self.tree.write(self.xml_file)\n\n def is_paired_end_sequencing(self):\n reads = self.get_reads()\n reads = filter(lambda item: item['IsIndexedRead'] == 'N', reads)\n if len(reads) == 1:\n return False\n return True\n\n\nclass LogBook:\n \"\"\"\n Logbook manager\n \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n self.logfile = None\n self.logbook = dict()\n\n def dump(self):\n a = []\n if not os.path.isfile(self.filename):\n a.append(self.logbook)\n with open(self.filename, mode='w') as f:\n f.write(json.dumps(a, indent=4, sort_keys=True, default=str))\n else:\n with open(self.filename) as feedsjson:\n feeds = json.load(feedsjson)\n feeds.append(self.logbook)\n with open(self.filename, mode='w') as f:\n 
f.write(json.dumps(feeds, indent=4, sort_keys=True, default\n =str))\n\n def start(self, task_name, args=None):\n self.logbook.update(task_name=task_name)\n self.logbook.update(args=args)\n self.logbook.update(start_time=datetime.datetime.now())\n\n def end(self):\n self.logbook.update(end_time=datetime.datetime.now())\n execution_time = self.logbook.get('end_time') - self.logbook.get(\n 'start_time')\n self.logbook.update(execution_time=execution_time)\n self.dump()\n\n\nclass IEMSampleSheetReader(csv.DictReader):\n \"\"\"\n Illumina Experimental Manager SampleSheet reader.\n \"\"\"\n\n def __init__(self, f):\n csv.DictReader.__init__(self, f, delimiter=',')\n self.header = ''\n self.data = ''\n first_line = f.readline()\n if not first_line.startswith('[Header]'):\n raise ValueError('%s is not an IEM samplesheet'.format(f.name))\n header = [first_line.strip()]\n l = f.readline()\n while not l.startswith('[Data]'):\n header.append(l.strip())\n l = f.readline()\n else:\n header.append(l.strip())\n self.header = header\n self.data = csv.DictReader(f.readlines(), delimiter=',')\n\n def barcodes_have_the_same_size(self):\n return False if self.get_barcode_mask() is None else True\n\n def get_body(self, label='Sample_Name', new_value='', replace=True):\n\n def sanitize(mystr):\n \"\"\"\n Sanitize string in accordance with Illumina's documentation\n bcl2fastq2 Conversion Software v2.17 Guide\n \"\"\"\n retainlist = '_-'\n return re.sub('[^\\\\w' + retainlist + ']', '_', mystr)\n body = []\n for i in self.header:\n body.append(i)\n body.append('\\n')\n body.append(string.join(self.data.fieldnames, ','))\n body.append('\\n')\n to_be_sanitized = ['Sample_Project', 'Sample_Name']\n for row in self.data:\n for f in self.data.fieldnames:\n if replace and f == label:\n body.append(new_value)\n elif f in to_be_sanitized and row[f]:\n body.append(sanitize(row[f]))\n else:\n body.append(row[f])\n body.append(',')\n body.append('\\n')\n return body\n\n def get_barcode_mask(self):\n 
barcodes_mask = dict()\n for row in self.data:\n index = len(row['index']) if 'index' in row else None\n index1 = None\n if 'index1' in row or 'index2' in row:\n index1 = len(row['index2']) if 'index2' in row else len(row\n ['index1'])\n if row['Lane'] not in barcodes_mask:\n barcodes_mask[row['Lane']] = dict(index=index, index1=index1)\n elif index != barcodes_mask[row['Lane']]['index'\n ] or index1 != barcodes_mask[row['Lane']]['index1']:\n return None\n return barcodes_mask\n\n\nclass WeightedPath(object):\n\n def __init__(self, path, weight):\n self.path = path\n self.weight = weight\n\n def __repr__(self):\n return '{}: {} {}'.format(self.__class__.__name__, self.path, self.\n weight)\n\n def __cmp__(self, other):\n if hasattr(other, 'weight'):\n return self.weight.__cmp__(other.weight)\n\n\n<mask token>\n\n\ndef format_dataset_filename(sample_label, lane=None, read=None, ext=None,\n uid=False):\n filename = sanitize_filename(sample_label)\n if read:\n filename = '_'.join([filename, lane, read]) if lane else '_'.join([\n filename, read])\n if uid:\n filename = '.'.join([filename, str(uuid.uuid4())])\n if ext:\n filename = '.'.join([filename, ext])\n return sanitize_filename(filename)\n\n\n<mask token>\n\n\ndef touch(path, logger):\n try:\n with open(path, 'a'):\n os.utime(path, None)\n except IOError as e:\n logger.error('While touching {} file: {}'.format(path, e.strerror))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass IEMRunInfoReader:\n \"\"\"\n Illumina Experimental Manager RunInfo xml reader.\n \"\"\"\n\n def __init__(self, f):\n self.xml_file = f\n self.tree = ET.parse(self.xml_file)\n self.root = self.tree.getroot()\n\n def get_reads(self):\n reads = [r.attrib for r in self.root.iter('Read')]\n return reads\n\n def get_indexed_reads(self):\n reads = self.get_reads()\n return filter(lambda item: item['IsIndexedRead'] == 'Y', reads)\n\n def get_index_cycles(self):\n indexed_reads = self.get_indexed_reads()\n return dict(index=next((item['NumCycles'] for item in indexed_reads if\n item['IsIndexedRead'] == 'Y' and item['Number'] == '2'), None),\n index1=next((item['NumCycles'] for item in indexed_reads if \n item['IsIndexedRead'] == 'Y' and item['Number'] != '2'), None))\n\n @staticmethod\n def get_default_index_cycles():\n return DEFAULT_INDEX_CYCLES\n\n def set_index_cycles(self, index_cycles, write=True):\n for read in self.root.iter('Read'):\n if read.attrib['IsIndexedRead'] == 'Y':\n if read.attrib['Number'] == '2':\n read.attrib.update(NumCycles=index_cycles.get('index',\n DEFAULT_INDEX_CYCLES['index']))\n else:\n read.attrib.update(NumCycles=index_cycles.get('index',\n DEFAULT_INDEX_CYCLES['index']))\n if write:\n self.tree.write(self.xml_file)\n\n def is_paired_end_sequencing(self):\n reads = self.get_reads()\n reads = filter(lambda item: item['IsIndexedRead'] == 'N', reads)\n if len(reads) == 1:\n return False\n return True\n\n\nclass LogBook:\n \"\"\"\n Logbook manager\n \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n self.logfile = None\n self.logbook = dict()\n\n def dump(self):\n a = []\n if not os.path.isfile(self.filename):\n a.append(self.logbook)\n with open(self.filename, mode='w') as f:\n f.write(json.dumps(a, indent=4, sort_keys=True, default=str))\n else:\n with open(self.filename) as feedsjson:\n feeds = json.load(feedsjson)\n feeds.append(self.logbook)\n with open(self.filename, mode='w') as f:\n 
f.write(json.dumps(feeds, indent=4, sort_keys=True, default\n =str))\n\n def start(self, task_name, args=None):\n self.logbook.update(task_name=task_name)\n self.logbook.update(args=args)\n self.logbook.update(start_time=datetime.datetime.now())\n\n def end(self):\n self.logbook.update(end_time=datetime.datetime.now())\n execution_time = self.logbook.get('end_time') - self.logbook.get(\n 'start_time')\n self.logbook.update(execution_time=execution_time)\n self.dump()\n\n\nclass IEMSampleSheetReader(csv.DictReader):\n \"\"\"\n Illumina Experimental Manager SampleSheet reader.\n \"\"\"\n\n def __init__(self, f):\n csv.DictReader.__init__(self, f, delimiter=',')\n self.header = ''\n self.data = ''\n first_line = f.readline()\n if not first_line.startswith('[Header]'):\n raise ValueError('%s is not an IEM samplesheet'.format(f.name))\n header = [first_line.strip()]\n l = f.readline()\n while not l.startswith('[Data]'):\n header.append(l.strip())\n l = f.readline()\n else:\n header.append(l.strip())\n self.header = header\n self.data = csv.DictReader(f.readlines(), delimiter=',')\n\n def barcodes_have_the_same_size(self):\n return False if self.get_barcode_mask() is None else True\n\n def get_body(self, label='Sample_Name', new_value='', replace=True):\n\n def sanitize(mystr):\n \"\"\"\n Sanitize string in accordance with Illumina's documentation\n bcl2fastq2 Conversion Software v2.17 Guide\n \"\"\"\n retainlist = '_-'\n return re.sub('[^\\\\w' + retainlist + ']', '_', mystr)\n body = []\n for i in self.header:\n body.append(i)\n body.append('\\n')\n body.append(string.join(self.data.fieldnames, ','))\n body.append('\\n')\n to_be_sanitized = ['Sample_Project', 'Sample_Name']\n for row in self.data:\n for f in self.data.fieldnames:\n if replace and f == label:\n body.append(new_value)\n elif f in to_be_sanitized and row[f]:\n body.append(sanitize(row[f]))\n else:\n body.append(row[f])\n body.append(',')\n body.append('\\n')\n return body\n\n def get_barcode_mask(self):\n 
barcodes_mask = dict()\n for row in self.data:\n index = len(row['index']) if 'index' in row else None\n index1 = None\n if 'index1' in row or 'index2' in row:\n index1 = len(row['index2']) if 'index2' in row else len(row\n ['index1'])\n if row['Lane'] not in barcodes_mask:\n barcodes_mask[row['Lane']] = dict(index=index, index1=index1)\n elif index != barcodes_mask[row['Lane']]['index'\n ] or index1 != barcodes_mask[row['Lane']]['index1']:\n return None\n return barcodes_mask\n\n\nclass WeightedPath(object):\n\n def __init__(self, path, weight):\n self.path = path\n self.weight = weight\n\n def __repr__(self):\n return '{}: {} {}'.format(self.__class__.__name__, self.path, self.\n weight)\n\n def __cmp__(self, other):\n if hasattr(other, 'weight'):\n return self.weight.__cmp__(other.weight)\n\n\n<mask token>\n\n\ndef format_dataset_filename(sample_label, lane=None, read=None, ext=None,\n uid=False):\n filename = sanitize_filename(sample_label)\n if read:\n filename = '_'.join([filename, lane, read]) if lane else '_'.join([\n filename, read])\n if uid:\n filename = '.'.join([filename, str(uuid.uuid4())])\n if ext:\n filename = '.'.join([filename, ext])\n return sanitize_filename(filename)\n\n\ndef config_file_setup(logger, cf_label, cf_from_cli=None):\n \"\"\"\n Create a config file if does not exists, copying it from the package\n default into the user_config_dir.\n Return a configuration file path from cli args if present, otherwise return\n a path from the user_config_dir\n :param logger: logger\n :param cf_label: label of the configuration file (required)\n :param cf_from_cli: path to configuration file from cli arg\n :return: Path\n \"\"\"\n presta_config_dir = os.path.join(user_config_dir(__appname__))\n config_file_from_home = os.path.join(presta_config_dir, cf_label)\n if not path_exists(config_file_from_home, logger, force=False):\n logger.info('Creating config path {}'.format(presta_config_dir))\n ensure_dir(presta_config_dir)\n config_file_path = 
'/'.join(['config', cf_label])\n config_file_from_package = resource_filename(__appname__,\n config_file_path)\n copyfile(config_file_from_package, config_file_from_home)\n config_file_paths = []\n if cf_from_cli and path_exists(cf_from_cli, logger, force=False):\n config_file_paths.append(WeightedPath(cf_from_cli, 0))\n if path_exists(config_file_from_home, logger, force=False):\n config_file_paths.append(WeightedPath(config_file_from_home, 1))\n logger.debug('config file paths: {}'.format(config_file_paths))\n config_file_path = sorted(config_file_paths)[0].path\n logger.info('Reading configuration from {}'.format(config_file_path))\n return config_file_path\n\n\ndef touch(path, logger):\n try:\n with open(path, 'a'):\n os.utime(path, None)\n except IOError as e:\n logger.error('While touching {} file: {}'.format(path, e.strerror))\n\n\n<mask token>\n",
"step-4": "<mask token>\nSAMPLES_WITHOUT_BARCODES = [2, 8]\nDEFAULT_INDEX_CYCLES = dict(index='8', index1='8')\nPROGRESS_STATUS = dict(COMPLETED='completed', STARTED='started', TODO='todo')\n\n\nclass IEMRunInfoReader:\n \"\"\"\n Illumina Experimental Manager RunInfo xml reader.\n \"\"\"\n\n def __init__(self, f):\n self.xml_file = f\n self.tree = ET.parse(self.xml_file)\n self.root = self.tree.getroot()\n\n def get_reads(self):\n reads = [r.attrib for r in self.root.iter('Read')]\n return reads\n\n def get_indexed_reads(self):\n reads = self.get_reads()\n return filter(lambda item: item['IsIndexedRead'] == 'Y', reads)\n\n def get_index_cycles(self):\n indexed_reads = self.get_indexed_reads()\n return dict(index=next((item['NumCycles'] for item in indexed_reads if\n item['IsIndexedRead'] == 'Y' and item['Number'] == '2'), None),\n index1=next((item['NumCycles'] for item in indexed_reads if \n item['IsIndexedRead'] == 'Y' and item['Number'] != '2'), None))\n\n @staticmethod\n def get_default_index_cycles():\n return DEFAULT_INDEX_CYCLES\n\n def set_index_cycles(self, index_cycles, write=True):\n for read in self.root.iter('Read'):\n if read.attrib['IsIndexedRead'] == 'Y':\n if read.attrib['Number'] == '2':\n read.attrib.update(NumCycles=index_cycles.get('index',\n DEFAULT_INDEX_CYCLES['index']))\n else:\n read.attrib.update(NumCycles=index_cycles.get('index',\n DEFAULT_INDEX_CYCLES['index']))\n if write:\n self.tree.write(self.xml_file)\n\n def is_paired_end_sequencing(self):\n reads = self.get_reads()\n reads = filter(lambda item: item['IsIndexedRead'] == 'N', reads)\n if len(reads) == 1:\n return False\n return True\n\n\nclass LogBook:\n \"\"\"\n Logbook manager\n \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n self.logfile = None\n self.logbook = dict()\n\n def dump(self):\n a = []\n if not os.path.isfile(self.filename):\n a.append(self.logbook)\n with open(self.filename, mode='w') as f:\n f.write(json.dumps(a, indent=4, sort_keys=True, 
default=str))\n else:\n with open(self.filename) as feedsjson:\n feeds = json.load(feedsjson)\n feeds.append(self.logbook)\n with open(self.filename, mode='w') as f:\n f.write(json.dumps(feeds, indent=4, sort_keys=True, default\n =str))\n\n def start(self, task_name, args=None):\n self.logbook.update(task_name=task_name)\n self.logbook.update(args=args)\n self.logbook.update(start_time=datetime.datetime.now())\n\n def end(self):\n self.logbook.update(end_time=datetime.datetime.now())\n execution_time = self.logbook.get('end_time') - self.logbook.get(\n 'start_time')\n self.logbook.update(execution_time=execution_time)\n self.dump()\n\n\nclass IEMSampleSheetReader(csv.DictReader):\n \"\"\"\n Illumina Experimental Manager SampleSheet reader.\n \"\"\"\n\n def __init__(self, f):\n csv.DictReader.__init__(self, f, delimiter=',')\n self.header = ''\n self.data = ''\n first_line = f.readline()\n if not first_line.startswith('[Header]'):\n raise ValueError('%s is not an IEM samplesheet'.format(f.name))\n header = [first_line.strip()]\n l = f.readline()\n while not l.startswith('[Data]'):\n header.append(l.strip())\n l = f.readline()\n else:\n header.append(l.strip())\n self.header = header\n self.data = csv.DictReader(f.readlines(), delimiter=',')\n\n def barcodes_have_the_same_size(self):\n return False if self.get_barcode_mask() is None else True\n\n def get_body(self, label='Sample_Name', new_value='', replace=True):\n\n def sanitize(mystr):\n \"\"\"\n Sanitize string in accordance with Illumina's documentation\n bcl2fastq2 Conversion Software v2.17 Guide\n \"\"\"\n retainlist = '_-'\n return re.sub('[^\\\\w' + retainlist + ']', '_', mystr)\n body = []\n for i in self.header:\n body.append(i)\n body.append('\\n')\n body.append(string.join(self.data.fieldnames, ','))\n body.append('\\n')\n to_be_sanitized = ['Sample_Project', 'Sample_Name']\n for row in self.data:\n for f in self.data.fieldnames:\n if replace and f == label:\n body.append(new_value)\n elif f in 
to_be_sanitized and row[f]:\n body.append(sanitize(row[f]))\n else:\n body.append(row[f])\n body.append(',')\n body.append('\\n')\n return body\n\n def get_barcode_mask(self):\n barcodes_mask = dict()\n for row in self.data:\n index = len(row['index']) if 'index' in row else None\n index1 = None\n if 'index1' in row or 'index2' in row:\n index1 = len(row['index2']) if 'index2' in row else len(row\n ['index1'])\n if row['Lane'] not in barcodes_mask:\n barcodes_mask[row['Lane']] = dict(index=index, index1=index1)\n elif index != barcodes_mask[row['Lane']]['index'\n ] or index1 != barcodes_mask[row['Lane']]['index1']:\n return None\n return barcodes_mask\n\n\nclass WeightedPath(object):\n\n def __init__(self, path, weight):\n self.path = path\n self.weight = weight\n\n def __repr__(self):\n return '{}: {} {}'.format(self.__class__.__name__, self.path, self.\n weight)\n\n def __cmp__(self, other):\n if hasattr(other, 'weight'):\n return self.weight.__cmp__(other.weight)\n\n\ndef get_conf(logger, config_file_from_cli=None, profile=None):\n profiles = {'presta': 'presta_config.yml', 'celery': 'celery_config.yml'}\n default_config_file_label = profiles.get(profile, profiles['presta'])\n config_file_path = config_file_setup(logger, default_config_file_label,\n cf_from_cli=config_file_from_cli)\n return ConfigurationFromYamlFile(config_file_path)\n\n\ndef path_exists(path, logger, force=True):\n\n def file_missing(path, logger, force):\n msg = \"path - {} - doesn't exists\".format(path)\n if force:\n logger.error(msg)\n sys.exit()\n logger.warning(msg)\n return False\n return True if os.path.exists(os.path.expanduser(path)) else file_missing(\n path, logger, force)\n\n\ndef sanitize_filename(filename):\n valid_chars = '-_.%s%s' % (string.ascii_letters, string.digits)\n return ''.join(c for c in filename if c in valid_chars)\n\n\ndef format_dataset_filename(sample_label, lane=None, read=None, ext=None,\n uid=False):\n filename = sanitize_filename(sample_label)\n if read:\n 
filename = '_'.join([filename, lane, read]) if lane else '_'.join([\n filename, read])\n if uid:\n filename = '.'.join([filename, str(uuid.uuid4())])\n if ext:\n filename = '.'.join([filename, ext])\n return sanitize_filename(filename)\n\n\ndef config_file_setup(logger, cf_label, cf_from_cli=None):\n \"\"\"\n Create a config file if does not exists, copying it from the package\n default into the user_config_dir.\n Return a configuration file path from cli args if present, otherwise return\n a path from the user_config_dir\n :param logger: logger\n :param cf_label: label of the configuration file (required)\n :param cf_from_cli: path to configuration file from cli arg\n :return: Path\n \"\"\"\n presta_config_dir = os.path.join(user_config_dir(__appname__))\n config_file_from_home = os.path.join(presta_config_dir, cf_label)\n if not path_exists(config_file_from_home, logger, force=False):\n logger.info('Creating config path {}'.format(presta_config_dir))\n ensure_dir(presta_config_dir)\n config_file_path = '/'.join(['config', cf_label])\n config_file_from_package = resource_filename(__appname__,\n config_file_path)\n copyfile(config_file_from_package, config_file_from_home)\n config_file_paths = []\n if cf_from_cli and path_exists(cf_from_cli, logger, force=False):\n config_file_paths.append(WeightedPath(cf_from_cli, 0))\n if path_exists(config_file_from_home, logger, force=False):\n config_file_paths.append(WeightedPath(config_file_from_home, 1))\n logger.debug('config file paths: {}'.format(config_file_paths))\n config_file_path = sorted(config_file_paths)[0].path\n logger.info('Reading configuration from {}'.format(config_file_path))\n return config_file_path\n\n\ndef touch(path, logger):\n try:\n with open(path, 'a'):\n os.utime(path, None)\n except IOError as e:\n logger.error('While touching {} file: {}'.format(path, e.strerror))\n\n\ndef read_chunks(file_handle, chunk_size=8192):\n while True:\n data = file_handle.read(chunk_size)\n if not data:\n break\n 
yield data\n\n\ndef get_md5(file_handle):\n hasher = hashlib.md5()\n for chunk in read_chunks(file_handle):\n hasher.update(chunk)\n return hasher.hexdigest()\n\n\ndef check_progress_status(root_path, started_file, completed_file):\n localroot, dirnames, filenames = os.walk(root_path).next()\n if started_file not in filenames:\n return PROGRESS_STATUS.get('TODO')\n elif completed_file not in filenames:\n return PROGRESS_STATUS.get('STARTED')\n else:\n started_file = os.path.join(root_path, started_file)\n completed_file = os.path.join(root_path, completed_file)\n if os.path.getmtime(started_file) > os.path.getmtime(completed_file):\n return PROGRESS_STATUS.get('STARTED')\n return PROGRESS_STATUS.get('COMPLETED')\n\n\ndef runJob(cmd, logger):\n try:\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=\n subprocess.STDOUT)\n output = process.communicate()[0]\n ret = process.wait()\n return True\n except subprocess.CalledProcessError as e:\n logger.info(e)\n if e.output:\n logger.info('command output: %s', e.output)\n else:\n logger.info('no command output available')\n return False\n",
"step-5": "\"\"\"\nUtilities used by other modules.\n\"\"\"\n\nimport csv\nimport datetime\nimport hashlib\nimport json\nimport re\nimport string\nimport subprocess\nimport uuid\n\nimport xml.etree.ElementTree as ET\nfrom alta import ConfigurationFromYamlFile\nfrom pkg_resources import resource_filename\nfrom ..__details__ import __appname__\nfrom appdirs import *\nfrom comoda import ensure_dir\nfrom shutil import copyfile\n\n\nSAMPLES_WITHOUT_BARCODES = [2, 8]\nDEFAULT_INDEX_CYCLES = dict(index='8', index1='8')\nPROGRESS_STATUS = dict(COMPLETED='completed', STARTED='started', TODO='todo')\n\n\nclass IEMRunInfoReader:\n \"\"\"\n Illumina Experimental Manager RunInfo xml reader.\n \"\"\"\n\n def __init__(self, f):\n self.xml_file = f\n self.tree = ET.parse(self.xml_file)\n self.root = self.tree.getroot()\n\n def get_reads(self):\n reads = [r.attrib for r in self.root.iter('Read')]\n return reads\n\n def get_indexed_reads(self):\n reads = self.get_reads()\n return filter(lambda item: item[\"IsIndexedRead\"] == \"Y\", reads)\n\n def get_index_cycles(self):\n indexed_reads = self.get_indexed_reads()\n return dict(\n index=next((item['NumCycles'] for item in indexed_reads\n if item[\"IsIndexedRead\"] == \"Y\" and item['Number'] == \"2\"), None),\n index1=next((item['NumCycles'] for item in indexed_reads\n if item[\"IsIndexedRead\"] == \"Y\" and item['Number'] != \"2\"), None))\n\n @staticmethod\n def get_default_index_cycles():\n return DEFAULT_INDEX_CYCLES\n\n def set_index_cycles(self, index_cycles, write=True):\n\n for read in self.root.iter('Read'):\n if read.attrib[\"IsIndexedRead\"] == \"Y\":\n if read.attrib['Number'] == '2':\n read.attrib.update(NumCycles=index_cycles.get('index', DEFAULT_INDEX_CYCLES['index']))\n else:\n read.attrib.update(NumCycles=index_cycles.get('index', DEFAULT_INDEX_CYCLES['index']))\n if write:\n self.tree.write(self.xml_file)\n\n def is_paired_end_sequencing(self):\n reads = self.get_reads()\n reads = filter(lambda item: 
item[\"IsIndexedRead\"] == \"N\", reads)\n\n if len(reads) == 1:\n return False\n\n return True\n\n\nclass LogBook:\n \"\"\"\n Logbook manager\n \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n self.logfile = None\n self.logbook = dict()\n\n def dump(self):\n a = []\n if not os.path.isfile(self.filename):\n a.append(self.logbook)\n with open(self.filename, mode='w') as f:\n f.write(json.dumps(a, indent=4, sort_keys=True, default=str))\n else:\n with open(self.filename) as feedsjson:\n feeds = json.load(feedsjson)\n\n feeds.append(self.logbook)\n with open(self.filename, mode='w') as f:\n f.write(json.dumps(feeds, indent=4, sort_keys=True, default=str))\n\n def start(self, task_name, args=None):\n self.logbook.update(task_name=task_name)\n self.logbook.update(args=args)\n self.logbook.update(start_time=datetime.datetime.now())\n\n def end(self):\n self.logbook.update(end_time=datetime.datetime.now())\n execution_time = self.logbook.get('end_time') - self.logbook.get('start_time')\n self.logbook.update(execution_time=execution_time)\n self.dump()\n\n\nclass IEMSampleSheetReader(csv.DictReader):\n \"\"\"\n Illumina Experimental Manager SampleSheet reader.\n \"\"\"\n\n def __init__(self, f):\n csv.DictReader.__init__(self, f, delimiter=',')\n self.header = ''\n self.data = ''\n\n first_line = f.readline()\n if not first_line.startswith('[Header]'):\n raise ValueError('%s is not an IEM samplesheet'.format(f.name))\n header = [first_line.strip()]\n l = f.readline()\n while not l.startswith('[Data]'):\n header.append(l.strip()) # ms-dos\n l = f.readline()\n else:\n header.append(l.strip())\n self.header = header\n\n self.data = csv.DictReader(f.readlines(), delimiter=',')\n\n def barcodes_have_the_same_size(self):\n return False if self.get_barcode_mask() is None else True\n\n def get_body(self, label='Sample_Name', new_value='', replace=True):\n def sanitize(mystr):\n \"\"\"\n Sanitize string in accordance with Illumina's documentation\n bcl2fastq2 
Conversion Software v2.17 Guide\n \"\"\"\n retainlist = \"_-\"\n return re.sub(r'[^\\w' + retainlist + ']', '_', mystr)\n\n body = []\n for i in self.header:\n body.append(i)\n body.append('\\n')\n body.append(string.join(self.data.fieldnames, ','))\n body.append('\\n')\n\n to_be_sanitized = ['Sample_Project', 'Sample_Name']\n\n for row in self.data:\n for f in self.data.fieldnames:\n if replace and f == label:\n body.append(new_value)\n else:\n if f in to_be_sanitized and row[f]:\n body.append(sanitize(row[f]))\n else:\n body.append(row[f])\n body.append(',')\n body.append('\\n')\n\n return body\n\n def get_barcode_mask(self):\n barcodes_mask = dict()\n\n for row in self.data:\n index = len(row['index']) if 'index' in row else None\n index1 = None\n\n if 'index1' in row or 'index2' in row:\n index1 = len(row['index2']) if 'index2' in row else len(row['index1'])\n\n if row['Lane'] not in barcodes_mask:\n barcodes_mask[row['Lane']] = dict(\n index=index,\n index1=index1,\n )\n else:\n if index != barcodes_mask[row['Lane']]['index'] or index1 != barcodes_mask[row['Lane']]['index1']:\n return None\n\n return barcodes_mask\n\n\nclass WeightedPath(object):\n def __init__(self, path, weight):\n self.path = path\n self.weight = weight\n\n def __repr__(self):\n return '{}: {} {}'.format(self.__class__.__name__,\n self.path,\n self.weight)\n\n def __cmp__(self, other):\n if hasattr(other, 'weight'):\n return self.weight.__cmp__(other.weight)\n\n\ndef get_conf(logger, config_file_from_cli=None, profile=None):\n profiles = {'presta': 'presta_config.yml',\n 'celery': 'celery_config.yml'}\n default_config_file_label = profiles.get(profile, profiles['presta'])\n\n config_file_path = config_file_setup(logger, default_config_file_label,\n cf_from_cli=config_file_from_cli)\n\n # Load YAML configuration file\n return ConfigurationFromYamlFile(config_file_path)\n\n\ndef path_exists(path, logger, force=True):\n def file_missing(path, logger, force):\n msg = \"path - {} - doesn't 
exists\".format(path)\n if force:\n logger.error(msg)\n sys.exit()\n logger.warning(msg)\n return False\n\n return True if os.path.exists(os.path.expanduser(path)) else file_missing(path,\n logger,\n force)\n\n\ndef sanitize_filename(filename):\n valid_chars = \"-_.%s%s\" % (string.ascii_letters, string.digits)\n return ''.join(c for c in filename if c in valid_chars)\n\n\ndef format_dataset_filename(sample_label, lane=None, read=None, ext=None, uid=False):\n filename = sanitize_filename(sample_label)\n\n if read:\n filename = '_'.join(\n [filename, lane, read]) if lane else '_'.join(\n [filename, read])\n\n if uid:\n filename = '.'.join([filename, str(uuid.uuid4())])\n\n if ext:\n filename = '.'.join([filename, ext])\n\n return sanitize_filename(filename)\n\n\ndef config_file_setup(logger, cf_label, cf_from_cli=None):\n \"\"\"\n Create a config file if does not exists, copying it from the package\n default into the user_config_dir.\n Return a configuration file path from cli args if present, otherwise return\n a path from the user_config_dir\n :param logger: logger\n :param cf_label: label of the configuration file (required)\n :param cf_from_cli: path to configuration file from cli arg\n :return: Path\n \"\"\"\n presta_config_dir = os.path.join(user_config_dir(__appname__))\n config_file_from_home = os.path.join(presta_config_dir, cf_label)\n\n if not path_exists(config_file_from_home, logger, force=False):\n logger.info('Creating config path {}'.format(presta_config_dir))\n ensure_dir(presta_config_dir)\n config_file_path = '/'.join(['config', cf_label])\n config_file_from_package = resource_filename(__appname__,\n config_file_path)\n copyfile(config_file_from_package, config_file_from_home)\n\n config_file_paths = []\n if cf_from_cli and path_exists(cf_from_cli, logger, force=False):\n config_file_paths.append(WeightedPath(cf_from_cli, 0))\n if path_exists(config_file_from_home, logger, force=False):\n 
config_file_paths.append(WeightedPath(config_file_from_home, 1))\n\n logger.debug(\"config file paths: {}\".format(config_file_paths))\n\n config_file_path = sorted(config_file_paths)[0].path\n logger.info('Reading configuration from {}'.format(config_file_path))\n return config_file_path\n\n\ndef touch(path, logger):\n try:\n with open(path, 'a'):\n os.utime(path, None)\n except IOError as e:\n logger.error(\"While touching {} file: {}\".format(path, e.strerror))\n\n\ndef read_chunks(file_handle, chunk_size=8192):\n while True:\n data = file_handle.read(chunk_size)\n if not data:\n break\n yield data\n\n\ndef get_md5(file_handle):\n hasher = hashlib.md5()\n for chunk in read_chunks(file_handle):\n hasher.update(chunk)\n return hasher.hexdigest()\n\n\ndef check_progress_status(root_path, started_file, completed_file):\n localroot, dirnames, filenames = os.walk(root_path).next()\n\n if started_file not in filenames:\n return PROGRESS_STATUS.get('TODO')\n elif completed_file not in filenames:\n return PROGRESS_STATUS.get('STARTED')\n else:\n started_file = os.path.join(root_path, started_file)\n completed_file = os.path.join(root_path, completed_file)\n\n if os.path.getmtime(started_file) > os.path.getmtime(completed_file):\n return PROGRESS_STATUS.get('STARTED')\n\n return PROGRESS_STATUS.get('COMPLETED')\n\n\ndef runJob(cmd, logger):\n try:\n # subprocess.check_output(cmd)\n process = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n output = process.communicate()[0]\n ret = process.wait()\n return True\n except subprocess.CalledProcessError as e:\n logger.info(e)\n if e.output:\n logger.info(\"command output: %s\", e.output)\n else:\n logger.info(\"no command output available\")\n return False\n\n",
"step-ids": [
25,
27,
28,
36,
38
]
}
|
[
25,
27,
28,
36,
38
] |
def maths(num):
    """FizzBuzz-style classifier.

    Prints and returns "bizzfizz" when *num* is divisible by both 3 and 5,
    "fizz" for multiples of 3, "bizz" for multiples of 5, and the number
    itself otherwise.  Returning the value (in addition to printing it,
    which callers already rely on) makes the function testable.
    """
    # Bug fix: the original `int(num)` discarded its result, so a string
    # argument would crash at `num % 5`.  Bind the conversion.
    num = int(num)
    if num % 5 == 0 and num % 3 == 0:
        result = "bizzfizz"
    elif num % 3 == 0:
        result = "fizz"
    elif num % 5 == 0:
        result = "bizz"
    else:
        result = num
    print(result)
    return result


if __name__ == "__main__":
    # Guarded so importing this module does not block on input().
    value = input("enter the value ")
    maths(int(value))
|
normal
|
{
"blob_id": "91f83adbe01e2d8070f9286031b77eae71beb83e",
"index": 1107,
"step-1": "<mask token>\n",
"step-2": "def maths(num):\n int(num)\n if num % 5 == 0 and num % 3 == 0:\n print('bizzfizz')\n elif num % 3 == 0:\n print('fizz')\n elif num % 5 == 0:\n print('bizz')\n else:\n print(num)\n\n\n<mask token>\n",
"step-3": "def maths(num):\n int(num)\n if num % 5 == 0 and num % 3 == 0:\n print('bizzfizz')\n elif num % 3 == 0:\n print('fizz')\n elif num % 5 == 0:\n print('bizz')\n else:\n print(num)\n\n\n<mask token>\nmaths(int(value))\n",
"step-4": "def maths(num):\n int(num)\n if num % 5 == 0 and num % 3 == 0:\n print('bizzfizz')\n elif num % 3 == 0:\n print('fizz')\n elif num % 5 == 0:\n print('bizz')\n else:\n print(num)\n\n\nvalue = input('enter the value ')\nmaths(int(value))\n",
"step-5": "def maths(num):\n int(num)\n if num % 5 == 0 and num % 3 == 0:\n print(\"bizzfizz\")\n elif num % 3 == 0:\n print(\"fizz\")\n elif num % 5 == 0:\n print(\"bizz\")\n else:\n print(num)\n\n\nvalue=input(\"enter the value \")\nmaths(int(value))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for file in flist:
print(file)
name = file.split('.nc')[0]
ds = xr.open_dataset(path_to_regrided_files + file, decode_times=False)
timespan = [ds.timespan]
print(timespan)
ti, tf = timespan[0].split(' to ')
yf = int(tf.split('-')[0])
mf = int(tf.split('-')[1])
if mf == 12:
yf = yf + 1
mf = '01'
else:
mf = mf + 1
tf = '{}-{}-28'.format(yf, str(mf).zfill(2))
if name == 'Ishii':
ti = '1990-01-31T00:00:00.000000'
tf = '2019-01-31T00:00:00.000000'
print('correct time: {} to {}'.format(ti, tf))
time = np.arange(ti, tf, dtype='datetime64[M]')
ds['time'] = np.array(time)
da = ds['data'].rename('sla_' + name)
da.data = da.data * mask
da.data = da.data - np.array(da.sel(time=slice('2005-01-01',
'2016-01-01')).mean(dim='time'))
datasets.append(da)
<|reserved_special_token_0|>
data.fill(np.nan)
<|reserved_special_token_0|>
for i, v in enumerate(var):
data[i] = np.array(ds[v])
<|reserved_special_token_0|>
ens.fill(np.nan)
<|reserved_special_token_0|>
names.append('ENS')
<|reserved_special_token_0|>
ds.to_netcdf(path_save + 'steric_upper.nc')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
path = '/Volumes/LaCie_NIOZ/data/steric/data/'
path_to_original_files = path + 'original/'
flist = [file for file in os.listdir(path_to_original_files) if file.
endswith('.nc')]
path_to_regrided_files = path + 'regrid_180x360/'
ds = xr.open_dataset('/Volumes/LaCie_NIOZ/data/masks/ETOPO_mask.nc')
ds = ds.where((ds.lat > -66) & (ds.lat < 66), np.nan)
mask = np.array(ds.landmask)
ds = xr.open_dataset('/Volumes/LaCie_NIOZ/data/barystatic/masks/' +
'LAND_MASK_CRI-JPL_180x360_conservative.nc')
ds = ds.where((ds.lat > -66) & (ds.lat < 66), np.nan)
mask = np.array(ds.mask)
mask[mask == 1] = np.nan
mask[mask == 0] = 1
flist = [file for file in os.listdir(path_to_regrided_files) if file.
endswith('.nc')]
datasets = []
for file in flist:
print(file)
name = file.split('.nc')[0]
ds = xr.open_dataset(path_to_regrided_files + file, decode_times=False)
timespan = [ds.timespan]
print(timespan)
ti, tf = timespan[0].split(' to ')
yf = int(tf.split('-')[0])
mf = int(tf.split('-')[1])
if mf == 12:
yf = yf + 1
mf = '01'
else:
mf = mf + 1
tf = '{}-{}-28'.format(yf, str(mf).zfill(2))
if name == 'Ishii':
ti = '1990-01-31T00:00:00.000000'
tf = '2019-01-31T00:00:00.000000'
print('correct time: {} to {}'.format(ti, tf))
time = np.arange(ti, tf, dtype='datetime64[M]')
ds['time'] = np.array(time)
da = ds['data'].rename('sla_' + name)
da.data = da.data * mask
da.data = da.data - np.array(da.sel(time=slice('2005-01-01',
'2016-01-01')).mean(dim='time'))
datasets.append(da)
ds = xr.merge(datasets)
ds = ds.sel(time=slice('1993-01-01', ds.time[-1]))
var = [key for key in ds.variables if key.split('_')[0] == 'sla' and len(
key.split('_')) == 2]
data = np.zeros((len(var), len(ds.time), len(ds.lat), len(ds.lon)))
data.fill(np.nan)
names = [v.split('_')[-1] for v in var]
for i, v in enumerate(var):
data[i] = np.array(ds[v])
da = xr.Dataset(data_vars={'data': (('names', 'time', 'lat', 'lon'), data)},
coords={'lat': ds.lat, 'lon': ds.lon, 'time': ds.time, 'names': names})
ds['sla_ens'] = da.data.mean(dim='names')
ens = np.zeros((1, len(ds.time), len(ds.lat), len(ds.lon)))
ens.fill(np.nan)
ens[0] = np.array(ds.sla_ens)
data2 = np.vstack([data, ens])
names.append('ENS')
ds = ds.assign_coords({'names': names})
ds['SLA'] = ['names', 'time', 'lat', 'lon'], data2
ds.attrs['units'] = 'meters'
ds.attrs['description'] = 'Steric sea-level height (m)'
ds.attrs['time_mean'] = 'Removed time mean from 2005-2015 (full years)'
ds.attrs['script'] = 'SLB-steric.py'
path_save = '/Volumes/LaCie_NIOZ/data/budget/'
ds.to_netcdf(path_save + 'steric_upper.nc')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
import os
path = '/Volumes/LaCie_NIOZ/data/steric/data/'
path_to_original_files = path + 'original/'
flist = [file for file in os.listdir(path_to_original_files) if file.
endswith('.nc')]
path_to_regrided_files = path + 'regrid_180x360/'
ds = xr.open_dataset('/Volumes/LaCie_NIOZ/data/masks/ETOPO_mask.nc')
ds = ds.where((ds.lat > -66) & (ds.lat < 66), np.nan)
mask = np.array(ds.landmask)
ds = xr.open_dataset('/Volumes/LaCie_NIOZ/data/barystatic/masks/' +
'LAND_MASK_CRI-JPL_180x360_conservative.nc')
ds = ds.where((ds.lat > -66) & (ds.lat < 66), np.nan)
mask = np.array(ds.mask)
mask[mask == 1] = np.nan
mask[mask == 0] = 1
flist = [file for file in os.listdir(path_to_regrided_files) if file.
endswith('.nc')]
datasets = []
for file in flist:
print(file)
name = file.split('.nc')[0]
ds = xr.open_dataset(path_to_regrided_files + file, decode_times=False)
timespan = [ds.timespan]
print(timespan)
ti, tf = timespan[0].split(' to ')
yf = int(tf.split('-')[0])
mf = int(tf.split('-')[1])
if mf == 12:
yf = yf + 1
mf = '01'
else:
mf = mf + 1
tf = '{}-{}-28'.format(yf, str(mf).zfill(2))
if name == 'Ishii':
ti = '1990-01-31T00:00:00.000000'
tf = '2019-01-31T00:00:00.000000'
print('correct time: {} to {}'.format(ti, tf))
time = np.arange(ti, tf, dtype='datetime64[M]')
ds['time'] = np.array(time)
da = ds['data'].rename('sla_' + name)
da.data = da.data * mask
da.data = da.data - np.array(da.sel(time=slice('2005-01-01',
'2016-01-01')).mean(dim='time'))
datasets.append(da)
ds = xr.merge(datasets)
ds = ds.sel(time=slice('1993-01-01', ds.time[-1]))
var = [key for key in ds.variables if key.split('_')[0] == 'sla' and len(
key.split('_')) == 2]
data = np.zeros((len(var), len(ds.time), len(ds.lat), len(ds.lon)))
data.fill(np.nan)
names = [v.split('_')[-1] for v in var]
for i, v in enumerate(var):
data[i] = np.array(ds[v])
da = xr.Dataset(data_vars={'data': (('names', 'time', 'lat', 'lon'), data)},
coords={'lat': ds.lat, 'lon': ds.lon, 'time': ds.time, 'names': names})
ds['sla_ens'] = da.data.mean(dim='names')
ens = np.zeros((1, len(ds.time), len(ds.lat), len(ds.lon)))
ens.fill(np.nan)
ens[0] = np.array(ds.sla_ens)
data2 = np.vstack([data, ens])
names.append('ENS')
ds = ds.assign_coords({'names': names})
ds['SLA'] = ['names', 'time', 'lat', 'lon'], data2
ds.attrs['units'] = 'meters'
ds.attrs['description'] = 'Steric sea-level height (m)'
ds.attrs['time_mean'] = 'Removed time mean from 2005-2015 (full years)'
ds.attrs['script'] = 'SLB-steric.py'
path_save = '/Volumes/LaCie_NIOZ/data/budget/'
ds.to_netcdf(path_save + 'steric_upper.nc')
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Build a merged steric sea-level dataset from regridded NetCDF products.

Reads per-product regridded files, aligns their time axes, applies an
ocean mask, removes the 2005-2015 time mean, computes an ensemble mean,
and writes the combined dataset to ``steric_upper.nc``.

Created on Tue Feb 1 11:52:48 2022
@author: ccamargo
"""
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
import os
# 1. get filelist
path = "/Volumes/LaCie_NIOZ/data/steric/data/"
path_to_original_files = path + "original/"
flist = [file for file in os.listdir(path_to_original_files) if file.endswith(".nc")]
path_to_regrided_files = path + "regrid_180x360/"
#%% 2. Regrid:
# One-off regridding step with CDO; kept for provenance, not re-run here.
# for file in flist:
# fin=path_to_original_files+file
# fout=path_to_regrided_files+file
# command_list=str('cdo -L remapbil,r360x180 '+fin+' '+fout)
# _tmp=os.system(command_list)
#%% landmask
# First mask (ETOPO landmask) is loaded but immediately overwritten by the
# CRI-JPL mask below — presumably a leftover alternative; confirm intent.
ds = xr.open_dataset("/Volumes/LaCie_NIOZ/data/masks/ETOPO_mask.nc")
ds = ds.where((ds.lat > -66) & (ds.lat < 66), np.nan)
mask = np.array(ds.landmask)
ds = xr.open_dataset(
    "/Volumes/LaCie_NIOZ/data/barystatic/masks/"
    + "LAND_MASK_CRI-JPL_180x360_conservative.nc"
)
# Restrict to +/- 66 degrees latitude (altimetry coverage band).
ds = ds.where((ds.lat > -66) & (ds.lat < 66), np.nan)
mask = np.array(ds.mask)
# Invert the convention: land (1) -> NaN, ocean (0) -> 1, so that
# multiplying a field by `mask` blanks out land points.
mask[mask == 1] = np.nan
mask[mask == 0] = 1
# %% 3. get data
flist = [file for file in os.listdir(path_to_regrided_files) if file.endswith(".nc")]
datasets = []
for file in flist:
    print(file)
    name = file.split(".nc")[0]
    # decode_times=False: the time axis is rebuilt below from the
    # `timespan` attribute instead of the (unreliable) stored values.
    ds = xr.open_dataset(path_to_regrided_files + file, decode_times=False)
    timespan = [ds.timespan]
    print(timespan)
    ti, tf = timespan[0].split(" to ")
    yf = int(tf.split("-")[0])
    mf = int(tf.split("-")[1])
    # Extend the end date by one month so np.arange (half-open interval)
    # still includes the final month; roll December into January.
    if mf == 12:
        yf = yf + 1
        mf = "01"
    else:
        mf = mf + 1
    tf = "{}-{}-28".format(yf, str(mf).zfill(2))
    # Hard-coded override for the Ishii product, whose timespan attribute
    # apparently does not match its data — TODO confirm against the file.
    if name == "Ishii":
        ti = "1990-01-31T00:00:00.000000"
        tf = "2019-01-31T00:00:00.000000"
        print("correct time: {} to {}".format(ti, tf))
    # tf = '{}-{}-{}'.format(time[-1].year,str(time[-1].month).zfill(2),time[-1].day +15)
    # Monthly datetime64 axis spanning [ti, tf).
    time = np.arange(ti, tf, dtype="datetime64[M]")
    ds["time"] = np.array(time)

    da = ds["data"].rename("sla_" + name)
    # Blank out land, then remove the 2005-2015 time mean (anomaly baseline).
    da.data = da.data * mask
    da.data = da.data - np.array(
        da.sel(time=slice("2005-01-01", "2016-01-01")).mean(dim="time")
    )
    datasets.append(da)
    # print(da)
#%% merge datasets
ds = xr.merge(datasets)
#% % select since 1993
ds = ds.sel(time=slice("1993-01-01", ds.time[-1]))
#% % compute ENS mean
# Variables named exactly 'sla_<product>' (two underscore-separated parts).
var = [
    key
    for key in ds.variables
    if key.split("_")[0] == "sla" and len(key.split("_")) == 2
]
# Stack all products into one (product, time, lat, lon) array.
data = np.zeros((len(var), len(ds.time), len(ds.lat), len(ds.lon)))
data.fill(np.nan)
names = [v.split("_")[-1] for v in var]
for i, v in enumerate(var):
    data[i] = np.array(ds[v])
da = xr.Dataset(
    data_vars={"data": (("names", "time", "lat", "lon"), data)},
    coords={"lat": ds.lat, "lon": ds.lon, "time": ds.time, "names": names},
)

# ds['sla_ens'] = (['time','lat','lon'],np.nanmean(datamu,axis=0))
# Ensemble mean across products; note Dataset.mean skips NaN by default.
ds["sla_ens"] = da.data.mean(dim="names")
# Append the ensemble mean as one more "product" layer.
ens = np.zeros((1, len(ds.time), len(ds.lat), len(ds.lon)))
ens.fill(np.nan)
ens[0] = np.array(ds.sla_ens)
data2 = np.vstack([data, ens])
names.append("ENS")
ds = ds.assign_coords({"names": names})
ds["SLA"] = (["names", "time", "lat", "lon"], data2)

ds.attrs["units"] = "meters"
ds.attrs["description"] = "Steric sea-level height (m)"
ds.attrs["time_mean"] = "Removed time mean from 2005-2015 (full years)"
ds.attrs["script"] = "SLB-steric.py"
#%% save
path_save = "/Volumes/LaCie_NIOZ/data/budget/"
ds.to_netcdf(path_save + "steric_upper.nc")
|
flexible
|
{
"blob_id": "4fc4bb81d47a33e4669df46033033fddeca6544e",
"index": 8858,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor file in flist:\n print(file)\n name = file.split('.nc')[0]\n ds = xr.open_dataset(path_to_regrided_files + file, decode_times=False)\n timespan = [ds.timespan]\n print(timespan)\n ti, tf = timespan[0].split(' to ')\n yf = int(tf.split('-')[0])\n mf = int(tf.split('-')[1])\n if mf == 12:\n yf = yf + 1\n mf = '01'\n else:\n mf = mf + 1\n tf = '{}-{}-28'.format(yf, str(mf).zfill(2))\n if name == 'Ishii':\n ti = '1990-01-31T00:00:00.000000'\n tf = '2019-01-31T00:00:00.000000'\n print('correct time: {} to {}'.format(ti, tf))\n time = np.arange(ti, tf, dtype='datetime64[M]')\n ds['time'] = np.array(time)\n da = ds['data'].rename('sla_' + name)\n da.data = da.data * mask\n da.data = da.data - np.array(da.sel(time=slice('2005-01-01',\n '2016-01-01')).mean(dim='time'))\n datasets.append(da)\n<mask token>\ndata.fill(np.nan)\n<mask token>\nfor i, v in enumerate(var):\n data[i] = np.array(ds[v])\n<mask token>\nens.fill(np.nan)\n<mask token>\nnames.append('ENS')\n<mask token>\nds.to_netcdf(path_save + 'steric_upper.nc')\n",
"step-3": "<mask token>\npath = '/Volumes/LaCie_NIOZ/data/steric/data/'\npath_to_original_files = path + 'original/'\nflist = [file for file in os.listdir(path_to_original_files) if file.\n endswith('.nc')]\npath_to_regrided_files = path + 'regrid_180x360/'\nds = xr.open_dataset('/Volumes/LaCie_NIOZ/data/masks/ETOPO_mask.nc')\nds = ds.where((ds.lat > -66) & (ds.lat < 66), np.nan)\nmask = np.array(ds.landmask)\nds = xr.open_dataset('/Volumes/LaCie_NIOZ/data/barystatic/masks/' +\n 'LAND_MASK_CRI-JPL_180x360_conservative.nc')\nds = ds.where((ds.lat > -66) & (ds.lat < 66), np.nan)\nmask = np.array(ds.mask)\nmask[mask == 1] = np.nan\nmask[mask == 0] = 1\nflist = [file for file in os.listdir(path_to_regrided_files) if file.\n endswith('.nc')]\ndatasets = []\nfor file in flist:\n print(file)\n name = file.split('.nc')[0]\n ds = xr.open_dataset(path_to_regrided_files + file, decode_times=False)\n timespan = [ds.timespan]\n print(timespan)\n ti, tf = timespan[0].split(' to ')\n yf = int(tf.split('-')[0])\n mf = int(tf.split('-')[1])\n if mf == 12:\n yf = yf + 1\n mf = '01'\n else:\n mf = mf + 1\n tf = '{}-{}-28'.format(yf, str(mf).zfill(2))\n if name == 'Ishii':\n ti = '1990-01-31T00:00:00.000000'\n tf = '2019-01-31T00:00:00.000000'\n print('correct time: {} to {}'.format(ti, tf))\n time = np.arange(ti, tf, dtype='datetime64[M]')\n ds['time'] = np.array(time)\n da = ds['data'].rename('sla_' + name)\n da.data = da.data * mask\n da.data = da.data - np.array(da.sel(time=slice('2005-01-01',\n '2016-01-01')).mean(dim='time'))\n datasets.append(da)\nds = xr.merge(datasets)\nds = ds.sel(time=slice('1993-01-01', ds.time[-1]))\nvar = [key for key in ds.variables if key.split('_')[0] == 'sla' and len(\n key.split('_')) == 2]\ndata = np.zeros((len(var), len(ds.time), len(ds.lat), len(ds.lon)))\ndata.fill(np.nan)\nnames = [v.split('_')[-1] for v in var]\nfor i, v in enumerate(var):\n data[i] = np.array(ds[v])\nda = xr.Dataset(data_vars={'data': (('names', 'time', 'lat', 'lon'), 
data)},\n coords={'lat': ds.lat, 'lon': ds.lon, 'time': ds.time, 'names': names})\nds['sla_ens'] = da.data.mean(dim='names')\nens = np.zeros((1, len(ds.time), len(ds.lat), len(ds.lon)))\nens.fill(np.nan)\nens[0] = np.array(ds.sla_ens)\ndata2 = np.vstack([data, ens])\nnames.append('ENS')\nds = ds.assign_coords({'names': names})\nds['SLA'] = ['names', 'time', 'lat', 'lon'], data2\nds.attrs['units'] = 'meters'\nds.attrs['description'] = 'Steric sea-level height (m)'\nds.attrs['time_mean'] = 'Removed time mean from 2005-2015 (full years)'\nds.attrs['script'] = 'SLB-steric.py'\npath_save = '/Volumes/LaCie_NIOZ/data/budget/'\nds.to_netcdf(path_save + 'steric_upper.nc')\n",
"step-4": "<mask token>\nimport xarray as xr\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\npath = '/Volumes/LaCie_NIOZ/data/steric/data/'\npath_to_original_files = path + 'original/'\nflist = [file for file in os.listdir(path_to_original_files) if file.\n endswith('.nc')]\npath_to_regrided_files = path + 'regrid_180x360/'\nds = xr.open_dataset('/Volumes/LaCie_NIOZ/data/masks/ETOPO_mask.nc')\nds = ds.where((ds.lat > -66) & (ds.lat < 66), np.nan)\nmask = np.array(ds.landmask)\nds = xr.open_dataset('/Volumes/LaCie_NIOZ/data/barystatic/masks/' +\n 'LAND_MASK_CRI-JPL_180x360_conservative.nc')\nds = ds.where((ds.lat > -66) & (ds.lat < 66), np.nan)\nmask = np.array(ds.mask)\nmask[mask == 1] = np.nan\nmask[mask == 0] = 1\nflist = [file for file in os.listdir(path_to_regrided_files) if file.\n endswith('.nc')]\ndatasets = []\nfor file in flist:\n print(file)\n name = file.split('.nc')[0]\n ds = xr.open_dataset(path_to_regrided_files + file, decode_times=False)\n timespan = [ds.timespan]\n print(timespan)\n ti, tf = timespan[0].split(' to ')\n yf = int(tf.split('-')[0])\n mf = int(tf.split('-')[1])\n if mf == 12:\n yf = yf + 1\n mf = '01'\n else:\n mf = mf + 1\n tf = '{}-{}-28'.format(yf, str(mf).zfill(2))\n if name == 'Ishii':\n ti = '1990-01-31T00:00:00.000000'\n tf = '2019-01-31T00:00:00.000000'\n print('correct time: {} to {}'.format(ti, tf))\n time = np.arange(ti, tf, dtype='datetime64[M]')\n ds['time'] = np.array(time)\n da = ds['data'].rename('sla_' + name)\n da.data = da.data * mask\n da.data = da.data - np.array(da.sel(time=slice('2005-01-01',\n '2016-01-01')).mean(dim='time'))\n datasets.append(da)\nds = xr.merge(datasets)\nds = ds.sel(time=slice('1993-01-01', ds.time[-1]))\nvar = [key for key in ds.variables if key.split('_')[0] == 'sla' and len(\n key.split('_')) == 2]\ndata = np.zeros((len(var), len(ds.time), len(ds.lat), len(ds.lon)))\ndata.fill(np.nan)\nnames = [v.split('_')[-1] for v in var]\nfor i, v in enumerate(var):\n data[i] = 
np.array(ds[v])\nda = xr.Dataset(data_vars={'data': (('names', 'time', 'lat', 'lon'), data)},\n coords={'lat': ds.lat, 'lon': ds.lon, 'time': ds.time, 'names': names})\nds['sla_ens'] = da.data.mean(dim='names')\nens = np.zeros((1, len(ds.time), len(ds.lat), len(ds.lon)))\nens.fill(np.nan)\nens[0] = np.array(ds.sla_ens)\ndata2 = np.vstack([data, ens])\nnames.append('ENS')\nds = ds.assign_coords({'names': names})\nds['SLA'] = ['names', 'time', 'lat', 'lon'], data2\nds.attrs['units'] = 'meters'\nds.attrs['description'] = 'Steric sea-level height (m)'\nds.attrs['time_mean'] = 'Removed time mean from 2005-2015 (full years)'\nds.attrs['script'] = 'SLB-steric.py'\npath_save = '/Volumes/LaCie_NIOZ/data/budget/'\nds.to_netcdf(path_save + 'steric_upper.nc')\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 1 11:52:48 2022\n\n@author: ccamargo\n\"\"\"\n\nimport xarray as xr\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\n# 1. get filelist\npath = \"/Volumes/LaCie_NIOZ/data/steric/data/\"\npath_to_original_files = path + \"original/\"\nflist = [file for file in os.listdir(path_to_original_files) if file.endswith(\".nc\")]\n\npath_to_regrided_files = path + \"regrid_180x360/\"\n\n\n#%% 2. Regrid:\n# for file in flist:\n# fin=path_to_original_files+file\n# fout=path_to_regrided_files+file\n# command_list=str('cdo -L remapbil,r360x180 '+fin+' '+fout)\n# _tmp=os.system(command_list)\n#%% landmask\nds = xr.open_dataset(\"/Volumes/LaCie_NIOZ/data/masks/ETOPO_mask.nc\")\nds = ds.where((ds.lat > -66) & (ds.lat < 66), np.nan)\nmask = np.array(ds.landmask)\n\nds = xr.open_dataset(\n \"/Volumes/LaCie_NIOZ/data/barystatic/masks/\"\n + \"LAND_MASK_CRI-JPL_180x360_conservative.nc\"\n)\nds = ds.where((ds.lat > -66) & (ds.lat < 66), np.nan)\nmask = np.array(ds.mask)\nmask[mask == 1] = np.nan\nmask[mask == 0] = 1\n# %% 3. 
get data\nflist = [file for file in os.listdir(path_to_regrided_files) if file.endswith(\".nc\")]\ndatasets = []\nfor file in flist:\n print(file)\n name = file.split(\".nc\")[0]\n ds = xr.open_dataset(path_to_regrided_files + file, decode_times=False)\n timespan = [ds.timespan]\n print(timespan)\n ti, tf = timespan[0].split(\" to \")\n yf = int(tf.split(\"-\")[0])\n mf = int(tf.split(\"-\")[1])\n if mf == 12:\n yf = yf + 1\n mf = \"01\"\n else:\n mf = mf + 1\n tf = \"{}-{}-28\".format(yf, str(mf).zfill(2))\n if name == \"Ishii\":\n ti = \"1990-01-31T00:00:00.000000\"\n tf = \"2019-01-31T00:00:00.000000\"\n print(\"correct time: {} to {}\".format(ti, tf))\n # tf = '{}-{}-{}'.format(time[-1].year,str(time[-1].month).zfill(2),time[-1].day +15)\n time = np.arange(ti, tf, dtype=\"datetime64[M]\")\n ds[\"time\"] = np.array(time)\n\n da = ds[\"data\"].rename(\"sla_\" + name)\n da.data = da.data * mask\n da.data = da.data - np.array(\n da.sel(time=slice(\"2005-01-01\", \"2016-01-01\")).mean(dim=\"time\")\n )\n datasets.append(da)\n # print(da)\n#%% merge datasets\nds = xr.merge(datasets)\n#% % select since 1993\nds = ds.sel(time=slice(\"1993-01-01\", ds.time[-1]))\n#% % compute ENS mean\nvar = [\n key\n for key in ds.variables\n if key.split(\"_\")[0] == \"sla\" and len(key.split(\"_\")) == 2\n]\ndata = np.zeros((len(var), len(ds.time), len(ds.lat), len(ds.lon)))\ndata.fill(np.nan)\nnames = [v.split(\"_\")[-1] for v in var]\nfor i, v in enumerate(var):\n data[i] = np.array(ds[v])\nda = xr.Dataset(\n data_vars={\"data\": ((\"names\", \"time\", \"lat\", \"lon\"), data)},\n coords={\"lat\": ds.lat, \"lon\": ds.lon, \"time\": ds.time, \"names\": names},\n)\n\n# ds['sla_ens'] = (['time','lat','lon'],np.nanmean(datamu,axis=0))\nds[\"sla_ens\"] = da.data.mean(dim=\"names\")\nens = np.zeros((1, len(ds.time), len(ds.lat), len(ds.lon)))\nens.fill(np.nan)\nens[0] = np.array(ds.sla_ens)\ndata2 = np.vstack([data, ens])\nnames.append(\"ENS\")\nds = ds.assign_coords({\"names\": 
names})\nds[\"SLA\"] = ([\"names\", \"time\", \"lat\", \"lon\"], data2)\n\nds.attrs[\"units\"] = \"meters\"\nds.attrs[\"description\"] = \"Steric sea-level height (m)\"\nds.attrs[\"time_mean\"] = \"Removed time mean from 2005-2015 (full years)\"\nds.attrs[\"script\"] = \"SLB-steric.py\"\n#%% save\npath_save = \"/Volumes/LaCie_NIOZ/data/budget/\"\nds.to_netcdf(path_save + \"steric_upper.nc\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python


def findSubset(s0, s, t):
    """Recursively search for a subset of *s* summing to *t*.

    Returns the accumulated set (*s0* plus the chosen elements) when the
    target is reached with at least one element accumulated, ``True`` when
    the target is zero and nothing was accumulated, and ``False`` when no
    subset of *s* can make up the remaining target.  Inputs are copied, so
    the caller's sets are never mutated.
    """
    chosen = s0.copy()
    remaining = s.copy()

    # Target reached: report the chosen elements (or True if none).
    if t == 0:
        return chosen if chosen else True

    # Nothing left to pick from and target not yet reached.
    if not remaining:
        return False

    pick = remaining.pop()
    with_pick = chosen.copy()
    with_pick.add(pick)

    # Overshooting element: only the "skip it" branch can succeed.
    if t < pick:
        return findSubset(chosen, remaining, t)
    # Otherwise try taking the element first, then skipping it.
    return findSubset(with_pick, remaining, t - pick) or findSubset(chosen, remaining, t)


if __name__ == "__main__":
    candidate = set()
    big = {1, 2, 3, 4, 5, 6}
    total = 11
    print(findSubset(candidate, big, total))
|
normal
|
{
"blob_id": "079610f2aaebec8c6e46ccf21a9d5728df1be8de",
"index": 4155,
"step-1": "<mask token>\n",
"step-2": "def findSubset(s0, s, t):\n mys0 = s0.copy()\n mys = s.copy()\n if t == 0 and mys0:\n return mys0\n elif t == 0:\n return True\n elif len(mys) > 0:\n p = mys.pop()\n mys1 = mys0.copy()\n mys1.add(p)\n if t - p < 0:\n return findSubset(mys0, mys, t)\n else:\n return findSubset(mys1, mys, t - p) or findSubset(mys0, mys, t)\n else:\n return False\n\n\n<mask token>\n",
"step-3": "def findSubset(s0, s, t):\n mys0 = s0.copy()\n mys = s.copy()\n if t == 0 and mys0:\n return mys0\n elif t == 0:\n return True\n elif len(mys) > 0:\n p = mys.pop()\n mys1 = mys0.copy()\n mys1.add(p)\n if t - p < 0:\n return findSubset(mys0, mys, t)\n else:\n return findSubset(mys1, mys, t - p) or findSubset(mys0, mys, t)\n else:\n return False\n\n\nif __name__ == '__main__':\n candidate = set()\n big = set([1, 2, 3, 4, 5, 6])\n total = 11\n print(findSubset(candidate, big, total))\n",
"step-4": "#!/usr/bin/env python\n\n\ndef findSubset(s0, s, t):\n\n mys0 = s0.copy()\n mys = s.copy()\n \n if t == 0 and mys0:\n return mys0\n elif t == 0: # and mys0 == set()\n return True\n else:\n if len(mys) > 0:\n p = mys.pop()\n mys1 = mys0.copy()\n mys1.add(p)\n if t-p < 0:\n return findSubset(mys0, mys, t)\n else:\n return findSubset(mys1, mys, t-p) or findSubset(mys0, mys, t)\n else:\n return False\n \n\n\nif __name__ == \"__main__\":\n\n candidate = set()\n big = set([1,2,3,4,5,6])\n total = 11\n print(findSubset(candidate, big, total))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def _push_test_data(app, model, data=None):
app.authmodel(model, ['insert'])
resp = app.post('/', json={'_data': [{**res, '_op': 'insert', '_type':
model} for res in data or test_data]})
assert resp.status_code == 200, resp.json()
resp = resp.json()
assert '_data' in resp, resp
return resp['_data']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_exact(model, context, app):
    """Exact match on a string property returns only the matching row."""
    rows = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    result = app.get(f'/{model}?status="OK"').json()['_data']
    assert [r['_id'] for r in result] == [rows[0]['_id']]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_exact_lower(model, context, app):
    """Exact match via the lower() operator is case-insensitive."""
    rows = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    result = app.get(f'/{model}?status.lower()="ok"').json()['_data']
    assert [r['_id'] for r in result] == [rows[0]['_id']]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_exact_non_string(model, context, app):
    """Exact search on a non-string property, including error cases."""
    rows = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    # Integer value matches the third row.
    result = app.get(f'/{model}?count=13').json()['_data']
    assert [r['_id'] for r in result] == [rows[2]['_id']]

    # A string value against an integer property is rejected.
    resp = app.get(f'/{model}?count="abc"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ['InvalidValue']

    # A non-matching string value simply yields no rows.
    result = app.get(f'/{model}?status="o"').json()['_data']
    assert result == []

    # An unknown property name is rejected.
    resp = app.get(f'/{model}?state="o"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ['FieldNotInResource']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_exact_multiple_props(model, context, app):
    """Multiple conditions on different properties are ANDed together."""
    rows = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    query = 'status.lower()="invalid"&report_type.lower()="stv"'
    result = app.get(f'/{model}?{query}').json()['_data']
    assert [r['_id'] for r in result] == [rows[2]['_id']]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_exact_same_prop_multiple_times(model, context, app):
    """Two contradictory exact conditions on one property match nothing."""
    _push_test_data(app, model)
    app.authmodel(model, ['search'])
    query = 'status.lower()="invalid"&status.lower()="ok"'
    result = app.get(f'/{model}?{query}').json()['_data']
    assert result == []
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_gt(model, context, app):
    """Strict greater-than comparison on an integer property."""
    rows = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    def hits(query):
        return [r['_id'] for r in app.get(f'/{model}?{query}').json()['_data']]

    assert hits('count>40') == [rows[1]['_id']]

    # '>' is not defined for string properties.
    resp = app.get(f'/{model}?status>"ok"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ['InvalidValue']

    # Multiple '>' conditions and mixing with other operators.
    assert hits('count>40&count>10') == [rows[1]['_id']]
    assert hits('count>40&report_type.lower()="vmi"') == [rows[1]['_id']]

    # Boundary: 42 is not strictly greater than 42.
    assert hits('count>42') == []
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_gt_with_nested_date(model, context, app):
    """Greater-than on a recursively looked-up date property."""
    row_ids = RowIds(_push_test_data(app, model))
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?recurse(create_date)>"2019-04-19"')
    assert row_ids(response) == [1]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_gte(model, context, app):
    """Greater-than-or-equal comparison on an integer property."""
    rows = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    def hits(query):
        return [r['_id'] for r in app.get(f'/{model}?{query}').json()['_data']]

    assert hits('count>=40') == [rows[1]['_id']]

    # '>=' is not defined for string properties.
    resp = app.get(f'/{model}?status>="ok"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ['InvalidValue']

    # Combined with other conditions.
    assert hits('count>=40&count>10') == [rows[1]['_id']]
    assert hits('count>=40&report_type.lower()="vmi"') == [rows[1]['_id']]

    # Boundary: 42 >= 42 does match.
    assert hits('count>=42') == [rows[1]['_id']]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ge_with_nested_date(model, context, app):
    """Greater-than-or-equal on a recursively looked-up date property."""
    rows = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    result = app.get(f'/{model}?recurse(create_date)>="2019-04-20"').json()['_data']
    assert [r['_id'] for r in result] == [rows[1]['_id']]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_lt(model, context, app):
    """Strict less-than comparison on an integer property."""
    rows = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    def hits(query):
        return [r['_id'] for r in app.get(f'/{model}?{query}').json()['_data']]

    assert hits('count<12') == [rows[0]['_id']]

    # '<' is not defined for string properties.
    resp = app.get(f'/{model}?status<"ok"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ['InvalidValue']

    # Range and mixed conditions.
    assert hits('count<20&count>10') == [rows[2]['_id']]
    assert hits('count<50&report_type.lower()="vmi"') == [rows[1]['_id']]

    # Boundary: 10 is not strictly less than 10.
    assert hits('count<10') == []
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_lt_with_nested_date(model, context, app):
    """Strict `<` on a date reached through recurse()."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    matched = app.get(f'/{model}?recurse(create_date)<"2019-02-02"').json()['_data']
    assert [row['_id'] for row in matched] == [r3['_id']]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_lte(model, context, app):
    """`<=` on a numeric field, alone and combined with other filters."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    def rows(query):
        return app.get(f'/{model}?{query}').json()['_data']

    # only r1 (count=10) is <= 12
    assert [row['_id'] for row in rows('count<=12')] == [r1['_id']]
    # ordering comparison against a string field is rejected
    bad = app.get(f'/{model}?status<="ok"')
    assert bad.status_code == 400
    assert get_error_codes(bad.json()) == ['InvalidValue']
    # half-open range 10 < count <= 20 picks r3 (count=13)
    assert [row['_id'] for row in rows('count<=20&count>10')] == [r3['_id']]
    # conjunction with a string filter
    assert [row['_id'] for row in rows('count<=50&report_type.lower()="vmi"')] == [r2['_id']]
    # boundary: <= is inclusive, so count=10 still matches
    assert [row['_id'] for row in rows('count<=10')] == [r1['_id']]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_le_with_nested_date(model, context, app):
    """Inclusive `<=` on a date reached through recurse()."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    matched = app.get(f'/{model}?recurse(create_date)<="2019-02-01"').json()['_data']
    assert [row['_id'] for row in matched] == [r3['_id']]
<|reserved_special_token_0|>
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne_lower(model, context, app):
    """`!=` combined with .lower() keeps every row whose status is not "ok"."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?status.lower()!="ok"')
    assert ids(response) == [1, 2]
<|reserved_special_token_0|>
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne_multiple_props_and_logic(model, context, app):
    """`!=` combines conjunctively with an equality filter on another field."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?status.lower()!="ok"&report_type.lower()="stv"')
    assert ids(response) == [2]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne_nested(model, context, app):
    """`!=` works on a property nested in a list of objects, ANDed with a flat one."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    query = 'notes.create_date!="2019-02-01"&status!="invalid"'
    response = app.get(f'/{model}?{query}')
    assert ids(response) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne_nested_missing_data(model, context, app):
    """Nested `!=` also matches rows where the nested value is absent."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?operating_licenses.license_types!="valid"')
    assert ids(response) == [1]
<|reserved_special_token_0|>
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_contains_case_insensitive(model, context, app, mocker):
    """contains() after .lower() matches regardless of stored case."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    matched = app.get(f'/{model}?report_type.lower().contains("vm")').json()['_data']
    assert [row['_id'] for row in matched] == [r2['_id']]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_contains_multi_field(model, context, app, mocker):
    """contains() filters compose conjunctively across and within fields."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    def rows(query):
        return app.get(f'/{model}?{query}').json()['_data']

    # case-sensitive contains on one field plus lowered contains on another
    assert [row['_id'] for row in rows('status.contains("valid")&report_type.lower().contains("tv")')] == [r3['_id']]
    # both filters case-sensitive
    assert [row['_id'] for row in rows('status.contains("valid")&report_type.contains("TV")')] == [r3['_id']]
    # two lowered contains() on the same field
    assert [row['_id'] for row in rows('report_type.lower().contains("vm")&report_type.lower().contains("mi")')] == [r2['_id']]
    # contains() mixed with an exact lowered equality
    assert [row['_id'] for row in rows('status.contains("valid")&report_type.lower()="vmi"')] == [r2['_id']]
<|reserved_special_token_0|>
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_select_unknown_property(model, context, app, mocker):
    """select() on a property the model does not declare is an error."""
    _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?select(nothere)')
    assert error(response) == 'FieldNotInResource'
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_select_unknown_property_in_object(model, context, app, mocker):
    """select() on an undeclared property inside a nested object is an error."""
    _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?select(notes.nothere)')
    assert error(response) == 'FieldNotInResource'
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_startswith(model, context, app):
    """startswith(): plain, lowered, combined, empty result and type error."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    def rows(query):
        return app.get(f'/{model}?{query}').json()['_data']

    # case-sensitive prefix
    assert [row['_id'] for row in rows('report_type.startswith("VM")')] == [r2['_id']]
    # prefix after .lower()
    assert [row['_id'] for row in rows('report_type.lower().startswith("vm")')] == [r2['_id']]
    # two prefix filters ANDed together
    assert [row['_id'] for row in rows('status.startswith("in")&report_type.lower().startswith("vm")')] == [r2['_id']]
    # prefix filter combined with an exact lowered equality
    assert [row['_id'] for row in rows('report_type.lower().startswith("st")&status.lower()="ok"')] == [r1['_id']]
    # "valid" is a suffix of the stored statuses, not a prefix
    assert rows('status.startswith("valid")') == []
    # startswith() on a date-typed nested property is rejected
    bad = app.get(f'/{model}?notes.create_date.startswith("2019-04-20")')
    assert bad.status_code == 400
    assert get_error_codes(bad.json()) == ['InvalidValue']
<|reserved_special_token_0|>
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_nested_contains(model, context, app):
    """contains() works on a property nested in a list of objects."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?operating_licenses.license_types.contains("lid")')
    assert ids(response) == [0]
<|reserved_special_token_0|>
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_or(model, context, app):
    """`|` produces the union of the two operand filters."""
    ids = RowIds(_push_test_data(app, model))
    app.authmodel(model, ['search'])
    # equality OR lowered equality
    assert ids(app.get(f'/{model}?count=42|status.lower()="ok"')) == [0, 1]
    # range OR equality
    assert ids(app.get(f'/{model}?count<=10|count=13')) == [0, 2]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_nested_recurse(model, context, app):
    """recurse() finds `note` however deeply it is nested."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    matched = app.get(f'/{model}?recurse(note)="foo bar"').json()['_data']
    assert [row['_id'] for row in matched] == [r3['_id']]
<|reserved_special_token_0|>
@pytest.mark.models('backends/mongo/recurse', 'backends/postgres/recurse')
def test_search_recurse_multiple_props_lower(model, app):
    """recurse() must see `country` both at the top level and inside govids."""
    records = [
        {
            'title': 'Org',
            'country': 'fi',
            'govids': [
                {'govid': '1', 'country': 'FI'},
                {'govid': '2', 'country': 'SE'},
            ],
        },
        {
            'title': 'Org',
            'country': 'no',
            'govids': [{'govid': '3', 'country': 'NO'}],
        },
    ]
    r1, r2 = ids(_push_test_data(app, model, records))
    app.authmodel(model, ['search'])
    # 'se' appears only nested inside r1's govids
    assert ids(app.get(f'/{model}?recurse(country).lower()="se"')) == [r1]
    # 'fi' appears both top-level and nested, still just r1
    assert ids(app.get(f'/{model}?recurse(country).lower()="fi"')) == [r1]
    assert ids(app.get(f'/{model}?recurse(country).lower()="no"')) == [r2]
def test_search_any(app):
    """any("eq"/"ne", field, *values) — a disjunctive comparison list."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    # count equal to either 10 or 42
    assert ids(app.get(f'/{model}?any("eq",count,10,42)')) == [0, 1]
    # count different from 42
    assert ids(app.get(f'/{model}?any("ne",count,42)')) == [0, 2]
def test_search_any_in_list(app):
    """any() applies to properties nested inside lists of objects."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?any("eq",notes.note,"hello","world")')
    assert sorted(ids(response)) == [0, 1]
    response = app.get(f'/{model}?any("ne",notes.note,"foo bar")')
    assert sorted(ids(response)) == [0, 1]
<|reserved_special_token_0|>
def test_search_any_recurse(app):
    """any("eq", ...) accepts a recurse() expression as its field."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?any("eq",recurse(status),"OK","none")')
    assert ids(response) == [0]
def test_search_any_recurse_lower(app):
    """any("eq", ...) accepts recurse() chained with .lower()."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?any("eq",recurse(status).lower(),"ok","none")')
    assert ids(response) == [0]
def test_search_any_contains(app):
    """any("contains", field, *substrings) ORs the substring tests."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?any("contains",status,"inv","val","lid")')
    assert sorted(ids(response)) == [1, 2]
def test_search_any_contains_nested(app):
    """any("contains", ...) works on a nested list property."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?any("contains",notes.note,"hel","wor")')
    assert sorted(ids(response)) == [0, 1]
def test_search_any_contains_recurse_lower(app):
    """any("contains", ...) composes with recurse() and .lower()."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?any("contains",recurse(status).lower(),"o","k")')
    assert sorted(ids(response)) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_id_contains(model, app):
    """_id supports .contains() with common and row-unique substrings."""
    app.authmodel(model, ['search', 'getall'])
    ids = RowIds(_push_test_data(app, model))
    # every UUID contains a dash, so all rows match
    assert sorted(ids(app.get(f'/{model}?_id.contains("-")'))) == [0, 1, 2]
    # a slice taken from row 0's id matches only that row
    subid = ids[0][5:10]
    assert ids(app.get(f'/{model}?_id.contains("{subid}")')) == [0]
<|reserved_special_token_0|>
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_id_startswith(model, app):
    """_id.startswith() matches the row whose id begins with the prefix."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    subid = ids[0][:5]
    response = app.get(f'/{model}?_id.startswith("{subid}")')
    assert ids(response) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_id_not_startswith(model, app):
    """A mid-id slice is not a prefix, so startswith() matches nothing."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    subid = ids[0][5:10]
    response = app.get(f'/{model}?_id.startswith("{subid}")')
    assert ids(response) == []
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_revision_contains(model, app):
    """_revision supports .contains(); every revision contains a dash."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?_revision.contains("-")')
    assert sorted(ids(response)) == [0, 1, 2]
<|reserved_special_token_0|>
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_null(model, app):
    """`=null` selects rows where the property was never set."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model, [{'status': 'OK'}, {}]))
    response = app.get(f'/{model}?status=null')
    assert ids(response) == [1]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_not_null(model, app):
    """`!=null` selects rows where the property has a value."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model, [{'status': 'OK'}, {}]))
    response = app.get(f'/{model}?status!=null')
    assert ids(response) == [0]
@pytest.mark.parametrize('backend', ['default', 'mongo'])
def test_extra_fields(postgresql, mongo, backend, rc, tmp_path, request):
    """Rows written under a wider schema stay readable after a field is dropped.

    Inserts rows carrying both `code` and `name`, then reloads the manifest
    with `code` removed; reads must succeed and expose only `name`.
    """
    # Route the default manifest to a tabular CSV we can rewrite mid-test.
    rc = rc.fork({'backends': [backend], 'manifests.default': {'type':
        'tabular', 'path': str(tmp_path / 'manifest.csv'), 'backend': backend}}
        )
    # Initial schema: `extrafields` declares both `code` and `name`.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable(
        """
    m | property | type
    extrafields |
    | code | string
    | name | string
    """
    ))
    context = create_test_context(rc)
    request.addfinalizer(context.wipe_all)
    app = create_test_client(context)
    app.authmodel('extrafields', ['insert'])
    # Batch-insert three rows under the wide schema.
    resp = app.post('/extrafields', json={'_data': [{'_op': 'insert',
        'code': 'lt', 'name': 'Lietuva'}, {'_op': 'insert', 'code': 'lv',
        'name': 'Latvija'}, {'_op': 'insert', 'code': 'ee', 'name': 'Estija'}]}
        )
    assert resp.status_code == 200, resp.json()
    # Narrow the schema: same model, `code` column removed.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable(
        """
    m | property | type
    extrafields |
    | name | string
    """
    ))
    # A fresh context picks up the rewritten manifest.
    context = create_test_context(rc)
    app = create_test_client(context)
    app.authmodel('extrafields', ['getall', 'getone'])
    resp = app.get('/extrafields')
    assert listdata(resp, sort=True) == ['Estija', 'Latvija', 'Lietuva']
    pk = resp.json()['_data'][0]['_id']
    resp = app.get(f'/extrafields/{pk}')
    data = resp.json()
    assert resp.status_code == 200, data
    # The dropped `code` field is gone; only `name` survives in the payload.
    assert take(data) == {'name': 'Lietuva'}
<|reserved_special_token_0|>
def test_base_select(rc, postgresql, request):
    """select() can pull inherited properties through the `_base` model.

    City derives from Location (linked by a shared `_id`); selecting
    `_base.name` and `_base.type` must surface the base row's values
    alongside City's own fields.
    """
    context = bootstrap_manifest(rc,
        """
    d | r | b | m | property | type | ref
    datasets/gov/example/base | |
    | |
    | | | Location | |
    | | | | id | integer |
    | | | | name | string |
    | | | | type | string |
    | |
    | | Location | |
    | | | City | |
    | | | | id | |
    | | | | name | string |
    | | | | population | integer |
    """
    , backend=postgresql, request=request)
    app = create_test_client(context)
    app.authorize(['spinta_set_meta_fields'])
    app.authmodel('datasets/gov/example/base/Location', ['insert', 'delete'])
    app.authmodel('datasets/gov/example/base/City', ['insert', 'delete',
        'getall', 'search'])
    # Base and derived rows share the same _id — that is the inheritance link.
    _id = str(uuid.uuid4())
    app.post('/datasets/gov/example/base/Location', json={'_id': _id, 'id':
        1, 'name': 'Base location', 'type': 'city'})
    app.post('/datasets/gov/example/base/City', json={'_id': _id, 'name':
        'City', 'population': 100})
    resp = app.get(
        '/datasets/gov/example/base/City?select(id,name,_base.name,population,_base.type)'
        )
    # `_base.*` values come from the Location row, the rest from City itself.
    assert resp.json()['_data'] == [{'_base': {'name': 'Base location',
        'type': 'city'}, 'id': 1, 'name': 'City', 'population': 100}]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_select_revision(model, app):
    """select(_revision) in jsonl format returns the stored revision."""
    app.authmodel(model, ['search', 'getone', 'getall'])
    ids = RowIds(_push_test_data(app, model))
    id0 = ids[0]
    # fetch the authoritative revision via getone first
    expected = app.get(f'/{model}/{id0}').json()['_revision']
    resp = app.get(f'/{model}/:format/jsonl?limit(1)&select(_revision)')
    assert json.loads(resp.content) == {'_revision': expected}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _push_test_data(app, model, data=None):
    """Insert `data` (or the module-level `test_data`) into `model`.

    Grants the insert scope, posts all rows in one batch request and
    returns the list of saved records (each with a server-assigned _id).
    """
    app.authmodel(model, ['insert'])
    payload = [dict(row, _op='insert', _type=model) for row in data or test_data]
    resp = app.post('/', json={'_data': payload})
    assert resp.status_code == 200, resp.json()
    result = resp.json()
    assert '_data' in result, result
    return result['_data']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_exact(model, context, app):
    """Exact (case-sensitive) string equality matches a single row."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    matched = app.get(f'/{model}?status="OK"').json()['_data']
    assert [row['_id'] for row in matched] == [r1['_id']]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_exact_lower(model, context, app):
    """Equality after .lower() matches a row stored in upper case."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    matched = app.get(f'/{model}?status.lower()="ok"').json()['_data']
    assert [row['_id'] for row in matched] == [r1['_id']]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_exact_non_string(model, context, app):
    """Equality on integers, type mismatches, and unknown fields."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    # integer equality picks the row with count=13
    matched = app.get(f'/{model}?count=13').json()['_data']
    assert [row['_id'] for row in matched] == [r3['_id']]
    # string value against an integer field is rejected
    bad = app.get(f'/{model}?count="abc"')
    assert bad.status_code == 400
    assert get_error_codes(bad.json()) == ['InvalidValue']
    # valid comparison that simply matches nothing
    assert app.get(f'/{model}?status="o"').json()['_data'] == []
    # property not declared on the model
    bad = app.get(f'/{model}?state="o"')
    assert bad.status_code == 400
    assert get_error_codes(bad.json()) == ['FieldNotInResource']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_exact_multiple_props(model, context, app):
    """Two lowered equality filters on different fields AND together."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    query = 'status.lower()="invalid"&report_type.lower()="stv"'
    matched = app.get(f'/{model}?{query}').json()['_data']
    assert [row['_id'] for row in matched] == [r3['_id']]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_exact_same_prop_multiple_times(model, context, app):
    """Contradictory equality filters on one field match nothing."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    matched = app.get(f'/{model}?status.lower()="invalid"&status.lower()="ok"').json()['_data']
    assert matched == []
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_gt(model, context, app):
    """Strict `>` on numbers, rejection on strings, and combined filters."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    def rows(query):
        return app.get(f'/{model}?{query}').json()['_data']

    # only r2 (count=42) exceeds 40
    assert [row['_id'] for row in rows('count>40')] == [r2['_id']]
    # ordering comparison against a string field is rejected
    bad = app.get(f'/{model}?status>"ok"')
    assert bad.status_code == 400
    assert get_error_codes(bad.json()) == ['InvalidValue']
    # conjunction with a second count filter
    assert [row['_id'] for row in rows('count>40&count>10')] == [r2['_id']]
    # conjunction with a string filter
    assert [row['_id'] for row in rows('count>40&report_type.lower()="vmi"')] == [r2['_id']]
    # strict: count=42 does not satisfy count>42
    assert rows('count>42') == []
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_gt_with_nested_date(model, context, app):
ids = RowIds(_push_test_data(app, model))
app.authmodel(model, ['search'])
resp = app.get(f'/{model}?recurse(create_date)>"2019-04-19"')
assert ids(resp) == [1]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_gte(model, context, app):
r1, r2, r3 = _push_test_data(app, model)
app.authmodel(model, ['search'])
resp = app.get(f'/{model}?count>=40')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
resp = app.get(f'/{model}?status>="ok"')
assert resp.status_code == 400
assert get_error_codes(resp.json()) == ['InvalidValue']
resp = app.get(f'/{model}?count>=40&count>10')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
resp = app.get(f'/{model}?count>=40&report_type.lower()="vmi"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
resp = app.get(f'/{model}?count>=42')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ge_with_nested_date(model, context, app):
r1, r2, r3 = _push_test_data(app, model)
app.authmodel(model, ['search'])
resp = app.get(f'/{model}?recurse(create_date)>="2019-04-20"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_lt(model, context, app):
r1, r2, r3 = _push_test_data(app, model)
app.authmodel(model, ['search'])
resp = app.get(f'/{model}?count<12')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r1['_id']
resp = app.get(f'/{model}?status<"ok"')
assert resp.status_code == 400
assert get_error_codes(resp.json()) == ['InvalidValue']
resp = app.get(f'/{model}?count<20&count>10')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r3['_id']
resp = app.get(f'/{model}?count<50&report_type.lower()="vmi"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
resp = app.get(f'/{model}?count<10')
data = resp.json()['_data']
assert len(data) == 0
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_lt_with_nested_date(model, context, app):
r1, r2, r3 = _push_test_data(app, model)
app.authmodel(model, ['search'])
resp = app.get(f'/{model}?recurse(create_date)<"2019-02-02"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r3['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_lte(model, context, app):
r1, r2, r3 = _push_test_data(app, model)
app.authmodel(model, ['search'])
resp = app.get(f'/{model}?count<=12')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r1['_id']
resp = app.get(f'/{model}?status<="ok"')
assert resp.status_code == 400
assert get_error_codes(resp.json()) == ['InvalidValue']
resp = app.get(f'/{model}?count<=20&count>10')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r3['_id']
resp = app.get(f'/{model}?count<=50&report_type.lower()="vmi"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
resp = app.get(f'/{model}?count<=10')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r1['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_le_with_nested_date(model, context, app):
r1, r2, r3 = _push_test_data(app, model)
app.authmodel(model, ['search'])
resp = app.get(f'/{model}?recurse(create_date)<="2019-02-01"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r3['_id']
<|reserved_special_token_0|>
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne_lower(model, context, app):
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?status.lower()!="ok"')
assert ids(resp) == [1, 2]
<|reserved_special_token_0|>
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne_multiple_props_and_logic(model, context, app):
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?status.lower()!="ok"&report_type.lower()="stv"')
assert ids(resp) == [2]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne_nested(model, context, app):
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(
f'/{model}?notes.create_date!="2019-02-01"&status!="invalid"')
assert ids(resp) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne_nested_missing_data(model, context, app):
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?operating_licenses.license_types!="valid"')
assert ids(resp) == [1]
<|reserved_special_token_0|>
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_contains_case_insensitive(model, context, app, mocker):
r1, r2, r3 = _push_test_data(app, model)
app.authmodel(model, ['search'])
resp = app.get(f'/{model}?report_type.lower().contains("vm")')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_contains_multi_field(model, context, app, mocker):
r1, r2, r3 = _push_test_data(app, model)
app.authmodel(model, ['search'])
resp = app.get(
f'/{model}?status.contains("valid")&report_type.lower().contains("tv")'
)
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r3['_id']
resp = app.get(
f'/{model}?status.contains("valid")&report_type.contains("TV")')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r3['_id']
resp = app.get(
f'/{model}?report_type.lower().contains("vm")&report_type.lower().contains("mi")'
)
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
resp = app.get(
f'/{model}?status.contains("valid")&report_type.lower()="vmi"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_contains_type_check(model, context, app):
    """contains() is a string operator — applying it to a date is rejected."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    bad = app.get(f'/{model}?recurse(create_date).contains("2019-04-20")')
    assert bad.status_code == 400
    assert get_error_codes(bad.json()) == ['InvalidValue']
<|reserved_special_token_0|>
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_select_unknown_property(model, context, app, mocker):
_push_test_data(app, model)
app.authmodel(model, ['search'])
resp = app.get(f'/{model}?select(nothere)')
assert error(resp) == 'FieldNotInResource'
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_select_unknown_property_in_object(model, context, app, mocker):
_push_test_data(app, model)
app.authmodel(model, ['search'])
resp = app.get(f'/{model}?select(notes.nothere)')
assert error(resp) == 'FieldNotInResource'
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_startswith(model, context, app):
r1, r2, r3 = _push_test_data(app, model)
app.authmodel(model, ['search'])
resp = app.get(f'/{model}?report_type.startswith("VM")')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
resp = app.get(f'/{model}?report_type.lower().startswith("vm")')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
resp = app.get(
f'/{model}?status.startswith("in")&report_type.lower().startswith("vm")'
)
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
resp = app.get(
f'/{model}?report_type.lower().startswith("st")&status.lower()="ok"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r1['_id']
resp = app.get(f'/{model}?status.startswith("valid")')
data = resp.json()['_data']
assert len(data) == 0
resp = app.get(f'/{model}?notes.create_date.startswith("2019-04-20")')
assert resp.status_code == 400
assert get_error_codes(resp.json()) == ['InvalidValue']
<|reserved_special_token_0|>
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_nested_contains(model, context, app):
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?operating_licenses.license_types.contains("lid")'
)
assert ids(resp) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_nested_startswith(model, context, app):
    """startswith() works on properties nested inside arrays of objects."""
    app.authmodel(model, ['search'])
    r1, r2, r3 = _push_test_data(app, model)
    # note nested one level deep
    matched = app.get(f'/{model}?notes.note.startswith("fo")').json()['_data']
    assert [row['_id'] for row in matched] == [r3['_id']]
    # property nested inside a different array
    matched = app.get(
        f'/{model}?operating_licenses.license_types.startswith("exp")'
    ).json()['_data']
    assert [row['_id'] for row in matched] == [r2['_id']]
<|reserved_special_token_0|>
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_or(model, context, app):
ids = RowIds(_push_test_data(app, model))
app.authmodel(model, ['search'])
resp = app.get(f'/{model}?count=42|status.lower()="ok"')
assert ids(resp) == [0, 1]
resp = app.get(f'/{model}?count<=10|count=13')
assert ids(resp) == [0, 2]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_nested_recurse(model, context, app):
r1, r2, r3 = _push_test_data(app, model)
app.authmodel(model, ['search'])
resp = app.get(f'/{model}?recurse(note)="foo bar"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r3['_id']
<|reserved_special_token_0|>
@pytest.mark.models('backends/mongo/recurse', 'backends/postgres/recurse')
def test_search_recurse_multiple_props_lower(model, app):
r1, r2 = ids(_push_test_data(app, model, [{'title': 'Org', 'country':
'fi', 'govids': [{'govid': '1', 'country': 'FI'}, {'govid': '2',
'country': 'SE'}]}, {'title': 'Org', 'country': 'no', 'govids': [{
'govid': '3', 'country': 'NO'}]}]))
app.authmodel(model, ['search'])
resp = app.get(f'/{model}?recurse(country).lower()="se"')
assert ids(resp) == [r1]
resp = app.get(f'/{model}?recurse(country).lower()="fi"')
assert ids(resp) == [r1]
resp = app.get(f'/{model}?recurse(country).lower()="no"')
assert ids(resp) == [r2]
def test_search_any(app):
model = 'backends/postgres/report'
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?any("eq",count,10,42)')
assert ids(resp) == [0, 1]
resp = app.get(f'/{model}?any("ne",count,42)')
assert ids(resp) == [0, 2]
def test_search_any_in_list(app):
model = 'backends/postgres/report'
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?any("eq",notes.note,"hello","world")')
assert sorted(ids(resp)) == [0, 1]
resp = app.get(f'/{model}?any("ne",notes.note,"foo bar")')
assert sorted(ids(resp)) == [0, 1]
<|reserved_special_token_0|>
def test_search_any_recurse(app):
model = 'backends/postgres/report'
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?any("eq",recurse(status),"OK","none")')
assert ids(resp) == [0]
def test_search_any_recurse_lower(app):
model = 'backends/postgres/report'
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?any("eq",recurse(status).lower(),"ok","none")')
assert ids(resp) == [0]
def test_search_any_contains(app):
model = 'backends/postgres/report'
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?any("contains",status,"inv","val","lid")')
assert sorted(ids(resp)) == [1, 2]
def test_search_any_contains_nested(app):
model = 'backends/postgres/report'
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?any("contains",notes.note,"hel","wor")')
assert sorted(ids(resp)) == [0, 1]
def test_search_any_contains_recurse_lower(app):
model = 'backends/postgres/report'
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?any("contains",recurse(status).lower(),"o","k")')
assert sorted(ids(resp)) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_id_contains(model, app):
app.authmodel(model, ['search', 'getall'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?_id.contains("-")')
assert sorted(ids(resp)) == [0, 1, 2]
subid = ids[0][5:10]
resp = app.get(f'/{model}?_id.contains("{subid}")')
assert ids(resp) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_id_not_contains(model, app):
    """_id.contains() with a substring present in no id matches nothing."""
    app.authmodel(model, ['search', 'getall'])
    ids = RowIds(_push_test_data(app, model))
    assert ids(app.get(f'/{model}?_id.contains("AAAAA")')) == []
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_id_startswith(model, app):
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
subid = ids[0][:5]
resp = app.get(f'/{model}?_id.startswith("{subid}")')
assert ids(resp) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_id_not_startswith(model, app):
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
subid = ids[0][5:10]
resp = app.get(f'/{model}?_id.startswith("{subid}")')
assert ids(resp) == []
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_revision_contains(model, app):
    """contains() works on the reserved _revision property."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?_revision.contains("-")')
    assert sorted(ids(resp)) == [0, 1, 2]
<|reserved_special_token_0|>
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_select_in_or(model, app):
    """A parenthesised `|` group combined with select(_id) returns only matching ids."""
    app.authmodel(model, ['search', 'getone'])
    row_ids = RowIds(_push_test_data(app, model))
    query = f'/{model}?(report_type="STV"|status="OK")&select(_id)'
    response = app.get(query)
    assert row_ids(response) == [0, 2]
<|reserved_special_token_0|>
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_null(model, app):
    """`status=null` matches only the row that has no status value."""
    app.authmodel(model, ['search'])
    rows = [{'status': 'OK'}, {}]
    row_ids = RowIds(_push_test_data(app, model, rows))
    response = app.get(f'/{model}?status=null')
    assert row_ids(response) == [1]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_not_null(model, app):
    """`status!=null` matches only the row that has a status value."""
    app.authmodel(model, ['search'])
    rows = [{'status': 'OK'}, {}]
    row_ids = RowIds(_push_test_data(app, model, rows))
    response = app.get(f'/{model}?status!=null')
    assert row_ids(response) == [0]
@pytest.mark.parametrize('backend', ['default', 'mongo'])
def test_extra_fields(postgresql, mongo, backend, rc, tmp_path, request):
    """Rows inserted with a column that is later dropped from the manifest
    are still readable; the removed field is simply no longer returned."""
    rc = rc.fork({'backends': [backend], 'manifests.default': {'type':
        'tabular', 'path': str(tmp_path / 'manifest.csv'), 'backend': backend}}
        )
    # Initial manifest: model has both `code` and `name`.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable(
        """
    m | property | type
    extrafields |
    | code | string
    | name | string
    """
        ))
    context = create_test_context(rc)
    request.addfinalizer(context.wipe_all)
    app = create_test_client(context)
    app.authmodel('extrafields', ['insert'])
    resp = app.post('/extrafields', json={'_data': [{'_op': 'insert',
        'code': 'lt', 'name': 'Lietuva'}, {'_op': 'insert', 'code': 'lv',
        'name': 'Latvija'}, {'_op': 'insert', 'code': 'ee', 'name': 'Estija'}]}
        )
    assert resp.status_code == 200, resp.json()
    # New manifest drops `code`; existing rows now carry an extra field.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable(
        """
    m | property | type
    extrafields |
    | name | string
    """
        ))
    context = create_test_context(rc)
    app = create_test_client(context)
    app.authmodel('extrafields', ['getall', 'getone'])
    resp = app.get('/extrafields')
    assert listdata(resp, sort=True) == ['Estija', 'Latvija', 'Lietuva']
    pk = resp.json()['_data'][0]['_id']
    resp = app.get(f'/extrafields/{pk}')
    data = resp.json()
    assert resp.status_code == 200, data
    assert take(data) == {'name': 'Lietuva'}
@pytest.mark.parametrize('backend', ['mongo'])
def test_missing_fields(postgresql, mongo, backend, rc, tmp_path):
    """A column added to the manifest after rows were inserted is returned
    as None for those existing rows."""
    rc = rc.fork({'backends': [backend], 'manifests.default': {'type':
        'tabular', 'path': str(tmp_path / 'manifest.csv'), 'backend': backend}}
        )
    # Initial manifest: model only has `code`.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable(
        """
    m | property | type
    missingfields |
    | code | string
    """
        ))
    context = create_test_context(rc)
    app = create_test_client(context)
    app.authmodel('missingfields', ['insert'])
    resp = app.post('/missingfields', json={'_data': [{'_op': 'insert',
        'code': 'lt'}, {'_op': 'insert', 'code': 'lv'}, {'_op': 'insert',
        'code': 'ee'}]})
    assert resp.status_code == 200, resp.json()
    # New manifest adds `name`, which old rows are missing.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable(
        """
    m | property | type
    missingfields |
    | code | string
    | name | string
    """
        ))
    context = create_test_context(rc)
    app = create_test_client(context)
    app.authmodel('missingfields', ['search', 'getone'])
    resp = app.get('/missingfields?select(_id,code,name)')
    assert listdata(resp, sort=True) == [('ee', None), ('lt', None), ('lv',
        None)]
    pk = resp.json()['_data'][0]['_id']
    resp = app.get(f'/missingfields/{pk}')
    data = resp.json()
    assert resp.status_code == 200, data
    assert take(data) == {'code': 'lt'}
def test_base_select(rc, postgresql, request):
    """select() can address properties of a model's base model via `_base.*`."""
    context = bootstrap_manifest(rc,
        """
    d | r | b | m | property | type | ref
    datasets/gov/example/base | |
    | |
    | | | Location | |
    | | | | id | integer |
    | | | | name | string |
    | | | | type | string |
    | |
    | | Location | |
    | | | City | |
    | | | | id | |
    | | | | name | string |
    | | | | population | integer |
    """
    , backend=postgresql, request=request)
    app = create_test_client(context)
    app.authorize(['spinta_set_meta_fields'])
    app.authmodel('datasets/gov/example/base/Location', ['insert', 'delete'])
    app.authmodel('datasets/gov/example/base/City', ['insert', 'delete',
        'getall', 'search'])
    # The City row shares its _id with the base Location row.
    _id = str(uuid.uuid4())
    app.post('/datasets/gov/example/base/Location', json={'_id': _id, 'id':
        1, 'name': 'Base location', 'type': 'city'})
    app.post('/datasets/gov/example/base/City', json={'_id': _id, 'name':
        'City', 'population': 100})
    resp = app.get(
        '/datasets/gov/example/base/City?select(id,name,_base.name,population,_base.type)'
        )
    assert resp.json()['_data'] == [{'_base': {'name': 'Base location',
        'type': 'city'}, 'id': 1, 'name': 'City', 'population': 100}]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_select_revision(model, app):
    """select(_revision) in jsonl output returns the stored revision value."""
    app.authmodel(model, ['search', 'getone', 'getall'])
    ids = RowIds(_push_test_data(app, model))
    id0 = ids[0]
    # Fetch the revision directly, then compare with the jsonl projection.
    resp = app.get(f'/{model}/{id0}')
    revision = resp.json()['_revision']
    resp = app.get(f'/{model}/:format/jsonl?limit(1)&select(_revision)')
    assert json.loads(resp.content) == {'_revision': revision}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _push_test_data(app, model, data=None):
    """Insert rows for *model* in one batch request and return the created records.

    Uses *data* when given, otherwise falls back to the module-level
    ``test_data`` rows.
    """
    app.authmodel(model, ['insert'])
    # Explicit None check: `data or test_data` would silently substitute the
    # default rows when a caller passes an (intentionally) empty list.
    rows = test_data if data is None else data
    resp = app.post('/', json={'_data': [
        {**row, '_op': 'insert', '_type': model}
        for row in rows
    ]})
    assert resp.status_code == 200, resp.json()
    resp = resp.json()
    assert '_data' in resp, resp
    return resp['_data']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_exact(model, context, app):
    """Exact string equality returns the single matching row."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?status="OK"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r1['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_exact_lower(model, context, app):
    """.lower() makes an exact match case-insensitive."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?status.lower()="ok"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r1['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_exact_non_string(model, context, app):
    """Exact match on a non-string (integer) property, plus error cases."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?count=13')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
    # A string literal against an integer property is a type error.
    resp = app.get(f'/{model}?count="abc"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ['InvalidValue']
    # Partial value does not match: equality is exact, not substring.
    resp = app.get(f'/{model}?status="o"')
    data = resp.json()['_data']
    assert len(data) == 0
    # Unknown property name is rejected.
    resp = app.get(f'/{model}?state="o"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ['FieldNotInResource']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_exact_multiple_props(model, context, app):
    """Two `&`-joined exact filters intersect to a single row."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(
        f'/{model}?status.lower()="invalid"&report_type.lower()="stv"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_exact_same_prop_multiple_times(model, context, app):
    """Repeating the same property with conflicting values yields no rows (AND semantics)."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?status.lower()="invalid"&status.lower()="ok"')
    data = resp.json()['_data']
    assert len(data) == 0
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_gt(model, context, app):
    """`>` comparison on an integer property, alone and combined with other filters."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?count>40')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # `>` is not defined for string properties.
    resp = app.get(f'/{model}?status>"ok"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ['InvalidValue']
    # Multiple `>` filters on the same property intersect.
    resp = app.get(f'/{model}?count>40&count>10')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # `>` combined with a filter on another property.
    resp = app.get(f'/{model}?count>40&report_type.lower()="vmi"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # Strictly greater: the boundary value itself does not match.
    resp = app.get(f'/{model}?count>42')
    data = resp.json()['_data']
    assert len(data) == 0
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_gt_with_nested_date(model, context, app):
    """`>` works on a date reached through recurse()."""
    row_ids = RowIds(_push_test_data(app, model))
    app.authmodel(model, ['search'])
    response = app.get('/' + model + '?recurse(create_date)>"2019-04-19"')
    assert row_ids(response) == [1]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_gte(model, context, app):
    """`>=` comparison on an integer property, alone and combined with other filters."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?count>=40')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # `>=` is not defined for string properties.
    resp = app.get(f'/{model}?status>="ok"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ['InvalidValue']
    # Multiple range filters on the same property intersect.
    resp = app.get(f'/{model}?count>=40&count>10')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # `>=` combined with a filter on another property.
    resp = app.get(f'/{model}?count>=40&report_type.lower()="vmi"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # Inclusive bound: the boundary value itself matches.
    resp = app.get(f'/{model}?count>=42')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ge_with_nested_date(model, context, app):
    """`>=` works on a date reached through recurse()."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?recurse(create_date)>="2019-04-20"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_lt(model, context, app):
    """`<` comparison on an integer property, alone and combined with other filters."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?count<12')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r1['_id']
    # `<` is not defined for string properties.
    resp = app.get(f'/{model}?status<"ok"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ['InvalidValue']
    # Range: 10 < count < 20.
    resp = app.get(f'/{model}?count<20&count>10')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
    # `<` combined with a filter on another property.
    resp = app.get(f'/{model}?count<50&report_type.lower()="vmi"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # Strictly less: no row has count below 10.
    resp = app.get(f'/{model}?count<10')
    data = resp.json()['_data']
    assert len(data) == 0
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_lt_with_nested_date(model, context, app):
    """`<` works on a date reached through recurse()."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?recurse(create_date)<"2019-02-02"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_lte(model, context, app):
    """`<=` comparison on an integer property, alone and combined with other filters."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?count<=12')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r1['_id']
    # `<=` is not defined for string properties.
    resp = app.get(f'/{model}?status<="ok"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ['InvalidValue']
    # Range: 10 < count <= 20.
    resp = app.get(f'/{model}?count<=20&count>10')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
    # `<=` combined with a filter on another property.
    resp = app.get(f'/{model}?count<=50&report_type.lower()="vmi"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # Inclusive bound: the boundary value itself matches.
    resp = app.get(f'/{model}?count<=10')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r1['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_le_with_nested_date(model, context, app):
    """`<=` works on a date reached through recurse()."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?recurse(create_date)<="2019-02-01"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne(model, context, app):
    """`!=` excludes rows whose property equals the given literal."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get('/' + model + '?status!="invalid"')
    assert row_ids(response) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne_lower(model, context, app):
    """`!=` composed with .lower() is case-insensitive."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get('/' + model + '?status.lower()!="ok"')
    assert row_ids(response) == [1, 2]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne_multiple_props(model, context, app):
    """Two `!=` filters on the same property intersect (AND semantics)."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?count!=10&count!=42')
    assert ids(resp) == [2]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne_multiple_props_and_logic(model, context, app):
    """`!=` combines with an equality filter on another property."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?status.lower()!="ok"&report_type.lower()="stv"')
    assert ids(resp) == [2]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne_nested(model, context, app):
    """`!=` works on nested properties combined with a top-level filter."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(
        f'/{model}?notes.create_date!="2019-02-01"&status!="invalid"')
    assert ids(resp) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne_nested_missing_data(model, context, app):
    """`!=` on a nested list property also matches rows where the data is absent."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?operating_licenses.license_types!="valid"')
    assert ids(resp) == [1]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_contains(model, context, app, mocker):
    """contains() performs a substring match (here on a lower-cased value)."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?report_type.lower().contains("vm")')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_contains_case_insensitive(model, context, app, mocker):
    """contains() after .lower() matches regardless of the stored case."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?report_type.lower().contains("vm")')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_contains_multi_field(model, context, app, mocker):
    """contains() filters can be combined across properties and with equality."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(
        f'/{model}?status.contains("valid")&report_type.lower().contains("tv")'
        )
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
    # Same intersection without .lower(), matching the stored case directly.
    resp = app.get(
        f'/{model}?status.contains("valid")&report_type.contains("TV")')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
    # Two contains() filters on the same property intersect.
    resp = app.get(
        f'/{model}?report_type.lower().contains("vm")&report_type.lower().contains("mi")'
        )
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # contains() combined with an exact match on another property.
    resp = app.get(
        f'/{model}?status.contains("valid")&report_type.lower()="vmi"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_contains_type_check(model, context, app):
    """contains() on a non-string (date) property is rejected."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?recurse(create_date).contains("2019-04-20")')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ['InvalidValue']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_contains_with_select(model, context, app, mocker):
    """contains() combined with select(); _id is added only when always_show_id is on."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?report_type.lower().contains("vm")&select(count)'
        )
    assert resp.status_code == 200
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0] == {'count': 42}
    # With always_show_id enabled, select(count) also returns _id.
    mocker.patch.object(context.get('config'), 'always_show_id', True)
    resp = app.get(f'/{model}?report_type.lower().contains("vm")&select(count)'
        )
    assert resp.status_code == 200
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0] == {'_id': r2['_id'], 'count': 42}
    # Without select(), always_show_id reduces the payload to just _id.
    resp = app.get(f'/{model}?report_type.lower().contains("vm")')
    assert resp.status_code == 200
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0] == {'_id': r2['_id']}
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_select_unknown_property(model, context, app, mocker):
    """select() with an unknown property name is rejected."""
    _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?select(nothere)')
    assert error(resp) == 'FieldNotInResource'
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_select_unknown_property_in_object(model, context, app, mocker):
    """select() with an unknown nested property name is rejected."""
    _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?select(notes.nothere)')
    assert error(resp) == 'FieldNotInResource'
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_startswith(model, context, app):
    """startswith() prefix matching, with and without .lower(), plus error cases."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?report_type.startswith("VM")')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # Case-insensitive prefix via .lower().
    resp = app.get(f'/{model}?report_type.lower().startswith("vm")')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # Two startswith() filters on different properties intersect.
    resp = app.get(
        f'/{model}?status.startswith("in")&report_type.lower().startswith("vm")'
        )
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # startswith() combined with an exact match.
    resp = app.get(
        f'/{model}?report_type.lower().startswith("st")&status.lower()="ok"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r1['_id']
    # Prefix only: a mid-string substring does not match.
    resp = app.get(f'/{model}?status.startswith("valid")')
    data = resp.json()['_data']
    assert len(data) == 0
    # startswith() on a non-string (date) property is rejected.
    resp = app.get(f'/{model}?notes.create_date.startswith("2019-04-20")')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ['InvalidValue']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_nested(model, context, app):
    """Filters address nested (dotted) properties: equality, .lower(), ranges, contains()."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?notes.note="foo bar"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
    resp = app.get(f'/{model}?notes.note.lower()="foo bar"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
    resp = app.get(f'/{model}?notes.create_date="2019-03-14"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r1['_id']
    resp = app.get(f'/{model}?notes.create_date>"2019-04-01"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # Unknown nested path is rejected.
    resp = app.get(f'/{model}?notes.foo.bar="baz"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ['FieldNotInResource']
    resp = app.get(f'/{model}?notes.note.contains("bar")')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_nested_contains(model, context, app):
    """contains() matches substrings inside a nested list-of-strings property."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    query = f'/{model}?operating_licenses.license_types.contains("lid")'
    assert row_ids(app.get(query)) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_nested_startswith(model, context, app):
    """startswith() works on nested scalar and list-of-strings properties."""
    app.authmodel(model, ['search'])
    r1, r2, r3 = _push_test_data(app, model)
    resp = app.get(f'/{model}?notes.note.startswith("fo")')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
    resp = app.get(
        f'/{model}?operating_licenses.license_types.startswith("exp")')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
<|reserved_special_token_0|>
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_or(model, context, app):
    """`|` combines two filters as a logical OR."""
    row_ids = RowIds(_push_test_data(app, model))
    app.authmodel(model, ['search'])
    for query, expected in [
        (f'/{model}?count=42|status.lower()="ok"', [0, 1]),
        (f'/{model}?count<=10|count=13', [0, 2]),
    ]:
        assert row_ids(app.get(query)) == expected
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_nested_recurse(model, context, app):
    """recurse() finds a property at any nesting depth for an exact match."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?recurse(note)="foo bar"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
<|reserved_special_token_0|>
@pytest.mark.models('backends/mongo/recurse', 'backends/postgres/recurse')
def test_search_recurse_multiple_props_lower(model, app):
    """recurse(country).lower() matches `country` at the top level and inside govids."""
    # NOTE(review): uses the module-level `ids` helper (not RowIds) — defined
    # elsewhere in this file.
    r1, r2 = ids(_push_test_data(app, model, [{'title': 'Org', 'country':
        'fi', 'govids': [{'govid': '1', 'country': 'FI'}, {'govid': '2',
        'country': 'SE'}]}, {'title': 'Org', 'country': 'no', 'govids': [{
        'govid': '3', 'country': 'NO'}]}]))
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?recurse(country).lower()="se"')
    assert ids(resp) == [r1]
    resp = app.get(f'/{model}?recurse(country).lower()="fi"')
    assert ids(resp) == [r1]
    resp = app.get(f'/{model}?recurse(country).lower()="no"')
    assert ids(resp) == [r2]
def test_search_any(app):
    """any("eq"/"ne", prop, values...) matches rows where the comparison holds for any value."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    cases = [
        ('any("eq",count,10,42)', [0, 1]),
        ('any("ne",count,42)', [0, 2]),
    ]
    for expr, expected in cases:
        response = app.get(f'/{model}?{expr}')
        assert row_ids(response) == expected
def test_search_any_in_list(app):
    """any() applies to a nested list property: a row matches if any item satisfies it."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?any("eq",notes.note,"hello","world")')
    assert sorted(ids(resp)) == [0, 1]
    resp = app.get(f'/{model}?any("ne",notes.note,"foo bar")')
    assert sorted(ids(resp)) == [0, 1]
def test_search_any_in_list_of_scalars(app):
    """any() applies to a nested list of scalar strings."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(
        f'/{model}?any("eq",operating_licenses.license_types,"valid","invalid","expired")'
        )
    assert sorted(ids(resp)) == [0, 1]
    resp = app.get(
        f'/{model}?any("ne",operating_licenses.license_types,"expired")')
    assert sorted(ids(resp)) == [0]
def test_search_any_recurse(app):
    """any("eq", ...) composes with recurse()."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?any("eq",recurse(status),"OK","none")')
    assert ids(resp) == [0]
def test_search_any_recurse_lower(app):
    """any("eq", ...) composes with recurse() and .lower()."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?any("eq",recurse(status).lower(),"ok","none")')
    assert ids(resp) == [0]
def test_search_any_contains(app):
    """any("contains", ...) matches rows whose status contains any given substring."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?any("contains",status,"inv","val","lid")')
    assert sorted(ids(resp)) == [1, 2]
def test_search_any_contains_nested(app):
    """`any("contains", ...)` matches rows whose nested note contains any given substring."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?any("contains",notes.note,"hel","wor")')
    assert sorted(ids(resp)) == [0, 1]
def test_search_any_contains_recurse_lower(app):
    """`any("contains", ...)` composes with recurse() and .lower()."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?any("contains",recurse(status).lower(),"o","k")')
    assert sorted(ids(resp)) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_id_contains(model, app):
    """contains() works on the reserved _id property."""
    app.authmodel(model, ['search', 'getall'])
    ids = RowIds(_push_test_data(app, model))
    # Every generated id is a UUID, so all three rows contain '-'.
    resp = app.get(f'/{model}?_id.contains("-")')
    assert sorted(ids(resp)) == [0, 1, 2]
    # A 5-character slice of the first row's id should match only that row.
    subid = ids[0][5:10]
    resp = app.get(f'/{model}?_id.contains("{subid}")')
    assert ids(resp) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_id_not_contains(model, app):
    """_id.contains() with a substring not present in any UUID matches nothing."""
    app.authmodel(model, ['search', 'getall'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?_id.contains("AAAAA")')
    assert ids(resp) == []
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_id_startswith(model, app):
    """startswith() on _id matches a row by its id prefix."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    # First 5 characters of the first row's id.
    subid = ids[0][:5]
    resp = app.get(f'/{model}?_id.startswith("{subid}")')
    assert ids(resp) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_id_not_startswith(model, app):
    """startswith() on _id does not match a substring taken from the middle of the id."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    # Characters 5..10 are not a prefix, so no row should match.
    subid = ids[0][5:10]
    resp = app.get(f'/{model}?_id.startswith("{subid}")')
    assert ids(resp) == []
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_revision_contains(model, app):
    """contains() works on the reserved _revision property."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?_revision.contains("-")')
    assert sorted(ids(resp)) == [0, 1, 2]
<|reserved_special_token_0|>
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_group(model, app):
    """A parenthesised `&` group behaves like the plain conjunction."""
    app.authmodel(model, ['search', 'getone'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get('/' + model + '?(report_type="STV"&status="OK")')
    assert row_ids(response) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_select_in_or(model, app):
    """A parenthesised `|` group combined with select(_id) returns only matching ids."""
    app.authmodel(model, ['search', 'getone'])
    row_ids = RowIds(_push_test_data(app, model))
    query = f'/{model}?(report_type="STV"|status="OK")&select(_id)'
    response = app.get(query)
    assert row_ids(response) == [0, 2]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_lower_contains(model, app):
    """.lower().contains(...) performs a case-insensitive substring match."""
    app.authmodel(model, ['search', 'getone'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get('/' + model + '?report_type.lower().contains("st")')
    assert row_ids(response) == [0, 2]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_null(model, app):
    """`status=null` matches only the row that has no status value."""
    app.authmodel(model, ['search'])
    rows = [{'status': 'OK'}, {}]
    row_ids = RowIds(_push_test_data(app, model, rows))
    response = app.get(f'/{model}?status=null')
    assert row_ids(response) == [1]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_not_null(model, app):
    """`status!=null` matches only the row that has a status value."""
    app.authmodel(model, ['search'])
    rows = [{'status': 'OK'}, {}]
    row_ids = RowIds(_push_test_data(app, model, rows))
    response = app.get(f'/{model}?status!=null')
    assert row_ids(response) == [0]
@pytest.mark.parametrize('backend', ['default', 'mongo'])
def test_extra_fields(postgresql, mongo, backend, rc, tmp_path, request):
    """Rows inserted with a column that is later dropped from the manifest
    are still readable; the removed field is simply no longer returned."""
    rc = rc.fork({'backends': [backend], 'manifests.default': {'type':
        'tabular', 'path': str(tmp_path / 'manifest.csv'), 'backend': backend}}
        )
    # Initial manifest: model has both `code` and `name`.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable(
        """
    m | property | type
    extrafields |
    | code | string
    | name | string
    """
        ))
    context = create_test_context(rc)
    request.addfinalizer(context.wipe_all)
    app = create_test_client(context)
    app.authmodel('extrafields', ['insert'])
    resp = app.post('/extrafields', json={'_data': [{'_op': 'insert',
        'code': 'lt', 'name': 'Lietuva'}, {'_op': 'insert', 'code': 'lv',
        'name': 'Latvija'}, {'_op': 'insert', 'code': 'ee', 'name': 'Estija'}]}
        )
    assert resp.status_code == 200, resp.json()
    # New manifest drops `code`; existing rows now carry an extra field.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable(
        """
    m | property | type
    extrafields |
    | name | string
    """
        ))
    context = create_test_context(rc)
    app = create_test_client(context)
    app.authmodel('extrafields', ['getall', 'getone'])
    resp = app.get('/extrafields')
    assert listdata(resp, sort=True) == ['Estija', 'Latvija', 'Lietuva']
    pk = resp.json()['_data'][0]['_id']
    resp = app.get(f'/extrafields/{pk}')
    data = resp.json()
    assert resp.status_code == 200, data
    assert take(data) == {'name': 'Lietuva'}
@pytest.mark.parametrize('backend', ['mongo'])
def test_missing_fields(postgresql, mongo, backend, rc, tmp_path):
    """A column added to the manifest after rows were inserted is returned
    as None for those existing rows."""
    rc = rc.fork({'backends': [backend], 'manifests.default': {'type':
        'tabular', 'path': str(tmp_path / 'manifest.csv'), 'backend': backend}}
        )
    # Initial manifest: model only has `code`.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable(
        """
    m | property | type
    missingfields |
    | code | string
    """
        ))
    context = create_test_context(rc)
    app = create_test_client(context)
    app.authmodel('missingfields', ['insert'])
    resp = app.post('/missingfields', json={'_data': [{'_op': 'insert',
        'code': 'lt'}, {'_op': 'insert', 'code': 'lv'}, {'_op': 'insert',
        'code': 'ee'}]})
    assert resp.status_code == 200, resp.json()
    # New manifest adds `name`, which old rows are missing.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable(
        """
    m | property | type
    missingfields |
    | code | string
    | name | string
    """
        ))
    context = create_test_context(rc)
    app = create_test_client(context)
    app.authmodel('missingfields', ['search', 'getone'])
    resp = app.get('/missingfields?select(_id,code,name)')
    assert listdata(resp, sort=True) == [('ee', None), ('lt', None), ('lv',
        None)]
    pk = resp.json()['_data'][0]['_id']
    resp = app.get(f'/missingfields/{pk}')
    data = resp.json()
    assert resp.status_code == 200, data
    assert take(data) == {'code': 'lt'}
def test_base_select(rc, postgresql, request):
    """select() can address properties of a model's base model via `_base.*`."""
    context = bootstrap_manifest(rc,
        """
    d | r | b | m | property | type | ref
    datasets/gov/example/base | |
    | |
    | | | Location | |
    | | | | id | integer |
    | | | | name | string |
    | | | | type | string |
    | |
    | | Location | |
    | | | City | |
    | | | | id | |
    | | | | name | string |
    | | | | population | integer |
    """
    , backend=postgresql, request=request)
    app = create_test_client(context)
    app.authorize(['spinta_set_meta_fields'])
    app.authmodel('datasets/gov/example/base/Location', ['insert', 'delete'])
    app.authmodel('datasets/gov/example/base/City', ['insert', 'delete',
        'getall', 'search'])
    # The City row shares its _id with the base Location row.
    _id = str(uuid.uuid4())
    app.post('/datasets/gov/example/base/Location', json={'_id': _id, 'id':
        1, 'name': 'Base location', 'type': 'city'})
    app.post('/datasets/gov/example/base/City', json={'_id': _id, 'name':
        'City', 'population': 100})
    resp = app.get(
        '/datasets/gov/example/base/City?select(id,name,_base.name,population,_base.type)'
        )
    assert resp.json()['_data'] == [{'_base': {'name': 'Base location',
        'type': 'city'}, 'id': 1, 'name': 'City', 'population': 100}]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_select_revision(model, app):
    """select(_revision) in jsonl output returns the stored revision value."""
    app.authmodel(model, ['search', 'getone', 'getall'])
    ids = RowIds(_push_test_data(app, model))
    id0 = ids[0]
    # Fetch the revision directly, then compare with the jsonl projection.
    resp = app.get(f'/{model}/{id0}')
    revision = resp.json()['_revision']
    resp = app.get(f'/{model}/:format/jsonl?limit(1)&select(_revision)')
    assert json.loads(resp.content) == {'_revision': revision}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _push_test_data(app, model, data=None):
    """Insert rows for *model* in one batch request and return the created records.

    Uses *data* when given, otherwise falls back to the module-level
    ``test_data`` rows.
    """
    app.authmodel(model, ['insert'])
    # Explicit None check: `data or test_data` would silently substitute the
    # default rows when a caller passes an (intentionally) empty list.
    rows = test_data if data is None else data
    resp = app.post('/', json={'_data': [
        {**row, '_op': 'insert', '_type': model}
        for row in rows
    ]})
    assert resp.status_code == 200, resp.json()
    resp = resp.json()
    assert '_data' in resp, resp
    return resp['_data']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_exact(model, context, app):
    """Exact string equality returns the single matching row."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?status="OK"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r1['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_exact_lower(model, context, app):
    """.lower() makes an exact match case-insensitive."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?status.lower()="ok"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r1['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_exact_non_string(model, context, app):
    """Exact search against a non-string (integer) property."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    # integer equality matches the single row with count=13
    resp = app.get(f'/{model}?count=13')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
    # a string operand on an integer property is rejected
    resp = app.get(f'/{model}?count="abc"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ['InvalidValue']
    # a partial value does not match (exact comparison, not substring)
    resp = app.get(f'/{model}?status="o"')
    data = resp.json()['_data']
    assert len(data) == 0
    # an unknown property name is an error
    resp = app.get(f'/{model}?state="o"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ['FieldNotInResource']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_exact_multiple_props(model, context, app):
    """Two exact conditions on different fields are joined with AND logic."""
    first, second, third = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?status.lower()="invalid"&report_type.lower()="stv"')
    matches = response.json()['_data']
    assert len(matches) == 1
    assert matches[0]['_id'] == third['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_exact_same_prop_multiple_times(model, context, app):
    """Contradictory exact conditions on one field (AND logic) match nothing."""
    first, second, third = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?status.lower()="invalid"&status.lower()="ok"')
    assert response.json()['_data'] == []
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_gt(model, context, app):
    """Greater-than search on an integer property."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    # single field search
    resp = app.get(f'/{model}?count>40')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # `>` is rejected for string values
    resp = app.get(f'/{model}?status>"ok"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ['InvalidValue']
    # repeated conditions on the same field are joined with AND logic
    resp = app.get(f'/{model}?count>40&count>10')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # mixed operators are also joined with AND logic
    resp = app.get(f'/{model}?count>40&report_type.lower()="vmi"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # strict comparison: the boundary value itself does not match
    resp = app.get(f'/{model}?count>42')
    data = resp.json()['_data']
    assert len(data) == 0
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_gt_with_nested_date(model, context, app):
    """``recurse()`` lets ``>`` reach a date nested inside notes."""
    row_ids = RowIds(_push_test_data(app, model))
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?recurse(create_date)>"2019-04-19"')
    assert row_ids(response) == [1]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_gte(model, context, app):
    """Greater-or-equal search on an integer property."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    # single field search
    resp = app.get(f'/{model}?count>=40')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # `>=` is rejected for string values
    resp = app.get(f'/{model}?status>="ok"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ['InvalidValue']
    # repeated conditions on the same field are joined with AND logic
    resp = app.get(f'/{model}?count>=40&count>10')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # mixed operators are also joined with AND logic
    resp = app.get(f'/{model}?count>=40&report_type.lower()="vmi"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # inclusive comparison: the boundary value itself matches
    resp = app.get(f'/{model}?count>=42')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ge_with_nested_date(model, context, app):
    """``recurse()`` lets ``>=`` reach a date nested inside notes."""
    first, second, third = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?recurse(create_date)>="2019-04-20"')
    matches = response.json()['_data']
    assert len(matches) == 1
    assert matches[0]['_id'] == second['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_lt(model, context, app):
    """Less-than search on an integer property."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    # single field search
    resp = app.get(f'/{model}?count<12')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r1['_id']
    # `<` is rejected for string values
    resp = app.get(f'/{model}?status<"ok"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ['InvalidValue']
    # repeated conditions on the same field are joined with AND logic
    resp = app.get(f'/{model}?count<20&count>10')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
    # mixed operators are also joined with AND logic
    resp = app.get(f'/{model}?count<50&report_type.lower()="vmi"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # strict comparison: the boundary value itself does not match
    resp = app.get(f'/{model}?count<10')
    data = resp.json()['_data']
    assert len(data) == 0
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_lt_with_nested_date(model, context, app):
    """``recurse()`` lets ``<`` reach a date nested inside notes."""
    first, second, third = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?recurse(create_date)<"2019-02-02"')
    matches = response.json()['_data']
    assert len(matches) == 1
    assert matches[0]['_id'] == third['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_lte(model, context, app):
    """Less-or-equal search on an integer property."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    # single field search
    resp = app.get(f'/{model}?count<=12')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r1['_id']
    # `<=` is rejected for string values
    resp = app.get(f'/{model}?status<="ok"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ['InvalidValue']
    # repeated conditions on the same field are joined with AND logic
    resp = app.get(f'/{model}?count<=20&count>10')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
    # mixed operators are also joined with AND logic
    resp = app.get(f'/{model}?count<=50&report_type.lower()="vmi"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # inclusive comparison: the boundary value itself matches
    resp = app.get(f'/{model}?count<=10')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r1['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_le_with_nested_date(model, context, app):
    """``recurse()`` lets ``<=`` reach a date nested inside notes."""
    first, second, third = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?recurse(create_date)<="2019-02-01"')
    matches = response.json()['_data']
    assert len(matches) == 1
    assert matches[0]['_id'] == third['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne(model, context, app):
    """``!=`` excludes rows whose value equals the operand."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?status!="invalid"')
    assert row_ids(response) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne_lower(model, context, app):
    """``!=`` combined with ``.lower()`` for case-insensitive exclusion."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?status.lower()!="ok"')
    assert row_ids(response) == [1, 2]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne_multiple_props(model, context, app):
    """Several ``!=`` conditions on one field are joined with AND logic."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?count!=10&count!=42')
    assert row_ids(response) == [2]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne_multiple_props_and_logic(model, context, app):
    """``!=`` combines with an exact condition using AND logic."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?status.lower()!="ok"&report_type.lower()="stv"')
    assert row_ids(response) == [2]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne_nested(model, context, app):
    """``!=`` works on nested properties combined with top-level ones."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?notes.create_date!="2019-02-01"&status!="invalid"')
    assert row_ids(response) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne_nested_missing_data(model, context, app):
    """``!=`` on a nested list property when some rows lack the data."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?operating_licenses.license_types!="valid"')
    assert row_ids(response) == [1]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_contains(model, context, app, mocker):
    """``contains()`` matches a substring of the property value."""
    first, second, third = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?report_type.lower().contains("vm")')
    matches = response.json()['_data']
    assert len(matches) == 1
    assert matches[0]['_id'] == second['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_contains_case_insensitive(model, context, app, mocker):
    """``contains()`` combined with ``.lower()`` is case-insensitive."""
    first, second, third = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?report_type.lower().contains("vm")')
    matches = response.json()['_data']
    assert len(matches) == 1
    assert matches[0]['_id'] == second['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_contains_multi_field(model, context, app, mocker):
    """``contains()`` conditions on several fields are joined with AND logic."""
    first, second, third = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?status.contains("valid")&report_type.lower().contains("tv")')
    matches = response.json()['_data']
    assert len(matches) == 1
    assert matches[0]['_id'] == third['_id']
    # case-sensitive variant without .lower()
    response = app.get(f'/{model}?status.contains("valid")&report_type.contains("TV")')
    matches = response.json()['_data']
    assert len(matches) == 1
    assert matches[0]['_id'] == third['_id']
    # two contains() on the same field
    response = app.get(f'/{model}?report_type.lower().contains("vm")&report_type.lower().contains("mi")')
    matches = response.json()['_data']
    assert len(matches) == 1
    assert matches[0]['_id'] == second['_id']
    # contains() combined with an exact match
    response = app.get(f'/{model}?status.contains("valid")&report_type.lower()="vmi"')
    matches = response.json()['_data']
    assert len(matches) == 1
    assert matches[0]['_id'] == second['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_contains_type_check(model, context, app):
    """``contains()`` is rejected for non-string (date) properties."""
    first, second, third = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?recurse(create_date).contains("2019-04-20")')
    assert response.status_code == 400
    assert get_error_codes(response.json()) == ['InvalidValue']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_contains_with_select(model, context, app, mocker):
    """``contains()`` composes with ``select()``; ``always_show_id`` adds ``_id``."""
    first, second, third = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?report_type.lower().contains("vm")&select(count)')
    assert response.status_code == 200
    matches = response.json()['_data']
    assert len(matches) == 1
    assert matches[0] == {'count': 42}
    # with always_show_id enabled, `_id` is added to the selected fields
    mocker.patch.object(context.get('config'), 'always_show_id', True)
    response = app.get(f'/{model}?report_type.lower().contains("vm")&select(count)')
    assert response.status_code == 200
    matches = response.json()['_data']
    assert len(matches) == 1
    assert matches[0] == {'_id': second['_id'], 'count': 42}
    # with always_show_id and no select(), only `_id` is returned
    response = app.get(f'/{model}?report_type.lower().contains("vm")')
    assert response.status_code == 200
    matches = response.json()['_data']
    assert len(matches) == 1
    assert matches[0] == {'_id': second['_id']}
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_select_unknown_property(model, context, app, mocker):
    """Selecting an unknown property is a FieldNotInResource error."""
    _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?select(nothere)')
    assert error(response) == 'FieldNotInResource'
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_select_unknown_property_in_object(model, context, app, mocker):
    """Selecting an unknown nested property is a FieldNotInResource error."""
    _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?select(notes.nothere)')
    assert error(response) == 'FieldNotInResource'
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_startswith(model, context, app):
    """``startswith()`` prefix search, plain, lowered and combined."""
    first, second, third = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?report_type.startswith("VM")')
    matches = response.json()['_data']
    assert len(matches) == 1
    assert matches[0]['_id'] == second['_id']
    # case-insensitive prefix via .lower()
    response = app.get(f'/{model}?report_type.lower().startswith("vm")')
    matches = response.json()['_data']
    assert len(matches) == 1
    assert matches[0]['_id'] == second['_id']
    # multiple startswith() conditions are joined with AND logic
    response = app.get(f'/{model}?status.startswith("in")&report_type.lower().startswith("vm")')
    matches = response.json()['_data']
    assert len(matches) == 1
    assert matches[0]['_id'] == second['_id']
    # startswith() combined with an exact match
    response = app.get(f'/{model}?report_type.lower().startswith("st")&status.lower()="ok"')
    matches = response.json()['_data']
    assert len(matches) == 1
    assert matches[0]['_id'] == first['_id']
    # a substring in the middle of the value is not a prefix match
    response = app.get(f'/{model}?status.startswith("valid")')
    assert response.json()['_data'] == []
    # startswith() is rejected for non-string (date) properties
    response = app.get(f'/{model}?notes.create_date.startswith("2019-04-20")')
    assert response.status_code == 400
    assert get_error_codes(response.json()) == ['InvalidValue']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_nested(model, context, app):
    """Dotted names search inside nested (array-of-object) properties."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    # nested exact search
    resp = app.get(f'/{model}?notes.note="foo bar"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
    # nested exact search, case insensitive
    resp = app.get(f'/{model}?notes.note.lower()="foo bar"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
    # nested exact date search
    resp = app.get(f'/{model}?notes.create_date="2019-03-14"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r1['_id']
    # nested date comparison
    resp = app.get(f'/{model}?notes.create_date>"2019-04-01"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # unknown nested property is an error
    resp = app.get(f'/{model}?notes.foo.bar="baz"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ['FieldNotInResource']
    # nested contains()
    resp = app.get(f'/{model}?notes.note.contains("bar")')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_nested_contains(model, context, app):
    """``contains()`` on a nested list-of-strings property."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?operating_licenses.license_types.contains("lid")')
    assert row_ids(response) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_nested_startswith(model, context, app):
    """``startswith()`` on nested object and nested list properties."""
    app.authmodel(model, ['search'])
    first, second, third = _push_test_data(app, model)
    response = app.get(f'/{model}?notes.note.startswith("fo")')
    matches = response.json()['_data']
    assert len(matches) == 1
    assert matches[0]['_id'] == third['_id']
    response = app.get(f'/{model}?operating_licenses.license_types.startswith("exp")')
    matches = response.json()['_data']
    assert len(matches) == 1
    assert matches[0]['_id'] == second['_id']
def ids(resources):
    """Return the ``_id`` of every resource.

    *resources* may be a response object (its ``_data`` payload is used after
    asserting a 200 status) or an already-decoded list of resource dicts.
    """
    if isinstance(resources, (requests.models.Response, httpx.Response)):
        response = resources
        assert response.status_code == 200, response.json()
        resources = response.json()['_data']
    return [resource['_id'] for resource in resources]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_or(model, context, app):
    """``|`` joins conditions with OR logic."""
    row_ids = RowIds(_push_test_data(app, model))
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?count=42|status.lower()="ok"')
    assert row_ids(response) == [0, 1]
    response = app.get(f'/{model}?count<=10|count=13')
    assert row_ids(response) == [0, 2]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_nested_recurse(model, context, app):
    """``recurse()`` finds a property at any nesting level."""
    first, second, third = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?recurse(note)="foo bar"')
    matches = response.json()['_data']
    assert len(matches) == 1
    assert matches[0]['_id'] == third['_id']
<|reserved_special_token_0|>
@pytest.mark.models('backends/mongo/recurse', 'backends/postgres/recurse')
def test_search_nested_recurse_multiple_props(model, context, app):
    """``recurse()`` matches a name at any level; each org matches only once."""
    org_fi, org_no = ids(_push_test_data(app, model, [
        {
            'title': 'Org',
            'country': 'fi',
            'govids': [
                {'govid': '1', 'country': 'fi'},
                {'govid': '2', 'country': 'se'},
            ],
        },
        {
            'title': 'Org',
            'country': 'no',
            'govids': [
                {'govid': '3', 'country': 'no'},
            ],
        },
    ]))
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?recurse(country)="se"')
    assert ids(response) == [org_fi]
    response = app.get(f'/{model}?recurse(country)="fi"')
    assert ids(response) == [org_fi]
    response = app.get(f'/{model}?recurse(country)="no"')
    assert ids(response) == [org_no]
@pytest.mark.models('backends/mongo/recurse', 'backends/postgres/recurse')
def test_search_recurse_multiple_props_lower(model, app):
    """``recurse()`` combined with ``.lower()`` matches at any nesting level."""
    org_fi, org_no = ids(_push_test_data(app, model, [
        {
            'title': 'Org',
            'country': 'fi',
            'govids': [
                {'govid': '1', 'country': 'FI'},
                {'govid': '2', 'country': 'SE'},
            ],
        },
        {
            'title': 'Org',
            'country': 'no',
            'govids': [
                {'govid': '3', 'country': 'NO'},
            ],
        },
    ]))
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?recurse(country).lower()="se"')
    assert ids(response) == [org_fi]
    response = app.get(f'/{model}?recurse(country).lower()="fi"')
    assert ids(response) == [org_fi]
    response = app.get(f'/{model}?recurse(country).lower()="no"')
    assert ids(response) == [org_no]
def test_search_any(app):
    """``any()`` expands to OR over the listed operand values."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?any("eq",count,10,42)')
    assert row_ids(response) == [0, 1]
    response = app.get(f'/{model}?any("ne",count,42)')
    assert row_ids(response) == [0, 2]
def test_search_any_in_list(app):
    """``any()`` works against a nested list-of-objects property."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?any("eq",notes.note,"hello","world")')
    assert sorted(row_ids(response)) == [0, 1]
    response = app.get(f'/{model}?any("ne",notes.note,"foo bar")')
    assert sorted(row_ids(response)) == [0, 1]
def test_search_any_in_list_of_scalars(app):
    """``any()`` works against a nested list-of-strings property."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?any("eq",operating_licenses.license_types,"valid","invalid","expired")')
    assert sorted(row_ids(response)) == [0, 1]
    response = app.get(f'/{model}?any("ne",operating_licenses.license_types,"expired")')
    assert sorted(row_ids(response)) == [0]
def test_search_any_recurse(app):
    """``any()`` composes with ``recurse()``."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?any("eq",recurse(status),"OK","none")')
    assert row_ids(response) == [0]
def test_search_any_recurse_lower(app):
    """``any()`` composes with ``recurse()`` and ``.lower()``."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?any("eq",recurse(status).lower(),"ok","none")')
    assert row_ids(response) == [0]
def test_search_any_contains(app):
    """``any()`` with the ``contains`` operator is an OR of substring tests."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?any("contains",status,"inv","val","lid")')
    assert sorted(row_ids(response)) == [1, 2]
def test_search_any_contains_nested(app):
    """``any("contains", ...)`` works on a nested property."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?any("contains",notes.note,"hel","wor")')
    assert sorted(row_ids(response)) == [0, 1]
def test_search_any_contains_recurse_lower(app):
    """``any("contains", ...)`` composes with ``recurse()`` and ``.lower()``."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?any("contains",recurse(status).lower(),"o","k")')
    assert sorted(row_ids(response)) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_id_contains(model, app):
    """``contains()`` works on the ``_id`` meta property."""
    app.authmodel(model, ['search', 'getall'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?_id.contains("-")')
    assert sorted(row_ids(response)) == [0, 1, 2]
    # a fragment of one row's UUID matches only that row
    fragment = row_ids[0][5:10]
    response = app.get(f'/{model}?_id.contains("{fragment}")')
    assert row_ids(response) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_id_not_contains(model, app):
    """``_id.contains()`` with a fragment no UUID has matches nothing."""
    app.authmodel(model, ['search', 'getall'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?_id.contains("AAAAA")')
    assert row_ids(response) == []
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_id_startswith(model, app):
    """``startswith()`` works on the ``_id`` meta property."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    prefix = row_ids[0][:5]
    response = app.get(f'/{model}?_id.startswith("{prefix}")')
    assert row_ids(response) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_id_not_startswith(model, app):
    """A mid-UUID fragment is not a prefix match on ``_id``."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    fragment = row_ids[0][5:10]
    response = app.get(f'/{model}?_id.startswith("{fragment}")')
    assert row_ids(response) == []
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_revision_contains(model, app):
    """``contains()`` works on the ``_revision`` meta property."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?_revision.contains("-")')
    assert sorted(row_ids(response)) == [0, 1, 2]
<|reserved_special_token_0|>
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_group(model, app):
    """Parenthesized groups of conditions are supported."""
    app.authmodel(model, ['search', 'getone'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?(report_type="STV"&status="OK")')
    assert row_ids(response) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_select_in_or(model, app):
    """``select()`` combines with a grouped OR expression."""
    app.authmodel(model, ['search', 'getone'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?(report_type="STV"|status="OK")&select(_id)')
    assert row_ids(response) == [0, 2]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_lower_contains(model, app):
    """``.lower().contains()`` chains case folding with substring match."""
    app.authmodel(model, ['search', 'getone'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?report_type.lower().contains("st")')
    assert row_ids(response) == [0, 2]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_null(model, app):
    """``=null`` matches rows where the property is missing."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model, [{'status': 'OK'}, {}]))
    response = app.get(f'/{model}?status=null')
    assert row_ids(response) == [1]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_not_null(model, app):
    """``!=null`` matches rows where the property has a value."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model, [{'status': 'OK'}, {}]))
    response = app.get(f'/{model}?status!=null')
    assert row_ids(response) == [0]
@pytest.mark.parametrize('backend', ['default', 'mongo'])
def test_extra_fields(postgresql, mongo, backend, rc, tmp_path, request):
    """Rows written under a wider schema stay readable after the manifest
    loses a column: the removed property is simply not returned."""
    rc = rc.fork({
        'backends': [backend],
        'manifests.default': {
            'type': 'tabular',
            'path': str(tmp_path / 'manifest.csv'),
            'backend': backend,
        },
    })
    # Create a model with two properties and insert three rows.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable(
        """
    m | property | type
    extrafields |
                | code     | string
                | name     | string
    """
    ))
    context = create_test_context(rc)
    request.addfinalizer(context.wipe_all)
    app = create_test_client(context)
    app.authmodel('extrafields', ['insert'])
    resp = app.post('/extrafields', json={'_data': [
        {'_op': 'insert', 'code': 'lt', 'name': 'Lietuva'},
        {'_op': 'insert', 'code': 'lv', 'name': 'Latvija'},
        {'_op': 'insert', 'code': 'ee', 'name': 'Estija'},
    ]})
    assert resp.status_code == 200, resp.json()
    # Reload with a narrower manifest: the `code` column is gone.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable(
        """
    m | property | type
    extrafields |
                | name     | string
    """
    ))
    context = create_test_context(rc)
    app = create_test_client(context)
    app.authmodel('extrafields', ['getall', 'getone'])
    resp = app.get('/extrafields')
    assert listdata(resp, sort=True) == ['Estija', 'Latvija', 'Lietuva']
    pk = resp.json()['_data'][0]['_id']
    resp = app.get(f'/extrafields/{pk}')
    data = resp.json()
    assert resp.status_code == 200, data
    assert take(data) == {'name': 'Lietuva'}
@pytest.mark.parametrize('backend', ['mongo'])
def test_missing_fields(postgresql, mongo, backend, rc, tmp_path):
    """Rows written under a narrower schema read back with ``None`` for
    properties added to the manifest afterwards."""
    rc = rc.fork({
        'backends': [backend],
        'manifests.default': {
            'type': 'tabular',
            'path': str(tmp_path / 'manifest.csv'),
            'backend': backend,
        },
    })
    # Create a model with a single property and insert three rows.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable(
        """
    m | property | type
    missingfields |
                  | code     | string
    """
    ))
    context = create_test_context(rc)
    app = create_test_client(context)
    app.authmodel('missingfields', ['insert'])
    resp = app.post('/missingfields', json={'_data': [
        {'_op': 'insert', 'code': 'lt'},
        {'_op': 'insert', 'code': 'lv'},
        {'_op': 'insert', 'code': 'ee'},
    ]})
    assert resp.status_code == 200, resp.json()
    # Reload with an extra `name` column the existing rows never had.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable(
        """
    m | property | type
    missingfields |
                  | code     | string
                  | name     | string
    """
    ))
    context = create_test_context(rc)
    app = create_test_client(context)
    app.authmodel('missingfields', ['search', 'getone'])
    resp = app.get('/missingfields?select(_id,code,name)')
    assert listdata(resp, sort=True) == [('ee', None), ('lt', None), ('lv', None)]
    pk = resp.json()['_data'][0]['_id']
    resp = app.get(f'/missingfields/{pk}')
    data = resp.json()
    assert resp.status_code == 200, data
    assert take(data) == {'code': 'lt'}
def test_base_select(rc, postgresql, request):
    """``select()`` can pull properties from a model's base via ``_base.*``."""
    context = bootstrap_manifest(rc, """
    d | r | b | m | property | type | ref
    datasets/gov/example/base | |
    | |
    | | | Location | |
    | | | | id | integer |
    | | | | name | string |
    | | | | type | string |
    | |
    | | Location | |
    | | | City | |
    | | | | id | |
    | | | | name | string |
    | | | | population | integer |
    """, backend=postgresql, request=request)
    app = create_test_client(context)
    app.authorize(['spinta_set_meta_fields'])
    app.authmodel('datasets/gov/example/base/Location', ['insert', 'delete'])
    app.authmodel('datasets/gov/example/base/City',
                  ['insert', 'delete', 'getall', 'search'])
    # Base and derived rows share the same `_id`.
    shared_id = str(uuid.uuid4())
    app.post('/datasets/gov/example/base/Location', json={
        '_id': shared_id, 'id': 1, 'name': 'Base location', 'type': 'city'})
    app.post('/datasets/gov/example/base/City', json={
        '_id': shared_id, 'name': 'City', 'population': 100})
    resp = app.get(
        '/datasets/gov/example/base/City'
        '?select(id,name,_base.name,population,_base.type)')
    assert resp.json()['_data'] == [{
        '_base': {'name': 'Base location', 'type': 'city'},
        'id': 1,
        'name': 'City',
        'population': 100,
    }]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_select_revision(model, app):
    """``select(_revision)`` on a jsonl export returns only the revision."""
    app.authmodel(model, ['search', 'getone', 'getall'])
    ids = RowIds(_push_test_data(app, model))
    id0 = ids[0]
    # fetch one row to learn its current revision
    resp = app.get(f'/{model}/{id0}')
    revision = resp.json()['_revision']
    # jsonl export of a single row, selecting only `_revision`
    resp = app.get(f'/{model}/:format/jsonl?limit(1)&select(_revision)')
    assert json.loads(resp.content) == {'_revision': revision}
<|reserved_special_token_1|>
import uuid
import json
import pytest
import requests
import httpx
from spinta.testing.manifest import bootstrap_manifest
from spinta.utils.data import take
from spinta.testing.utils import error
from spinta.testing.utils import get_error_codes, RowIds
from spinta.testing.context import create_test_context
from spinta.testing.client import create_test_client
from spinta.manifests.tabular.helpers import striptable
from spinta.testing.tabular import create_tabular_manifest
from spinta.testing.data import listdata
# Default fixture rows used by `_push_test_data` when no explicit data is
# given: three reports distinguishable by status, report_type, count and
# nested notes / operating_licenses (the third row has no licenses).
test_data = [
    {
        '_type': 'report',
        'status': 'OK',
        'report_type': 'STV',
        'count': 10,
        'notes': [{
            'note': 'hello',
            'note_type': 'simple',
            'create_date': '2019-03-14',
        }],
        'operating_licenses': [{
            'license_types': ['valid', 'invalid'],
        }],
    },
    {
        '_type': 'report',
        'status': 'invalid',
        'report_type': 'VMI',
        'count': 42,
        'notes': [{
            'note': 'world',
            'note_type': 'daily',
            'create_date': '2019-04-20',
        }],
        'operating_licenses': [{
            'license_types': ['expired'],
        }],
    },
    {
        '_type': 'report',
        'status': 'invalid',
        'report_type': 'STV',
        'count': 13,
        'notes': [{
            'note': 'foo bar',
            'note_type': 'important',
            'create_date': '2019-02-01',
        }],
    },
]
def _push_test_data(app, model, data=None):
    """Insert *data* (or the module-level ``test_data``) into *model* via the
    batch endpoint and return the list of created resources."""
    app.authmodel(model, ['insert'])
    resp = app.post('/', json={'_data': [
        {
            **res,
            '_op': 'insert',
            '_type': model,
        }
        for res in data or test_data
    ]})
    assert resp.status_code == 200, resp.json()
    resp = resp.json()
    assert '_data' in resp, resp
    return resp['_data']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_exact(model, context, app):
    """Exact match on a string property returns only the matching row."""
    r1, r2, r3, = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    # single field search
    resp = app.get(f'/{model}?status="OK"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r1['_id']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_exact_lower(model, context, app):
    """Case-insensitive exact match via ``.lower()``."""
    r1, r2, r3, = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?status.lower()="ok"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r1['_id']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_exact_non_string(model, context, app):
    """Exact search against a non-string (integer) property."""
    r1, r2, r3, = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    # single field search, non string type
    resp = app.get(f'/{model}?count=13')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
    # single field fsearch, non string type
    resp = app.get(f'/{model}?count="abc"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ["InvalidValue"]
    # single non-existing field value search
    resp = app.get(f'/{model}?status="o"')
    data = resp.json()['_data']
    assert len(data) == 0
    # single non-existing field search
    resp = app.get(f'/{model}?state="o"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ["FieldNotInResource"]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_exact_multiple_props(model, context, app):
    """Two exact conditions on different fields are joined with AND logic."""
    r1, r2, r3, = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?status.lower()="invalid"&report_type.lower()="stv"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_exact_same_prop_multiple_times(model, context, app):
    """Contradictory exact conditions on one field (AND logic) match nothing."""
    r1, r2, r3, = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?status.lower()="invalid"&status.lower()="ok"')
    data = resp.json()['_data']
    assert len(data) == 0
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_gt(model, context, app):
    """Greater-than search on an integer property."""
    r1, r2, r3, = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    # single field search
    resp = app.get(f'/{model}?count>40')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # search for string value
    resp = app.get(f'/{model}?status>"ok"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ["InvalidValue"]
    # multi field search
    # test if operators are joined with AND logic
    resp = app.get(f'/{model}?count>40&count>10')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # multi field and multi operator search
    # test if operators are joined with AND logic
    resp = app.get(f'/{model}?count>40&report_type.lower()="vmi"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # test `greater_than` works as expected
    resp = app.get(f'/{model}?count>42')
    data = resp.json()['_data']
    assert len(data) == 0
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_gt_with_nested_date(model, context, app):
    """``recurse()`` lets ``>`` reach a date nested inside notes."""
    ids = RowIds(_push_test_data(app, model))
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?recurse(create_date)>"2019-04-19"')
    assert ids(resp) == [1]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_gte(model, context, app):
    """Greater-or-equal search on an integer property."""
    r1, r2, r3, = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    # single field search
    resp = app.get(f'/{model}?count>=40')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # search for string value
    resp = app.get(f'/{model}?status>="ok"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ["InvalidValue"]
    # multi field search
    # test if operators are joined with AND logic
    resp = app.get(f'/{model}?count>=40&count>10')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # multi field and multi operator search
    # test if operators are joined with AND logic
    resp = app.get(f'/{model}?count>=40&report_type.lower()="vmi"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # test `greater_or_equal` is inclusive: the boundary value matches
    resp = app.get(f'/{model}?count>=42')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_ge_with_nested_date(model, context, app):
    """``recurse()`` lets ``>=`` reach a date nested inside notes."""
    r1, r2, r3, = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?recurse(create_date)>="2019-04-20"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_lt(model, context, app):
    """`<` operator: numbers, string rejection and AND-joined combinations."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    def matched(response):
        # Collect the ids of every returned row.
        return [row['_id'] for row in response.json()['_data']]

    # Single numeric field.
    assert matched(app.get(f'/{model}?count<12')) == [r1['_id']]
    # Ordering comparison against a string value is rejected.
    response = app.get(f'/{model}?status<"ok"')
    assert response.status_code == 400
    assert get_error_codes(response.json()) == ["InvalidValue"]
    # Two conditions on the same field are joined with AND logic.
    assert matched(app.get(f'/{model}?count<20&count>10')) == [r3['_id']]
    # Operators across different fields are AND-ed too.
    assert matched(app.get(f'/{model}?count<50&report_type.lower()="vmi"')) == [r2['_id']]
    # The bound is exclusive: no count is strictly below 10.
    assert matched(app.get(f'/{model}?count<10')) == []
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_lt_with_nested_date(model, context, app):
    """`recurse()` lookup combined with a strict `<` comparison on a date."""
    _, _, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?recurse(create_date)<"2019-02-02"')
    rows = response.json()['_data']
    assert [row['_id'] for row in rows] == [r3['_id']]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_lte(model, context, app):
    """`<=` operator: numbers, string rejection and AND-joined combinations."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    def matched(response):
        # Collect the ids of every returned row.
        return [row['_id'] for row in response.json()['_data']]

    # Single numeric field.
    assert matched(app.get(f'/{model}?count<=12')) == [r1['_id']]
    # Ordering comparison against a string value is rejected.
    response = app.get(f'/{model}?status<="ok"')
    assert response.status_code == 400
    assert get_error_codes(response.json()) == ["InvalidValue"]
    # Two conditions on the same field are joined with AND logic.
    assert matched(app.get(f'/{model}?count<=20&count>10')) == [r3['_id']]
    # Operators across different fields are AND-ed too.
    assert matched(app.get(f'/{model}?count<=50&report_type.lower()="vmi"')) == [r2['_id']]
    # The bound is inclusive: 10 <= 10 still matches.
    assert matched(app.get(f'/{model}?count<=10')) == [r1['_id']]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_le_with_nested_date(model, context, app):
    """`recurse()` lookup combined with an inclusive `<=` on a date."""
    _, _, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?recurse(create_date)<="2019-02-01"')
    rows = response.json()['_data']
    assert [row['_id'] for row in rows] == [r3['_id']]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne(model, context, app):
    """`!=` on a single string field."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?status!="invalid"')
    assert row_ids(response) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne_lower(model, context, app):
    """`!=` combined with case-insensitive `.lower()`."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?status.lower()!="ok"')
    assert row_ids(response) == [1, 2]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne_multiple_props(model, context, app):
    """Two `!=` conditions on the same field are joined with AND logic."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?count!=10&count!=42')
    assert row_ids(response) == [2]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne_multiple_props_and_logic(model, context, app):
    """`!=` and `=` conditions across fields are joined with AND logic."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?status.lower()!="ok"&report_type.lower()="stv"')
    assert row_ids(response) == [2]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne_nested(model, context, app):
    """`!=` works inside a nested structure."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?notes.create_date!="2019-02-01"&status!="invalid"')
    assert row_ids(response) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_ne_nested_missing_data(model, context, app):
    """`!=` on a nested list when not every resource carries the field."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?operating_licenses.license_types!="valid"')
    assert row_ids(response) == [1]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_contains(model, context, app, mocker):
    """`contains()` substring match on a lower-cased field."""
    _, r2, _ = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?report_type.lower().contains("vm")')
    rows = response.json()['_data']
    assert [row['_id'] for row in rows] == [r2['_id']]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_contains_case_insensitive(model, context, app, mocker):
    """Case-insensitive `contains()` via `.lower()`.

    NOTE(review): currently issues the same query as `test_search_contains`;
    consider a mixed-case needle to make the case-insensitivity explicit.
    """
    _, r2, _ = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?report_type.lower().contains("vm")')
    rows = response.json()['_data']
    assert [row['_id'] for row in rows] == [r2['_id']]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_contains_multi_field(model, context, app, mocker):
    """`contains()` combined across fields and with other operators."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    def matched(response):
        # Collect the ids of every returned row.
        return [row['_id'] for row in response.json()['_data']]

    # Two `contains()` conditions on different fields are AND-ed.
    assert matched(app.get(f'/{model}?status.contains("valid")&report_type.lower().contains("tv")')) == [r3['_id']]
    # Case-sensitive variant of the same combination.
    assert matched(app.get(f'/{model}?status.contains("valid")&report_type.contains("TV")')) == [r3['_id']]
    # Two `contains()` conditions on the *same* field are AND-ed too.
    assert matched(app.get(f'/{model}?report_type.lower().contains("vm")&report_type.lower().contains("mi")')) == [r2['_id']]
    # `contains()` mixed with an exact-match operator.
    assert matched(app.get(f'/{model}?status.contains("valid")&report_type.lower()="vmi"')) == [r2['_id']]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_contains_type_check(model, context, app):
    """`contains()` is rejected on non-string (date) properties."""
    _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?recurse(create_date).contains("2019-04-20")')
    assert response.status_code == 400
    assert get_error_codes(response.json()) == ["InvalidValue"]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_contains_with_select(model, context, app, mocker):
    """`contains()` combined with `select()` and the `always_show_id` option."""
    _, r2, _ = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    # Plain select: only the requested property comes back.
    response = app.get(f'/{model}?report_type.lower().contains("vm")&select(count)')
    assert response.status_code == 200
    assert response.json()['_data'] == [{'count': 42}]

    # With always_show_id enabled, `_id` is added to the selection.
    mocker.patch.object(context.get('config'), 'always_show_id', True)
    response = app.get(f'/{model}?report_type.lower().contains("vm")&select(count)')
    assert response.status_code == 200
    assert response.json()['_data'] == [{'_id': r2['_id'], 'count': 42}]

    # Without an explicit select, always_show_id returns just the id.
    response = app.get(f'/{model}?report_type.lower().contains("vm")')
    assert response.status_code == 200
    assert response.json()['_data'] == [{'_id': r2['_id']}]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_select_unknown_property(model, context, app, mocker):
    """Selecting a property the model does not declare is an error."""
    _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?select(nothere)')
    assert error(response) == 'FieldNotInResource'
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_select_unknown_property_in_object(model, context, app, mocker):
    """Selecting an undeclared property nested inside an object is an error."""
    _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?select(notes.nothere)')
    assert error(response) == 'FieldNotInResource'
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_startswith(model, context, app):
    """`startswith()`: case variants, AND combinations and type checks."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    def matched(response):
        # Collect the ids of every returned row.
        return [row['_id'] for row in response.json()['_data']]

    # Case-sensitive single-field prefix match.
    assert matched(app.get(f'/{model}?report_type.startswith("VM")')) == [r2['_id']]
    # Case-insensitive variant via `.lower()`.
    assert matched(app.get(f'/{model}?report_type.lower().startswith("vm")')) == [r2['_id']]
    # Prefix conditions on different fields are AND-ed.
    assert matched(app.get(f'/{model}?status.startswith("in")&report_type.lower().startswith("vm")')) == [r2['_id']]
    # `startswith()` mixed with an exact-match operator.
    assert matched(app.get(f'/{model}?report_type.lower().startswith("st")&status.lower()="ok"')) == [r1['_id']]
    # Sanity check: the needle must match from the very start.
    assert matched(app.get(f'/{model}?status.startswith("valid")')) == []
    # `startswith()` is rejected on non-string (date) properties.
    response = app.get(f'/{model}?notes.create_date.startswith("2019-04-20")')
    assert response.status_code == 400
    assert get_error_codes(response.json()) == ["InvalidValue"]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_nested(model, context, app):
    """Operators applied to properties nested inside objects/lists."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    def matched(response):
        # Collect the ids of every returned row.
        return [row['_id'] for row in response.json()['_data']]

    # Nested exact match.
    assert matched(app.get(f'/{model}?notes.note="foo bar"')) == [r3['_id']]
    # Nested exact match, case insensitive.
    assert matched(app.get(f'/{model}?notes.note.lower()="foo bar"')) == [r3['_id']]
    # Nested exact match on a date.
    assert matched(app.get(f'/{model}?notes.create_date="2019-03-14"')) == [r1['_id']]
    # Nested `>` comparison.
    assert matched(app.get(f'/{model}?notes.create_date>"2019-04-01"')) == [r2['_id']]
    # A nested path that does not exist is an error.
    response = app.get(f'/{model}?notes.foo.bar="baz"')
    assert response.status_code == 400
    assert get_error_codes(response.json()) == ["FieldNotInResource"]
    # Nested `contains()`.
    assert matched(app.get(f'/{model}?notes.note.contains("bar")')) == [r3['_id']]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_nested_contains(model, context, app):
    """`contains()` on a scalar value inside a nested list."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?operating_licenses.license_types.contains("lid")')
    assert row_ids(response) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_nested_startswith(model, context, app):
    """`startswith()` on nested object and nested-list properties."""
    app.authmodel(model, ['search'])
    _, r2, r3 = _push_test_data(app, model)

    def matched(response):
        # Collect the ids of every returned row.
        return [row['_id'] for row in response.json()['_data']]

    # Prefix match inside an object list.
    assert matched(app.get(f'/{model}?notes.note.startswith("fo")')) == [r3['_id']]
    # Prefix match on a scalar list inside an object list.
    assert matched(app.get(f'/{model}?operating_licenses.license_types.startswith("exp")')) == [r2['_id']]
def ids(resources):
    """Return the `_id` of each resource.

    Accepts either an already-decoded list of resource dicts or an HTTP
    response object, which is first checked for a 200 status and unwrapped.
    """
    if isinstance(resources, (requests.models.Response, httpx.Response)):
        response = resources
        assert response.status_code == 200, response.json()
        resources = response.json()['_data']
    return [resource['_id'] for resource in resources]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_or(model, context, app):
    """`|` joins conditions with OR logic."""
    row_ids = RowIds(_push_test_data(app, model))
    app.authmodel(model, ['search'])
    assert row_ids(app.get(f'/{model}?count=42|status.lower()="ok"')) == [0, 1]
    assert row_ids(app.get(f'/{model}?count<=10|count=13')) == [0, 2]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_nested_recurse(model, context, app):
    """`recurse()` finds a property regardless of its nesting level."""
    _, _, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?recurse(note)="foo bar"')
    rows = response.json()['_data']
    assert [row['_id'] for row in rows] == [r3['_id']]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_nested_recurse_lower(model, context, app):
    """`recurse()` combined with case-insensitive `.lower()`."""
    r1, _, _ = ids(_push_test_data(app, model))
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?recurse(status).lower()="ok"')
    assert ids(response) == [r1]
@pytest.mark.models('backends/mongo/recurse', 'backends/postgres/recurse')
def test_search_nested_recurse_multiple_props(model, context, app):
    """`recurse()` matches a property name at every nesting level at once."""
    payload = [
        {
            'title': "Org",
            'country': 'fi',
            'govids': [
                {'govid': '1', 'country': 'fi'},
                {'govid': '2', 'country': 'se'},
            ]
        },
        {
            'title': "Org",
            'country': 'no',
            'govids': [
                {'govid': '3', 'country': 'no'},
            ]
        },
    ]
    r1, r2 = ids(_push_test_data(app, model, payload))
    app.authmodel(model, ['search'])
    # 'se' exists only inside the first record's govids list.
    assert ids(app.get(f'/{model}?recurse(country)="se"')) == [r1]
    # 'fi' exists both at top level and nested, still one match.
    assert ids(app.get(f'/{model}?recurse(country)="fi"')) == [r1]
    assert ids(app.get(f'/{model}?recurse(country)="no"')) == [r2]
@pytest.mark.models('backends/mongo/recurse', 'backends/postgres/recurse')
def test_search_recurse_multiple_props_lower(model, app):
    """`recurse()` with `.lower()` across multiple nesting levels."""
    payload = [
        {
            'title': "Org",
            'country': 'fi',
            'govids': [
                {'govid': '1', 'country': 'FI'},
                {'govid': '2', 'country': 'SE'},
            ]
        },
        {
            'title': "Org",
            'country': 'no',
            'govids': [
                {'govid': '3', 'country': 'NO'},
            ]
        },
    ]
    r1, r2 = ids(_push_test_data(app, model, payload))
    app.authmodel(model, ['search'])
    # Nested values are upper case; `.lower()` still matches them.
    assert ids(app.get(f'/{model}?recurse(country).lower()="se"')) == [r1]
    assert ids(app.get(f'/{model}?recurse(country).lower()="fi"')) == [r1]
    assert ids(app.get(f'/{model}?recurse(country).lower()="no"')) == [r2]
# TODO: add mongo
def test_search_any(app):
    """`any("eq"/"ne", ...)` matches a field against several alternatives."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    assert row_ids(app.get(f'/{model}?any("eq",count,10,42)')) == [0, 1]
    assert row_ids(app.get(f'/{model}?any("ne",count,42)')) == [0, 2]
# TODO: add mongo
def test_search_any_in_list(app):
    """`any()` applied to a property nested inside a list of objects."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    assert sorted(row_ids(app.get(f'/{model}?any("eq",notes.note,"hello","world")'))) == [0, 1]
    assert sorted(row_ids(app.get(f'/{model}?any("ne",notes.note,"foo bar")'))) == [0, 1]
# TODO: add mongo
def test_search_any_in_list_of_scalars(app):
    """`any()` applied to a list of scalar values."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    assert sorted(row_ids(app.get(f'/{model}?any("eq",operating_licenses.license_types,"valid","invalid","expired")'))) == [0, 1]
    assert sorted(row_ids(app.get(f'/{model}?any("ne",operating_licenses.license_types,"expired")'))) == [0]
# TODO: add mongo
def test_search_any_recurse(app):
    """`any()` where the field argument is a `recurse()` expression."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    assert row_ids(app.get(f'/{model}?any("eq",recurse(status),"OK","none")')) == [0]
# TODO: add mongo
def test_search_any_recurse_lower(app):
    """`any()` over `recurse()` combined with `.lower()`."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    assert row_ids(app.get(f'/{model}?any("eq",recurse(status).lower(),"ok","none")')) == [0]
# TODO: add mongo
def test_search_any_contains(app):
    """`any("contains", ...)` with several substring alternatives."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    assert sorted(row_ids(app.get(f'/{model}?any("contains",status,"inv","val","lid")'))) == [1, 2]
# TODO: add mongo
def test_search_any_contains_nested(app):
    """`any("contains", ...)` on a property nested inside a list."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    assert sorted(row_ids(app.get(f'/{model}?any("contains",notes.note,"hel","wor")'))) == [0, 1]
# TODO: add mongo
def test_search_any_contains_recurse_lower(app):
    """`any("contains", ...)` over `recurse()` with `.lower()`."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    assert sorted(row_ids(app.get(f'/{model}?any("contains",recurse(status).lower(),"o","k")'))) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_id_contains(model, app):
    """`contains()` works on the `_id` meta property."""
    app.authmodel(model, ['search', 'getall'])
    row_ids = RowIds(_push_test_data(app, model))
    # Every UUID contains a dash, so all rows match.
    assert sorted(row_ids(app.get(f'/{model}?_id.contains("-")'))) == [0, 1, 2]
    # A fragment cut from the first row's id matches only that row.
    fragment = row_ids[0][5:10]
    assert row_ids(app.get(f'/{model}?_id.contains("{fragment}")')) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_id_not_contains(model, app):
    """`contains()` on `_id` yields nothing for an absent fragment."""
    app.authmodel(model, ['search', 'getall'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?_id.contains("AAAAA")')
    assert row_ids(response) == []
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_id_startswith(model, app):
    """`startswith()` works on the `_id` meta property."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    prefix = row_ids[0][:5]
    response = app.get(f'/{model}?_id.startswith("{prefix}")')
    assert row_ids(response) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_id_not_startswith(model, app):
    """`startswith()` on `_id` does not match an interior fragment."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    # Take a slice from the *middle* of the id, so it is not a prefix.
    fragment = row_ids[0][5:10]
    response = app.get(f'/{model}?_id.startswith("{fragment}")')
    assert row_ids(response) == []
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_revision_contains(model, app):
    """`contains()` works on the `_revision` meta property."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    # Every revision UUID contains a dash, so all rows match.
    response = app.get(f'/{model}?_revision.contains("-")')
    assert sorted(row_ids(response)) == [0, 1, 2]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_revision_startswith(model, app):
    """`startswith()` works on the `_revision` meta property."""
    app.authmodel(model, ['search', 'getone'])
    row_ids = RowIds(_push_test_data(app, model))
    # Fetch the first row to learn its revision, then search by its prefix.
    first = app.get(f'/{model}/{row_ids[0]}')
    prefix = first.json()['_revision'][:5]
    response = app.get(f'/{model}?_revision.startswith("{prefix}")')
    assert row_ids(response) == [0]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_group(model, app):
    """Parenthesised groups of AND-ed conditions."""
    app.authmodel(model, ['search', 'getone'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?(report_type="STV"&status="OK")')
    assert row_ids(response) == [0]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_select_in_or(model, app):
    """An OR group combined with `select(_id)`.

    The query has no `sort()`, so result order is backend-dependent — the
    previous order-sensitive assertion was flaky (sometimes `[2, 0]`).
    Compare the matched ids order-insensitively instead.
    """
    app.authmodel(model, ['search', 'getone'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?(report_type="STV"|status="OK")&select(_id)')
    assert sorted(ids(resp)) == [0, 2]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_lower_contains(model, app):
    """`contains()` on a lower-cased field matching several rows.

    The query has no `sort()`, so result order is backend-dependent — the
    previous order-sensitive assertion was flaky (sometimes `[2, 0]`).
    Compare the matched ids order-insensitively instead.
    """
    app.authmodel(model, ['search', 'getone'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?report_type.lower().contains("st")')
    assert sorted(ids(resp)) == [0, 2]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_null(model, app):
    """`=null` matches rows where the property was never set."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model, [
        {'status': 'OK'},
        {},
    ]))
    response = app.get(f'/{model}?status=null')
    assert row_ids(response) == [1]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_search_not_null(model, app):
    """`!=null` matches only rows where the property has a value."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model, [
        {'status': 'OK'},
        {},
    ]))
    response = app.get(f'/{model}?status!=null')
    assert row_ids(response) == [0]
@pytest.mark.parametrize('backend', ['default', 'mongo'])
def test_extra_fields(postgresql, mongo, backend, rc, tmp_path, request):
    # Rows stored with more properties than the currently loaded manifest
    # declares: the extra stored fields must be silently dropped from
    # responses rather than causing an error.
    rc = rc.fork({
        'backends': [backend],
        'manifests.default': {
            'type': 'tabular',
            'path': str(tmp_path / 'manifest.csv'),
            'backend': backend,
        },
    })
    # Create data into a extrafields model with code and name properties.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''
    m | property | type
    extrafields |
    | code | string
    | name | string
    '''))
    context = create_test_context(rc)
    request.addfinalizer(context.wipe_all)
    app = create_test_client(context)
    app.authmodel('extrafields', ['insert'])
    resp = app.post('/extrafields', json={'_data': [
        {'_op': 'insert', 'code': 'lt', 'name': 'Lietuva'},
        {'_op': 'insert', 'code': 'lv', 'name': 'Latvija'},
        {'_op': 'insert', 'code': 'ee', 'name': 'Estija'},
    ]})
    assert resp.status_code == 200, resp.json()
    # Now try to read from same model, but loaded with just one property.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''
    m | property | type
    extrafields |
    | name | string
    '''))
    context = create_test_context(rc)
    app = create_test_client(context)
    app.authmodel('extrafields', ['getall', 'getone'])
    resp = app.get('/extrafields')
    assert listdata(resp, sort=True) == [
        "Estija",
        "Latvija",
        "Lietuva",
    ]
    # getone must also drop the no-longer-declared `code` field.
    pk = resp.json()['_data'][0]['_id']
    resp = app.get(f'/extrafields/{pk}')
    data = resp.json()
    assert resp.status_code == 200, data
    assert take(data) == {'name': 'Lietuva'}
@pytest.mark.parametrize('backend', ['mongo'])
def test_missing_fields(postgresql, mongo, backend, rc, tmp_path):
    # Opposite of test_extra_fields: the manifest declares a property that
    # was never stored — reads must return None for it, not fail.
    rc = rc.fork({
        'backends': [backend],
        'manifests.default': {
            'type': 'tabular',
            'path': str(tmp_path / 'manifest.csv'),
            'backend': backend,
        },
    })
    # Create data into a extrafields model with code and name properties.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''
    m | property | type
    missingfields |
    | code | string
    '''))
    context = create_test_context(rc)
    app = create_test_client(context)
    app.authmodel('missingfields', ['insert'])
    resp = app.post('/missingfields', json={'_data': [
        {'_op': 'insert', 'code': 'lt'},
        {'_op': 'insert', 'code': 'lv'},
        {'_op': 'insert', 'code': 'ee'},
    ]})
    assert resp.status_code == 200, resp.json()
    # Now try to read from same model, but loaded with just one property.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''
    m | property | type
    missingfields |
    | code | string
    | name | string
    '''))
    context = create_test_context(rc)
    app = create_test_client(context)
    app.authmodel('missingfields', ['search', 'getone'])
    # The never-stored `name` comes back as None in search results.
    resp = app.get('/missingfields?select(_id,code,name)')
    assert listdata(resp, sort=True) == [
        ('ee', None),
        ('lt', None),
        ('lv', None),
    ]
    # getone simply omits the missing field.
    pk = resp.json()['_data'][0]['_id']
    resp = app.get(f'/missingfields/{pk}')
    data = resp.json()
    assert resp.status_code == 200, data
    assert take(data) == {'code': 'lt'}
def test_base_select(rc, postgresql, request):
    # A model (`City`) that declares `Location` as its base: selecting
    # `_base.*` properties must pull values from the base row that shares
    # the same `_id`.
    context = bootstrap_manifest(rc, '''
    d | r | b | m | property | type | ref
    datasets/gov/example/base | |
    | |
    | | | Location | |
    | | | | id | integer |
    | | | | name | string |
    | | | | type | string |
    | |
    | | Location | |
    | | | City | |
    | | | | id | |
    | | | | name | string |
    | | | | population | integer |
    ''', backend=postgresql, request=request)
    app = create_test_client(context)
    app.authorize(['spinta_set_meta_fields'])
    app.authmodel('datasets/gov/example/base/Location', ['insert', 'delete'])
    app.authmodel('datasets/gov/example/base/City', ['insert', 'delete', 'getall', 'search'])
    # Base and derived rows are linked by sharing the same `_id`.
    _id = str(uuid.uuid4())
    app.post('/datasets/gov/example/base/Location', json={
        '_id': _id,
        'id': 1,
        'name': 'Base location',
        'type': 'city'
    })
    app.post('/datasets/gov/example/base/City', json={
        '_id': _id,
        'name': 'City',
        'population': 100
    })
    # `_base.name` and `_base.type` are resolved from the Location row.
    resp = app.get('/datasets/gov/example/base/City?select(id,name,_base.name,population,_base.type)')
    assert resp.json()['_data'] == [
        {
            '_base': {'name': 'Base location', 'type': 'city'},
            'id': 1,
            'name': 'City',
            'population': 100
        }
    ]
@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')
def test_select_revision(model, app):
    """`select(_revision)` through the jsonl format returns the stored value."""
    app.authmodel(model, ['search', 'getone', 'getall'])
    row_ids = RowIds(_push_test_data(app, model))
    # Learn the first row's revision via getone, then select it back.
    first = app.get(f'/{model}/{row_ids[0]}')
    expected = first.json()['_revision']
    response = app.get(f'/{model}/:format/jsonl?limit(1)&select(_revision)')
    assert json.loads(response.content) == {
        '_revision': expected
    }
|
flexible
|
{
"blob_id": "57e9c1a4ac57f68e0e73c2c67c6828de8efb1b16",
"index": 3903,
"step-1": "<mask token>\n\n\ndef _push_test_data(app, model, data=None):\n app.authmodel(model, ['insert'])\n resp = app.post('/', json={'_data': [{**res, '_op': 'insert', '_type':\n model} for res in data or test_data]})\n assert resp.status_code == 200, resp.json()\n resp = resp.json()\n assert '_data' in resp, resp\n return resp['_data']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status=\"OK\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_lower(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_non_string(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count=13')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count=\"abc\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?status=\"o\"')\n data = resp.json()['_data']\n assert len(data) == 0\n resp = app.get(f'/{model}?state=\"o\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['FieldNotInResource']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_multiple_props(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(\n 
f'/{model}?status.lower()=\"invalid\"&report_type.lower()=\"stv\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_same_prop_multiple_times(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"invalid\"&status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gt(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count>40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?status>\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count>40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>42')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gt_with_nested_date(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)>\"2019-04-19\"')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gte(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count>=40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == 
r2['_id']\n resp = app.get(f'/{model}?status>=\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count>=40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>=40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>=42')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ge_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)>=\"2019-04-20\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lt(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count<12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status<\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count<20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count<50&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count<10')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lt_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n 
app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)<\"2019-02-02\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lte(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count<=12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status<=\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count<=20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count<=50&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count<=10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_le_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)<=\"2019-02-01\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_lower(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?status.lower()!=\"ok\"')\n assert ids(resp) == [1, 2]\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_multiple_props_and_logic(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, 
model))\n resp = app.get(f'/{model}?status.lower()!=\"ok\"&report_type.lower()=\"stv\"')\n assert ids(resp) == [2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_nested(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(\n f'/{model}?notes.create_date!=\"2019-02-01\"&status!=\"invalid\"')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_nested_missing_data(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?operating_licenses.license_types!=\"valid\"')\n assert ids(resp) == [1]\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_case_insensitive(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_multi_field(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.lower().contains(\"tv\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.contains(\"TV\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n f'/{model}?report_type.lower().contains(\"vm\")&report_type.lower().contains(\"mi\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n 
f'/{model}?status.contains(\"valid\")&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_select_unknown_property(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_select_unknown_property_in_object(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(notes.nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_startswith(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.startswith(\"VM\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?report_type.lower().startswith(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?status.startswith(\"in\")&report_type.lower().startswith(\"vm\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?report_type.lower().startswith(\"st\")&status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status.startswith(\"valid\")')\n data = resp.json()['_data']\n assert len(data) == 0\n resp = app.get(f'/{model}?notes.create_date.startswith(\"2019-04-20\")')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n\n\n<mask 
token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_contains(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?operating_licenses.license_types.contains(\"lid\")'\n )\n assert ids(resp) == [0]\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_or(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count=42|status.lower()=\"ok\"')\n assert ids(resp) == [0, 1]\n resp = app.get(f'/{model}?count<=10|count=13')\n assert ids(resp) == [0, 2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_recurse(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(note)=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/recurse', 'backends/postgres/recurse')\ndef test_search_recurse_multiple_props_lower(model, app):\n r1, r2 = ids(_push_test_data(app, model, [{'title': 'Org', 'country':\n 'fi', 'govids': [{'govid': '1', 'country': 'FI'}, {'govid': '2',\n 'country': 'SE'}]}, {'title': 'Org', 'country': 'no', 'govids': [{\n 'govid': '3', 'country': 'NO'}]}]))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(country).lower()=\"se\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country).lower()=\"fi\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country).lower()=\"no\"')\n assert ids(resp) == [r2]\n\n\ndef test_search_any(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",count,10,42)')\n assert ids(resp) == [0, 
1]\n resp = app.get(f'/{model}?any(\"ne\",count,42)')\n assert ids(resp) == [0, 2]\n\n\ndef test_search_any_in_list(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",notes.note,\"hello\",\"world\")')\n assert sorted(ids(resp)) == [0, 1]\n resp = app.get(f'/{model}?any(\"ne\",notes.note,\"foo bar\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\n<mask token>\n\n\ndef test_search_any_recurse(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status),\"OK\",\"none\")')\n assert ids(resp) == [0]\n\n\ndef test_search_any_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status).lower(),\"ok\",\"none\")')\n assert ids(resp) == [0]\n\n\ndef test_search_any_contains(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",status,\"inv\",\"val\",\"lid\")')\n assert sorted(ids(resp)) == [1, 2]\n\n\ndef test_search_any_contains_nested(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",notes.note,\"hel\",\"wor\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\ndef test_search_any_contains_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",recurse(status).lower(),\"o\",\"k\")')\n assert sorted(ids(resp)) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_contains(model, app):\n app.authmodel(model, ['search', 'getall'])\n ids = 
RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_id.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n subid = ids[0][5:10]\n resp = app.get(f'/{model}?_id.contains(\"{subid}\")')\n assert ids(resp) == [0]\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][:5]\n resp = app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_not_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][5:10]\n resp = app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == []\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_revision_contains(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_revision.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_null(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model, [{'status': 'OK'}, {}]))\n resp = app.get(f'/{model}?status=null')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_not_null(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model, [{'status': 'OK'}, {}]))\n resp = app.get(f'/{model}?status!=null')\n assert ids(resp) == [0]\n\n\n@pytest.mark.parametrize('backend', ['default', 'mongo'])\ndef test_extra_fields(postgresql, mongo, backend, rc, tmp_path, request):\n rc = rc.fork({'backends': [backend], 'manifests.default': {'type':\n 'tabular', 'path': str(tmp_path 
/ 'manifest.csv'), 'backend': backend}}\n )\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n extrafields |\n | code | string\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n request.addfinalizer(context.wipe_all)\n app = create_test_client(context)\n app.authmodel('extrafields', ['insert'])\n resp = app.post('/extrafields', json={'_data': [{'_op': 'insert',\n 'code': 'lt', 'name': 'Lietuva'}, {'_op': 'insert', 'code': 'lv',\n 'name': 'Latvija'}, {'_op': 'insert', 'code': 'ee', 'name': 'Estija'}]}\n )\n assert resp.status_code == 200, resp.json()\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n extrafields |\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('extrafields', ['getall', 'getone'])\n resp = app.get('/extrafields')\n assert listdata(resp, sort=True) == ['Estija', 'Latvija', 'Lietuva']\n pk = resp.json()['_data'][0]['_id']\n resp = app.get(f'/extrafields/{pk}')\n data = resp.json()\n assert resp.status_code == 200, data\n assert take(data) == {'name': 'Lietuva'}\n\n\n<mask token>\n\n\ndef test_base_select(rc, postgresql, request):\n context = bootstrap_manifest(rc,\n \"\"\"\n d | r | b | m | property | type | ref\n datasets/gov/example/base | |\n | |\n | | | Location | |\n | | | | id | integer |\n | | | | name | string |\n | | | | type | string |\n | |\n | | Location | |\n | | | City | |\n | | | | id | |\n | | | | name | string |\n | | | | population | integer |\n \"\"\"\n , backend=postgresql, request=request)\n app = create_test_client(context)\n app.authorize(['spinta_set_meta_fields'])\n app.authmodel('datasets/gov/example/base/Location', ['insert', 'delete'])\n app.authmodel('datasets/gov/example/base/City', ['insert', 'delete',\n 'getall', 'search'])\n _id = str(uuid.uuid4())\n app.post('/datasets/gov/example/base/Location', json={'_id': _id, 'id':\n 1, 'name': 
'Base location', 'type': 'city'})\n app.post('/datasets/gov/example/base/City', json={'_id': _id, 'name':\n 'City', 'population': 100})\n resp = app.get(\n '/datasets/gov/example/base/City?select(id,name,_base.name,population,_base.type)'\n )\n assert resp.json()['_data'] == [{'_base': {'name': 'Base location',\n 'type': 'city'}, 'id': 1, 'name': 'City', 'population': 100}]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_select_revision(model, app):\n app.authmodel(model, ['search', 'getone', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n id0 = ids[0]\n resp = app.get(f'/{model}/{id0}')\n revision = resp.json()['_revision']\n resp = app.get(f'/{model}/:format/jsonl?limit(1)&select(_revision)')\n assert json.loads(resp.content) == {'_revision': revision}\n",
"step-2": "<mask token>\n\n\ndef _push_test_data(app, model, data=None):\n app.authmodel(model, ['insert'])\n resp = app.post('/', json={'_data': [{**res, '_op': 'insert', '_type':\n model} for res in data or test_data]})\n assert resp.status_code == 200, resp.json()\n resp = resp.json()\n assert '_data' in resp, resp\n return resp['_data']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status=\"OK\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_lower(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_non_string(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count=13')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count=\"abc\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?status=\"o\"')\n data = resp.json()['_data']\n assert len(data) == 0\n resp = app.get(f'/{model}?state=\"o\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['FieldNotInResource']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_multiple_props(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(\n 
f'/{model}?status.lower()=\"invalid\"&report_type.lower()=\"stv\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_same_prop_multiple_times(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"invalid\"&status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gt(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count>40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?status>\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count>40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>42')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gt_with_nested_date(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)>\"2019-04-19\"')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gte(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count>=40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == 
r2['_id']\n resp = app.get(f'/{model}?status>=\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count>=40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>=40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>=42')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ge_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)>=\"2019-04-20\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lt(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count<12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status<\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count<20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count<50&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count<10')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lt_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n 
app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)<\"2019-02-02\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lte(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count<=12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status<=\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count<=20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count<=50&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count<=10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_le_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)<=\"2019-02-01\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_lower(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?status.lower()!=\"ok\"')\n assert ids(resp) == [1, 2]\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_multiple_props_and_logic(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, 
model))\n resp = app.get(f'/{model}?status.lower()!=\"ok\"&report_type.lower()=\"stv\"')\n assert ids(resp) == [2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_nested(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(\n f'/{model}?notes.create_date!=\"2019-02-01\"&status!=\"invalid\"')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_nested_missing_data(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?operating_licenses.license_types!=\"valid\"')\n assert ids(resp) == [1]\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_case_insensitive(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_multi_field(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.lower().contains(\"tv\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.contains(\"TV\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n f'/{model}?report_type.lower().contains(\"vm\")&report_type.lower().contains(\"mi\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n 
f'/{model}?status.contains(\"valid\")&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_type_check(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date).contains(\"2019-04-20\")')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_select_unknown_property(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_select_unknown_property_in_object(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(notes.nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_startswith(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.startswith(\"VM\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?report_type.lower().startswith(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?status.startswith(\"in\")&report_type.lower().startswith(\"vm\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?report_type.lower().startswith(\"st\")&status.lower()=\"ok\"')\n data = 
resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status.startswith(\"valid\")')\n data = resp.json()['_data']\n assert len(data) == 0\n resp = app.get(f'/{model}?notes.create_date.startswith(\"2019-04-20\")')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_contains(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?operating_licenses.license_types.contains(\"lid\")'\n )\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_startswith(model, context, app):\n app.authmodel(model, ['search'])\n r1, r2, r3 = _push_test_data(app, model)\n resp = app.get(f'/{model}?notes.note.startswith(\"fo\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n f'/{model}?operating_licenses.license_types.startswith(\"exp\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_or(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count=42|status.lower()=\"ok\"')\n assert ids(resp) == [0, 1]\n resp = app.get(f'/{model}?count<=10|count=13')\n assert ids(resp) == [0, 2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_recurse(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(note)=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n<mask 
token>\n\n\n@pytest.mark.models('backends/mongo/recurse', 'backends/postgres/recurse')\ndef test_search_recurse_multiple_props_lower(model, app):\n r1, r2 = ids(_push_test_data(app, model, [{'title': 'Org', 'country':\n 'fi', 'govids': [{'govid': '1', 'country': 'FI'}, {'govid': '2',\n 'country': 'SE'}]}, {'title': 'Org', 'country': 'no', 'govids': [{\n 'govid': '3', 'country': 'NO'}]}]))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(country).lower()=\"se\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country).lower()=\"fi\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country).lower()=\"no\"')\n assert ids(resp) == [r2]\n\n\ndef test_search_any(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",count,10,42)')\n assert ids(resp) == [0, 1]\n resp = app.get(f'/{model}?any(\"ne\",count,42)')\n assert ids(resp) == [0, 2]\n\n\ndef test_search_any_in_list(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",notes.note,\"hello\",\"world\")')\n assert sorted(ids(resp)) == [0, 1]\n resp = app.get(f'/{model}?any(\"ne\",notes.note,\"foo bar\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\n<mask token>\n\n\ndef test_search_any_recurse(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status),\"OK\",\"none\")')\n assert ids(resp) == [0]\n\n\ndef test_search_any_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status).lower(),\"ok\",\"none\")')\n assert ids(resp) == [0]\n\n\ndef test_search_any_contains(app):\n model = 'backends/postgres/report'\n 
app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",status,\"inv\",\"val\",\"lid\")')\n assert sorted(ids(resp)) == [1, 2]\n\n\ndef test_search_any_contains_nested(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",notes.note,\"hel\",\"wor\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\ndef test_search_any_contains_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",recurse(status).lower(),\"o\",\"k\")')\n assert sorted(ids(resp)) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_contains(model, app):\n app.authmodel(model, ['search', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_id.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n subid = ids[0][5:10]\n resp = app.get(f'/{model}?_id.contains(\"{subid}\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_not_contains(model, app):\n app.authmodel(model, ['search', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_id.contains(\"AAAAA\")')\n assert ids(resp) == []\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][:5]\n resp = app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_not_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][5:10]\n resp = 
app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == []\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_revision_contains(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_revision.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_select_in_or(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?(report_type=\"STV\"|status=\"OK\")&select(_id)')\n assert ids(resp) == [0, 2]\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_null(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model, [{'status': 'OK'}, {}]))\n resp = app.get(f'/{model}?status=null')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_not_null(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model, [{'status': 'OK'}, {}]))\n resp = app.get(f'/{model}?status!=null')\n assert ids(resp) == [0]\n\n\n@pytest.mark.parametrize('backend', ['default', 'mongo'])\ndef test_extra_fields(postgresql, mongo, backend, rc, tmp_path, request):\n rc = rc.fork({'backends': [backend], 'manifests.default': {'type':\n 'tabular', 'path': str(tmp_path / 'manifest.csv'), 'backend': backend}}\n )\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n extrafields |\n | code | string\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n request.addfinalizer(context.wipe_all)\n app = create_test_client(context)\n app.authmodel('extrafields', ['insert'])\n resp = app.post('/extrafields', json={'_data': [{'_op': 'insert',\n 'code': 'lt', 'name': 
'Lietuva'}, {'_op': 'insert', 'code': 'lv',\n 'name': 'Latvija'}, {'_op': 'insert', 'code': 'ee', 'name': 'Estija'}]}\n )\n assert resp.status_code == 200, resp.json()\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n extrafields |\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('extrafields', ['getall', 'getone'])\n resp = app.get('/extrafields')\n assert listdata(resp, sort=True) == ['Estija', 'Latvija', 'Lietuva']\n pk = resp.json()['_data'][0]['_id']\n resp = app.get(f'/extrafields/{pk}')\n data = resp.json()\n assert resp.status_code == 200, data\n assert take(data) == {'name': 'Lietuva'}\n\n\n@pytest.mark.parametrize('backend', ['mongo'])\ndef test_missing_fields(postgresql, mongo, backend, rc, tmp_path):\n rc = rc.fork({'backends': [backend], 'manifests.default': {'type':\n 'tabular', 'path': str(tmp_path / 'manifest.csv'), 'backend': backend}}\n )\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n missingfields |\n | code | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('missingfields', ['insert'])\n resp = app.post('/missingfields', json={'_data': [{'_op': 'insert',\n 'code': 'lt'}, {'_op': 'insert', 'code': 'lv'}, {'_op': 'insert',\n 'code': 'ee'}]})\n assert resp.status_code == 200, resp.json()\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n missingfields |\n | code | string\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('missingfields', ['search', 'getone'])\n resp = app.get('/missingfields?select(_id,code,name)')\n assert listdata(resp, sort=True) == [('ee', None), ('lt', None), ('lv',\n None)]\n pk = resp.json()['_data'][0]['_id']\n resp = app.get(f'/missingfields/{pk}')\n data = resp.json()\n assert 
resp.status_code == 200, data\n assert take(data) == {'code': 'lt'}\n\n\ndef test_base_select(rc, postgresql, request):\n context = bootstrap_manifest(rc,\n \"\"\"\n d | r | b | m | property | type | ref\n datasets/gov/example/base | |\n | |\n | | | Location | |\n | | | | id | integer |\n | | | | name | string |\n | | | | type | string |\n | |\n | | Location | |\n | | | City | |\n | | | | id | |\n | | | | name | string |\n | | | | population | integer |\n \"\"\"\n , backend=postgresql, request=request)\n app = create_test_client(context)\n app.authorize(['spinta_set_meta_fields'])\n app.authmodel('datasets/gov/example/base/Location', ['insert', 'delete'])\n app.authmodel('datasets/gov/example/base/City', ['insert', 'delete',\n 'getall', 'search'])\n _id = str(uuid.uuid4())\n app.post('/datasets/gov/example/base/Location', json={'_id': _id, 'id':\n 1, 'name': 'Base location', 'type': 'city'})\n app.post('/datasets/gov/example/base/City', json={'_id': _id, 'name':\n 'City', 'population': 100})\n resp = app.get(\n '/datasets/gov/example/base/City?select(id,name,_base.name,population,_base.type)'\n )\n assert resp.json()['_data'] == [{'_base': {'name': 'Base location',\n 'type': 'city'}, 'id': 1, 'name': 'City', 'population': 100}]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_select_revision(model, app):\n app.authmodel(model, ['search', 'getone', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n id0 = ids[0]\n resp = app.get(f'/{model}/{id0}')\n revision = resp.json()['_revision']\n resp = app.get(f'/{model}/:format/jsonl?limit(1)&select(_revision)')\n assert json.loads(resp.content) == {'_revision': revision}\n",
"step-3": "<mask token>\n\n\ndef _push_test_data(app, model, data=None):\n app.authmodel(model, ['insert'])\n resp = app.post('/', json={'_data': [{**res, '_op': 'insert', '_type':\n model} for res in data or test_data]})\n assert resp.status_code == 200, resp.json()\n resp = resp.json()\n assert '_data' in resp, resp\n return resp['_data']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status=\"OK\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_lower(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_non_string(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count=13')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count=\"abc\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?status=\"o\"')\n data = resp.json()['_data']\n assert len(data) == 0\n resp = app.get(f'/{model}?state=\"o\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['FieldNotInResource']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_multiple_props(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(\n 
f'/{model}?status.lower()=\"invalid\"&report_type.lower()=\"stv\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_same_prop_multiple_times(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"invalid\"&status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gt(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count>40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?status>\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count>40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>42')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gt_with_nested_date(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)>\"2019-04-19\"')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gte(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count>=40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == 
r2['_id']\n resp = app.get(f'/{model}?status>=\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count>=40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>=40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>=42')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ge_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)>=\"2019-04-20\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lt(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count<12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status<\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count<20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count<50&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count<10')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lt_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n 
app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)<\"2019-02-02\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lte(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count<=12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status<=\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count<=20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count<=50&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count<=10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_le_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)<=\"2019-02-01\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?status!=\"invalid\"')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_lower(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?status.lower()!=\"ok\"')\n 
assert ids(resp) == [1, 2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_multiple_props(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?count!=10&count!=42')\n assert ids(resp) == [2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_multiple_props_and_logic(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?status.lower()!=\"ok\"&report_type.lower()=\"stv\"')\n assert ids(resp) == [2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_nested(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(\n f'/{model}?notes.create_date!=\"2019-02-01\"&status!=\"invalid\"')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_nested_missing_data(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?operating_licenses.license_types!=\"valid\"')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_case_insensitive(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n data = resp.json()['_data']\n assert 
len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_multi_field(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.lower().contains(\"tv\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.contains(\"TV\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n f'/{model}?report_type.lower().contains(\"vm\")&report_type.lower().contains(\"mi\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_type_check(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date).contains(\"2019-04-20\")')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_with_select(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")&select(count)'\n )\n assert resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {'count': 42}\n mocker.patch.object(context.get('config'), 'always_show_id', True)\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")&select(count)'\n )\n assert 
resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {'_id': r2['_id'], 'count': 42}\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n assert resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {'_id': r2['_id']}\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_select_unknown_property(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_select_unknown_property_in_object(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(notes.nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_startswith(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.startswith(\"VM\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?report_type.lower().startswith(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?status.startswith(\"in\")&report_type.lower().startswith(\"vm\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?report_type.lower().startswith(\"st\")&status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status.startswith(\"valid\")')\n data = resp.json()['_data']\n assert len(data) == 0\n resp = 
app.get(f'/{model}?notes.create_date.startswith(\"2019-04-20\")')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?notes.note=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?notes.note.lower()=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?notes.create_date=\"2019-03-14\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?notes.create_date>\"2019-04-01\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?notes.foo.bar=\"baz\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['FieldNotInResource']\n resp = app.get(f'/{model}?notes.note.contains(\"bar\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_contains(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?operating_licenses.license_types.contains(\"lid\")'\n )\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_startswith(model, context, app):\n app.authmodel(model, ['search'])\n r1, r2, r3 = _push_test_data(app, model)\n resp = app.get(f'/{model}?notes.note.startswith(\"fo\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n 
f'/{model}?operating_licenses.license_types.startswith(\"exp\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_or(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count=42|status.lower()=\"ok\"')\n assert ids(resp) == [0, 1]\n resp = app.get(f'/{model}?count<=10|count=13')\n assert ids(resp) == [0, 2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_recurse(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(note)=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/recurse', 'backends/postgres/recurse')\ndef test_search_recurse_multiple_props_lower(model, app):\n r1, r2 = ids(_push_test_data(app, model, [{'title': 'Org', 'country':\n 'fi', 'govids': [{'govid': '1', 'country': 'FI'}, {'govid': '2',\n 'country': 'SE'}]}, {'title': 'Org', 'country': 'no', 'govids': [{\n 'govid': '3', 'country': 'NO'}]}]))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(country).lower()=\"se\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country).lower()=\"fi\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country).lower()=\"no\"')\n assert ids(resp) == [r2]\n\n\ndef test_search_any(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",count,10,42)')\n assert ids(resp) == [0, 1]\n resp = app.get(f'/{model}?any(\"ne\",count,42)')\n assert ids(resp) == [0, 2]\n\n\ndef test_search_any_in_list(app):\n model = 'backends/postgres/report'\n 
app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",notes.note,\"hello\",\"world\")')\n assert sorted(ids(resp)) == [0, 1]\n resp = app.get(f'/{model}?any(\"ne\",notes.note,\"foo bar\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\ndef test_search_any_in_list_of_scalars(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(\n f'/{model}?any(\"eq\",operating_licenses.license_types,\"valid\",\"invalid\",\"expired\")'\n )\n assert sorted(ids(resp)) == [0, 1]\n resp = app.get(\n f'/{model}?any(\"ne\",operating_licenses.license_types,\"expired\")')\n assert sorted(ids(resp)) == [0]\n\n\ndef test_search_any_recurse(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status),\"OK\",\"none\")')\n assert ids(resp) == [0]\n\n\ndef test_search_any_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status).lower(),\"ok\",\"none\")')\n assert ids(resp) == [0]\n\n\ndef test_search_any_contains(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",status,\"inv\",\"val\",\"lid\")')\n assert sorted(ids(resp)) == [1, 2]\n\n\ndef test_search_any_contains_nested(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",notes.note,\"hel\",\"wor\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\ndef test_search_any_contains_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = 
app.get(f'/{model}?any(\"contains\",recurse(status).lower(),\"o\",\"k\")')\n assert sorted(ids(resp)) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_contains(model, app):\n app.authmodel(model, ['search', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_id.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n subid = ids[0][5:10]\n resp = app.get(f'/{model}?_id.contains(\"{subid}\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_not_contains(model, app):\n app.authmodel(model, ['search', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_id.contains(\"AAAAA\")')\n assert ids(resp) == []\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][:5]\n resp = app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_not_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][5:10]\n resp = app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == []\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_revision_contains(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_revision.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_group(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = 
app.get(f'/{model}?(report_type=\"STV\"&status=\"OK\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_select_in_or(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?(report_type=\"STV\"|status=\"OK\")&select(_id)')\n assert ids(resp) == [0, 2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lower_contains(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?report_type.lower().contains(\"st\")')\n assert ids(resp) == [0, 2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_null(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model, [{'status': 'OK'}, {}]))\n resp = app.get(f'/{model}?status=null')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_not_null(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model, [{'status': 'OK'}, {}]))\n resp = app.get(f'/{model}?status!=null')\n assert ids(resp) == [0]\n\n\n@pytest.mark.parametrize('backend', ['default', 'mongo'])\ndef test_extra_fields(postgresql, mongo, backend, rc, tmp_path, request):\n rc = rc.fork({'backends': [backend], 'manifests.default': {'type':\n 'tabular', 'path': str(tmp_path / 'manifest.csv'), 'backend': backend}}\n )\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n extrafields |\n | code | string\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n request.addfinalizer(context.wipe_all)\n app = create_test_client(context)\n app.authmodel('extrafields', ['insert'])\n resp = app.post('/extrafields', json={'_data': [{'_op': 'insert',\n 'code': 'lt', 'name': 'Lietuva'}, {'_op': 
'insert', 'code': 'lv',\n 'name': 'Latvija'}, {'_op': 'insert', 'code': 'ee', 'name': 'Estija'}]}\n )\n assert resp.status_code == 200, resp.json()\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n extrafields |\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('extrafields', ['getall', 'getone'])\n resp = app.get('/extrafields')\n assert listdata(resp, sort=True) == ['Estija', 'Latvija', 'Lietuva']\n pk = resp.json()['_data'][0]['_id']\n resp = app.get(f'/extrafields/{pk}')\n data = resp.json()\n assert resp.status_code == 200, data\n assert take(data) == {'name': 'Lietuva'}\n\n\n@pytest.mark.parametrize('backend', ['mongo'])\ndef test_missing_fields(postgresql, mongo, backend, rc, tmp_path):\n rc = rc.fork({'backends': [backend], 'manifests.default': {'type':\n 'tabular', 'path': str(tmp_path / 'manifest.csv'), 'backend': backend}}\n )\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n missingfields |\n | code | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('missingfields', ['insert'])\n resp = app.post('/missingfields', json={'_data': [{'_op': 'insert',\n 'code': 'lt'}, {'_op': 'insert', 'code': 'lv'}, {'_op': 'insert',\n 'code': 'ee'}]})\n assert resp.status_code == 200, resp.json()\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n missingfields |\n | code | string\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('missingfields', ['search', 'getone'])\n resp = app.get('/missingfields?select(_id,code,name)')\n assert listdata(resp, sort=True) == [('ee', None), ('lt', None), ('lv',\n None)]\n pk = resp.json()['_data'][0]['_id']\n resp = app.get(f'/missingfields/{pk}')\n data = resp.json()\n assert resp.status_code == 200, 
data\n assert take(data) == {'code': 'lt'}\n\n\ndef test_base_select(rc, postgresql, request):\n context = bootstrap_manifest(rc,\n \"\"\"\n d | r | b | m | property | type | ref\n datasets/gov/example/base | |\n | |\n | | | Location | |\n | | | | id | integer |\n | | | | name | string |\n | | | | type | string |\n | |\n | | Location | |\n | | | City | |\n | | | | id | |\n | | | | name | string |\n | | | | population | integer |\n \"\"\"\n , backend=postgresql, request=request)\n app = create_test_client(context)\n app.authorize(['spinta_set_meta_fields'])\n app.authmodel('datasets/gov/example/base/Location', ['insert', 'delete'])\n app.authmodel('datasets/gov/example/base/City', ['insert', 'delete',\n 'getall', 'search'])\n _id = str(uuid.uuid4())\n app.post('/datasets/gov/example/base/Location', json={'_id': _id, 'id':\n 1, 'name': 'Base location', 'type': 'city'})\n app.post('/datasets/gov/example/base/City', json={'_id': _id, 'name':\n 'City', 'population': 100})\n resp = app.get(\n '/datasets/gov/example/base/City?select(id,name,_base.name,population,_base.type)'\n )\n assert resp.json()['_data'] == [{'_base': {'name': 'Base location',\n 'type': 'city'}, 'id': 1, 'name': 'City', 'population': 100}]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_select_revision(model, app):\n app.authmodel(model, ['search', 'getone', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n id0 = ids[0]\n resp = app.get(f'/{model}/{id0}')\n revision = resp.json()['_revision']\n resp = app.get(f'/{model}/:format/jsonl?limit(1)&select(_revision)')\n assert json.loads(resp.content) == {'_revision': revision}\n",
"step-4": "<mask token>\n\n\ndef _push_test_data(app, model, data=None):\n app.authmodel(model, ['insert'])\n resp = app.post('/', json={'_data': [{**res, '_op': 'insert', '_type':\n model} for res in data or test_data]})\n assert resp.status_code == 200, resp.json()\n resp = resp.json()\n assert '_data' in resp, resp\n return resp['_data']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status=\"OK\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_lower(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_non_string(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count=13')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count=\"abc\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?status=\"o\"')\n data = resp.json()['_data']\n assert len(data) == 0\n resp = app.get(f'/{model}?state=\"o\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['FieldNotInResource']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_multiple_props(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(\n 
f'/{model}?status.lower()=\"invalid\"&report_type.lower()=\"stv\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_same_prop_multiple_times(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"invalid\"&status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gt(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count>40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?status>\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count>40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>42')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gt_with_nested_date(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)>\"2019-04-19\"')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gte(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count>=40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == 
r2['_id']\n resp = app.get(f'/{model}?status>=\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count>=40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>=40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>=42')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ge_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)>=\"2019-04-20\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lt(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count<12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status<\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count<20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count<50&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count<10')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lt_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n 
app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)<\"2019-02-02\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lte(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count<=12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status<=\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count<=20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count<=50&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count<=10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_le_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)<=\"2019-02-01\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?status!=\"invalid\"')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_lower(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?status.lower()!=\"ok\"')\n 
assert ids(resp) == [1, 2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_multiple_props(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?count!=10&count!=42')\n assert ids(resp) == [2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_multiple_props_and_logic(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?status.lower()!=\"ok\"&report_type.lower()=\"stv\"')\n assert ids(resp) == [2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_nested(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(\n f'/{model}?notes.create_date!=\"2019-02-01\"&status!=\"invalid\"')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_nested_missing_data(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?operating_licenses.license_types!=\"valid\"')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_case_insensitive(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n data = resp.json()['_data']\n assert 
len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_multi_field(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.lower().contains(\"tv\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.contains(\"TV\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n f'/{model}?report_type.lower().contains(\"vm\")&report_type.lower().contains(\"mi\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_type_check(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date).contains(\"2019-04-20\")')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_with_select(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")&select(count)'\n )\n assert resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {'count': 42}\n mocker.patch.object(context.get('config'), 'always_show_id', True)\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")&select(count)'\n )\n assert 
resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {'_id': r2['_id'], 'count': 42}\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n assert resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {'_id': r2['_id']}\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_select_unknown_property(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_select_unknown_property_in_object(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(notes.nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_startswith(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.startswith(\"VM\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?report_type.lower().startswith(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?status.startswith(\"in\")&report_type.lower().startswith(\"vm\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?report_type.lower().startswith(\"st\")&status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status.startswith(\"valid\")')\n data = resp.json()['_data']\n assert len(data) == 0\n resp = 
app.get(f'/{model}?notes.create_date.startswith(\"2019-04-20\")')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?notes.note=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?notes.note.lower()=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?notes.create_date=\"2019-03-14\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?notes.create_date>\"2019-04-01\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?notes.foo.bar=\"baz\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['FieldNotInResource']\n resp = app.get(f'/{model}?notes.note.contains(\"bar\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_contains(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?operating_licenses.license_types.contains(\"lid\")'\n )\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_startswith(model, context, app):\n app.authmodel(model, ['search'])\n r1, r2, r3 = _push_test_data(app, model)\n resp = app.get(f'/{model}?notes.note.startswith(\"fo\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n 
f'/{model}?operating_licenses.license_types.startswith(\"exp\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\ndef ids(resources):\n if isinstance(resources, (requests.models.Response, httpx.Response)):\n resp = resources\n assert resp.status_code == 200, resp.json()\n resources = resp.json()['_data']\n return [r['_id'] for r in resources]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_or(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count=42|status.lower()=\"ok\"')\n assert ids(resp) == [0, 1]\n resp = app.get(f'/{model}?count<=10|count=13')\n assert ids(resp) == [0, 2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_recurse(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(note)=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/recurse', 'backends/postgres/recurse')\ndef test_search_nested_recurse_multiple_props(model, context, app):\n r1, r2 = ids(_push_test_data(app, model, [{'title': 'Org', 'country':\n 'fi', 'govids': [{'govid': '1', 'country': 'fi'}, {'govid': '2',\n 'country': 'se'}]}, {'title': 'Org', 'country': 'no', 'govids': [{\n 'govid': '3', 'country': 'no'}]}]))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(country)=\"se\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country)=\"fi\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country)=\"no\"')\n assert ids(resp) == [r2]\n\n\n@pytest.mark.models('backends/mongo/recurse', 'backends/postgres/recurse')\ndef test_search_recurse_multiple_props_lower(model, app):\n r1, r2 = ids(_push_test_data(app, model, [{'title': 
'Org', 'country':\n 'fi', 'govids': [{'govid': '1', 'country': 'FI'}, {'govid': '2',\n 'country': 'SE'}]}, {'title': 'Org', 'country': 'no', 'govids': [{\n 'govid': '3', 'country': 'NO'}]}]))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(country).lower()=\"se\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country).lower()=\"fi\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country).lower()=\"no\"')\n assert ids(resp) == [r2]\n\n\ndef test_search_any(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",count,10,42)')\n assert ids(resp) == [0, 1]\n resp = app.get(f'/{model}?any(\"ne\",count,42)')\n assert ids(resp) == [0, 2]\n\n\ndef test_search_any_in_list(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",notes.note,\"hello\",\"world\")')\n assert sorted(ids(resp)) == [0, 1]\n resp = app.get(f'/{model}?any(\"ne\",notes.note,\"foo bar\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\ndef test_search_any_in_list_of_scalars(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(\n f'/{model}?any(\"eq\",operating_licenses.license_types,\"valid\",\"invalid\",\"expired\")'\n )\n assert sorted(ids(resp)) == [0, 1]\n resp = app.get(\n f'/{model}?any(\"ne\",operating_licenses.license_types,\"expired\")')\n assert sorted(ids(resp)) == [0]\n\n\ndef test_search_any_recurse(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status),\"OK\",\"none\")')\n assert ids(resp) == [0]\n\n\ndef test_search_any_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = 
RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status).lower(),\"ok\",\"none\")')\n assert ids(resp) == [0]\n\n\ndef test_search_any_contains(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",status,\"inv\",\"val\",\"lid\")')\n assert sorted(ids(resp)) == [1, 2]\n\n\ndef test_search_any_contains_nested(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",notes.note,\"hel\",\"wor\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\ndef test_search_any_contains_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",recurse(status).lower(),\"o\",\"k\")')\n assert sorted(ids(resp)) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_contains(model, app):\n app.authmodel(model, ['search', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_id.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n subid = ids[0][5:10]\n resp = app.get(f'/{model}?_id.contains(\"{subid}\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_not_contains(model, app):\n app.authmodel(model, ['search', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_id.contains(\"AAAAA\")')\n assert ids(resp) == []\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][:5]\n resp = app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == 
[0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_not_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][5:10]\n resp = app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == []\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_revision_contains(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_revision.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_group(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?(report_type=\"STV\"&status=\"OK\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_select_in_or(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?(report_type=\"STV\"|status=\"OK\")&select(_id)')\n assert ids(resp) == [0, 2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lower_contains(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?report_type.lower().contains(\"st\")')\n assert ids(resp) == [0, 2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_null(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model, [{'status': 'OK'}, {}]))\n resp = app.get(f'/{model}?status=null')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_not_null(model, app):\n app.authmodel(model, ['search'])\n ids = 
RowIds(_push_test_data(app, model, [{'status': 'OK'}, {}]))\n resp = app.get(f'/{model}?status!=null')\n assert ids(resp) == [0]\n\n\n@pytest.mark.parametrize('backend', ['default', 'mongo'])\ndef test_extra_fields(postgresql, mongo, backend, rc, tmp_path, request):\n rc = rc.fork({'backends': [backend], 'manifests.default': {'type':\n 'tabular', 'path': str(tmp_path / 'manifest.csv'), 'backend': backend}}\n )\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n extrafields |\n | code | string\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n request.addfinalizer(context.wipe_all)\n app = create_test_client(context)\n app.authmodel('extrafields', ['insert'])\n resp = app.post('/extrafields', json={'_data': [{'_op': 'insert',\n 'code': 'lt', 'name': 'Lietuva'}, {'_op': 'insert', 'code': 'lv',\n 'name': 'Latvija'}, {'_op': 'insert', 'code': 'ee', 'name': 'Estija'}]}\n )\n assert resp.status_code == 200, resp.json()\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n extrafields |\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('extrafields', ['getall', 'getone'])\n resp = app.get('/extrafields')\n assert listdata(resp, sort=True) == ['Estija', 'Latvija', 'Lietuva']\n pk = resp.json()['_data'][0]['_id']\n resp = app.get(f'/extrafields/{pk}')\n data = resp.json()\n assert resp.status_code == 200, data\n assert take(data) == {'name': 'Lietuva'}\n\n\n@pytest.mark.parametrize('backend', ['mongo'])\ndef test_missing_fields(postgresql, mongo, backend, rc, tmp_path):\n rc = rc.fork({'backends': [backend], 'manifests.default': {'type':\n 'tabular', 'path': str(tmp_path / 'manifest.csv'), 'backend': backend}}\n )\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n missingfields |\n | code | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app 
= create_test_client(context)\n app.authmodel('missingfields', ['insert'])\n resp = app.post('/missingfields', json={'_data': [{'_op': 'insert',\n 'code': 'lt'}, {'_op': 'insert', 'code': 'lv'}, {'_op': 'insert',\n 'code': 'ee'}]})\n assert resp.status_code == 200, resp.json()\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n missingfields |\n | code | string\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('missingfields', ['search', 'getone'])\n resp = app.get('/missingfields?select(_id,code,name)')\n assert listdata(resp, sort=True) == [('ee', None), ('lt', None), ('lv',\n None)]\n pk = resp.json()['_data'][0]['_id']\n resp = app.get(f'/missingfields/{pk}')\n data = resp.json()\n assert resp.status_code == 200, data\n assert take(data) == {'code': 'lt'}\n\n\ndef test_base_select(rc, postgresql, request):\n context = bootstrap_manifest(rc,\n \"\"\"\n d | r | b | m | property | type | ref\n datasets/gov/example/base | |\n | |\n | | | Location | |\n | | | | id | integer |\n | | | | name | string |\n | | | | type | string |\n | |\n | | Location | |\n | | | City | |\n | | | | id | |\n | | | | name | string |\n | | | | population | integer |\n \"\"\"\n , backend=postgresql, request=request)\n app = create_test_client(context)\n app.authorize(['spinta_set_meta_fields'])\n app.authmodel('datasets/gov/example/base/Location', ['insert', 'delete'])\n app.authmodel('datasets/gov/example/base/City', ['insert', 'delete',\n 'getall', 'search'])\n _id = str(uuid.uuid4())\n app.post('/datasets/gov/example/base/Location', json={'_id': _id, 'id':\n 1, 'name': 'Base location', 'type': 'city'})\n app.post('/datasets/gov/example/base/City', json={'_id': _id, 'name':\n 'City', 'population': 100})\n resp = app.get(\n '/datasets/gov/example/base/City?select(id,name,_base.name,population,_base.type)'\n )\n assert resp.json()['_data'] == [{'_base': {'name': 'Base 
location',\n 'type': 'city'}, 'id': 1, 'name': 'City', 'population': 100}]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_select_revision(model, app):\n app.authmodel(model, ['search', 'getone', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n id0 = ids[0]\n resp = app.get(f'/{model}/{id0}')\n revision = resp.json()['_revision']\n resp = app.get(f'/{model}/:format/jsonl?limit(1)&select(_revision)')\n assert json.loads(resp.content) == {'_revision': revision}\n",
"step-5": "import uuid\nimport json\n\nimport pytest\nimport requests\nimport httpx\nfrom spinta.testing.manifest import bootstrap_manifest\n\nfrom spinta.utils.data import take\nfrom spinta.testing.utils import error\nfrom spinta.testing.utils import get_error_codes, RowIds\nfrom spinta.testing.context import create_test_context\nfrom spinta.testing.client import create_test_client\nfrom spinta.manifests.tabular.helpers import striptable\nfrom spinta.testing.tabular import create_tabular_manifest\nfrom spinta.testing.data import listdata\n\n\ntest_data = [\n {\n '_type': 'report',\n 'status': 'OK',\n 'report_type': 'STV',\n 'count': 10,\n 'notes': [{\n 'note': 'hello',\n 'note_type': 'simple',\n 'create_date': '2019-03-14',\n }],\n 'operating_licenses': [{\n 'license_types': ['valid', 'invalid'],\n }],\n },\n {\n '_type': 'report',\n 'status': 'invalid',\n 'report_type': 'VMI',\n 'count': 42,\n 'notes': [{\n 'note': 'world',\n 'note_type': 'daily',\n 'create_date': '2019-04-20',\n }],\n 'operating_licenses': [{\n 'license_types': ['expired'],\n }],\n },\n {\n '_type': 'report',\n 'status': 'invalid',\n 'report_type': 'STV',\n 'count': 13,\n 'notes': [{\n 'note': 'foo bar',\n 'note_type': 'important',\n 'create_date': '2019-02-01',\n }],\n },\n]\n\n\ndef _push_test_data(app, model, data=None):\n app.authmodel(model, ['insert'])\n resp = app.post('/', json={'_data': [\n {\n **res,\n '_op': 'insert',\n '_type': model,\n }\n for res in data or test_data\n ]})\n assert resp.status_code == 200, resp.json()\n resp = resp.json()\n assert '_data' in resp, resp\n return resp['_data']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_exact(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # single field search\n resp = app.get(f'/{model}?status=\"OK\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == 
r1['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_exact_lower(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_exact_non_string(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # single field search, non string type\n resp = app.get(f'/{model}?count=13')\n data = resp.json()['_data']\n\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n # single field fsearch, non string type\n resp = app.get(f'/{model}?count=\"abc\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"InvalidValue\"]\n\n # single non-existing field value search\n resp = app.get(f'/{model}?status=\"o\"')\n data = resp.json()['_data']\n assert len(data) == 0\n\n # single non-existing field search\n resp = app.get(f'/{model}?state=\"o\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"FieldNotInResource\"]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_exact_multiple_props(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"invalid\"&report_type.lower()=\"stv\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_exact_same_prop_multiple_times(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"invalid\"&status.lower()=\"ok\"')\n data = 
resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_gt(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # single field search\n resp = app.get(f'/{model}?count>40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # search for string value\n resp = app.get(f'/{model}?status>\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"InvalidValue\"]\n\n # multi field search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count>40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # multi field and multi operator search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count>40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # test `greater_than` works as expected\n resp = app.get(f'/{model}?count>42')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_gt_with_nested_date(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)>\"2019-04-19\"')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_gte(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # single field search\n resp = app.get(f'/{model}?count>=40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # search for string value\n resp = app.get(f'/{model}?status>=\"ok\"')\n assert resp.status_code == 400\n assert 
get_error_codes(resp.json()) == [\"InvalidValue\"]\n\n # multi field search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count>=40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # multi field and multi operator search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count>=40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # test `greater_than` works as expected\n resp = app.get(f'/{model}?count>=42')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_ge_with_nested_date(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)>=\"2019-04-20\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_lt(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # single field search\n resp = app.get(f'/{model}?count<12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n # search for string value\n resp = app.get(f'/{model}?status<\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"InvalidValue\"]\n\n # multi field search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count<20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n # multi field and multi operator search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count<50&report_type.lower()=\"vmi\"')\n data = 
resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # test `lower_than` works as expected\n resp = app.get(f'/{model}?count<10')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_lt_with_nested_date(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)<\"2019-02-02\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_lte(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # single field search\n resp = app.get(f'/{model}?count<=12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n # search for string value\n resp = app.get(f'/{model}?status<=\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"InvalidValue\"]\n\n # multi field search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count<=20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n # multi field and multi operator search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count<=50&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # test `lower_than` works as expected\n resp = app.get(f'/{model}?count<=10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_le_with_nested_date(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n 
resp = app.get(f'/{model}?recurse(create_date)<=\"2019-02-01\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_ne(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n\n # single field search\n resp = app.get(f'/{model}?status!=\"invalid\"')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_ne_lower(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n # single field search, case insensitive\n resp = app.get(f'/{model}?status.lower()!=\"ok\"')\n assert ids(resp) == [1, 2]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_ne_multiple_props(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n # multi field search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count!=10&count!=42')\n assert ids(resp) == [2]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_ne_multiple_props_and_logic(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n # multi field and multi operator search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?status.lower()!=\"ok\"&report_type.lower()=\"stv\"')\n assert ids(resp) == [2]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_ne_nested(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n # test `ne` with nested structure\n resp = app.get(f'/{model}?notes.create_date!=\"2019-02-01\"&status!=\"invalid\"')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models(\n 
'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_ne_nested_missing_data(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n # test `ne` with nested structures and not full data in all resources\n resp = app.get(f'/{model}?operating_licenses.license_types!=\"valid\"')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_contains(model, context, app, mocker):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # single field search\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_contains_case_insensitive(model, context, app, mocker):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n # single field search, case insensitive\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_contains_multi_field(model, context, app, mocker):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # multi field search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?status.contains(\"valid\")&report_type.lower().contains(\"tv\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?status.contains(\"valid\")&report_type.contains(\"TV\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n # multi field search\n # test if operators are joined with 
AND logic for same field\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")&report_type.lower().contains(\"mi\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # multi field and multi operator search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?status.contains(\"valid\")&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_contains_type_check(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date).contains(\"2019-04-20\")')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"InvalidValue\"]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_contains_with_select(model, context, app, mocker):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n\n # `contains` with select\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")&select(count)')\n assert resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {\n 'count': 42,\n }\n\n # `contains` with select and always_show_id\n mocker.patch.object(context.get('config'), 'always_show_id', True)\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")&select(count)')\n assert resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {\n '_id': r2['_id'],\n 'count': 42,\n }\n\n # `contains` with always_show_id should return just id\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n assert resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {\n '_id': r2['_id'],\n }\n\n\n@pytest.mark.models(\n 
'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_select_unknown_property(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_select_unknown_property_in_object(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(notes.nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_startswith(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # single field search\n resp = app.get(f'/{model}?report_type.startswith(\"VM\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # single field search, case insensitive\n resp = app.get(f'/{model}?report_type.lower().startswith(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # multi field search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?status.startswith(\"in\")&report_type.lower().startswith(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # multi field and multi operator search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?report_type.lower().startswith(\"st\")&status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n # sanity check that `startswith` searches from the start\n resp = app.get(f'/{model}?status.startswith(\"valid\")')\n data = resp.json()['_data']\n assert len(data) == 0\n\n # `startswith` type check\n resp = 
app.get(f'/{model}?notes.create_date.startswith(\"2019-04-20\")')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"InvalidValue\"]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_nested(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # nested `exact` search\n resp = app.get(f'/{model}?notes.note=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n # nested `exact` search, case insensitive\n resp = app.get(f'/{model}?notes.note.lower()=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n # nested `exact` search with dates\n resp = app.get(f'/{model}?notes.create_date=\"2019-03-14\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n # nested `gt` search\n resp = app.get(f'/{model}?notes.create_date>\"2019-04-01\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # nested non existant field\n resp = app.get(f'/{model}?notes.foo.bar=\"baz\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"FieldNotInResource\"]\n\n # nested `contains` search\n resp = app.get(f'/{model}?notes.note.contains(\"bar\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_nested_contains(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?operating_licenses.license_types.contains(\"lid\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_nested_startswith(model, context, app):\n app.authmodel(model, ['search'])\n r1, r2, r3, = 
_push_test_data(app, model)\n\n # nested `startswith` search\n resp = app.get(f'/{model}?notes.note.startswith(\"fo\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n resp = app.get(f'/{model}?operating_licenses.license_types.startswith(\"exp\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\ndef ids(resources):\n if isinstance(resources, (requests.models.Response, httpx.Response)):\n resp = resources\n assert resp.status_code == 200, resp.json()\n resources = resp.json()['_data']\n return [r['_id'] for r in resources]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_or(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count=42|status.lower()=\"ok\"')\n assert ids(resp) == [0, 1]\n\n resp = app.get(f'/{model}?count<=10|count=13')\n assert ids(resp) == [0, 2]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_nested_recurse(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(note)=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_nested_recurse_lower(model, context, app):\n r1, r2, r3, = ids(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(status).lower()=\"ok\"')\n assert ids(resp) == [r1]\n\n\n@pytest.mark.models(\n 'backends/mongo/recurse',\n 'backends/postgres/recurse',\n)\ndef test_search_nested_recurse_multiple_props(model, context, app):\n r1, r2, = ids(_push_test_data(app, model, [\n {\n 'title': \"Org\",\n 'country': 'fi',\n 'govids': [\n {'govid': '1', 'country': 'fi'},\n {'govid': '2', 
'country': 'se'},\n ]\n },\n {\n 'title': \"Org\",\n 'country': 'no',\n 'govids': [\n {'govid': '3', 'country': 'no'},\n ]\n },\n ]))\n app.authmodel(model, ['search'])\n\n resp = app.get(f'/{model}?recurse(country)=\"se\"')\n assert ids(resp) == [r1]\n\n resp = app.get(f'/{model}?recurse(country)=\"fi\"')\n assert ids(resp) == [r1]\n\n resp = app.get(f'/{model}?recurse(country)=\"no\"')\n assert ids(resp) == [r2]\n\n\n@pytest.mark.models(\n 'backends/mongo/recurse',\n 'backends/postgres/recurse',\n)\ndef test_search_recurse_multiple_props_lower(model, app):\n r1, r2, = ids(_push_test_data(app, model, [\n {\n 'title': \"Org\",\n 'country': 'fi',\n 'govids': [\n {'govid': '1', 'country': 'FI'},\n {'govid': '2', 'country': 'SE'},\n ]\n },\n {\n 'title': \"Org\",\n 'country': 'no',\n 'govids': [\n {'govid': '3', 'country': 'NO'},\n ]\n },\n ]))\n app.authmodel(model, ['search'])\n\n resp = app.get(f'/{model}?recurse(country).lower()=\"se\"')\n assert ids(resp) == [r1]\n\n resp = app.get(f'/{model}?recurse(country).lower()=\"fi\"')\n assert ids(resp) == [r1]\n\n resp = app.get(f'/{model}?recurse(country).lower()=\"no\"')\n assert ids(resp) == [r2]\n\n\n# TODO: add mongo\ndef test_search_any(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",count,10,42)')\n assert ids(resp) == [0, 1]\n\n resp = app.get(f'/{model}?any(\"ne\",count,42)')\n assert ids(resp) == [0, 2]\n\n\n# TODO: add mongo\ndef test_search_any_in_list(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",notes.note,\"hello\",\"world\")')\n assert sorted(ids(resp)) == [0, 1]\n\n resp = app.get(f'/{model}?any(\"ne\",notes.note,\"foo bar\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\n# TODO: add mongo\ndef test_search_any_in_list_of_scalars(app):\n model = 'backends/postgres/report'\n 
app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",operating_licenses.license_types,\"valid\",\"invalid\",\"expired\")')\n assert sorted(ids(resp)) == [0, 1]\n\n resp = app.get(f'/{model}?any(\"ne\",operating_licenses.license_types,\"expired\")')\n assert sorted(ids(resp)) == [0]\n\n\n# TODO: add mongo\ndef test_search_any_recurse(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status),\"OK\",\"none\")')\n assert ids(resp) == [0]\n\n\n# TODO: add mongo\ndef test_search_any_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status).lower(),\"ok\",\"none\")')\n assert ids(resp) == [0]\n\n\n# TODO: add mongo\ndef test_search_any_contains(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",status,\"inv\",\"val\",\"lid\")')\n assert sorted(ids(resp)) == [1, 2]\n\n\n# TODO: add mongo\ndef test_search_any_contains_nested(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",notes.note,\"hel\",\"wor\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\n# TODO: add mongo\ndef test_search_any_contains_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",recurse(status).lower(),\"o\",\"k\")')\n assert sorted(ids(resp)) == [0]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_id_contains(model, app):\n app.authmodel(model, ['search', 'getall'])\n ids = 
RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_id.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n\n subid = ids[0][5:10]\n resp = app.get(f'/{model}?_id.contains(\"{subid}\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_id_not_contains(model, app):\n app.authmodel(model, ['search', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_id.contains(\"AAAAA\")')\n assert ids(resp) == []\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_id_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][:5]\n resp = app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_id_not_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][5:10]\n resp = app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == []\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_revision_contains(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_revision.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_revision_startswith(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n id0 = ids[0]\n resp = app.get(f'/{model}/{id0}')\n revision = resp.json()['_revision'][:5]\n resp = app.get(f'/{model}?_revision.startswith(\"{revision}\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef 
test_search_group(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?(report_type=\"STV\"&status=\"OK\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_select_in_or(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?(report_type=\"STV\"|status=\"OK\")&select(_id)')\n # XXX: Flaky test, some times it gives [2, 0], don't know why.\n assert ids(resp) == [0, 2]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_lower_contains(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?report_type.lower().contains(\"st\")')\n # XXX: Flaky test, some times it gives [2, 0], don't know why.\n assert ids(resp) == [0, 2]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_null(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model, [\n {'status': 'OK'},\n {},\n ]))\n resp = app.get(f'/{model}?status=null')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_not_null(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model, [\n {'status': 'OK'},\n {},\n ]))\n resp = app.get(f'/{model}?status!=null')\n assert ids(resp) == [0]\n\n\n@pytest.mark.parametrize('backend', ['default', 'mongo'])\ndef test_extra_fields(postgresql, mongo, backend, rc, tmp_path, request):\n rc = rc.fork({\n 'backends': [backend],\n 'manifests.default': {\n 'type': 'tabular',\n 'path': str(tmp_path / 'manifest.csv'),\n 'backend': backend,\n },\n })\n\n # Create data into a extrafields model with code and name properties.\n create_tabular_manifest(tmp_path 
/ 'manifest.csv', striptable('''\n m | property | type\n extrafields |\n | code | string\n | name | string\n '''))\n context = create_test_context(rc)\n request.addfinalizer(context.wipe_all)\n app = create_test_client(context)\n app.authmodel('extrafields', ['insert'])\n resp = app.post('/extrafields', json={'_data': [\n {'_op': 'insert', 'code': 'lt', 'name': 'Lietuva'},\n {'_op': 'insert', 'code': 'lv', 'name': 'Latvija'},\n {'_op': 'insert', 'code': 'ee', 'name': 'Estija'},\n ]})\n assert resp.status_code == 200, resp.json()\n\n # Now try to read from same model, but loaded with just one property.\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''\n m | property | type\n extrafields |\n | name | string\n '''))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('extrafields', ['getall', 'getone'])\n resp = app.get('/extrafields')\n assert listdata(resp, sort=True) == [\n \"Estija\",\n \"Latvija\",\n \"Lietuva\",\n ]\n\n pk = resp.json()['_data'][0]['_id']\n resp = app.get(f'/extrafields/{pk}')\n data = resp.json()\n assert resp.status_code == 200, data\n assert take(data) == {'name': 'Lietuva'}\n\n\n@pytest.mark.parametrize('backend', ['mongo'])\ndef test_missing_fields(postgresql, mongo, backend, rc, tmp_path):\n rc = rc.fork({\n 'backends': [backend],\n 'manifests.default': {\n 'type': 'tabular',\n 'path': str(tmp_path / 'manifest.csv'),\n 'backend': backend,\n },\n })\n\n # Create data into a extrafields model with code and name properties.\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''\n m | property | type\n missingfields |\n | code | string\n '''))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('missingfields', ['insert'])\n resp = app.post('/missingfields', json={'_data': [\n {'_op': 'insert', 'code': 'lt'},\n {'_op': 'insert', 'code': 'lv'},\n {'_op': 'insert', 'code': 'ee'},\n ]})\n assert resp.status_code == 200, resp.json()\n\n # Now try 
to read from same model, but loaded with just one property.\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''\n m | property | type\n missingfields |\n | code | string\n | name | string\n '''))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('missingfields', ['search', 'getone'])\n resp = app.get('/missingfields?select(_id,code,name)')\n assert listdata(resp, sort=True) == [\n ('ee', None),\n ('lt', None),\n ('lv', None),\n ]\n\n pk = resp.json()['_data'][0]['_id']\n resp = app.get(f'/missingfields/{pk}')\n data = resp.json()\n assert resp.status_code == 200, data\n assert take(data) == {'code': 'lt'}\n\n\ndef test_base_select(rc, postgresql, request):\n context = bootstrap_manifest(rc, '''\n d | r | b | m | property | type | ref\n datasets/gov/example/base | |\n | |\n | | | Location | |\n | | | | id | integer |\n | | | | name | string |\n | | | | type | string |\n | |\n | | Location | |\n | | | City | |\n | | | | id | |\n | | | | name | string |\n | | | | population | integer |\n ''', backend=postgresql, request=request)\n\n app = create_test_client(context)\n app.authorize(['spinta_set_meta_fields'])\n app.authmodel('datasets/gov/example/base/Location', ['insert', 'delete'])\n app.authmodel('datasets/gov/example/base/City', ['insert', 'delete', 'getall', 'search'])\n\n _id = str(uuid.uuid4())\n app.post('/datasets/gov/example/base/Location', json={\n '_id': _id,\n 'id': 1,\n 'name': 'Base location',\n 'type': 'city'\n })\n app.post('/datasets/gov/example/base/City', json={\n '_id': _id,\n 'name': 'City',\n 'population': 100\n })\n\n resp = app.get('/datasets/gov/example/base/City?select(id,name,_base.name,population,_base.type)')\n assert resp.json()['_data'] == [\n {\n '_base': {'name': 'Base location', 'type': 'city'},\n 'id': 1,\n 'name': 'City',\n 'population': 100\n }\n ]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_select_revision(model, app):\n 
app.authmodel(model, ['search', 'getone', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n id0 = ids[0]\n resp = app.get(f'/{model}/{id0}')\n revision = resp.json()['_revision']\n resp = app.get(f'/{model}/:format/jsonl?limit(1)&select(_revision)')\n assert json.loads(resp.content) == {\n '_revision': revision\n }\n",
"step-ids": [
43,
48,
56,
58,
63
]
}
|
[
43,
48,
56,
58,
63
] |
#! /usr/bin/env python3
# Public names re-exported by this module (some, e.g. 'build', are
# defined further down in the file, past this chunk).
__all__ = [
    'FrameCorners',
    'CornerStorage',
    'build',
    'dump',
    'load',
    'draw',
    'without_short_tracks'
]
import click
import cv2
import numpy as np
import pims
from _corners import FrameCorners, CornerStorage, StorageImpl
from _corners import dump, load, draw, without_short_tracks, create_cli
class _CornerStorageBuilder:
def __init__(self, progress_indicator=None):
self._progress_indicator = progress_indicator
self._corners = dict()
def set_corners_at_frame(self, frame, corners):
self._corners[frame] = corners
if self._progress_indicator is not None:
self._progress_indicator.update(1)
def build_corner_storage(self):
return StorageImpl(item[1] for item in sorted(self._corners.items()))
def to_uint8_image(img):
img = img * 255.0
img = np.round(img)
return img.astype(np.uint8)
class CornerTracker:
MAX_CORNERS = 1300
INITIAL_QUALITY_LEVEL = 0.03
QUALITY_LEVEL = 0.15
MIN_DISTANCE = 6
BLOCK_SIZE = 5
CIRCLE_SIZE = 14
MAX_LEVEL_LK = 2
TERM_CRITERIA = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)
def __init__(self):
self.total_corners = 0
def get_circles_mask(self, shape, points):
mask = np.full(shape, 255, dtype=np.uint8)
for x, y in points:
cv2.circle(mask,
center=(x, y),
radius=self.MIN_DISTANCE,
color=0,
thickness=-1)
return mask
def find_new_corners(self, img, num_corners=MAX_CORNERS, mask=None, quality_level=INITIAL_QUALITY_LEVEL):
points = cv2.goodFeaturesToTrack(img,
mask=mask,
maxCorners=num_corners,
qualityLevel=quality_level,
minDistance=self.MIN_DISTANCE,
blockSize=self.BLOCK_SIZE)
if points is None:
return None, None
num_points = points.shape[0]
sizes = np.array([self.CIRCLE_SIZE for _ in range(num_points)])
return points, sizes
def get_corners(self, new_img, old_img = None, old_corners=None):
if old_img is None:
points, sizes = self.find_new_corners(new_img)
ids = np.arange(len(points))
points = points.reshape((-1, 2))
self.total_corners = len(points)
return FrameCorners(ids, points, sizes)
else:
ids = old_corners.ids
points = old_corners.points
sizes = old_corners.sizes
nextPts, status, err = cv2.calcOpticalFlowPyrLK(to_uint8_image(old_img),
to_uint8_image(new_img),
prevPts=points,
nextPts=None,
winSize=(self.CIRCLE_SIZE, self.CIRCLE_SIZE),
maxLevel=self.MAX_LEVEL_LK,
criteria=self.TERM_CRITERIA)
status = status.squeeze()
found = np.where(status == 1)
ids = ids[found]
points = nextPts[found]
sizes = sizes[found]
mask = self.get_circles_mask(new_img.shape, points)
if len(points) < self.MAX_CORNERS:
new_points, new_sizes = self.find_new_corners(new_img,
self.MAX_CORNERS - len(points),
mask,
self.QUALITY_LEVEL)
if new_points is not None:
new_ids = np.arange(self.total_corners, self.total_corners + len(new_points))
new_ids = new_ids.reshape((-1, 1))
new_points = new_points.reshape((-1, 2))
new_sizes = new_sizes.reshape((-1, 1))
self.total_corners += len(new_points)
ids = np.concatenate([ids, new_ids])
points = np.concatenate([points, new_points])
sizes = np.concatenate([sizes, new_sizes])
points = points.reshape((-1, 2))
return FrameCorners(ids, points, sizes)
def _build_impl(frame_sequence: pims.FramesSequence,
builder: _CornerStorageBuilder) -> None:
cornerTracker = CornerTracker()
image_0 = frame_sequence[0]
corners = cornerTracker.get_corners(image_0)
builder.set_corners_at_frame(0, corners)
for frame, image_1 in enumerate(frame_sequence[1:], 1):
corners = cornerTracker.get_corners(image_1, image_0, corners)
builder.set_corners_at_frame(frame, corners)
image_0 = image_1
def build(frame_sequence: pims.FramesSequence,
progress: bool = True) -> CornerStorage:
"""
Build corners for all frames of a frame sequence.
:param frame_sequence: grayscale float32 frame sequence.
:param progress: enable/disable building progress bar.
:return: corners for all frames of given sequence.
"""
if progress:
with click.progressbar(length=len(frame_sequence),
label='Calculating corners') as progress_bar:
builder = _CornerStorageBuilder(progress_bar)
_build_impl(frame_sequence, builder)
else:
builder = _CornerStorageBuilder()
_build_impl(frame_sequence, builder)
corner_storage = builder.build_corner_storage()
final_storage = without_short_tracks(corner_storage, min_len=20)
return final_storage
if __name__ == '__main__':
create_cli(build)() # pylint:disable=no-value-for-parameter
|
normal
|
{
"blob_id": "0b5fb649dc421187820677ce75f3cd0e804c18a3",
"index": 7055,
"step-1": "<mask token>\n\n\nclass _CornerStorageBuilder:\n\n def __init__(self, progress_indicator=None):\n self._progress_indicator = progress_indicator\n self._corners = dict()\n\n def set_corners_at_frame(self, frame, corners):\n self._corners[frame] = corners\n if self._progress_indicator is not None:\n self._progress_indicator.update(1)\n\n def build_corner_storage(self):\n return StorageImpl(item[1] for item in sorted(self._corners.items()))\n\n\n<mask token>\n\n\nclass CornerTracker:\n MAX_CORNERS = 1300\n INITIAL_QUALITY_LEVEL = 0.03\n QUALITY_LEVEL = 0.15\n MIN_DISTANCE = 6\n BLOCK_SIZE = 5\n CIRCLE_SIZE = 14\n MAX_LEVEL_LK = 2\n TERM_CRITERIA = cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03\n\n def __init__(self):\n self.total_corners = 0\n\n def get_circles_mask(self, shape, points):\n mask = np.full(shape, 255, dtype=np.uint8)\n for x, y in points:\n cv2.circle(mask, center=(x, y), radius=self.MIN_DISTANCE, color\n =0, thickness=-1)\n return mask\n\n def find_new_corners(self, img, num_corners=MAX_CORNERS, mask=None,\n quality_level=INITIAL_QUALITY_LEVEL):\n points = cv2.goodFeaturesToTrack(img, mask=mask, maxCorners=\n num_corners, qualityLevel=quality_level, minDistance=self.\n MIN_DISTANCE, blockSize=self.BLOCK_SIZE)\n if points is None:\n return None, None\n num_points = points.shape[0]\n sizes = np.array([self.CIRCLE_SIZE for _ in range(num_points)])\n return points, sizes\n\n def get_corners(self, new_img, old_img=None, old_corners=None):\n if old_img is None:\n points, sizes = self.find_new_corners(new_img)\n ids = np.arange(len(points))\n points = points.reshape((-1, 2))\n self.total_corners = len(points)\n return FrameCorners(ids, points, sizes)\n else:\n ids = old_corners.ids\n points = old_corners.points\n sizes = old_corners.sizes\n nextPts, status, err = cv2.calcOpticalFlowPyrLK(to_uint8_image(\n old_img), to_uint8_image(new_img), prevPts=points, nextPts=\n None, winSize=(self.CIRCLE_SIZE, self.CIRCLE_SIZE),\n 
maxLevel=self.MAX_LEVEL_LK, criteria=self.TERM_CRITERIA)\n status = status.squeeze()\n found = np.where(status == 1)\n ids = ids[found]\n points = nextPts[found]\n sizes = sizes[found]\n mask = self.get_circles_mask(new_img.shape, points)\n if len(points) < self.MAX_CORNERS:\n new_points, new_sizes = self.find_new_corners(new_img, self\n .MAX_CORNERS - len(points), mask, self.QUALITY_LEVEL)\n if new_points is not None:\n new_ids = np.arange(self.total_corners, self.\n total_corners + len(new_points))\n new_ids = new_ids.reshape((-1, 1))\n new_points = new_points.reshape((-1, 2))\n new_sizes = new_sizes.reshape((-1, 1))\n self.total_corners += len(new_points)\n ids = np.concatenate([ids, new_ids])\n points = np.concatenate([points, new_points])\n sizes = np.concatenate([sizes, new_sizes])\n points = points.reshape((-1, 2))\n return FrameCorners(ids, points, sizes)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass _CornerStorageBuilder:\n\n def __init__(self, progress_indicator=None):\n self._progress_indicator = progress_indicator\n self._corners = dict()\n\n def set_corners_at_frame(self, frame, corners):\n self._corners[frame] = corners\n if self._progress_indicator is not None:\n self._progress_indicator.update(1)\n\n def build_corner_storage(self):\n return StorageImpl(item[1] for item in sorted(self._corners.items()))\n\n\ndef to_uint8_image(img):\n img = img * 255.0\n img = np.round(img)\n return img.astype(np.uint8)\n\n\nclass CornerTracker:\n MAX_CORNERS = 1300\n INITIAL_QUALITY_LEVEL = 0.03\n QUALITY_LEVEL = 0.15\n MIN_DISTANCE = 6\n BLOCK_SIZE = 5\n CIRCLE_SIZE = 14\n MAX_LEVEL_LK = 2\n TERM_CRITERIA = cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03\n\n def __init__(self):\n self.total_corners = 0\n\n def get_circles_mask(self, shape, points):\n mask = np.full(shape, 255, dtype=np.uint8)\n for x, y in points:\n cv2.circle(mask, center=(x, y), radius=self.MIN_DISTANCE, color\n =0, thickness=-1)\n return mask\n\n def find_new_corners(self, img, num_corners=MAX_CORNERS, mask=None,\n quality_level=INITIAL_QUALITY_LEVEL):\n points = cv2.goodFeaturesToTrack(img, mask=mask, maxCorners=\n num_corners, qualityLevel=quality_level, minDistance=self.\n MIN_DISTANCE, blockSize=self.BLOCK_SIZE)\n if points is None:\n return None, None\n num_points = points.shape[0]\n sizes = np.array([self.CIRCLE_SIZE for _ in range(num_points)])\n return points, sizes\n\n def get_corners(self, new_img, old_img=None, old_corners=None):\n if old_img is None:\n points, sizes = self.find_new_corners(new_img)\n ids = np.arange(len(points))\n points = points.reshape((-1, 2))\n self.total_corners = len(points)\n return FrameCorners(ids, points, sizes)\n else:\n ids = old_corners.ids\n points = old_corners.points\n sizes = old_corners.sizes\n nextPts, status, err = cv2.calcOpticalFlowPyrLK(to_uint8_image(\n old_img), to_uint8_image(new_img), prevPts=points, 
nextPts=\n None, winSize=(self.CIRCLE_SIZE, self.CIRCLE_SIZE),\n maxLevel=self.MAX_LEVEL_LK, criteria=self.TERM_CRITERIA)\n status = status.squeeze()\n found = np.where(status == 1)\n ids = ids[found]\n points = nextPts[found]\n sizes = sizes[found]\n mask = self.get_circles_mask(new_img.shape, points)\n if len(points) < self.MAX_CORNERS:\n new_points, new_sizes = self.find_new_corners(new_img, self\n .MAX_CORNERS - len(points), mask, self.QUALITY_LEVEL)\n if new_points is not None:\n new_ids = np.arange(self.total_corners, self.\n total_corners + len(new_points))\n new_ids = new_ids.reshape((-1, 1))\n new_points = new_points.reshape((-1, 2))\n new_sizes = new_sizes.reshape((-1, 1))\n self.total_corners += len(new_points)\n ids = np.concatenate([ids, new_ids])\n points = np.concatenate([points, new_points])\n sizes = np.concatenate([sizes, new_sizes])\n points = points.reshape((-1, 2))\n return FrameCorners(ids, points, sizes)\n\n\n<mask token>\n\n\ndef build(frame_sequence: pims.FramesSequence, progress: bool=True\n ) ->CornerStorage:\n \"\"\"\n Build corners for all frames of a frame sequence.\n :param frame_sequence: grayscale float32 frame sequence.\n :param progress: enable/disable building progress bar.\n :return: corners for all frames of given sequence.\n \"\"\"\n if progress:\n with click.progressbar(length=len(frame_sequence), label=\n 'Calculating corners') as progress_bar:\n builder = _CornerStorageBuilder(progress_bar)\n _build_impl(frame_sequence, builder)\n else:\n builder = _CornerStorageBuilder()\n _build_impl(frame_sequence, builder)\n corner_storage = builder.build_corner_storage()\n final_storage = without_short_tracks(corner_storage, min_len=20)\n return final_storage\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass _CornerStorageBuilder:\n\n def __init__(self, progress_indicator=None):\n self._progress_indicator = progress_indicator\n self._corners = dict()\n\n def set_corners_at_frame(self, frame, corners):\n self._corners[frame] = corners\n if self._progress_indicator is not None:\n self._progress_indicator.update(1)\n\n def build_corner_storage(self):\n return StorageImpl(item[1] for item in sorted(self._corners.items()))\n\n\ndef to_uint8_image(img):\n img = img * 255.0\n img = np.round(img)\n return img.astype(np.uint8)\n\n\nclass CornerTracker:\n MAX_CORNERS = 1300\n INITIAL_QUALITY_LEVEL = 0.03\n QUALITY_LEVEL = 0.15\n MIN_DISTANCE = 6\n BLOCK_SIZE = 5\n CIRCLE_SIZE = 14\n MAX_LEVEL_LK = 2\n TERM_CRITERIA = cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03\n\n def __init__(self):\n self.total_corners = 0\n\n def get_circles_mask(self, shape, points):\n mask = np.full(shape, 255, dtype=np.uint8)\n for x, y in points:\n cv2.circle(mask, center=(x, y), radius=self.MIN_DISTANCE, color\n =0, thickness=-1)\n return mask\n\n def find_new_corners(self, img, num_corners=MAX_CORNERS, mask=None,\n quality_level=INITIAL_QUALITY_LEVEL):\n points = cv2.goodFeaturesToTrack(img, mask=mask, maxCorners=\n num_corners, qualityLevel=quality_level, minDistance=self.\n MIN_DISTANCE, blockSize=self.BLOCK_SIZE)\n if points is None:\n return None, None\n num_points = points.shape[0]\n sizes = np.array([self.CIRCLE_SIZE for _ in range(num_points)])\n return points, sizes\n\n def get_corners(self, new_img, old_img=None, old_corners=None):\n if old_img is None:\n points, sizes = self.find_new_corners(new_img)\n ids = np.arange(len(points))\n points = points.reshape((-1, 2))\n self.total_corners = len(points)\n return FrameCorners(ids, points, sizes)\n else:\n ids = old_corners.ids\n points = old_corners.points\n sizes = old_corners.sizes\n nextPts, status, err = cv2.calcOpticalFlowPyrLK(to_uint8_image(\n old_img), to_uint8_image(new_img), prevPts=points, 
nextPts=\n None, winSize=(self.CIRCLE_SIZE, self.CIRCLE_SIZE),\n maxLevel=self.MAX_LEVEL_LK, criteria=self.TERM_CRITERIA)\n status = status.squeeze()\n found = np.where(status == 1)\n ids = ids[found]\n points = nextPts[found]\n sizes = sizes[found]\n mask = self.get_circles_mask(new_img.shape, points)\n if len(points) < self.MAX_CORNERS:\n new_points, new_sizes = self.find_new_corners(new_img, self\n .MAX_CORNERS - len(points), mask, self.QUALITY_LEVEL)\n if new_points is not None:\n new_ids = np.arange(self.total_corners, self.\n total_corners + len(new_points))\n new_ids = new_ids.reshape((-1, 1))\n new_points = new_points.reshape((-1, 2))\n new_sizes = new_sizes.reshape((-1, 1))\n self.total_corners += len(new_points)\n ids = np.concatenate([ids, new_ids])\n points = np.concatenate([points, new_points])\n sizes = np.concatenate([sizes, new_sizes])\n points = points.reshape((-1, 2))\n return FrameCorners(ids, points, sizes)\n\n\ndef _build_impl(frame_sequence: pims.FramesSequence, builder:\n _CornerStorageBuilder) ->None:\n cornerTracker = CornerTracker()\n image_0 = frame_sequence[0]\n corners = cornerTracker.get_corners(image_0)\n builder.set_corners_at_frame(0, corners)\n for frame, image_1 in enumerate(frame_sequence[1:], 1):\n corners = cornerTracker.get_corners(image_1, image_0, corners)\n builder.set_corners_at_frame(frame, corners)\n image_0 = image_1\n\n\ndef build(frame_sequence: pims.FramesSequence, progress: bool=True\n ) ->CornerStorage:\n \"\"\"\n Build corners for all frames of a frame sequence.\n :param frame_sequence: grayscale float32 frame sequence.\n :param progress: enable/disable building progress bar.\n :return: corners for all frames of given sequence.\n \"\"\"\n if progress:\n with click.progressbar(length=len(frame_sequence), label=\n 'Calculating corners') as progress_bar:\n builder = _CornerStorageBuilder(progress_bar)\n _build_impl(frame_sequence, builder)\n else:\n builder = _CornerStorageBuilder()\n _build_impl(frame_sequence, 
builder)\n corner_storage = builder.build_corner_storage()\n final_storage = without_short_tracks(corner_storage, min_len=20)\n return final_storage\n\n\nif __name__ == '__main__':\n create_cli(build)()\n",
"step-4": "__all__ = ['FrameCorners', 'CornerStorage', 'build', 'dump', 'load', 'draw',\n 'without_short_tracks']\n<mask token>\n\n\nclass _CornerStorageBuilder:\n\n def __init__(self, progress_indicator=None):\n self._progress_indicator = progress_indicator\n self._corners = dict()\n\n def set_corners_at_frame(self, frame, corners):\n self._corners[frame] = corners\n if self._progress_indicator is not None:\n self._progress_indicator.update(1)\n\n def build_corner_storage(self):\n return StorageImpl(item[1] for item in sorted(self._corners.items()))\n\n\ndef to_uint8_image(img):\n img = img * 255.0\n img = np.round(img)\n return img.astype(np.uint8)\n\n\nclass CornerTracker:\n MAX_CORNERS = 1300\n INITIAL_QUALITY_LEVEL = 0.03\n QUALITY_LEVEL = 0.15\n MIN_DISTANCE = 6\n BLOCK_SIZE = 5\n CIRCLE_SIZE = 14\n MAX_LEVEL_LK = 2\n TERM_CRITERIA = cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03\n\n def __init__(self):\n self.total_corners = 0\n\n def get_circles_mask(self, shape, points):\n mask = np.full(shape, 255, dtype=np.uint8)\n for x, y in points:\n cv2.circle(mask, center=(x, y), radius=self.MIN_DISTANCE, color\n =0, thickness=-1)\n return mask\n\n def find_new_corners(self, img, num_corners=MAX_CORNERS, mask=None,\n quality_level=INITIAL_QUALITY_LEVEL):\n points = cv2.goodFeaturesToTrack(img, mask=mask, maxCorners=\n num_corners, qualityLevel=quality_level, minDistance=self.\n MIN_DISTANCE, blockSize=self.BLOCK_SIZE)\n if points is None:\n return None, None\n num_points = points.shape[0]\n sizes = np.array([self.CIRCLE_SIZE for _ in range(num_points)])\n return points, sizes\n\n def get_corners(self, new_img, old_img=None, old_corners=None):\n if old_img is None:\n points, sizes = self.find_new_corners(new_img)\n ids = np.arange(len(points))\n points = points.reshape((-1, 2))\n self.total_corners = len(points)\n return FrameCorners(ids, points, sizes)\n else:\n ids = old_corners.ids\n points = old_corners.points\n sizes = old_corners.sizes\n nextPts, 
status, err = cv2.calcOpticalFlowPyrLK(to_uint8_image(\n old_img), to_uint8_image(new_img), prevPts=points, nextPts=\n None, winSize=(self.CIRCLE_SIZE, self.CIRCLE_SIZE),\n maxLevel=self.MAX_LEVEL_LK, criteria=self.TERM_CRITERIA)\n status = status.squeeze()\n found = np.where(status == 1)\n ids = ids[found]\n points = nextPts[found]\n sizes = sizes[found]\n mask = self.get_circles_mask(new_img.shape, points)\n if len(points) < self.MAX_CORNERS:\n new_points, new_sizes = self.find_new_corners(new_img, self\n .MAX_CORNERS - len(points), mask, self.QUALITY_LEVEL)\n if new_points is not None:\n new_ids = np.arange(self.total_corners, self.\n total_corners + len(new_points))\n new_ids = new_ids.reshape((-1, 1))\n new_points = new_points.reshape((-1, 2))\n new_sizes = new_sizes.reshape((-1, 1))\n self.total_corners += len(new_points)\n ids = np.concatenate([ids, new_ids])\n points = np.concatenate([points, new_points])\n sizes = np.concatenate([sizes, new_sizes])\n points = points.reshape((-1, 2))\n return FrameCorners(ids, points, sizes)\n\n\ndef _build_impl(frame_sequence: pims.FramesSequence, builder:\n _CornerStorageBuilder) ->None:\n cornerTracker = CornerTracker()\n image_0 = frame_sequence[0]\n corners = cornerTracker.get_corners(image_0)\n builder.set_corners_at_frame(0, corners)\n for frame, image_1 in enumerate(frame_sequence[1:], 1):\n corners = cornerTracker.get_corners(image_1, image_0, corners)\n builder.set_corners_at_frame(frame, corners)\n image_0 = image_1\n\n\ndef build(frame_sequence: pims.FramesSequence, progress: bool=True\n ) ->CornerStorage:\n \"\"\"\n Build corners for all frames of a frame sequence.\n :param frame_sequence: grayscale float32 frame sequence.\n :param progress: enable/disable building progress bar.\n :return: corners for all frames of given sequence.\n \"\"\"\n if progress:\n with click.progressbar(length=len(frame_sequence), label=\n 'Calculating corners') as progress_bar:\n builder = _CornerStorageBuilder(progress_bar)\n 
_build_impl(frame_sequence, builder)\n else:\n builder = _CornerStorageBuilder()\n _build_impl(frame_sequence, builder)\n corner_storage = builder.build_corner_storage()\n final_storage = without_short_tracks(corner_storage, min_len=20)\n return final_storage\n\n\nif __name__ == '__main__':\n create_cli(build)()\n",
"step-5": "#! /usr/bin/env python3\n\n__all__ = [\n 'FrameCorners',\n 'CornerStorage',\n 'build',\n 'dump',\n 'load',\n 'draw',\n 'without_short_tracks'\n]\n\nimport click\nimport cv2\nimport numpy as np\nimport pims\n\nfrom _corners import FrameCorners, CornerStorage, StorageImpl\nfrom _corners import dump, load, draw, without_short_tracks, create_cli\n\n\nclass _CornerStorageBuilder:\n\n def __init__(self, progress_indicator=None):\n self._progress_indicator = progress_indicator\n self._corners = dict()\n\n def set_corners_at_frame(self, frame, corners):\n self._corners[frame] = corners\n if self._progress_indicator is not None:\n self._progress_indicator.update(1)\n\n def build_corner_storage(self):\n return StorageImpl(item[1] for item in sorted(self._corners.items()))\n\n\ndef to_uint8_image(img):\n img = img * 255.0\n img = np.round(img)\n return img.astype(np.uint8)\n\n\nclass CornerTracker:\n MAX_CORNERS = 1300\n INITIAL_QUALITY_LEVEL = 0.03\n QUALITY_LEVEL = 0.15\n MIN_DISTANCE = 6\n BLOCK_SIZE = 5\n CIRCLE_SIZE = 14\n MAX_LEVEL_LK = 2\n TERM_CRITERIA = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)\n\n def __init__(self):\n self.total_corners = 0\n\n def get_circles_mask(self, shape, points):\n mask = np.full(shape, 255, dtype=np.uint8)\n for x, y in points:\n cv2.circle(mask,\n center=(x, y),\n radius=self.MIN_DISTANCE,\n color=0,\n thickness=-1)\n return mask\n\n def find_new_corners(self, img, num_corners=MAX_CORNERS, mask=None, quality_level=INITIAL_QUALITY_LEVEL):\n points = cv2.goodFeaturesToTrack(img,\n mask=mask,\n maxCorners=num_corners,\n qualityLevel=quality_level,\n minDistance=self.MIN_DISTANCE,\n blockSize=self.BLOCK_SIZE)\n if points is None:\n return None, None\n\n num_points = points.shape[0]\n sizes = np.array([self.CIRCLE_SIZE for _ in range(num_points)])\n return points, sizes\n\n def get_corners(self, new_img, old_img = None, old_corners=None):\n if old_img is None:\n points, sizes = self.find_new_corners(new_img)\n ids = 
np.arange(len(points))\n points = points.reshape((-1, 2))\n self.total_corners = len(points)\n return FrameCorners(ids, points, sizes)\n else:\n ids = old_corners.ids\n points = old_corners.points\n sizes = old_corners.sizes\n\n nextPts, status, err = cv2.calcOpticalFlowPyrLK(to_uint8_image(old_img),\n to_uint8_image(new_img),\n prevPts=points,\n nextPts=None,\n winSize=(self.CIRCLE_SIZE, self.CIRCLE_SIZE),\n maxLevel=self.MAX_LEVEL_LK,\n criteria=self.TERM_CRITERIA)\n\n status = status.squeeze()\n found = np.where(status == 1)\n\n ids = ids[found]\n points = nextPts[found]\n sizes = sizes[found]\n\n mask = self.get_circles_mask(new_img.shape, points)\n if len(points) < self.MAX_CORNERS:\n new_points, new_sizes = self.find_new_corners(new_img,\n self.MAX_CORNERS - len(points),\n mask,\n self.QUALITY_LEVEL)\n if new_points is not None:\n new_ids = np.arange(self.total_corners, self.total_corners + len(new_points))\n new_ids = new_ids.reshape((-1, 1))\n new_points = new_points.reshape((-1, 2))\n new_sizes = new_sizes.reshape((-1, 1))\n self.total_corners += len(new_points)\n ids = np.concatenate([ids, new_ids])\n points = np.concatenate([points, new_points])\n sizes = np.concatenate([sizes, new_sizes])\n\n points = points.reshape((-1, 2))\n return FrameCorners(ids, points, sizes)\n\n\ndef _build_impl(frame_sequence: pims.FramesSequence,\n builder: _CornerStorageBuilder) -> None:\n cornerTracker = CornerTracker()\n\n image_0 = frame_sequence[0]\n corners = cornerTracker.get_corners(image_0)\n builder.set_corners_at_frame(0, corners)\n\n for frame, image_1 in enumerate(frame_sequence[1:], 1):\n corners = cornerTracker.get_corners(image_1, image_0, corners)\n builder.set_corners_at_frame(frame, corners)\n image_0 = image_1\n\n\ndef build(frame_sequence: pims.FramesSequence,\n progress: bool = True) -> CornerStorage:\n \"\"\"\n Build corners for all frames of a frame sequence.\n :param frame_sequence: grayscale float32 frame sequence.\n :param progress: enable/disable 
building progress bar.\n :return: corners for all frames of given sequence.\n \"\"\"\n if progress:\n with click.progressbar(length=len(frame_sequence),\n label='Calculating corners') as progress_bar:\n builder = _CornerStorageBuilder(progress_bar)\n _build_impl(frame_sequence, builder)\n else:\n builder = _CornerStorageBuilder()\n _build_impl(frame_sequence, builder)\n\n corner_storage = builder.build_corner_storage()\n final_storage = without_short_tracks(corner_storage, min_len=20)\n\n return final_storage\n\n\nif __name__ == '__main__':\n create_cli(build)() # pylint:disable=no-value-for-parameter",
"step-ids": [
10,
12,
14,
15,
17
]
}
|
[
10,
12,
14,
15,
17
] |
import datetime
count = 0
for y in xrange(1901,2001):
for m in xrange(1,13):
if datetime.date(y,m,1).weekday() == 6:
count += 1
print count
|
normal
|
{
"blob_id": "7430e17d1c424362399cf09a0c3ecae825d04567",
"index": 2996,
"step-1": "import datetime\ncount = 0\nfor y in xrange(1901,2001):\n\tfor m in xrange(1,13):\n\t\tif datetime.date(y,m,1).weekday() == 6:\n\t\t\tcount += 1\n\nprint count\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def predict(text, phobert, tokenizer):
model = load_model('model.h5')
X_test = word2vec(text, phobert, tokenizer)
x_test_tensor = tf.convert_to_tensor(X_test)
X_tests = []
X_tests.append(x_test_tensor)
X_tests = tf.convert_to_tensor(X_tests)
y = model.predict(X_tests)
y_predict = np.argmax(y, axis=-1)
print(y_predict + 1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('bat dau')
def predict(text, phobert, tokenizer):
model = load_model('model.h5')
X_test = word2vec(text, phobert, tokenizer)
x_test_tensor = tf.convert_to_tensor(X_test)
X_tests = []
X_tests.append(x_test_tensor)
X_tests = tf.convert_to_tensor(X_tests)
y = model.predict(X_tests)
y_predict = np.argmax(y, axis=-1)
print(y_predict + 1)
if __name__ == '__main__':
print('1 Chỗ này hơi lâu bạn đợi tí')
phobert = AutoModel.from_pretrained('vinai/phobert-base')
print('2')
tokenizer = AutoTokenizer.from_pretrained('vinai/phobert-base',
use_fast=False)
print('3')
predict('tôi làm giấy X ở đâu', phobert, tokenizer)
print('4')
predict('tôi làm giấy X ở đâu', phobert, tokenizer)
print('5')
predict('tôi làm giấy X cần những gì', phobert, tokenizer)
<|reserved_special_token_1|>
from utils import *
from wordEmbedding import *
print('bat dau')
def predict(text, phobert, tokenizer):
model = load_model('model.h5')
X_test = word2vec(text, phobert, tokenizer)
x_test_tensor = tf.convert_to_tensor(X_test)
X_tests = []
X_tests.append(x_test_tensor)
X_tests = tf.convert_to_tensor(X_tests)
y = model.predict(X_tests)
y_predict = np.argmax(y, axis=-1)
print(y_predict + 1)
if __name__ == '__main__':
print('1 Chỗ này hơi lâu bạn đợi tí')
phobert = AutoModel.from_pretrained('vinai/phobert-base')
print('2')
tokenizer = AutoTokenizer.from_pretrained('vinai/phobert-base',
use_fast=False)
print('3')
predict('tôi làm giấy X ở đâu', phobert, tokenizer)
print('4')
predict('tôi làm giấy X ở đâu', phobert, tokenizer)
print('5')
predict('tôi làm giấy X cần những gì', phobert, tokenizer)
<|reserved_special_token_1|>
from utils import *
from wordEmbedding import *
print("bat dau")
def predict(text, phobert, tokenizer):
model = load_model('model.h5')
X_test = word2vec(text, phobert, tokenizer)
x_test_tensor = tf.convert_to_tensor(X_test)
X_tests = []
X_tests.append(x_test_tensor)
X_tests = tf.convert_to_tensor(X_tests)
y = model.predict(X_tests)
y_predict = np.argmax(y, axis=-1)
print(y_predict+1)
if __name__ == "__main__":
print("1 Chỗ này hơi lâu bạn đợi tí")
phobert = AutoModel.from_pretrained("vinai/phobert-base")
print("2")
tokenizer = AutoTokenizer.from_pretrained("vinai/phobert-base", use_fast=False)
print("3")
predict("tôi làm giấy X ở đâu", phobert, tokenizer)
print("4")
predict("tôi làm giấy X ở đâu", phobert, tokenizer)
print("5")
predict("tôi làm giấy X cần những gì", phobert, tokenizer)
|
flexible
|
{
"blob_id": "d2c9ee64472c74767812d842d2c49eec962e28c6",
"index": 4451,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef predict(text, phobert, tokenizer):\n model = load_model('model.h5')\n X_test = word2vec(text, phobert, tokenizer)\n x_test_tensor = tf.convert_to_tensor(X_test)\n X_tests = []\n X_tests.append(x_test_tensor)\n X_tests = tf.convert_to_tensor(X_tests)\n y = model.predict(X_tests)\n y_predict = np.argmax(y, axis=-1)\n print(y_predict + 1)\n\n\n<mask token>\n",
"step-3": "<mask token>\nprint('bat dau')\n\n\ndef predict(text, phobert, tokenizer):\n model = load_model('model.h5')\n X_test = word2vec(text, phobert, tokenizer)\n x_test_tensor = tf.convert_to_tensor(X_test)\n X_tests = []\n X_tests.append(x_test_tensor)\n X_tests = tf.convert_to_tensor(X_tests)\n y = model.predict(X_tests)\n y_predict = np.argmax(y, axis=-1)\n print(y_predict + 1)\n\n\nif __name__ == '__main__':\n print('1 Chỗ này hơi lâu bạn đợi tí')\n phobert = AutoModel.from_pretrained('vinai/phobert-base')\n print('2')\n tokenizer = AutoTokenizer.from_pretrained('vinai/phobert-base',\n use_fast=False)\n print('3')\n predict('tôi làm giấy X ở đâu', phobert, tokenizer)\n print('4')\n predict('tôi làm giấy X ở đâu', phobert, tokenizer)\n print('5')\n predict('tôi làm giấy X cần những gì', phobert, tokenizer)\n",
"step-4": "from utils import *\nfrom wordEmbedding import *\nprint('bat dau')\n\n\ndef predict(text, phobert, tokenizer):\n model = load_model('model.h5')\n X_test = word2vec(text, phobert, tokenizer)\n x_test_tensor = tf.convert_to_tensor(X_test)\n X_tests = []\n X_tests.append(x_test_tensor)\n X_tests = tf.convert_to_tensor(X_tests)\n y = model.predict(X_tests)\n y_predict = np.argmax(y, axis=-1)\n print(y_predict + 1)\n\n\nif __name__ == '__main__':\n print('1 Chỗ này hơi lâu bạn đợi tí')\n phobert = AutoModel.from_pretrained('vinai/phobert-base')\n print('2')\n tokenizer = AutoTokenizer.from_pretrained('vinai/phobert-base',\n use_fast=False)\n print('3')\n predict('tôi làm giấy X ở đâu', phobert, tokenizer)\n print('4')\n predict('tôi làm giấy X ở đâu', phobert, tokenizer)\n print('5')\n predict('tôi làm giấy X cần những gì', phobert, tokenizer)\n",
"step-5": "from utils import *\nfrom wordEmbedding import *\nprint(\"bat dau\")\n\ndef predict(text, phobert, tokenizer):\n model = load_model('model.h5')\n X_test = word2vec(text, phobert, tokenizer)\n x_test_tensor = tf.convert_to_tensor(X_test)\n\n X_tests = []\n X_tests.append(x_test_tensor)\n\n X_tests = tf.convert_to_tensor(X_tests)\n y = model.predict(X_tests)\n y_predict = np.argmax(y, axis=-1)\n\n print(y_predict+1)\n\nif __name__ == \"__main__\":\n print(\"1 Chỗ này hơi lâu bạn đợi tí\")\n phobert = AutoModel.from_pretrained(\"vinai/phobert-base\")\n print(\"2\")\n tokenizer = AutoTokenizer.from_pretrained(\"vinai/phobert-base\", use_fast=False)\n print(\"3\")\n predict(\"tôi làm giấy X ở đâu\", phobert, tokenizer)\n print(\"4\")\n predict(\"tôi làm giấy X ở đâu\", phobert, tokenizer)\n print(\"5\")\n predict(\"tôi làm giấy X cần những gì\", phobert, tokenizer)\n \n \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import requests
seesion = requests.Session()
header = {'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.3387.400 QQBrowser/9.6.11984.400'
}
cookie = {'Cookie':
'_ga=GA1.2.1866009938.1500885157; xmuuid=XMGUEST-B6484440-71B8-11E7-AF2E-EFCFEDA1C27A; muuid=1502080306158_2635; userId=912885850; cUserId=whd2FtY90KvyEb8BS1k8-0muAAo; xm_order_btauth=84949f1c6032d4129aca583e69b71c38; xm_link_history=t6sadFA60Z%2BMsuivPj8AHZ54rNmHsBBhyTKGmbJVUHs%3D; euid=hXV9isZHjBFFnkz317%2Fx9A%3D%3D; mUserId=kczvlmbCAd6Nb8wSLo5Gfza0sUdqAduE527UdXEgGWE%3D; axmuid=kczvlmbCAd6Nb8wSLo5Gfza0sUdqAduE527UdXEgGWE%3D; serviceToken=T6IIRQVomBeH%2FIys3W50AyGMBG%2BPHq9r6Xg8AHz%2BDs%2BADmxH%2BnGl0TqSTF%2Bvr75WoEgpebNWrd05nnmzaFFoa%2BdVk5lKX1RSBUTsePGYTNFwD9KeoLheKiXXweR3R1Mf67Q%2FZmBoqgT44iP4ZOYTjRlLKDBn%2BUiE2haBwG%2FLDfs%3D; xm_user_www_num=0; XM_912885850_UN=912885850; log_code=81190ccc4d52f577-5cb9ba924c37c3f1|https%3A%2F%2Fwww.mi.com%2Findex.html; lastsource=account.xiaomi.com; mstz=81190ccc4d52f577-5cb9ba924c37c3f1|%2F%2Faccount.xiaomi.com%2F|1028724705.19|pcpid|https%253A%252F%252Fwww.mi.com%252Findex.html|; mstuid=1501042380618_8910; xm_vistor=1501042380618_8910_1505118306768-1505118556462; pageid=f4f3444fdfa3d27a; xm_order_sid=1d1097d4897ab755d48b95b4bda6ab14'
}
html = requests.get('https://order.mi.com/portal?r=92853.1505118552',
cookies=cookie, headers=header).content
print(html.decode('utf-8'))
|
normal
|
{
"blob_id": "8c652f30cd256912512b6b91d1682af7da0ff915",
"index": 8265,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(html.decode('utf-8'))\n",
"step-3": "<mask token>\nseesion = requests.Session()\nheader = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.3387.400 QQBrowser/9.6.11984.400'\n }\ncookie = {'Cookie':\n '_ga=GA1.2.1866009938.1500885157; xmuuid=XMGUEST-B6484440-71B8-11E7-AF2E-EFCFEDA1C27A; muuid=1502080306158_2635; userId=912885850; cUserId=whd2FtY90KvyEb8BS1k8-0muAAo; xm_order_btauth=84949f1c6032d4129aca583e69b71c38; xm_link_history=t6sadFA60Z%2BMsuivPj8AHZ54rNmHsBBhyTKGmbJVUHs%3D; euid=hXV9isZHjBFFnkz317%2Fx9A%3D%3D; mUserId=kczvlmbCAd6Nb8wSLo5Gfza0sUdqAduE527UdXEgGWE%3D; axmuid=kczvlmbCAd6Nb8wSLo5Gfza0sUdqAduE527UdXEgGWE%3D; serviceToken=T6IIRQVomBeH%2FIys3W50AyGMBG%2BPHq9r6Xg8AHz%2BDs%2BADmxH%2BnGl0TqSTF%2Bvr75WoEgpebNWrd05nnmzaFFoa%2BdVk5lKX1RSBUTsePGYTNFwD9KeoLheKiXXweR3R1Mf67Q%2FZmBoqgT44iP4ZOYTjRlLKDBn%2BUiE2haBwG%2FLDfs%3D; xm_user_www_num=0; XM_912885850_UN=912885850; log_code=81190ccc4d52f577-5cb9ba924c37c3f1|https%3A%2F%2Fwww.mi.com%2Findex.html; lastsource=account.xiaomi.com; mstz=81190ccc4d52f577-5cb9ba924c37c3f1|%2F%2Faccount.xiaomi.com%2F|1028724705.19|pcpid|https%253A%252F%252Fwww.mi.com%252Findex.html|; mstuid=1501042380618_8910; xm_vistor=1501042380618_8910_1505118306768-1505118556462; pageid=f4f3444fdfa3d27a; xm_order_sid=1d1097d4897ab755d48b95b4bda6ab14'\n }\nhtml = requests.get('https://order.mi.com/portal?r=92853.1505118552',\n cookies=cookie, headers=header).content\nprint(html.decode('utf-8'))\n",
"step-4": "import requests\nseesion = requests.Session()\nheader = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.3387.400 QQBrowser/9.6.11984.400'\n }\ncookie = {'Cookie':\n '_ga=GA1.2.1866009938.1500885157; xmuuid=XMGUEST-B6484440-71B8-11E7-AF2E-EFCFEDA1C27A; muuid=1502080306158_2635; userId=912885850; cUserId=whd2FtY90KvyEb8BS1k8-0muAAo; xm_order_btauth=84949f1c6032d4129aca583e69b71c38; xm_link_history=t6sadFA60Z%2BMsuivPj8AHZ54rNmHsBBhyTKGmbJVUHs%3D; euid=hXV9isZHjBFFnkz317%2Fx9A%3D%3D; mUserId=kczvlmbCAd6Nb8wSLo5Gfza0sUdqAduE527UdXEgGWE%3D; axmuid=kczvlmbCAd6Nb8wSLo5Gfza0sUdqAduE527UdXEgGWE%3D; serviceToken=T6IIRQVomBeH%2FIys3W50AyGMBG%2BPHq9r6Xg8AHz%2BDs%2BADmxH%2BnGl0TqSTF%2Bvr75WoEgpebNWrd05nnmzaFFoa%2BdVk5lKX1RSBUTsePGYTNFwD9KeoLheKiXXweR3R1Mf67Q%2FZmBoqgT44iP4ZOYTjRlLKDBn%2BUiE2haBwG%2FLDfs%3D; xm_user_www_num=0; XM_912885850_UN=912885850; log_code=81190ccc4d52f577-5cb9ba924c37c3f1|https%3A%2F%2Fwww.mi.com%2Findex.html; lastsource=account.xiaomi.com; mstz=81190ccc4d52f577-5cb9ba924c37c3f1|%2F%2Faccount.xiaomi.com%2F|1028724705.19|pcpid|https%253A%252F%252Fwww.mi.com%252Findex.html|; mstuid=1501042380618_8910; xm_vistor=1501042380618_8910_1505118306768-1505118556462; pageid=f4f3444fdfa3d27a; xm_order_sid=1d1097d4897ab755d48b95b4bda6ab14'\n }\nhtml = requests.get('https://order.mi.com/portal?r=92853.1505118552',\n cookies=cookie, headers=header).content\nprint(html.decode('utf-8'))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import SCons.Util
import xml.dom.minidom, re, os.path
################################################################################
# DocBook pseudobuilder
# TODO: Only generate the output formats that are known
################################################################################
def generate(env) :
def remove_doctype(target, source, env) :
f = open(str(target[0]))
output = []
for line in f.readlines() :
output.append(re.sub("^<!DOCTYPE .*", "", line))
f.close()
f = open(str(target[0]), 'wb')
for line in output :
f.write(line)
f.close()
def buildDocBook(env, source) :
db_env = env.Clone()
db_env["XMLCATALOGS"] = [db_env["DOCBOOK_XML"]]
# PDF generation
fo = db_env.XSLT(os.path.splitext(source)[0] + ".fo", source,
XSLTSTYLESHEET = db_env["DOCBOOK_XSL_FO"])
pdf = db_env.FO(fo)
# HTML generation
db_env.XSLT(os.path.splitext(source)[0] + ".html", source,
XSLTSTYLESHEET = db_env["DOCBOOK_XSL_HTML"])
# WordPress generation
wp_params = [("wordpress.dir", env.get("DOCBOOK_WP_DIR", "../../wordpress"))]
wp_pdf_url = env.get("DOCBOOK_WP_PDF_URL", pdf[0].name)
if len(wp_pdf_url) > 0 :
wp_params.append(("pdf.url", wp_pdf_url))
wp_params.append(("pdf.icon", env.get("DOCBOOK_WP_PDF_ICON", "/icons/pdf.png")))
wp = db_env.XSLT(os.path.splitext(source)[0] + ".wp.php", source,
XSLTSTYLESHEET = db_env["DOCBOOK_XSL_WP"],
XSLTPARAMS = wp_params + env.get("XSLTPARAMS", []))
db_env.AddPostAction(wp, SCons.Action.Action(remove_doctype, cmdstr = "$FIXCOMSTR"))
env.AddMethod(buildDocBook, "DocBook")
def exists(env) :
return True
|
normal
|
{
"blob_id": "cae49da8dd436fc51b472c4a88703d8bc6c79bda",
"index": 427,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef generate(env):\n\n def remove_doctype(target, source, env):\n f = open(str(target[0]))\n output = []\n for line in f.readlines():\n output.append(re.sub('^<!DOCTYPE .*', '', line))\n f.close()\n f = open(str(target[0]), 'wb')\n for line in output:\n f.write(line)\n f.close()\n\n def buildDocBook(env, source):\n db_env = env.Clone()\n db_env['XMLCATALOGS'] = [db_env['DOCBOOK_XML']]\n fo = db_env.XSLT(os.path.splitext(source)[0] + '.fo', source,\n XSLTSTYLESHEET=db_env['DOCBOOK_XSL_FO'])\n pdf = db_env.FO(fo)\n db_env.XSLT(os.path.splitext(source)[0] + '.html', source,\n XSLTSTYLESHEET=db_env['DOCBOOK_XSL_HTML'])\n wp_params = [('wordpress.dir', env.get('DOCBOOK_WP_DIR',\n '../../wordpress'))]\n wp_pdf_url = env.get('DOCBOOK_WP_PDF_URL', pdf[0].name)\n if len(wp_pdf_url) > 0:\n wp_params.append(('pdf.url', wp_pdf_url))\n wp_params.append(('pdf.icon', env.get('DOCBOOK_WP_PDF_ICON',\n '/icons/pdf.png')))\n wp = db_env.XSLT(os.path.splitext(source)[0] + '.wp.php', source,\n XSLTSTYLESHEET=db_env['DOCBOOK_XSL_WP'], XSLTPARAMS=wp_params +\n env.get('XSLTPARAMS', []))\n db_env.AddPostAction(wp, SCons.Action.Action(remove_doctype, cmdstr\n ='$FIXCOMSTR'))\n env.AddMethod(buildDocBook, 'DocBook')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef generate(env):\n\n def remove_doctype(target, source, env):\n f = open(str(target[0]))\n output = []\n for line in f.readlines():\n output.append(re.sub('^<!DOCTYPE .*', '', line))\n f.close()\n f = open(str(target[0]), 'wb')\n for line in output:\n f.write(line)\n f.close()\n\n def buildDocBook(env, source):\n db_env = env.Clone()\n db_env['XMLCATALOGS'] = [db_env['DOCBOOK_XML']]\n fo = db_env.XSLT(os.path.splitext(source)[0] + '.fo', source,\n XSLTSTYLESHEET=db_env['DOCBOOK_XSL_FO'])\n pdf = db_env.FO(fo)\n db_env.XSLT(os.path.splitext(source)[0] + '.html', source,\n XSLTSTYLESHEET=db_env['DOCBOOK_XSL_HTML'])\n wp_params = [('wordpress.dir', env.get('DOCBOOK_WP_DIR',\n '../../wordpress'))]\n wp_pdf_url = env.get('DOCBOOK_WP_PDF_URL', pdf[0].name)\n if len(wp_pdf_url) > 0:\n wp_params.append(('pdf.url', wp_pdf_url))\n wp_params.append(('pdf.icon', env.get('DOCBOOK_WP_PDF_ICON',\n '/icons/pdf.png')))\n wp = db_env.XSLT(os.path.splitext(source)[0] + '.wp.php', source,\n XSLTSTYLESHEET=db_env['DOCBOOK_XSL_WP'], XSLTPARAMS=wp_params +\n env.get('XSLTPARAMS', []))\n db_env.AddPostAction(wp, SCons.Action.Action(remove_doctype, cmdstr\n ='$FIXCOMSTR'))\n env.AddMethod(buildDocBook, 'DocBook')\n\n\ndef exists(env):\n return True\n",
"step-4": "import SCons.Util\nimport xml.dom.minidom, re, os.path\n\n\ndef generate(env):\n\n def remove_doctype(target, source, env):\n f = open(str(target[0]))\n output = []\n for line in f.readlines():\n output.append(re.sub('^<!DOCTYPE .*', '', line))\n f.close()\n f = open(str(target[0]), 'wb')\n for line in output:\n f.write(line)\n f.close()\n\n def buildDocBook(env, source):\n db_env = env.Clone()\n db_env['XMLCATALOGS'] = [db_env['DOCBOOK_XML']]\n fo = db_env.XSLT(os.path.splitext(source)[0] + '.fo', source,\n XSLTSTYLESHEET=db_env['DOCBOOK_XSL_FO'])\n pdf = db_env.FO(fo)\n db_env.XSLT(os.path.splitext(source)[0] + '.html', source,\n XSLTSTYLESHEET=db_env['DOCBOOK_XSL_HTML'])\n wp_params = [('wordpress.dir', env.get('DOCBOOK_WP_DIR',\n '../../wordpress'))]\n wp_pdf_url = env.get('DOCBOOK_WP_PDF_URL', pdf[0].name)\n if len(wp_pdf_url) > 0:\n wp_params.append(('pdf.url', wp_pdf_url))\n wp_params.append(('pdf.icon', env.get('DOCBOOK_WP_PDF_ICON',\n '/icons/pdf.png')))\n wp = db_env.XSLT(os.path.splitext(source)[0] + '.wp.php', source,\n XSLTSTYLESHEET=db_env['DOCBOOK_XSL_WP'], XSLTPARAMS=wp_params +\n env.get('XSLTPARAMS', []))\n db_env.AddPostAction(wp, SCons.Action.Action(remove_doctype, cmdstr\n ='$FIXCOMSTR'))\n env.AddMethod(buildDocBook, 'DocBook')\n\n\ndef exists(env):\n return True\n",
"step-5": "import SCons.Util\nimport xml.dom.minidom, re, os.path\n\n################################################################################\n# DocBook pseudobuilder\n# TODO: Only generate the output formats that are known\n################################################################################\n\ndef generate(env) :\n def remove_doctype(target, source, env) :\n f = open(str(target[0]))\n output = []\n for line in f.readlines() :\n output.append(re.sub(\"^<!DOCTYPE .*\", \"\", line))\n f.close()\n f = open(str(target[0]), 'wb')\n for line in output :\n f.write(line)\n f.close()\n\n def buildDocBook(env, source) :\n db_env = env.Clone()\n db_env[\"XMLCATALOGS\"] = [db_env[\"DOCBOOK_XML\"]]\n\n # PDF generation\n fo = db_env.XSLT(os.path.splitext(source)[0] + \".fo\", source, \n XSLTSTYLESHEET = db_env[\"DOCBOOK_XSL_FO\"])\n pdf = db_env.FO(fo)\n\n # HTML generation\n db_env.XSLT(os.path.splitext(source)[0] + \".html\", source, \n XSLTSTYLESHEET = db_env[\"DOCBOOK_XSL_HTML\"])\n\n # WordPress generation\n wp_params = [(\"wordpress.dir\", env.get(\"DOCBOOK_WP_DIR\", \"../../wordpress\"))]\n wp_pdf_url = env.get(\"DOCBOOK_WP_PDF_URL\", pdf[0].name)\n if len(wp_pdf_url) > 0 :\n wp_params.append((\"pdf.url\", wp_pdf_url))\n wp_params.append((\"pdf.icon\", env.get(\"DOCBOOK_WP_PDF_ICON\", \"/icons/pdf.png\")))\n wp = db_env.XSLT(os.path.splitext(source)[0] + \".wp.php\", source, \n XSLTSTYLESHEET = db_env[\"DOCBOOK_XSL_WP\"],\n XSLTPARAMS = wp_params + env.get(\"XSLTPARAMS\", []))\n db_env.AddPostAction(wp, SCons.Action.Action(remove_doctype, cmdstr = \"$FIXCOMSTR\"))\n\n env.AddMethod(buildDocBook, \"DocBook\")\n \ndef exists(env) :\n return True\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import models
import json
import reports.models
import common.ot_utils
def analyze_raw_reports(clean=True):
if clean:
delete_all_reports()
COUNT = 100
offset = 0
while True:
cont = analyze_raw_reports_subset(offset,COUNT)
offset += COUNT
if not cont:
return
def analyze_raw_reports_subset(offset,count):
items = _collect_items(offset,count)
if items:
dump_items(items)
return True
return False
def dump_items(items):
wifis = []
locs = []
for (idx,item) in enumerate(items):
if idx % 100 == 0:
print '%d/%d' % (idx,len(items))
if 'wifi' in item.keys():
report_dt = common.ot_utils.get_utc_time_from_timestamp(float(item['time'])/1000)
m = models.Report(device_id=item['device_id'],timestamp=report_dt)
m.save()
item_loc = item.get('location_api')
if item_loc:
loc = models.LocationInfo(report=m,
lat=item_loc['lat'],
lon=item_loc['long'],
provider=item_loc['provider'],
timestamp = common.ot_utils.get_utc_time_from_timestamp(float(item_loc['time'])/1000),
accuracy = item_loc['accuracy'])
locs.append(loc)
for wifi in item['wifi']:
wifis.append(models.SingleWifiReport(SSID=wifi['SSID'],
signal=wifi['signal'],
frequency=wifi['frequency'],
key=wifi['key'],
report=m))
print 'Saving all dependant objects'
models.SingleWifiReport.objects.bulk_create(wifis)
models.LocationInfo.objects.bulk_create(locs)
def delete_all_reports():
common.ot_utils.delete_from_model(models.SingleWifiReport)
common.ot_utils.delete_from_model(models.LocationInfo)
common.ot_utils.delete_from_model(models.Report)
def _collect_items(offset,count):
all_reports_count = reports.models.RawReport.objects.count()
print '*** offset = %d count = %d all_reports_count = %d' % (offset,count,all_reports_count)
all_reports = reports.models.RawReport.objects.all()[offset:offset+count]
result = []
for rj in all_reports:
items = json.loads(rj.text)['items']
result.extend(items)
return result
|
normal
|
{
"blob_id": "c3527363cfc29ab7d598fe232d784b05ec2ef069",
"index": 388,
"step-1": "import models\nimport json\nimport reports.models\nimport common.ot_utils\n\ndef analyze_raw_reports(clean=True):\n if clean:\n delete_all_reports()\n COUNT = 100\n offset = 0\n while True:\n cont = analyze_raw_reports_subset(offset,COUNT)\n offset += COUNT\n if not cont:\n return \n \ndef analyze_raw_reports_subset(offset,count):\n items = _collect_items(offset,count)\n if items:\n dump_items(items)\n return True\n return False\n\ndef dump_items(items):\n wifis = []\n locs = []\n for (idx,item) in enumerate(items):\n if idx % 100 == 0:\n print '%d/%d' % (idx,len(items))\n if 'wifi' in item.keys():\n report_dt = common.ot_utils.get_utc_time_from_timestamp(float(item['time'])/1000)\n m = models.Report(device_id=item['device_id'],timestamp=report_dt)\n m.save()\n item_loc = item.get('location_api')\n if item_loc:\n loc = models.LocationInfo(report=m,\n lat=item_loc['lat'],\n lon=item_loc['long'],\n provider=item_loc['provider'],\n timestamp = common.ot_utils.get_utc_time_from_timestamp(float(item_loc['time'])/1000),\n accuracy = item_loc['accuracy'])\n locs.append(loc)\n for wifi in item['wifi']:\n wifis.append(models.SingleWifiReport(SSID=wifi['SSID'],\n signal=wifi['signal'],\n frequency=wifi['frequency'],\n key=wifi['key'],\n report=m))\n print 'Saving all dependant objects'\n models.SingleWifiReport.objects.bulk_create(wifis)\n models.LocationInfo.objects.bulk_create(locs)\n \n \n\n\ndef delete_all_reports():\n common.ot_utils.delete_from_model(models.SingleWifiReport)\n common.ot_utils.delete_from_model(models.LocationInfo)\n common.ot_utils.delete_from_model(models.Report)\n \ndef _collect_items(offset,count):\n all_reports_count = reports.models.RawReport.objects.count()\n print '*** offset = %d count = %d all_reports_count = %d' % (offset,count,all_reports_count)\n all_reports = reports.models.RawReport.objects.all()[offset:offset+count]\n result = []\n for rj in all_reports:\n items = json.loads(rj.text)['items']\n result.extend(items)\n return 
result\n\n\n \n \n\n\n\n\n\n\n \n \n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class Game:
location: list[int, int] = [c.WIDTH / 2, c.HEIGHT / 2]
velocity: list[int, int] = [0, 0]
current_player: Player = None
other_players: Dict[str, Tuple[Player, Tuple[int, int]]] = {}
connection: socket.socket
font: pygame.font.Font
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def connect_to_server(self):
self.connection.connect((c.HOST, c.PORT))
def listen_to_server(self):
ins, outs, ex = select.select([self.connection], [], [], 0)
for inm in ins:
received_data = inm.recv(c.BUFFSIZE)
event: Event = pickle.loads(received_data)
print('<<<', event)
if isinstance(event, CurrentPlayerEvent):
pygame.display.set_caption(
f'Socket Game - {event.player.nickname}')
self.current_player = event.player
elif isinstance(event, PlayerDidMoveEvent):
self.update_player(event.player, event.location)
elif isinstance(event, PlayerJoinedEvent):
self.update_player(event.player)
def update_player(self, player: Player, location=(c.WIDTH / 2, c.HEIGHT /
2)):
self.other_players[player.nickname] = player, location
def update_server(self):
if self.current_player is not None:
self.connection.send(pickle.dumps(PlayerDidMoveEvent(self.
current_player, (self.location[0], self.location[1]))))
def game_loop(self):
self.listen_to_server()
self.event_handling()
self.update_location()
self.render()
self.update_server()
self.clock.tick(60)
def update_location(self):
oldx, oldy = self.location
vx, vy = self.velocity
newx, newy = oldx + vx, oldy + vy
if newx > c.WIDTH - c.PLAYER_SIZE:
newx = c.WIDTH - c.PLAYER_SIZE
if newx < 0:
newx = 0
if newy > c.HEIGHT - c.PLAYER_SIZE:
newy = c.HEIGHT - c.PLAYER_SIZE
if newy < 0:
newy = 0
self.location = [newx, newy]
def render_player(self, player: Player, location: Tuple[int, int]):
x, y = location
img = self.font.render(player.nickname, True, player.color)
pygame.draw.rect(self.screen, player.color, (x, y, c.PLAYER_SIZE, c
.PLAYER_SIZE))
self.screen.blit(img, (x, y - img.get_height()))
def render(self):
self.screen.fill((255, 255, 255))
if self.current_player is not None:
self.render_player(self.current_player, (self.location[0], self
.location[1]))
for nickname, (player, location) in self.other_players.items():
self.render_player(player, location)
pygame.display.flip()
def event_handling(self):
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_LEFT:
self.velocity[0] = -c.MOVEMENT_SPEED
if event.key == K_RIGHT:
self.velocity[0] = c.MOVEMENT_SPEED
if event.key == K_UP:
self.velocity[1] = -c.MOVEMENT_SPEED
if event.key == K_DOWN:
self.velocity[1] = c.MOVEMENT_SPEED
if event.type == KEYUP:
if event.key == K_LEFT:
self.velocity[0] = 0
if event.key == K_RIGHT:
self.velocity[0] = 0
if event.key == K_UP:
self.velocity[1] = 0
if event.key == K_DOWN:
self.velocity[1] = 0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Game:
location: list[int, int] = [c.WIDTH / 2, c.HEIGHT / 2]
velocity: list[int, int] = [0, 0]
current_player: Player = None
other_players: Dict[str, Tuple[Player, Tuple[int, int]]] = {}
connection: socket.socket
font: pygame.font.Font
def __init__(self):
pygame.init()
self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.screen = pygame.display.set_mode((c.WIDTH, c.HEIGHT))
pygame.display.set_caption('Socket Game')
self.clock = pygame.time.Clock()
self.screen.fill('white')
self.font = pygame.font.SysFont(None, c.FONT_SIZE)
<|reserved_special_token_0|>
def connect_to_server(self):
self.connection.connect((c.HOST, c.PORT))
def listen_to_server(self):
ins, outs, ex = select.select([self.connection], [], [], 0)
for inm in ins:
received_data = inm.recv(c.BUFFSIZE)
event: Event = pickle.loads(received_data)
print('<<<', event)
if isinstance(event, CurrentPlayerEvent):
pygame.display.set_caption(
f'Socket Game - {event.player.nickname}')
self.current_player = event.player
elif isinstance(event, PlayerDidMoveEvent):
self.update_player(event.player, event.location)
elif isinstance(event, PlayerJoinedEvent):
self.update_player(event.player)
def update_player(self, player: Player, location=(c.WIDTH / 2, c.HEIGHT /
2)):
self.other_players[player.nickname] = player, location
def update_server(self):
if self.current_player is not None:
self.connection.send(pickle.dumps(PlayerDidMoveEvent(self.
current_player, (self.location[0], self.location[1]))))
def game_loop(self):
self.listen_to_server()
self.event_handling()
self.update_location()
self.render()
self.update_server()
self.clock.tick(60)
def update_location(self):
oldx, oldy = self.location
vx, vy = self.velocity
newx, newy = oldx + vx, oldy + vy
if newx > c.WIDTH - c.PLAYER_SIZE:
newx = c.WIDTH - c.PLAYER_SIZE
if newx < 0:
newx = 0
if newy > c.HEIGHT - c.PLAYER_SIZE:
newy = c.HEIGHT - c.PLAYER_SIZE
if newy < 0:
newy = 0
self.location = [newx, newy]
def render_player(self, player: Player, location: Tuple[int, int]):
x, y = location
img = self.font.render(player.nickname, True, player.color)
pygame.draw.rect(self.screen, player.color, (x, y, c.PLAYER_SIZE, c
.PLAYER_SIZE))
self.screen.blit(img, (x, y - img.get_height()))
def render(self):
self.screen.fill((255, 255, 255))
if self.current_player is not None:
self.render_player(self.current_player, (self.location[0], self
.location[1]))
for nickname, (player, location) in self.other_players.items():
self.render_player(player, location)
pygame.display.flip()
def event_handling(self):
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_LEFT:
self.velocity[0] = -c.MOVEMENT_SPEED
if event.key == K_RIGHT:
self.velocity[0] = c.MOVEMENT_SPEED
if event.key == K_UP:
self.velocity[1] = -c.MOVEMENT_SPEED
if event.key == K_DOWN:
self.velocity[1] = c.MOVEMENT_SPEED
if event.type == KEYUP:
if event.key == K_LEFT:
self.velocity[0] = 0
if event.key == K_RIGHT:
self.velocity[0] = 0
if event.key == K_UP:
self.velocity[1] = 0
if event.key == K_DOWN:
self.velocity[1] = 0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Game:
location: list[int, int] = [c.WIDTH / 2, c.HEIGHT / 2]
velocity: list[int, int] = [0, 0]
current_player: Player = None
other_players: Dict[str, Tuple[Player, Tuple[int, int]]] = {}
connection: socket.socket
font: pygame.font.Font
def __init__(self):
pygame.init()
self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.screen = pygame.display.set_mode((c.WIDTH, c.HEIGHT))
pygame.display.set_caption('Socket Game')
self.clock = pygame.time.Clock()
self.screen.fill('white')
self.font = pygame.font.SysFont(None, c.FONT_SIZE)
def start(self):
self.connect_to_server()
while True:
self.game_loop()
def connect_to_server(self):
self.connection.connect((c.HOST, c.PORT))
def listen_to_server(self):
ins, outs, ex = select.select([self.connection], [], [], 0)
for inm in ins:
received_data = inm.recv(c.BUFFSIZE)
event: Event = pickle.loads(received_data)
print('<<<', event)
if isinstance(event, CurrentPlayerEvent):
pygame.display.set_caption(
f'Socket Game - {event.player.nickname}')
self.current_player = event.player
elif isinstance(event, PlayerDidMoveEvent):
self.update_player(event.player, event.location)
elif isinstance(event, PlayerJoinedEvent):
self.update_player(event.player)
def update_player(self, player: Player, location=(c.WIDTH / 2, c.HEIGHT /
2)):
self.other_players[player.nickname] = player, location
def update_server(self):
if self.current_player is not None:
self.connection.send(pickle.dumps(PlayerDidMoveEvent(self.
current_player, (self.location[0], self.location[1]))))
def game_loop(self):
self.listen_to_server()
self.event_handling()
self.update_location()
self.render()
self.update_server()
self.clock.tick(60)
def update_location(self):
oldx, oldy = self.location
vx, vy = self.velocity
newx, newy = oldx + vx, oldy + vy
if newx > c.WIDTH - c.PLAYER_SIZE:
newx = c.WIDTH - c.PLAYER_SIZE
if newx < 0:
newx = 0
if newy > c.HEIGHT - c.PLAYER_SIZE:
newy = c.HEIGHT - c.PLAYER_SIZE
if newy < 0:
newy = 0
self.location = [newx, newy]
def render_player(self, player: Player, location: Tuple[int, int]):
x, y = location
img = self.font.render(player.nickname, True, player.color)
pygame.draw.rect(self.screen, player.color, (x, y, c.PLAYER_SIZE, c
.PLAYER_SIZE))
self.screen.blit(img, (x, y - img.get_height()))
def render(self):
self.screen.fill((255, 255, 255))
if self.current_player is not None:
self.render_player(self.current_player, (self.location[0], self
.location[1]))
for nickname, (player, location) in self.other_players.items():
self.render_player(player, location)
pygame.display.flip()
def event_handling(self):
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_LEFT:
self.velocity[0] = -c.MOVEMENT_SPEED
if event.key == K_RIGHT:
self.velocity[0] = c.MOVEMENT_SPEED
if event.key == K_UP:
self.velocity[1] = -c.MOVEMENT_SPEED
if event.key == K_DOWN:
self.velocity[1] = c.MOVEMENT_SPEED
if event.type == KEYUP:
if event.key == K_LEFT:
self.velocity[0] = 0
if event.key == K_RIGHT:
self.velocity[0] = 0
if event.key == K_UP:
self.velocity[1] = 0
if event.key == K_DOWN:
self.velocity[1] = 0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import pickle
import select
import socket
import sys
from threading import Thread
from typing import Dict, Tuple
import pygame
from pygame.locals import *
import c
from models import *
class Game:
location: list[int, int] = [c.WIDTH / 2, c.HEIGHT / 2]
velocity: list[int, int] = [0, 0]
current_player: Player = None
other_players: Dict[str, Tuple[Player, Tuple[int, int]]] = {}
connection: socket.socket
font: pygame.font.Font
def __init__(self):
pygame.init()
self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.screen = pygame.display.set_mode((c.WIDTH, c.HEIGHT))
pygame.display.set_caption('Socket Game')
self.clock = pygame.time.Clock()
self.screen.fill('white')
self.font = pygame.font.SysFont(None, c.FONT_SIZE)
def start(self):
self.connect_to_server()
while True:
self.game_loop()
def connect_to_server(self):
self.connection.connect((c.HOST, c.PORT))
def listen_to_server(self):
ins, outs, ex = select.select([self.connection], [], [], 0)
for inm in ins:
received_data = inm.recv(c.BUFFSIZE)
event: Event = pickle.loads(received_data)
print('<<<', event)
if isinstance(event, CurrentPlayerEvent):
pygame.display.set_caption(
f'Socket Game - {event.player.nickname}')
self.current_player = event.player
elif isinstance(event, PlayerDidMoveEvent):
self.update_player(event.player, event.location)
elif isinstance(event, PlayerJoinedEvent):
self.update_player(event.player)
def update_player(self, player: Player, location=(c.WIDTH / 2, c.HEIGHT /
2)):
self.other_players[player.nickname] = player, location
def update_server(self):
if self.current_player is not None:
self.connection.send(pickle.dumps(PlayerDidMoveEvent(self.
current_player, (self.location[0], self.location[1]))))
def game_loop(self):
self.listen_to_server()
self.event_handling()
self.update_location()
self.render()
self.update_server()
self.clock.tick(60)
def update_location(self):
oldx, oldy = self.location
vx, vy = self.velocity
newx, newy = oldx + vx, oldy + vy
if newx > c.WIDTH - c.PLAYER_SIZE:
newx = c.WIDTH - c.PLAYER_SIZE
if newx < 0:
newx = 0
if newy > c.HEIGHT - c.PLAYER_SIZE:
newy = c.HEIGHT - c.PLAYER_SIZE
if newy < 0:
newy = 0
self.location = [newx, newy]
def render_player(self, player: Player, location: Tuple[int, int]):
x, y = location
img = self.font.render(player.nickname, True, player.color)
pygame.draw.rect(self.screen, player.color, (x, y, c.PLAYER_SIZE, c
.PLAYER_SIZE))
self.screen.blit(img, (x, y - img.get_height()))
def render(self):
self.screen.fill((255, 255, 255))
if self.current_player is not None:
self.render_player(self.current_player, (self.location[0], self
.location[1]))
for nickname, (player, location) in self.other_players.items():
self.render_player(player, location)
pygame.display.flip()
def event_handling(self):
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_LEFT:
self.velocity[0] = -c.MOVEMENT_SPEED
if event.key == K_RIGHT:
self.velocity[0] = c.MOVEMENT_SPEED
if event.key == K_UP:
self.velocity[1] = -c.MOVEMENT_SPEED
if event.key == K_DOWN:
self.velocity[1] = c.MOVEMENT_SPEED
if event.type == KEYUP:
if event.key == K_LEFT:
self.velocity[0] = 0
if event.key == K_RIGHT:
self.velocity[0] = 0
if event.key == K_UP:
self.velocity[1] = 0
if event.key == K_DOWN:
self.velocity[1] = 0
if __name__ == '__main__':
s = Game()
s.start()
<|reserved_special_token_1|>
import pickle
import select
import socket
import sys
from threading import Thread
from typing import Dict, Tuple
import pygame
from pygame.locals import *
import c
from models import *
class Game:
location: list[int, int] = [c.WIDTH / 2, c.HEIGHT / 2]
velocity: list[int, int] = [0, 0]
current_player: Player = None
other_players: Dict[str, Tuple[Player, Tuple[int, int]]] = {}
connection: socket.socket
font: pygame.font.Font
def __init__(self):
pygame.init()
self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.screen = pygame.display.set_mode((c.WIDTH, c.HEIGHT))
pygame.display.set_caption('Socket Game')
self.clock = pygame.time.Clock()
self.screen.fill('white')
self.font = pygame.font.SysFont(None, c.FONT_SIZE)
def start(self):
self.connect_to_server()
while True:
self.game_loop()
def connect_to_server(self):
self.connection.connect((c.HOST, c.PORT))
def listen_to_server(self):
ins, outs, ex = select.select([self.connection], [], [], 0)
for inm in ins:
received_data = inm.recv(c.BUFFSIZE)
event: Event = pickle.loads(received_data)
print("<<<", event)
if isinstance(event, CurrentPlayerEvent):
pygame.display.set_caption(f'Socket Game - {event.player.nickname}')
self.current_player = event.player
elif isinstance(event, PlayerDidMoveEvent):
self.update_player(event.player, event.location)
elif isinstance(event, PlayerJoinedEvent):
self.update_player(event.player)
def update_player(self, player: Player, location=(c.WIDTH / 2, c.HEIGHT / 2)):
self.other_players[player.nickname] = (player, location)
def update_server(self):
if self.current_player is not None:
self.connection.send(pickle.dumps(PlayerDidMoveEvent(self.current_player, (
self.location[0], self.location[1],
))))
def game_loop(self):
self.listen_to_server()
self.event_handling()
self.update_location()
self.render()
self.update_server()
self.clock.tick(60)
def update_location(self):
oldx, oldy = self.location
vx, vy = self.velocity
newx, newy = oldx + vx, oldy + vy
if newx > c.WIDTH - c.PLAYER_SIZE:
newx = c.WIDTH - c.PLAYER_SIZE
if newx < 0:
newx = 0
if newy > c.HEIGHT - c.PLAYER_SIZE:
newy = c.HEIGHT - c.PLAYER_SIZE
if newy < 0:
newy = 0
self.location = [newx, newy]
def render_player(self, player: Player, location: Tuple[int, int]):
x, y = location
img = self.font.render(player.nickname, True, player.color)
pygame.draw.rect(self.screen, player.color, (x, y, c.PLAYER_SIZE, c.PLAYER_SIZE))
self.screen.blit(img, (x, y - img.get_height()))
def render(self):
self.screen.fill((255, 255, 255))
if self.current_player is not None:
self.render_player(self.current_player, (self.location[0], self.location[1]))
for nickname, (player, location) in self.other_players.items():
self.render_player(player, location)
pygame.display.flip()
def event_handling(self):
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_LEFT: self.velocity[0] = -c.MOVEMENT_SPEED
if event.key == K_RIGHT: self.velocity[0] = c.MOVEMENT_SPEED
if event.key == K_UP: self.velocity[1] = -c.MOVEMENT_SPEED
if event.key == K_DOWN: self.velocity[1] = c.MOVEMENT_SPEED
if event.type == KEYUP:
if event.key == K_LEFT: self.velocity[0] = 0
if event.key == K_RIGHT: self.velocity[0] = 0
if event.key == K_UP: self.velocity[1] = 0
if event.key == K_DOWN: self.velocity[1] = 0
if __name__ == "__main__":
    # Entry point: construct the client and run it until the window closes.
    s = Game()
    s.start()
|
flexible
|
{
"blob_id": "418798369578e80ecbf82da802b23dc6ca922569",
"index": 7107,
"step-1": "<mask token>\n\n\nclass Game:\n location: list[int, int] = [c.WIDTH / 2, c.HEIGHT / 2]\n velocity: list[int, int] = [0, 0]\n current_player: Player = None\n other_players: Dict[str, Tuple[Player, Tuple[int, int]]] = {}\n connection: socket.socket\n font: pygame.font.Font\n <mask token>\n <mask token>\n\n def connect_to_server(self):\n self.connection.connect((c.HOST, c.PORT))\n\n def listen_to_server(self):\n ins, outs, ex = select.select([self.connection], [], [], 0)\n for inm in ins:\n received_data = inm.recv(c.BUFFSIZE)\n event: Event = pickle.loads(received_data)\n print('<<<', event)\n if isinstance(event, CurrentPlayerEvent):\n pygame.display.set_caption(\n f'Socket Game - {event.player.nickname}')\n self.current_player = event.player\n elif isinstance(event, PlayerDidMoveEvent):\n self.update_player(event.player, event.location)\n elif isinstance(event, PlayerJoinedEvent):\n self.update_player(event.player)\n\n def update_player(self, player: Player, location=(c.WIDTH / 2, c.HEIGHT /\n 2)):\n self.other_players[player.nickname] = player, location\n\n def update_server(self):\n if self.current_player is not None:\n self.connection.send(pickle.dumps(PlayerDidMoveEvent(self.\n current_player, (self.location[0], self.location[1]))))\n\n def game_loop(self):\n self.listen_to_server()\n self.event_handling()\n self.update_location()\n self.render()\n self.update_server()\n self.clock.tick(60)\n\n def update_location(self):\n oldx, oldy = self.location\n vx, vy = self.velocity\n newx, newy = oldx + vx, oldy + vy\n if newx > c.WIDTH - c.PLAYER_SIZE:\n newx = c.WIDTH - c.PLAYER_SIZE\n if newx < 0:\n newx = 0\n if newy > c.HEIGHT - c.PLAYER_SIZE:\n newy = c.HEIGHT - c.PLAYER_SIZE\n if newy < 0:\n newy = 0\n self.location = [newx, newy]\n\n def render_player(self, player: Player, location: Tuple[int, int]):\n x, y = location\n img = self.font.render(player.nickname, True, player.color)\n pygame.draw.rect(self.screen, player.color, (x, y, c.PLAYER_SIZE, c\n 
.PLAYER_SIZE))\n self.screen.blit(img, (x, y - img.get_height()))\n\n def render(self):\n self.screen.fill((255, 255, 255))\n if self.current_player is not None:\n self.render_player(self.current_player, (self.location[0], self\n .location[1]))\n for nickname, (player, location) in self.other_players.items():\n self.render_player(player, location)\n pygame.display.flip()\n\n def event_handling(self):\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_LEFT:\n self.velocity[0] = -c.MOVEMENT_SPEED\n if event.key == K_RIGHT:\n self.velocity[0] = c.MOVEMENT_SPEED\n if event.key == K_UP:\n self.velocity[1] = -c.MOVEMENT_SPEED\n if event.key == K_DOWN:\n self.velocity[1] = c.MOVEMENT_SPEED\n if event.type == KEYUP:\n if event.key == K_LEFT:\n self.velocity[0] = 0\n if event.key == K_RIGHT:\n self.velocity[0] = 0\n if event.key == K_UP:\n self.velocity[1] = 0\n if event.key == K_DOWN:\n self.velocity[1] = 0\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Game:\n location: list[int, int] = [c.WIDTH / 2, c.HEIGHT / 2]\n velocity: list[int, int] = [0, 0]\n current_player: Player = None\n other_players: Dict[str, Tuple[Player, Tuple[int, int]]] = {}\n connection: socket.socket\n font: pygame.font.Font\n\n def __init__(self):\n pygame.init()\n self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.screen = pygame.display.set_mode((c.WIDTH, c.HEIGHT))\n pygame.display.set_caption('Socket Game')\n self.clock = pygame.time.Clock()\n self.screen.fill('white')\n self.font = pygame.font.SysFont(None, c.FONT_SIZE)\n <mask token>\n\n def connect_to_server(self):\n self.connection.connect((c.HOST, c.PORT))\n\n def listen_to_server(self):\n ins, outs, ex = select.select([self.connection], [], [], 0)\n for inm in ins:\n received_data = inm.recv(c.BUFFSIZE)\n event: Event = pickle.loads(received_data)\n print('<<<', event)\n if isinstance(event, CurrentPlayerEvent):\n pygame.display.set_caption(\n f'Socket Game - {event.player.nickname}')\n self.current_player = event.player\n elif isinstance(event, PlayerDidMoveEvent):\n self.update_player(event.player, event.location)\n elif isinstance(event, PlayerJoinedEvent):\n self.update_player(event.player)\n\n def update_player(self, player: Player, location=(c.WIDTH / 2, c.HEIGHT /\n 2)):\n self.other_players[player.nickname] = player, location\n\n def update_server(self):\n if self.current_player is not None:\n self.connection.send(pickle.dumps(PlayerDidMoveEvent(self.\n current_player, (self.location[0], self.location[1]))))\n\n def game_loop(self):\n self.listen_to_server()\n self.event_handling()\n self.update_location()\n self.render()\n self.update_server()\n self.clock.tick(60)\n\n def update_location(self):\n oldx, oldy = self.location\n vx, vy = self.velocity\n newx, newy = oldx + vx, oldy + vy\n if newx > c.WIDTH - c.PLAYER_SIZE:\n newx = c.WIDTH - c.PLAYER_SIZE\n if newx < 0:\n newx = 0\n if newy > c.HEIGHT - 
c.PLAYER_SIZE:\n newy = c.HEIGHT - c.PLAYER_SIZE\n if newy < 0:\n newy = 0\n self.location = [newx, newy]\n\n def render_player(self, player: Player, location: Tuple[int, int]):\n x, y = location\n img = self.font.render(player.nickname, True, player.color)\n pygame.draw.rect(self.screen, player.color, (x, y, c.PLAYER_SIZE, c\n .PLAYER_SIZE))\n self.screen.blit(img, (x, y - img.get_height()))\n\n def render(self):\n self.screen.fill((255, 255, 255))\n if self.current_player is not None:\n self.render_player(self.current_player, (self.location[0], self\n .location[1]))\n for nickname, (player, location) in self.other_players.items():\n self.render_player(player, location)\n pygame.display.flip()\n\n def event_handling(self):\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_LEFT:\n self.velocity[0] = -c.MOVEMENT_SPEED\n if event.key == K_RIGHT:\n self.velocity[0] = c.MOVEMENT_SPEED\n if event.key == K_UP:\n self.velocity[1] = -c.MOVEMENT_SPEED\n if event.key == K_DOWN:\n self.velocity[1] = c.MOVEMENT_SPEED\n if event.type == KEYUP:\n if event.key == K_LEFT:\n self.velocity[0] = 0\n if event.key == K_RIGHT:\n self.velocity[0] = 0\n if event.key == K_UP:\n self.velocity[1] = 0\n if event.key == K_DOWN:\n self.velocity[1] = 0\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Game:\n location: list[int, int] = [c.WIDTH / 2, c.HEIGHT / 2]\n velocity: list[int, int] = [0, 0]\n current_player: Player = None\n other_players: Dict[str, Tuple[Player, Tuple[int, int]]] = {}\n connection: socket.socket\n font: pygame.font.Font\n\n def __init__(self):\n pygame.init()\n self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.screen = pygame.display.set_mode((c.WIDTH, c.HEIGHT))\n pygame.display.set_caption('Socket Game')\n self.clock = pygame.time.Clock()\n self.screen.fill('white')\n self.font = pygame.font.SysFont(None, c.FONT_SIZE)\n\n def start(self):\n self.connect_to_server()\n while True:\n self.game_loop()\n\n def connect_to_server(self):\n self.connection.connect((c.HOST, c.PORT))\n\n def listen_to_server(self):\n ins, outs, ex = select.select([self.connection], [], [], 0)\n for inm in ins:\n received_data = inm.recv(c.BUFFSIZE)\n event: Event = pickle.loads(received_data)\n print('<<<', event)\n if isinstance(event, CurrentPlayerEvent):\n pygame.display.set_caption(\n f'Socket Game - {event.player.nickname}')\n self.current_player = event.player\n elif isinstance(event, PlayerDidMoveEvent):\n self.update_player(event.player, event.location)\n elif isinstance(event, PlayerJoinedEvent):\n self.update_player(event.player)\n\n def update_player(self, player: Player, location=(c.WIDTH / 2, c.HEIGHT /\n 2)):\n self.other_players[player.nickname] = player, location\n\n def update_server(self):\n if self.current_player is not None:\n self.connection.send(pickle.dumps(PlayerDidMoveEvent(self.\n current_player, (self.location[0], self.location[1]))))\n\n def game_loop(self):\n self.listen_to_server()\n self.event_handling()\n self.update_location()\n self.render()\n self.update_server()\n self.clock.tick(60)\n\n def update_location(self):\n oldx, oldy = self.location\n vx, vy = self.velocity\n newx, newy = oldx + vx, oldy + vy\n if newx > c.WIDTH - c.PLAYER_SIZE:\n newx = c.WIDTH - 
c.PLAYER_SIZE\n if newx < 0:\n newx = 0\n if newy > c.HEIGHT - c.PLAYER_SIZE:\n newy = c.HEIGHT - c.PLAYER_SIZE\n if newy < 0:\n newy = 0\n self.location = [newx, newy]\n\n def render_player(self, player: Player, location: Tuple[int, int]):\n x, y = location\n img = self.font.render(player.nickname, True, player.color)\n pygame.draw.rect(self.screen, player.color, (x, y, c.PLAYER_SIZE, c\n .PLAYER_SIZE))\n self.screen.blit(img, (x, y - img.get_height()))\n\n def render(self):\n self.screen.fill((255, 255, 255))\n if self.current_player is not None:\n self.render_player(self.current_player, (self.location[0], self\n .location[1]))\n for nickname, (player, location) in self.other_players.items():\n self.render_player(player, location)\n pygame.display.flip()\n\n def event_handling(self):\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_LEFT:\n self.velocity[0] = -c.MOVEMENT_SPEED\n if event.key == K_RIGHT:\n self.velocity[0] = c.MOVEMENT_SPEED\n if event.key == K_UP:\n self.velocity[1] = -c.MOVEMENT_SPEED\n if event.key == K_DOWN:\n self.velocity[1] = c.MOVEMENT_SPEED\n if event.type == KEYUP:\n if event.key == K_LEFT:\n self.velocity[0] = 0\n if event.key == K_RIGHT:\n self.velocity[0] = 0\n if event.key == K_UP:\n self.velocity[1] = 0\n if event.key == K_DOWN:\n self.velocity[1] = 0\n\n\n<mask token>\n",
"step-4": "import pickle\nimport select\nimport socket\nimport sys\nfrom threading import Thread\nfrom typing import Dict, Tuple\nimport pygame\nfrom pygame.locals import *\nimport c\nfrom models import *\n\n\nclass Game:\n location: list[int, int] = [c.WIDTH / 2, c.HEIGHT / 2]\n velocity: list[int, int] = [0, 0]\n current_player: Player = None\n other_players: Dict[str, Tuple[Player, Tuple[int, int]]] = {}\n connection: socket.socket\n font: pygame.font.Font\n\n def __init__(self):\n pygame.init()\n self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.screen = pygame.display.set_mode((c.WIDTH, c.HEIGHT))\n pygame.display.set_caption('Socket Game')\n self.clock = pygame.time.Clock()\n self.screen.fill('white')\n self.font = pygame.font.SysFont(None, c.FONT_SIZE)\n\n def start(self):\n self.connect_to_server()\n while True:\n self.game_loop()\n\n def connect_to_server(self):\n self.connection.connect((c.HOST, c.PORT))\n\n def listen_to_server(self):\n ins, outs, ex = select.select([self.connection], [], [], 0)\n for inm in ins:\n received_data = inm.recv(c.BUFFSIZE)\n event: Event = pickle.loads(received_data)\n print('<<<', event)\n if isinstance(event, CurrentPlayerEvent):\n pygame.display.set_caption(\n f'Socket Game - {event.player.nickname}')\n self.current_player = event.player\n elif isinstance(event, PlayerDidMoveEvent):\n self.update_player(event.player, event.location)\n elif isinstance(event, PlayerJoinedEvent):\n self.update_player(event.player)\n\n def update_player(self, player: Player, location=(c.WIDTH / 2, c.HEIGHT /\n 2)):\n self.other_players[player.nickname] = player, location\n\n def update_server(self):\n if self.current_player is not None:\n self.connection.send(pickle.dumps(PlayerDidMoveEvent(self.\n current_player, (self.location[0], self.location[1]))))\n\n def game_loop(self):\n self.listen_to_server()\n self.event_handling()\n self.update_location()\n self.render()\n self.update_server()\n self.clock.tick(60)\n\n def 
update_location(self):\n oldx, oldy = self.location\n vx, vy = self.velocity\n newx, newy = oldx + vx, oldy + vy\n if newx > c.WIDTH - c.PLAYER_SIZE:\n newx = c.WIDTH - c.PLAYER_SIZE\n if newx < 0:\n newx = 0\n if newy > c.HEIGHT - c.PLAYER_SIZE:\n newy = c.HEIGHT - c.PLAYER_SIZE\n if newy < 0:\n newy = 0\n self.location = [newx, newy]\n\n def render_player(self, player: Player, location: Tuple[int, int]):\n x, y = location\n img = self.font.render(player.nickname, True, player.color)\n pygame.draw.rect(self.screen, player.color, (x, y, c.PLAYER_SIZE, c\n .PLAYER_SIZE))\n self.screen.blit(img, (x, y - img.get_height()))\n\n def render(self):\n self.screen.fill((255, 255, 255))\n if self.current_player is not None:\n self.render_player(self.current_player, (self.location[0], self\n .location[1]))\n for nickname, (player, location) in self.other_players.items():\n self.render_player(player, location)\n pygame.display.flip()\n\n def event_handling(self):\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_LEFT:\n self.velocity[0] = -c.MOVEMENT_SPEED\n if event.key == K_RIGHT:\n self.velocity[0] = c.MOVEMENT_SPEED\n if event.key == K_UP:\n self.velocity[1] = -c.MOVEMENT_SPEED\n if event.key == K_DOWN:\n self.velocity[1] = c.MOVEMENT_SPEED\n if event.type == KEYUP:\n if event.key == K_LEFT:\n self.velocity[0] = 0\n if event.key == K_RIGHT:\n self.velocity[0] = 0\n if event.key == K_UP:\n self.velocity[1] = 0\n if event.key == K_DOWN:\n self.velocity[1] = 0\n\n\nif __name__ == '__main__':\n s = Game()\n s.start()\n",
"step-5": "import pickle\nimport select\nimport socket\nimport sys\nfrom threading import Thread\nfrom typing import Dict, Tuple\n\nimport pygame\nfrom pygame.locals import *\n\nimport c\nfrom models import *\n\n\nclass Game:\n location: list[int, int] = [c.WIDTH / 2, c.HEIGHT / 2]\n velocity: list[int, int] = [0, 0]\n current_player: Player = None\n other_players: Dict[str, Tuple[Player, Tuple[int, int]]] = {}\n connection: socket.socket\n font: pygame.font.Font\n\n def __init__(self):\n pygame.init()\n self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.screen = pygame.display.set_mode((c.WIDTH, c.HEIGHT))\n pygame.display.set_caption('Socket Game')\n self.clock = pygame.time.Clock()\n self.screen.fill('white')\n self.font = pygame.font.SysFont(None, c.FONT_SIZE)\n\n def start(self):\n self.connect_to_server()\n while True:\n self.game_loop()\n\n def connect_to_server(self):\n self.connection.connect((c.HOST, c.PORT))\n\n def listen_to_server(self):\n ins, outs, ex = select.select([self.connection], [], [], 0)\n for inm in ins:\n received_data = inm.recv(c.BUFFSIZE)\n event: Event = pickle.loads(received_data)\n print(\"<<<\", event)\n if isinstance(event, CurrentPlayerEvent):\n pygame.display.set_caption(f'Socket Game - {event.player.nickname}')\n self.current_player = event.player\n elif isinstance(event, PlayerDidMoveEvent):\n self.update_player(event.player, event.location)\n elif isinstance(event, PlayerJoinedEvent):\n self.update_player(event.player)\n\n def update_player(self, player: Player, location=(c.WIDTH / 2, c.HEIGHT / 2)):\n self.other_players[player.nickname] = (player, location)\n\n def update_server(self):\n if self.current_player is not None:\n self.connection.send(pickle.dumps(PlayerDidMoveEvent(self.current_player, (\n self.location[0], self.location[1],\n ))))\n\n def game_loop(self):\n self.listen_to_server()\n self.event_handling()\n self.update_location()\n self.render()\n self.update_server()\n 
self.clock.tick(60)\n\n def update_location(self):\n oldx, oldy = self.location\n vx, vy = self.velocity\n newx, newy = oldx + vx, oldy + vy\n if newx > c.WIDTH - c.PLAYER_SIZE:\n newx = c.WIDTH - c.PLAYER_SIZE\n if newx < 0:\n newx = 0\n\n if newy > c.HEIGHT - c.PLAYER_SIZE:\n newy = c.HEIGHT - c.PLAYER_SIZE\n if newy < 0:\n newy = 0\n\n self.location = [newx, newy]\n\n def render_player(self, player: Player, location: Tuple[int, int]):\n x, y = location\n img = self.font.render(player.nickname, True, player.color)\n pygame.draw.rect(self.screen, player.color, (x, y, c.PLAYER_SIZE, c.PLAYER_SIZE))\n self.screen.blit(img, (x, y - img.get_height()))\n\n def render(self):\n self.screen.fill((255, 255, 255))\n if self.current_player is not None:\n self.render_player(self.current_player, (self.location[0], self.location[1]))\n for nickname, (player, location) in self.other_players.items():\n self.render_player(player, location)\n\n pygame.display.flip()\n\n def event_handling(self):\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_LEFT: self.velocity[0] = -c.MOVEMENT_SPEED\n if event.key == K_RIGHT: self.velocity[0] = c.MOVEMENT_SPEED\n if event.key == K_UP: self.velocity[1] = -c.MOVEMENT_SPEED\n if event.key == K_DOWN: self.velocity[1] = c.MOVEMENT_SPEED\n if event.type == KEYUP:\n if event.key == K_LEFT: self.velocity[0] = 0\n if event.key == K_RIGHT: self.velocity[0] = 0\n if event.key == K_UP: self.velocity[1] = 0\n if event.key == K_DOWN: self.velocity[1] = 0\n\n\nif __name__ == \"__main__\":\n s = Game()\n s.start()\n",
"step-ids": [
10,
11,
12,
14,
15
]
}
|
[
10,
11,
12,
14,
15
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
m.sort()
x -= sum(m)
print(n + x // m[0])
<|reserved_special_token_1|>
# Read the item count n and the budget x, then the n individual costs.
n, x = map(int, input().split())
costs = sorted(int(input()) for _ in range(n))
# Buy each item once; every leftover chunk equal to the cheapest cost
# buys one more of that item.
leftover = x - sum(costs)
print(n + leftover // costs[0])
|
flexible
|
{
"blob_id": "0ff398775fd13fb5fbd23bf2359bb31dff6bd38c",
"index": 9821,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nm.sort()\nx -= sum(m)\nprint(n + x // m[0])\n",
"step-3": "n, x = map(int, input().split())\nm = [int(input()) for _ in range(n)]\nm.sort()\nx -= sum(m)\nprint(n + x // m[0])\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
'''Module main'''
import argparse
import api
import quoridor
import quoridorx
def analyser_commande(argv=None):
    '''Parse the command line for the Quoridor client.

    Args:
        argv: optional list of argument strings to parse; defaults to
            None, in which case argparse reads sys.argv[1:] (so existing
            callers are unchanged). Passing an explicit list makes the
            parser testable without touching sys.argv.

    Returns:
        argparse.Namespace with attributes ``idul`` (str), ``lister``,
        ``automatique`` and ``graphique`` (bools).
    '''
    parser = argparse.ArgumentParser(description='Jeu Quoridor - phase 3')

    parser.add_argument("idul", help="IDUL du joueur.")

    parser.add_argument("-l", '--lister', action='store_true',
                        help="Lister les identifiants de vos 20 dernières parties.")
    # -a: play moves automatically instead of prompting the user
    parser.add_argument("-a", '--automatique', action='store_true',
                        help="Activer le mode automatique.")
    # -x: use the graphical (QuoridorX) display instead of text output
    parser.add_argument("-x", '--graphique', action='store_true',
                        help="Activer le mode graphique.")

    return parser.parse_args(argv)
# Entry point for the Quoridor client: parse the command line and run
# the requested mode against the game server.
if __name__ == "__main__":
    COMMANDE = analyser_commande()

    if COMMANDE.lister:
        # Only list the player's recent game ids, then exit.
        print(api.lister_parties(COMMANDE.idul))

    # Automatic mode with graphics (command: python main.py -ax idul)
    elif COMMANDE.automatique and COMMANDE.graphique:
        DEBUTER = api.débuter_partie(COMMANDE.idul)
        JEU = quoridorx.QuoridorX(DEBUTER[1]['joueurs'], DEBUTER[1]['murs'])
        ID_PARTIE = DEBUTER[0]

        JEU.afficher()

        GAGNANT = True
        while GAGNANT:
            try:
                # The game object chooses our move, then the server validates it.
                COUP = JEU.jouer_coup(1)

                JOUER = api.jouer_coup(ID_PARTIE, COUP[0], COUP[1])
                # Refresh local state from the server's authoritative reply.
                JEU.liste_joueurs = JOUER['joueurs']
                JEU.liste_murs = JOUER['murs']

                JEU.afficher()
            except StopIteration as err:
                # NOTE(review): StopIteration appears to carry the winner's
                # name -- presumably raised when the game ends; confirm in api.
                GAGNANT = False
                print(f'Le gagnant est: {err}')
            except RuntimeError as err:
                # Move rejected by the server; report and try again.
                print(err)

    # Automatic mode, text display (command: python main.py -a idul)
    elif COMMANDE.automatique:
        DEBUTER = api.débuter_partie(COMMANDE.idul)
        JEU = quoridor.Quoridor(DEBUTER[1]['joueurs'], DEBUTER[1]['murs'])
        ID_PARTIE = DEBUTER[0]

        print(JEU)

        GAGNANT = True
        while GAGNANT:
            try:
                COUP = JEU.jouer_coup(1)

                JOUER = api.jouer_coup(ID_PARTIE, COUP[0], COUP[1])

                # Refresh local state from the server's authoritative reply.
                JEU.liste_joueurs = JOUER['joueurs']
                JEU.liste_murs = JOUER['murs']

                print(JEU)
            except StopIteration as err:
                # Game over: the exception value names the winner.
                GAGNANT = False
                print(f'Le gagnant est: {err}')
            except RuntimeError as err:
                print(err)
    # Manual mode with graphics (command: python main.py -x idul)
    elif COMMANDE.graphique:
        DEBUTER = api.débuter_partie(COMMANDE.idul)
        JEU = quoridorx.QuoridorX(DEBUTER[1]['joueurs'], DEBUTER[1]['murs'])
        ID_PARTIE = DEBUTER[0]

        JEU.afficher()

        GAGNANT = True
        while GAGNANT:
            OK_CHOIX = True
            # Keep prompting until the server accepts the move (or the game ends).
            while OK_CHOIX:
                CHOIX_COUP = input('Choisir votre coup("D","MH", "MV"): ')
                POS = input('Entrer les coordonnées (x,y): ')

                try:
                    JOUER = api.jouer_coup(ID_PARTIE, CHOIX_COUP, POS)

                    OK_CHOIX = False

                    JEU.liste_joueurs = JOUER['joueurs']
                    JEU.liste_murs = JOUER['murs']
                    JEU.afficher()
                except StopIteration as err:
                    # Game over: leave both loops and announce the winner.
                    OK_CHOIX = False
                    GAGNANT = False
                    print(f'Le gagnant est: {err}')
                except RuntimeError as err:
                    # Invalid move; stay in the inner loop and re-prompt.
                    print(err)

    # Manual mode against the server (command: python main.py idul)
    else:
        DEBUTER = api.débuter_partie(COMMANDE.idul)
        JEU = quoridor.Quoridor(DEBUTER[1]['joueurs'], DEBUTER[1]['murs'])
        ID_PARTIE = DEBUTER[0]

        print(JEU)

        GAGNANT = True
        while GAGNANT:
            OK_CHOIX = True
            # Keep prompting until the server accepts the move (or the game ends).
            while OK_CHOIX:
                CHOIX_COUP = input('Choisir votre coup("D","MH", "MV"): ')
                POS = input('Entrer les coordonnées (x,y): ')

                try:
                    JOUER = api.jouer_coup(ID_PARTIE, CHOIX_COUP, POS)

                    OK_CHOIX = False
                    JEU.liste_joueurs = JOUER['joueurs']
                    JEU.liste_murs = JOUER['murs']

                    print(JEU)
                except StopIteration as err:
                    # Game over: leave both loops and announce the winner.
                    OK_CHOIX = False
                    GAGNANT = False
                    print(f'Le gagnant est: {err}')
                except RuntimeError as err:
                    # Invalid move; stay in the inner loop and re-prompt.
                    print(err)
|
normal
|
{
"blob_id": "f69544a9123f1738cd7d21c1b4fc02dd73fb9d1b",
"index": 6008,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef analyser_commande():\n \"\"\"Analyseur de ligne de commande.\"\"\"\n parser = argparse.ArgumentParser(description='Jeu Quoridor - phase 3')\n parser.add_argument('idul', help='IDUL du joueur.')\n parser.add_argument('-l', '--lister', action='store_true', help=\n 'Lister les identifiants de vos 20 dernières parties.')\n parser.add_argument('-a', '--automatique', action='store_true', help=\n 'Activer le mode automatique.')\n parser.add_argument('-x', '--graphique', action='store_true', help=\n 'Activer le mode graphique.')\n return parser.parse_args()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef analyser_commande():\n \"\"\"Analyseur de ligne de commande.\"\"\"\n parser = argparse.ArgumentParser(description='Jeu Quoridor - phase 3')\n parser.add_argument('idul', help='IDUL du joueur.')\n parser.add_argument('-l', '--lister', action='store_true', help=\n 'Lister les identifiants de vos 20 dernières parties.')\n parser.add_argument('-a', '--automatique', action='store_true', help=\n 'Activer le mode automatique.')\n parser.add_argument('-x', '--graphique', action='store_true', help=\n 'Activer le mode graphique.')\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n COMMANDE = analyser_commande()\n if COMMANDE.lister:\n print(api.lister_parties(COMMANDE.idul))\n elif COMMANDE.automatique and COMMANDE.graphique:\n DEBUTER = api.débuter_partie(COMMANDE.idul)\n JEU = quoridorx.QuoridorX(DEBUTER[1]['joueurs'], DEBUTER[1]['murs'])\n ID_PARTIE = DEBUTER[0]\n JEU.afficher()\n GAGNANT = True\n while GAGNANT:\n try:\n COUP = JEU.jouer_coup(1)\n JOUER = api.jouer_coup(ID_PARTIE, COUP[0], COUP[1])\n JEU.liste_joueurs = JOUER['joueurs']\n JEU.liste_murs = JOUER['murs']\n JEU.afficher()\n except StopIteration as err:\n GAGNANT = False\n print(f'Le gagnant est: {err}')\n except RuntimeError as err:\n print(err)\n elif COMMANDE.automatique:\n DEBUTER = api.débuter_partie(COMMANDE.idul)\n JEU = quoridor.Quoridor(DEBUTER[1]['joueurs'], DEBUTER[1]['murs'])\n ID_PARTIE = DEBUTER[0]\n print(JEU)\n GAGNANT = True\n while GAGNANT:\n try:\n COUP = JEU.jouer_coup(1)\n JOUER = api.jouer_coup(ID_PARTIE, COUP[0], COUP[1])\n JEU.liste_joueurs = JOUER['joueurs']\n JEU.liste_murs = JOUER['murs']\n print(JEU)\n except StopIteration as err:\n GAGNANT = False\n print(f'Le gagnant est: {err}')\n except RuntimeError as err:\n print(err)\n elif COMMANDE.graphique:\n DEBUTER = api.débuter_partie(COMMANDE.idul)\n JEU = quoridorx.QuoridorX(DEBUTER[1]['joueurs'], DEBUTER[1]['murs'])\n ID_PARTIE = DEBUTER[0]\n JEU.afficher()\n GAGNANT = True\n while 
GAGNANT:\n OK_CHOIX = True\n while OK_CHOIX:\n CHOIX_COUP = input('Choisir votre coup(\"D\",\"MH\", \"MV\"): ')\n POS = input('Entrer les coordonnées (x,y): ')\n try:\n JOUER = api.jouer_coup(ID_PARTIE, CHOIX_COUP, POS)\n OK_CHOIX = False\n JEU.liste_joueurs = JOUER['joueurs']\n JEU.liste_murs = JOUER['murs']\n JEU.afficher()\n except StopIteration as err:\n OK_CHOIX = False\n GAGNANT = False\n print(f'Le gagnant est: {err}')\n except RuntimeError as err:\n print(err)\n else:\n DEBUTER = api.débuter_partie(COMMANDE.idul)\n JEU = quoridor.Quoridor(DEBUTER[1]['joueurs'], DEBUTER[1]['murs'])\n ID_PARTIE = DEBUTER[0]\n print(JEU)\n GAGNANT = True\n while GAGNANT:\n OK_CHOIX = True\n while OK_CHOIX:\n CHOIX_COUP = input('Choisir votre coup(\"D\",\"MH\", \"MV\"): ')\n POS = input('Entrer les coordonnées (x,y): ')\n try:\n JOUER = api.jouer_coup(ID_PARTIE, CHOIX_COUP, POS)\n OK_CHOIX = False\n JEU.liste_joueurs = JOUER['joueurs']\n JEU.liste_murs = JOUER['murs']\n print(JEU)\n except StopIteration as err:\n OK_CHOIX = False\n GAGNANT = False\n print(f'Le gagnant est: {err}')\n except RuntimeError as err:\n print(err)\n",
"step-4": "<mask token>\nimport argparse\nimport api\nimport quoridor\nimport quoridorx\n\n\ndef analyser_commande():\n \"\"\"Analyseur de ligne de commande.\"\"\"\n parser = argparse.ArgumentParser(description='Jeu Quoridor - phase 3')\n parser.add_argument('idul', help='IDUL du joueur.')\n parser.add_argument('-l', '--lister', action='store_true', help=\n 'Lister les identifiants de vos 20 dernières parties.')\n parser.add_argument('-a', '--automatique', action='store_true', help=\n 'Activer le mode automatique.')\n parser.add_argument('-x', '--graphique', action='store_true', help=\n 'Activer le mode graphique.')\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n COMMANDE = analyser_commande()\n if COMMANDE.lister:\n print(api.lister_parties(COMMANDE.idul))\n elif COMMANDE.automatique and COMMANDE.graphique:\n DEBUTER = api.débuter_partie(COMMANDE.idul)\n JEU = quoridorx.QuoridorX(DEBUTER[1]['joueurs'], DEBUTER[1]['murs'])\n ID_PARTIE = DEBUTER[0]\n JEU.afficher()\n GAGNANT = True\n while GAGNANT:\n try:\n COUP = JEU.jouer_coup(1)\n JOUER = api.jouer_coup(ID_PARTIE, COUP[0], COUP[1])\n JEU.liste_joueurs = JOUER['joueurs']\n JEU.liste_murs = JOUER['murs']\n JEU.afficher()\n except StopIteration as err:\n GAGNANT = False\n print(f'Le gagnant est: {err}')\n except RuntimeError as err:\n print(err)\n elif COMMANDE.automatique:\n DEBUTER = api.débuter_partie(COMMANDE.idul)\n JEU = quoridor.Quoridor(DEBUTER[1]['joueurs'], DEBUTER[1]['murs'])\n ID_PARTIE = DEBUTER[0]\n print(JEU)\n GAGNANT = True\n while GAGNANT:\n try:\n COUP = JEU.jouer_coup(1)\n JOUER = api.jouer_coup(ID_PARTIE, COUP[0], COUP[1])\n JEU.liste_joueurs = JOUER['joueurs']\n JEU.liste_murs = JOUER['murs']\n print(JEU)\n except StopIteration as err:\n GAGNANT = False\n print(f'Le gagnant est: {err}')\n except RuntimeError as err:\n print(err)\n elif COMMANDE.graphique:\n DEBUTER = api.débuter_partie(COMMANDE.idul)\n JEU = quoridorx.QuoridorX(DEBUTER[1]['joueurs'], DEBUTER[1]['murs'])\n 
ID_PARTIE = DEBUTER[0]\n JEU.afficher()\n GAGNANT = True\n while GAGNANT:\n OK_CHOIX = True\n while OK_CHOIX:\n CHOIX_COUP = input('Choisir votre coup(\"D\",\"MH\", \"MV\"): ')\n POS = input('Entrer les coordonnées (x,y): ')\n try:\n JOUER = api.jouer_coup(ID_PARTIE, CHOIX_COUP, POS)\n OK_CHOIX = False\n JEU.liste_joueurs = JOUER['joueurs']\n JEU.liste_murs = JOUER['murs']\n JEU.afficher()\n except StopIteration as err:\n OK_CHOIX = False\n GAGNANT = False\n print(f'Le gagnant est: {err}')\n except RuntimeError as err:\n print(err)\n else:\n DEBUTER = api.débuter_partie(COMMANDE.idul)\n JEU = quoridor.Quoridor(DEBUTER[1]['joueurs'], DEBUTER[1]['murs'])\n ID_PARTIE = DEBUTER[0]\n print(JEU)\n GAGNANT = True\n while GAGNANT:\n OK_CHOIX = True\n while OK_CHOIX:\n CHOIX_COUP = input('Choisir votre coup(\"D\",\"MH\", \"MV\"): ')\n POS = input('Entrer les coordonnées (x,y): ')\n try:\n JOUER = api.jouer_coup(ID_PARTIE, CHOIX_COUP, POS)\n OK_CHOIX = False\n JEU.liste_joueurs = JOUER['joueurs']\n JEU.liste_murs = JOUER['murs']\n print(JEU)\n except StopIteration as err:\n OK_CHOIX = False\n GAGNANT = False\n print(f'Le gagnant est: {err}')\n except RuntimeError as err:\n print(err)\n",
"step-5": "'''Module main'''\nimport argparse\nimport api\nimport quoridor\nimport quoridorx\n\n\ndef analyser_commande():\n '''Analyseur de ligne de commande.'''\n parser = argparse.ArgumentParser(description='Jeu Quoridor - phase 3')\n\n parser.add_argument(\"idul\", help=\"IDUL du joueur.\")\n\n parser.add_argument(\"-l\", '--lister', action='store_true',\n help=\"Lister les identifiants de vos 20 dernières parties.\")\n # -a\n parser.add_argument(\"-a\", '--automatique', action='store_true',\n help=\"Activer le mode automatique.\")\n # -x\n parser.add_argument(\"-x\", '--graphique', action='store_true',\n help=\"Activer le mode graphique.\")\n\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n COMMANDE = analyser_commande()\n\n if COMMANDE.lister:\n print(api.lister_parties(COMMANDE.idul))\n\n # Mode automatique avec graphique (commande : python main.py -ax idul)\n elif COMMANDE.automatique and COMMANDE.graphique:\n DEBUTER = api.débuter_partie(COMMANDE.idul)\n JEU = quoridorx.QuoridorX(DEBUTER[1]['joueurs'], DEBUTER[1]['murs'])\n ID_PARTIE = DEBUTER[0]\n\n JEU.afficher()\n\n GAGNANT = True\n while GAGNANT:\n try:\n COUP = JEU.jouer_coup(1)\n\n JOUER = api.jouer_coup(ID_PARTIE, COUP[0], COUP[1])\n JEU.liste_joueurs = JOUER['joueurs']\n JEU.liste_murs = JOUER['murs']\n\n JEU.afficher()\n except StopIteration as err:\n GAGNANT = False\n print(f'Le gagnant est: {err}')\n except RuntimeError as err:\n print(err)\n\n # Mode automatique (commande : python main.py -a idul)\n elif COMMANDE.automatique:\n DEBUTER = api.débuter_partie(COMMANDE.idul)\n JEU = quoridor.Quoridor(DEBUTER[1]['joueurs'], DEBUTER[1]['murs'])\n ID_PARTIE = DEBUTER[0]\n\n print(JEU)\n\n GAGNANT = True\n while GAGNANT:\n try:\n COUP = JEU.jouer_coup(1)\n\n JOUER = api.jouer_coup(ID_PARTIE, COUP[0], COUP[1])\n\n JEU.liste_joueurs = JOUER['joueurs']\n JEU.liste_murs = JOUER['murs']\n\n print(JEU)\n except StopIteration as err:\n GAGNANT = False\n print(f'Le gagnant est: {err}')\n except 
RuntimeError as err:\n print(err)\n # Mode manuel avec graphique (commande : python main.py -x idul)\n elif COMMANDE.graphique:\n DEBUTER = api.débuter_partie(COMMANDE.idul)\n JEU = quoridorx.QuoridorX(DEBUTER[1]['joueurs'], DEBUTER[1]['murs'])\n ID_PARTIE = DEBUTER[0]\n\n JEU.afficher()\n\n GAGNANT = True\n while GAGNANT:\n OK_CHOIX = True\n while OK_CHOIX:\n CHOIX_COUP = input('Choisir votre coup(\"D\",\"MH\", \"MV\"): ')\n POS = input('Entrer les coordonnées (x,y): ')\n\n try:\n JOUER = api.jouer_coup(ID_PARTIE, CHOIX_COUP, POS)\n\n OK_CHOIX = False\n\n JEU.liste_joueurs = JOUER['joueurs']\n JEU.liste_murs = JOUER['murs']\n JEU.afficher()\n except StopIteration as err:\n OK_CHOIX = False\n GAGNANT = False\n print(f'Le gagnant est: {err}')\n except RuntimeError as err:\n print(err)\n\n # Mode manuel contre le serveur (commande : python main.py idul)\n else:\n DEBUTER = api.débuter_partie(COMMANDE.idul)\n JEU = quoridor.Quoridor(DEBUTER[1]['joueurs'], DEBUTER[1]['murs'])\n ID_PARTIE = DEBUTER[0]\n\n print(JEU)\n\n GAGNANT = True\n while GAGNANT:\n OK_CHOIX = True\n while OK_CHOIX:\n CHOIX_COUP = input('Choisir votre coup(\"D\",\"MH\", \"MV\"): ')\n POS = input('Entrer les coordonnées (x,y): ')\n\n try:\n JOUER = api.jouer_coup(ID_PARTIE, CHOIX_COUP, POS)\n\n OK_CHOIX = False\n JEU.liste_joueurs = JOUER['joueurs']\n JEU.liste_murs = JOUER['murs']\n\n print(JEU)\n except StopIteration as err:\n OK_CHOIX = False\n GAGNANT = False\n print(f'Le gagnant est: {err}')\n except RuntimeError as err:\n print(err)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from selenium import webdriver
driver = webdriver.Chrome(executable_path=r'D:\Naveen\Selenium\chromedriver_win32\chromedriver.exe')
driver.maximize_window()
driver.get('http://zero.webappsecurity.com/')
parent_window_handle = driver.current_window_handle
driver.find_element_by_xpath("(//a[contains(text(),'privacy')])[1]").click()
windows = driver.window_handles
#driver.switch_to.window(windows[1])
for window in windows:
driver.switch_to.window(window)
if driver.title == "Legal Information | Micro Focus":
break
driver.find_element_by_link_text('Free Trials').click()
driver.close()
driver.switch_to.window(parent_window_handle)
driver.find_element_by_id('signin_button').click()
|
normal
|
{
"blob_id": "223413918ba2a49cd13a34026d39b17fb5944572",
"index": 5849,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndriver.maximize_window()\ndriver.get('http://zero.webappsecurity.com/')\n<mask token>\ndriver.find_element_by_xpath(\"(//a[contains(text(),'privacy')])[1]\").click()\n<mask token>\nfor window in windows:\n driver.switch_to.window(window)\n if driver.title == 'Legal Information | Micro Focus':\n break\ndriver.find_element_by_link_text('Free Trials').click()\ndriver.close()\ndriver.switch_to.window(parent_window_handle)\ndriver.find_element_by_id('signin_button').click()\n",
"step-3": "<mask token>\ndriver = webdriver.Chrome(executable_path=\n 'D:\\\\Naveen\\\\Selenium\\\\chromedriver_win32\\\\chromedriver.exe')\ndriver.maximize_window()\ndriver.get('http://zero.webappsecurity.com/')\nparent_window_handle = driver.current_window_handle\ndriver.find_element_by_xpath(\"(//a[contains(text(),'privacy')])[1]\").click()\nwindows = driver.window_handles\nfor window in windows:\n driver.switch_to.window(window)\n if driver.title == 'Legal Information | Micro Focus':\n break\ndriver.find_element_by_link_text('Free Trials').click()\ndriver.close()\ndriver.switch_to.window(parent_window_handle)\ndriver.find_element_by_id('signin_button').click()\n",
"step-4": "from selenium import webdriver\ndriver = webdriver.Chrome(executable_path=\n 'D:\\\\Naveen\\\\Selenium\\\\chromedriver_win32\\\\chromedriver.exe')\ndriver.maximize_window()\ndriver.get('http://zero.webappsecurity.com/')\nparent_window_handle = driver.current_window_handle\ndriver.find_element_by_xpath(\"(//a[contains(text(),'privacy')])[1]\").click()\nwindows = driver.window_handles\nfor window in windows:\n driver.switch_to.window(window)\n if driver.title == 'Legal Information | Micro Focus':\n break\ndriver.find_element_by_link_text('Free Trials').click()\ndriver.close()\ndriver.switch_to.window(parent_window_handle)\ndriver.find_element_by_id('signin_button').click()\n",
"step-5": "from selenium import webdriver\n\ndriver = webdriver.Chrome(executable_path=r'D:\\Naveen\\Selenium\\chromedriver_win32\\chromedriver.exe')\ndriver.maximize_window()\ndriver.get('http://zero.webappsecurity.com/')\n\nparent_window_handle = driver.current_window_handle\ndriver.find_element_by_xpath(\"(//a[contains(text(),'privacy')])[1]\").click()\n\nwindows = driver.window_handles\n#driver.switch_to.window(windows[1])\n\nfor window in windows:\n driver.switch_to.window(window)\n if driver.title == \"Legal Information | Micro Focus\":\n break\n\ndriver.find_element_by_link_text('Free Trials').click()\ndriver.close()\ndriver.switch_to.window(parent_window_handle)\ndriver.find_element_by_id('signin_button').click()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pickle
import numpy as np
import math
class AdaBoostClassifier:
'''A simple AdaBoost Classifier.'''
def __init__(self, weak_classifier, n_weakers_limit):
'''Initialize AdaBoostClassifier
Args:
weak_classifier: The class of weak classifier, which is recommend to be sklearn.tree.DecisionTreeClassifier.
n_weakers_limit: The maximum number of weak classifier the model can use.
'''
self.weakClassifier = weak_classifier
self.iteration = n_weakers_limit
def is_good_enough(self):
'''Optional'''
pass
def calculateError(self, y, predictY, weights):
"""
函数作用:计算误差
:param y:列表,标签
:param predictY:列表,元素是预测值
:param weights:列表,权重值
:return:误差
"""
error = 0
for i in range(len(y)):
if y[i] != predictY[i]:
error += weights[i]
return error
def fit(self,X,y):
'''Build a boosted classifier from the training set (X, y).
Args:
X: An ndarray indicating the samples to be trained, which shape should be (n_samples,n_features).
y: An ndarray indicating the ground-truth labels correspond to X, which shape should be (n_samples,1).
'''
row, col = X.shape
weightArray = [(1 / row)] * row
self.alphaList = []
self.finalClassifierList = []
for i in range(self.iteration):
clf = self.weakClassifier(max_depth=2)
clf.fit(X,y,weightArray)
predictY = clf.predict(X)
error = self.calculateError(y, predictY, weightArray)
if error > 0.5:
break
else:
self.finalClassifierList.append(clf)
alpha = 0.5 * math.log((1-error) / error)
self.alphaList.append(alpha)
aYH = alpha * y * predictY * (-1)
tempWeights = weightArray * np.exp(aYH)
tempSum = np.sum(tempWeights)
weightArray = tempWeights / tempSum
def predict_scores(self, X):
'''Calculate the weighted sum score of the whole base classifiers for given samples.
Args:
X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).
Returns:
An one-dimension ndarray indicating the scores of differnt samples, which shape should be (n_samples,1).
'''
pass
def predict(self, X, threshold=0):
'''Predict the catagories for geven samples.
Args:
X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).
threshold: The demarcation number of deviding the samples into two parts.
Returns:
An ndarray consists of predicted labels, which shape should be (n_samples,1).
'''
predictYList = []
for i in range(len(self.finalClassifierList)):
tempY = self.finalClassifierList[i].predict(X)
predictYList.append(tempY)
predicYArray = np.transpose(np.array(predictYList))
alphaArray = np.array(self.alphaList)
temp = predicYArray * alphaArray
predictY = np.sum(temp, axis = 1)
for i in range(len(predictY)):
if predictY[i] > threshold:
predictY[i] = 1
else:
predictY[i] = -1
return predictY
@staticmethod
def save(model, filename):
with open(filename, "wb") as f:
pickle.dump(model, f)
@staticmethod
def load(filename):
with open(filename, "rb") as f:
return pickle.load(f)
|
normal
|
{
"blob_id": "905d8be76ef245a2b8fcfb3f806f8922d351ecf0",
"index": 8877,
"step-1": "<mask token>\n\n\nclass AdaBoostClassifier:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def predict(self, X, threshold=0):\n \"\"\"Predict the catagories for geven samples.\n\n Args:\n X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).\n threshold: The demarcation number of deviding the samples into two parts.\n\n Returns:\n An ndarray consists of predicted labels, which shape should be (n_samples,1).\n \"\"\"\n predictYList = []\n for i in range(len(self.finalClassifierList)):\n tempY = self.finalClassifierList[i].predict(X)\n predictYList.append(tempY)\n predicYArray = np.transpose(np.array(predictYList))\n alphaArray = np.array(self.alphaList)\n temp = predicYArray * alphaArray\n predictY = np.sum(temp, axis=1)\n for i in range(len(predictY)):\n if predictY[i] > threshold:\n predictY[i] = 1\n else:\n predictY[i] = -1\n return predictY\n <mask token>\n\n @staticmethod\n def load(filename):\n with open(filename, 'rb') as f:\n return pickle.load(f)\n",
"step-2": "<mask token>\n\n\nclass AdaBoostClassifier:\n <mask token>\n <mask token>\n\n def is_good_enough(self):\n \"\"\"Optional\"\"\"\n pass\n <mask token>\n\n def fit(self, X, y):\n \"\"\"Build a boosted classifier from the training set (X, y).\n\n Args:\n X: An ndarray indicating the samples to be trained, which shape should be (n_samples,n_features).\n y: An ndarray indicating the ground-truth labels correspond to X, which shape should be (n_samples,1).\n \"\"\"\n row, col = X.shape\n weightArray = [1 / row] * row\n self.alphaList = []\n self.finalClassifierList = []\n for i in range(self.iteration):\n clf = self.weakClassifier(max_depth=2)\n clf.fit(X, y, weightArray)\n predictY = clf.predict(X)\n error = self.calculateError(y, predictY, weightArray)\n if error > 0.5:\n break\n else:\n self.finalClassifierList.append(clf)\n alpha = 0.5 * math.log((1 - error) / error)\n self.alphaList.append(alpha)\n aYH = alpha * y * predictY * -1\n tempWeights = weightArray * np.exp(aYH)\n tempSum = np.sum(tempWeights)\n weightArray = tempWeights / tempSum\n\n def predict_scores(self, X):\n \"\"\"Calculate the weighted sum score of the whole base classifiers for given samples.\n\n Args:\n X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).\n\n Returns:\n An one-dimension ndarray indicating the scores of differnt samples, which shape should be (n_samples,1).\n \"\"\"\n pass\n\n def predict(self, X, threshold=0):\n \"\"\"Predict the catagories for geven samples.\n\n Args:\n X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).\n threshold: The demarcation number of deviding the samples into two parts.\n\n Returns:\n An ndarray consists of predicted labels, which shape should be (n_samples,1).\n \"\"\"\n predictYList = []\n for i in range(len(self.finalClassifierList)):\n tempY = self.finalClassifierList[i].predict(X)\n predictYList.append(tempY)\n predicYArray = 
np.transpose(np.array(predictYList))\n alphaArray = np.array(self.alphaList)\n temp = predicYArray * alphaArray\n predictY = np.sum(temp, axis=1)\n for i in range(len(predictY)):\n if predictY[i] > threshold:\n predictY[i] = 1\n else:\n predictY[i] = -1\n return predictY\n\n @staticmethod\n def save(model, filename):\n with open(filename, 'wb') as f:\n pickle.dump(model, f)\n\n @staticmethod\n def load(filename):\n with open(filename, 'rb') as f:\n return pickle.load(f)\n",
"step-3": "<mask token>\n\n\nclass AdaBoostClassifier:\n <mask token>\n <mask token>\n\n def is_good_enough(self):\n \"\"\"Optional\"\"\"\n pass\n\n def calculateError(self, y, predictY, weights):\n \"\"\"\n\t\t函数作用:计算误差\n :param y:列表,标签\n :param predictY:列表,元素是预测值\n :param weights:列表,权重值\n :return:误差\n \"\"\"\n error = 0\n for i in range(len(y)):\n if y[i] != predictY[i]:\n error += weights[i]\n return error\n\n def fit(self, X, y):\n \"\"\"Build a boosted classifier from the training set (X, y).\n\n Args:\n X: An ndarray indicating the samples to be trained, which shape should be (n_samples,n_features).\n y: An ndarray indicating the ground-truth labels correspond to X, which shape should be (n_samples,1).\n \"\"\"\n row, col = X.shape\n weightArray = [1 / row] * row\n self.alphaList = []\n self.finalClassifierList = []\n for i in range(self.iteration):\n clf = self.weakClassifier(max_depth=2)\n clf.fit(X, y, weightArray)\n predictY = clf.predict(X)\n error = self.calculateError(y, predictY, weightArray)\n if error > 0.5:\n break\n else:\n self.finalClassifierList.append(clf)\n alpha = 0.5 * math.log((1 - error) / error)\n self.alphaList.append(alpha)\n aYH = alpha * y * predictY * -1\n tempWeights = weightArray * np.exp(aYH)\n tempSum = np.sum(tempWeights)\n weightArray = tempWeights / tempSum\n\n def predict_scores(self, X):\n \"\"\"Calculate the weighted sum score of the whole base classifiers for given samples.\n\n Args:\n X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).\n\n Returns:\n An one-dimension ndarray indicating the scores of differnt samples, which shape should be (n_samples,1).\n \"\"\"\n pass\n\n def predict(self, X, threshold=0):\n \"\"\"Predict the catagories for geven samples.\n\n Args:\n X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).\n threshold: The demarcation number of deviding the samples into two parts.\n\n Returns:\n An ndarray 
consists of predicted labels, which shape should be (n_samples,1).\n \"\"\"\n predictYList = []\n for i in range(len(self.finalClassifierList)):\n tempY = self.finalClassifierList[i].predict(X)\n predictYList.append(tempY)\n predicYArray = np.transpose(np.array(predictYList))\n alphaArray = np.array(self.alphaList)\n temp = predicYArray * alphaArray\n predictY = np.sum(temp, axis=1)\n for i in range(len(predictY)):\n if predictY[i] > threshold:\n predictY[i] = 1\n else:\n predictY[i] = -1\n return predictY\n\n @staticmethod\n def save(model, filename):\n with open(filename, 'wb') as f:\n pickle.dump(model, f)\n\n @staticmethod\n def load(filename):\n with open(filename, 'rb') as f:\n return pickle.load(f)\n",
"step-4": "<mask token>\n\n\nclass AdaBoostClassifier:\n <mask token>\n\n def __init__(self, weak_classifier, n_weakers_limit):\n \"\"\"Initialize AdaBoostClassifier\n\n Args:\n weak_classifier: The class of weak classifier, which is recommend to be sklearn.tree.DecisionTreeClassifier.\n n_weakers_limit: The maximum number of weak classifier the model can use.\n \"\"\"\n self.weakClassifier = weak_classifier\n self.iteration = n_weakers_limit\n\n def is_good_enough(self):\n \"\"\"Optional\"\"\"\n pass\n\n def calculateError(self, y, predictY, weights):\n \"\"\"\n\t\t函数作用:计算误差\n :param y:列表,标签\n :param predictY:列表,元素是预测值\n :param weights:列表,权重值\n :return:误差\n \"\"\"\n error = 0\n for i in range(len(y)):\n if y[i] != predictY[i]:\n error += weights[i]\n return error\n\n def fit(self, X, y):\n \"\"\"Build a boosted classifier from the training set (X, y).\n\n Args:\n X: An ndarray indicating the samples to be trained, which shape should be (n_samples,n_features).\n y: An ndarray indicating the ground-truth labels correspond to X, which shape should be (n_samples,1).\n \"\"\"\n row, col = X.shape\n weightArray = [1 / row] * row\n self.alphaList = []\n self.finalClassifierList = []\n for i in range(self.iteration):\n clf = self.weakClassifier(max_depth=2)\n clf.fit(X, y, weightArray)\n predictY = clf.predict(X)\n error = self.calculateError(y, predictY, weightArray)\n if error > 0.5:\n break\n else:\n self.finalClassifierList.append(clf)\n alpha = 0.5 * math.log((1 - error) / error)\n self.alphaList.append(alpha)\n aYH = alpha * y * predictY * -1\n tempWeights = weightArray * np.exp(aYH)\n tempSum = np.sum(tempWeights)\n weightArray = tempWeights / tempSum\n\n def predict_scores(self, X):\n \"\"\"Calculate the weighted sum score of the whole base classifiers for given samples.\n\n Args:\n X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).\n\n Returns:\n An one-dimension ndarray indicating the scores of differnt samples, 
which shape should be (n_samples,1).\n \"\"\"\n pass\n\n def predict(self, X, threshold=0):\n \"\"\"Predict the catagories for geven samples.\n\n Args:\n X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).\n threshold: The demarcation number of deviding the samples into two parts.\n\n Returns:\n An ndarray consists of predicted labels, which shape should be (n_samples,1).\n \"\"\"\n predictYList = []\n for i in range(len(self.finalClassifierList)):\n tempY = self.finalClassifierList[i].predict(X)\n predictYList.append(tempY)\n predicYArray = np.transpose(np.array(predictYList))\n alphaArray = np.array(self.alphaList)\n temp = predicYArray * alphaArray\n predictY = np.sum(temp, axis=1)\n for i in range(len(predictY)):\n if predictY[i] > threshold:\n predictY[i] = 1\n else:\n predictY[i] = -1\n return predictY\n\n @staticmethod\n def save(model, filename):\n with open(filename, 'wb') as f:\n pickle.dump(model, f)\n\n @staticmethod\n def load(filename):\n with open(filename, 'rb') as f:\n return pickle.load(f)\n",
"step-5": "import pickle\nimport numpy as np\nimport math\n\nclass AdaBoostClassifier:\n '''A simple AdaBoost Classifier.'''\n\n def __init__(self, weak_classifier, n_weakers_limit):\n '''Initialize AdaBoostClassifier\n\n Args:\n weak_classifier: The class of weak classifier, which is recommend to be sklearn.tree.DecisionTreeClassifier.\n n_weakers_limit: The maximum number of weak classifier the model can use.\n '''\n self.weakClassifier = weak_classifier\n self.iteration = n_weakers_limit\n\n def is_good_enough(self):\n '''Optional'''\n pass\n\n def calculateError(self, y, predictY, weights):\n \"\"\"\n\t\t函数作用:计算误差\n :param y:列表,标签\n :param predictY:列表,元素是预测值\n :param weights:列表,权重值\n :return:误差\n \"\"\"\n error = 0\n for i in range(len(y)):\n if y[i] != predictY[i]:\n error += weights[i]\n return error\n\n def fit(self,X,y):\n '''Build a boosted classifier from the training set (X, y).\n\n Args:\n X: An ndarray indicating the samples to be trained, which shape should be (n_samples,n_features).\n y: An ndarray indicating the ground-truth labels correspond to X, which shape should be (n_samples,1).\n '''\n row, col = X.shape\n weightArray = [(1 / row)] * row\n self.alphaList = []\n self.finalClassifierList = []\n for i in range(self.iteration):\n clf = self.weakClassifier(max_depth=2)\n clf.fit(X,y,weightArray)\n predictY = clf.predict(X)\n error = self.calculateError(y, predictY, weightArray)\n if error > 0.5:\n break\n else:\n self.finalClassifierList.append(clf)\n alpha = 0.5 * math.log((1-error) / error)\n self.alphaList.append(alpha)\n aYH = alpha * y * predictY * (-1)\n tempWeights = weightArray * np.exp(aYH)\n tempSum = np.sum(tempWeights)\n weightArray = tempWeights / tempSum\n\n def predict_scores(self, X):\n '''Calculate the weighted sum score of the whole base classifiers for given samples.\n\n Args:\n X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).\n\n Returns:\n An one-dimension ndarray indicating 
the scores of differnt samples, which shape should be (n_samples,1).\n '''\n\n pass\n\n def predict(self, X, threshold=0):\n '''Predict the catagories for geven samples.\n\n Args:\n X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).\n threshold: The demarcation number of deviding the samples into two parts.\n\n Returns:\n An ndarray consists of predicted labels, which shape should be (n_samples,1).\n '''\n predictYList = []\n for i in range(len(self.finalClassifierList)):\n tempY = self.finalClassifierList[i].predict(X)\n predictYList.append(tempY)\n predicYArray = np.transpose(np.array(predictYList))\n alphaArray = np.array(self.alphaList)\n temp = predicYArray * alphaArray\n predictY = np.sum(temp, axis = 1)\n for i in range(len(predictY)):\n if predictY[i] > threshold:\n predictY[i] = 1\n else:\n predictY[i] = -1\n return predictY\n\n @staticmethod\n def save(model, filename):\n with open(filename, \"wb\") as f:\n pickle.dump(model, f)\n\n @staticmethod\n def load(filename):\n with open(filename, \"rb\") as f:\n return pickle.load(f)\n",
"step-ids": [
3,
7,
8,
9,
12
]
}
|
[
3,
7,
8,
9,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def divisible_by_2(my_list=[]):
if my_list is None or len(my_list) == 0:
return None
new = []
for num in my_list:
if num % 2 == 0:
new.append(True)
else:
new.append(False)
return new
<|reserved_special_token_1|>
#!/usr/bin/python3
def divisible_by_2(my_list=[]):
if my_list is None or len(my_list) == 0:
return None
new = []
for num in my_list:
if num % 2 == 0:
new.append(True)
else:
new.append(False)
return new
|
flexible
|
{
"blob_id": "17f91b612fad14200d2911e2cb14e740b239f9ff",
"index": 4894,
"step-1": "<mask token>\n",
"step-2": "def divisible_by_2(my_list=[]):\n if my_list is None or len(my_list) == 0:\n return None\n new = []\n for num in my_list:\n if num % 2 == 0:\n new.append(True)\n else:\n new.append(False)\n return new\n",
"step-3": "#!/usr/bin/python3\ndef divisible_by_2(my_list=[]):\n if my_list is None or len(my_list) == 0:\n return None\n new = []\n for num in my_list:\n if num % 2 == 0:\n new.append(True)\n else:\n new.append(False)\n return new\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
# pylint: disable=dangerous-default-value,wrong-import-position,unused-import, import-outside-toplevel
def create_app(settings_override={}):
app = Flask(__name__)
app.config.from_object('zezin.settings.Configuration')
app.config.update(settings_override)
db.init_app(app)
from zezin.views import partners_routes
app.register_blueprint(blueprint=partners_routes)
return app
import zezin.models # isort:skip
|
normal
|
{
"blob_id": "6affc182f5d3353d46f6e9a21344bc85bf894165",
"index": 948,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_app(settings_override={}):\n app = Flask(__name__)\n app.config.from_object('zezin.settings.Configuration')\n app.config.update(settings_override)\n db.init_app(app)\n from zezin.views import partners_routes\n app.register_blueprint(blueprint=partners_routes)\n return app\n\n\n<mask token>\n",
"step-3": "<mask token>\ndb = SQLAlchemy()\n\n\ndef create_app(settings_override={}):\n app = Flask(__name__)\n app.config.from_object('zezin.settings.Configuration')\n app.config.update(settings_override)\n db.init_app(app)\n from zezin.views import partners_routes\n app.register_blueprint(blueprint=partners_routes)\n return app\n\n\n<mask token>\n",
"step-4": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\ndb = SQLAlchemy()\n\n\ndef create_app(settings_override={}):\n app = Flask(__name__)\n app.config.from_object('zezin.settings.Configuration')\n app.config.update(settings_override)\n db.init_app(app)\n from zezin.views import partners_routes\n app.register_blueprint(blueprint=partners_routes)\n return app\n\n\nimport zezin.models\n",
"step-5": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\n\ndb = SQLAlchemy()\n\n\n# pylint: disable=dangerous-default-value,wrong-import-position,unused-import, import-outside-toplevel\ndef create_app(settings_override={}):\n app = Flask(__name__)\n app.config.from_object('zezin.settings.Configuration')\n app.config.update(settings_override)\n\n db.init_app(app)\n\n from zezin.views import partners_routes\n\n app.register_blueprint(blueprint=partners_routes)\n\n return app\n\n\nimport zezin.models # isort:skip\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
/usr/share/pyshared/Bio/Phylo/_io.py
|
normal
|
{
"blob_id": "7e50fc5eb794d7f2e4805924dcc7a99296e0d732",
"index": 614,
"step-1": "/usr/share/pyshared/Bio/Phylo/_io.py",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-08-03 02:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Kategori',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nama_kategori', models.CharField(max_length=30)),
('deskripsi', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Stone',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('kode', models.CharField(max_length=30)),
('deskripsi', models.CharField(max_length=200)),
('cover_stone', models.ImageField(upload_to='\\images')),
('kategori', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stones.Kategori')),
],
),
]
|
normal
|
{
"blob_id": "cdd929ee041c485d2a6c1149ea1b1ced92d7b7ab",
"index": 5972,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Kategori', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('nama_kategori', models.CharField(\n max_length=30)), ('deskripsi', models.CharField(max_length=100))]),\n migrations.CreateModel(name='Stone', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('kode', models.CharField(max_length=30)), (\n 'deskripsi', models.CharField(max_length=200)), ('cover_stone',\n models.ImageField(upload_to='\\\\images')), ('kategori', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'stones.Kategori'))])]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Kategori', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('nama_kategori', models.CharField(\n max_length=30)), ('deskripsi', models.CharField(max_length=100))]),\n migrations.CreateModel(name='Stone', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('kode', models.CharField(max_length=30)), (\n 'deskripsi', models.CharField(max_length=200)), ('cover_stone',\n models.ImageField(upload_to='\\\\images')), ('kategori', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'stones.Kategori'))])]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-08-03 02:31\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Kategori',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('nama_kategori', models.CharField(max_length=30)),\n ('deskripsi', models.CharField(max_length=100)),\n ],\n ),\n migrations.CreateModel(\n name='Stone',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('kode', models.CharField(max_length=30)),\n ('deskripsi', models.CharField(max_length=200)),\n ('cover_stone', models.ImageField(upload_to='\\\\images')),\n ('kategori', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stones.Kategori')),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
__author__ = 'anderson'
from pyramid.security import Everyone, Allow, ALL_PERMISSIONS
class Root(object):
#Access Control List
__acl__ = [(Allow, Everyone, 'view'),
(Allow, 'role_admin', ALL_PERMISSIONS),
(Allow, 'role_usuario', 'comum')]
def __init__(self, request):
pass
|
normal
|
{
"blob_id": "5ee2a51ea981f0feab688d9c571620a95d89a422",
"index": 6980,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Root(object):\n <mask token>\n\n def __init__(self, request):\n pass\n",
"step-3": "__author__ = 'anderson'\n<mask token>\n\n\nclass Root(object):\n __acl__ = [(Allow, Everyone, 'view'), (Allow, 'role_admin',\n ALL_PERMISSIONS), (Allow, 'role_usuario', 'comum')]\n\n def __init__(self, request):\n pass\n",
"step-4": "__author__ = 'anderson'\nfrom pyramid.security import Everyone, Allow, ALL_PERMISSIONS\n\n\nclass Root(object):\n __acl__ = [(Allow, Everyone, 'view'), (Allow, 'role_admin',\n ALL_PERMISSIONS), (Allow, 'role_usuario', 'comum')]\n\n def __init__(self, request):\n pass\n",
"step-5": "__author__ = 'anderson'\nfrom pyramid.security import Everyone, Allow, ALL_PERMISSIONS\n\n\nclass Root(object):\n #Access Control List\n __acl__ = [(Allow, Everyone, 'view'),\n (Allow, 'role_admin', ALL_PERMISSIONS),\n (Allow, 'role_usuario', 'comum')]\n\n def __init__(self, request):\n pass\n",
"step-ids": [
0,
2,
4,
5,
6
]
}
|
[
0,
2,
4,
5,
6
] |
"""Wrapper over the command line migrate tool to better work with
config files."""
import subprocess
import sys
from alembic.migration import MigrationContext
from ..lib.alembic import bootstrap_db
from ..lib.sqla import create_engine
from ..models import DBSession as db
def main():
if len(sys.argv) < 3:
sys.stderr.write('Usage: %s CONFIG_URI {bootstrap | ALEMBIC_OPTS}\n'
% sys.argv[0])
sys.exit(1)
config_uri = sys.argv.pop(1)
if sys.argv[1] == 'bootstrap':
bootstrap_db(config_uri)
else:
engine = create_engine(config_uri)
db.configure(bind=engine)
context = MigrationContext.configure(engine.connect())
db_version = context.get_current_revision()
if not db_version:
sys.stderr.write('Database not initialized.\n'
'Try this: "sortie-db-manage %s bootstrap"\n'
% config_uri)
sys.exit(2)
cmd = ['alembic', '-c', config_uri] + sys.argv[1:]
print(subprocess.check_output(cmd))
|
normal
|
{
"blob_id": "7b459cf321f351e1485a9aef0ca23067f411e430",
"index": 7446,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n if len(sys.argv) < 3:\n sys.stderr.write(\n 'Usage: %s CONFIG_URI {bootstrap | ALEMBIC_OPTS}\\n' % sys.argv[0])\n sys.exit(1)\n config_uri = sys.argv.pop(1)\n if sys.argv[1] == 'bootstrap':\n bootstrap_db(config_uri)\n else:\n engine = create_engine(config_uri)\n db.configure(bind=engine)\n context = MigrationContext.configure(engine.connect())\n db_version = context.get_current_revision()\n if not db_version:\n sys.stderr.write(\n \"\"\"Database not initialized.\nTry this: \"sortie-db-manage %s bootstrap\\\"\n\"\"\"\n % config_uri)\n sys.exit(2)\n cmd = ['alembic', '-c', config_uri] + sys.argv[1:]\n print(subprocess.check_output(cmd))\n",
"step-3": "<mask token>\nimport subprocess\nimport sys\nfrom alembic.migration import MigrationContext\nfrom ..lib.alembic import bootstrap_db\nfrom ..lib.sqla import create_engine\nfrom ..models import DBSession as db\n\n\ndef main():\n if len(sys.argv) < 3:\n sys.stderr.write(\n 'Usage: %s CONFIG_URI {bootstrap | ALEMBIC_OPTS}\\n' % sys.argv[0])\n sys.exit(1)\n config_uri = sys.argv.pop(1)\n if sys.argv[1] == 'bootstrap':\n bootstrap_db(config_uri)\n else:\n engine = create_engine(config_uri)\n db.configure(bind=engine)\n context = MigrationContext.configure(engine.connect())\n db_version = context.get_current_revision()\n if not db_version:\n sys.stderr.write(\n \"\"\"Database not initialized.\nTry this: \"sortie-db-manage %s bootstrap\\\"\n\"\"\"\n % config_uri)\n sys.exit(2)\n cmd = ['alembic', '-c', config_uri] + sys.argv[1:]\n print(subprocess.check_output(cmd))\n",
"step-4": "\"\"\"Wrapper over the command line migrate tool to better work with\nconfig files.\"\"\"\n\nimport subprocess\nimport sys\n\nfrom alembic.migration import MigrationContext\n\nfrom ..lib.alembic import bootstrap_db\nfrom ..lib.sqla import create_engine\nfrom ..models import DBSession as db\n\n\ndef main():\n if len(sys.argv) < 3:\n sys.stderr.write('Usage: %s CONFIG_URI {bootstrap | ALEMBIC_OPTS}\\n'\n % sys.argv[0])\n sys.exit(1)\n\n config_uri = sys.argv.pop(1)\n\n if sys.argv[1] == 'bootstrap':\n bootstrap_db(config_uri)\n else:\n engine = create_engine(config_uri)\n db.configure(bind=engine)\n context = MigrationContext.configure(engine.connect())\n db_version = context.get_current_revision()\n\n if not db_version:\n sys.stderr.write('Database not initialized.\\n'\n 'Try this: \"sortie-db-manage %s bootstrap\"\\n'\n % config_uri)\n sys.exit(2)\n\n cmd = ['alembic', '-c', config_uri] + sys.argv[1:]\n\n print(subprocess.check_output(cmd))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
kube_description= \
"""
Compute Server
"""
kube_instruction= \
"""
Not instructions yet
"""
#
# Standard geni-lib/portal libraries
#
import geni.portal as portal
import geni.rspec.pg as PG
import geni.rspec.emulab as elab
import geni.rspec.igext as IG
import geni.urn as URN
#
# PhantomNet extensions.
#
import geni.rspec.emulab.pnext as PN
#
# This geni-lib script is designed to run in the PhantomNet Portal.
#
pc = portal.Context()
params = pc.bindParameters()
#
# Give the library a chance to return nice JSON-formatted exception(s) and/or
# warnings; this might sys.exit().
#
pc.verifyParameters()
rspec = PG.Request()
compute = rspec.RawPC("compute")
compute.disk_image = 'urn:publicid:IDN+emulab.net+image+emulab-ops:UBUNTU18-64-STD'
compute.hardware_type = 'd430'
compute.routable_control_ip = True
tour = IG.Tour()
tour.Description(IG.Tour.TEXT,kube_description)
tour.Instructions(IG.Tour.MARKDOWN,kube_instruction)
rspec.addTour(tour)
#
# Print and go!
#
pc.printRequestRSpec(rspec)
|
normal
|
{
"blob_id": "ff7a865822a4f8b343ab4cb490c24d6d530b14e1",
"index": 934,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npc.verifyParameters()\n<mask token>\ntour.Description(IG.Tour.TEXT, kube_description)\ntour.Instructions(IG.Tour.MARKDOWN, kube_instruction)\nrspec.addTour(tour)\npc.printRequestRSpec(rspec)\n",
"step-3": "kube_description = \"\"\"\nCompute Server\n\"\"\"\nkube_instruction = \"\"\"\nNot instructions yet\n\"\"\"\n<mask token>\npc = portal.Context()\nparams = pc.bindParameters()\npc.verifyParameters()\nrspec = PG.Request()\ncompute = rspec.RawPC('compute')\ncompute.disk_image = (\n 'urn:publicid:IDN+emulab.net+image+emulab-ops:UBUNTU18-64-STD')\ncompute.hardware_type = 'd430'\ncompute.routable_control_ip = True\ntour = IG.Tour()\ntour.Description(IG.Tour.TEXT, kube_description)\ntour.Instructions(IG.Tour.MARKDOWN, kube_instruction)\nrspec.addTour(tour)\npc.printRequestRSpec(rspec)\n",
"step-4": "kube_description = \"\"\"\nCompute Server\n\"\"\"\nkube_instruction = \"\"\"\nNot instructions yet\n\"\"\"\nimport geni.portal as portal\nimport geni.rspec.pg as PG\nimport geni.rspec.emulab as elab\nimport geni.rspec.igext as IG\nimport geni.urn as URN\nimport geni.rspec.emulab.pnext as PN\npc = portal.Context()\nparams = pc.bindParameters()\npc.verifyParameters()\nrspec = PG.Request()\ncompute = rspec.RawPC('compute')\ncompute.disk_image = (\n 'urn:publicid:IDN+emulab.net+image+emulab-ops:UBUNTU18-64-STD')\ncompute.hardware_type = 'd430'\ncompute.routable_control_ip = True\ntour = IG.Tour()\ntour.Description(IG.Tour.TEXT, kube_description)\ntour.Instructions(IG.Tour.MARKDOWN, kube_instruction)\nrspec.addTour(tour)\npc.printRequestRSpec(rspec)\n",
"step-5": "#!/usr/bin/env python\n\nkube_description= \\\n\"\"\"\nCompute Server\n\"\"\"\nkube_instruction= \\\n\"\"\"\nNot instructions yet\n\"\"\"\n\n#\n# Standard geni-lib/portal libraries\n#\nimport geni.portal as portal\nimport geni.rspec.pg as PG\nimport geni.rspec.emulab as elab\nimport geni.rspec.igext as IG\nimport geni.urn as URN\n\n\n\n#\n# PhantomNet extensions.\n#\nimport geni.rspec.emulab.pnext as PN \n\n#\n# This geni-lib script is designed to run in the PhantomNet Portal.\n#\npc = portal.Context()\n\n\nparams = pc.bindParameters()\n\n#\n# Give the library a chance to return nice JSON-formatted exception(s) and/or\n# warnings; this might sys.exit().\n#\npc.verifyParameters()\n\nrspec = PG.Request()\ncompute = rspec.RawPC(\"compute\")\ncompute.disk_image = 'urn:publicid:IDN+emulab.net+image+emulab-ops:UBUNTU18-64-STD'\ncompute.hardware_type = 'd430'\ncompute.routable_control_ip = True\n\ntour = IG.Tour()\ntour.Description(IG.Tour.TEXT,kube_description)\ntour.Instructions(IG.Tour.MARKDOWN,kube_instruction)\nrspec.addTour(tour)\n\n#\n# Print and go!\n#\npc.printRequestRSpec(rspec)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""inactivate fb posts
Revision ID: f37637c1bcf8
Revises: 43c7ecf8ed02
Create Date: 2017-06-22 12:01:59.623040
"""
from alembic import op
from pd.facebook.models import MediaType
# revision identifiers, used by Alembic.
revision = 'f37637c1bcf8'
down_revision = '43c7ecf8ed02'
branch_labels = None
depends_on = None
# set active status of posts
update = "UPDATE post SET is_active = {}"
# filter those: not gif, and not shopping; 3 == gif
where = "WHERE media_type != {} AND is_shopping = false".format(
MediaType.gif.value)
def upgrade():
op.execute(' '.join([update.format('false'), where]))
def downgrade():
op.execute(' '.join([update.format('true'), where]))
|
normal
|
{
"blob_id": "89ed30411c624e3d930db0bc0b5b716a10908727",
"index": 8259,
"step-1": "<mask token>\n\n\ndef upgrade():\n op.execute(' '.join([update.format('false'), where]))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef upgrade():\n op.execute(' '.join([update.format('false'), where]))\n\n\ndef downgrade():\n op.execute(' '.join([update.format('true'), where]))\n",
"step-3": "<mask token>\nrevision = 'f37637c1bcf8'\ndown_revision = '43c7ecf8ed02'\nbranch_labels = None\ndepends_on = None\nupdate = 'UPDATE post SET is_active = {}'\nwhere = 'WHERE media_type != {} AND is_shopping = false'.format(MediaType.\n gif.value)\n\n\ndef upgrade():\n op.execute(' '.join([update.format('false'), where]))\n\n\ndef downgrade():\n op.execute(' '.join([update.format('true'), where]))\n",
"step-4": "<mask token>\nfrom alembic import op\nfrom pd.facebook.models import MediaType\nrevision = 'f37637c1bcf8'\ndown_revision = '43c7ecf8ed02'\nbranch_labels = None\ndepends_on = None\nupdate = 'UPDATE post SET is_active = {}'\nwhere = 'WHERE media_type != {} AND is_shopping = false'.format(MediaType.\n gif.value)\n\n\ndef upgrade():\n op.execute(' '.join([update.format('false'), where]))\n\n\ndef downgrade():\n op.execute(' '.join([update.format('true'), where]))\n",
"step-5": "\"\"\"inactivate fb posts\n\nRevision ID: f37637c1bcf8\nRevises: 43c7ecf8ed02\nCreate Date: 2017-06-22 12:01:59.623040\n\n\"\"\"\nfrom alembic import op\nfrom pd.facebook.models import MediaType\n\n\n# revision identifiers, used by Alembic.\nrevision = 'f37637c1bcf8'\ndown_revision = '43c7ecf8ed02'\nbranch_labels = None\ndepends_on = None\n\n# set active status of posts\nupdate = \"UPDATE post SET is_active = {}\"\n# filter those: not gif, and not shopping; 3 == gif\nwhere = \"WHERE media_type != {} AND is_shopping = false\".format(\n MediaType.gif.value)\n\n\ndef upgrade():\n op.execute(' '.join([update.format('false'), where]))\n\n\ndef downgrade():\n op.execute(' '.join([update.format('true'), where]))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def upgrade():
op.create_unique_constraint('_unique_name_unit', 'ingredient', ['name',
'unit'])
op.create_unique_constraint(None, 'product', ['nappi_code'])
op.add_column('product_ingredient', sa.Column('strength', sa.String(),
nullable=True))
op.create_unique_constraint('_unique_product_ingredient_strength',
'product_ingredient', ['product_id', 'ingredient_id', 'strength'])
op.drop_column('product_ingredient', 'stength')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def upgrade():
op.create_unique_constraint('_unique_name_unit', 'ingredient', ['name',
'unit'])
op.create_unique_constraint(None, 'product', ['nappi_code'])
op.add_column('product_ingredient', sa.Column('strength', sa.String(),
nullable=True))
op.create_unique_constraint('_unique_product_ingredient_strength',
'product_ingredient', ['product_id', 'ingredient_id', 'strength'])
op.drop_column('product_ingredient', 'stength')
def downgrade():
op.add_column('product_ingredient', sa.Column('stength', sa.VARCHAR(),
autoincrement=False, nullable=True))
op.drop_constraint('_unique_product_ingredient_strength',
'product_ingredient', type_='unique')
op.drop_column('product_ingredient', 'strength')
op.drop_constraint(None, 'product', type_='unique')
op.drop_constraint('_unique_name_unit', 'ingredient', type_='unique')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
revision = 'a07768b0d4c0'
down_revision = 'a80cd9a35e58'
branch_labels = None
depends_on = None
def upgrade():
op.create_unique_constraint('_unique_name_unit', 'ingredient', ['name',
'unit'])
op.create_unique_constraint(None, 'product', ['nappi_code'])
op.add_column('product_ingredient', sa.Column('strength', sa.String(),
nullable=True))
op.create_unique_constraint('_unique_product_ingredient_strength',
'product_ingredient', ['product_id', 'ingredient_id', 'strength'])
op.drop_column('product_ingredient', 'stength')
def downgrade():
op.add_column('product_ingredient', sa.Column('stength', sa.VARCHAR(),
autoincrement=False, nullable=True))
op.drop_constraint('_unique_product_ingredient_strength',
'product_ingredient', type_='unique')
op.drop_column('product_ingredient', 'strength')
op.drop_constraint(None, 'product', type_='unique')
op.drop_constraint('_unique_name_unit', 'ingredient', type_='unique')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from alembic import op
import sqlalchemy as sa
revision = 'a07768b0d4c0'
down_revision = 'a80cd9a35e58'
branch_labels = None
depends_on = None
def upgrade():
op.create_unique_constraint('_unique_name_unit', 'ingredient', ['name',
'unit'])
op.create_unique_constraint(None, 'product', ['nappi_code'])
op.add_column('product_ingredient', sa.Column('strength', sa.String(),
nullable=True))
op.create_unique_constraint('_unique_product_ingredient_strength',
'product_ingredient', ['product_id', 'ingredient_id', 'strength'])
op.drop_column('product_ingredient', 'stength')
def downgrade():
op.add_column('product_ingredient', sa.Column('stength', sa.VARCHAR(),
autoincrement=False, nullable=True))
op.drop_constraint('_unique_product_ingredient_strength',
'product_ingredient', type_='unique')
op.drop_column('product_ingredient', 'strength')
op.drop_constraint(None, 'product', type_='unique')
op.drop_constraint('_unique_name_unit', 'ingredient', type_='unique')
<|reserved_special_token_1|>
"""product_ingredient unique constraint
Revision ID: a07768b0d4c0
Revises: a80cd9a35e58
Create Date: 2017-05-18 11:39:52.258266
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a07768b0d4c0'
down_revision = 'a80cd9a35e58'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint('_unique_name_unit', 'ingredient', ['name', 'unit'])
op.create_unique_constraint(None, 'product', ['nappi_code'])
op.add_column('product_ingredient', sa.Column('strength', sa.String(), nullable=True))
op.create_unique_constraint('_unique_product_ingredient_strength', 'product_ingredient', ['product_id', 'ingredient_id', 'strength'])
op.drop_column('product_ingredient', 'stength')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('product_ingredient', sa.Column('stength', sa.VARCHAR(), autoincrement=False, nullable=True))
op.drop_constraint('_unique_product_ingredient_strength', 'product_ingredient', type_='unique')
op.drop_column('product_ingredient', 'strength')
op.drop_constraint(None, 'product', type_='unique')
op.drop_constraint('_unique_name_unit', 'ingredient', type_='unique')
# ### end Alembic commands ###
|
flexible
|
{
"blob_id": "d0a73385db0dd6f729d267095ef83b9fec72e40c",
"index": 1464,
"step-1": "<mask token>\n\n\ndef upgrade():\n op.create_unique_constraint('_unique_name_unit', 'ingredient', ['name',\n 'unit'])\n op.create_unique_constraint(None, 'product', ['nappi_code'])\n op.add_column('product_ingredient', sa.Column('strength', sa.String(),\n nullable=True))\n op.create_unique_constraint('_unique_product_ingredient_strength',\n 'product_ingredient', ['product_id', 'ingredient_id', 'strength'])\n op.drop_column('product_ingredient', 'stength')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef upgrade():\n op.create_unique_constraint('_unique_name_unit', 'ingredient', ['name',\n 'unit'])\n op.create_unique_constraint(None, 'product', ['nappi_code'])\n op.add_column('product_ingredient', sa.Column('strength', sa.String(),\n nullable=True))\n op.create_unique_constraint('_unique_product_ingredient_strength',\n 'product_ingredient', ['product_id', 'ingredient_id', 'strength'])\n op.drop_column('product_ingredient', 'stength')\n\n\ndef downgrade():\n op.add_column('product_ingredient', sa.Column('stength', sa.VARCHAR(),\n autoincrement=False, nullable=True))\n op.drop_constraint('_unique_product_ingredient_strength',\n 'product_ingredient', type_='unique')\n op.drop_column('product_ingredient', 'strength')\n op.drop_constraint(None, 'product', type_='unique')\n op.drop_constraint('_unique_name_unit', 'ingredient', type_='unique')\n",
"step-3": "<mask token>\nrevision = 'a07768b0d4c0'\ndown_revision = 'a80cd9a35e58'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.create_unique_constraint('_unique_name_unit', 'ingredient', ['name',\n 'unit'])\n op.create_unique_constraint(None, 'product', ['nappi_code'])\n op.add_column('product_ingredient', sa.Column('strength', sa.String(),\n nullable=True))\n op.create_unique_constraint('_unique_product_ingredient_strength',\n 'product_ingredient', ['product_id', 'ingredient_id', 'strength'])\n op.drop_column('product_ingredient', 'stength')\n\n\ndef downgrade():\n op.add_column('product_ingredient', sa.Column('stength', sa.VARCHAR(),\n autoincrement=False, nullable=True))\n op.drop_constraint('_unique_product_ingredient_strength',\n 'product_ingredient', type_='unique')\n op.drop_column('product_ingredient', 'strength')\n op.drop_constraint(None, 'product', type_='unique')\n op.drop_constraint('_unique_name_unit', 'ingredient', type_='unique')\n",
"step-4": "<mask token>\nfrom alembic import op\nimport sqlalchemy as sa\nrevision = 'a07768b0d4c0'\ndown_revision = 'a80cd9a35e58'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.create_unique_constraint('_unique_name_unit', 'ingredient', ['name',\n 'unit'])\n op.create_unique_constraint(None, 'product', ['nappi_code'])\n op.add_column('product_ingredient', sa.Column('strength', sa.String(),\n nullable=True))\n op.create_unique_constraint('_unique_product_ingredient_strength',\n 'product_ingredient', ['product_id', 'ingredient_id', 'strength'])\n op.drop_column('product_ingredient', 'stength')\n\n\ndef downgrade():\n op.add_column('product_ingredient', sa.Column('stength', sa.VARCHAR(),\n autoincrement=False, nullable=True))\n op.drop_constraint('_unique_product_ingredient_strength',\n 'product_ingredient', type_='unique')\n op.drop_column('product_ingredient', 'strength')\n op.drop_constraint(None, 'product', type_='unique')\n op.drop_constraint('_unique_name_unit', 'ingredient', type_='unique')\n",
"step-5": "\"\"\"product_ingredient unique constraint\n\nRevision ID: a07768b0d4c0\nRevises: a80cd9a35e58\nCreate Date: 2017-05-18 11:39:52.258266\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a07768b0d4c0'\ndown_revision = 'a80cd9a35e58'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_unique_constraint('_unique_name_unit', 'ingredient', ['name', 'unit'])\n op.create_unique_constraint(None, 'product', ['nappi_code'])\n op.add_column('product_ingredient', sa.Column('strength', sa.String(), nullable=True))\n op.create_unique_constraint('_unique_product_ingredient_strength', 'product_ingredient', ['product_id', 'ingredient_id', 'strength'])\n op.drop_column('product_ingredient', 'stength')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('product_ingredient', sa.Column('stength', sa.VARCHAR(), autoincrement=False, nullable=True))\n op.drop_constraint('_unique_product_ingredient_strength', 'product_ingredient', type_='unique')\n op.drop_column('product_ingredient', 'strength')\n op.drop_constraint(None, 'product', type_='unique')\n op.drop_constraint('_unique_name_unit', 'ingredient', type_='unique')\n # ### end Alembic commands ###\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def j():
global i
import pandas as pd
st1.update({i: st})
data = pd.DataFrame(st1)
print(data)
data.to_csv('student.csv')
fa1.update({i: fa})
data1 = pd.DataFrame(fa1)
print(data1)
data1.to_csv('faculty.csv')
i = i + 1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
db.child('Student').push({'DAY': ''})
db.child('Faculty').push({'DAY': ''})
<|reserved_special_token_0|>
def j():
global i
import pandas as pd
st1.update({i: st})
data = pd.DataFrame(st1)
print(data)
data.to_csv('student.csv')
fa1.update({i: fa})
data1 = pd.DataFrame(fa1)
print(data1)
data1.to_csv('faculty.csv')
i = i + 1
while 1:
schedule.every(10).seconds.do(j)
schedule.run_pending()
time.sleep(1)
f = input("enter 's' for student,enter 'f' for faculty")
f = f.upper()
if f == 'S':
name = input('enter student name')
if name in student:
a = input(
"enter 'a' for absent,enter 'l' for leave,enter 'p' for present"
)
a = a.upper()
if a == 'L':
import smtplib
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login('sender@gamil.com', 'akki@9510')
message = name + 'is on leave'
server.sendmail('sender@gamil.com', 'receiver@gamil.com',
message)
a = 'A'
st.update({name: a})
from datetime import datetime
now = datetime.now()
date_time = now.strftime('%d-%m-%Y')
db.child('Student').child('DAY').child(date_time).update({name: a})
if f == 'F':
name = input('enter faculty name')
if name in faculty:
a = input(
"enter 'a' for absent,enter 'l' for leave,enter 'p' for present"
)
a = a.upper()
if a == 'L':
import smtplib
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login('sender@gamil.com', 'akki@9510')
message = name + 'is on leave'
server.sendmail('sender@gamil.com', 'receiver@gamil.com',
message)
a = 'A'
fa.update({name: a})
from datetime import datetime
now = datetime.now()
date_time = now.strftime('%d-%m-%Y')
db.child('Faculty').child('DAY').child(date_time).update({name: a})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
config = {'apiKey': 'apiKey', 'authDomain':
'erproject-dd24e-default-rtdb.firebaseapp.com', 'databaseURL':
'https://erproject-dd24e-default-rtdb.firebaseio.com', 'storageBucket':
'erproject-dd24e-default-rtdb.appspot.com'}
firebase = pyrebase.initialize_app(config)
db = firebase.database()
db.child('Student').push({'DAY': ''})
db.child('Faculty').push({'DAY': ''})
student = ['s1', 's2', 's3', 's4', 's5', 's6', 's7', 's8', 's9', 's10']
faculty = ['f1', 'f2', 'f3', 'f4', 'f5']
st = {}
data, data1 = '', ''
st1 = {}
fa = {}
fa1 = {}
i = 1
<|reserved_special_token_0|>
def j():
global i
import pandas as pd
st1.update({i: st})
data = pd.DataFrame(st1)
print(data)
data.to_csv('student.csv')
fa1.update({i: fa})
data1 = pd.DataFrame(fa1)
print(data1)
data1.to_csv('faculty.csv')
i = i + 1
while 1:
schedule.every(10).seconds.do(j)
schedule.run_pending()
time.sleep(1)
f = input("enter 's' for student,enter 'f' for faculty")
f = f.upper()
if f == 'S':
name = input('enter student name')
if name in student:
a = input(
"enter 'a' for absent,enter 'l' for leave,enter 'p' for present"
)
a = a.upper()
if a == 'L':
import smtplib
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login('sender@gamil.com', 'akki@9510')
message = name + 'is on leave'
server.sendmail('sender@gamil.com', 'receiver@gamil.com',
message)
a = 'A'
st.update({name: a})
from datetime import datetime
now = datetime.now()
date_time = now.strftime('%d-%m-%Y')
db.child('Student').child('DAY').child(date_time).update({name: a})
if f == 'F':
name = input('enter faculty name')
if name in faculty:
a = input(
"enter 'a' for absent,enter 'l' for leave,enter 'p' for present"
)
a = a.upper()
if a == 'L':
import smtplib
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login('sender@gamil.com', 'akki@9510')
message = name + 'is on leave'
server.sendmail('sender@gamil.com', 'receiver@gamil.com',
message)
a = 'A'
fa.update({name: a})
from datetime import datetime
now = datetime.now()
date_time = now.strftime('%d-%m-%Y')
db.child('Faculty').child('DAY').child(date_time).update({name: a})
<|reserved_special_token_1|>
import pyrebase
import smtplib
config = {'apiKey': 'apiKey', 'authDomain':
'erproject-dd24e-default-rtdb.firebaseapp.com', 'databaseURL':
'https://erproject-dd24e-default-rtdb.firebaseio.com', 'storageBucket':
'erproject-dd24e-default-rtdb.appspot.com'}
firebase = pyrebase.initialize_app(config)
db = firebase.database()
db.child('Student').push({'DAY': ''})
db.child('Faculty').push({'DAY': ''})
student = ['s1', 's2', 's3', 's4', 's5', 's6', 's7', 's8', 's9', 's10']
faculty = ['f1', 'f2', 'f3', 'f4', 'f5']
st = {}
data, data1 = '', ''
st1 = {}
fa = {}
fa1 = {}
i = 1
import schedule
import time
def j():
global i
import pandas as pd
st1.update({i: st})
data = pd.DataFrame(st1)
print(data)
data.to_csv('student.csv')
fa1.update({i: fa})
data1 = pd.DataFrame(fa1)
print(data1)
data1.to_csv('faculty.csv')
i = i + 1
while 1:
schedule.every(10).seconds.do(j)
schedule.run_pending()
time.sleep(1)
f = input("enter 's' for student,enter 'f' for faculty")
f = f.upper()
if f == 'S':
name = input('enter student name')
if name in student:
a = input(
"enter 'a' for absent,enter 'l' for leave,enter 'p' for present"
)
a = a.upper()
if a == 'L':
import smtplib
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login('sender@gamil.com', 'akki@9510')
message = name + 'is on leave'
server.sendmail('sender@gamil.com', 'receiver@gamil.com',
message)
a = 'A'
st.update({name: a})
from datetime import datetime
now = datetime.now()
date_time = now.strftime('%d-%m-%Y')
db.child('Student').child('DAY').child(date_time).update({name: a})
if f == 'F':
name = input('enter faculty name')
if name in faculty:
a = input(
"enter 'a' for absent,enter 'l' for leave,enter 'p' for present"
)
a = a.upper()
if a == 'L':
import smtplib
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login('sender@gamil.com', 'akki@9510')
message = name + 'is on leave'
server.sendmail('sender@gamil.com', 'receiver@gamil.com',
message)
a = 'A'
fa.update({name: a})
from datetime import datetime
now = datetime.now()
date_time = now.strftime('%d-%m-%Y')
db.child('Faculty').child('DAY').child(date_time).update({name: a})
<|reserved_special_token_1|>
#ERP PROJECT
# Console attendance tracker: records student/faculty attendance in Firebase,
# snapshots the in-memory registers to CSV every 10 seconds, and emails a
# notice when someone is marked on leave.
import pyrebase
import smtplib
import schedule
import time
from datetime import datetime

config = {
    "apiKey": "apiKey",
    "authDomain": "erproject-dd24e-default-rtdb.firebaseapp.com",
    "databaseURL": "https://erproject-dd24e-default-rtdb.firebaseio.com",
    "storageBucket": "erproject-dd24e-default-rtdb.appspot.com"
}
firebase = pyrebase.initialize_app(config)
db = firebase.database()
db.child("Student").push({"DAY": ""})
db.child("Faculty").push({"DAY": ""})

# Known roster names; anything else typed at the prompt is silently ignored.
student = ["s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10"]
faculty = ["f1", "f2", "f3", "f4", "f5"]
st = {}    # current student register: name -> mark ('A'/'P')
st1 = {}   # accumulated student snapshots: snapshot index -> register
fa = {}    # current faculty register: name -> mark
fa1 = {}   # accumulated faculty snapshots
i = 1      # snapshot counter


def j():
    """Snapshot both attendance registers to student.csv / faculty.csv."""
    global i
    import pandas as pd
    st1.update({i: st})
    data = pd.DataFrame(st1)
    print(data)
    data.to_csv('student.csv')
    fa1.update({i: fa})
    data1 = pd.DataFrame(fa1)
    print(data1)
    data1.to_csv('faculty.csv')
    i = i + 1


def send_leave_email(name):
    """Email a leave notice for *name*.

    Please change sender and receiver's email id for this function to work.
    NOTE(review): password is hard-coded in source; move to config/env.
    """
    server = smtplib.SMTP("smtp.gmail.com", 587)
    server.starttls()
    server.login("sender@gamil.com", "akki@9510")
    message = name + " is on leave"  # fixed: original message lacked the space
    server.sendmail("sender@gamil.com", "receiver@gamil.com", message)


def record_attendance(name, roster, register, group):
    """Prompt for *name*'s mark, store it in *register* and in Firebase.

    A leave ('L') triggers the notification email and is recorded as 'A'.
    *group* is the Firebase top-level node ("Student" or "Faculty").
    """
    if name not in roster:
        return
    a = input("enter 'a' for absent,enter 'l' for leave,enter 'p' for present")
    a = a.upper()
    if a == "L":
        send_leave_email(name)
        a = "A"
    register.update({name: a})
    date_time = datetime.now().strftime("%d-%m-%Y")  # current date
    db.child(group).child("DAY").child(date_time).update({name: a})


# Register the CSV-snapshot job exactly once.  The original registered it on
# every loop pass, so the job ran more and more often the longer the program
# was alive.
schedule.every(10).seconds.do(j)
while 1:
    schedule.run_pending()
    time.sleep(1)
    f = input("enter 's' for student,enter 'f' for faculty")
    f = f.upper()
    if f == "S":
        record_attendance(input("enter student name"), student, st, "Student")
    if f == "F":
        record_attendance(input("enter faculty name"), faculty, fa, "Faculty")
|
flexible
|
{
"blob_id": "3e7e6d7a0137d91dc7437ff91a39d7f8faad675e",
"index": 7075,
"step-1": "<mask token>\n\n\ndef j():\n global i\n import pandas as pd\n st1.update({i: st})\n data = pd.DataFrame(st1)\n print(data)\n data.to_csv('student.csv')\n fa1.update({i: fa})\n data1 = pd.DataFrame(fa1)\n print(data1)\n data1.to_csv('faculty.csv')\n i = i + 1\n\n\n<mask token>\n",
"step-2": "<mask token>\ndb.child('Student').push({'DAY': ''})\ndb.child('Faculty').push({'DAY': ''})\n<mask token>\n\n\ndef j():\n global i\n import pandas as pd\n st1.update({i: st})\n data = pd.DataFrame(st1)\n print(data)\n data.to_csv('student.csv')\n fa1.update({i: fa})\n data1 = pd.DataFrame(fa1)\n print(data1)\n data1.to_csv('faculty.csv')\n i = i + 1\n\n\nwhile 1:\n schedule.every(10).seconds.do(j)\n schedule.run_pending()\n time.sleep(1)\n f = input(\"enter 's' for student,enter 'f' for faculty\")\n f = f.upper()\n if f == 'S':\n name = input('enter student name')\n if name in student:\n a = input(\n \"enter 'a' for absent,enter 'l' for leave,enter 'p' for present\"\n )\n a = a.upper()\n if a == 'L':\n import smtplib\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login('sender@gamil.com', 'akki@9510')\n message = name + 'is on leave'\n server.sendmail('sender@gamil.com', 'receiver@gamil.com',\n message)\n a = 'A'\n st.update({name: a})\n from datetime import datetime\n now = datetime.now()\n date_time = now.strftime('%d-%m-%Y')\n db.child('Student').child('DAY').child(date_time).update({name: a})\n if f == 'F':\n name = input('enter faculty name')\n if name in faculty:\n a = input(\n \"enter 'a' for absent,enter 'l' for leave,enter 'p' for present\"\n )\n a = a.upper()\n if a == 'L':\n import smtplib\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login('sender@gamil.com', 'akki@9510')\n message = name + 'is on leave'\n server.sendmail('sender@gamil.com', 'receiver@gamil.com',\n message)\n a = 'A'\n fa.update({name: a})\n from datetime import datetime\n now = datetime.now()\n date_time = now.strftime('%d-%m-%Y')\n db.child('Faculty').child('DAY').child(date_time).update({name: a})\n",
"step-3": "<mask token>\nconfig = {'apiKey': 'apiKey', 'authDomain':\n 'erproject-dd24e-default-rtdb.firebaseapp.com', 'databaseURL':\n 'https://erproject-dd24e-default-rtdb.firebaseio.com', 'storageBucket':\n 'erproject-dd24e-default-rtdb.appspot.com'}\nfirebase = pyrebase.initialize_app(config)\ndb = firebase.database()\ndb.child('Student').push({'DAY': ''})\ndb.child('Faculty').push({'DAY': ''})\nstudent = ['s1', 's2', 's3', 's4', 's5', 's6', 's7', 's8', 's9', 's10']\nfaculty = ['f1', 'f2', 'f3', 'f4', 'f5']\nst = {}\ndata, data1 = '', ''\nst1 = {}\nfa = {}\nfa1 = {}\ni = 1\n<mask token>\n\n\ndef j():\n global i\n import pandas as pd\n st1.update({i: st})\n data = pd.DataFrame(st1)\n print(data)\n data.to_csv('student.csv')\n fa1.update({i: fa})\n data1 = pd.DataFrame(fa1)\n print(data1)\n data1.to_csv('faculty.csv')\n i = i + 1\n\n\nwhile 1:\n schedule.every(10).seconds.do(j)\n schedule.run_pending()\n time.sleep(1)\n f = input(\"enter 's' for student,enter 'f' for faculty\")\n f = f.upper()\n if f == 'S':\n name = input('enter student name')\n if name in student:\n a = input(\n \"enter 'a' for absent,enter 'l' for leave,enter 'p' for present\"\n )\n a = a.upper()\n if a == 'L':\n import smtplib\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login('sender@gamil.com', 'akki@9510')\n message = name + 'is on leave'\n server.sendmail('sender@gamil.com', 'receiver@gamil.com',\n message)\n a = 'A'\n st.update({name: a})\n from datetime import datetime\n now = datetime.now()\n date_time = now.strftime('%d-%m-%Y')\n db.child('Student').child('DAY').child(date_time).update({name: a})\n if f == 'F':\n name = input('enter faculty name')\n if name in faculty:\n a = input(\n \"enter 'a' for absent,enter 'l' for leave,enter 'p' for present\"\n )\n a = a.upper()\n if a == 'L':\n import smtplib\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login('sender@gamil.com', 'akki@9510')\n message = name + 'is on leave'\n 
server.sendmail('sender@gamil.com', 'receiver@gamil.com',\n message)\n a = 'A'\n fa.update({name: a})\n from datetime import datetime\n now = datetime.now()\n date_time = now.strftime('%d-%m-%Y')\n db.child('Faculty').child('DAY').child(date_time).update({name: a})\n",
"step-4": "import pyrebase\nimport smtplib\nconfig = {'apiKey': 'apiKey', 'authDomain':\n 'erproject-dd24e-default-rtdb.firebaseapp.com', 'databaseURL':\n 'https://erproject-dd24e-default-rtdb.firebaseio.com', 'storageBucket':\n 'erproject-dd24e-default-rtdb.appspot.com'}\nfirebase = pyrebase.initialize_app(config)\ndb = firebase.database()\ndb.child('Student').push({'DAY': ''})\ndb.child('Faculty').push({'DAY': ''})\nstudent = ['s1', 's2', 's3', 's4', 's5', 's6', 's7', 's8', 's9', 's10']\nfaculty = ['f1', 'f2', 'f3', 'f4', 'f5']\nst = {}\ndata, data1 = '', ''\nst1 = {}\nfa = {}\nfa1 = {}\ni = 1\nimport schedule\nimport time\n\n\ndef j():\n global i\n import pandas as pd\n st1.update({i: st})\n data = pd.DataFrame(st1)\n print(data)\n data.to_csv('student.csv')\n fa1.update({i: fa})\n data1 = pd.DataFrame(fa1)\n print(data1)\n data1.to_csv('faculty.csv')\n i = i + 1\n\n\nwhile 1:\n schedule.every(10).seconds.do(j)\n schedule.run_pending()\n time.sleep(1)\n f = input(\"enter 's' for student,enter 'f' for faculty\")\n f = f.upper()\n if f == 'S':\n name = input('enter student name')\n if name in student:\n a = input(\n \"enter 'a' for absent,enter 'l' for leave,enter 'p' for present\"\n )\n a = a.upper()\n if a == 'L':\n import smtplib\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login('sender@gamil.com', 'akki@9510')\n message = name + 'is on leave'\n server.sendmail('sender@gamil.com', 'receiver@gamil.com',\n message)\n a = 'A'\n st.update({name: a})\n from datetime import datetime\n now = datetime.now()\n date_time = now.strftime('%d-%m-%Y')\n db.child('Student').child('DAY').child(date_time).update({name: a})\n if f == 'F':\n name = input('enter faculty name')\n if name in faculty:\n a = input(\n \"enter 'a' for absent,enter 'l' for leave,enter 'p' for present\"\n )\n a = a.upper()\n if a == 'L':\n import smtplib\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login('sender@gamil.com', 'akki@9510')\n 
message = name + 'is on leave'\n server.sendmail('sender@gamil.com', 'receiver@gamil.com',\n message)\n a = 'A'\n fa.update({name: a})\n from datetime import datetime\n now = datetime.now()\n date_time = now.strftime('%d-%m-%Y')\n db.child('Faculty').child('DAY').child(date_time).update({name: a})\n",
"step-5": "#ERP PROJECT\n\n\nimport pyrebase\nimport smtplib\n\nconfig = {\n \"apiKey\": \"apiKey\",\n \"authDomain\": \"erproject-dd24e-default-rtdb.firebaseapp.com\",\n \"databaseURL\": \"https://erproject-dd24e-default-rtdb.firebaseio.com\",\n \"storageBucket\": \"erproject-dd24e-default-rtdb.appspot.com\"\n}\n\nfirebase = pyrebase.initialize_app(config)\ndb = firebase.database()\n\ndb.child(\"Student\").push({\"DAY\":\"\"})\ndb.child(\"Faculty\").push({\"DAY\":\"\"}) \nstudent=[\"s1\",\"s2\",\"s3\",\"s4\",\"s5\",\"s6\",\"s7\",\"s8\",\"s9\",\"s10\"]\nfaculty=[\"f1\",\"f2\",\"f3\",\"f4\",\"f5\"]\nst={}\ndata,data1='',''\nst1={}\nfa={}\nfa1={}\ni=1\nimport schedule\nimport time\ndef j():\n global i\n import pandas as pd\n st1.update({i:st})\n data=pd.DataFrame(st1)\n print(data)\n data.to_csv('student.csv')\n fa1.update({i:fa})\n data1=pd.DataFrame(fa1)\n print(data1)\n data1.to_csv('faculty.csv')\n i=i+1 \nwhile(1):\n schedule.every(10).seconds.do(j)\n schedule.run_pending()\n time.sleep(1)\n f=input(\"enter 's' for student,enter 'f' for faculty\")\n f=f.upper()\n if(f==\"S\"):\n name=input(\"enter student name\")\n if name in student:\n a=input(\"enter 'a' for absent,enter 'l' for leave,enter 'p' for present\")\n a=a.upper()\n if(a==\"L\"): #please change sender and receiver's email id for this function to work \n import smtplib\n server =smtplib.SMTP(\"smtp.gmail.com\",587)\n server.starttls()\n server.login(\"sender@gamil.com\",\"akki@9510\")\n message=name+\"is on leave\"\n server.sendmail(\"sender@gamil.com\",\"receiver@gamil.com\",message)\n a=\"A\"\n st.update({name:a})\n from datetime import datetime\n now = datetime.now() # current date and time\n date_time = now.strftime(\"%d-%m-%Y\")\n db.child(\"Student\").child(\"DAY\").child(date_time).update({name:a})\n \n if(f==\"F\"):\n name=input(\"enter faculty name\")\n if name in faculty:\n a=input(\"enter 'a' for absent,enter 'l' for leave,enter 'p' for present\")\n a=a.upper()\n if(a==\"L\"):\n import 
smtplib\n server =smtplib.SMTP(\"smtp.gmail.com\",587)\n server.starttls()\n server.login(\"sender@gamil.com\",\"akki@9510\")\n message=name+\"is on leave\"\n server.sendmail(\"sender@gamil.com\",\"receiver@gamil.com\",message)\n a=\"A\"\n fa.update({name:a})\n from datetime import datetime\n now = datetime.now() # current date and time\n date_time = now.strftime(\"%d-%m-%Y\")\n db.child(\"Faculty\").child(\"DAY\").child(date_time).update({name:a})\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
lc1821 = LightCurve('PSR_B1821-24/PSR_B1821-24_combined.evt')
lc0218 = LightCurve('PSR_J0218+4232/PSR_J0218+4232_combined.evt')
fig, ax = plt.subplots(2, 1, figsize=(8, 8))
ax[0], _ = lc1821.fit_two('lorentzian', ax=ax[0], label=False, annotate
=False)
ax[1], _ = lc0218.fit_two('gaussian', ax=ax[1], label=False, annotate=False
)
ax[1].set_xlabel('Pulse Phase', fontsize=25)
ax[0].text(0.08, 0.95, 'PSR B1821$-$24', ha='left', va='top', fontsize=
20, transform=ax[0].transAxes, bbox=dict(facecolor='white',
edgecolor='none', alpha=0.6))
ax[1].text(0.08, 0.95, 'PSR J0218$+$4232', ha='left', va='top',
fontsize=20, transform=ax[1].transAxes, bbox=dict(facecolor='white',
edgecolor='none', alpha=0.6))
ax[0].tick_params(labelbottom=False)
fig.text(0.04, 0.5, 'Photon Counts', ha='center', va='center', rotation
='vertical', fontsize=25)
plt.subplots_adjust(hspace=0, bottom=0.08, top=0.94, right=0.98, left=0.15)
fig.savefig('poster_plot.svg')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
lc1821 = LightCurve('PSR_B1821-24/PSR_B1821-24_combined.evt')
lc0218 = LightCurve('PSR_J0218+4232/PSR_J0218+4232_combined.evt')
fig, ax = plt.subplots(2, 1, figsize=(8, 8))
ax[0], _ = lc1821.fit_two('lorentzian', ax=ax[0], label=False, annotate
=False)
ax[1], _ = lc0218.fit_two('gaussian', ax=ax[1], label=False, annotate=False
)
ax[1].set_xlabel('Pulse Phase', fontsize=25)
ax[0].text(0.08, 0.95, 'PSR B1821$-$24', ha='left', va='top', fontsize=
20, transform=ax[0].transAxes, bbox=dict(facecolor='white',
edgecolor='none', alpha=0.6))
ax[1].text(0.08, 0.95, 'PSR J0218$+$4232', ha='left', va='top',
fontsize=20, transform=ax[1].transAxes, bbox=dict(facecolor='white',
edgecolor='none', alpha=0.6))
ax[0].tick_params(labelbottom=False)
fig.text(0.04, 0.5, 'Photon Counts', ha='center', va='center', rotation
='vertical', fontsize=25)
plt.subplots_adjust(hspace=0, bottom=0.08, top=0.94, right=0.98, left=0.15)
fig.savefig('poster_plot.svg')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from LCClass import LightCurve
import matplotlib.pyplot as plt
import niutils
def main():
lc1821 = LightCurve('PSR_B1821-24/PSR_B1821-24_combined.evt')
lc0218 = LightCurve('PSR_J0218+4232/PSR_J0218+4232_combined.evt')
fig, ax = plt.subplots(2, 1, figsize=(8, 8))
ax[0], _ = lc1821.fit_two('lorentzian', ax=ax[0], label=False, annotate
=False)
ax[1], _ = lc0218.fit_two('gaussian', ax=ax[1], label=False, annotate=False
)
ax[1].set_xlabel('Pulse Phase', fontsize=25)
ax[0].text(0.08, 0.95, 'PSR B1821$-$24', ha='left', va='top', fontsize=
20, transform=ax[0].transAxes, bbox=dict(facecolor='white',
edgecolor='none', alpha=0.6))
ax[1].text(0.08, 0.95, 'PSR J0218$+$4232', ha='left', va='top',
fontsize=20, transform=ax[1].transAxes, bbox=dict(facecolor='white',
edgecolor='none', alpha=0.6))
ax[0].tick_params(labelbottom=False)
fig.text(0.04, 0.5, 'Photon Counts', ha='center', va='center', rotation
='vertical', fontsize=25)
plt.subplots_adjust(hspace=0, bottom=0.08, top=0.94, right=0.98, left=0.15)
fig.savefig('poster_plot.svg')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/env python
# Two-panel poster figure: fitted pulse profiles for two millisecond pulsars.
from LCClass import LightCurve
import matplotlib.pyplot as plt
import niutils
def main():
    """Fit both pulse profiles and save the stacked figure to poster_plot.svg."""
    # Combined event files -- paths are relative to the working directory.
    lc1821 = LightCurve("PSR_B1821-24/PSR_B1821-24_combined.evt")
    lc0218 = LightCurve("PSR_J0218+4232/PSR_J0218+4232_combined.evt")
    fig, ax = plt.subplots(2, 1, figsize=(8, 8))
    # fit_two returns (axes, fit result); the fit results are unused here.
    # NOTE(review): model choice differs per pulsar (lorentzian vs gaussian) --
    # presumably matched to each profile's peak shape; confirm in LCClass.
    ax[0], _ = lc1821.fit_two('lorentzian', ax=ax[0], label=False, annotate=False)
    ax[1], _ = lc0218.fit_two('gaussian', ax=ax[1], label=False, annotate=False)
    ax[1].set_xlabel("Pulse Phase", fontsize=25)
    # Pulsar-name labels in the upper-left of each panel on a translucent box.
    ax[0].text(.08, .95, r'PSR B1821$-$24', ha='left', va='top',
            fontsize=20, transform=ax[0].transAxes,
            bbox=dict(facecolor='white', edgecolor='none', alpha=0.6))
    ax[1].text(.08, .95, r'PSR J0218$+$4232', ha='left', va='top',
            fontsize=20, transform=ax[1].transAxes,
            bbox=dict(facecolor='white', edgecolor='none', alpha=0.6))
    # Hide the top panel's x tick labels; hspace=0 below makes the panels abut.
    ax[0].tick_params(labelbottom=False)
    #plt.setp(ax[0].get_yticklabels()[0], visible=False)
    # Single shared y-axis label for both panels.
    fig.text(.04, .5, r'Photon Counts', ha='center', va='center',
            rotation='vertical', fontsize=25)
    plt.subplots_adjust(hspace=0, bottom=.08, top=.94, right=.98, left=.15)
    fig.savefig("poster_plot.svg")
if __name__ == '__main__':
    main()
|
flexible
|
{
"blob_id": "48311ee17a3f2eca8db32d7672f540fa45a7a900",
"index": 3524,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n lc1821 = LightCurve('PSR_B1821-24/PSR_B1821-24_combined.evt')\n lc0218 = LightCurve('PSR_J0218+4232/PSR_J0218+4232_combined.evt')\n fig, ax = plt.subplots(2, 1, figsize=(8, 8))\n ax[0], _ = lc1821.fit_two('lorentzian', ax=ax[0], label=False, annotate\n =False)\n ax[1], _ = lc0218.fit_two('gaussian', ax=ax[1], label=False, annotate=False\n )\n ax[1].set_xlabel('Pulse Phase', fontsize=25)\n ax[0].text(0.08, 0.95, 'PSR B1821$-$24', ha='left', va='top', fontsize=\n 20, transform=ax[0].transAxes, bbox=dict(facecolor='white',\n edgecolor='none', alpha=0.6))\n ax[1].text(0.08, 0.95, 'PSR J0218$+$4232', ha='left', va='top',\n fontsize=20, transform=ax[1].transAxes, bbox=dict(facecolor='white',\n edgecolor='none', alpha=0.6))\n ax[0].tick_params(labelbottom=False)\n fig.text(0.04, 0.5, 'Photon Counts', ha='center', va='center', rotation\n ='vertical', fontsize=25)\n plt.subplots_adjust(hspace=0, bottom=0.08, top=0.94, right=0.98, left=0.15)\n fig.savefig('poster_plot.svg')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n lc1821 = LightCurve('PSR_B1821-24/PSR_B1821-24_combined.evt')\n lc0218 = LightCurve('PSR_J0218+4232/PSR_J0218+4232_combined.evt')\n fig, ax = plt.subplots(2, 1, figsize=(8, 8))\n ax[0], _ = lc1821.fit_two('lorentzian', ax=ax[0], label=False, annotate\n =False)\n ax[1], _ = lc0218.fit_two('gaussian', ax=ax[1], label=False, annotate=False\n )\n ax[1].set_xlabel('Pulse Phase', fontsize=25)\n ax[0].text(0.08, 0.95, 'PSR B1821$-$24', ha='left', va='top', fontsize=\n 20, transform=ax[0].transAxes, bbox=dict(facecolor='white',\n edgecolor='none', alpha=0.6))\n ax[1].text(0.08, 0.95, 'PSR J0218$+$4232', ha='left', va='top',\n fontsize=20, transform=ax[1].transAxes, bbox=dict(facecolor='white',\n edgecolor='none', alpha=0.6))\n ax[0].tick_params(labelbottom=False)\n fig.text(0.04, 0.5, 'Photon Counts', ha='center', va='center', rotation\n ='vertical', fontsize=25)\n plt.subplots_adjust(hspace=0, bottom=0.08, top=0.94, right=0.98, left=0.15)\n fig.savefig('poster_plot.svg')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from LCClass import LightCurve\nimport matplotlib.pyplot as plt\nimport niutils\n\n\ndef main():\n lc1821 = LightCurve('PSR_B1821-24/PSR_B1821-24_combined.evt')\n lc0218 = LightCurve('PSR_J0218+4232/PSR_J0218+4232_combined.evt')\n fig, ax = plt.subplots(2, 1, figsize=(8, 8))\n ax[0], _ = lc1821.fit_two('lorentzian', ax=ax[0], label=False, annotate\n =False)\n ax[1], _ = lc0218.fit_two('gaussian', ax=ax[1], label=False, annotate=False\n )\n ax[1].set_xlabel('Pulse Phase', fontsize=25)\n ax[0].text(0.08, 0.95, 'PSR B1821$-$24', ha='left', va='top', fontsize=\n 20, transform=ax[0].transAxes, bbox=dict(facecolor='white',\n edgecolor='none', alpha=0.6))\n ax[1].text(0.08, 0.95, 'PSR J0218$+$4232', ha='left', va='top',\n fontsize=20, transform=ax[1].transAxes, bbox=dict(facecolor='white',\n edgecolor='none', alpha=0.6))\n ax[0].tick_params(labelbottom=False)\n fig.text(0.04, 0.5, 'Photon Counts', ha='center', va='center', rotation\n ='vertical', fontsize=25)\n plt.subplots_adjust(hspace=0, bottom=0.08, top=0.94, right=0.98, left=0.15)\n fig.savefig('poster_plot.svg')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n\nfrom LCClass import LightCurve\nimport matplotlib.pyplot as plt\nimport niutils\n\ndef main():\n lc1821 = LightCurve(\"PSR_B1821-24/PSR_B1821-24_combined.evt\")\n lc0218 = LightCurve(\"PSR_J0218+4232/PSR_J0218+4232_combined.evt\")\n\n fig, ax = plt.subplots(2, 1, figsize=(8, 8))\n\n ax[0], _ = lc1821.fit_two('lorentzian', ax=ax[0], label=False, annotate=False)\n ax[1], _ = lc0218.fit_two('gaussian', ax=ax[1], label=False, annotate=False)\n\n ax[1].set_xlabel(\"Pulse Phase\", fontsize=25)\n ax[0].text(.08, .95, r'PSR B1821$-$24', ha='left', va='top', \n fontsize=20, transform=ax[0].transAxes,\n bbox=dict(facecolor='white', edgecolor='none', alpha=0.6))\n ax[1].text(.08, .95, r'PSR J0218$+$4232', ha='left', va='top', \n fontsize=20, transform=ax[1].transAxes,\n bbox=dict(facecolor='white', edgecolor='none', alpha=0.6))\n\n ax[0].tick_params(labelbottom=False)\n #plt.setp(ax[0].get_yticklabels()[0], visible=False)\n \n fig.text(.04, .5, r'Photon Counts', ha='center', va='center',\n rotation='vertical', fontsize=25)\n\n plt.subplots_adjust(hspace=0, bottom=.08, top=.94, right=.98, left=.15)\n\n fig.savefig(\"poster_plot.svg\")\n\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while name != 'your name' and name != 'Your name':
print('Please type your name.')
name = input()
print('Thanks!')
<|reserved_special_token_1|>
name = ''
while name != 'your name' and name != 'Your name':
print('Please type your name.')
name = input()
print('Thanks!')
<|reserved_special_token_1|>
# Prompt repeatedly until the user literally types "your name"
# (either capitalization of the first letter), then thank them.
name = ''
while name not in ('your name', 'Your name'):
    print('Please type your name.')
    name = input()
print('Thanks!')
|
flexible
|
{
"blob_id": "f3644b42d1a6c87c6169f8d123dadf6cd209270c",
"index": 2617,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile name != 'your name' and name != 'Your name':\n print('Please type your name.')\n name = input()\nprint('Thanks!')\n",
"step-3": "name = ''\nwhile name != 'your name' and name != 'Your name':\n print('Please type your name.')\n name = input()\nprint('Thanks!')\n",
"step-4": "name = ''\nwhile name != 'your name' and name != 'Your name':\n print('Please type your name.')\n name = input()\nprint('Thanks!')\n\n#while 1 == 2 or :\n# print('Type your name')\n# name = input()\n# if name == 'your name':\n# break\n#print('Thanks!')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(n):
s = a[i]
ans[s - 1] = i + 1
print(*ans)
<|reserved_special_token_1|>
def invert_permutation(a):
    """Return the inverse of the 1-based permutation *a*.

    a[pos] == val means position pos+1 maps to val, so the inverse places
    pos+1 at index val-1.  Returns [] for empty input.
    """
    ans = [0] * len(a)
    for pos, val in enumerate(a, start=1):
        ans[val - 1] = pos
    return ans


if __name__ == '__main__':
    # stdin protocol: first line n (unused beyond validation by the judge),
    # second line the n permutation values.
    n = int(input())
    a = [int(e) for e in input().split()]
    print(*invert_permutation(a))
|
flexible
|
{
"blob_id": "f74e2e6b59330bd63fee9192e74a72178abc1cab",
"index": 8195,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(n):\n s = a[i]\n ans[s - 1] = i + 1\nprint(*ans)\n",
"step-3": "n = int(input())\na = [int(e) for e in input().split()]\nans = [0] * n\nfor i in range(n):\n s = a[i]\n ans[s - 1] = i + 1\nprint(*ans)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import widget
import column
class Columns(widget.Widget):
    """Horizontal container that splits its area evenly among child columns."""

    def __init__(self, parent):
        super(Columns, self).__init__(parent)
        # A drag-release handler that reordered children by drag start/end
        # position used to be registered here; it is currently disabled.

    def add_column(self):
        """Append a new empty Column child."""
        self.children.append(column.Column(self))

    def update(self):
        """Lay the children out side by side, each with an equal width share."""
        total = len(self.children)
        offset = 0.0
        for child in self.children:
            share = float(1) / total
            child.resize(self.x + offset * self.dx, self.y,
                         share * self.dx, self.dy)
            offset += share

    def on_reposition_window(self, target, x, y):
        """Move *target* into the column under (x, y), after the window there.

        Always returns False (event not consumed further).
        """
        if not self.contains_point(x, y):
            return False
        source = target.parent
        dest = self.get_child_at_point(x, y).windows
        source.detach_child(target)
        idx = dest.get_child_idx_at_point(x, y)
        # Drop at the front when no window is under the cursor, otherwise
        # just after the one that is.
        idx = 0 if idx is None else idx + 1
        dest.attach_child_at_idx(idx, target)
        return False
|
normal
|
{
"blob_id": "e0874554c326bb11b53552e362bc8073bb57bc93",
"index": 9720,
"step-1": "<mask token>\n\n\nclass Columns(widget.Widget):\n\n def __init__(self, parent):\n super(Columns, self).__init__(parent)\n <mask token>\n\n def add_column(self):\n col = column.Column(self)\n self.children.append(col)\n <mask token>\n\n def on_reposition_window(self, target, x, y):\n if self.contains_point(x, y):\n windows_from = target.parent\n windows_to = self.get_child_at_point(x, y).windows\n windows_from.detach_child(target)\n idx_to = windows_to.get_child_idx_at_point(x, y)\n if idx_to is None:\n idx_to = 0\n else:\n idx_to += 1\n windows_to.attach_child_at_idx(idx_to, target)\n return False\n",
"step-2": "<mask token>\n\n\nclass Columns(widget.Widget):\n\n def __init__(self, parent):\n super(Columns, self).__init__(parent)\n <mask token>\n\n def add_column(self):\n col = column.Column(self)\n self.children.append(col)\n\n def update(self):\n area_sum = len(self.children)\n ratio_accumulator = 0\n for child in self.children:\n area_share = 1\n area_ratio = float(area_share) / area_sum\n x = self.x + ratio_accumulator * self.dx\n y = self.y\n dx = area_ratio * self.dx\n dy = self.dy\n child.resize(x, y, dx, dy)\n ratio_accumulator += area_ratio\n\n def on_reposition_window(self, target, x, y):\n if self.contains_point(x, y):\n windows_from = target.parent\n windows_to = self.get_child_at_point(x, y).windows\n windows_from.detach_child(target)\n idx_to = windows_to.get_child_idx_at_point(x, y)\n if idx_to is None:\n idx_to = 0\n else:\n idx_to += 1\n windows_to.attach_child_at_idx(idx_to, target)\n return False\n",
"step-3": "<mask token>\n\n\nclass Columns(widget.Widget):\n\n def __init__(self, parent):\n super(Columns, self).__init__(parent)\n \"\"\"\n def on_drag_release(self, x0, y0, x, y):\n if not self.contains_point(x0, y0):\n return None\n if not self.contains_point(x, y):\n return None\n\n idx_from = self.get_child_idx_at_point(x0, y0)\n idx_to = self.get_child_idx_at_point(x, y)\n\n self.reorder_children(idx_from, idx_to)\n \"\"\"\n\n def add_column(self):\n col = column.Column(self)\n self.children.append(col)\n\n def update(self):\n area_sum = len(self.children)\n ratio_accumulator = 0\n for child in self.children:\n area_share = 1\n area_ratio = float(area_share) / area_sum\n x = self.x + ratio_accumulator * self.dx\n y = self.y\n dx = area_ratio * self.dx\n dy = self.dy\n child.resize(x, y, dx, dy)\n ratio_accumulator += area_ratio\n\n def on_reposition_window(self, target, x, y):\n if self.contains_point(x, y):\n windows_from = target.parent\n windows_to = self.get_child_at_point(x, y).windows\n windows_from.detach_child(target)\n idx_to = windows_to.get_child_idx_at_point(x, y)\n if idx_to is None:\n idx_to = 0\n else:\n idx_to += 1\n windows_to.attach_child_at_idx(idx_to, target)\n return False\n",
"step-4": "import widget\nimport column\n\n\nclass Columns(widget.Widget):\n\n def __init__(self, parent):\n super(Columns, self).__init__(parent)\n \"\"\"\n def on_drag_release(self, x0, y0, x, y):\n if not self.contains_point(x0, y0):\n return None\n if not self.contains_point(x, y):\n return None\n\n idx_from = self.get_child_idx_at_point(x0, y0)\n idx_to = self.get_child_idx_at_point(x, y)\n\n self.reorder_children(idx_from, idx_to)\n \"\"\"\n\n def add_column(self):\n col = column.Column(self)\n self.children.append(col)\n\n def update(self):\n area_sum = len(self.children)\n ratio_accumulator = 0\n for child in self.children:\n area_share = 1\n area_ratio = float(area_share) / area_sum\n x = self.x + ratio_accumulator * self.dx\n y = self.y\n dx = area_ratio * self.dx\n dy = self.dy\n child.resize(x, y, dx, dy)\n ratio_accumulator += area_ratio\n\n def on_reposition_window(self, target, x, y):\n if self.contains_point(x, y):\n windows_from = target.parent\n windows_to = self.get_child_at_point(x, y).windows\n windows_from.detach_child(target)\n idx_to = windows_to.get_child_idx_at_point(x, y)\n if idx_to is None:\n idx_to = 0\n else:\n idx_to += 1\n windows_to.attach_child_at_idx(idx_to, target)\n return False\n",
"step-5": "import widget\nimport column\n\nclass Columns(widget.Widget):\n def __init__(self, parent):\n super(Columns, self).__init__(parent)\n # self.root.mouse.on_drag_release.append(self.on_drag_release)\n\n \"\"\"\n def on_drag_release(self, x0, y0, x, y):\n if not self.contains_point(x0, y0):\n return None\n if not self.contains_point(x, y):\n return None\n\n idx_from = self.get_child_idx_at_point(x0, y0)\n idx_to = self.get_child_idx_at_point(x, y)\n\n self.reorder_children(idx_from, idx_to)\n \"\"\"\n \n\n def add_column(self):\n col = column.Column(self)\n self.children.append(col)\n\n\n def update(self):\n area_sum = len(self.children)\n ratio_accumulator = 0\n\n for child in self.children:\n area_share = 1\n area_ratio = float(area_share)/area_sum\n\n x = self.x + ratio_accumulator * self.dx\n y = self.y\n dx = area_ratio * self.dx\n dy = self.dy\n\n child.resize(x, y, dx, dy)\n\n ratio_accumulator += area_ratio\n\n def on_reposition_window(self, target, x, y):\n if self.contains_point(x, y):\n windows_from = target.parent\n windows_to = self.get_child_at_point(x, y).windows\n windows_from.detach_child(target)\n idx_to = windows_to.get_child_idx_at_point(x, y)\n if idx_to is None:\n idx_to = 0\n else:\n idx_to += 1\n windows_to.attach_child_at_idx(idx_to, target)\n return False\n\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = patterns('', url('^create_new/$',
'hx_lti_assignment.views.create_new_assignment', name=
'create_new_assignment'), url('^(?P<id>[0-9]+)/edit/',
'hx_lti_assignment.views.edit_assignment', name='edit_assignment'), url
('^(?P<id>[0-9]+)/delete/', 'hx_lti_assignment.views.delete_assignment',
name='delete_assignment'), url('^import_assignment/$',
'hx_lti_assignment.views.import_assignment', name='import_assignment'),
url('^(?P<course_id>[0-9]+)/get_assignments',
'hx_lti_assignment.views.assignments_from_course', name=
'assignments_from_course'), url(
'^(?P<old_course_id>[0-9]+)/(?P<new_course_id>[0-9]+)/(?P<assignment_id>[0-9]+)/import'
, 'hx_lti_assignment.views.moving_assignment', name='moving_assignment'))
<|reserved_special_token_1|>
from django.conf.urls import patterns, url
urlpatterns = patterns('', url('^create_new/$',
'hx_lti_assignment.views.create_new_assignment', name=
'create_new_assignment'), url('^(?P<id>[0-9]+)/edit/',
'hx_lti_assignment.views.edit_assignment', name='edit_assignment'), url
('^(?P<id>[0-9]+)/delete/', 'hx_lti_assignment.views.delete_assignment',
name='delete_assignment'), url('^import_assignment/$',
'hx_lti_assignment.views.import_assignment', name='import_assignment'),
url('^(?P<course_id>[0-9]+)/get_assignments',
'hx_lti_assignment.views.assignments_from_course', name=
'assignments_from_course'), url(
'^(?P<old_course_id>[0-9]+)/(?P<new_course_id>[0-9]+)/(?P<assignment_id>[0-9]+)/import'
, 'hx_lti_assignment.views.moving_assignment', name='moving_assignment'))
<|reserved_special_token_1|>
from django.conf.urls import patterns, url

# Route table for the hx_lti_assignment app.  Uses the pre-Django-1.10
# string-dotted-path `patterns()` style; all views live in
# hx_lti_assignment.views.
urlpatterns = patterns(
    '',
    # Create a brand-new assignment.
    url(
        r'^create_new/$',
        'hx_lti_assignment.views.create_new_assignment',
        name="create_new_assignment",
    ),
    # Edit an existing assignment, selected by numeric id.
    url(
        r'^(?P<id>[0-9]+)/edit/',
        'hx_lti_assignment.views.edit_assignment',
        name="edit_assignment",
    ),
    # Delete an assignment, selected by numeric id.
    url(
        r'^(?P<id>[0-9]+)/delete/',
        'hx_lti_assignment.views.delete_assignment',
        name="delete_assignment",
    ),
    # Import assignments (see the view for the accepted payload).
    url(
        r'^import_assignment/$',
        'hx_lti_assignment.views.import_assignment',
        name="import_assignment",
    ),
    # List the assignments belonging to a course.
    url(
        r'^(?P<course_id>[0-9]+)/get_assignments',
        'hx_lti_assignment.views.assignments_from_course',
        name="assignments_from_course",
    ),
    # Copy an assignment from one course into another.
    url(
        r'^(?P<old_course_id>[0-9]+)/(?P<new_course_id>[0-9]+)/(?P<assignment_id>[0-9]+)/import',
        'hx_lti_assignment.views.moving_assignment',
        name="moving_assignment",
    ),
)
|
flexible
|
{
"blob_id": "2194fb4f0b0618f1c8db39f659a4890457f45b1d",
"index": 3963,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = patterns('', url('^create_new/$',\n 'hx_lti_assignment.views.create_new_assignment', name=\n 'create_new_assignment'), url('^(?P<id>[0-9]+)/edit/',\n 'hx_lti_assignment.views.edit_assignment', name='edit_assignment'), url\n ('^(?P<id>[0-9]+)/delete/', 'hx_lti_assignment.views.delete_assignment',\n name='delete_assignment'), url('^import_assignment/$',\n 'hx_lti_assignment.views.import_assignment', name='import_assignment'),\n url('^(?P<course_id>[0-9]+)/get_assignments',\n 'hx_lti_assignment.views.assignments_from_course', name=\n 'assignments_from_course'), url(\n '^(?P<old_course_id>[0-9]+)/(?P<new_course_id>[0-9]+)/(?P<assignment_id>[0-9]+)/import'\n , 'hx_lti_assignment.views.moving_assignment', name='moving_assignment'))\n",
"step-3": "from django.conf.urls import patterns, url\nurlpatterns = patterns('', url('^create_new/$',\n 'hx_lti_assignment.views.create_new_assignment', name=\n 'create_new_assignment'), url('^(?P<id>[0-9]+)/edit/',\n 'hx_lti_assignment.views.edit_assignment', name='edit_assignment'), url\n ('^(?P<id>[0-9]+)/delete/', 'hx_lti_assignment.views.delete_assignment',\n name='delete_assignment'), url('^import_assignment/$',\n 'hx_lti_assignment.views.import_assignment', name='import_assignment'),\n url('^(?P<course_id>[0-9]+)/get_assignments',\n 'hx_lti_assignment.views.assignments_from_course', name=\n 'assignments_from_course'), url(\n '^(?P<old_course_id>[0-9]+)/(?P<new_course_id>[0-9]+)/(?P<assignment_id>[0-9]+)/import'\n , 'hx_lti_assignment.views.moving_assignment', name='moving_assignment'))\n",
"step-4": "from django.conf.urls import patterns, url\n\nurlpatterns = patterns(\n '',\n url(\n r'^create_new/$',\n 'hx_lti_assignment.views.create_new_assignment',\n name=\"create_new_assignment\",\n ),\n url(\n r'^(?P<id>[0-9]+)/edit/',\n 'hx_lti_assignment.views.edit_assignment',\n name=\"edit_assignment\",\n ),\n url(\n r'^(?P<id>[0-9]+)/delete/',\n 'hx_lti_assignment.views.delete_assignment',\n name=\"delete_assignment\",\n ),\n url(\n r'^import_assignment/$',\n 'hx_lti_assignment.views.import_assignment',\n name=\"import_assignment\",\n ),\n url(\n r'^(?P<course_id>[0-9]+)/get_assignments',\n 'hx_lti_assignment.views.assignments_from_course',\n name=\"assignments_from_course\",\n ),\n url(\n r'^(?P<old_course_id>[0-9]+)/(?P<new_course_id>[0-9]+)/(?P<assignment_id>[0-9]+)/import',\n 'hx_lti_assignment.views.moving_assignment',\n name=\"moving_assignment\",\n ),\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with DAG('twitter_dag_v1', start_date=datetime(2018, 10, 1),
schedule_interval='@daily', default_args=DAG_DEFAULT_ARGS, catchup=False
) as dag:
None
<|reserved_special_token_1|>
<|reserved_special_token_0|>
DAG_DEFAULT_ARGS = {'owner': 'airflow', 'depends_on_past': False, 'retries':
1, 'retry_delay': timedelta(minutes=1)}
with DAG('twitter_dag_v1', start_date=datetime(2018, 10, 1),
schedule_interval='@daily', default_args=DAG_DEFAULT_ARGS, catchup=False
) as dag:
None
<|reserved_special_token_1|>
from airflow import DAG
from datetime import date, timedelta, datetime
DAG_DEFAULT_ARGS = {'owner': 'airflow', 'depends_on_past': False, 'retries':
1, 'retry_delay': timedelta(minutes=1)}
with DAG('twitter_dag_v1', start_date=datetime(2018, 10, 1),
schedule_interval='@daily', default_args=DAG_DEFAULT_ARGS, catchup=False
) as dag:
None
<|reserved_special_token_1|>
# load the dependencies
from airflow import DAG
from datetime import date, timedelta, datetime
# default_args are the default arguments applied to the DAG and all inherited tasks
DAG_DEFAULT_ARGS = {
'owner': 'airflow',
'depends_on_past': False,
'retries': 1,
'retry_delay': timedelta(minutes=1)
}
with DAG('twitter_dag_v1', start_date=datetime(2018, 10, 1), schedule_interval="@daily", default_args=DAG_DEFAULT_ARGS, catchup=False) as dag:
None
|
flexible
|
{
"blob_id": "436cc06778bf9ac9e04a897f4a4db90c595d943c",
"index": 5969,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith DAG('twitter_dag_v1', start_date=datetime(2018, 10, 1),\n schedule_interval='@daily', default_args=DAG_DEFAULT_ARGS, catchup=False\n ) as dag:\n None\n",
"step-3": "<mask token>\nDAG_DEFAULT_ARGS = {'owner': 'airflow', 'depends_on_past': False, 'retries':\n 1, 'retry_delay': timedelta(minutes=1)}\nwith DAG('twitter_dag_v1', start_date=datetime(2018, 10, 1),\n schedule_interval='@daily', default_args=DAG_DEFAULT_ARGS, catchup=False\n ) as dag:\n None\n",
"step-4": "from airflow import DAG\nfrom datetime import date, timedelta, datetime\nDAG_DEFAULT_ARGS = {'owner': 'airflow', 'depends_on_past': False, 'retries':\n 1, 'retry_delay': timedelta(minutes=1)}\nwith DAG('twitter_dag_v1', start_date=datetime(2018, 10, 1),\n schedule_interval='@daily', default_args=DAG_DEFAULT_ARGS, catchup=False\n ) as dag:\n None\n",
"step-5": "# load the dependencies\nfrom airflow import DAG\nfrom datetime import date, timedelta, datetime\n\n# default_args are the default arguments applied to the DAG and all inherited tasks\nDAG_DEFAULT_ARGS = {\n\t'owner': 'airflow',\n\t'depends_on_past': False,\n\t'retries': 1,\n\t'retry_delay': timedelta(minutes=1)\n}\n\nwith DAG('twitter_dag_v1', start_date=datetime(2018, 10, 1), schedule_interval=\"@daily\", default_args=DAG_DEFAULT_ARGS, catchup=False) as dag:\n\tNone\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@aluno.route('/hello')
def hello():
return 'Hello, aluno'
@aluno.route('/reseta', methods=['POST'])
def reseta():
sqlaluno = 'DELETE FROM aluno'
sqldisciplina = 'DELETE FROM disciplina'
sqlprofessor = 'DELETE FROM professor'
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sqlaluno)
cursor.execute(sqldisciplina)
cursor.execute(sqlprofessor)
conn.commit()
return jsonify({'sucess': 'reset efetuado com suceso'}), 200
@aluno.route('/alunos', methods=['GET'])
def alunos_retorna_lista():
sql = 'SELECT * FROM aluno'
resultados = []
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql)
linhas = cursor.fetchall()
for id, nome in linhas:
resultados.append({'id': id, 'nome': nome})
return jsonify(resultados), 200
<|reserved_special_token_0|>
@aluno.route('/alunos', methods=['POST'])
def adiciona_alunos():
dados = request.get_json()
params = dados['nome'],
sql = 'INSERT INTO aluno (nome) VALUES (?)'
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql, params)
conn.commit()
return jsonify(cursor.lastrowid)
@aluno.route('/alunos/<int:id>', methods=['PUT'])
def editar_aluno(id):
dados = request.get_json()
params = dados['nome'], id
sql = 'UPDATE aluno SET nome = ? WHERE id = ?'
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql, params)
conn.commit()
return jsonify(dados['nome']), 200
@aluno.route('/alunos/<int:id>', methods=['DELETE'])
def deletar_aluno(id):
params = id,
sql = 'DELETE FROM aluno WHERE id = ?'
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql, params)
conn.commit()
return jsonify(id), 200
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@aluno.route('/hello')
def hello():
return 'Hello, aluno'
@aluno.route('/reseta', methods=['POST'])
def reseta():
sqlaluno = 'DELETE FROM aluno'
sqldisciplina = 'DELETE FROM disciplina'
sqlprofessor = 'DELETE FROM professor'
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sqlaluno)
cursor.execute(sqldisciplina)
cursor.execute(sqlprofessor)
conn.commit()
return jsonify({'sucess': 'reset efetuado com suceso'}), 200
@aluno.route('/alunos', methods=['GET'])
def alunos_retorna_lista():
sql = 'SELECT * FROM aluno'
resultados = []
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql)
linhas = cursor.fetchall()
for id, nome in linhas:
resultados.append({'id': id, 'nome': nome})
return jsonify(resultados), 200
@aluno.route('/alunos/<int:id>', methods=['GET'])
def aluno_por_id(id):
sql = 'SELECT id, nome FROM aluno WHERE id = ?'
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql, (id,))
r = cursor.fetchone()
if r == None:
return None
return {'id': r[0], 'nome': r[1]}
@aluno.route('/alunos', methods=['POST'])
def adiciona_alunos():
dados = request.get_json()
params = dados['nome'],
sql = 'INSERT INTO aluno (nome) VALUES (?)'
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql, params)
conn.commit()
return jsonify(cursor.lastrowid)
@aluno.route('/alunos/<int:id>', methods=['PUT'])
def editar_aluno(id):
dados = request.get_json()
params = dados['nome'], id
sql = 'UPDATE aluno SET nome = ? WHERE id = ?'
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql, params)
conn.commit()
return jsonify(dados['nome']), 200
@aluno.route('/alunos/<int:id>', methods=['DELETE'])
def deletar_aluno(id):
params = id,
sql = 'DELETE FROM aluno WHERE id = ?'
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql, params)
conn.commit()
return jsonify(id), 200
<|reserved_special_token_1|>
<|reserved_special_token_0|>
aluno = Blueprint('aluno', __name__)
@aluno.route('/hello')
def hello():
return 'Hello, aluno'
@aluno.route('/reseta', methods=['POST'])
def reseta():
sqlaluno = 'DELETE FROM aluno'
sqldisciplina = 'DELETE FROM disciplina'
sqlprofessor = 'DELETE FROM professor'
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sqlaluno)
cursor.execute(sqldisciplina)
cursor.execute(sqlprofessor)
conn.commit()
return jsonify({'sucess': 'reset efetuado com suceso'}), 200
@aluno.route('/alunos', methods=['GET'])
def alunos_retorna_lista():
sql = 'SELECT * FROM aluno'
resultados = []
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql)
linhas = cursor.fetchall()
for id, nome in linhas:
resultados.append({'id': id, 'nome': nome})
return jsonify(resultados), 200
@aluno.route('/alunos/<int:id>', methods=['GET'])
def aluno_por_id(id):
sql = 'SELECT id, nome FROM aluno WHERE id = ?'
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql, (id,))
r = cursor.fetchone()
if r == None:
return None
return {'id': r[0], 'nome': r[1]}
@aluno.route('/alunos', methods=['POST'])
def adiciona_alunos():
dados = request.get_json()
params = dados['nome'],
sql = 'INSERT INTO aluno (nome) VALUES (?)'
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql, params)
conn.commit()
return jsonify(cursor.lastrowid)
@aluno.route('/alunos/<int:id>', methods=['PUT'])
def editar_aluno(id):
dados = request.get_json()
params = dados['nome'], id
sql = 'UPDATE aluno SET nome = ? WHERE id = ?'
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql, params)
conn.commit()
return jsonify(dados['nome']), 200
@aluno.route('/alunos/<int:id>', methods=['DELETE'])
def deletar_aluno(id):
params = id,
sql = 'DELETE FROM aluno WHERE id = ?'
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql, params)
conn.commit()
return jsonify(id), 200
<|reserved_special_token_1|>
from flask import Blueprint, request, jsonify
from to_dict import *
from validacao import *
import sqlite3
from migration import conectar, create_database
from contextlib import closing
aluno = Blueprint('aluno', __name__)
@aluno.route('/hello')
def hello():
return 'Hello, aluno'
@aluno.route('/reseta', methods=['POST'])
def reseta():
sqlaluno = 'DELETE FROM aluno'
sqldisciplina = 'DELETE FROM disciplina'
sqlprofessor = 'DELETE FROM professor'
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sqlaluno)
cursor.execute(sqldisciplina)
cursor.execute(sqlprofessor)
conn.commit()
return jsonify({'sucess': 'reset efetuado com suceso'}), 200
@aluno.route('/alunos', methods=['GET'])
def alunos_retorna_lista():
sql = 'SELECT * FROM aluno'
resultados = []
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql)
linhas = cursor.fetchall()
for id, nome in linhas:
resultados.append({'id': id, 'nome': nome})
return jsonify(resultados), 200
@aluno.route('/alunos/<int:id>', methods=['GET'])
def aluno_por_id(id):
sql = 'SELECT id, nome FROM aluno WHERE id = ?'
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql, (id,))
r = cursor.fetchone()
if r == None:
return None
return {'id': r[0], 'nome': r[1]}
@aluno.route('/alunos', methods=['POST'])
def adiciona_alunos():
dados = request.get_json()
params = dados['nome'],
sql = 'INSERT INTO aluno (nome) VALUES (?)'
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql, params)
conn.commit()
return jsonify(cursor.lastrowid)
@aluno.route('/alunos/<int:id>', methods=['PUT'])
def editar_aluno(id):
dados = request.get_json()
params = dados['nome'], id
sql = 'UPDATE aluno SET nome = ? WHERE id = ?'
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql, params)
conn.commit()
return jsonify(dados['nome']), 200
@aluno.route('/alunos/<int:id>', methods=['DELETE'])
def deletar_aluno(id):
params = id,
sql = 'DELETE FROM aluno WHERE id = ?'
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql, params)
conn.commit()
return jsonify(id), 200
<|reserved_special_token_1|>
from flask import Blueprint, request, jsonify
from to_dict import *
from validacao import *
import sqlite3
from migration import conectar, create_database
from contextlib import closing
aluno = Blueprint("aluno", __name__)
@aluno.route("/hello")
def hello():
return "Hello, aluno"
@aluno.route("/reseta", methods = ["POST"])
def reseta():
sqlaluno = """DELETE FROM aluno"""
sqldisciplina = """DELETE FROM disciplina"""
sqlprofessor = """DELETE FROM professor"""
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sqlaluno)
cursor.execute(sqldisciplina)
cursor.execute(sqlprofessor)
conn.commit()
return jsonify({'sucess': 'reset efetuado com suceso'}), 200
@aluno.route("/alunos", methods = ["GET"])
def alunos_retorna_lista():
sql = """SELECT * FROM aluno"""
resultados = []
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql)
linhas = cursor.fetchall()
for id, nome in linhas:
resultados.append({"id": id, "nome": nome})
return jsonify(resultados), 200
#return jsonify(alunos), 200
@aluno.route('/alunos/<int:id>', methods = ["GET"])
def aluno_por_id(id):
sql = "SELECT id, nome FROM aluno WHERE id = ?"
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql, (id, ))
r = cursor.fetchone()
if r == None: return None
return {"id": r[0], "nome": r[1]}
@aluno.route("/alunos", methods = ["POST"])
def adiciona_alunos():
dados = request.get_json()
params = (dados['nome'],)
sql = "INSERT INTO aluno (nome) VALUES (?)"
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql, (params))
conn.commit()
return jsonify(cursor.lastrowid)
@aluno.route("/alunos/<int:id>", methods = ["PUT"])
def editar_aluno(id):
dados = request.get_json()
params = (dados['nome'], id)
sql = "UPDATE aluno SET nome = ? WHERE id = ?"
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql, (params))
conn.commit()
return jsonify(dados['nome']), 200
# for aluno in alunos:
# if aluno['id'] == id:
# aluno['nome'] = request.get_json().get('nome')
# return jsonify(aluno), 200
# return jsonify({'erro': 'aluno não encontrado'}), 404
@aluno.route("/alunos/<int:id>", methods = ["DELETE"])
def deletar_aluno(id):
params = (id,)
sql = "DELETE FROM aluno WHERE id = ?"
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql, (params))
conn.commit()
return jsonify(id), 200
|
flexible
|
{
"blob_id": "5068336ca1a180e09a7efd41eea596cdcebb33ae",
"index": 5586,
"step-1": "<mask token>\n\n\n@aluno.route('/hello')\ndef hello():\n return 'Hello, aluno'\n\n\n@aluno.route('/reseta', methods=['POST'])\ndef reseta():\n sqlaluno = 'DELETE FROM aluno'\n sqldisciplina = 'DELETE FROM disciplina'\n sqlprofessor = 'DELETE FROM professor'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sqlaluno)\n cursor.execute(sqldisciplina)\n cursor.execute(sqlprofessor)\n conn.commit()\n return jsonify({'sucess': 'reset efetuado com suceso'}), 200\n\n\n@aluno.route('/alunos', methods=['GET'])\ndef alunos_retorna_lista():\n sql = 'SELECT * FROM aluno'\n resultados = []\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql)\n linhas = cursor.fetchall()\n for id, nome in linhas:\n resultados.append({'id': id, 'nome': nome})\n return jsonify(resultados), 200\n\n\n<mask token>\n\n\n@aluno.route('/alunos', methods=['POST'])\ndef adiciona_alunos():\n dados = request.get_json()\n params = dados['nome'],\n sql = 'INSERT INTO aluno (nome) VALUES (?)'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, params)\n conn.commit()\n return jsonify(cursor.lastrowid)\n\n\n@aluno.route('/alunos/<int:id>', methods=['PUT'])\ndef editar_aluno(id):\n dados = request.get_json()\n params = dados['nome'], id\n sql = 'UPDATE aluno SET nome = ? WHERE id = ?'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, params)\n conn.commit()\n return jsonify(dados['nome']), 200\n\n\n@aluno.route('/alunos/<int:id>', methods=['DELETE'])\ndef deletar_aluno(id):\n params = id,\n sql = 'DELETE FROM aluno WHERE id = ?'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, params)\n conn.commit()\n return jsonify(id), 200\n",
"step-2": "<mask token>\n\n\n@aluno.route('/hello')\ndef hello():\n return 'Hello, aluno'\n\n\n@aluno.route('/reseta', methods=['POST'])\ndef reseta():\n sqlaluno = 'DELETE FROM aluno'\n sqldisciplina = 'DELETE FROM disciplina'\n sqlprofessor = 'DELETE FROM professor'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sqlaluno)\n cursor.execute(sqldisciplina)\n cursor.execute(sqlprofessor)\n conn.commit()\n return jsonify({'sucess': 'reset efetuado com suceso'}), 200\n\n\n@aluno.route('/alunos', methods=['GET'])\ndef alunos_retorna_lista():\n sql = 'SELECT * FROM aluno'\n resultados = []\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql)\n linhas = cursor.fetchall()\n for id, nome in linhas:\n resultados.append({'id': id, 'nome': nome})\n return jsonify(resultados), 200\n\n\n@aluno.route('/alunos/<int:id>', methods=['GET'])\ndef aluno_por_id(id):\n sql = 'SELECT id, nome FROM aluno WHERE id = ?'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, (id,))\n r = cursor.fetchone()\n if r == None:\n return None\n return {'id': r[0], 'nome': r[1]}\n\n\n@aluno.route('/alunos', methods=['POST'])\ndef adiciona_alunos():\n dados = request.get_json()\n params = dados['nome'],\n sql = 'INSERT INTO aluno (nome) VALUES (?)'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, params)\n conn.commit()\n return jsonify(cursor.lastrowid)\n\n\n@aluno.route('/alunos/<int:id>', methods=['PUT'])\ndef editar_aluno(id):\n dados = request.get_json()\n params = dados['nome'], id\n sql = 'UPDATE aluno SET nome = ? 
WHERE id = ?'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, params)\n conn.commit()\n return jsonify(dados['nome']), 200\n\n\n@aluno.route('/alunos/<int:id>', methods=['DELETE'])\ndef deletar_aluno(id):\n params = id,\n sql = 'DELETE FROM aluno WHERE id = ?'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, params)\n conn.commit()\n return jsonify(id), 200\n",
"step-3": "<mask token>\naluno = Blueprint('aluno', __name__)\n\n\n@aluno.route('/hello')\ndef hello():\n return 'Hello, aluno'\n\n\n@aluno.route('/reseta', methods=['POST'])\ndef reseta():\n sqlaluno = 'DELETE FROM aluno'\n sqldisciplina = 'DELETE FROM disciplina'\n sqlprofessor = 'DELETE FROM professor'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sqlaluno)\n cursor.execute(sqldisciplina)\n cursor.execute(sqlprofessor)\n conn.commit()\n return jsonify({'sucess': 'reset efetuado com suceso'}), 200\n\n\n@aluno.route('/alunos', methods=['GET'])\ndef alunos_retorna_lista():\n sql = 'SELECT * FROM aluno'\n resultados = []\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql)\n linhas = cursor.fetchall()\n for id, nome in linhas:\n resultados.append({'id': id, 'nome': nome})\n return jsonify(resultados), 200\n\n\n@aluno.route('/alunos/<int:id>', methods=['GET'])\ndef aluno_por_id(id):\n sql = 'SELECT id, nome FROM aluno WHERE id = ?'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, (id,))\n r = cursor.fetchone()\n if r == None:\n return None\n return {'id': r[0], 'nome': r[1]}\n\n\n@aluno.route('/alunos', methods=['POST'])\ndef adiciona_alunos():\n dados = request.get_json()\n params = dados['nome'],\n sql = 'INSERT INTO aluno (nome) VALUES (?)'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, params)\n conn.commit()\n return jsonify(cursor.lastrowid)\n\n\n@aluno.route('/alunos/<int:id>', methods=['PUT'])\ndef editar_aluno(id):\n dados = request.get_json()\n params = dados['nome'], id\n sql = 'UPDATE aluno SET nome = ? 
WHERE id = ?'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, params)\n conn.commit()\n return jsonify(dados['nome']), 200\n\n\n@aluno.route('/alunos/<int:id>', methods=['DELETE'])\ndef deletar_aluno(id):\n params = id,\n sql = 'DELETE FROM aluno WHERE id = ?'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, params)\n conn.commit()\n return jsonify(id), 200\n",
"step-4": "from flask import Blueprint, request, jsonify\nfrom to_dict import *\nfrom validacao import *\nimport sqlite3\nfrom migration import conectar, create_database\nfrom contextlib import closing\naluno = Blueprint('aluno', __name__)\n\n\n@aluno.route('/hello')\ndef hello():\n return 'Hello, aluno'\n\n\n@aluno.route('/reseta', methods=['POST'])\ndef reseta():\n sqlaluno = 'DELETE FROM aluno'\n sqldisciplina = 'DELETE FROM disciplina'\n sqlprofessor = 'DELETE FROM professor'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sqlaluno)\n cursor.execute(sqldisciplina)\n cursor.execute(sqlprofessor)\n conn.commit()\n return jsonify({'sucess': 'reset efetuado com suceso'}), 200\n\n\n@aluno.route('/alunos', methods=['GET'])\ndef alunos_retorna_lista():\n sql = 'SELECT * FROM aluno'\n resultados = []\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql)\n linhas = cursor.fetchall()\n for id, nome in linhas:\n resultados.append({'id': id, 'nome': nome})\n return jsonify(resultados), 200\n\n\n@aluno.route('/alunos/<int:id>', methods=['GET'])\ndef aluno_por_id(id):\n sql = 'SELECT id, nome FROM aluno WHERE id = ?'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, (id,))\n r = cursor.fetchone()\n if r == None:\n return None\n return {'id': r[0], 'nome': r[1]}\n\n\n@aluno.route('/alunos', methods=['POST'])\ndef adiciona_alunos():\n dados = request.get_json()\n params = dados['nome'],\n sql = 'INSERT INTO aluno (nome) VALUES (?)'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, params)\n conn.commit()\n return jsonify(cursor.lastrowid)\n\n\n@aluno.route('/alunos/<int:id>', methods=['PUT'])\ndef editar_aluno(id):\n dados = request.get_json()\n params = dados['nome'], id\n sql = 'UPDATE aluno SET nome = ? 
WHERE id = ?'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, params)\n conn.commit()\n return jsonify(dados['nome']), 200\n\n\n@aluno.route('/alunos/<int:id>', methods=['DELETE'])\ndef deletar_aluno(id):\n params = id,\n sql = 'DELETE FROM aluno WHERE id = ?'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, params)\n conn.commit()\n return jsonify(id), 200\n",
"step-5": "from flask import Blueprint, request, jsonify\nfrom to_dict import *\nfrom validacao import *\nimport sqlite3\nfrom migration import conectar, create_database\nfrom contextlib import closing\n\naluno = Blueprint(\"aluno\", __name__)\n\n@aluno.route(\"/hello\")\ndef hello():\n return \"Hello, aluno\"\n\n@aluno.route(\"/reseta\", methods = [\"POST\"])\ndef reseta():\n sqlaluno = \"\"\"DELETE FROM aluno\"\"\"\n sqldisciplina = \"\"\"DELETE FROM disciplina\"\"\"\n sqlprofessor = \"\"\"DELETE FROM professor\"\"\"\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sqlaluno)\n cursor.execute(sqldisciplina)\n cursor.execute(sqlprofessor)\n conn.commit()\n return jsonify({'sucess': 'reset efetuado com suceso'}), 200\n\n@aluno.route(\"/alunos\", methods = [\"GET\"])\ndef alunos_retorna_lista():\n sql = \"\"\"SELECT * FROM aluno\"\"\"\n resultados = []\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql)\n linhas = cursor.fetchall()\n for id, nome in linhas:\n resultados.append({\"id\": id, \"nome\": nome})\n return jsonify(resultados), 200\n #return jsonify(alunos), 200\n\n@aluno.route('/alunos/<int:id>', methods = [\"GET\"])\ndef aluno_por_id(id):\n sql = \"SELECT id, nome FROM aluno WHERE id = ?\"\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, (id, ))\n r = cursor.fetchone()\n if r == None: return None\n return {\"id\": r[0], \"nome\": r[1]}\n\n\n@aluno.route(\"/alunos\", methods = [\"POST\"])\ndef adiciona_alunos():\n dados = request.get_json()\n params = (dados['nome'],)\n sql = \"INSERT INTO aluno (nome) VALUES (?)\"\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, (params))\n conn.commit()\n return jsonify(cursor.lastrowid)\n \n\n\n@aluno.route(\"/alunos/<int:id>\", methods = [\"PUT\"])\ndef editar_aluno(id):\n dados = request.get_json()\n params = (dados['nome'], id)\n sql = \"UPDATE aluno SET 
nome = ? WHERE id = ?\"\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, (params))\n conn.commit()\n return jsonify(dados['nome']), 200\n\n# for aluno in alunos:\n# if aluno['id'] == id:\n# aluno['nome'] = request.get_json().get('nome')\n# return jsonify(aluno), 200\n# return jsonify({'erro': 'aluno não encontrado'}), 404\n\n@aluno.route(\"/alunos/<int:id>\", methods = [\"DELETE\"])\ndef deletar_aluno(id):\n params = (id,)\n sql = \"DELETE FROM aluno WHERE id = ?\"\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, (params))\n conn.commit()\n return jsonify(id), 200",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
class Network:
def placeholder_inputs(self, batch_size, num_point):
source_pointclouds_pl = tf.placeholder(tf.float32, shape=(
batch_size, num_point, 3))
return source_pointclouds_pl
def get_model(self, source_pointclouds_pl, feature_size, is_training,
bn_decay=None):
""" Classification PointNet, input is BxNx3, output Bx40 """
batch_size = source_pointclouds_pl.get_shape()[0].value
num_point = source_pointclouds_pl.get_shape()[1].value
end_points = {}
input_image = tf.expand_dims(source_pointclouds_pl, -1)
net = tf_util.conv2d(input_image, 128, [1, 3], padding='VALID',
stride=[1, 1], bn=True, is_training=is_training, scope='conv1',
bn_decay=bn_decay)
net = tf_util.conv2d(net, 256, [1, 1], padding='VALID', stride=[1,
1], bn=True, is_training=is_training, scope='conv2', bn_decay=
bn_decay, activation_fn=None)
source_feature = tf_util.max_pool2d(net, [num_point, 1], padding=
'VALID', scope='maxpool')
source_feature = tf.tile(source_feature, [1, num_point, 1, 1])
source_feature = tf.concat([net, source_feature], axis=3)
net = tf_util.conv2d(source_feature, 512, [1, 1], padding='VALID',
stride=[1, 1], bn=True, is_training=is_training, scope='conv3',
bn_decay=bn_decay)
net = tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1,
1], bn=True, is_training=is_training, scope='conv4', bn_decay=
bn_decay, activation_fn=None)
source_global_feature = tf_util.max_pool2d(net, [num_point, 1],
padding='VALID', scope='maxpool')
source_global_feature = tf.reshape(source_global_feature, [
batch_size, -1])
return source_global_feature
def decode_data(self, source_global_feature, is_training, bn_decay=None):
batch_size = source_global_feature.get_shape()[0].value
net = tf_util.fully_connected(source_global_feature, 1024, bn=True,
is_training=is_training, scope='fc1', bn_decay=bn_decay)
net = tf_util.fully_connected(net, 1024, bn=True, is_training=
is_training, scope='fc2', bn_decay=bn_decay)
net = tf_util.fully_connected(net, 1024 * 3, activation_fn=None,
scope='fc3')
predicted_pointclouds_pl = tf.reshape(net, [batch_size, 1024, 3])
return predicted_pointclouds_pl
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
<|reserved_special_token_0|>
class Network:
def placeholder_inputs(self, batch_size, num_point):
source_pointclouds_pl = tf.placeholder(tf.float32, shape=(
batch_size, num_point, 3))
return source_pointclouds_pl
def get_model(self, source_pointclouds_pl, feature_size, is_training,
bn_decay=None):
""" Classification PointNet, input is BxNx3, output Bx40 """
batch_size = source_pointclouds_pl.get_shape()[0].value
num_point = source_pointclouds_pl.get_shape()[1].value
end_points = {}
input_image = tf.expand_dims(source_pointclouds_pl, -1)
net = tf_util.conv2d(input_image, 128, [1, 3], padding='VALID',
stride=[1, 1], bn=True, is_training=is_training, scope='conv1',
bn_decay=bn_decay)
net = tf_util.conv2d(net, 256, [1, 1], padding='VALID', stride=[1,
1], bn=True, is_training=is_training, scope='conv2', bn_decay=
bn_decay, activation_fn=None)
source_feature = tf_util.max_pool2d(net, [num_point, 1], padding=
'VALID', scope='maxpool')
source_feature = tf.tile(source_feature, [1, num_point, 1, 1])
source_feature = tf.concat([net, source_feature], axis=3)
net = tf_util.conv2d(source_feature, 512, [1, 1], padding='VALID',
stride=[1, 1], bn=True, is_training=is_training, scope='conv3',
bn_decay=bn_decay)
net = tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1,
1], bn=True, is_training=is_training, scope='conv4', bn_decay=
bn_decay, activation_fn=None)
source_global_feature = tf_util.max_pool2d(net, [num_point, 1],
padding='VALID', scope='maxpool')
source_global_feature = tf.reshape(source_global_feature, [
batch_size, -1])
return source_global_feature
def decode_data(self, source_global_feature, is_training, bn_decay=None):
batch_size = source_global_feature.get_shape()[0].value
net = tf_util.fully_connected(source_global_feature, 1024, bn=True,
is_training=is_training, scope='fc1', bn_decay=bn_decay)
net = tf_util.fully_connected(net, 1024, bn=True, is_training=
is_training, scope='fc2', bn_decay=bn_decay)
net = tf_util.fully_connected(net, 1024 * 3, activation_fn=None,
scope='fc3')
predicted_pointclouds_pl = tf.reshape(net, [batch_size, 1024, 3])
return predicted_pointclouds_pl
def get_loss_b(self, predicted_pointclouds_pl, source_pointclouds_pl):
with tf.variable_scope('loss') as LossEvaluation:
loss = tf_util_loss.chamfer(predicted_pointclouds_pl,
source_pointclouds_pl)
return loss
if __name__ == '__main__':
with tf.Graph().as_default():
net = Network()
inputs = tf.zeros((32, 1024, 3))
outputs = net.get_model(inputs, 1024, tf.constant(True))
print(outputs)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
<|reserved_special_token_0|>
class Network:
def placeholder_inputs(self, batch_size, num_point):
source_pointclouds_pl = tf.placeholder(tf.float32, shape=(
batch_size, num_point, 3))
return source_pointclouds_pl
def get_model(self, source_pointclouds_pl, feature_size, is_training,
bn_decay=None):
""" Classification PointNet, input is BxNx3, output Bx40 """
batch_size = source_pointclouds_pl.get_shape()[0].value
num_point = source_pointclouds_pl.get_shape()[1].value
end_points = {}
input_image = tf.expand_dims(source_pointclouds_pl, -1)
net = tf_util.conv2d(input_image, 128, [1, 3], padding='VALID',
stride=[1, 1], bn=True, is_training=is_training, scope='conv1',
bn_decay=bn_decay)
net = tf_util.conv2d(net, 256, [1, 1], padding='VALID', stride=[1,
1], bn=True, is_training=is_training, scope='conv2', bn_decay=
bn_decay, activation_fn=None)
source_feature = tf_util.max_pool2d(net, [num_point, 1], padding=
'VALID', scope='maxpool')
source_feature = tf.tile(source_feature, [1, num_point, 1, 1])
source_feature = tf.concat([net, source_feature], axis=3)
net = tf_util.conv2d(source_feature, 512, [1, 1], padding='VALID',
stride=[1, 1], bn=True, is_training=is_training, scope='conv3',
bn_decay=bn_decay)
net = tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1,
1], bn=True, is_training=is_training, scope='conv4', bn_decay=
bn_decay, activation_fn=None)
source_global_feature = tf_util.max_pool2d(net, [num_point, 1],
padding='VALID', scope='maxpool')
source_global_feature = tf.reshape(source_global_feature, [
batch_size, -1])
return source_global_feature
def decode_data(self, source_global_feature, is_training, bn_decay=None):
batch_size = source_global_feature.get_shape()[0].value
net = tf_util.fully_connected(source_global_feature, 1024, bn=True,
is_training=is_training, scope='fc1', bn_decay=bn_decay)
net = tf_util.fully_connected(net, 1024, bn=True, is_training=
is_training, scope='fc2', bn_decay=bn_decay)
net = tf_util.fully_connected(net, 1024 * 3, activation_fn=None,
scope='fc3')
predicted_pointclouds_pl = tf.reshape(net, [batch_size, 1024, 3])
return predicted_pointclouds_pl
def get_loss_b(self, predicted_pointclouds_pl, source_pointclouds_pl):
with tf.variable_scope('loss') as LossEvaluation:
loss = tf_util_loss.chamfer(predicted_pointclouds_pl,
source_pointclouds_pl)
return loss
if __name__ == '__main__':
with tf.Graph().as_default():
net = Network()
inputs = tf.zeros((32, 1024, 3))
outputs = net.get_model(inputs, 1024, tf.constant(True))
print(outputs)
<|reserved_special_token_1|>
import tensorflow as tf
import numpy as np
import math
import sys
import os
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tf_util
import tf_util_loss
class Network:
    """PointNet-style point-cloud autoencoder.

    `get_model` encodes a BxNx3 cloud into a per-batch global feature,
    `decode_data` maps that feature back to a Bx1024x3 cloud, and
    `get_loss_b` scores the reconstruction with the Chamfer distance.
    """

    def placeholder_inputs(self, batch_size, num_point):
        """Return a float32 placeholder of shape (batch, num_point, 3)."""
        return tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))

    def get_model(self, source_pointclouds_pl, feature_size, is_training,
                  bn_decay=None):
        """Encode a BxNx3 point cloud into a per-batch global feature.

        Per-point features from conv1/conv2 are max-pooled into a
        cloud-wide descriptor, tiled back onto every point, and the
        concatenation is refined by conv3/conv4 before a final max-pool.
        """
        n_batch = source_pointclouds_pl.get_shape()[0].value
        n_points = source_pointclouds_pl.get_shape()[1].value
        # View the cloud as an (N, 3, 1) "image" so 2-D convs act per point.
        point_image = tf.expand_dims(source_pointclouds_pl, -1)
        per_point = tf_util.conv2d(point_image, 128, [1, 3],
                                   padding='VALID', stride=[1, 1],
                                   bn=True, is_training=is_training,
                                   scope='conv1', bn_decay=bn_decay)
        per_point = tf_util.conv2d(per_point, 256, [1, 1],
                                   padding='VALID', stride=[1, 1],
                                   bn=True, is_training=is_training,
                                   scope='conv2', bn_decay=bn_decay,
                                   activation_fn=None)
        # Cloud-wide context: max over points, tiled back onto each point.
        pooled = tf_util.max_pool2d(per_point, [n_points, 1],
                                    padding='VALID', scope='maxpool')
        pooled = tf.tile(pooled, [1, n_points, 1, 1])
        combined = tf.concat([per_point, pooled], axis=3)
        deep = tf_util.conv2d(combined, 512, [1, 1], padding='VALID',
                              stride=[1, 1], bn=True,
                              is_training=is_training, scope='conv3',
                              bn_decay=bn_decay)
        deep = tf_util.conv2d(deep, 1024, [1, 1], padding='VALID',
                              stride=[1, 1], bn=True,
                              is_training=is_training, scope='conv4',
                              bn_decay=bn_decay, activation_fn=None)
        global_feature = tf_util.max_pool2d(deep, [n_points, 1],
                                            padding='VALID', scope='maxpool')
        return tf.reshape(global_feature, [n_batch, -1])

    def decode_data(self, source_global_feature, is_training, bn_decay=None):
        """Decode a global feature vector into a (batch, 1024, 3) cloud."""
        n_batch = source_global_feature.get_shape()[0].value
        hidden = tf_util.fully_connected(source_global_feature, 1024,
                                         bn=True, is_training=is_training,
                                         scope='fc1', bn_decay=bn_decay)
        hidden = tf_util.fully_connected(hidden, 1024, bn=True,
                                         is_training=is_training,
                                         scope='fc2', bn_decay=bn_decay)
        flat = tf_util.fully_connected(hidden, 1024 * 3,
                                       activation_fn=None, scope='fc3')
        return tf.reshape(flat, [n_batch, 1024, 3])

    def get_loss_b(self, predicted_pointclouds_pl, source_pointclouds_pl):
        """Chamfer distance between the reconstruction and the source."""
        with tf.variable_scope('loss'):
            return tf_util_loss.chamfer(predicted_pointclouds_pl,
                                        source_pointclouds_pl)
if __name__ == '__main__':
    # Quick graph-construction check on a zero-filled dummy batch.
    with tf.Graph().as_default():
        model = Network()
        dummy_points = tf.zeros((32, 1024, 3))
        feature = model.get_model(dummy_points, 1024, tf.constant(True))
        print(feature)
<|reserved_special_token_1|>
import tensorflow as tf
import numpy as np
import math
import sys
import os
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tf_util
# from transform_nets import input_transform_net, feature_transform_net
import tf_util_loss
class Network:
    """PointNet-style point-cloud autoencoder.

    Encodes a BxNx3 cloud into a global feature (`get_model`), decodes it
    back to Bx1024x3 (`decode_data`), and scores the reconstruction with
    the Chamfer distance (`get_loss_b`).
    """

    def placeholder_inputs(self, batch_size, num_point):
        """Return a float32 placeholder of shape (batch, num_point, 3)."""
        return tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))

    def get_model(self, source_pointclouds_pl, feature_size, is_training,
                  bn_decay=None):
        """Encode a BxNx3 point cloud into a per-batch global feature.

        conv1/conv2 build per-point features; a max-pool summarises the
        whole cloud, is tiled back per point, concatenated, and refined
        by conv3/conv4 before the final max-pool.
        """
        n_batch = source_pointclouds_pl.get_shape()[0].value
        n_points = source_pointclouds_pl.get_shape()[1].value
        # View the cloud as an (N, 3, 1) "image" so 2-D convs act per point.
        point_image = tf.expand_dims(source_pointclouds_pl, -1)
        per_point = tf_util.conv2d(point_image, 128, [1, 3],
                                   padding='VALID', stride=[1, 1],
                                   bn=True, is_training=is_training,
                                   scope='conv1', bn_decay=bn_decay)
        per_point = tf_util.conv2d(per_point, 256, [1, 1],
                                   padding='VALID', stride=[1, 1],
                                   bn=True, is_training=is_training,
                                   scope='conv2', bn_decay=bn_decay,
                                   activation_fn=None)
        # Symmetric max-pool over points, tiled back onto each point.
        pooled = tf_util.max_pool2d(per_point, [n_points, 1],
                                    padding='VALID', scope='maxpool')
        pooled = tf.tile(pooled, [1, n_points, 1, 1])
        combined = tf.concat([per_point, pooled], axis=3)
        deep = tf_util.conv2d(combined, 512, [1, 1], padding='VALID',
                              stride=[1, 1], bn=True,
                              is_training=is_training, scope='conv3',
                              bn_decay=bn_decay)
        deep = tf_util.conv2d(deep, 1024, [1, 1], padding='VALID',
                              stride=[1, 1], bn=True,
                              is_training=is_training, scope='conv4',
                              bn_decay=bn_decay, activation_fn=None)
        global_feature = tf_util.max_pool2d(deep, [n_points, 1],
                                            padding='VALID', scope='maxpool')
        return tf.reshape(global_feature, [n_batch, -1])

    def decode_data(self, source_global_feature, is_training, bn_decay=None):
        """Decode a global feature vector into a (batch, 1024, 3) cloud."""
        n_batch = source_global_feature.get_shape()[0].value
        hidden = tf_util.fully_connected(source_global_feature, 1024,
                                         bn=True, is_training=is_training,
                                         scope='fc1', bn_decay=bn_decay)
        hidden = tf_util.fully_connected(hidden, 1024, bn=True,
                                         is_training=is_training,
                                         scope='fc2', bn_decay=bn_decay)
        flat = tf_util.fully_connected(hidden, 1024 * 3,
                                       activation_fn=None, scope='fc3')
        return tf.reshape(flat, [n_batch, 1024, 3])

    def get_loss_b(self, predicted_pointclouds_pl, source_pointclouds_pl):
        """Chamfer distance between the reconstruction and the source."""
        with tf.variable_scope('loss'):
            return tf_util_loss.chamfer(predicted_pointclouds_pl,
                                        source_pointclouds_pl)
if __name__ == '__main__':
    # Quick graph-construction check on a zero-filled dummy batch.
    with tf.Graph().as_default():
        model = Network()
        dummy_points = tf.zeros((32, 1024, 3))
        feature = model.get_model(dummy_points, 1024, tf.constant(True))
        print(feature)
|
flexible
|
{
"blob_id": "e4a0f26afe8c78e4abbd85834c96ed5ba84e1f0b",
"index": 3894,
"step-1": "<mask token>\n\n\nclass Network:\n\n def placeholder_inputs(self, batch_size, num_point):\n source_pointclouds_pl = tf.placeholder(tf.float32, shape=(\n batch_size, num_point, 3))\n return source_pointclouds_pl\n\n def get_model(self, source_pointclouds_pl, feature_size, is_training,\n bn_decay=None):\n \"\"\" Classification PointNet, input is BxNx3, output Bx40 \"\"\"\n batch_size = source_pointclouds_pl.get_shape()[0].value\n num_point = source_pointclouds_pl.get_shape()[1].value\n end_points = {}\n input_image = tf.expand_dims(source_pointclouds_pl, -1)\n net = tf_util.conv2d(input_image, 128, [1, 3], padding='VALID',\n stride=[1, 1], bn=True, is_training=is_training, scope='conv1',\n bn_decay=bn_decay)\n net = tf_util.conv2d(net, 256, [1, 1], padding='VALID', stride=[1, \n 1], bn=True, is_training=is_training, scope='conv2', bn_decay=\n bn_decay, activation_fn=None)\n source_feature = tf_util.max_pool2d(net, [num_point, 1], padding=\n 'VALID', scope='maxpool')\n source_feature = tf.tile(source_feature, [1, num_point, 1, 1])\n source_feature = tf.concat([net, source_feature], axis=3)\n net = tf_util.conv2d(source_feature, 512, [1, 1], padding='VALID',\n stride=[1, 1], bn=True, is_training=is_training, scope='conv3',\n bn_decay=bn_decay)\n net = tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1,\n 1], bn=True, is_training=is_training, scope='conv4', bn_decay=\n bn_decay, activation_fn=None)\n source_global_feature = tf_util.max_pool2d(net, [num_point, 1],\n padding='VALID', scope='maxpool')\n source_global_feature = tf.reshape(source_global_feature, [\n batch_size, -1])\n return source_global_feature\n\n def decode_data(self, source_global_feature, is_training, bn_decay=None):\n batch_size = source_global_feature.get_shape()[0].value\n net = tf_util.fully_connected(source_global_feature, 1024, bn=True,\n is_training=is_training, scope='fc1', bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 1024, bn=True, is_training=\n is_training, 
scope='fc2', bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 1024 * 3, activation_fn=None,\n scope='fc3')\n predicted_pointclouds_pl = tf.reshape(net, [batch_size, 1024, 3])\n return predicted_pointclouds_pl\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(BASE_DIR, '../utils'))\n<mask token>\n\n\nclass Network:\n\n def placeholder_inputs(self, batch_size, num_point):\n source_pointclouds_pl = tf.placeholder(tf.float32, shape=(\n batch_size, num_point, 3))\n return source_pointclouds_pl\n\n def get_model(self, source_pointclouds_pl, feature_size, is_training,\n bn_decay=None):\n \"\"\" Classification PointNet, input is BxNx3, output Bx40 \"\"\"\n batch_size = source_pointclouds_pl.get_shape()[0].value\n num_point = source_pointclouds_pl.get_shape()[1].value\n end_points = {}\n input_image = tf.expand_dims(source_pointclouds_pl, -1)\n net = tf_util.conv2d(input_image, 128, [1, 3], padding='VALID',\n stride=[1, 1], bn=True, is_training=is_training, scope='conv1',\n bn_decay=bn_decay)\n net = tf_util.conv2d(net, 256, [1, 1], padding='VALID', stride=[1, \n 1], bn=True, is_training=is_training, scope='conv2', bn_decay=\n bn_decay, activation_fn=None)\n source_feature = tf_util.max_pool2d(net, [num_point, 1], padding=\n 'VALID', scope='maxpool')\n source_feature = tf.tile(source_feature, [1, num_point, 1, 1])\n source_feature = tf.concat([net, source_feature], axis=3)\n net = tf_util.conv2d(source_feature, 512, [1, 1], padding='VALID',\n stride=[1, 1], bn=True, is_training=is_training, scope='conv3',\n bn_decay=bn_decay)\n net = tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1,\n 1], bn=True, is_training=is_training, scope='conv4', bn_decay=\n bn_decay, activation_fn=None)\n source_global_feature = tf_util.max_pool2d(net, [num_point, 1],\n padding='VALID', scope='maxpool')\n source_global_feature = tf.reshape(source_global_feature, [\n batch_size, -1])\n return source_global_feature\n\n def decode_data(self, source_global_feature, is_training, bn_decay=None):\n batch_size = source_global_feature.get_shape()[0].value\n net = tf_util.fully_connected(source_global_feature, 1024, bn=True,\n is_training=is_training, scope='fc1', 
bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 1024, bn=True, is_training=\n is_training, scope='fc2', bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 1024 * 3, activation_fn=None,\n scope='fc3')\n predicted_pointclouds_pl = tf.reshape(net, [batch_size, 1024, 3])\n return predicted_pointclouds_pl\n\n def get_loss_b(self, predicted_pointclouds_pl, source_pointclouds_pl):\n with tf.variable_scope('loss') as LossEvaluation:\n loss = tf_util_loss.chamfer(predicted_pointclouds_pl,\n source_pointclouds_pl)\n return loss\n\n\nif __name__ == '__main__':\n with tf.Graph().as_default():\n net = Network()\n inputs = tf.zeros((32, 1024, 3))\n outputs = net.get_model(inputs, 1024, tf.constant(True))\n print(outputs)\n",
"step-3": "<mask token>\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(BASE_DIR, '../utils'))\n<mask token>\n\n\nclass Network:\n\n def placeholder_inputs(self, batch_size, num_point):\n source_pointclouds_pl = tf.placeholder(tf.float32, shape=(\n batch_size, num_point, 3))\n return source_pointclouds_pl\n\n def get_model(self, source_pointclouds_pl, feature_size, is_training,\n bn_decay=None):\n \"\"\" Classification PointNet, input is BxNx3, output Bx40 \"\"\"\n batch_size = source_pointclouds_pl.get_shape()[0].value\n num_point = source_pointclouds_pl.get_shape()[1].value\n end_points = {}\n input_image = tf.expand_dims(source_pointclouds_pl, -1)\n net = tf_util.conv2d(input_image, 128, [1, 3], padding='VALID',\n stride=[1, 1], bn=True, is_training=is_training, scope='conv1',\n bn_decay=bn_decay)\n net = tf_util.conv2d(net, 256, [1, 1], padding='VALID', stride=[1, \n 1], bn=True, is_training=is_training, scope='conv2', bn_decay=\n bn_decay, activation_fn=None)\n source_feature = tf_util.max_pool2d(net, [num_point, 1], padding=\n 'VALID', scope='maxpool')\n source_feature = tf.tile(source_feature, [1, num_point, 1, 1])\n source_feature = tf.concat([net, source_feature], axis=3)\n net = tf_util.conv2d(source_feature, 512, [1, 1], padding='VALID',\n stride=[1, 1], bn=True, is_training=is_training, scope='conv3',\n bn_decay=bn_decay)\n net = tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1,\n 1], bn=True, is_training=is_training, scope='conv4', bn_decay=\n bn_decay, activation_fn=None)\n source_global_feature = tf_util.max_pool2d(net, [num_point, 1],\n padding='VALID', scope='maxpool')\n source_global_feature = tf.reshape(source_global_feature, [\n batch_size, -1])\n return source_global_feature\n\n def decode_data(self, source_global_feature, is_training, bn_decay=None):\n batch_size = source_global_feature.get_shape()[0].value\n net = tf_util.fully_connected(source_global_feature, 1024, 
bn=True,\n is_training=is_training, scope='fc1', bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 1024, bn=True, is_training=\n is_training, scope='fc2', bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 1024 * 3, activation_fn=None,\n scope='fc3')\n predicted_pointclouds_pl = tf.reshape(net, [batch_size, 1024, 3])\n return predicted_pointclouds_pl\n\n def get_loss_b(self, predicted_pointclouds_pl, source_pointclouds_pl):\n with tf.variable_scope('loss') as LossEvaluation:\n loss = tf_util_loss.chamfer(predicted_pointclouds_pl,\n source_pointclouds_pl)\n return loss\n\n\nif __name__ == '__main__':\n with tf.Graph().as_default():\n net = Network()\n inputs = tf.zeros((32, 1024, 3))\n outputs = net.get_model(inputs, 1024, tf.constant(True))\n print(outputs)\n",
"step-4": "import tensorflow as tf\nimport numpy as np\nimport math\nimport sys\nimport os\nimport numpy as np\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(BASE_DIR, '../utils'))\nimport tf_util\nimport tf_util_loss\n\n\nclass Network:\n\n def placeholder_inputs(self, batch_size, num_point):\n source_pointclouds_pl = tf.placeholder(tf.float32, shape=(\n batch_size, num_point, 3))\n return source_pointclouds_pl\n\n def get_model(self, source_pointclouds_pl, feature_size, is_training,\n bn_decay=None):\n \"\"\" Classification PointNet, input is BxNx3, output Bx40 \"\"\"\n batch_size = source_pointclouds_pl.get_shape()[0].value\n num_point = source_pointclouds_pl.get_shape()[1].value\n end_points = {}\n input_image = tf.expand_dims(source_pointclouds_pl, -1)\n net = tf_util.conv2d(input_image, 128, [1, 3], padding='VALID',\n stride=[1, 1], bn=True, is_training=is_training, scope='conv1',\n bn_decay=bn_decay)\n net = tf_util.conv2d(net, 256, [1, 1], padding='VALID', stride=[1, \n 1], bn=True, is_training=is_training, scope='conv2', bn_decay=\n bn_decay, activation_fn=None)\n source_feature = tf_util.max_pool2d(net, [num_point, 1], padding=\n 'VALID', scope='maxpool')\n source_feature = tf.tile(source_feature, [1, num_point, 1, 1])\n source_feature = tf.concat([net, source_feature], axis=3)\n net = tf_util.conv2d(source_feature, 512, [1, 1], padding='VALID',\n stride=[1, 1], bn=True, is_training=is_training, scope='conv3',\n bn_decay=bn_decay)\n net = tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1,\n 1], bn=True, is_training=is_training, scope='conv4', bn_decay=\n bn_decay, activation_fn=None)\n source_global_feature = tf_util.max_pool2d(net, [num_point, 1],\n padding='VALID', scope='maxpool')\n source_global_feature = tf.reshape(source_global_feature, [\n batch_size, -1])\n return source_global_feature\n\n def decode_data(self, source_global_feature, is_training, bn_decay=None):\n 
batch_size = source_global_feature.get_shape()[0].value\n net = tf_util.fully_connected(source_global_feature, 1024, bn=True,\n is_training=is_training, scope='fc1', bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 1024, bn=True, is_training=\n is_training, scope='fc2', bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 1024 * 3, activation_fn=None,\n scope='fc3')\n predicted_pointclouds_pl = tf.reshape(net, [batch_size, 1024, 3])\n return predicted_pointclouds_pl\n\n def get_loss_b(self, predicted_pointclouds_pl, source_pointclouds_pl):\n with tf.variable_scope('loss') as LossEvaluation:\n loss = tf_util_loss.chamfer(predicted_pointclouds_pl,\n source_pointclouds_pl)\n return loss\n\n\nif __name__ == '__main__':\n with tf.Graph().as_default():\n net = Network()\n inputs = tf.zeros((32, 1024, 3))\n outputs = net.get_model(inputs, 1024, tf.constant(True))\n print(outputs)\n",
"step-5": "import tensorflow as tf\nimport numpy as np\nimport math\nimport sys\nimport os\nimport numpy as np\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(BASE_DIR, '../utils'))\nimport tf_util\n# from transform_nets import input_transform_net, feature_transform_net\nimport tf_util_loss\n\nclass Network:\n\tdef placeholder_inputs(self,batch_size, num_point):\n\t\t# with tf.variable_scope('inputs') as ip:\n\t\tsource_pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))\n\t\treturn source_pointclouds_pl\n\n\tdef get_model(self, source_pointclouds_pl, feature_size, is_training, bn_decay=None):\n\t\t\"\"\" Classification PointNet, input is BxNx3, output Bx40 \"\"\"\n\t\t# with tf.variable_scope('PointNet') as pn:\n\n\t\t# Comment above two lines to have same points for loss and features and also change the variable names in the next line.\n\t\tbatch_size = source_pointclouds_pl.get_shape()[0].value\n\t\tnum_point = source_pointclouds_pl.get_shape()[1].value\n\t\tend_points = {}\n\n\t\tinput_image = tf.expand_dims(source_pointclouds_pl, -1)\n\n\t\tnet = tf_util.conv2d(input_image, 128, [1,3],\n\t\t\t\t\t\t\t padding='VALID', stride=[1,1],\n\t\t\t\t\t\t\t bn=True, is_training=is_training,\n\t\t\t\t\t\t\t scope='conv1', bn_decay=bn_decay)\n\n\t\tnet = tf_util.conv2d(net, 256, [1,1],\n\t\t\t\t\t\t\t padding='VALID', stride=[1,1],\n\t\t\t\t\t\t\t bn=True, is_training=is_training,\n\t\t\t\t\t\t\t scope='conv2', bn_decay=bn_decay, activation_fn=None)\n\n\t\t# Symmetric function: max pooling\n\t\tsource_feature = tf_util.max_pool2d(net, [num_point, 1],\n\t\t\t\t\t\t\t\t padding='VALID', scope='maxpool')\n\t\tsource_feature = tf.tile(source_feature, [1, num_point, 1, 1])\n\t\tsource_feature = tf.concat([net, source_feature], axis=3)\n\t\t\n\t\tnet = tf_util.conv2d(source_feature, 512, [1,1],\n\t\t \t\t\t\t\t padding='VALID', stride=[1,1],\n\t\t\t\t\t\t\t bn=True, 
is_training=is_training,\n\t\t\t\t\t\t\t scope='conv3', bn_decay=bn_decay)\n\n\t\tnet = tf_util.conv2d(net, 1024, [1,1],\n\t\t \t\t\t\t\t padding='VALID', stride=[1,1],\n\t\t \t\t\t\t\t bn=True, is_training=is_training,\n\t\t \t\t\t\t\t scope='conv4', bn_decay=bn_decay, activation_fn=None)\n\t\tsource_global_feature = tf_util.max_pool2d(net, [num_point, 1],\n\t\t \t\t\t\t\t\t padding='VALID', scope='maxpool')\n\t\tsource_global_feature = tf.reshape(source_global_feature, [batch_size, -1])\n\n\t\treturn source_global_feature\n\n\tdef decode_data(self, source_global_feature, is_training, bn_decay=None):\n\t\tbatch_size = source_global_feature.get_shape()[0].value\n\t\tnet = tf_util.fully_connected(source_global_feature, 1024, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)\n\t\tnet = tf_util.fully_connected(net, 1024, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)\t\t\n\t\tnet = tf_util.fully_connected(net, 1024*3, activation_fn=None, scope='fc3')\n\t\tpredicted_pointclouds_pl = tf.reshape(net, [batch_size, 1024, 3])\n\t\treturn predicted_pointclouds_pl\n\n\tdef get_loss_b(self, predicted_pointclouds_pl, source_pointclouds_pl):\n\t\twith tf.variable_scope('loss') as LossEvaluation:\n\t\t\t# loss = tf.reduce_mean(tf.square(tf.subtract(predicted_pointclouds_pl, source_pointclouds_pl)))\n\t\t\tloss = tf_util_loss.chamfer(predicted_pointclouds_pl, source_pointclouds_pl)\n\t\treturn loss\n\nif __name__=='__main__':\n\twith tf.Graph().as_default():\n\t\tnet = Network()\n\t\tinputs = tf.zeros((32,1024,3))\n\t\toutputs = net.get_model(inputs, 1024, tf.constant(True))\n\t\tprint(outputs)",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
"""
CP1404 - Practical
Code that produces a random number between 1 and 100 inclusive
Rhys Simpson
"""
# 1.
# smallest number 5; largest number 20
# 2.
# smallest number 3; largest number 9
# no it can only produce 3, 5, 7, 9
# 3.
# smallest number 2.5000000000000000; largest number 5.5000000000000000
import random
print(random.randint(1, 100))
|
normal
|
{
"blob_id": "46696ee9576d74c087ae435bfd304c8346530ab2",
"index": 9804,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(random.randint(1, 100))\n",
"step-3": "<mask token>\nimport random\nprint(random.randint(1, 100))\n",
"step-4": "\"\"\"\nCP1404 - Practical\nCode that produces a random number between 1 and 100 inclusive\n\nRhys Simpson\n\"\"\"\n# 1.\n# smallest number 5; largest number 20\n\n# 2.\n# smallest number 3; largest number 9\n# no it can only produce 3, 5, 7, 9\n\n# 3.\n# smallest number 2.5000000000000000; largest number 5.5000000000000000\n\nimport random\nprint(random.randint(1, 100))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def plot_image(img, ax, title):
    """Render a channel-first (C, H, W) image array on `ax` with a title."""
    hwc = np.transpose(img, (1, 2, 0))  # matplotlib expects (H, W, C)
    ax.imshow(hwc, interpolation='nearest')
    ax.set_title(title, fontsize=20)
<|reserved_special_token_0|>
def plot_encoding(image, vsc, latent_sz, alpha=None, width=1 / 7):
    """Plot input image, sampled latent code, and the decoded image.

    `vsc` is assumed to expose transform/inverse_transform, a device, and
    a VAE-style model with forward/reparameterize/decode — TODO confirm.
    """
    image = vsc.transform(image).to(vsc.device)
    decoded_params = vsc.model.forward(image)
    z = vsc.model.reparameterize(*decoded_params[1:])
    reconstruction = vsc.inverse_transform(vsc.model.decode(z))
    latent = z.cpu().detach().numpy()[0]
    fig, (in_ax, code_ax, out_ax) = plt.subplots(nrows=1, ncols=3,
                                                 figsize=(14, 5))
    plot_image(to_numpy(image, vsc), in_ax, 'Input Image')
    dims = np.arange(latent_sz)
    code_ax.bar(dims, height=latent, width=width, align='center')
    code_ax.scatter(dims, latent, color='blue')
    if alpha is None:
        code_ax.set_title('Latent Dimension %d' % latent_sz, fontsize=20)
    else:
        code_ax.set_title('Latent Dimension %d - $\\alpha$ = %.2f ' %
                          (latent_sz, alpha), fontsize=20)
    plot_image(to_numpy(reconstruction, vsc), out_ax, 'Decoded Image')
    plt.subplots_adjust(hspace=0.5)
def plot_encoding_tcvae(image, vae, latent_sz, alpha=1, width=1 / 7):
    """Three-panel plot (input / latent code / reconstruction) for a TC-VAE."""
    xs, x_params, zs, z_params = vae.reconstruct_img(image.to('cuda'))
    reconstruction = xs.cpu()[0]
    latent = zs.cpu().detach().numpy()[0]
    fig, (in_ax, code_ax, out_ax) = plt.subplots(nrows=1, ncols=3,
                                                 figsize=(14, 5))
    plot_image(to_numpy(image, vae), in_ax, 'Input Image')
    dims = np.arange(latent_sz)
    code_ax.bar(dims, height=latent, width=width, align='center')
    code_ax.scatter(dims, latent, color='blue')
    code_ax.set_title('Latent Dimension %d - $\\alpha$ = %.2f ' %
                      (latent_sz, alpha), fontsize=20)
    plot_image(to_numpy(reconstruction, vae), out_ax, 'Decoded Image')
    plt.subplots_adjust(hspace=0.5)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def plot_image(img, ax, title):
    """Render a channel-first (C, H, W) image array on `ax` with a title."""
    hwc = np.transpose(img, (1, 2, 0))  # matplotlib expects (H, W, C)
    ax.imshow(hwc, interpolation='nearest')
    ax.set_title(title, fontsize=20)
def to_numpy(image, vsc):
    """Reshape a flat image tensor to (1, C, H, W) and return it as a
    numpy grid via torchvision's make_grid."""
    reshaped = image.view(1, vsc.channels, vsc.height, vsc.width)
    grid = torchvision.utils.make_grid(reshaped)
    return grid.cpu().detach().numpy()
def plot_encoding(image, vsc, latent_sz, alpha=None, width=1 / 7):
    """Plot input image, sampled latent code, and the decoded image.

    `vsc` is assumed to expose transform/inverse_transform, a device, and
    a VAE-style model with forward/reparameterize/decode — TODO confirm.
    """
    image = vsc.transform(image).to(vsc.device)
    decoded_params = vsc.model.forward(image)
    z = vsc.model.reparameterize(*decoded_params[1:])
    reconstruction = vsc.inverse_transform(vsc.model.decode(z))
    latent = z.cpu().detach().numpy()[0]
    fig, (in_ax, code_ax, out_ax) = plt.subplots(nrows=1, ncols=3,
                                                 figsize=(14, 5))
    plot_image(to_numpy(image, vsc), in_ax, 'Input Image')
    dims = np.arange(latent_sz)
    code_ax.bar(dims, height=latent, width=width, align='center')
    code_ax.scatter(dims, latent, color='blue')
    if alpha is None:
        code_ax.set_title('Latent Dimension %d' % latent_sz, fontsize=20)
    else:
        code_ax.set_title('Latent Dimension %d - $\\alpha$ = %.2f ' %
                          (latent_sz, alpha), fontsize=20)
    plot_image(to_numpy(reconstruction, vsc), out_ax, 'Decoded Image')
    plt.subplots_adjust(hspace=0.5)
def plot_encoding_tcvae(image, vae, latent_sz, alpha=1, width=1 / 7):
    """Three-panel plot (input / latent code / reconstruction) for a TC-VAE."""
    xs, x_params, zs, z_params = vae.reconstruct_img(image.to('cuda'))
    reconstruction = xs.cpu()[0]
    latent = zs.cpu().detach().numpy()[0]
    fig, (in_ax, code_ax, out_ax) = plt.subplots(nrows=1, ncols=3,
                                                 figsize=(14, 5))
    plot_image(to_numpy(image, vae), in_ax, 'Input Image')
    dims = np.arange(latent_sz)
    code_ax.bar(dims, height=latent, width=width, align='center')
    code_ax.scatter(dims, latent, color='blue')
    code_ax.set_title('Latent Dimension %d - $\\alpha$ = %.2f ' %
                      (latent_sz, alpha), fontsize=20)
    plot_image(to_numpy(reconstruction, vae), out_ax, 'Decoded Image')
    plt.subplots_adjust(hspace=0.5)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def plot_image(img, ax, title):
    """Render a channel-first (C, H, W) image array on `ax` with a title."""
    hwc = np.transpose(img, (1, 2, 0))  # matplotlib expects (H, W, C)
    ax.imshow(hwc, interpolation='nearest')
    ax.set_title(title, fontsize=20)
def to_numpy(image, vsc):
    """Reshape a flat image tensor to (1, C, H, W) and return it as a
    numpy grid via torchvision's make_grid."""
    reshaped = image.view(1, vsc.channels, vsc.height, vsc.width)
    grid = torchvision.utils.make_grid(reshaped)
    return grid.cpu().detach().numpy()
def plot_encoding(image, vsc, latent_sz, alpha=None, width=1 / 7):
    """Plot input image, sampled latent code, and the decoded image.

    `vsc` is assumed to expose transform/inverse_transform, a device, and
    a VAE-style model with forward/reparameterize/decode — TODO confirm.
    """
    image = vsc.transform(image).to(vsc.device)
    decoded_params = vsc.model.forward(image)
    z = vsc.model.reparameterize(*decoded_params[1:])
    reconstruction = vsc.inverse_transform(vsc.model.decode(z))
    latent = z.cpu().detach().numpy()[0]
    fig, (in_ax, code_ax, out_ax) = plt.subplots(nrows=1, ncols=3,
                                                 figsize=(14, 5))
    plot_image(to_numpy(image, vsc), in_ax, 'Input Image')
    dims = np.arange(latent_sz)
    code_ax.bar(dims, height=latent, width=width, align='center')
    code_ax.scatter(dims, latent, color='blue')
    if alpha is None:
        code_ax.set_title('Latent Dimension %d' % latent_sz, fontsize=20)
    else:
        code_ax.set_title('Latent Dimension %d - $\\alpha$ = %.2f ' %
                          (latent_sz, alpha), fontsize=20)
    plot_image(to_numpy(reconstruction, vsc), out_ax, 'Decoded Image')
    plt.subplots_adjust(hspace=0.5)
def plot_encoding_tcvae(image, vae, latent_sz, alpha=1, width=1 / 7):
    """Three-panel plot (input / latent code / reconstruction) for a TC-VAE."""
    xs, x_params, zs, z_params = vae.reconstruct_img(image.to('cuda'))
    reconstruction = xs.cpu()[0]
    latent = zs.cpu().detach().numpy()[0]
    fig, (in_ax, code_ax, out_ax) = plt.subplots(nrows=1, ncols=3,
                                                 figsize=(14, 5))
    plot_image(to_numpy(image, vae), in_ax, 'Input Image')
    dims = np.arange(latent_sz)
    code_ax.bar(dims, height=latent, width=width, align='center')
    code_ax.scatter(dims, latent, color='blue')
    code_ax.set_title('Latent Dimension %d - $\\alpha$ = %.2f ' %
                      (latent_sz, alpha), fontsize=20)
    plot_image(to_numpy(reconstruction, vae), out_ax, 'Decoded Image')
    plt.subplots_adjust(hspace=0.5)
def plot_horizontal_traversal(image, vsc, latent_sz, length, delta,
                              threshold=0.0001, plot_all=False,
                              plot_list=None, width=1 / 4,
                              n_indices=15, plot=True):
    """Decode one row of images per active latent dimension and tile them.

    Each selected latent coordinate is stepped toward zero by `delta` for
    `length` steps; the decoded tiles are framed and stacked into a mosaic
    that is returned (and optionally shown).
    """
    image = vsc.transform(image).to(vsc.device)
    decoded_params = vsc.model.forward(image)
    z = vsc.model.reparameterize(*decoded_params[1:])
    # NOTE(review): result below is never used — kept to mirror the
    # original control flow exactly.
    img = vsc.inverse_transform(vsc.model.decode(z))
    z_np = z.cpu().detach().numpy()[0]
    if plot:
        plt.bar(np.arange(latent_sz), height=z_np, width=width,
                align='center')
        plt.scatter(np.arange(latent_sz), z_np, color='blue')
        plt.show()
    # Latent dimensions whose magnitude exceeds the threshold.
    active = [i for i in range(latent_sz) if np.abs(z_np[i]) > threshold]
    chosen = np.random.choice(active, n_indices)
    if plot:
        print(chosen)
    if not plot_all:
        active = chosen
    if plot_list:
        active = plot_list
    if plot:
        print(active)
    rows = []
    for ind in active:
        tiles = []
        z1 = z.clone()
        for _ in range(length):
            tile = np.transpose(to_numpy(vsc.model.decode(z1), vsc),
                                (1, 2, 0))
            # 1-pixel white frame around each tile.
            tile[:, 0] = 1
            tile[:, -1] = 1
            tile[0, :] = 1
            tile[-1, :] = 1
            tiles.append(tile)
            # Step the coordinate toward zero.
            if z[0, ind] < 0:
                z1[0, ind] = z1[0, ind] + delta
            else:
                z1[0, ind] = z1[0, ind] - delta
        rows.append(np.concatenate(tiles, axis=1))
    traversal = np.concatenate(rows, axis=0)
    if plot:
        plt.figure(figsize=(14, 24))
        plt.axis('off')
        plt.imshow(traversal)
        plt.show()
    return traversal
<|reserved_special_token_1|>
import random
import numpy as np
import matplotlib.pyplot as plt
import torchvision
def plot_image(img, ax, title):
    """Render a channel-first (C, H, W) image array on `ax` with a title."""
    hwc = np.transpose(img, (1, 2, 0))  # matplotlib expects (H, W, C)
    ax.imshow(hwc, interpolation='nearest')
    ax.set_title(title, fontsize=20)
def to_numpy(image, vsc):
    """Reshape a flat image tensor to (1, C, H, W) and return it as a
    numpy grid via torchvision's make_grid."""
    reshaped = image.view(1, vsc.channels, vsc.height, vsc.width)
    grid = torchvision.utils.make_grid(reshaped)
    return grid.cpu().detach().numpy()
def plot_encoding(image, vsc, latent_sz, alpha=None, width=1 / 7):
    """Plot input image, sampled latent code, and the decoded image.

    `vsc` is assumed to expose transform/inverse_transform, a device, and
    a VAE-style model with forward/reparameterize/decode — TODO confirm.
    """
    image = vsc.transform(image).to(vsc.device)
    decoded_params = vsc.model.forward(image)
    z = vsc.model.reparameterize(*decoded_params[1:])
    reconstruction = vsc.inverse_transform(vsc.model.decode(z))
    latent = z.cpu().detach().numpy()[0]
    fig, (in_ax, code_ax, out_ax) = plt.subplots(nrows=1, ncols=3,
                                                 figsize=(14, 5))
    plot_image(to_numpy(image, vsc), in_ax, 'Input Image')
    dims = np.arange(latent_sz)
    code_ax.bar(dims, height=latent, width=width, align='center')
    code_ax.scatter(dims, latent, color='blue')
    if alpha is None:
        code_ax.set_title('Latent Dimension %d' % latent_sz, fontsize=20)
    else:
        code_ax.set_title('Latent Dimension %d - $\\alpha$ = %.2f ' %
                          (latent_sz, alpha), fontsize=20)
    plot_image(to_numpy(reconstruction, vsc), out_ax, 'Decoded Image')
    plt.subplots_adjust(hspace=0.5)
def plot_encoding_tcvae(image, vae, latent_sz, alpha=1, width=1 / 7):
    """Three-panel plot (input / latent code / reconstruction) for a TC-VAE."""
    xs, x_params, zs, z_params = vae.reconstruct_img(image.to('cuda'))
    reconstruction = xs.cpu()[0]
    latent = zs.cpu().detach().numpy()[0]
    fig, (in_ax, code_ax, out_ax) = plt.subplots(nrows=1, ncols=3,
                                                 figsize=(14, 5))
    plot_image(to_numpy(image, vae), in_ax, 'Input Image')
    dims = np.arange(latent_sz)
    code_ax.bar(dims, height=latent, width=width, align='center')
    code_ax.scatter(dims, latent, color='blue')
    code_ax.set_title('Latent Dimension %d - $\\alpha$ = %.2f ' %
                      (latent_sz, alpha), fontsize=20)
    plot_image(to_numpy(reconstruction, vae), out_ax, 'Decoded Image')
    plt.subplots_adjust(hspace=0.5)
def plot_horizontal_traversal(image, vsc, latent_sz, length, delta,
                              threshold=0.0001, plot_all=False,
                              plot_list=None, width=1 / 4,
                              n_indices=15, plot=True):
    """Decode one row of images per active latent dimension and tile them.

    For each selected latent index, decodes `length` images while stepping
    that coordinate toward zero by `delta`, frames each tile with a
    1-pixel white border, and stacks the rows into a single mosaic.

    Args:
        image: input image accepted by `vsc.transform` — format assumed
            from the sibling plotting helpers; TODO confirm.
        vsc: model wrapper exposing transform, device, and a VAE-style
            model with forward/reparameterize/decode.
        latent_sz: dimensionality of the latent code.
        length: number of decoded tiles per row.
        delta: per-step magnitude of the latent coordinate change.
        threshold: |z| cutoff for a dimension to count as active.
        plot_all: traverse every active dimension instead of a sample.
        plot_list: explicit list of dimensions; overrides the sampling.
        width: bar width for the latent-code plot.
        n_indices: sample size when plot_all is False and plot_list unset.
        plot: when True, also display intermediate plots and the mosaic.

    Returns:
        The mosaic as an HxWxC numpy array.
    """
    image = vsc.transform(image).to(vsc.device)
    decoded_params = vsc.model.forward(image)
    z = vsc.model.reparameterize(*decoded_params[1:])
    # Fix: the original also ran vsc.inverse_transform(vsc.model.decode(z))
    # here and discarded the result — dead code, removed.
    z_np = z.cpu().detach().numpy()[0]
    if plot:
        plt.bar(np.arange(latent_sz), height=z_np, width=width,
                align='center')
        plt.scatter(np.arange(latent_sz), z_np, color='blue')
        plt.show()
    # Latent dimensions whose magnitude exceeds the threshold.
    active = [i for i in range(latent_sz) if np.abs(z_np[i]) > threshold]
    chosen = np.random.choice(active, n_indices)
    if plot:
        print(chosen)
    if not plot_all:
        active = chosen
    if plot_list:
        active = plot_list
    if plot:
        print(active)
    rows = []
    for ind in active:
        tiles = []
        z1 = z.clone()
        for _ in range(length):
            tile = np.transpose(to_numpy(vsc.model.decode(z1), vsc),
                                (1, 2, 0))
            # 1-pixel white frame around each tile.
            tile[:, 0] = 1
            tile[:, -1] = 1
            tile[0, :] = 1
            tile[-1, :] = 1
            tiles.append(tile)
            # Step the coordinate toward zero.
            if z[0, ind] < 0:
                z1[0, ind] = z1[0, ind] + delta
            else:
                z1[0, ind] = z1[0, ind] - delta
        rows.append(np.concatenate(tiles, axis=1))
    traversal = np.concatenate(rows, axis=0)
    if plot:
        plt.figure(figsize=(14, 24))
        plt.axis('off')
        plt.imshow(traversal)
        plt.show()
    return traversal
<|reserved_special_token_1|>
import random
import numpy as np
import matplotlib.pyplot as plt
import torchvision
def plot_image(img, ax, title):
    """Render a channel-first (C, H, W) image array on `ax` with a title."""
    hwc = np.transpose(img, (1, 2, 0))  # matplotlib expects (H, W, C)
    ax.imshow(hwc, interpolation='nearest')
    ax.set_title(title, fontsize=20)
def to_numpy(image, vsc):
    """Reshape a flat image tensor to (1, C, H, W) and return it as a
    numpy grid via torchvision's make_grid."""
    reshaped = image.view(1, vsc.channels, vsc.height, vsc.width)
    grid = torchvision.utils.make_grid(reshaped)
    return grid.cpu().detach().numpy()
def plot_encoding(image, vsc, latent_sz, alpha=None, width=1 / 7):
    """Plot input image, sampled latent code, and the decoded image.

    `vsc` is assumed to expose transform/inverse_transform, a device, and
    a VAE-style model with forward/reparameterize/decode — TODO confirm.
    """
    image = vsc.transform(image).to(vsc.device)
    decoded_params = vsc.model.forward(image)
    z = vsc.model.reparameterize(*decoded_params[1:])
    reconstruction = vsc.inverse_transform(vsc.model.decode(z))
    latent = z.cpu().detach().numpy()[0]
    fig, (in_ax, code_ax, out_ax) = plt.subplots(nrows=1, ncols=3,
                                                 figsize=(14, 5))
    plot_image(to_numpy(image, vsc), in_ax, 'Input Image')
    dims = np.arange(latent_sz)
    code_ax.bar(dims, height=latent, width=width, align='center')
    code_ax.scatter(dims, latent, color='blue')
    if alpha is None:
        code_ax.set_title('Latent Dimension %d' % latent_sz, fontsize=20)
    else:
        code_ax.set_title('Latent Dimension %d - $\\alpha$ = %.2f ' %
                          (latent_sz, alpha), fontsize=20)
    plot_image(to_numpy(reconstruction, vsc), out_ax, 'Decoded Image')
    plt.subplots_adjust(hspace=0.5)
def plot_encoding_tcvae(image, vae, latent_sz, alpha=1, width=1 / 7):
    """Three-panel plot (input / latent code / reconstruction) for a TC-VAE."""
    xs, x_params, zs, z_params = vae.reconstruct_img(image.to('cuda'))
    reconstruction = xs.cpu()[0]
    latent = zs.cpu().detach().numpy()[0]
    fig, (in_ax, code_ax, out_ax) = plt.subplots(nrows=1, ncols=3,
                                                 figsize=(14, 5))
    plot_image(to_numpy(image, vae), in_ax, 'Input Image')
    dims = np.arange(latent_sz)
    code_ax.bar(dims, height=latent, width=width, align='center')
    code_ax.scatter(dims, latent, color='blue')
    code_ax.set_title('Latent Dimension %d - $\\alpha$ = %.2f ' %
                      (latent_sz, alpha), fontsize=20)
    plot_image(to_numpy(reconstruction, vae), out_ax, 'Decoded Image')
    plt.subplots_adjust(hspace=0.5)
def plot_horizontal_traversal(image, vsc, latent_sz, length,
                              delta, threshold=1e-4, plot_all=False,
                              plot_list=None, width=1/4, n_indices=15, plot=True):
    """Build (and optionally display) latent-space traversals for *image*.

    Encodes the image with *vsc*, selects latent dimensions whose code
    magnitude exceeds *threshold*, and for each selected dimension decodes
    *length* frames while stepping that coordinate by *delta* toward zero.
    Returns the stacked traversal grid (one horizontal strip per dimension).

    NOTE(review): np.random.choice samples with replacement, so strips may
    repeat a dimension, and it raises if no dimension passes the threshold
    — confirm that is intended.
    """
    image = vsc.transform(image).to(vsc.device)
    # decoded, mu, logvar, logspike = vsc.model.forward(image)
    decoded_params = vsc.model.forward(image)
    z = vsc.model.reparameterize(*decoded_params[1:])
    img = vsc.inverse_transform(vsc.model.decode(z))
    z_ = z.cpu().detach().numpy()[0]  # latent code of the first batch element
    if plot:
        # Bar/scatter view of the full latent code before selecting dimensions.
        plt.bar(np.arange(latent_sz), height=z_, width=width, align='center')
        plt.scatter(np.arange(latent_sz), z_, color='blue')
        plt.show()
    # Dimensions whose activation is meaningfully non-zero.
    non_zero = [i for i in range(latent_sz) if np.abs(z_[i]) > threshold]
    inds = np.random.choice(non_zero, n_indices)
    if plot:
        print(inds)
    if not plot_all:
        non_zero = inds # [ind]
    if plot_list:
        # An explicit list of dimensions overrides the random selection.
        non_zero = plot_list
    if plot:
        print(non_zero)
    hor_traversal = []
    for ind in non_zero:
        images = []
        z1 = z.clone()  # fresh copy so each dimension is traversed independently
        for i in range(length):
            img = to_numpy(vsc.model.decode(z1), vsc)
            img = np.transpose(img, (1,2,0))
            # Paint a 1-valued border so adjacent frames are visually separated.
            img[:,0] = 1
            img[:,-1] = 1
            img[0,:] = 1
            img[-1,:] = 1
            images.append(img)
            # Step the coordinate toward zero: increase when the original
            # value is negative, decrease otherwise.
            z1[0, ind] = z1[0, ind] + delta if z[0,ind] < 0 else z1[0, ind] - delta
        hor_traversal.append(np.concatenate(images, axis=1))  # one strip per dim
    traversal = np.concatenate(hor_traversal, axis=0)
    if plot:
        plt.figure(figsize=(14,24))
        plt.axis('off')
        plt.imshow(traversal)
        plt.show()
    return traversal
|
flexible
|
{
"blob_id": "ae27f97b5633309d85b9492e1a0f268847c24cd5",
"index": 9366,
"step-1": "<mask token>\n\n\ndef plot_image(img, ax, title):\n ax.imshow(np.transpose(img, (1, 2, 0)), interpolation='nearest')\n ax.set_title(title, fontsize=20)\n\n\n<mask token>\n\n\ndef plot_encoding(image, vsc, latent_sz, alpha=None, width=1 / 7):\n image = vsc.transform(image).to(vsc.device)\n decoded_params = vsc.model.forward(image)\n z = vsc.model.reparameterize(*decoded_params[1:])\n img = vsc.inverse_transform(vsc.model.decode(z))\n z = z.cpu().detach().numpy()[0]\n fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(14, 5))\n plot_image(to_numpy(image, vsc), ax0, 'Input Image')\n ax1.bar(np.arange(latent_sz), height=z, width=width, align='center')\n ax1.scatter(np.arange(latent_sz), z, color='blue')\n if alpha is not None:\n title = 'Latent Dimension %d - $\\\\alpha$ = %.2f ' % (latent_sz, alpha)\n else:\n title = 'Latent Dimension %d' % latent_sz\n ax1.set_title(title, fontsize=20)\n plot_image(to_numpy(img, vsc), ax2, 'Decoded Image')\n plt.subplots_adjust(hspace=0.5)\n\n\ndef plot_encoding_tcvae(image, vae, latent_sz, alpha=1, width=1 / 7):\n xs, x_params, zs, z_params = vae.reconstruct_img(image.to('cuda'))\n img = xs.cpu()[0]\n z = zs.cpu().detach().numpy()[0]\n fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(14, 5))\n plot_image(to_numpy(image, vae), ax0, 'Input Image')\n ax1.bar(np.arange(latent_sz), height=z, width=width, align='center')\n ax1.scatter(np.arange(latent_sz), z, color='blue')\n ax1.set_title('Latent Dimension %d - $\\\\alpha$ = %.2f ' % (latent_sz,\n alpha), fontsize=20)\n plot_image(to_numpy(img, vae), ax2, 'Decoded Image')\n plt.subplots_adjust(hspace=0.5)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef plot_image(img, ax, title):\n ax.imshow(np.transpose(img, (1, 2, 0)), interpolation='nearest')\n ax.set_title(title, fontsize=20)\n\n\ndef to_numpy(image, vsc):\n return torchvision.utils.make_grid(image.view(1, vsc.channels, vsc.\n height, vsc.width)).cpu().detach().numpy()\n\n\ndef plot_encoding(image, vsc, latent_sz, alpha=None, width=1 / 7):\n image = vsc.transform(image).to(vsc.device)\n decoded_params = vsc.model.forward(image)\n z = vsc.model.reparameterize(*decoded_params[1:])\n img = vsc.inverse_transform(vsc.model.decode(z))\n z = z.cpu().detach().numpy()[0]\n fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(14, 5))\n plot_image(to_numpy(image, vsc), ax0, 'Input Image')\n ax1.bar(np.arange(latent_sz), height=z, width=width, align='center')\n ax1.scatter(np.arange(latent_sz), z, color='blue')\n if alpha is not None:\n title = 'Latent Dimension %d - $\\\\alpha$ = %.2f ' % (latent_sz, alpha)\n else:\n title = 'Latent Dimension %d' % latent_sz\n ax1.set_title(title, fontsize=20)\n plot_image(to_numpy(img, vsc), ax2, 'Decoded Image')\n plt.subplots_adjust(hspace=0.5)\n\n\ndef plot_encoding_tcvae(image, vae, latent_sz, alpha=1, width=1 / 7):\n xs, x_params, zs, z_params = vae.reconstruct_img(image.to('cuda'))\n img = xs.cpu()[0]\n z = zs.cpu().detach().numpy()[0]\n fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(14, 5))\n plot_image(to_numpy(image, vae), ax0, 'Input Image')\n ax1.bar(np.arange(latent_sz), height=z, width=width, align='center')\n ax1.scatter(np.arange(latent_sz), z, color='blue')\n ax1.set_title('Latent Dimension %d - $\\\\alpha$ = %.2f ' % (latent_sz,\n alpha), fontsize=20)\n plot_image(to_numpy(img, vae), ax2, 'Decoded Image')\n plt.subplots_adjust(hspace=0.5)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef plot_image(img, ax, title):\n ax.imshow(np.transpose(img, (1, 2, 0)), interpolation='nearest')\n ax.set_title(title, fontsize=20)\n\n\ndef to_numpy(image, vsc):\n return torchvision.utils.make_grid(image.view(1, vsc.channels, vsc.\n height, vsc.width)).cpu().detach().numpy()\n\n\ndef plot_encoding(image, vsc, latent_sz, alpha=None, width=1 / 7):\n image = vsc.transform(image).to(vsc.device)\n decoded_params = vsc.model.forward(image)\n z = vsc.model.reparameterize(*decoded_params[1:])\n img = vsc.inverse_transform(vsc.model.decode(z))\n z = z.cpu().detach().numpy()[0]\n fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(14, 5))\n plot_image(to_numpy(image, vsc), ax0, 'Input Image')\n ax1.bar(np.arange(latent_sz), height=z, width=width, align='center')\n ax1.scatter(np.arange(latent_sz), z, color='blue')\n if alpha is not None:\n title = 'Latent Dimension %d - $\\\\alpha$ = %.2f ' % (latent_sz, alpha)\n else:\n title = 'Latent Dimension %d' % latent_sz\n ax1.set_title(title, fontsize=20)\n plot_image(to_numpy(img, vsc), ax2, 'Decoded Image')\n plt.subplots_adjust(hspace=0.5)\n\n\ndef plot_encoding_tcvae(image, vae, latent_sz, alpha=1, width=1 / 7):\n xs, x_params, zs, z_params = vae.reconstruct_img(image.to('cuda'))\n img = xs.cpu()[0]\n z = zs.cpu().detach().numpy()[0]\n fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(14, 5))\n plot_image(to_numpy(image, vae), ax0, 'Input Image')\n ax1.bar(np.arange(latent_sz), height=z, width=width, align='center')\n ax1.scatter(np.arange(latent_sz), z, color='blue')\n ax1.set_title('Latent Dimension %d - $\\\\alpha$ = %.2f ' % (latent_sz,\n alpha), fontsize=20)\n plot_image(to_numpy(img, vae), ax2, 'Decoded Image')\n plt.subplots_adjust(hspace=0.5)\n\n\ndef plot_horizontal_traversal(image, vsc, latent_sz, length, delta,\n threshold=0.0001, plot_all=False, plot_list=None, width=1 / 4,\n n_indices=15, plot=True):\n image = vsc.transform(image).to(vsc.device)\n 
decoded_params = vsc.model.forward(image)\n z = vsc.model.reparameterize(*decoded_params[1:])\n img = vsc.inverse_transform(vsc.model.decode(z))\n z_ = z.cpu().detach().numpy()[0]\n if plot:\n plt.bar(np.arange(latent_sz), height=z_, width=width, align='center')\n plt.scatter(np.arange(latent_sz), z_, color='blue')\n plt.show()\n non_zero = [i for i in range(latent_sz) if np.abs(z_[i]) > threshold]\n inds = np.random.choice(non_zero, n_indices)\n if plot:\n print(inds)\n if not plot_all:\n non_zero = inds\n if plot_list:\n non_zero = plot_list\n if plot:\n print(non_zero)\n hor_traversal = []\n for ind in non_zero:\n images = []\n z1 = z.clone()\n for i in range(length):\n img = to_numpy(vsc.model.decode(z1), vsc)\n img = np.transpose(img, (1, 2, 0))\n img[:, 0] = 1\n img[:, -1] = 1\n img[0, :] = 1\n img[-1, :] = 1\n images.append(img)\n z1[0, ind] = z1[0, ind] + delta if z[0, ind] < 0 else z1[0, ind\n ] - delta\n hor_traversal.append(np.concatenate(images, axis=1))\n traversal = np.concatenate(hor_traversal, axis=0)\n if plot:\n plt.figure(figsize=(14, 24))\n plt.axis('off')\n plt.imshow(traversal)\n plt.show()\n return traversal\n",
"step-4": "import random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torchvision\n\n\ndef plot_image(img, ax, title):\n ax.imshow(np.transpose(img, (1, 2, 0)), interpolation='nearest')\n ax.set_title(title, fontsize=20)\n\n\ndef to_numpy(image, vsc):\n return torchvision.utils.make_grid(image.view(1, vsc.channels, vsc.\n height, vsc.width)).cpu().detach().numpy()\n\n\ndef plot_encoding(image, vsc, latent_sz, alpha=None, width=1 / 7):\n image = vsc.transform(image).to(vsc.device)\n decoded_params = vsc.model.forward(image)\n z = vsc.model.reparameterize(*decoded_params[1:])\n img = vsc.inverse_transform(vsc.model.decode(z))\n z = z.cpu().detach().numpy()[0]\n fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(14, 5))\n plot_image(to_numpy(image, vsc), ax0, 'Input Image')\n ax1.bar(np.arange(latent_sz), height=z, width=width, align='center')\n ax1.scatter(np.arange(latent_sz), z, color='blue')\n if alpha is not None:\n title = 'Latent Dimension %d - $\\\\alpha$ = %.2f ' % (latent_sz, alpha)\n else:\n title = 'Latent Dimension %d' % latent_sz\n ax1.set_title(title, fontsize=20)\n plot_image(to_numpy(img, vsc), ax2, 'Decoded Image')\n plt.subplots_adjust(hspace=0.5)\n\n\ndef plot_encoding_tcvae(image, vae, latent_sz, alpha=1, width=1 / 7):\n xs, x_params, zs, z_params = vae.reconstruct_img(image.to('cuda'))\n img = xs.cpu()[0]\n z = zs.cpu().detach().numpy()[0]\n fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(14, 5))\n plot_image(to_numpy(image, vae), ax0, 'Input Image')\n ax1.bar(np.arange(latent_sz), height=z, width=width, align='center')\n ax1.scatter(np.arange(latent_sz), z, color='blue')\n ax1.set_title('Latent Dimension %d - $\\\\alpha$ = %.2f ' % (latent_sz,\n alpha), fontsize=20)\n plot_image(to_numpy(img, vae), ax2, 'Decoded Image')\n plt.subplots_adjust(hspace=0.5)\n\n\ndef plot_horizontal_traversal(image, vsc, latent_sz, length, delta,\n threshold=0.0001, plot_all=False, plot_list=None, width=1 / 4,\n 
n_indices=15, plot=True):\n image = vsc.transform(image).to(vsc.device)\n decoded_params = vsc.model.forward(image)\n z = vsc.model.reparameterize(*decoded_params[1:])\n img = vsc.inverse_transform(vsc.model.decode(z))\n z_ = z.cpu().detach().numpy()[0]\n if plot:\n plt.bar(np.arange(latent_sz), height=z_, width=width, align='center')\n plt.scatter(np.arange(latent_sz), z_, color='blue')\n plt.show()\n non_zero = [i for i in range(latent_sz) if np.abs(z_[i]) > threshold]\n inds = np.random.choice(non_zero, n_indices)\n if plot:\n print(inds)\n if not plot_all:\n non_zero = inds\n if plot_list:\n non_zero = plot_list\n if plot:\n print(non_zero)\n hor_traversal = []\n for ind in non_zero:\n images = []\n z1 = z.clone()\n for i in range(length):\n img = to_numpy(vsc.model.decode(z1), vsc)\n img = np.transpose(img, (1, 2, 0))\n img[:, 0] = 1\n img[:, -1] = 1\n img[0, :] = 1\n img[-1, :] = 1\n images.append(img)\n z1[0, ind] = z1[0, ind] + delta if z[0, ind] < 0 else z1[0, ind\n ] - delta\n hor_traversal.append(np.concatenate(images, axis=1))\n traversal = np.concatenate(hor_traversal, axis=0)\n if plot:\n plt.figure(figsize=(14, 24))\n plt.axis('off')\n plt.imshow(traversal)\n plt.show()\n return traversal\n",
"step-5": "import random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torchvision\n\ndef plot_image(img, ax, title):\n ax.imshow(np.transpose(img, (1,2,0)) , interpolation='nearest')\n ax.set_title(title, fontsize=20)\n \ndef to_numpy(image, vsc):\n return torchvision.utils.make_grid(\n image.view(1, vsc.channels, vsc.height, vsc.width)\n ).cpu().detach().numpy()\n \ndef plot_encoding(image, vsc, latent_sz, alpha=None, width=1/7):\n image = vsc.transform(image).to(vsc.device)\n # decoded, mu, logvar, logspike = vsc.model.forward(image)\n decoded_params = vsc.model.forward(image)\n z = vsc.model.reparameterize(*decoded_params[1:])\n img = vsc.inverse_transform(vsc.model.decode(z))\n z = z.cpu().detach().numpy()[0]\n \n fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(14,5))\n \n plot_image(to_numpy(image, vsc), ax0, 'Input Image')\n \n ax1.bar(np.arange(latent_sz), height=z, width=width, align='center')\n ax1.scatter(np.arange(latent_sz), z, color='blue')\n if alpha is not None:\n title = r\"Latent Dimension %d - $\\alpha$ = %.2f \" % (latent_sz, alpha)\n else:\n title = r\"Latent Dimension %d\" % (latent_sz)\n ax1.set_title(title, fontsize=20)\n \n plot_image(to_numpy(img, vsc), ax2, 'Decoded Image')\n plt.subplots_adjust(hspace=0.5)\n\n \ndef plot_encoding_tcvae(image, vae, latent_sz, alpha=1, width=1/7):\n xs, x_params, zs, z_params = vae.reconstruct_img(image.to('cuda'))\n img = xs.cpu()[0]\n z = zs.cpu().detach().numpy()[0]\n \n fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(14,5))\n \n plot_image(to_numpy(image, vae), ax0, 'Input Image')\n \n ax1.bar(np.arange(latent_sz), height=z, width=width, align='center')\n ax1.scatter(np.arange(latent_sz), z, color='blue')\n ax1.set_title(r\"Latent Dimension %d - $\\alpha$ = %.2f \" % \\\n (latent_sz, alpha), fontsize=20)\n \n plot_image(to_numpy(img, vae), ax2, 'Decoded Image')\n plt.subplots_adjust(hspace=0.5)\n\n \n \ndef plot_horizontal_traversal(image, vsc, latent_sz, 
length, \n delta, threshold=1e-4, plot_all=False, \n plot_list=None, width=1/4, n_indices=15, plot=True):\n image = vsc.transform(image).to(vsc.device)\n # decoded, mu, logvar, logspike = vsc.model.forward(image)\n decoded_params = vsc.model.forward(image)\n z = vsc.model.reparameterize(*decoded_params[1:])\n img = vsc.inverse_transform(vsc.model.decode(z))\n z_ = z.cpu().detach().numpy()[0]\n \n if plot:\n plt.bar(np.arange(latent_sz), height=z_, width=width, align='center')\n plt.scatter(np.arange(latent_sz), z_, color='blue')\n plt.show()\n \n non_zero = [i for i in range(latent_sz) if np.abs(z_[i]) > threshold]\n inds = np.random.choice(non_zero, n_indices)\n if plot:\n print(inds)\n \n if not plot_all:\n non_zero = inds # [ind]\n if plot_list:\n non_zero = plot_list\n if plot: \n print(non_zero)\n \n hor_traversal = []\n for ind in non_zero:\n images = []\n z1 = z.clone()\n for i in range(length):\n img = to_numpy(vsc.model.decode(z1), vsc)\n img = np.transpose(img, (1,2,0))\n img[:,0] = 1\n img[:,-1] = 1\n img[0,:] = 1\n img[-1,:] = 1\n images.append(img)\n z1[0, ind] = z1[0, ind] + delta if z[0,ind] < 0 else z1[0, ind] - delta\n\n hor_traversal.append(np.concatenate(images, axis=1))\n traversal = np.concatenate(hor_traversal, axis=0)\n if plot:\n plt.figure(figsize=(14,24))\n plt.axis('off')\n plt.imshow(traversal)\n plt.show()\n return traversal",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# Seed script: (re)create the Cars table in test.db with three sample rows.
import sqlite3 as lite
import sys

con = lite.connect("test.db")

with con:
    cur = con.cursor()
    # Drop any previous copy so the script is idempotent; the original
    # failed with "table Cars already exists" on a second run.
    cur.execute('''DROP TABLE IF EXISTS Cars''')
    cur.execute('''CREATE TABLE Cars(Id INT, Name TEXT, Price INT)''')
    # Parameterized bulk insert instead of three literal INSERT statements.
    cur.executemany('''INSERT INTO Cars VALUES(?, ?, ?)''',
                    [(1, 'car1', 10), (2, 'car2', 20), (3, 'car3', 30)])
|
normal
|
{
"blob_id": "db22e568c86f008c9882181f5c1d88d5bca28570",
"index": 5416,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith con:\n cur = con.cursor()\n cur.execute('CREATE TABLE Cars(Id INT, Name TEXT, Price INT)')\n cur.execute(\"INSERT INTO Cars VALUES(1, 'car1', 10)\")\n cur.execute(\"INSERT INTO Cars VALUES(2, 'car2', 20)\")\n cur.execute(\"INSERT INTO Cars VALUES(3, 'car3', 30)\")\n",
"step-3": "<mask token>\ncon = lite.connect('test.db')\nwith con:\n cur = con.cursor()\n cur.execute('CREATE TABLE Cars(Id INT, Name TEXT, Price INT)')\n cur.execute(\"INSERT INTO Cars VALUES(1, 'car1', 10)\")\n cur.execute(\"INSERT INTO Cars VALUES(2, 'car2', 20)\")\n cur.execute(\"INSERT INTO Cars VALUES(3, 'car3', 30)\")\n",
"step-4": "import sqlite3 as lite\nimport sys\ncon = lite.connect('test.db')\nwith con:\n cur = con.cursor()\n cur.execute('CREATE TABLE Cars(Id INT, Name TEXT, Price INT)')\n cur.execute(\"INSERT INTO Cars VALUES(1, 'car1', 10)\")\n cur.execute(\"INSERT INTO Cars VALUES(2, 'car2', 20)\")\n cur.execute(\"INSERT INTO Cars VALUES(3, 'car3', 30)\")\n",
"step-5": "import sqlite3 as lite\nimport sys\n\ncon = lite.connect(\"test.db\")\n\nwith con:\n\n cur = con.cursor()\n \n cur.execute('''CREATE TABLE Cars(Id INT, Name TEXT, Price INT)''')\n cur.execute('''INSERT INTO Cars VALUES(1, 'car1', 10)''')\n cur.execute('''INSERT INTO Cars VALUES(2, 'car2', 20)''')\n cur.execute('''INSERT INTO Cars VALUES(3, 'car3', 30)''')\n\n\n\n \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@app.route('/')
def index():
return render_template('index.html')
@app.route('/result', methods=['POST'])
def result():
form_data = request.form
sentence = form_data['sentence']
output = get_result(sentence)
return render_template('result.html', result=output)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/')
def index():
return render_template('index.html')
@app.route('/result', methods=['POST'])
def result():
form_data = request.form
sentence = form_data['sentence']
output = get_result(sentence)
return render_template('result.html', result=output)
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/result', methods=['POST'])
def result():
form_data = request.form
sentence = form_data['sentence']
output = get_result(sentence)
return render_template('result.html', result=output)
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
from flask import Flask, request, render_template
from utils import get_result
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/result', methods=['POST'])
def result():
form_data = request.form
sentence = form_data['sentence']
output = get_result(sentence)
return render_template('result.html', result=output)
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
# Minimal Flask front end: a form page that submits a sentence to
# get_result (imported from the project-local utils module) and shows
# the outcome on a result page.
from flask import Flask, request, render_template
from utils import get_result
app = Flask(__name__)  # WSGI application object
@app.route('/')
def index():
    """Serve the landing page (index.html) containing the input form."""
    return render_template('index.html')
@app.route("/result", methods=["POST"])
def result():
    """Handle the form POST and render the computed result.

    Reads the 'sentence' field from the submitted form, passes it to
    get_result (presumably the inference entry point in utils — verify
    there), and renders result.html with its output.
    """
    form_data = request.form
    sentence = form_data['sentence']
    output = get_result(sentence)
    return render_template('result.html', result=output)
if __name__ == '__main__':
    app.run(debug=True)  # development server; debug=True is not for production
|
flexible
|
{
"blob_id": "264da5a2ab7d5c311d8a59b06c81ea2156cefd76",
"index": 9627,
"step-1": "<mask token>\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/result', methods=['POST'])\ndef result():\n form_data = request.form\n sentence = form_data['sentence']\n output = get_result(sentence)\n return render_template('result.html', result=output)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/result', methods=['POST'])\ndef result():\n form_data = request.form\n sentence = form_data['sentence']\n output = get_result(sentence)\n return render_template('result.html', result=output)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/result', methods=['POST'])\ndef result():\n form_data = request.form\n sentence = form_data['sentence']\n output = get_result(sentence)\n return render_template('result.html', result=output)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "from flask import Flask, request, render_template\nfrom utils import get_result\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/result', methods=['POST'])\ndef result():\n form_data = request.form\n sentence = form_data['sentence']\n output = get_result(sentence)\n return render_template('result.html', result=output)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "from flask import Flask, request, render_template\n\nfrom utils import get_result\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route(\"/result\", methods=[\"POST\"])\ndef result():\n form_data = request.form\n sentence = form_data['sentence']\n output = get_result(sentence)\n return render_template('result.html', result=output)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
"""
Definition of SegmentTreeNode:
"""
class SegmentTreeNode:
def __init__(self, start, end):
self.start, self.end = start, end
self.left, self.right = None, None
class Solution:
"""
@param: start: start value.
@param: end: end value.
@return: The root of Segment Tree.
"""
def build(self, start, end):
# write your code here
if start > end:
return None
root = SegmentTreeNode(start, end)
if start == end:
return root
else:
root.left = Solution.build(start, start, (start + end)//2)
root.right = Solution.build(start, (start + end)//2 + 1, end)
return root
|
normal
|
{
"blob_id": "5e20a517131f7a372d701548e4f370766a84ba52",
"index": 6134,
"step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n \"\"\"\n @param: start: start value.\n @param: end: end value.\n @return: The root of Segment Tree.\n \"\"\"\n\n def build(self, start, end):\n if start > end:\n return None\n root = SegmentTreeNode(start, end)\n if start == end:\n return root\n else:\n root.left = Solution.build(start, start, (start + end) // 2)\n root.right = Solution.build(start, (start + end) // 2 + 1, end)\n return root\n",
"step-3": "<mask token>\n\n\nclass SegmentTreeNode:\n <mask token>\n\n\nclass Solution:\n \"\"\"\n @param: start: start value.\n @param: end: end value.\n @return: The root of Segment Tree.\n \"\"\"\n\n def build(self, start, end):\n if start > end:\n return None\n root = SegmentTreeNode(start, end)\n if start == end:\n return root\n else:\n root.left = Solution.build(start, start, (start + end) // 2)\n root.right = Solution.build(start, (start + end) // 2 + 1, end)\n return root\n",
"step-4": "<mask token>\n\n\nclass SegmentTreeNode:\n\n def __init__(self, start, end):\n self.start, self.end = start, end\n self.left, self.right = None, None\n\n\nclass Solution:\n \"\"\"\n @param: start: start value.\n @param: end: end value.\n @return: The root of Segment Tree.\n \"\"\"\n\n def build(self, start, end):\n if start > end:\n return None\n root = SegmentTreeNode(start, end)\n if start == end:\n return root\n else:\n root.left = Solution.build(start, start, (start + end) // 2)\n root.right = Solution.build(start, (start + end) // 2 + 1, end)\n return root\n",
"step-5": "\"\"\"\nDefinition of SegmentTreeNode:\n\"\"\"\nclass SegmentTreeNode:\n def __init__(self, start, end):\n self.start, self.end = start, end\n self.left, self.right = None, None\n\n\nclass Solution:\n \"\"\"\n @param: start: start value.\n @param: end: end value.\n @return: The root of Segment Tree.\n \"\"\"\n def build(self, start, end):\n # write your code here\n if start > end:\n return None\n root = SegmentTreeNode(start, end)\n if start == end:\n return root\n else:\n root.left = Solution.build(start, start, (start + end)//2)\n root.right = Solution.build(start, (start + end)//2 + 1, end)\n return root",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def IsPrime(x):
for i in range(2, int(x ** 0.5) + 1):
if not x % i:
return False
return True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def IsPrime(x):
for i in range(2, int(x ** 0.5) + 1):
if not x % i:
return False
return True
for x in stdin:
x = x[:-1]
y = x[::-1]
a = IsPrime(int(x))
b = IsPrime(int(y))
if not a:
print('%s is not prime.' % x)
elif a and not b or a and x == y:
print('%s is prime.' % x)
else:
print('%s is emirp.' % x)
<|reserved_special_token_1|>
from sys import stdin
def IsPrime(x):
for i in range(2, int(x ** 0.5) + 1):
if not x % i:
return False
return True
for x in stdin:
x = x[:-1]
y = x[::-1]
a = IsPrime(int(x))
b = IsPrime(int(y))
if not a:
print('%s is not prime.' % x)
elif a and not b or a and x == y:
print('%s is prime.' % x)
else:
print('%s is emirp.' % x)
<|reserved_special_token_1|>
from sys import stdin
def IsPrime(x):
    """Return True if x is a prime number.

    Trial division up to sqrt(x).  Fixed: the original returned True for
    0 and 1 (and negatives), which are not prime.
    """
    if x < 2:  # 0, 1, and negative numbers are not prime
        return False
    for i in range(2, int(x ** 0.5) + 1):
        if x % i == 0:  # i divides x evenly -> composite
            return False
    return True
# Classify each number read from stdin (one per line):
#   "not prime" - the number itself is composite,
#   "prime"     - prime, but its digit reversal is not a distinct prime,
#   "emirp"     - prime whose digit reversal is a different prime.
for x in stdin:
    x = x[:-1]  # drop the trailing newline, keeping the digits as a string
    y = x[::-1]  # digit-reversed form
    a = IsPrime(int(x))
    b = IsPrime(int(y))
    if not a:
        print("%s is not prime." %x)
    elif (a and not b) or (a and x == y):
        # prime, but the reversal is composite or is the same palindrome
        print("%s is prime." %x)
    else:
        print("%s is emirp." %x)
|
flexible
|
{
"blob_id": "fcfec521e071aa586febc74efb2deb0e9d0a331e",
"index": 3358,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef IsPrime(x):\n for i in range(2, int(x ** 0.5) + 1):\n if not x % i:\n return False\n return True\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef IsPrime(x):\n for i in range(2, int(x ** 0.5) + 1):\n if not x % i:\n return False\n return True\n\n\nfor x in stdin:\n x = x[:-1]\n y = x[::-1]\n a = IsPrime(int(x))\n b = IsPrime(int(y))\n if not a:\n print('%s is not prime.' % x)\n elif a and not b or a and x == y:\n print('%s is prime.' % x)\n else:\n print('%s is emirp.' % x)\n",
"step-4": "from sys import stdin\n\n\ndef IsPrime(x):\n for i in range(2, int(x ** 0.5) + 1):\n if not x % i:\n return False\n return True\n\n\nfor x in stdin:\n x = x[:-1]\n y = x[::-1]\n a = IsPrime(int(x))\n b = IsPrime(int(y))\n if not a:\n print('%s is not prime.' % x)\n elif a and not b or a and x == y:\n print('%s is prime.' % x)\n else:\n print('%s is emirp.' % x)\n",
"step-5": "from sys import stdin\n\ndef IsPrime(x):\n for i in range(2, int(x ** 0.5) + 1):\n if not x % i:\n return False\n \n return True\n\nfor x in stdin:\n x = x[:-1]\n y = x[::-1]\n a = IsPrime(int(x))\n b = IsPrime(int(y))\n if not a:\n print(\"%s is not prime.\" %x)\n elif (a and not b) or (a and x == y):\n print(\"%s is prime.\" %x)\n else:\n print(\"%s is emirp.\" %x)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# vim:fileencoding=utf-8:noet
from __future__ import absolute_import, unicode_literals, print_function
import os
BINDINGS_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'bindings')
TMUX_CONFIG_DIRECTORY = os.path.join(BINDINGS_DIRECTORY, 'tmux')
DEFAULT_SYSTEM_CONFIG_DIR = None
|
normal
|
{
"blob_id": "c435b0f162512bb2bc0c35e1817f64c5ef9ff7bc",
"index": 1871,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nBINDINGS_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)\n ), 'bindings')\nTMUX_CONFIG_DIRECTORY = os.path.join(BINDINGS_DIRECTORY, 'tmux')\nDEFAULT_SYSTEM_CONFIG_DIR = None\n",
"step-3": "from __future__ import absolute_import, unicode_literals, print_function\nimport os\nBINDINGS_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)\n ), 'bindings')\nTMUX_CONFIG_DIRECTORY = os.path.join(BINDINGS_DIRECTORY, 'tmux')\nDEFAULT_SYSTEM_CONFIG_DIR = None\n",
"step-4": "# vim:fileencoding=utf-8:noet\n\nfrom __future__ import absolute_import, unicode_literals, print_function\nimport os\n\nBINDINGS_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'bindings')\nTMUX_CONFIG_DIRECTORY = os.path.join(BINDINGS_DIRECTORY, 'tmux')\nDEFAULT_SYSTEM_CONFIG_DIR = None\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
plt.close('all')
<|reserved_special_token_0|>
axs[0].plot(t, s_t)
axs[0].grid('True')
axs[0].set_title('Señal Original')
axs[0].set_ylim(-0.2, 0.2)
axs[0].set_ylabel('[V]')
axs[1].plot(t, r_t)
axs[1].grid('True')
axs[1].set_title('Ruido Interferente')
axs[1].set_ylabel('[V]')
axs[1].set_xlim(0)
axs[2].plot(t, s_t + r_t)
axs[2].grid('True')
axs[2].set_title('Señal a Filtrar')
axs[2].set_ylabel('[V]')
axs[2].set_xlim(0)
axs[3].plot(t, s_filtrada)
axs[3].grid('True')
axs[3].set_title('Señal Filtrada')
axs[3].set_xlabel('t[seg]')
axs[3].set_ylabel('[V]')
axs[3].set_ylim(-0.2, 0.2)
axs[3].set_xlim(0)
bodePlot(my_tf_lp, 'Filtro Prototipo - Low Pass')
pzmap(my_tf_lp)
bodePlot(my_tf_hp, 'Filtro Destino - High Pass')
pzmap(my_tf_hp)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
plt.close('all')
t0 = 0.0
tf = 0.005
dt = 5e-05
t = np.arange(t0, tf, dt)
f_t = 45 * 10 ** 3
w_t = 2 * np.pi * f_t
A_t = 0.2
s_t = A_t * np.sin(w_t * t)
f_r = 12 * 10 ** 3
w_r = 2 * np.pi * f_r
A_r = 2
r_t = A_r * np.sin(w_r * t)
sgnal = s_t + r_t
R1 = 290
C1 = 3.5e-09
L1 = 0.0035
k1 = 1
R2 = 700
C2 = 3.5e-09
L2 = 0.00103
k2 = 1
alfa_max = 0.9
alfa_min = 54
wp_hp = w_t
ws_hp = w_r
wp_hp_norm = wp_hp / wp_hp
ws_hp_norm = ws_hp / wp_hp
w0 = np.sqrt(1 / (L1 * C1))
wp_lp_norm = abs(-1 / wp_hp_norm)
ws_lp_norm = abs(-1 / ws_hp_norm)
eps = np.sqrt(10 ** (alfa_max / 10) - 1)
N = np.arccosh(np.sqrt((10 ** (alfa_min / 10) - 1) / eps ** 2)) / np.arccosh(
ws_lp_norm)
N = np.ceil(N)
den1_lp = [1, 0.29, 1]
den2_lp = [1, 0.7, 0.29]
p1_lp = np.roots(den1_lp)
p2_lp = np.roots(den2_lp)
my_z_lp = np.array([])
my_p_lp = np.concatenate((p1_lp, p2_lp), axis=None)
my_k_lp = 1 * 0.29
NUM, DEN = sig.zpk2tf(my_z_lp, my_p_lp, my_k_lp)
NUM_lp, DEN_lp = sig.lp2lp(NUM, DEN, w0)
my_tf_lp = transf_f(NUM_lp, DEN_lp)
NUM_hp, DEN_hp = sig.lp2hp(NUM, DEN, w0)
my_tf_hp = transf_f(NUM_hp, DEN_hp)
my_z_hp, my_p_hp, my_k_hp = sig.tf2zpk(NUM_hp, DEN_hp)
t, s_filtrada, x = sig.lsim2(my_tf_hp, sgnal, t)
fig1, axs = plt.subplots(4, 1)
axs[0].plot(t, s_t)
axs[0].grid('True')
axs[0].set_title('Señal Original')
axs[0].set_ylim(-0.2, 0.2)
axs[0].set_ylabel('[V]')
axs[1].plot(t, r_t)
axs[1].grid('True')
axs[1].set_title('Ruido Interferente')
axs[1].set_ylabel('[V]')
axs[1].set_xlim(0)
axs[2].plot(t, s_t + r_t)
axs[2].grid('True')
axs[2].set_title('Señal a Filtrar')
axs[2].set_ylabel('[V]')
axs[2].set_xlim(0)
axs[3].plot(t, s_filtrada)
axs[3].grid('True')
axs[3].set_title('Señal Filtrada')
axs[3].set_xlabel('t[seg]')
axs[3].set_ylabel('[V]')
axs[3].set_ylim(-0.2, 0.2)
axs[3].set_xlim(0)
bodePlot(my_tf_lp, 'Filtro Prototipo - Low Pass')
pzmap(my_tf_lp)
bodePlot(my_tf_hp, 'Filtro Destino - High Pass')
pzmap(my_tf_hp)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# High-pass filter exercise: a 45 kHz tone of interest is corrupted by a
# 12 kHz interfering tone.  A normalized low-pass prototype is designed,
# transformed to high-pass, applied to the composite signal, and plotted.
import numpy as np
from scipy.signal import TransferFunction as transf_f
import scipy.signal as sig
from splane import bodePlot, pzmap
from matplotlib import pyplot as plt
plt.close('all')
# Simulation time axis: 0 to 5 ms in 50 us steps.
t0 = 0.0
tf = 0.005
dt = 5e-05
t = np.arange(t0, tf, dt)
# Tone of interest: 45 kHz, 200 mV amplitude.
f_t = 45 * 10 ** 3
w_t = 2 * np.pi * f_t
A_t = 0.2
s_t = A_t * np.sin(w_t * t)
# Interfering tone: 12 kHz, 2 V amplitude.
f_r = 12 * 10 ** 3
w_r = 2 * np.pi * f_r
A_r = 2
r_t = A_r * np.sin(w_r * t)
sgnal = s_t + r_t  # composite signal to be filtered
# Component values of the passive realization (two stages).
R1 = 290
C1 = 3.5e-09
L1 = 0.0035
k1 = 1
R2 = 700
C2 = 3.5e-09
L2 = 0.00103
k2 = 1
# Attenuation template [dB]: maximum in the pass band, minimum in the stop band.
alfa_max = 0.9
alfa_min = 54
# High-pass template edges normalized to the pass-band frequency.
wp_hp = w_t
ws_hp = w_r
wp_hp_norm = wp_hp / wp_hp
ws_hp_norm = ws_hp / wp_hp
w0 = np.sqrt(1 / (L1 * C1))  # resonance frequency of the L1/C1 stage
# Low-pass prototype template obtained via the s -> -1/s high-pass mapping.
wp_lp_norm = abs(-1 / wp_hp_norm)
ws_lp_norm = abs(-1 / ws_hp_norm)
# Filter order estimate from the template (arccosh/Chebyshev-type formula).
eps = np.sqrt(10 ** (alfa_max / 10) - 1)
N = np.arccosh(np.sqrt((10 ** (alfa_min / 10) - 1) / eps ** 2)) / np.arccosh(
    ws_lp_norm)
N = np.ceil(N)
# Fourth-order prototype expressed as two quadratic denominator factors.
den1_lp = [1, 0.29, 1]
den2_lp = [1, 0.7, 0.29]
p1_lp = np.roots(den1_lp)
p2_lp = np.roots(den2_lp)
my_z_lp = np.array([])  # all-pole prototype: no finite zeros
my_p_lp = np.concatenate((p1_lp, p2_lp), axis=None)
my_k_lp = 1 * 0.29  # gain matching the product of the denominator constants
NUM, DEN = sig.zpk2tf(my_z_lp, my_p_lp, my_k_lp)
# Denormalized low-pass (prototype) and high-pass (target) transfer functions.
NUM_lp, DEN_lp = sig.lp2lp(NUM, DEN, w0)
my_tf_lp = transf_f(NUM_lp, DEN_lp)
NUM_hp, DEN_hp = sig.lp2hp(NUM, DEN, w0)
my_tf_hp = transf_f(NUM_hp, DEN_hp)
my_z_hp, my_p_hp, my_k_hp = sig.tf2zpk(NUM_hp, DEN_hp)
# Time-domain simulation of the high-pass filter on the composite signal.
t, s_filtrada, x = sig.lsim2(my_tf_hp, sgnal, t)
# Four stacked plots: original tone, interference, composite, filtered output.
# NOTE(review): grid('True') passes a truthy string; grid(True) was likely meant.
fig1, axs = plt.subplots(4, 1)
axs[0].plot(t, s_t)
axs[0].grid('True')
axs[0].set_title('Señal Original')
axs[0].set_ylim(-0.2, 0.2)
axs[0].set_ylabel('[V]')
axs[1].plot(t, r_t)
axs[1].grid('True')
axs[1].set_title('Ruido Interferente')
axs[1].set_ylabel('[V]')
axs[1].set_xlim(0)
axs[2].plot(t, s_t + r_t)
axs[2].grid('True')
axs[2].set_title('Señal a Filtrar')
axs[2].set_ylabel('[V]')
axs[2].set_xlim(0)
axs[3].plot(t, s_filtrada)
axs[3].grid('True')
axs[3].set_title('Señal Filtrada')
axs[3].set_xlabel('t[seg]')
axs[3].set_ylabel('[V]')
axs[3].set_ylim(-0.2, 0.2)
axs[3].set_xlim(0)
# Frequency responses and pole-zero maps of prototype and target filters.
bodePlot(my_tf_lp, 'Filtro Prototipo - Low Pass')
pzmap(my_tf_lp)
bodePlot(my_tf_hp, 'Filtro Destino - High Pass')
pzmap(my_tf_hp)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Sat May 2 21:31:37 2020
@author: Emmanuel Torres Molina
"""
"""
Ejercicio 10 del TP2 de Teoría de los Circuitos II:
Un tono de 45 KHz y 200 mV de amplitud es distorsionada por un tono de 12 KHz
y 2V de amplitud. Diseñar un filtro pasa altos que atenúe la señal
interferente, de tal forma que el remanente no sea mayor que el 2% de los 200 mV.
La ganancia en alta frecuencia deberá ser de 0 db y la máxima atenuación
en la banda de paso menor a 1 dB. Emplear la aproximación que necesite menor
número de etapas.
En este caso el Filtro está Sintetizado por un Estructura RLC Pasiva + RL Pasivo.
"""
import numpy as np
from scipy.signal import TransferFunction as transf_f
import scipy.signal as sig
from splane import bodePlot, pzmap
from matplotlib import pyplot as plt
plt.close ('all')
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Vector Tiempo:
t0 = 0.0 # Tiempo Inicial
tf = 0.005
dt = 0.00005 # Incremento
t = np.arange (t0, tf, dt)
# ---------------------------------------------------------------------------
# Tono de Interés:
f_t = 45 * 10**3 # Frecuecia del Tono de mi Interés [Hz]
w_t = 2 * np.pi * f_t # [rad/seg]
A_t = 0.2 # Amplitud de mi Tono [V]
s_t = A_t * np.sin ( w_t * t )
# ---------------------------------------------------------------------------
# Ruido Interferente:
f_r = 12 * 10**3 # Frecuencia del Ruido Interferente [Hz]
w_r = 2 * np.pi * f_r # [rad/seg]
A_r= 2 # Amplitud del Ruido [V]
r_t = A_r * np.sin ( w_r * t )
sgnal = s_t + r_t
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Valores de los Elementos del Circuito:
# Etapa 1: RLC Pasivo
R1 = 290
C1 = 3.5e-9
L1 = 3.5e-3
k1 = 1
# Etapa 2: RL Pasivo
R2 = 700
C2 = 3.5e-9
L2 = 1.03e-3
k2 = 1
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Diseño del Filtro: Vamos a Realizar un Filtro High-Pass:
# Requisitos de Plantilla
alfa_max = 0.9 # Piden que sea menor a 1dB
alfa_min = 54 # el remanente no sea mayor que el 2% de los 200 mV
wp_hp = w_t
ws_hp = w_r
# Normalizo las Pulsaciones Angulares usando como norma: wp_hp
wp_hp_norm = wp_hp / wp_hp
ws_hp_norm = ws_hp / wp_hp
w0 = np.sqrt ( 1 / (L1*C1) )
# ---------------------------------------------------------------------------
# Filtro Prototipo Low-Pass: Transformación en Frecuencia: w_HP = -1 / w_LP
wp_lp_norm = abs(-1 / wp_hp_norm)
ws_lp_norm = abs(-1 / ws_hp_norm)
# Voy a Utilizar Aproximación de Chebyshev para Diseñal el Filtro:
eps = np.sqrt ( (10 **(alfa_max/10) ) - 1 )
# Orden del Filtro
N = np.arccosh ( np.sqrt ( (10**(alfa_min/10) - 1) / eps**2 ) ) / np.arccosh (ws_lp_norm)
N = np.ceil ( N ) # Redondeo para arriba
den1_lp = [1, 0.29, 1]
den2_lp = [1, 0.7, 0.29]
p1_lp = np.roots ( den1_lp )
p2_lp = np.roots ( den2_lp )
my_z_lp = np.array ([])
my_p_lp = np.concatenate ( (p1_lp, p2_lp), axis = None )
my_k_lp = 1 * 0.29
NUM, DEN = sig.zpk2tf ( my_z_lp, my_p_lp, my_k_lp )
NUM_lp, DEN_lp = sig.lp2lp ( NUM, DEN, w0 )
my_tf_lp = transf_f (NUM_lp,DEN_lp)
# ---------------------------------------------------------------------------
# Filtro Destino - Filtro High-Pass:
# Calculo W0:
NUM_hp, DEN_hp = sig.lp2hp ( NUM, DEN, w0 )
my_tf_hp = transf_f ( NUM_hp, DEN_hp )
my_z_hp, my_p_hp, my_k_hp = sig.tf2zpk (NUM_hp, DEN_hp )
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Filtrado de la Señal:
t, s_filtrada, x = sig.lsim2 ((my_tf_hp), sgnal, t )
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Ploteo de las Señales, Respuesta en Frecuencia, etc.
fig1, axs = plt.subplots(4,1)
axs[0].plot ( t, s_t )
axs[0].grid ('True')
axs[0].set_title ('Señal Original')
axs[0].set_ylim(-0.2,0.2)
axs[0].set_ylabel('[V]')
axs[1].plot ( t, r_t )
axs[1].grid ('True')
axs[1].set_title ('Ruido Interferente')
axs[1].set_ylabel('[V]')
axs[1].set_xlim(0)
axs[2].plot (t, s_t + r_t )
axs[2].grid ('True')
axs[2].set_title ('Señal a Filtrar')
axs[2].set_ylabel('[V]')
axs[2].set_xlim(0)
axs[3].plot (t, s_filtrada )
axs[3].grid ('True')
axs[3].set_title ( 'Señal Filtrada' )
axs[3].set_xlabel ('t[seg]')
axs[3].set_ylabel('[V]')
axs[3].set_ylim(-0.2,0.2)
axs[3].set_xlim(0)
# Respuesta en Frecuencia:
bodePlot (my_tf_lp, 'Filtro Prototipo - Low Pass')
pzmap (my_tf_lp)
bodePlot (my_tf_hp, 'Filtro Destino - High Pass')
pzmap (my_tf_hp)
|
flexible
|
{
"blob_id": "dd59f3b1d8b17defe4e7f30fec594d01475319d2",
"index": 6211,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.close('all')\n<mask token>\naxs[0].plot(t, s_t)\naxs[0].grid('True')\naxs[0].set_title('Señal Original')\naxs[0].set_ylim(-0.2, 0.2)\naxs[0].set_ylabel('[V]')\naxs[1].plot(t, r_t)\naxs[1].grid('True')\naxs[1].set_title('Ruido Interferente')\naxs[1].set_ylabel('[V]')\naxs[1].set_xlim(0)\naxs[2].plot(t, s_t + r_t)\naxs[2].grid('True')\naxs[2].set_title('Señal a Filtrar')\naxs[2].set_ylabel('[V]')\naxs[2].set_xlim(0)\naxs[3].plot(t, s_filtrada)\naxs[3].grid('True')\naxs[3].set_title('Señal Filtrada')\naxs[3].set_xlabel('t[seg]')\naxs[3].set_ylabel('[V]')\naxs[3].set_ylim(-0.2, 0.2)\naxs[3].set_xlim(0)\nbodePlot(my_tf_lp, 'Filtro Prototipo - Low Pass')\npzmap(my_tf_lp)\nbodePlot(my_tf_hp, 'Filtro Destino - High Pass')\npzmap(my_tf_hp)\n",
"step-3": "<mask token>\nplt.close('all')\nt0 = 0.0\ntf = 0.005\ndt = 5e-05\nt = np.arange(t0, tf, dt)\nf_t = 45 * 10 ** 3\nw_t = 2 * np.pi * f_t\nA_t = 0.2\ns_t = A_t * np.sin(w_t * t)\nf_r = 12 * 10 ** 3\nw_r = 2 * np.pi * f_r\nA_r = 2\nr_t = A_r * np.sin(w_r * t)\nsgnal = s_t + r_t\nR1 = 290\nC1 = 3.5e-09\nL1 = 0.0035\nk1 = 1\nR2 = 700\nC2 = 3.5e-09\nL2 = 0.00103\nk2 = 1\nalfa_max = 0.9\nalfa_min = 54\nwp_hp = w_t\nws_hp = w_r\nwp_hp_norm = wp_hp / wp_hp\nws_hp_norm = ws_hp / wp_hp\nw0 = np.sqrt(1 / (L1 * C1))\nwp_lp_norm = abs(-1 / wp_hp_norm)\nws_lp_norm = abs(-1 / ws_hp_norm)\neps = np.sqrt(10 ** (alfa_max / 10) - 1)\nN = np.arccosh(np.sqrt((10 ** (alfa_min / 10) - 1) / eps ** 2)) / np.arccosh(\n ws_lp_norm)\nN = np.ceil(N)\nden1_lp = [1, 0.29, 1]\nden2_lp = [1, 0.7, 0.29]\np1_lp = np.roots(den1_lp)\np2_lp = np.roots(den2_lp)\nmy_z_lp = np.array([])\nmy_p_lp = np.concatenate((p1_lp, p2_lp), axis=None)\nmy_k_lp = 1 * 0.29\nNUM, DEN = sig.zpk2tf(my_z_lp, my_p_lp, my_k_lp)\nNUM_lp, DEN_lp = sig.lp2lp(NUM, DEN, w0)\nmy_tf_lp = transf_f(NUM_lp, DEN_lp)\nNUM_hp, DEN_hp = sig.lp2hp(NUM, DEN, w0)\nmy_tf_hp = transf_f(NUM_hp, DEN_hp)\nmy_z_hp, my_p_hp, my_k_hp = sig.tf2zpk(NUM_hp, DEN_hp)\nt, s_filtrada, x = sig.lsim2(my_tf_hp, sgnal, t)\nfig1, axs = plt.subplots(4, 1)\naxs[0].plot(t, s_t)\naxs[0].grid('True')\naxs[0].set_title('Señal Original')\naxs[0].set_ylim(-0.2, 0.2)\naxs[0].set_ylabel('[V]')\naxs[1].plot(t, r_t)\naxs[1].grid('True')\naxs[1].set_title('Ruido Interferente')\naxs[1].set_ylabel('[V]')\naxs[1].set_xlim(0)\naxs[2].plot(t, s_t + r_t)\naxs[2].grid('True')\naxs[2].set_title('Señal a Filtrar')\naxs[2].set_ylabel('[V]')\naxs[2].set_xlim(0)\naxs[3].plot(t, s_filtrada)\naxs[3].grid('True')\naxs[3].set_title('Señal Filtrada')\naxs[3].set_xlabel('t[seg]')\naxs[3].set_ylabel('[V]')\naxs[3].set_ylim(-0.2, 0.2)\naxs[3].set_xlim(0)\nbodePlot(my_tf_lp, 'Filtro Prototipo - Low Pass')\npzmap(my_tf_lp)\nbodePlot(my_tf_hp, 'Filtro Destino - High 
Pass')\npzmap(my_tf_hp)\n",
"step-4": "<mask token>\nimport numpy as np\nfrom scipy.signal import TransferFunction as transf_f\nimport scipy.signal as sig\nfrom splane import bodePlot, pzmap\nfrom matplotlib import pyplot as plt\nplt.close('all')\nt0 = 0.0\ntf = 0.005\ndt = 5e-05\nt = np.arange(t0, tf, dt)\nf_t = 45 * 10 ** 3\nw_t = 2 * np.pi * f_t\nA_t = 0.2\ns_t = A_t * np.sin(w_t * t)\nf_r = 12 * 10 ** 3\nw_r = 2 * np.pi * f_r\nA_r = 2\nr_t = A_r * np.sin(w_r * t)\nsgnal = s_t + r_t\nR1 = 290\nC1 = 3.5e-09\nL1 = 0.0035\nk1 = 1\nR2 = 700\nC2 = 3.5e-09\nL2 = 0.00103\nk2 = 1\nalfa_max = 0.9\nalfa_min = 54\nwp_hp = w_t\nws_hp = w_r\nwp_hp_norm = wp_hp / wp_hp\nws_hp_norm = ws_hp / wp_hp\nw0 = np.sqrt(1 / (L1 * C1))\nwp_lp_norm = abs(-1 / wp_hp_norm)\nws_lp_norm = abs(-1 / ws_hp_norm)\neps = np.sqrt(10 ** (alfa_max / 10) - 1)\nN = np.arccosh(np.sqrt((10 ** (alfa_min / 10) - 1) / eps ** 2)) / np.arccosh(\n ws_lp_norm)\nN = np.ceil(N)\nden1_lp = [1, 0.29, 1]\nden2_lp = [1, 0.7, 0.29]\np1_lp = np.roots(den1_lp)\np2_lp = np.roots(den2_lp)\nmy_z_lp = np.array([])\nmy_p_lp = np.concatenate((p1_lp, p2_lp), axis=None)\nmy_k_lp = 1 * 0.29\nNUM, DEN = sig.zpk2tf(my_z_lp, my_p_lp, my_k_lp)\nNUM_lp, DEN_lp = sig.lp2lp(NUM, DEN, w0)\nmy_tf_lp = transf_f(NUM_lp, DEN_lp)\nNUM_hp, DEN_hp = sig.lp2hp(NUM, DEN, w0)\nmy_tf_hp = transf_f(NUM_hp, DEN_hp)\nmy_z_hp, my_p_hp, my_k_hp = sig.tf2zpk(NUM_hp, DEN_hp)\nt, s_filtrada, x = sig.lsim2(my_tf_hp, sgnal, t)\nfig1, axs = plt.subplots(4, 1)\naxs[0].plot(t, s_t)\naxs[0].grid('True')\naxs[0].set_title('Señal Original')\naxs[0].set_ylim(-0.2, 0.2)\naxs[0].set_ylabel('[V]')\naxs[1].plot(t, r_t)\naxs[1].grid('True')\naxs[1].set_title('Ruido Interferente')\naxs[1].set_ylabel('[V]')\naxs[1].set_xlim(0)\naxs[2].plot(t, s_t + r_t)\naxs[2].grid('True')\naxs[2].set_title('Señal a Filtrar')\naxs[2].set_ylabel('[V]')\naxs[2].set_xlim(0)\naxs[3].plot(t, s_filtrada)\naxs[3].grid('True')\naxs[3].set_title('Señal 
Filtrada')\naxs[3].set_xlabel('t[seg]')\naxs[3].set_ylabel('[V]')\naxs[3].set_ylim(-0.2, 0.2)\naxs[3].set_xlim(0)\nbodePlot(my_tf_lp, 'Filtro Prototipo - Low Pass')\npzmap(my_tf_lp)\nbodePlot(my_tf_hp, 'Filtro Destino - High Pass')\npzmap(my_tf_hp)\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 2 21:31:37 2020\r\n\r\n@author: Emmanuel Torres Molina\r\n\"\"\"\r\n\r\n\"\"\"\r\nEjercicio 10 del TP2 de Teoría de los Circuitos II:\r\nUn tono de 45 KHz y 200 mV de amplitud es distorsionada por un tono de 12 KHz \r\ny 2V de amplitud. Diseñar un filtro pasa altos que atenúe la señal\r\ninterferente, de tal forma que el remanente no sea mayor que el 2% de los 200 mV.\r\nLa ganancia en alta frecuencia deberá ser de 0 db y la máxima atenuación\r\nen la banda de paso menor a 1 dB. Emplear la aproximación que necesite menor \r\nnúmero de etapas.\r\nEn este caso el Filtro está Sintetizado por un Estructura RLC Pasiva + RL Pasivo.\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom scipy.signal import TransferFunction as transf_f\r\nimport scipy.signal as sig\r\nfrom splane import bodePlot, pzmap\r\nfrom matplotlib import pyplot as plt\r\n\r\nplt.close ('all')\r\n\r\n# ---------------------------------------------------------------------------\r\n# ---------------------------------------------------------------------------\r\n\r\n# Vector Tiempo:\r\nt0 = 0.0 # Tiempo Inicial\r\ntf = 0.005\r\ndt = 0.00005 # Incremento\r\nt = np.arange (t0, tf, dt)\r\n\r\n# ---------------------------------------------------------------------------\r\n\r\n# Tono de Interés:\r\n\r\nf_t = 45 * 10**3 # Frecuecia del Tono de mi Interés [Hz]\r\nw_t = 2 * np.pi * f_t # [rad/seg]\r\nA_t = 0.2 # Amplitud de mi Tono [V]\r\n\r\ns_t = A_t * np.sin ( w_t * t )\r\n\r\n# ---------------------------------------------------------------------------\r\n\r\n# Ruido Interferente:\r\n\r\nf_r = 12 * 10**3 # Frecuencia del Ruido Interferente [Hz]\r\nw_r = 2 * np.pi * f_r # [rad/seg]\r\nA_r= 2 # Amplitud del Ruido [V]\r\n\r\nr_t = A_r * np.sin ( w_r * t )\r\n\r\nsgnal = s_t + r_t\r\n\r\n# ---------------------------------------------------------------------------\r\n# ---------------------------------------------------------------------------\r\n\r\n# 
Valores de los Elementos del Circuito:\r\n \r\n# Etapa 1: RLC Pasivo\r\nR1 = 290\r\nC1 = 3.5e-9\r\nL1 = 3.5e-3\r\nk1 = 1\r\n\r\n# Etapa 2: RL Pasivo\r\nR2 = 700\r\nC2 = 3.5e-9\r\nL2 = 1.03e-3\r\nk2 = 1\r\n\r\n\r\n# ---------------------------------------------------------------------------\r\n# ---------------------------------------------------------------------------\r\n\r\n# Diseño del Filtro: Vamos a Realizar un Filtro High-Pass:\r\n \r\n# Requisitos de Plantilla\r\nalfa_max = 0.9 # Piden que sea menor a 1dB\r\nalfa_min = 54 # el remanente no sea mayor que el 2% de los 200 mV\r\nwp_hp = w_t\r\nws_hp = w_r\r\n\r\n# Normalizo las Pulsaciones Angulares usando como norma: wp_hp\r\nwp_hp_norm = wp_hp / wp_hp\r\nws_hp_norm = ws_hp / wp_hp\r\n\r\nw0 = np.sqrt ( 1 / (L1*C1) )\r\n\r\n# ---------------------------------------------------------------------------\r\n\r\n# Filtro Prototipo Low-Pass: Transformación en Frecuencia: w_HP = -1 / w_LP\r\nwp_lp_norm = abs(-1 / wp_hp_norm)\r\nws_lp_norm = abs(-1 / ws_hp_norm)\r\n\r\n\r\n# Voy a Utilizar Aproximación de Chebyshev para Diseñal el Filtro:\r\n\r\neps = np.sqrt ( (10 **(alfa_max/10) ) - 1 )\r\n\r\n# Orden del Filtro\r\nN = np.arccosh ( np.sqrt ( (10**(alfa_min/10) - 1) / eps**2 ) ) / np.arccosh (ws_lp_norm)\r\nN = np.ceil ( N ) # Redondeo para arriba\r\n\r\nden1_lp = [1, 0.29, 1]\r\nden2_lp = [1, 0.7, 0.29]\r\n\r\np1_lp = np.roots ( den1_lp )\r\np2_lp = np.roots ( den2_lp )\r\n\r\nmy_z_lp = np.array ([])\r\nmy_p_lp = np.concatenate ( (p1_lp, p2_lp), axis = None )\r\nmy_k_lp = 1 * 0.29\r\n\r\nNUM, DEN = sig.zpk2tf ( my_z_lp, my_p_lp, my_k_lp )\r\nNUM_lp, DEN_lp = sig.lp2lp ( NUM, DEN, w0 )\r\n\r\nmy_tf_lp = transf_f (NUM_lp,DEN_lp)\r\n\r\n# ---------------------------------------------------------------------------\r\n\r\n# Filtro Destino - Filtro High-Pass:\r\n \r\n# Calculo W0:\r\n\r\nNUM_hp, DEN_hp = sig.lp2hp ( NUM, DEN, w0 )\r\n\r\nmy_tf_hp = transf_f ( NUM_hp, DEN_hp )\r\n\r\nmy_z_hp, my_p_hp, my_k_hp = sig.tf2zpk 
(NUM_hp, DEN_hp )\r\n\r\n# ---------------------------------------------------------------------------\r\n# ---------------------------------------------------------------------------\r\n\r\n# Filtrado de la Señal:\r\n \r\nt, s_filtrada, x = sig.lsim2 ((my_tf_hp), sgnal, t )\r\n\r\n# ---------------------------------------------------------------------------\r\n# ---------------------------------------------------------------------------\r\n\r\n# Ploteo de las Señales, Respuesta en Frecuencia, etc.\r\n\r\nfig1, axs = plt.subplots(4,1)\r\n\r\naxs[0].plot ( t, s_t )\r\naxs[0].grid ('True')\r\naxs[0].set_title ('Señal Original')\r\naxs[0].set_ylim(-0.2,0.2)\r\naxs[0].set_ylabel('[V]')\r\n\r\naxs[1].plot ( t, r_t )\r\naxs[1].grid ('True')\r\naxs[1].set_title ('Ruido Interferente')\r\naxs[1].set_ylabel('[V]')\r\naxs[1].set_xlim(0)\r\n\r\naxs[2].plot (t, s_t + r_t )\r\naxs[2].grid ('True')\r\naxs[2].set_title ('Señal a Filtrar')\r\naxs[2].set_ylabel('[V]')\r\naxs[2].set_xlim(0)\r\n\r\naxs[3].plot (t, s_filtrada )\r\naxs[3].grid ('True')\r\naxs[3].set_title ( 'Señal Filtrada' )\r\naxs[3].set_xlabel ('t[seg]')\r\naxs[3].set_ylabel('[V]')\r\naxs[3].set_ylim(-0.2,0.2)\r\naxs[3].set_xlim(0)\r\n\r\n# Respuesta en Frecuencia:\r\nbodePlot (my_tf_lp, 'Filtro Prototipo - Low Pass')\r\npzmap (my_tf_lp)\r\n\r\nbodePlot (my_tf_hp, 'Filtro Destino - High Pass')\r\npzmap (my_tf_hp)\r\n\r\n\r\n\r\n\r\n\r\n \r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''
This module demonstrates how to use some functionality of python built-in csv module
'''
import csv
def csv_usage():
'''
This function demonstrates how to use csv module to read and write csv files
'''
with open('example.csv', 'r', newline='') as csvfile:
reader_c = csv.reader(csvfile, delimiter=';')
for row in reader_c:
print(', '.join(row))
with open('new-2.csv', 'w', newline='') as csvfile:
writer_c = csv.writer(csvfile, delimiter=',')
writer_c.writerow(['Name', 'Age', 'City'])
writer_c.writerow(['Joe', '25', 'Miami'])
writer_c.writerow(['Nick', '21', 'Mexico'])
if __name__ == '__main__':
csv_usage()
|
normal
|
{
"blob_id": "bcc2977f36ecc775f44ae4251ce230af9abf63ba",
"index": 7362,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef csv_usage():\n \"\"\"\n This function demonstrates how to use csv module to read and write csv files\n \"\"\"\n with open('example.csv', 'r', newline='') as csvfile:\n reader_c = csv.reader(csvfile, delimiter=';')\n for row in reader_c:\n print(', '.join(row))\n with open('new-2.csv', 'w', newline='') as csvfile:\n writer_c = csv.writer(csvfile, delimiter=',')\n writer_c.writerow(['Name', 'Age', 'City'])\n writer_c.writerow(['Joe', '25', 'Miami'])\n writer_c.writerow(['Nick', '21', 'Mexico'])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef csv_usage():\n \"\"\"\n This function demonstrates how to use csv module to read and write csv files\n \"\"\"\n with open('example.csv', 'r', newline='') as csvfile:\n reader_c = csv.reader(csvfile, delimiter=';')\n for row in reader_c:\n print(', '.join(row))\n with open('new-2.csv', 'w', newline='') as csvfile:\n writer_c = csv.writer(csvfile, delimiter=',')\n writer_c.writerow(['Name', 'Age', 'City'])\n writer_c.writerow(['Joe', '25', 'Miami'])\n writer_c.writerow(['Nick', '21', 'Mexico'])\n\n\nif __name__ == '__main__':\n csv_usage()\n",
"step-4": "<mask token>\nimport csv\n\n\ndef csv_usage():\n \"\"\"\n This function demonstrates how to use csv module to read and write csv files\n \"\"\"\n with open('example.csv', 'r', newline='') as csvfile:\n reader_c = csv.reader(csvfile, delimiter=';')\n for row in reader_c:\n print(', '.join(row))\n with open('new-2.csv', 'w', newline='') as csvfile:\n writer_c = csv.writer(csvfile, delimiter=',')\n writer_c.writerow(['Name', 'Age', 'City'])\n writer_c.writerow(['Joe', '25', 'Miami'])\n writer_c.writerow(['Nick', '21', 'Mexico'])\n\n\nif __name__ == '__main__':\n csv_usage()\n",
"step-5": "'''\nThis module demonstrates how to use some functionality of python built-in csv module\n'''\nimport csv\n\ndef csv_usage():\n '''\n This function demonstrates how to use csv module to read and write csv files\n '''\n with open('example.csv', 'r', newline='') as csvfile:\n reader_c = csv.reader(csvfile, delimiter=';')\n for row in reader_c:\n print(', '.join(row))\n\n with open('new-2.csv', 'w', newline='') as csvfile:\n writer_c = csv.writer(csvfile, delimiter=',')\n writer_c.writerow(['Name', 'Age', 'City'])\n writer_c.writerow(['Joe', '25', 'Miami'])\n writer_c.writerow(['Nick', '21', 'Mexico'])\n\nif __name__ == '__main__':\n csv_usage()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def sieve(limit):
ans = []
a = [1] * limit
a[0] = a[1] = 0
for i in range(2, limit):
if a[i] == 0:
continue
ans.append(i)
for j in range(i * i, limit, i):
a[j] = 0
return ans
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def sieve(limit):
ans = []
a = [1] * limit
a[0] = a[1] = 0
for i in range(2, limit):
if a[i] == 0:
continue
ans.append(i)
for j in range(i * i, limit, i):
a[j] = 0
return ans
<|reserved_special_token_0|>
for i in range(9, N, 2):
if i in ps:
continue
found = False
for j in p[1:]:
if j > i:
break
q = (i - j) // 2
if is_square(q):
found = True
break
if not found:
print(i)
break
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def sieve(limit):
ans = []
a = [1] * limit
a[0] = a[1] = 0
for i in range(2, limit):
if a[i] == 0:
continue
ans.append(i)
for j in range(i * i, limit, i):
a[j] = 0
return ans
is_square = lambda x: int(math.sqrt(x) + 1e-09) ** 2 == x
N = 10 ** 6
p = sieve(N)
ps = set(p)
for i in range(9, N, 2):
if i in ps:
continue
found = False
for j in p[1:]:
if j > i:
break
q = (i - j) // 2
if is_square(q):
found = True
break
if not found:
print(i)
break
<|reserved_special_token_1|>
import math
def sieve(limit):
ans = []
a = [1] * limit
a[0] = a[1] = 0
for i in range(2, limit):
if a[i] == 0:
continue
ans.append(i)
for j in range(i * i, limit, i):
a[j] = 0
return ans
is_square = lambda x: int(math.sqrt(x) + 1e-09) ** 2 == x
N = 10 ** 6
p = sieve(N)
ps = set(p)
for i in range(9, N, 2):
if i in ps:
continue
found = False
for j in p[1:]:
if j > i:
break
q = (i - j) // 2
if is_square(q):
found = True
break
if not found:
print(i)
break
<|reserved_special_token_1|>
import math
def sieve(limit):
ans = []
a = [1] * limit
a[0] = a[1] = 0
for i in range(2, limit):
if a[i] == 0:
continue
ans.append(i)
for j in range(i*i, limit, i):
a[j] = 0;
return ans
is_square = lambda x: int(math.sqrt(x) + 1e-9) ** 2 == x
N = 10 ** 6
p = sieve(N)
ps = set(p)
for i in range(9, N, 2):
if i in ps:
continue
found = False
for j in p[1:]:
if j > i:
break
q = (i - j) // 2
if is_square(q):
found = True
break
if not found:
print(i)
break
|
flexible
|
{
"blob_id": "ff6dc347637a81c9f6a541775646b4901d719790",
"index": 9478,
"step-1": "<mask token>\n\n\ndef sieve(limit):\n ans = []\n a = [1] * limit\n a[0] = a[1] = 0\n for i in range(2, limit):\n if a[i] == 0:\n continue\n ans.append(i)\n for j in range(i * i, limit, i):\n a[j] = 0\n return ans\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef sieve(limit):\n ans = []\n a = [1] * limit\n a[0] = a[1] = 0\n for i in range(2, limit):\n if a[i] == 0:\n continue\n ans.append(i)\n for j in range(i * i, limit, i):\n a[j] = 0\n return ans\n\n\n<mask token>\nfor i in range(9, N, 2):\n if i in ps:\n continue\n found = False\n for j in p[1:]:\n if j > i:\n break\n q = (i - j) // 2\n if is_square(q):\n found = True\n break\n if not found:\n print(i)\n break\n",
"step-3": "<mask token>\n\n\ndef sieve(limit):\n ans = []\n a = [1] * limit\n a[0] = a[1] = 0\n for i in range(2, limit):\n if a[i] == 0:\n continue\n ans.append(i)\n for j in range(i * i, limit, i):\n a[j] = 0\n return ans\n\n\nis_square = lambda x: int(math.sqrt(x) + 1e-09) ** 2 == x\nN = 10 ** 6\np = sieve(N)\nps = set(p)\nfor i in range(9, N, 2):\n if i in ps:\n continue\n found = False\n for j in p[1:]:\n if j > i:\n break\n q = (i - j) // 2\n if is_square(q):\n found = True\n break\n if not found:\n print(i)\n break\n",
"step-4": "import math\n\n\ndef sieve(limit):\n ans = []\n a = [1] * limit\n a[0] = a[1] = 0\n for i in range(2, limit):\n if a[i] == 0:\n continue\n ans.append(i)\n for j in range(i * i, limit, i):\n a[j] = 0\n return ans\n\n\nis_square = lambda x: int(math.sqrt(x) + 1e-09) ** 2 == x\nN = 10 ** 6\np = sieve(N)\nps = set(p)\nfor i in range(9, N, 2):\n if i in ps:\n continue\n found = False\n for j in p[1:]:\n if j > i:\n break\n q = (i - j) // 2\n if is_square(q):\n found = True\n break\n if not found:\n print(i)\n break\n",
"step-5": "import math\n\ndef sieve(limit):\n ans = []\n a = [1] * limit\n a[0] = a[1] = 0\n for i in range(2, limit):\n if a[i] == 0:\n continue\n ans.append(i)\n for j in range(i*i, limit, i):\n a[j] = 0;\n return ans\n\nis_square = lambda x: int(math.sqrt(x) + 1e-9) ** 2 == x\n\nN = 10 ** 6\n\np = sieve(N)\nps = set(p)\nfor i in range(9, N, 2):\n if i in ps:\n continue\n found = False\n for j in p[1:]:\n if j > i:\n break\n q = (i - j) // 2\n if is_square(q):\n found = True\n break\n if not found:\n print(i)\n break\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class DuplicateEnvironmentTest(unittest.TestCase):
<|reserved_special_token_0|>
def test_num_envs(self):
num_envs = 5
env = DuplicateEnvironment(make_vec_env(num_envs))
self.assertEqual(env.num_envs, num_envs)
self.assertEqual((num_envs,), env.reset().shape)
def test_reset(self):
num_envs = 5
env = DuplicateEnvironment(make_vec_env(num_envs))
state = env.reset()
self.assertEqual(state.observation.shape, (num_envs, 4))
self.assertTrue((state.reward == torch.zeros(num_envs)).all())
self.assertTrue((state.done == torch.zeros(num_envs)).all())
self.assertTrue((state.mask == torch.ones(num_envs)).all())
def test_step(self):
num_envs = 5
env = DuplicateEnvironment(make_vec_env(num_envs))
env.reset()
state = env.step(torch.ones(num_envs, dtype=torch.int32))
self.assertEqual(state.observation.shape, (num_envs, 4))
self.assertTrue((state.reward == torch.ones(num_envs)).all())
self.assertTrue((state.done == torch.zeros(num_envs)).all())
self.assertTrue((state.mask == torch.ones(num_envs)).all())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DuplicateEnvironmentTest(unittest.TestCase):
def test_env_name(self):
env = DuplicateEnvironment(make_vec_env())
self.assertEqual(env.name, 'CartPole-v0')
def test_num_envs(self):
num_envs = 5
env = DuplicateEnvironment(make_vec_env(num_envs))
self.assertEqual(env.num_envs, num_envs)
self.assertEqual((num_envs,), env.reset().shape)
def test_reset(self):
num_envs = 5
env = DuplicateEnvironment(make_vec_env(num_envs))
state = env.reset()
self.assertEqual(state.observation.shape, (num_envs, 4))
self.assertTrue((state.reward == torch.zeros(num_envs)).all())
self.assertTrue((state.done == torch.zeros(num_envs)).all())
self.assertTrue((state.mask == torch.ones(num_envs)).all())
def test_step(self):
num_envs = 5
env = DuplicateEnvironment(make_vec_env(num_envs))
env.reset()
state = env.step(torch.ones(num_envs, dtype=torch.int32))
self.assertEqual(state.observation.shape, (num_envs, 4))
self.assertTrue((state.reward == torch.ones(num_envs)).all())
self.assertTrue((state.done == torch.zeros(num_envs)).all())
self.assertTrue((state.mask == torch.ones(num_envs)).all())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DuplicateEnvironmentTest(unittest.TestCase):
def test_env_name(self):
env = DuplicateEnvironment(make_vec_env())
self.assertEqual(env.name, 'CartPole-v0')
def test_num_envs(self):
num_envs = 5
env = DuplicateEnvironment(make_vec_env(num_envs))
self.assertEqual(env.num_envs, num_envs)
self.assertEqual((num_envs,), env.reset().shape)
def test_reset(self):
num_envs = 5
env = DuplicateEnvironment(make_vec_env(num_envs))
state = env.reset()
self.assertEqual(state.observation.shape, (num_envs, 4))
self.assertTrue((state.reward == torch.zeros(num_envs)).all())
self.assertTrue((state.done == torch.zeros(num_envs)).all())
self.assertTrue((state.mask == torch.ones(num_envs)).all())
def test_step(self):
num_envs = 5
env = DuplicateEnvironment(make_vec_env(num_envs))
env.reset()
state = env.step(torch.ones(num_envs, dtype=torch.int32))
self.assertEqual(state.observation.shape, (num_envs, 4))
self.assertTrue((state.reward == torch.ones(num_envs)).all())
self.assertTrue((state.done == torch.zeros(num_envs)).all())
self.assertTrue((state.mask == torch.ones(num_envs)).all())
def test_step_until_done(self):
num_envs = 3
env = DuplicateEnvironment(make_vec_env(num_envs))
env.seed(5)
env.reset()
for _ in range(100):
state = env.step(torch.ones(num_envs, dtype=torch.int32))
if state.done[0]:
break
self.assertEqual(state[0].observation.shape, (4,))
self.assertEqual(state[0].reward, 1.0)
self.assertTrue(state[0].done)
self.assertEqual(state[0].mask, 0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def make_vec_env(num_envs=3):
env = [GymEnvironment('CartPole-v0') for i in range(num_envs)]
return env
class DuplicateEnvironmentTest(unittest.TestCase):
def test_env_name(self):
env = DuplicateEnvironment(make_vec_env())
self.assertEqual(env.name, 'CartPole-v0')
def test_num_envs(self):
num_envs = 5
env = DuplicateEnvironment(make_vec_env(num_envs))
self.assertEqual(env.num_envs, num_envs)
self.assertEqual((num_envs,), env.reset().shape)
def test_reset(self):
num_envs = 5
env = DuplicateEnvironment(make_vec_env(num_envs))
state = env.reset()
self.assertEqual(state.observation.shape, (num_envs, 4))
self.assertTrue((state.reward == torch.zeros(num_envs)).all())
self.assertTrue((state.done == torch.zeros(num_envs)).all())
self.assertTrue((state.mask == torch.ones(num_envs)).all())
def test_step(self):
num_envs = 5
env = DuplicateEnvironment(make_vec_env(num_envs))
env.reset()
state = env.step(torch.ones(num_envs, dtype=torch.int32))
self.assertEqual(state.observation.shape, (num_envs, 4))
self.assertTrue((state.reward == torch.ones(num_envs)).all())
self.assertTrue((state.done == torch.zeros(num_envs)).all())
self.assertTrue((state.mask == torch.ones(num_envs)).all())
def test_step_until_done(self):
num_envs = 3
env = DuplicateEnvironment(make_vec_env(num_envs))
env.seed(5)
env.reset()
for _ in range(100):
state = env.step(torch.ones(num_envs, dtype=torch.int32))
if state.done[0]:
break
self.assertEqual(state[0].observation.shape, (4,))
self.assertEqual(state[0].reward, 1.0)
self.assertTrue(state[0].done)
self.assertEqual(state[0].mask, 0)
<|reserved_special_token_1|>
import unittest
import gym
import torch
from all.environments import DuplicateEnvironment, GymEnvironment
def make_vec_env(num_envs=3):
env = [GymEnvironment('CartPole-v0') for i in range(num_envs)]
return env
class DuplicateEnvironmentTest(unittest.TestCase):
def test_env_name(self):
env = DuplicateEnvironment(make_vec_env())
self.assertEqual(env.name, 'CartPole-v0')
def test_num_envs(self):
num_envs = 5
env = DuplicateEnvironment(make_vec_env(num_envs))
self.assertEqual(env.num_envs, num_envs)
self.assertEqual((num_envs,), env.reset().shape)
def test_reset(self):
num_envs = 5
env = DuplicateEnvironment(make_vec_env(num_envs))
state = env.reset()
self.assertEqual(state.observation.shape, (num_envs, 4))
self.assertTrue((state.reward == torch.zeros(num_envs, )).all())
self.assertTrue((state.done == torch.zeros(num_envs, )).all())
self.assertTrue((state.mask == torch.ones(num_envs, )).all())
def test_step(self):
num_envs = 5
env = DuplicateEnvironment(make_vec_env(num_envs))
env.reset()
state = env.step(torch.ones(num_envs, dtype=torch.int32))
self.assertEqual(state.observation.shape, (num_envs, 4))
self.assertTrue((state.reward == torch.ones(num_envs, )).all())
self.assertTrue((state.done == torch.zeros(num_envs, )).all())
self.assertTrue((state.mask == torch.ones(num_envs, )).all())
def test_step_until_done(self):
num_envs = 3
env = DuplicateEnvironment(make_vec_env(num_envs))
env.seed(5)
env.reset()
for _ in range(100):
state = env.step(torch.ones(num_envs, dtype=torch.int32))
if state.done[0]:
break
self.assertEqual(state[0].observation.shape, (4,))
self.assertEqual(state[0].reward, 1.)
self.assertTrue(state[0].done)
self.assertEqual(state[0].mask, 0)
|
flexible
|
{
"blob_id": "e01eced7c43aae354047fbf29028c601d1daae50",
"index": 9636,
"step-1": "<mask token>\n\n\nclass DuplicateEnvironmentTest(unittest.TestCase):\n <mask token>\n\n def test_num_envs(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n self.assertEqual(env.num_envs, num_envs)\n self.assertEqual((num_envs,), env.reset().shape)\n\n def test_reset(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n state = env.reset()\n self.assertEqual(state.observation.shape, (num_envs, 4))\n self.assertTrue((state.reward == torch.zeros(num_envs)).all())\n self.assertTrue((state.done == torch.zeros(num_envs)).all())\n self.assertTrue((state.mask == torch.ones(num_envs)).all())\n\n def test_step(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n env.reset()\n state = env.step(torch.ones(num_envs, dtype=torch.int32))\n self.assertEqual(state.observation.shape, (num_envs, 4))\n self.assertTrue((state.reward == torch.ones(num_envs)).all())\n self.assertTrue((state.done == torch.zeros(num_envs)).all())\n self.assertTrue((state.mask == torch.ones(num_envs)).all())\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DuplicateEnvironmentTest(unittest.TestCase):\n\n def test_env_name(self):\n env = DuplicateEnvironment(make_vec_env())\n self.assertEqual(env.name, 'CartPole-v0')\n\n def test_num_envs(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n self.assertEqual(env.num_envs, num_envs)\n self.assertEqual((num_envs,), env.reset().shape)\n\n def test_reset(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n state = env.reset()\n self.assertEqual(state.observation.shape, (num_envs, 4))\n self.assertTrue((state.reward == torch.zeros(num_envs)).all())\n self.assertTrue((state.done == torch.zeros(num_envs)).all())\n self.assertTrue((state.mask == torch.ones(num_envs)).all())\n\n def test_step(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n env.reset()\n state = env.step(torch.ones(num_envs, dtype=torch.int32))\n self.assertEqual(state.observation.shape, (num_envs, 4))\n self.assertTrue((state.reward == torch.ones(num_envs)).all())\n self.assertTrue((state.done == torch.zeros(num_envs)).all())\n self.assertTrue((state.mask == torch.ones(num_envs)).all())\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass DuplicateEnvironmentTest(unittest.TestCase):\n\n def test_env_name(self):\n env = DuplicateEnvironment(make_vec_env())\n self.assertEqual(env.name, 'CartPole-v0')\n\n def test_num_envs(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n self.assertEqual(env.num_envs, num_envs)\n self.assertEqual((num_envs,), env.reset().shape)\n\n def test_reset(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n state = env.reset()\n self.assertEqual(state.observation.shape, (num_envs, 4))\n self.assertTrue((state.reward == torch.zeros(num_envs)).all())\n self.assertTrue((state.done == torch.zeros(num_envs)).all())\n self.assertTrue((state.mask == torch.ones(num_envs)).all())\n\n def test_step(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n env.reset()\n state = env.step(torch.ones(num_envs, dtype=torch.int32))\n self.assertEqual(state.observation.shape, (num_envs, 4))\n self.assertTrue((state.reward == torch.ones(num_envs)).all())\n self.assertTrue((state.done == torch.zeros(num_envs)).all())\n self.assertTrue((state.mask == torch.ones(num_envs)).all())\n\n def test_step_until_done(self):\n num_envs = 3\n env = DuplicateEnvironment(make_vec_env(num_envs))\n env.seed(5)\n env.reset()\n for _ in range(100):\n state = env.step(torch.ones(num_envs, dtype=torch.int32))\n if state.done[0]:\n break\n self.assertEqual(state[0].observation.shape, (4,))\n self.assertEqual(state[0].reward, 1.0)\n self.assertTrue(state[0].done)\n self.assertEqual(state[0].mask, 0)\n",
"step-4": "<mask token>\n\n\ndef make_vec_env(num_envs=3):\n env = [GymEnvironment('CartPole-v0') for i in range(num_envs)]\n return env\n\n\nclass DuplicateEnvironmentTest(unittest.TestCase):\n\n def test_env_name(self):\n env = DuplicateEnvironment(make_vec_env())\n self.assertEqual(env.name, 'CartPole-v0')\n\n def test_num_envs(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n self.assertEqual(env.num_envs, num_envs)\n self.assertEqual((num_envs,), env.reset().shape)\n\n def test_reset(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n state = env.reset()\n self.assertEqual(state.observation.shape, (num_envs, 4))\n self.assertTrue((state.reward == torch.zeros(num_envs)).all())\n self.assertTrue((state.done == torch.zeros(num_envs)).all())\n self.assertTrue((state.mask == torch.ones(num_envs)).all())\n\n def test_step(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n env.reset()\n state = env.step(torch.ones(num_envs, dtype=torch.int32))\n self.assertEqual(state.observation.shape, (num_envs, 4))\n self.assertTrue((state.reward == torch.ones(num_envs)).all())\n self.assertTrue((state.done == torch.zeros(num_envs)).all())\n self.assertTrue((state.mask == torch.ones(num_envs)).all())\n\n def test_step_until_done(self):\n num_envs = 3\n env = DuplicateEnvironment(make_vec_env(num_envs))\n env.seed(5)\n env.reset()\n for _ in range(100):\n state = env.step(torch.ones(num_envs, dtype=torch.int32))\n if state.done[0]:\n break\n self.assertEqual(state[0].observation.shape, (4,))\n self.assertEqual(state[0].reward, 1.0)\n self.assertTrue(state[0].done)\n self.assertEqual(state[0].mask, 0)\n",
"step-5": "import unittest\nimport gym\nimport torch\nfrom all.environments import DuplicateEnvironment, GymEnvironment\n\n\ndef make_vec_env(num_envs=3):\n env = [GymEnvironment('CartPole-v0') for i in range(num_envs)]\n return env\n\n\nclass DuplicateEnvironmentTest(unittest.TestCase):\n def test_env_name(self):\n env = DuplicateEnvironment(make_vec_env())\n self.assertEqual(env.name, 'CartPole-v0')\n\n def test_num_envs(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n self.assertEqual(env.num_envs, num_envs)\n self.assertEqual((num_envs,), env.reset().shape)\n\n def test_reset(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n state = env.reset()\n self.assertEqual(state.observation.shape, (num_envs, 4))\n self.assertTrue((state.reward == torch.zeros(num_envs, )).all())\n self.assertTrue((state.done == torch.zeros(num_envs, )).all())\n self.assertTrue((state.mask == torch.ones(num_envs, )).all())\n\n def test_step(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n env.reset()\n state = env.step(torch.ones(num_envs, dtype=torch.int32))\n self.assertEqual(state.observation.shape, (num_envs, 4))\n self.assertTrue((state.reward == torch.ones(num_envs, )).all())\n self.assertTrue((state.done == torch.zeros(num_envs, )).all())\n self.assertTrue((state.mask == torch.ones(num_envs, )).all())\n\n def test_step_until_done(self):\n num_envs = 3\n env = DuplicateEnvironment(make_vec_env(num_envs))\n env.seed(5)\n env.reset()\n for _ in range(100):\n state = env.step(torch.ones(num_envs, dtype=torch.int32))\n if state.done[0]:\n break\n self.assertEqual(state[0].observation.shape, (4,))\n self.assertEqual(state[0].reward, 1.)\n self.assertTrue(state[0].done)\n self.assertEqual(state[0].mask, 0)\n",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GatesLineGeometry(FixtureTest):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GatesLineGeometry(FixtureTest):
def test_linear_gate(self):
self.load_fixtures(['http://www.openstreetmap.org/way/391260223'])
self.assert_has_feature(16, 10482, 25335, 'landuse', {'id':
391260223, 'kind': 'gate'})
<|reserved_special_token_1|>
from . import FixtureTest
class GatesLineGeometry(FixtureTest):
def test_linear_gate(self):
self.load_fixtures(['http://www.openstreetmap.org/way/391260223'])
self.assert_has_feature(16, 10482, 25335, 'landuse', {'id':
391260223, 'kind': 'gate'})
<|reserved_special_token_1|>
from . import FixtureTest
class GatesLineGeometry(FixtureTest):
def test_linear_gate(self):
# Add barrier:gates with line geometries in landuse
# Line barrier:ghate feature
self.load_fixtures(['http://www.openstreetmap.org/way/391260223'])
self.assert_has_feature(
16, 10482, 25335, 'landuse',
{'id': 391260223, 'kind': 'gate'})
|
flexible
|
{
"blob_id": "2192e328bdfa454ff1d1f66a05fb6a322c48b244",
"index": 2847,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass GatesLineGeometry(FixtureTest):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass GatesLineGeometry(FixtureTest):\n\n def test_linear_gate(self):\n self.load_fixtures(['http://www.openstreetmap.org/way/391260223'])\n self.assert_has_feature(16, 10482, 25335, 'landuse', {'id': \n 391260223, 'kind': 'gate'})\n",
"step-4": "from . import FixtureTest\n\n\nclass GatesLineGeometry(FixtureTest):\n\n def test_linear_gate(self):\n self.load_fixtures(['http://www.openstreetmap.org/way/391260223'])\n self.assert_has_feature(16, 10482, 25335, 'landuse', {'id': \n 391260223, 'kind': 'gate'})\n",
"step-5": "from . import FixtureTest\n\n\nclass GatesLineGeometry(FixtureTest):\n def test_linear_gate(self):\n # Add barrier:gates with line geometries in landuse\n # Line barrier:ghate feature\n self.load_fixtures(['http://www.openstreetmap.org/way/391260223'])\n\n self.assert_has_feature(\n 16, 10482, 25335, 'landuse',\n {'id': 391260223, 'kind': 'gate'})\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# pyOCD debugger
# Copyright (c) 2006-2013,2018 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..family.target_kinetis import Kinetis
from ..family.flash_kinetis import Flash_Kinetis
from ...core.memory_map import (FlashRegion, RamRegion, MemoryMap)
from ...debug.svd.loader import SVDFile
FLASH_ALGO = { 'load_address' : 0x20000000,
'instructions' : [
0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
0x09032200, 0xd373428b, 0x428b0a03, 0x0b03d358, 0xd33c428b, 0x428b0c03, 0xe012d321, 0x430b4603,
0x2200d47f, 0x428b0843, 0x0903d374, 0xd35f428b, 0x428b0a03, 0x0b03d344, 0xd328428b, 0x428b0c03,
0x22ffd30d, 0xba120209, 0x428b0c03, 0x1212d302, 0xd0650209, 0x428b0b03, 0xe000d319, 0x0bc30a09,
0xd301428b, 0x1ac003cb, 0x0b834152, 0xd301428b, 0x1ac0038b, 0x0b434152, 0xd301428b, 0x1ac0034b,
0x0b034152, 0xd301428b, 0x1ac0030b, 0x0ac34152, 0xd301428b, 0x1ac002cb, 0x0a834152, 0xd301428b,
0x1ac0028b, 0x0a434152, 0xd301428b, 0x1ac0024b, 0x0a034152, 0xd301428b, 0x1ac0020b, 0xd2cd4152,
0x428b09c3, 0x01cbd301, 0x41521ac0, 0x428b0983, 0x018bd301, 0x41521ac0, 0x428b0943, 0x014bd301,
0x41521ac0, 0x428b0903, 0x010bd301, 0x41521ac0, 0x428b08c3, 0x00cbd301, 0x41521ac0, 0x428b0883,
0x008bd301, 0x41521ac0, 0x428b0843, 0x004bd301, 0x41521ac0, 0xd2001a41, 0x41524601, 0x47704610,
0x0fcae05d, 0x4249d000, 0xd3001003, 0x40534240, 0x469c2200, 0x428b0903, 0x0a03d32d, 0xd312428b,
0x018922fc, 0x0a03ba12, 0xd30c428b, 0x11920189, 0xd308428b, 0x11920189, 0xd304428b, 0xd03a0189,
0xe0001192, 0x09c30989, 0xd301428b, 0x1ac001cb, 0x09834152, 0xd301428b, 0x1ac0018b, 0x09434152,
0xd301428b, 0x1ac0014b, 0x09034152, 0xd301428b, 0x1ac0010b, 0x08c34152, 0xd301428b, 0x1ac000cb,
0x08834152, 0xd301428b, 0x1ac0008b, 0xd2d94152, 0x428b0843, 0x004bd301, 0x41521ac0, 0xd2001a41,
0x46634601, 0x105b4152, 0xd3014610, 0x2b004240, 0x4249d500, 0x46634770, 0xd300105b, 0xb5014240,
0x46c02000, 0xbd0246c0, 0xb510480a, 0x44484908, 0xf8fcf000, 0xd1042800, 0x21004806, 0xf0004448,
0x4a05f9c9, 0x230168d1, 0x4319029b, 0xbd1060d1, 0x6b65666b, 0x00000004, 0xf0003000, 0x4c0cb570,
0x444c4605, 0x4b0b4601, 0x68e24620, 0xf8a4f000, 0xd1052800, 0x46292300, 0x68e24620, 0xf96ef000,
0x68ca4905, 0x029b2301, 0x60ca431a, 0x0000bd70, 0x00000004, 0x6b65666b, 0xf0003000, 0x4809b510,
0x81c14907, 0x81c14908, 0x08498801, 0x80010049, 0x44484806, 0xf8f2f000, 0xd0002800, 0xbd102001,
0x0000c520, 0x40052000, 0x0000d928, 0x00000004, 0x460cb570, 0x4606460b, 0x480d4601, 0x4615b084,
0xf0004448, 0x2800f903, 0x9001d10a, 0x21019002, 0x91004807, 0x4622462b, 0x44484631, 0xf978f000,
0x68ca4904, 0x029b2301, 0x60ca431a, 0xbd70b004, 0x00000004, 0xf0003000, 0x47702000, 0xd0082800,
0xd802290f, 0xd1042a04, 0x2913e005, 0x2a08d801, 0x2004d001, 0x20004770, 0x28004770, 0x2004d101,
0xb4104770, 0x460c1e5b, 0xd101421c, 0xd002421a, 0x2065bc10, 0x68034770, 0xd804428b, 0x18896840,
0x42881818, 0xbc10d202, 0x47702066, 0x2000bc10, 0x00004770, 0x42884903, 0x206bd001, 0x20004770,
0x00004770, 0x6b65666b, 0x2170480a, 0x21807001, 0x78017001, 0xd5fc0609, 0x06817800, 0x2067d501,
0x06c14770, 0x2068d501, 0x07c04770, 0x2069d0fc, 0x00004770, 0x40020000, 0x4605b5f8, 0x460c4616,
0xf7ff4618, 0x2800ffd7, 0x2304d12b, 0x46214632, 0xf7ff4628, 0x0007ffb2, 0x19a6d123, 0x68e91e76,
0x91004630, 0xfe2cf7ff, 0xd0032900, 0x1c409e00, 0x1e764346, 0xd81342b4, 0x4478480a, 0x60046800,
0x20094909, 0xf7ff71c8, 0x4607ffbf, 0x280069a8, 0x4780d000, 0xd1032f00, 0x190468e8, 0xd9eb42b4,
0xbdf84638, 0x0000027a, 0x40020000, 0x4604b510, 0xf7ff4608, 0x2800ff9f, 0x2c00d106, 0x4904d005,
0x71c82044, 0xffa0f7ff, 0x2004bd10, 0x0000bd10, 0x40020000, 0xd00c2800, 0xd00a2a00, 0xd21a2908,
0x447b000b, 0x18db791b, 0x0705449f, 0x0d0b0907, 0x2004110f, 0x68c04770, 0x6840e00a, 0x6880e008,
0x6800e006, 0x2001e004, 0x6900e002, 0x6940e000, 0x20006010, 0x206a4770, 0x00004770, 0xd00a2800,
0x68c9490f, 0x0e094a0f, 0x447a0049, 0x03095a51, 0x2064d103, 0x20044770, 0xb4104770, 0x60032300,
0x21016041, 0x02896081, 0x490760c1, 0x158a7a0c, 0x610240a2, 0x61837ac9, 0xbc106141, 0x47704618,
0x40048040, 0x000001aa, 0x40020020, 0xd1012a00, 0x47702004, 0x461cb5ff, 0x4615b081, 0x2304460e,
0x98014622, 0xff19f7ff, 0xd1190007, 0xd0162c00, 0x4478480c, 0x600e6801, 0x6800cd02, 0x490a6041,
0x71c82006, 0xff30f7ff, 0x98014607, 0x28006980, 0x4780d000, 0xd1022f00, 0x1f241d36, 0x4638d1e8,
0xbdf0b005, 0x00000162, 0x40020000, 0xd0022800, 0x20006181, 0x20044770, 0x00004770, 0xb081b5ff,
0x460e4614, 0x23044605, 0xfee7f7ff, 0xd12a2800, 0x686868a9, 0xfd64f7ff, 0x42719000, 0x40014240,
0x42b7424f, 0x9800d101, 0x2c00183f, 0x1bbdd01a, 0xd90042a5, 0x490d4625, 0x447908a8, 0x600e6809,
0x2201490b, 0x0a0271ca, 0x728872ca, 0x72489804, 0xfeeaf7ff, 0xd1062800, 0x1b649800, 0x183f1976,
0xd1e42c00, 0xb0052000, 0x0000bdf0, 0x000000da, 0x40020000, 0xd1012800, 0x47702004, 0x4803b510,
0x71c22240, 0xf7ff7181, 0xbd10fecf, 0x40020000, 0xd1012b00, 0x47702004, 0x461cb5f8, 0x460e4615,
0x9f082304, 0xfe99f7ff, 0xd1192800, 0xd0172d00, 0x447a4a0f, 0x60066810, 0x2102480e, 0x990671c1,
0x681172c1, 0x60886820, 0xfeaef7ff, 0xd0082800, 0x29009907, 0x600ed000, 0xd0012f00, 0x60392100,
0x1f2dbdf8, 0x1d361d24, 0xd1e12d00, 0x0000bdf8, 0x00000062, 0x40020000, 0x00040002, 0x00080000,
0x00100000, 0x00200000, 0x00400000, 0x00000000, 0x00000000, 0x00200000, 0x40020004, 0x00000000,
],
'pc_init' : 0x2000027D,
'pc_unInit': 0x200002F9,
'pc_program_page': 0x200002B1,
'pc_erase_sector': 0x2000023D,
'pc_eraseAll' : 0x20000209,
'static_base' : 0x20000000 + 0x00000020 + 0x0000063c,
'begin_stack' : 0x20000000 + 0x00000800,
'begin_data' : 0x20000000 + 0x00000A00,
'page_buffers' : [0x20000a00, 0x20001200], # Enable double buffering
'min_program_length' : 4,
'analyzer_supported' : True,
'analyzer_address' : 0x1ffff800
}
class KV11Z7(Kinetis):
MEMORY_MAP = MemoryMap(
FlashRegion( start=0, length=0x20000, blocksize=0x400, is_boot_memory=True,
algo=FLASH_ALGO, flash_class=Flash_Kinetis),
RamRegion( start=0x1ffff000, length=0x4000)
)
def __init__(self, session):
super(KV11Z7, self).__init__(session, self.MEMORY_MAP)
self._svd_location = SVDFile.from_builtin("MKV11Z7.svd")
|
normal
|
{
"blob_id": "58aa72588357b18ab42391dfffbf2a1b66589edd",
"index": 552,
"step-1": "<mask token>\n\n\nclass KV11Z7(Kinetis):\n <mask token>\n\n def __init__(self, session):\n super(KV11Z7, self).__init__(session, self.MEMORY_MAP)\n self._svd_location = SVDFile.from_builtin('MKV11Z7.svd')\n",
"step-2": "<mask token>\n\n\nclass KV11Z7(Kinetis):\n MEMORY_MAP = MemoryMap(FlashRegion(start=0, length=131072, blocksize=\n 1024, is_boot_memory=True, algo=FLASH_ALGO, flash_class=\n Flash_Kinetis), RamRegion(start=536866816, length=16384))\n\n def __init__(self, session):\n super(KV11Z7, self).__init__(session, self.MEMORY_MAP)\n self._svd_location = SVDFile.from_builtin('MKV11Z7.svd')\n",
"step-3": "<mask token>\nFLASH_ALGO = {'load_address': 536870912, 'instructions': [3758800384, \n 103643149, 604520552, 3539992640, 509886552, 474599930, 704650834, \n 1198576114, 151200256, 3547546251, 1116408323, 184800088, 3543941771, \n 1116408835, 3759330081, 1124812291, 570479743, 1116407875, 151245684, \n 3546235531, 1116408323, 184800068, 3542631051, 1116408835, 587191053, \n 3121742345, 1116408835, 303223554, 3496280585, 1116408579, 3758150425, \n 197331465, 3540075147, 448791499, 193151314, 3540075147, 448791435, \n 188957010, 3540075147, 448791371, 184762706, 3540075147, 448791307, \n 180568402, 3540075147, 448791243, 176374098, 3540075147, 448791179, \n 172179794, 3540075147, 448791115, 167985490, 3540075147, 448791051, \n 3536666962, 1116408259, 30135041, 1095899840, 1116408195, 25940737, \n 1095899840, 1116408131, 21746433, 1095899840, 1116408067, 17552129, \n 1095899840, 1116408003, 13357825, 1095899840, 1116407939, 9163521, \n 1095899840, 1116407875, 4969217, 1095899840, 3523222081, 1095910913, \n 1198540304, 264953949, 1112133632, 3539996675, 1079198272, 1184637440, \n 1116408067, 168022829, 3541189259, 25764604, 168016402, 3540796043, \n 294781321, 3540533899, 294781321, 3540271755, 3493462409, 3758100882, \n 163776905, 3540075147, 448790987, 159596882, 3540075147, 448790923, \n 155402578, 3540075147, 448790859, 151208274, 3540075147, 448790795, \n 147013970, 3540075147, 448790731, 142819666, 3540075147, 448790667, \n 3537453394, 1116407875, 4969217, 1095899840, 3523222081, 1180911105, \n 274415954, 3540076048, 721437248, 1112134912, 1180911472, 3539996763, \n 3036758592, 1186996224, 3171043008, 3037743114, 1145587976, 4177326080,\n 3506710528, 553666566, 4026549320, 1241905609, 587294929, 1125712539, \n 3171967185, 1801807467, 4, 4026544128, 1275901296, 1145849349, \n 1259030017, 1759659552, 4171558912, 3506776064, 1177101056, 1759659552,\n 4184797184, 1758087429, 43721473, 1623868186, 48496, 4, 1801807467, \n 4026544128, 1208595728, 2176928007, 
2176928008, 139036673, 2147549257, \n 1145587718, 4176670720, 3489671168, 3171950593, 50464, 1074077696, \n 55592, 4, 1175238000, 1174816267, 1208829441, 1175826564, 4026549320, \n 671152387, 2416038154, 553750530, 2432714759, 1176651307, 1145587249, \n 4185452544, 1758087428, 43721473, 1623868186, 3178278916, 4, 4026544128,\n 1198530560, 3490195456, 3624020239, 3506711044, 689168389, 705222657, \n 537186305, 536889200, 671106928, 537186561, 3020965744, 1175199323, \n 3506520604, 3489808922, 543538192, 1745045360, 3624157835, 411658304, \n 1116215320, 3155218946, 1198530662, 536919056, 18288, 1116227843, \n 543936513, 536889200, 18288, 1801807467, 561006602, 562065409, \n 2013360129, 3590063625, 109148160, 543675649, 113330032, 543741185, \n 130041712, 543805692, 18288, 1073872896, 1174779384, 1175209494, \n 4160701976, 671154135, 587518251, 1176585778, 4160701992, 524210, \n 430362915, 1760108150, 2432714288, 4264359935, 3489868032, 473996800, \n 511066950, 3625140916, 1148733450, 1610901504, 537479433, 4160713160, \n 1174929343, 671115688, 1199624192, 3506646784, 419719400, 3656073908, \n 3187164728, 634, 1073872896, 1174713616, 4160701960, 671154079, \n 738251014, 1225052165, 1908940868, 4288739327, 537181456, 48400, \n 1073872896, 3490457600, 3490327040, 3524929800, 1148911627, 417036571, \n 117785759, 218827015, 537137423, 1757431664, 1749082122, 1753276424, \n 1744887814, 536993796, 1761665026, 1765859328, 536895504, 543836016, \n 18288, 3490326528, 1758021903, 235489807, 1148846153, 50944593, \n 543478019, 537151344, 3020965744, 1610818304, 553738305, 42557569, \n 1225220289, 361396748, 1627537570, 1636006601, 3155190081, 1198540312, \n 1074036800, 426, 1073872928, 3506514432, 1198530564, 1176286719, \n 1175826561, 587482638, 2550220322, 4279891967, 3508076551, 3491113984, \n 1148733452, 1611556865, 1744882946, 1225416769, 1908940806, 4281399295,\n 2550220295, 671115648, 1199624192, 3506581248, 522460470, 1178128872, \n 3186667525, 354, 1073872896, 
3489802240, 536895873, 537151344, 18288, \n 2961290751, 1175340564, 587482629, 4276615167, 3509200896, 1751673001, \n 4251252735, 1114738688, 1073824320, 1119306319, 2550190337, 738203711, \n 465424410, 3640672933, 1225606693, 1148782760, 1611556873, 570509579, \n 167932362, 1921544906, 1917360132, 4276811775, 3506841600, 459577344, \n 406788470, 3521391616, 2953125888, 48624, 218, 1073872896, 3506513920, \n 1198530564, 1208202512, 1908548160, 4160713089, 3172007631, 1073872896,\n 3506514688, 1198530564, 1176286712, 1175340565, 2668110596, 4271503359,\n 3508086784, 3491179776, 1148865039, 1611032592, 553797646, 2567336385, \n 1745973953, 1619552288, 4272879615, 3490195456, 687905031, 1611583488, \n 3489738496, 1614356736, 523091448, 490085668, 3521195264, 48632, 98, \n 1073872896, 262146, 524288, 1048576, 2097152, 4194304, 0, 0, 2097152, \n 1073872900, 0], 'pc_init': 536871549, 'pc_unInit': 536871673,\n 'pc_program_page': 536871601, 'pc_erase_sector': 536871485,\n 'pc_eraseAll': 536871433, 'static_base': 536870912 + 32 + 1596,\n 'begin_stack': 536870912 + 2048, 'begin_data': 536870912 + 2560,\n 'page_buffers': [536873472, 536875520], 'min_program_length': 4,\n 'analyzer_supported': True, 'analyzer_address': 536868864}\n\n\nclass KV11Z7(Kinetis):\n MEMORY_MAP = MemoryMap(FlashRegion(start=0, length=131072, blocksize=\n 1024, is_boot_memory=True, algo=FLASH_ALGO, flash_class=\n Flash_Kinetis), RamRegion(start=536866816, length=16384))\n\n def __init__(self, session):\n super(KV11Z7, self).__init__(session, self.MEMORY_MAP)\n self._svd_location = SVDFile.from_builtin('MKV11Z7.svd')\n",
"step-4": "from ..family.target_kinetis import Kinetis\nfrom ..family.flash_kinetis import Flash_Kinetis\nfrom ...core.memory_map import FlashRegion, RamRegion, MemoryMap\nfrom ...debug.svd.loader import SVDFile\nFLASH_ALGO = {'load_address': 536870912, 'instructions': [3758800384, \n 103643149, 604520552, 3539992640, 509886552, 474599930, 704650834, \n 1198576114, 151200256, 3547546251, 1116408323, 184800088, 3543941771, \n 1116408835, 3759330081, 1124812291, 570479743, 1116407875, 151245684, \n 3546235531, 1116408323, 184800068, 3542631051, 1116408835, 587191053, \n 3121742345, 1116408835, 303223554, 3496280585, 1116408579, 3758150425, \n 197331465, 3540075147, 448791499, 193151314, 3540075147, 448791435, \n 188957010, 3540075147, 448791371, 184762706, 3540075147, 448791307, \n 180568402, 3540075147, 448791243, 176374098, 3540075147, 448791179, \n 172179794, 3540075147, 448791115, 167985490, 3540075147, 448791051, \n 3536666962, 1116408259, 30135041, 1095899840, 1116408195, 25940737, \n 1095899840, 1116408131, 21746433, 1095899840, 1116408067, 17552129, \n 1095899840, 1116408003, 13357825, 1095899840, 1116407939, 9163521, \n 1095899840, 1116407875, 4969217, 1095899840, 3523222081, 1095910913, \n 1198540304, 264953949, 1112133632, 3539996675, 1079198272, 1184637440, \n 1116408067, 168022829, 3541189259, 25764604, 168016402, 3540796043, \n 294781321, 3540533899, 294781321, 3540271755, 3493462409, 3758100882, \n 163776905, 3540075147, 448790987, 159596882, 3540075147, 448790923, \n 155402578, 3540075147, 448790859, 151208274, 3540075147, 448790795, \n 147013970, 3540075147, 448790731, 142819666, 3540075147, 448790667, \n 3537453394, 1116407875, 4969217, 1095899840, 3523222081, 1180911105, \n 274415954, 3540076048, 721437248, 1112134912, 1180911472, 3539996763, \n 3036758592, 1186996224, 3171043008, 3037743114, 1145587976, 4177326080,\n 3506710528, 553666566, 4026549320, 1241905609, 587294929, 1125712539, \n 3171967185, 1801807467, 4, 4026544128, 1275901296, 
1145849349, \n 1259030017, 1759659552, 4171558912, 3506776064, 1177101056, 1759659552,\n 4184797184, 1758087429, 43721473, 1623868186, 48496, 4, 1801807467, \n 4026544128, 1208595728, 2176928007, 2176928008, 139036673, 2147549257, \n 1145587718, 4176670720, 3489671168, 3171950593, 50464, 1074077696, \n 55592, 4, 1175238000, 1174816267, 1208829441, 1175826564, 4026549320, \n 671152387, 2416038154, 553750530, 2432714759, 1176651307, 1145587249, \n 4185452544, 1758087428, 43721473, 1623868186, 3178278916, 4, 4026544128,\n 1198530560, 3490195456, 3624020239, 3506711044, 689168389, 705222657, \n 537186305, 536889200, 671106928, 537186561, 3020965744, 1175199323, \n 3506520604, 3489808922, 543538192, 1745045360, 3624157835, 411658304, \n 1116215320, 3155218946, 1198530662, 536919056, 18288, 1116227843, \n 543936513, 536889200, 18288, 1801807467, 561006602, 562065409, \n 2013360129, 3590063625, 109148160, 543675649, 113330032, 543741185, \n 130041712, 543805692, 18288, 1073872896, 1174779384, 1175209494, \n 4160701976, 671154135, 587518251, 1176585778, 4160701992, 524210, \n 430362915, 1760108150, 2432714288, 4264359935, 3489868032, 473996800, \n 511066950, 3625140916, 1148733450, 1610901504, 537479433, 4160713160, \n 1174929343, 671115688, 1199624192, 3506646784, 419719400, 3656073908, \n 3187164728, 634, 1073872896, 1174713616, 4160701960, 671154079, \n 738251014, 1225052165, 1908940868, 4288739327, 537181456, 48400, \n 1073872896, 3490457600, 3490327040, 3524929800, 1148911627, 417036571, \n 117785759, 218827015, 537137423, 1757431664, 1749082122, 1753276424, \n 1744887814, 536993796, 1761665026, 1765859328, 536895504, 543836016, \n 18288, 3490326528, 1758021903, 235489807, 1148846153, 50944593, \n 543478019, 537151344, 3020965744, 1610818304, 553738305, 42557569, \n 1225220289, 361396748, 1627537570, 1636006601, 3155190081, 1198540312, \n 1074036800, 426, 1073872928, 3506514432, 1198530564, 1176286719, \n 1175826561, 587482638, 2550220322, 4279891967, 3508076551, 
3491113984, \n 1148733452, 1611556865, 1744882946, 1225416769, 1908940806, 4281399295,\n 2550220295, 671115648, 1199624192, 3506581248, 522460470, 1178128872, \n 3186667525, 354, 1073872896, 3489802240, 536895873, 537151344, 18288, \n 2961290751, 1175340564, 587482629, 4276615167, 3509200896, 1751673001, \n 4251252735, 1114738688, 1073824320, 1119306319, 2550190337, 738203711, \n 465424410, 3640672933, 1225606693, 1148782760, 1611556873, 570509579, \n 167932362, 1921544906, 1917360132, 4276811775, 3506841600, 459577344, \n 406788470, 3521391616, 2953125888, 48624, 218, 1073872896, 3506513920, \n 1198530564, 1208202512, 1908548160, 4160713089, 3172007631, 1073872896,\n 3506514688, 1198530564, 1176286712, 1175340565, 2668110596, 4271503359,\n 3508086784, 3491179776, 1148865039, 1611032592, 553797646, 2567336385, \n 1745973953, 1619552288, 4272879615, 3490195456, 687905031, 1611583488, \n 3489738496, 1614356736, 523091448, 490085668, 3521195264, 48632, 98, \n 1073872896, 262146, 524288, 1048576, 2097152, 4194304, 0, 0, 2097152, \n 1073872900, 0], 'pc_init': 536871549, 'pc_unInit': 536871673,\n 'pc_program_page': 536871601, 'pc_erase_sector': 536871485,\n 'pc_eraseAll': 536871433, 'static_base': 536870912 + 32 + 1596,\n 'begin_stack': 536870912 + 2048, 'begin_data': 536870912 + 2560,\n 'page_buffers': [536873472, 536875520], 'min_program_length': 4,\n 'analyzer_supported': True, 'analyzer_address': 536868864}\n\n\nclass KV11Z7(Kinetis):\n MEMORY_MAP = MemoryMap(FlashRegion(start=0, length=131072, blocksize=\n 1024, is_boot_memory=True, algo=FLASH_ALGO, flash_class=\n Flash_Kinetis), RamRegion(start=536866816, length=16384))\n\n def __init__(self, session):\n super(KV11Z7, self).__init__(session, self.MEMORY_MAP)\n self._svd_location = SVDFile.from_builtin('MKV11Z7.svd')\n",
"step-5": "# pyOCD debugger\n# Copyright (c) 2006-2013,2018 Arm Limited\n# SPDX-License-Identifier: Apache-2.0\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom ..family.target_kinetis import Kinetis\nfrom ..family.flash_kinetis import Flash_Kinetis\nfrom ...core.memory_map import (FlashRegion, RamRegion, MemoryMap)\nfrom ...debug.svd.loader import SVDFile\n\nFLASH_ALGO = { 'load_address' : 0x20000000,\n 'instructions' : [\n 0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,\n 0x09032200, 0xd373428b, 0x428b0a03, 0x0b03d358, 0xd33c428b, 0x428b0c03, 0xe012d321, 0x430b4603,\n 0x2200d47f, 0x428b0843, 0x0903d374, 0xd35f428b, 0x428b0a03, 0x0b03d344, 0xd328428b, 0x428b0c03,\n 0x22ffd30d, 0xba120209, 0x428b0c03, 0x1212d302, 0xd0650209, 0x428b0b03, 0xe000d319, 0x0bc30a09,\n 0xd301428b, 0x1ac003cb, 0x0b834152, 0xd301428b, 0x1ac0038b, 0x0b434152, 0xd301428b, 0x1ac0034b,\n 0x0b034152, 0xd301428b, 0x1ac0030b, 0x0ac34152, 0xd301428b, 0x1ac002cb, 0x0a834152, 0xd301428b,\n 0x1ac0028b, 0x0a434152, 0xd301428b, 0x1ac0024b, 0x0a034152, 0xd301428b, 0x1ac0020b, 0xd2cd4152,\n 0x428b09c3, 0x01cbd301, 0x41521ac0, 0x428b0983, 0x018bd301, 0x41521ac0, 0x428b0943, 0x014bd301,\n 0x41521ac0, 0x428b0903, 0x010bd301, 0x41521ac0, 0x428b08c3, 0x00cbd301, 0x41521ac0, 0x428b0883,\n 0x008bd301, 0x41521ac0, 0x428b0843, 0x004bd301, 0x41521ac0, 0xd2001a41, 0x41524601, 0x47704610,\n 0x0fcae05d, 0x4249d000, 0xd3001003, 0x40534240, 0x469c2200, 0x428b0903, 
0x0a03d32d, 0xd312428b,\n 0x018922fc, 0x0a03ba12, 0xd30c428b, 0x11920189, 0xd308428b, 0x11920189, 0xd304428b, 0xd03a0189,\n 0xe0001192, 0x09c30989, 0xd301428b, 0x1ac001cb, 0x09834152, 0xd301428b, 0x1ac0018b, 0x09434152,\n 0xd301428b, 0x1ac0014b, 0x09034152, 0xd301428b, 0x1ac0010b, 0x08c34152, 0xd301428b, 0x1ac000cb,\n 0x08834152, 0xd301428b, 0x1ac0008b, 0xd2d94152, 0x428b0843, 0x004bd301, 0x41521ac0, 0xd2001a41,\n 0x46634601, 0x105b4152, 0xd3014610, 0x2b004240, 0x4249d500, 0x46634770, 0xd300105b, 0xb5014240,\n 0x46c02000, 0xbd0246c0, 0xb510480a, 0x44484908, 0xf8fcf000, 0xd1042800, 0x21004806, 0xf0004448,\n 0x4a05f9c9, 0x230168d1, 0x4319029b, 0xbd1060d1, 0x6b65666b, 0x00000004, 0xf0003000, 0x4c0cb570,\n 0x444c4605, 0x4b0b4601, 0x68e24620, 0xf8a4f000, 0xd1052800, 0x46292300, 0x68e24620, 0xf96ef000,\n 0x68ca4905, 0x029b2301, 0x60ca431a, 0x0000bd70, 0x00000004, 0x6b65666b, 0xf0003000, 0x4809b510,\n 0x81c14907, 0x81c14908, 0x08498801, 0x80010049, 0x44484806, 0xf8f2f000, 0xd0002800, 0xbd102001,\n 0x0000c520, 0x40052000, 0x0000d928, 0x00000004, 0x460cb570, 0x4606460b, 0x480d4601, 0x4615b084,\n 0xf0004448, 0x2800f903, 0x9001d10a, 0x21019002, 0x91004807, 0x4622462b, 0x44484631, 0xf978f000,\n 0x68ca4904, 0x029b2301, 0x60ca431a, 0xbd70b004, 0x00000004, 0xf0003000, 0x47702000, 0xd0082800,\n 0xd802290f, 0xd1042a04, 0x2913e005, 0x2a08d801, 0x2004d001, 0x20004770, 0x28004770, 0x2004d101,\n 0xb4104770, 0x460c1e5b, 0xd101421c, 0xd002421a, 0x2065bc10, 0x68034770, 0xd804428b, 0x18896840,\n 0x42881818, 0xbc10d202, 0x47702066, 0x2000bc10, 0x00004770, 0x42884903, 0x206bd001, 0x20004770,\n 0x00004770, 0x6b65666b, 0x2170480a, 0x21807001, 0x78017001, 0xd5fc0609, 0x06817800, 0x2067d501,\n 0x06c14770, 0x2068d501, 0x07c04770, 0x2069d0fc, 0x00004770, 0x40020000, 0x4605b5f8, 0x460c4616,\n 0xf7ff4618, 0x2800ffd7, 0x2304d12b, 0x46214632, 0xf7ff4628, 0x0007ffb2, 0x19a6d123, 0x68e91e76,\n 0x91004630, 0xfe2cf7ff, 0xd0032900, 0x1c409e00, 0x1e764346, 0xd81342b4, 0x4478480a, 0x60046800,\n 0x20094909, 
0xf7ff71c8, 0x4607ffbf, 0x280069a8, 0x4780d000, 0xd1032f00, 0x190468e8, 0xd9eb42b4,\n 0xbdf84638, 0x0000027a, 0x40020000, 0x4604b510, 0xf7ff4608, 0x2800ff9f, 0x2c00d106, 0x4904d005,\n 0x71c82044, 0xffa0f7ff, 0x2004bd10, 0x0000bd10, 0x40020000, 0xd00c2800, 0xd00a2a00, 0xd21a2908,\n 0x447b000b, 0x18db791b, 0x0705449f, 0x0d0b0907, 0x2004110f, 0x68c04770, 0x6840e00a, 0x6880e008,\n 0x6800e006, 0x2001e004, 0x6900e002, 0x6940e000, 0x20006010, 0x206a4770, 0x00004770, 0xd00a2800,\n 0x68c9490f, 0x0e094a0f, 0x447a0049, 0x03095a51, 0x2064d103, 0x20044770, 0xb4104770, 0x60032300,\n 0x21016041, 0x02896081, 0x490760c1, 0x158a7a0c, 0x610240a2, 0x61837ac9, 0xbc106141, 0x47704618,\n 0x40048040, 0x000001aa, 0x40020020, 0xd1012a00, 0x47702004, 0x461cb5ff, 0x4615b081, 0x2304460e,\n 0x98014622, 0xff19f7ff, 0xd1190007, 0xd0162c00, 0x4478480c, 0x600e6801, 0x6800cd02, 0x490a6041,\n 0x71c82006, 0xff30f7ff, 0x98014607, 0x28006980, 0x4780d000, 0xd1022f00, 0x1f241d36, 0x4638d1e8,\n 0xbdf0b005, 0x00000162, 0x40020000, 0xd0022800, 0x20006181, 0x20044770, 0x00004770, 0xb081b5ff,\n 0x460e4614, 0x23044605, 0xfee7f7ff, 0xd12a2800, 0x686868a9, 0xfd64f7ff, 0x42719000, 0x40014240,\n 0x42b7424f, 0x9800d101, 0x2c00183f, 0x1bbdd01a, 0xd90042a5, 0x490d4625, 0x447908a8, 0x600e6809,\n 0x2201490b, 0x0a0271ca, 0x728872ca, 0x72489804, 0xfeeaf7ff, 0xd1062800, 0x1b649800, 0x183f1976,\n 0xd1e42c00, 0xb0052000, 0x0000bdf0, 0x000000da, 0x40020000, 0xd1012800, 0x47702004, 0x4803b510,\n 0x71c22240, 0xf7ff7181, 0xbd10fecf, 0x40020000, 0xd1012b00, 0x47702004, 0x461cb5f8, 0x460e4615,\n 0x9f082304, 0xfe99f7ff, 0xd1192800, 0xd0172d00, 0x447a4a0f, 0x60066810, 0x2102480e, 0x990671c1,\n 0x681172c1, 0x60886820, 0xfeaef7ff, 0xd0082800, 0x29009907, 0x600ed000, 0xd0012f00, 0x60392100,\n 0x1f2dbdf8, 0x1d361d24, 0xd1e12d00, 0x0000bdf8, 0x00000062, 0x40020000, 0x00040002, 0x00080000,\n 0x00100000, 0x00200000, 0x00400000, 0x00000000, 0x00000000, 0x00200000, 0x40020004, 0x00000000,\n\n ],\n\n 'pc_init' : 0x2000027D,\n 'pc_unInit': 
0x200002F9,\n 'pc_program_page': 0x200002B1,\n 'pc_erase_sector': 0x2000023D,\n 'pc_eraseAll' : 0x20000209,\n\n 'static_base' : 0x20000000 + 0x00000020 + 0x0000063c,\n 'begin_stack' : 0x20000000 + 0x00000800,\n 'begin_data' : 0x20000000 + 0x00000A00,\n 'page_buffers' : [0x20000a00, 0x20001200], # Enable double buffering\n 'min_program_length' : 4,\n 'analyzer_supported' : True,\n 'analyzer_address' : 0x1ffff800\n }\n\nclass KV11Z7(Kinetis):\n\n MEMORY_MAP = MemoryMap(\n FlashRegion( start=0, length=0x20000, blocksize=0x400, is_boot_memory=True,\n algo=FLASH_ALGO, flash_class=Flash_Kinetis),\n RamRegion( start=0x1ffff000, length=0x4000)\n )\n\n def __init__(self, session):\n super(KV11Z7, self).__init__(session, self.MEMORY_MAP)\n self._svd_location = SVDFile.from_builtin(\"MKV11Z7.svd\")\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
/home/mitchellwoodbine/Documents/github/getargs/GetArgs.py
|
normal
|
{
"blob_id": "0065a493767a2080a20f8b55f76ddeae92dc27f1",
"index": 3359,
"step-1": "/home/mitchellwoodbine/Documents/github/getargs/GetArgs.py",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def klist(**kwargs):
kwargs.update({'teachers': [x for x in User.objects.filter(status=1) if
not x.is_demo()], 'admins': User.objects.filter(status=2)})
return kwargs
<|reserved_special_token_0|>
@must_be_admin
def account(request, account_id):
try:
acc = User.objects.get(id=int(account_id))
except:
raise Http404
class AccountBaseForm(forms.ModelForm):
class Meta:
model = User
fields = ['name', 'surname', 'number']
widgets = {'name': forms.TextInput(), 'surname': forms.TextInput()}
if request.method == 'POST':
form = AccountBaseForm(request.POST, instance=acc)
if form.is_valid():
form.save()
messages.add_message(request, messages.SUCCESS, u'Zapisano.')
else:
form = AccountBaseForm(instance=acc)
if acc.status != 0:
return render_to_response('radmin/manage_accounts_acc.html',
request, **klist(account=acc, selected_user_id=acc.id, form=form))
else:
return render_to_response('radmin/manage_accounts_students_acc.html',
request, account=acc, selected_user_id=acc.id, form=form, page=
Paginator(User.objects.filter(status=0).order_by('surname',
'name'), 30).page(1))
<|reserved_special_token_0|>
@must_be_admin
def view_students(request, page='1'):
page = int(page)
students = User.objects.filter(status=0).order_by('surname', 'name')
students = [x for x in students if not x.is_demo()]
p = Paginator(students, 30)
cpage = p.page(page)
return render_to_response('radmin/manage_accounts_students_list.html',
request, page=cpage)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def klist(**kwargs):
kwargs.update({'teachers': [x for x in User.objects.filter(status=1) if
not x.is_demo()], 'admins': User.objects.filter(status=2)})
return kwargs
<|reserved_special_token_0|>
@must_be_admin
def account(request, account_id):
try:
acc = User.objects.get(id=int(account_id))
except:
raise Http404
class AccountBaseForm(forms.ModelForm):
class Meta:
model = User
fields = ['name', 'surname', 'number']
widgets = {'name': forms.TextInput(), 'surname': forms.TextInput()}
if request.method == 'POST':
form = AccountBaseForm(request.POST, instance=acc)
if form.is_valid():
form.save()
messages.add_message(request, messages.SUCCESS, u'Zapisano.')
else:
form = AccountBaseForm(instance=acc)
if acc.status != 0:
return render_to_response('radmin/manage_accounts_acc.html',
request, **klist(account=acc, selected_user_id=acc.id, form=form))
else:
return render_to_response('radmin/manage_accounts_students_acc.html',
request, account=acc, selected_user_id=acc.id, form=form, page=
Paginator(User.objects.filter(status=0).order_by('surname',
'name'), 30).page(1))
@must_be_admin
def reset_pwd(request, account_id):
if request.method != 'POST':
return HttpResponse(status=400)
try:
acc = User.objects.get(id=int(account_id))
except:
raise Http404
from random import choice
randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for
i in range(7)])
acc.set_password(randompass)
messages.add_message(request, messages.SUCCESS, u'Nowe hasło to %s' % (
randompass,))
return redirect('/admin/accounts/%s/' % (acc.id,))
@must_be_admin
def su(request, account_id):
"""Login as this user"""
if request.method != 'POST':
return HttpResponse(status=400)
try:
acc = User.objects.get(id=int(account_id))
except:
raise Http404
request.logout()
request.login(acc.login)
messages.add_message(request, messages.SUCCESS, u'Zalogowano jako %s' %
(acc.login,))
return redirect('/')
@must_be_admin
def delete(request, account_id):
if request.method != 'POST':
return HttpResponse(status=400)
try:
acc = User.objects.get(id=int(account_id))
except:
raise Http404
if acc.login in ('demo@example.com', 'teacher@example.com',
'root@example.com'):
messages.add_message(request, messages.ERROR,
u'Nie można usunąć konta wbudowanego')
return redirect('/admin/accounts/%s/' % (acc.id,))
if acc.status == 1:
pass
messages.add_message(request, messages.SUCCESS,
u'Konto "%s %s" usunięte.' % (acc.name, acc.surname))
acc.delete()
return redirect('/admin/accounts/')
<|reserved_special_token_0|>
@must_be_admin
def view_students(request, page='1'):
page = int(page)
students = User.objects.filter(status=0).order_by('surname', 'name')
students = [x for x in students if not x.is_demo()]
p = Paginator(students, 30)
cpage = p.page(page)
return render_to_response('radmin/manage_accounts_students_list.html',
request, page=cpage)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def klist(**kwargs):
kwargs.update({'teachers': [x for x in User.objects.filter(status=1) if
not x.is_demo()], 'admins': User.objects.filter(status=2)})
return kwargs
@must_be_admin
def list(request):
return render_to_response('radmin/manage_accounts_list.html', request,
**klist())
@must_be_admin
def account(request, account_id):
try:
acc = User.objects.get(id=int(account_id))
except:
raise Http404
class AccountBaseForm(forms.ModelForm):
class Meta:
model = User
fields = ['name', 'surname', 'number']
widgets = {'name': forms.TextInput(), 'surname': forms.TextInput()}
if request.method == 'POST':
form = AccountBaseForm(request.POST, instance=acc)
if form.is_valid():
form.save()
messages.add_message(request, messages.SUCCESS, u'Zapisano.')
else:
form = AccountBaseForm(instance=acc)
if acc.status != 0:
return render_to_response('radmin/manage_accounts_acc.html',
request, **klist(account=acc, selected_user_id=acc.id, form=form))
else:
return render_to_response('radmin/manage_accounts_students_acc.html',
request, account=acc, selected_user_id=acc.id, form=form, page=
Paginator(User.objects.filter(status=0).order_by('surname',
'name'), 30).page(1))
@must_be_admin
def reset_pwd(request, account_id):
if request.method != 'POST':
return HttpResponse(status=400)
try:
acc = User.objects.get(id=int(account_id))
except:
raise Http404
from random import choice
randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for
i in range(7)])
acc.set_password(randompass)
messages.add_message(request, messages.SUCCESS, u'Nowe hasło to %s' % (
randompass,))
return redirect('/admin/accounts/%s/' % (acc.id,))
@must_be_admin
def su(request, account_id):
"""Login as this user"""
if request.method != 'POST':
return HttpResponse(status=400)
try:
acc = User.objects.get(id=int(account_id))
except:
raise Http404
request.logout()
request.login(acc.login)
messages.add_message(request, messages.SUCCESS, u'Zalogowano jako %s' %
(acc.login,))
return redirect('/')
@must_be_admin
def delete(request, account_id):
if request.method != 'POST':
return HttpResponse(status=400)
try:
acc = User.objects.get(id=int(account_id))
except:
raise Http404
if acc.login in ('demo@example.com', 'teacher@example.com',
'root@example.com'):
messages.add_message(request, messages.ERROR,
u'Nie można usunąć konta wbudowanego')
return redirect('/admin/accounts/%s/' % (acc.id,))
if acc.status == 1:
pass
messages.add_message(request, messages.SUCCESS,
u'Konto "%s %s" usunięte.' % (acc.name, acc.surname))
acc.delete()
return redirect('/admin/accounts/')
<|reserved_special_token_0|>
@must_be_admin
def view_students(request, page='1'):
page = int(page)
students = User.objects.filter(status=0).order_by('surname', 'name')
students = [x for x in students if not x.is_demo()]
p = Paginator(students, 30)
cpage = p.page(page)
return render_to_response('radmin/manage_accounts_students_list.html',
request, page=cpage)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def klist(**kwargs):
kwargs.update({'teachers': [x for x in User.objects.filter(status=1) if
not x.is_demo()], 'admins': User.objects.filter(status=2)})
return kwargs
@must_be_admin
def list(request):
return render_to_response('radmin/manage_accounts_list.html', request,
**klist())
@must_be_admin
def account(request, account_id):
try:
acc = User.objects.get(id=int(account_id))
except:
raise Http404
class AccountBaseForm(forms.ModelForm):
class Meta:
model = User
fields = ['name', 'surname', 'number']
widgets = {'name': forms.TextInput(), 'surname': forms.TextInput()}
if request.method == 'POST':
form = AccountBaseForm(request.POST, instance=acc)
if form.is_valid():
form.save()
messages.add_message(request, messages.SUCCESS, u'Zapisano.')
else:
form = AccountBaseForm(instance=acc)
if acc.status != 0:
return render_to_response('radmin/manage_accounts_acc.html',
request, **klist(account=acc, selected_user_id=acc.id, form=form))
else:
return render_to_response('radmin/manage_accounts_students_acc.html',
request, account=acc, selected_user_id=acc.id, form=form, page=
Paginator(User.objects.filter(status=0).order_by('surname',
'name'), 30).page(1))
@must_be_admin
def reset_pwd(request, account_id):
if request.method != 'POST':
return HttpResponse(status=400)
try:
acc = User.objects.get(id=int(account_id))
except:
raise Http404
from random import choice
randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for
i in range(7)])
acc.set_password(randompass)
messages.add_message(request, messages.SUCCESS, u'Nowe hasło to %s' % (
randompass,))
return redirect('/admin/accounts/%s/' % (acc.id,))
@must_be_admin
def su(request, account_id):
"""Login as this user"""
if request.method != 'POST':
return HttpResponse(status=400)
try:
acc = User.objects.get(id=int(account_id))
except:
raise Http404
request.logout()
request.login(acc.login)
messages.add_message(request, messages.SUCCESS, u'Zalogowano jako %s' %
(acc.login,))
return redirect('/')
@must_be_admin
def delete(request, account_id):
if request.method != 'POST':
return HttpResponse(status=400)
try:
acc = User.objects.get(id=int(account_id))
except:
raise Http404
if acc.login in ('demo@example.com', 'teacher@example.com',
'root@example.com'):
messages.add_message(request, messages.ERROR,
u'Nie można usunąć konta wbudowanego')
return redirect('/admin/accounts/%s/' % (acc.id,))
if acc.status == 1:
pass
messages.add_message(request, messages.SUCCESS,
u'Konto "%s %s" usunięte.' % (acc.name, acc.surname))
acc.delete()
return redirect('/admin/accounts/')
@must_be_admin
def create(request):
class NewAccountForm(forms.Form):
_CHOICE = (1, 'Nauczyciel'), (2, 'Adminstrator')
login = forms.EmailField(label=u'E-mail')
name = forms.CharField(label=u'Imię', required=False)
surname = forms.CharField(label=u'Nazwisko', required=False)
status = forms.ChoiceField(choices=_CHOICE, initial=1, label=u'Typ')
if request.method == 'POST':
form = NewAccountForm(request.POST)
if form.is_valid():
from random import choice
randompass = ''.join([choice(
'1234567890qwertyupasdfghjklzxcvbnm') for i in range(7)])
u = User(login=form.cleaned_data['login'], name=form.
cleaned_data['name'], surname=form.cleaned_data['surname'],
status=form.cleaned_data['status'])
u.save()
u.set_password(randompass)
messages.add_message(request, messages.SUCCESS,
u'Konto stworzone. Nowe hasło to %s' % (randompass,))
return redirect('/admin/accounts/%s/' % (u.id,))
else:
form = NewAccountForm()
return render_to_response('radmin/manage_accounts_add.html', request,
**klist(selected_user_id='create', form=form))
<|reserved_special_token_0|>
@must_be_admin
def view_students(request, page='1'):
page = int(page)
students = User.objects.filter(status=0).order_by('surname', 'name')
students = [x for x in students if not x.is_demo()]
p = Paginator(students, 30)
cpage = p.page(page)
return render_to_response('radmin/manage_accounts_students_list.html',
request, page=cpage)
<|reserved_special_token_1|>
# coding=UTF-8
"""
View for managing accounts
"""
from django.contrib import messages
from django.http import Http404, HttpResponse
from django.shortcuts import redirect
from django import forms
from athena.core import render_to_response
from athena.users.models import User
from athena.users import must_be_admin
def klist(**kwargs):
kwargs.update({
'teachers': [x for x in User.objects.filter(status=1) if not x.is_demo()],
'admins': User.objects.filter(status=2),
})
return kwargs
@must_be_admin
def list(request):
return render_to_response('radmin/manage_accounts_list.html', request, **klist())
@must_be_admin
def account(request, account_id):
try:
acc = User.objects.get(id=int(account_id))
except:
raise Http404
class AccountBaseForm(forms.ModelForm):
class Meta:
model = User
fields = ['name', 'surname', 'number']
widgets = {
'name': forms.TextInput(),
'surname': forms.TextInput(),
}
if request.method == 'POST':
form = AccountBaseForm(request.POST, instance=acc)
if form.is_valid():
form.save()
messages.add_message(request, messages.SUCCESS, u'Zapisano.')
else:
form = AccountBaseForm(instance=acc)
if acc.status != 0:
return render_to_response('radmin/manage_accounts_acc.html', request, **klist(
account=acc,
selected_user_id=acc.id,
form=form))
else:
return render_to_response('radmin/manage_accounts_students_acc.html', request,
account=acc,
selected_user_id=acc.id,
form=form,
page=Paginator(User.objects.filter(status=0).order_by('surname', 'name'), 30).page(1))
@must_be_admin
def reset_pwd(request, account_id):
    """Reset the user's password to a fresh random one (POST only).

    The new password is shown to the admin via a flash message.
    """
    if request.method != 'POST':
        return HttpResponse(status=400)
    try:
        acc = User.objects.get(id=int(account_id))
    except (ValueError, User.DoesNotExist):
        raise Http404

    # Use the OS CSPRNG for credential generation -- the default
    # random.choice PRNG is predictable and unsuitable for passwords.
    from random import SystemRandom
    choice = SystemRandom().choice
    randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for i in range(7)])

    acc.set_password(randompass)
    messages.add_message(request, messages.SUCCESS, u'Nowe hasło to %s' % (randompass, ))
    return redirect('/admin/accounts/%s/' % (acc.id, ))
@must_be_admin
def su(request, account_id):
    """Log the current admin out and log in as the given user (POST only)."""
    if request.method != 'POST':
        return HttpResponse(status=400)
    try:
        acc = User.objects.get(id=int(account_id))
    except (ValueError, User.DoesNotExist):
        # narrow the previous bare except: only bad ids / missing users -> 404
        raise Http404

    request.logout()
    request.login(acc.login)
    messages.add_message(request, messages.SUCCESS, u'Zalogowano jako %s' % (acc.login, ))
    return redirect('/')
@must_be_admin
def delete(request, account_id):
    """Delete a user account (POST only); built-in demo accounts are protected."""
    if request.method != 'POST':
        return HttpResponse(status=400)
    try:
        acc = User.objects.get(id=int(account_id))
    except (ValueError, User.DoesNotExist):
        # narrow the previous bare except: only bad ids / missing users -> 404
        raise Http404

    if acc.login in ('demo@example.com', 'teacher@example.com', 'root@example.com'):
        messages.add_message(request, messages.ERROR, u'Nie można usunąć konta wbudowanego')
        return redirect('/admin/accounts/%s/' % (acc.id, ))

    if acc.status == 1:
        # TODO: this is a teacher -- reparent all of their tests and
        # groups to teacher@example.com before deleting.
        pass

    messages.add_message(request, messages.SUCCESS, u'Konto "%s %s" usunięte.' % (acc.name, acc.surname))
    acc.delete()
    return redirect('/admin/accounts/')
@must_be_admin
def create(request):
    """Create a new teacher/admin account with a randomly generated password.

    The generated password is shown to the admin via a flash message,
    then the view redirects to the new account's detail page.
    """
    class NewAccountForm(forms.Form):
        _CHOICE = ((1, 'Nauczyciel'), (2, 'Adminstrator'))
        login = forms.EmailField(label=u'E-mail')
        name = forms.CharField(label=u'Imię', required=False)
        surname = forms.CharField(label=u'Nazwisko', required=False)
        status = forms.ChoiceField(choices=_CHOICE, initial=1, label=u'Typ')

    if request.method == 'POST':
        form = NewAccountForm(request.POST)
        if form.is_valid():
            # Generate the password with the OS CSPRNG -- plain
            # random.choice is predictable and unsuitable for credentials.
            from random import SystemRandom
            choice = SystemRandom().choice
            randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for i in range(7)])

            u = User(login=form.cleaned_data['login'],
                name=form.cleaned_data['name'],
                surname=form.cleaned_data['surname'],
                status=form.cleaned_data['status'])
            u.save()
            u.set_password(randompass)

            messages.add_message(request, messages.SUCCESS, u'Konto stworzone. Nowe hasło to %s' % (randompass, ))
            return redirect('/admin/accounts/%s/' % (u.id, ))
    else:
        form = NewAccountForm()

    return render_to_response('radmin/manage_accounts_add.html', request, **klist(
        selected_user_id='create',
        form=form))
from django.core.paginator import Paginator
@must_be_admin
def view_students(request, page='1'):
    """Paginated list of student accounts (status == 0), demo accounts excluded."""
    page_no = int(page)  # URL captures arrive as strings
    all_students = User.objects.filter(status=0).order_by('surname', 'name')
    real_students = [s for s in all_students if not s.is_demo()]
    paginator = Paginator(real_students, 30)
    return render_to_response('radmin/manage_accounts_students_list.html',
        request, page=paginator.page(page_no))
|
flexible
|
{
"blob_id": "a01ca49c3fa8ea76de2880c1b04bf15ccd341edd",
"index": 924,
"step-1": "<mask token>\n\n\ndef klist(**kwargs):\n kwargs.update({'teachers': [x for x in User.objects.filter(status=1) if\n not x.is_demo()], 'admins': User.objects.filter(status=2)})\n return kwargs\n\n\n<mask token>\n\n\n@must_be_admin\ndef account(request, account_id):\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n\n class AccountBaseForm(forms.ModelForm):\n\n\n class Meta:\n model = User\n fields = ['name', 'surname', 'number']\n widgets = {'name': forms.TextInput(), 'surname': forms.TextInput()}\n if request.method == 'POST':\n form = AccountBaseForm(request.POST, instance=acc)\n if form.is_valid():\n form.save()\n messages.add_message(request, messages.SUCCESS, u'Zapisano.')\n else:\n form = AccountBaseForm(instance=acc)\n if acc.status != 0:\n return render_to_response('radmin/manage_accounts_acc.html',\n request, **klist(account=acc, selected_user_id=acc.id, form=form))\n else:\n return render_to_response('radmin/manage_accounts_students_acc.html',\n request, account=acc, selected_user_id=acc.id, form=form, page=\n Paginator(User.objects.filter(status=0).order_by('surname',\n 'name'), 30).page(1))\n\n\n<mask token>\n\n\n@must_be_admin\ndef view_students(request, page='1'):\n page = int(page)\n students = User.objects.filter(status=0).order_by('surname', 'name')\n students = [x for x in students if not x.is_demo()]\n p = Paginator(students, 30)\n cpage = p.page(page)\n return render_to_response('radmin/manage_accounts_students_list.html',\n request, page=cpage)\n",
"step-2": "<mask token>\n\n\ndef klist(**kwargs):\n kwargs.update({'teachers': [x for x in User.objects.filter(status=1) if\n not x.is_demo()], 'admins': User.objects.filter(status=2)})\n return kwargs\n\n\n<mask token>\n\n\n@must_be_admin\ndef account(request, account_id):\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n\n class AccountBaseForm(forms.ModelForm):\n\n\n class Meta:\n model = User\n fields = ['name', 'surname', 'number']\n widgets = {'name': forms.TextInput(), 'surname': forms.TextInput()}\n if request.method == 'POST':\n form = AccountBaseForm(request.POST, instance=acc)\n if form.is_valid():\n form.save()\n messages.add_message(request, messages.SUCCESS, u'Zapisano.')\n else:\n form = AccountBaseForm(instance=acc)\n if acc.status != 0:\n return render_to_response('radmin/manage_accounts_acc.html',\n request, **klist(account=acc, selected_user_id=acc.id, form=form))\n else:\n return render_to_response('radmin/manage_accounts_students_acc.html',\n request, account=acc, selected_user_id=acc.id, form=form, page=\n Paginator(User.objects.filter(status=0).order_by('surname',\n 'name'), 30).page(1))\n\n\n@must_be_admin\ndef reset_pwd(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n from random import choice\n randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for\n i in range(7)])\n acc.set_password(randompass)\n messages.add_message(request, messages.SUCCESS, u'Nowe hasło to %s' % (\n randompass,))\n return redirect('/admin/accounts/%s/' % (acc.id,))\n\n\n@must_be_admin\ndef su(request, account_id):\n \"\"\"Login as this user\"\"\"\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n request.logout()\n request.login(acc.login)\n messages.add_message(request, messages.SUCCESS, u'Zalogowano jako %s' 
%\n (acc.login,))\n return redirect('/')\n\n\n@must_be_admin\ndef delete(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n if acc.login in ('demo@example.com', 'teacher@example.com',\n 'root@example.com'):\n messages.add_message(request, messages.ERROR,\n u'Nie można usunąć konta wbudowanego')\n return redirect('/admin/accounts/%s/' % (acc.id,))\n if acc.status == 1:\n pass\n messages.add_message(request, messages.SUCCESS, \n u'Konto \"%s %s\" usunięte.' % (acc.name, acc.surname))\n acc.delete()\n return redirect('/admin/accounts/')\n\n\n<mask token>\n\n\n@must_be_admin\ndef view_students(request, page='1'):\n page = int(page)\n students = User.objects.filter(status=0).order_by('surname', 'name')\n students = [x for x in students if not x.is_demo()]\n p = Paginator(students, 30)\n cpage = p.page(page)\n return render_to_response('radmin/manage_accounts_students_list.html',\n request, page=cpage)\n",
"step-3": "<mask token>\n\n\ndef klist(**kwargs):\n kwargs.update({'teachers': [x for x in User.objects.filter(status=1) if\n not x.is_demo()], 'admins': User.objects.filter(status=2)})\n return kwargs\n\n\n@must_be_admin\ndef list(request):\n return render_to_response('radmin/manage_accounts_list.html', request,\n **klist())\n\n\n@must_be_admin\ndef account(request, account_id):\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n\n class AccountBaseForm(forms.ModelForm):\n\n\n class Meta:\n model = User\n fields = ['name', 'surname', 'number']\n widgets = {'name': forms.TextInput(), 'surname': forms.TextInput()}\n if request.method == 'POST':\n form = AccountBaseForm(request.POST, instance=acc)\n if form.is_valid():\n form.save()\n messages.add_message(request, messages.SUCCESS, u'Zapisano.')\n else:\n form = AccountBaseForm(instance=acc)\n if acc.status != 0:\n return render_to_response('radmin/manage_accounts_acc.html',\n request, **klist(account=acc, selected_user_id=acc.id, form=form))\n else:\n return render_to_response('radmin/manage_accounts_students_acc.html',\n request, account=acc, selected_user_id=acc.id, form=form, page=\n Paginator(User.objects.filter(status=0).order_by('surname',\n 'name'), 30).page(1))\n\n\n@must_be_admin\ndef reset_pwd(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n from random import choice\n randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for\n i in range(7)])\n acc.set_password(randompass)\n messages.add_message(request, messages.SUCCESS, u'Nowe hasło to %s' % (\n randompass,))\n return redirect('/admin/accounts/%s/' % (acc.id,))\n\n\n@must_be_admin\ndef su(request, account_id):\n \"\"\"Login as this user\"\"\"\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n 
request.logout()\n request.login(acc.login)\n messages.add_message(request, messages.SUCCESS, u'Zalogowano jako %s' %\n (acc.login,))\n return redirect('/')\n\n\n@must_be_admin\ndef delete(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n if acc.login in ('demo@example.com', 'teacher@example.com',\n 'root@example.com'):\n messages.add_message(request, messages.ERROR,\n u'Nie można usunąć konta wbudowanego')\n return redirect('/admin/accounts/%s/' % (acc.id,))\n if acc.status == 1:\n pass\n messages.add_message(request, messages.SUCCESS, \n u'Konto \"%s %s\" usunięte.' % (acc.name, acc.surname))\n acc.delete()\n return redirect('/admin/accounts/')\n\n\n<mask token>\n\n\n@must_be_admin\ndef view_students(request, page='1'):\n page = int(page)\n students = User.objects.filter(status=0).order_by('surname', 'name')\n students = [x for x in students if not x.is_demo()]\n p = Paginator(students, 30)\n cpage = p.page(page)\n return render_to_response('radmin/manage_accounts_students_list.html',\n request, page=cpage)\n",
"step-4": "<mask token>\n\n\ndef klist(**kwargs):\n kwargs.update({'teachers': [x for x in User.objects.filter(status=1) if\n not x.is_demo()], 'admins': User.objects.filter(status=2)})\n return kwargs\n\n\n@must_be_admin\ndef list(request):\n return render_to_response('radmin/manage_accounts_list.html', request,\n **klist())\n\n\n@must_be_admin\ndef account(request, account_id):\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n\n class AccountBaseForm(forms.ModelForm):\n\n\n class Meta:\n model = User\n fields = ['name', 'surname', 'number']\n widgets = {'name': forms.TextInput(), 'surname': forms.TextInput()}\n if request.method == 'POST':\n form = AccountBaseForm(request.POST, instance=acc)\n if form.is_valid():\n form.save()\n messages.add_message(request, messages.SUCCESS, u'Zapisano.')\n else:\n form = AccountBaseForm(instance=acc)\n if acc.status != 0:\n return render_to_response('radmin/manage_accounts_acc.html',\n request, **klist(account=acc, selected_user_id=acc.id, form=form))\n else:\n return render_to_response('radmin/manage_accounts_students_acc.html',\n request, account=acc, selected_user_id=acc.id, form=form, page=\n Paginator(User.objects.filter(status=0).order_by('surname',\n 'name'), 30).page(1))\n\n\n@must_be_admin\ndef reset_pwd(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n from random import choice\n randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for\n i in range(7)])\n acc.set_password(randompass)\n messages.add_message(request, messages.SUCCESS, u'Nowe hasło to %s' % (\n randompass,))\n return redirect('/admin/accounts/%s/' % (acc.id,))\n\n\n@must_be_admin\ndef su(request, account_id):\n \"\"\"Login as this user\"\"\"\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n 
request.logout()\n request.login(acc.login)\n messages.add_message(request, messages.SUCCESS, u'Zalogowano jako %s' %\n (acc.login,))\n return redirect('/')\n\n\n@must_be_admin\ndef delete(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n if acc.login in ('demo@example.com', 'teacher@example.com',\n 'root@example.com'):\n messages.add_message(request, messages.ERROR,\n u'Nie można usunąć konta wbudowanego')\n return redirect('/admin/accounts/%s/' % (acc.id,))\n if acc.status == 1:\n pass\n messages.add_message(request, messages.SUCCESS, \n u'Konto \"%s %s\" usunięte.' % (acc.name, acc.surname))\n acc.delete()\n return redirect('/admin/accounts/')\n\n\n@must_be_admin\ndef create(request):\n\n\n class NewAccountForm(forms.Form):\n _CHOICE = (1, 'Nauczyciel'), (2, 'Adminstrator')\n login = forms.EmailField(label=u'E-mail')\n name = forms.CharField(label=u'Imię', required=False)\n surname = forms.CharField(label=u'Nazwisko', required=False)\n status = forms.ChoiceField(choices=_CHOICE, initial=1, label=u'Typ')\n if request.method == 'POST':\n form = NewAccountForm(request.POST)\n if form.is_valid():\n from random import choice\n randompass = ''.join([choice(\n '1234567890qwertyupasdfghjklzxcvbnm') for i in range(7)])\n u = User(login=form.cleaned_data['login'], name=form.\n cleaned_data['name'], surname=form.cleaned_data['surname'],\n status=form.cleaned_data['status'])\n u.save()\n u.set_password(randompass)\n messages.add_message(request, messages.SUCCESS, \n u'Konto stworzone. 
Nowe hasło to %s' % (randompass,))\n return redirect('/admin/accounts/%s/' % (u.id,))\n else:\n form = NewAccountForm()\n return render_to_response('radmin/manage_accounts_add.html', request,\n **klist(selected_user_id='create', form=form))\n\n\n<mask token>\n\n\n@must_be_admin\ndef view_students(request, page='1'):\n page = int(page)\n students = User.objects.filter(status=0).order_by('surname', 'name')\n students = [x for x in students if not x.is_demo()]\n p = Paginator(students, 30)\n cpage = p.page(page)\n return render_to_response('radmin/manage_accounts_students_list.html',\n request, page=cpage)\n",
"step-5": "# coding=UTF-8\n\"\"\"\nView for managing accounts\n\"\"\"\n\nfrom django.contrib import messages\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import redirect\nfrom django import forms\nfrom athena.core import render_to_response\nfrom athena.users.models import User\nfrom athena.users import must_be_admin\n\n\ndef klist(**kwargs):\n kwargs.update({\n 'teachers': [x for x in User.objects.filter(status=1) if not x.is_demo()],\n 'admins': User.objects.filter(status=2),\n })\n return kwargs\n\n\n@must_be_admin\ndef list(request):\n return render_to_response('radmin/manage_accounts_list.html', request, **klist())\n\n@must_be_admin\ndef account(request, account_id):\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n class AccountBaseForm(forms.ModelForm):\n class Meta:\n model = User\n fields = ['name', 'surname', 'number']\n widgets = {\n 'name': forms.TextInput(),\n 'surname': forms.TextInput(),\n }\n\n if request.method == 'POST':\n form = AccountBaseForm(request.POST, instance=acc)\n\n if form.is_valid():\n form.save()\n messages.add_message(request, messages.SUCCESS, u'Zapisano.')\n\n else:\n form = AccountBaseForm(instance=acc)\n\n if acc.status != 0:\n return render_to_response('radmin/manage_accounts_acc.html', request, **klist(\n account=acc,\n selected_user_id=acc.id,\n form=form))\n else:\n return render_to_response('radmin/manage_accounts_students_acc.html', request,\n account=acc,\n selected_user_id=acc.id,\n form=form,\n page=Paginator(User.objects.filter(status=0).order_by('surname', 'name'), 30).page(1))\n\n\n@must_be_admin\ndef reset_pwd(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n from random import choice\n randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for i in range(7)])\n\n acc.set_password(randompass)\n\n messages.add_message(request, 
messages.SUCCESS, u'Nowe hasło to %s' % (randompass, ))\n\n return redirect('/admin/accounts/%s/' % (acc.id, ))\n\n\n@must_be_admin\ndef su(request, account_id):\n \"\"\"Login as this user\"\"\"\n if request.method != 'POST':\n return HttpResponse(status=400)\n\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n request.logout()\n request.login(acc.login)\n\n messages.add_message(request, messages.SUCCESS, u'Zalogowano jako %s' % (acc.login, ))\n\n return redirect('/')\n\n@must_be_admin\ndef delete(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n if acc.login in ('demo@example.com', 'teacher@example.com', 'root@example.com'):\n messages.add_message(request, messages.ERROR, u'Nie można usunąć konta wbudowanego')\n return redirect('/admin/accounts/%s/' % (acc.id, ))\n\n if acc.status == 1:\n # This is a teacher. You should reparent all of it's tests\n # and groups to user to teacher@example.com\n pass\n\n messages.add_message(request, messages.SUCCESS, u'Konto \"%s %s\" usunięte.' 
% (acc.name, acc.surname))\n\n acc.delete()\n\n return redirect('/admin/accounts/')\n\n\n@must_be_admin\ndef create(request):\n\n class NewAccountForm(forms.Form):\n _CHOICE = ((1, 'Nauczyciel'), (2, 'Adminstrator'))\n login = forms.EmailField(label=u'E-mail')\n name = forms.CharField(label=u'Imię', required=False) \n surname = forms.CharField(label=u'Nazwisko', required=False)\n status = forms.ChoiceField(choices=_CHOICE, initial=1, label=u'Typ')\n\n if request.method == 'POST':\n form = NewAccountForm(request.POST)\n\n if form.is_valid():\n\n # grab a random password\n from random import choice\n randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for i in range(7)])\n\n u = User(login=form.cleaned_data['login'],\n name=form.cleaned_data['name'],\n surname=form.cleaned_data['surname'],\n status=form.cleaned_data['status'])\n u.save()\n u.set_password(randompass)\n\n messages.add_message(request, messages.SUCCESS, u'Konto stworzone. Nowe hasło to %s' % (randompass, ))\n\n return redirect('/admin/accounts/%s/' % (u.id, ))\n\n else:\n form = NewAccountForm()\n\n return render_to_response('radmin/manage_accounts_add.html', request, **klist(\n selected_user_id='create',\n form=form))\n\nfrom django.core.paginator import Paginator\n\n@must_be_admin\ndef view_students(request, page='1'):\n page = int(page)\n students = User.objects.filter(status=0).order_by('surname', 'name')\n students = [x for x in students if not x.is_demo()]\n p = Paginator(students, 30)\n\n cpage = p.page(page)\n\n return render_to_response('radmin/manage_accounts_students_list.html', request,\n page=cpage)",
"step-ids": [
3,
6,
7,
8,
10
]
}
|
[
3,
6,
7,
8,
10
] |
<|reserved_special_token_0|>
def coattention(num_embeddings):
image_input = Input(shape=(196, 512))
question_input = Input(shape=(SEQ_LENGTH,))
output = CoattentionModel(num_embeddings)(question_input, image_input)
model = Model(inputs=[question_input, image_input], outputs=output)
return model
def scheduler(epoch):
if epoch < 10:
return 0.0001
else:
return 0.0001 * tf.math.exp(0.1 * (10 - epoch))
def Train(dataset=True):
train_generator, val_generator, val_question_ids, VOCAB_SIZE = (
get_generator(dataset))
save_config(dataset)
checkpoint = ModelCheckpoint(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt',
save_weights_only=True, verbose=1)
scheduler_lr = LearningRateScheduler(scheduler, verbose=0)
earlystop_callback = EarlyStopping(monitor='val_loss', patience=3)
model = coattention(VOCAB_SIZE)
model.compile(optimizer=Adam(learning_rate=LR), loss=
'categorical_crossentropy', metrics=['accuracy'])
model.summary()
model.save_weights(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt'.format(
epoch=0))
history = model.fit(x=train_generator, epochs=EPOCHS, validation_data=
val_generator, callbacks=[checkpoint, earlystop_callback], workers=
6, use_multiprocessing=True)
with open(HISTORY_PATH, 'w') as file:
json.dump(history.history, file)
predictions = model.predict(val_generator, workers=6,
use_multiprocessing=True, verbose=1)
ans_vocab = load_ans_vocab()
result = []
for q in range(len(val_question_ids)):
ans = ans_vocab[str(predictions[q].argmax(axis=-1))]
q_id = int(val_question_ids[q])
result.append({u'answer': ans, u'question_id': q_id})
with open(PRED_PATH, 'w') as file:
json.dump(list(result), file)
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def coattention(num_embeddings):
image_input = Input(shape=(196, 512))
question_input = Input(shape=(SEQ_LENGTH,))
output = CoattentionModel(num_embeddings)(question_input, image_input)
model = Model(inputs=[question_input, image_input], outputs=output)
return model
def scheduler(epoch):
if epoch < 10:
return 0.0001
else:
return 0.0001 * tf.math.exp(0.1 * (10 - epoch))
def Train(dataset=True):
train_generator, val_generator, val_question_ids, VOCAB_SIZE = (
get_generator(dataset))
save_config(dataset)
checkpoint = ModelCheckpoint(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt',
save_weights_only=True, verbose=1)
scheduler_lr = LearningRateScheduler(scheduler, verbose=0)
earlystop_callback = EarlyStopping(monitor='val_loss', patience=3)
model = coattention(VOCAB_SIZE)
model.compile(optimizer=Adam(learning_rate=LR), loss=
'categorical_crossentropy', metrics=['accuracy'])
model.summary()
model.save_weights(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt'.format(
epoch=0))
history = model.fit(x=train_generator, epochs=EPOCHS, validation_data=
val_generator, callbacks=[checkpoint, earlystop_callback], workers=
6, use_multiprocessing=True)
with open(HISTORY_PATH, 'w') as file:
json.dump(history.history, file)
predictions = model.predict(val_generator, workers=6,
use_multiprocessing=True, verbose=1)
ans_vocab = load_ans_vocab()
result = []
for q in range(len(val_question_ids)):
ans = ans_vocab[str(predictions[q].argmax(axis=-1))]
q_id = int(val_question_ids[q])
result.append({u'answer': ans, u'question_id': q_id})
with open(PRED_PATH, 'w') as file:
json.dump(list(result), file)
return
def save_config(dataset):
if dataset == 0:
DATASET = 'English'
if dataset == 1:
DATASET = 'Google'
if dataset == 2:
DATASET = 'Targoman'
config = {'NAME': 'coattention', 'EMBEDDING': 'keras', 'DATASET':
DATASET, 'OPTIMIZER': 'Adam', 'EARLY STOPPING': 'val_loss', 'LOSS':
'categorical_crossentropy', 'DROPOUT_RATE': DROPOUT_RATE,
'EMBEDDING_DIM': EMBEDDING_DIM, 'EPOCHS': EPOCHS, 'BATCH_SIZE':
BATCH_SIZE, 'SEQ_LENGTH': SEQ_LENGTH, 'NUM_CLASSES': NUM_CLASSES}
print('save config in' + str(CONFIG_PATH))
with open(CONFIG_PATH, 'w') as file:
json.dump(config, file)
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def coattention(num_embeddings):
image_input = Input(shape=(196, 512))
question_input = Input(shape=(SEQ_LENGTH,))
output = CoattentionModel(num_embeddings)(question_input, image_input)
model = Model(inputs=[question_input, image_input], outputs=output)
return model
def scheduler(epoch):
if epoch < 10:
return 0.0001
else:
return 0.0001 * tf.math.exp(0.1 * (10 - epoch))
def Train(dataset=True):
train_generator, val_generator, val_question_ids, VOCAB_SIZE = (
get_generator(dataset))
save_config(dataset)
checkpoint = ModelCheckpoint(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt',
save_weights_only=True, verbose=1)
scheduler_lr = LearningRateScheduler(scheduler, verbose=0)
earlystop_callback = EarlyStopping(monitor='val_loss', patience=3)
model = coattention(VOCAB_SIZE)
model.compile(optimizer=Adam(learning_rate=LR), loss=
'categorical_crossentropy', metrics=['accuracy'])
model.summary()
model.save_weights(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt'.format(
epoch=0))
history = model.fit(x=train_generator, epochs=EPOCHS, validation_data=
val_generator, callbacks=[checkpoint, earlystop_callback], workers=
6, use_multiprocessing=True)
with open(HISTORY_PATH, 'w') as file:
json.dump(history.history, file)
predictions = model.predict(val_generator, workers=6,
use_multiprocessing=True, verbose=1)
ans_vocab = load_ans_vocab()
result = []
for q in range(len(val_question_ids)):
ans = ans_vocab[str(predictions[q].argmax(axis=-1))]
q_id = int(val_question_ids[q])
result.append({u'answer': ans, u'question_id': q_id})
with open(PRED_PATH, 'w') as file:
json.dump(list(result), file)
return
def save_config(dataset):
if dataset == 0:
DATASET = 'English'
if dataset == 1:
DATASET = 'Google'
if dataset == 2:
DATASET = 'Targoman'
config = {'NAME': 'coattention', 'EMBEDDING': 'keras', 'DATASET':
DATASET, 'OPTIMIZER': 'Adam', 'EARLY STOPPING': 'val_loss', 'LOSS':
'categorical_crossentropy', 'DROPOUT_RATE': DROPOUT_RATE,
'EMBEDDING_DIM': EMBEDDING_DIM, 'EPOCHS': EPOCHS, 'BATCH_SIZE':
BATCH_SIZE, 'SEQ_LENGTH': SEQ_LENGTH, 'NUM_CLASSES': NUM_CLASSES}
print('save config in' + str(CONFIG_PATH))
with open(CONFIG_PATH, 'w') as file:
json.dump(config, file)
return
Train(dataset=2)
<|reserved_special_token_1|>
import json
from constants import *
from coattention_layer import *
from prepare_generator import *
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import LearningRateScheduler, ModelCheckpoint, EarlyStopping
def coattention(num_embeddings):
image_input = Input(shape=(196, 512))
question_input = Input(shape=(SEQ_LENGTH,))
output = CoattentionModel(num_embeddings)(question_input, image_input)
model = Model(inputs=[question_input, image_input], outputs=output)
return model
def scheduler(epoch):
if epoch < 10:
return 0.0001
else:
return 0.0001 * tf.math.exp(0.1 * (10 - epoch))
def Train(dataset=True):
train_generator, val_generator, val_question_ids, VOCAB_SIZE = (
get_generator(dataset))
save_config(dataset)
checkpoint = ModelCheckpoint(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt',
save_weights_only=True, verbose=1)
scheduler_lr = LearningRateScheduler(scheduler, verbose=0)
earlystop_callback = EarlyStopping(monitor='val_loss', patience=3)
model = coattention(VOCAB_SIZE)
model.compile(optimizer=Adam(learning_rate=LR), loss=
'categorical_crossentropy', metrics=['accuracy'])
model.summary()
model.save_weights(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt'.format(
epoch=0))
history = model.fit(x=train_generator, epochs=EPOCHS, validation_data=
val_generator, callbacks=[checkpoint, earlystop_callback], workers=
6, use_multiprocessing=True)
with open(HISTORY_PATH, 'w') as file:
json.dump(history.history, file)
predictions = model.predict(val_generator, workers=6,
use_multiprocessing=True, verbose=1)
ans_vocab = load_ans_vocab()
result = []
for q in range(len(val_question_ids)):
ans = ans_vocab[str(predictions[q].argmax(axis=-1))]
q_id = int(val_question_ids[q])
result.append({u'answer': ans, u'question_id': q_id})
with open(PRED_PATH, 'w') as file:
json.dump(list(result), file)
return
def save_config(dataset):
if dataset == 0:
DATASET = 'English'
if dataset == 1:
DATASET = 'Google'
if dataset == 2:
DATASET = 'Targoman'
config = {'NAME': 'coattention', 'EMBEDDING': 'keras', 'DATASET':
DATASET, 'OPTIMIZER': 'Adam', 'EARLY STOPPING': 'val_loss', 'LOSS':
'categorical_crossentropy', 'DROPOUT_RATE': DROPOUT_RATE,
'EMBEDDING_DIM': EMBEDDING_DIM, 'EPOCHS': EPOCHS, 'BATCH_SIZE':
BATCH_SIZE, 'SEQ_LENGTH': SEQ_LENGTH, 'NUM_CLASSES': NUM_CLASSES}
print('save config in' + str(CONFIG_PATH))
with open(CONFIG_PATH, 'w') as file:
json.dump(config, file)
return
Train(dataset=2)
<|reserved_special_token_1|>
import json
from constants import *
from coattention_layer import *
from prepare_generator import *
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import LearningRateScheduler, ModelCheckpoint, EarlyStopping
def coattention(num_embeddings):
    """Build the co-attention VQA model.

    Takes a 196x512 image feature map and a question token sequence of
    length SEQ_LENGTH; returns the compiled-ready Keras Model.
    """
    img_in = Input(shape=(196, 512))
    q_in = Input(shape=(SEQ_LENGTH,))
    answer = CoattentionModel(num_embeddings)(q_in, img_in)
    return Model(inputs=[q_in, img_in], outputs=answer)
def scheduler(epoch):
    """Learning-rate schedule: constant 1e-4 for the first 10 epochs,
    then exponential decay by exp(0.1 * (10 - epoch))."""
    base_lr = 0.0001
    if epoch >= 10:
        return base_lr * tf.math.exp(0.1 * (10 - epoch))
    return base_lr
def Train(dataset=True):
    """Train the co-attention model, then dump history and val predictions.

    dataset selects the question set (0=English, 1=Google, 2=Targoman;
    the default True compares equal to 1 -- see save_config).
    Writes HISTORY_PATH (fit history) and PRED_PATH (answers per question id).
    """
    train_generator, val_generator, val_question_ids, VOCAB_SIZE = get_generator(
        dataset)
    save_config(dataset)

    checkpoint = ModelCheckpoint(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt',
                                 save_weights_only=True,
                                 verbose=1)
    scheduler_lr = LearningRateScheduler(scheduler, verbose=0)
    earlystop_callback = EarlyStopping(monitor='val_loss', patience=3)

    model = coattention(VOCAB_SIZE)
    model.compile(optimizer=Adam(learning_rate=LR),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()

    # Save the initial (epoch 0) weights using the `checkpoint_path` format.
    model.save_weights(CHECKPOINT_PATH +
                       '/cp-{epoch: 04d}.ckpt'.format(epoch=0))

    # BUGFIX: scheduler_lr was previously constructed but never passed to
    # fit(), so the learning-rate schedule silently never ran.
    history = model.fit(x=train_generator,
                        epochs=EPOCHS,
                        validation_data=val_generator,
                        callbacks=[checkpoint, scheduler_lr, earlystop_callback],
                        workers=6,
                        use_multiprocessing=True)

    # Persist the training history for later plotting/analysis.
    with open(HISTORY_PATH, 'w') as file:
        json.dump(history.history, file)

    # Predict on the validation set and map each question id to its
    # most probable answer string via the answer vocabulary.
    predictions = model.predict(val_generator,
                                workers=6,
                                use_multiprocessing=True,
                                verbose=1)
    ans_vocab = load_ans_vocab()
    result = []
    for q in range(len(val_question_ids)):
        ans = ans_vocab[str(predictions[q].argmax(axis=-1))]
        q_id = int(val_question_ids[q])
        result.append({u'answer': ans, u'question_id': q_id})
    with open(PRED_PATH, 'w') as file:
        json.dump(list(result), file)
    return
def save_config(dataset):
    """Persist the run configuration (hyper-parameters + dataset name) to CONFIG_PATH.

    dataset: 0=English, 1=Google, 2=Targoman. Raises ValueError for any
    other id (previously an unknown id left DATASET unbound -> NameError).
    """
    _DATASET_NAMES = {0: 'English', 1: 'Google', 2: 'Targoman'}
    try:
        # NOTE: True == 1 in Python, so dataset=True maps to 'Google'.
        DATASET = _DATASET_NAMES[dataset]
    except KeyError:
        raise ValueError('unknown dataset id: %r' % (dataset,))
    config = {'NAME': 'coattention',
              'EMBEDDING': 'keras',
              "DATASET": DATASET,
              "OPTIMIZER": 'Adam',
              "EARLY STOPPING": 'val_loss',
              "LOSS": 'categorical_crossentropy',
              'DROPOUT_RATE': DROPOUT_RATE,
              "EMBEDDING_DIM": EMBEDDING_DIM,
              "EPOCHS": EPOCHS,
              "BATCH_SIZE": BATCH_SIZE,
              "SEQ_LENGTH": SEQ_LENGTH,
              "NUM_CLASSES": NUM_CLASSES}
    # fixed missing space in the log message ("...inPATH" -> "...in PATH")
    print("save config in " + str(CONFIG_PATH))
    with open(CONFIG_PATH, 'w') as file:
        json.dump(config, file)
    return
if __name__ == '__main__':
    # Run training on the Targoman dataset only when executed as a script,
    # not as a side effect of importing this module.
    Train(dataset=2)
|
flexible
|
{
"blob_id": "a8d52d81ef6538e9cb8a0a9cab7cd0a778454c8e",
"index": 6424,
"step-1": "<mask token>\n\n\ndef coattention(num_embeddings):\n image_input = Input(shape=(196, 512))\n question_input = Input(shape=(SEQ_LENGTH,))\n output = CoattentionModel(num_embeddings)(question_input, image_input)\n model = Model(inputs=[question_input, image_input], outputs=output)\n return model\n\n\ndef scheduler(epoch):\n if epoch < 10:\n return 0.0001\n else:\n return 0.0001 * tf.math.exp(0.1 * (10 - epoch))\n\n\ndef Train(dataset=True):\n train_generator, val_generator, val_question_ids, VOCAB_SIZE = (\n get_generator(dataset))\n save_config(dataset)\n checkpoint = ModelCheckpoint(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt',\n save_weights_only=True, verbose=1)\n scheduler_lr = LearningRateScheduler(scheduler, verbose=0)\n earlystop_callback = EarlyStopping(monitor='val_loss', patience=3)\n model = coattention(VOCAB_SIZE)\n model.compile(optimizer=Adam(learning_rate=LR), loss=\n 'categorical_crossentropy', metrics=['accuracy'])\n model.summary()\n model.save_weights(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt'.format(\n epoch=0))\n history = model.fit(x=train_generator, epochs=EPOCHS, validation_data=\n val_generator, callbacks=[checkpoint, earlystop_callback], workers=\n 6, use_multiprocessing=True)\n with open(HISTORY_PATH, 'w') as file:\n json.dump(history.history, file)\n predictions = model.predict(val_generator, workers=6,\n use_multiprocessing=True, verbose=1)\n ans_vocab = load_ans_vocab()\n result = []\n for q in range(len(val_question_ids)):\n ans = ans_vocab[str(predictions[q].argmax(axis=-1))]\n q_id = int(val_question_ids[q])\n result.append({u'answer': ans, u'question_id': q_id})\n with open(PRED_PATH, 'w') as file:\n json.dump(list(result), file)\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef coattention(num_embeddings):\n image_input = Input(shape=(196, 512))\n question_input = Input(shape=(SEQ_LENGTH,))\n output = CoattentionModel(num_embeddings)(question_input, image_input)\n model = Model(inputs=[question_input, image_input], outputs=output)\n return model\n\n\ndef scheduler(epoch):\n if epoch < 10:\n return 0.0001\n else:\n return 0.0001 * tf.math.exp(0.1 * (10 - epoch))\n\n\ndef Train(dataset=True):\n train_generator, val_generator, val_question_ids, VOCAB_SIZE = (\n get_generator(dataset))\n save_config(dataset)\n checkpoint = ModelCheckpoint(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt',\n save_weights_only=True, verbose=1)\n scheduler_lr = LearningRateScheduler(scheduler, verbose=0)\n earlystop_callback = EarlyStopping(monitor='val_loss', patience=3)\n model = coattention(VOCAB_SIZE)\n model.compile(optimizer=Adam(learning_rate=LR), loss=\n 'categorical_crossentropy', metrics=['accuracy'])\n model.summary()\n model.save_weights(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt'.format(\n epoch=0))\n history = model.fit(x=train_generator, epochs=EPOCHS, validation_data=\n val_generator, callbacks=[checkpoint, earlystop_callback], workers=\n 6, use_multiprocessing=True)\n with open(HISTORY_PATH, 'w') as file:\n json.dump(history.history, file)\n predictions = model.predict(val_generator, workers=6,\n use_multiprocessing=True, verbose=1)\n ans_vocab = load_ans_vocab()\n result = []\n for q in range(len(val_question_ids)):\n ans = ans_vocab[str(predictions[q].argmax(axis=-1))]\n q_id = int(val_question_ids[q])\n result.append({u'answer': ans, u'question_id': q_id})\n with open(PRED_PATH, 'w') as file:\n json.dump(list(result), file)\n return\n\n\ndef save_config(dataset):\n if dataset == 0:\n DATASET = 'English'\n if dataset == 1:\n DATASET = 'Google'\n if dataset == 2:\n DATASET = 'Targoman'\n config = {'NAME': 'coattention', 'EMBEDDING': 'keras', 'DATASET':\n DATASET, 'OPTIMIZER': 'Adam', 'EARLY STOPPING': 'val_loss', 
'LOSS':\n 'categorical_crossentropy', 'DROPOUT_RATE': DROPOUT_RATE,\n 'EMBEDDING_DIM': EMBEDDING_DIM, 'EPOCHS': EPOCHS, 'BATCH_SIZE':\n BATCH_SIZE, 'SEQ_LENGTH': SEQ_LENGTH, 'NUM_CLASSES': NUM_CLASSES}\n print('save config in' + str(CONFIG_PATH))\n with open(CONFIG_PATH, 'w') as file:\n json.dump(config, file)\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef coattention(num_embeddings):\n image_input = Input(shape=(196, 512))\n question_input = Input(shape=(SEQ_LENGTH,))\n output = CoattentionModel(num_embeddings)(question_input, image_input)\n model = Model(inputs=[question_input, image_input], outputs=output)\n return model\n\n\ndef scheduler(epoch):\n if epoch < 10:\n return 0.0001\n else:\n return 0.0001 * tf.math.exp(0.1 * (10 - epoch))\n\n\ndef Train(dataset=True):\n train_generator, val_generator, val_question_ids, VOCAB_SIZE = (\n get_generator(dataset))\n save_config(dataset)\n checkpoint = ModelCheckpoint(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt',\n save_weights_only=True, verbose=1)\n scheduler_lr = LearningRateScheduler(scheduler, verbose=0)\n earlystop_callback = EarlyStopping(monitor='val_loss', patience=3)\n model = coattention(VOCAB_SIZE)\n model.compile(optimizer=Adam(learning_rate=LR), loss=\n 'categorical_crossentropy', metrics=['accuracy'])\n model.summary()\n model.save_weights(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt'.format(\n epoch=0))\n history = model.fit(x=train_generator, epochs=EPOCHS, validation_data=\n val_generator, callbacks=[checkpoint, earlystop_callback], workers=\n 6, use_multiprocessing=True)\n with open(HISTORY_PATH, 'w') as file:\n json.dump(history.history, file)\n predictions = model.predict(val_generator, workers=6,\n use_multiprocessing=True, verbose=1)\n ans_vocab = load_ans_vocab()\n result = []\n for q in range(len(val_question_ids)):\n ans = ans_vocab[str(predictions[q].argmax(axis=-1))]\n q_id = int(val_question_ids[q])\n result.append({u'answer': ans, u'question_id': q_id})\n with open(PRED_PATH, 'w') as file:\n json.dump(list(result), file)\n return\n\n\ndef save_config(dataset):\n if dataset == 0:\n DATASET = 'English'\n if dataset == 1:\n DATASET = 'Google'\n if dataset == 2:\n DATASET = 'Targoman'\n config = {'NAME': 'coattention', 'EMBEDDING': 'keras', 'DATASET':\n DATASET, 'OPTIMIZER': 'Adam', 'EARLY STOPPING': 'val_loss', 
'LOSS':\n 'categorical_crossentropy', 'DROPOUT_RATE': DROPOUT_RATE,\n 'EMBEDDING_DIM': EMBEDDING_DIM, 'EPOCHS': EPOCHS, 'BATCH_SIZE':\n BATCH_SIZE, 'SEQ_LENGTH': SEQ_LENGTH, 'NUM_CLASSES': NUM_CLASSES}\n print('save config in' + str(CONFIG_PATH))\n with open(CONFIG_PATH, 'w') as file:\n json.dump(config, file)\n return\n\n\nTrain(dataset=2)\n",
"step-4": "import json\nfrom constants import *\nfrom coattention_layer import *\nfrom prepare_generator import *\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.callbacks import LearningRateScheduler, ModelCheckpoint, EarlyStopping\n\n\ndef coattention(num_embeddings):\n image_input = Input(shape=(196, 512))\n question_input = Input(shape=(SEQ_LENGTH,))\n output = CoattentionModel(num_embeddings)(question_input, image_input)\n model = Model(inputs=[question_input, image_input], outputs=output)\n return model\n\n\ndef scheduler(epoch):\n if epoch < 10:\n return 0.0001\n else:\n return 0.0001 * tf.math.exp(0.1 * (10 - epoch))\n\n\ndef Train(dataset=True):\n train_generator, val_generator, val_question_ids, VOCAB_SIZE = (\n get_generator(dataset))\n save_config(dataset)\n checkpoint = ModelCheckpoint(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt',\n save_weights_only=True, verbose=1)\n scheduler_lr = LearningRateScheduler(scheduler, verbose=0)\n earlystop_callback = EarlyStopping(monitor='val_loss', patience=3)\n model = coattention(VOCAB_SIZE)\n model.compile(optimizer=Adam(learning_rate=LR), loss=\n 'categorical_crossentropy', metrics=['accuracy'])\n model.summary()\n model.save_weights(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt'.format(\n epoch=0))\n history = model.fit(x=train_generator, epochs=EPOCHS, validation_data=\n val_generator, callbacks=[checkpoint, earlystop_callback], workers=\n 6, use_multiprocessing=True)\n with open(HISTORY_PATH, 'w') as file:\n json.dump(history.history, file)\n predictions = model.predict(val_generator, workers=6,\n use_multiprocessing=True, verbose=1)\n ans_vocab = load_ans_vocab()\n result = []\n for q in range(len(val_question_ids)):\n ans = ans_vocab[str(predictions[q].argmax(axis=-1))]\n q_id = int(val_question_ids[q])\n result.append({u'answer': ans, u'question_id': q_id})\n with open(PRED_PATH, 'w') as file:\n 
json.dump(list(result), file)\n return\n\n\ndef save_config(dataset):\n if dataset == 0:\n DATASET = 'English'\n if dataset == 1:\n DATASET = 'Google'\n if dataset == 2:\n DATASET = 'Targoman'\n config = {'NAME': 'coattention', 'EMBEDDING': 'keras', 'DATASET':\n DATASET, 'OPTIMIZER': 'Adam', 'EARLY STOPPING': 'val_loss', 'LOSS':\n 'categorical_crossentropy', 'DROPOUT_RATE': DROPOUT_RATE,\n 'EMBEDDING_DIM': EMBEDDING_DIM, 'EPOCHS': EPOCHS, 'BATCH_SIZE':\n BATCH_SIZE, 'SEQ_LENGTH': SEQ_LENGTH, 'NUM_CLASSES': NUM_CLASSES}\n print('save config in' + str(CONFIG_PATH))\n with open(CONFIG_PATH, 'w') as file:\n json.dump(config, file)\n return\n\n\nTrain(dataset=2)\n",
"step-5": "import json\r\nfrom constants import *\r\nfrom coattention_layer import *\r\nfrom prepare_generator import *\r\nfrom tensorflow.keras.layers import Input\r\nfrom tensorflow.keras.models import Model\r\nfrom tensorflow.keras.optimizers import Adam\r\nfrom tensorflow.keras.callbacks import LearningRateScheduler, ModelCheckpoint, EarlyStopping\r\n\r\n\r\ndef coattention(num_embeddings):\r\n image_input = Input(shape=(196, 512))\r\n question_input = Input(shape=(SEQ_LENGTH,))\r\n\r\n output = CoattentionModel(num_embeddings)(question_input, image_input)\r\n\r\n model = Model(inputs=[question_input, image_input], outputs=output)\r\n\r\n return model\r\n\r\n\r\ndef scheduler(epoch):\r\n if epoch < 10:\r\n return 0.0001\r\n else:\r\n return 0.0001 * tf.math.exp(0.1 * (10 - epoch))\r\n\r\n\r\ndef Train(dataset=True):\r\n\r\n train_generator, val_generator, val_question_ids, VOCAB_SIZE = get_generator(\r\n dataset)\r\n\r\n save_config(dataset)\r\n\r\n checkpoint = ModelCheckpoint(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt',\r\n save_weights_only=True,\r\n verbose=1)\r\n\r\n scheduler_lr = LearningRateScheduler(scheduler, verbose=0)\r\n earlystop_callback = EarlyStopping(monitor='val_loss', patience=3)\r\n\r\n model = coattention(VOCAB_SIZE)\r\n\r\n model.compile(optimizer=Adam(learning_rate=LR),\r\n loss='categorical_crossentropy',\r\n metrics=['accuracy'])\r\n\r\n model.summary()\r\n\r\n # Save the weights using the `checkpoint_path` format\r\n model.save_weights(CHECKPOINT_PATH +\r\n '/cp-{epoch: 04d}.ckpt'.format(epoch=0))\r\n\r\n history = model.fit(x=train_generator,\r\n epochs=EPOCHS,\r\n validation_data=val_generator,\r\n callbacks=[checkpoint, earlystop_callback],\r\n workers=6,\r\n use_multiprocessing=True)\r\n\r\n # save history\r\n with open(HISTORY_PATH, 'w') as file:\r\n json.dump(history.history, file)\r\n\r\n # prediction\r\n predictions = model.predict(val_generator,\r\n workers=6,\r\n use_multiprocessing=True,\r\n verbose=1)\r\n\r\n ans_vocab = 
load_ans_vocab()\r\n\r\n result = []\r\n for q in range(len(val_question_ids)):\r\n ans = ans_vocab[str(predictions[q].argmax(axis=-1))]\r\n q_id = int(val_question_ids[q])\r\n result.append({u'answer': ans, u'question_id': q_id})\r\n\r\n with open(PRED_PATH, 'w') as file:\r\n json.dump(list(result), file)\r\n\r\n return\r\n\r\n\r\ndef save_config(dataset):\r\n if dataset == 0:\r\n DATASET = 'English'\r\n if dataset == 1:\r\n DATASET = 'Google'\r\n if dataset == 2:\r\n DATASET = 'Targoman'\r\n\r\n config = {'NAME': 'coattention',\r\n 'EMBEDDING': 'keras',\r\n \"DATASET\": DATASET,\r\n \"OPTIMIZER\": 'Adam',\r\n \"EARLY STOPPING\": 'val_loss',\r\n \"LOSS\": 'categorical_crossentropy',\r\n 'DROPOUT_RATE': DROPOUT_RATE,\r\n \"EMBEDDING_DIM\": EMBEDDING_DIM,\r\n \"EPOCHS\": EPOCHS,\r\n \"BATCH_SIZE\": BATCH_SIZE,\r\n \"SEQ_LENGTH\": SEQ_LENGTH,\r\n \"NUM_CLASSES\": NUM_CLASSES}\r\n\r\n print(\"save config in\" + str(CONFIG_PATH))\r\n with open(CONFIG_PATH, 'w') as file:\r\n json.dump(config, file)\r\n\r\n return\r\n\r\n\r\nTrain(dataset=2)\r\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class ProductsListSteps:
@given('Prepare classes products list')
def prepare_class(context):
context.login = LoginPage(context.driver)
context.products = ProductsPage(context.driver)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProductsListSteps:
@given('Prepare classes products list')
def prepare_class(context):
context.login = LoginPage(context.driver)
context.products = ProductsPage(context.driver)
@when('Sort by price low to high')
def sort_low_to_high(context):
context.products.sort_price_low_to_high()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProductsListSteps:
@given('Prepare classes products list')
def prepare_class(context):
context.login = LoginPage(context.driver)
context.products = ProductsPage(context.driver)
@when('Sort by price low to high')
def sort_low_to_high(context):
context.products.sort_price_low_to_high()
@then('Validate price order')
def validate_price_order(context):
context.products.validate_price_order()
<|reserved_special_token_1|>
from behave import given, when, then
from pages.LoginPage import LoginPage
from pages.ProductsPage import ProductsPage
class ProductsListSteps:
@given('Prepare classes products list')
def prepare_class(context):
context.login = LoginPage(context.driver)
context.products = ProductsPage(context.driver)
@when('Sort by price low to high')
def sort_low_to_high(context):
context.products.sort_price_low_to_high()
@then('Validate price order')
def validate_price_order(context):
context.products.validate_price_order()
<|reserved_special_token_1|>
from behave import given, when, then
from pages.LoginPage import LoginPage
from pages.ProductsPage import ProductsPage
class ProductsListSteps:
@given("Prepare classes products list")
def prepare_class(context):
context.login = LoginPage(context.driver)
context.products = ProductsPage(context.driver)
@when("Sort by price low to high")
def sort_low_to_high(context):
context.products.sort_price_low_to_high()
@then("Validate price order")
def validate_price_order(context):
context.products.validate_price_order()
|
flexible
|
{
"blob_id": "a74a880039bad030d665e001da74075bd61fcc23",
"index": 1593,
"step-1": "<mask token>\n\n\nclass ProductsListSteps:\n\n @given('Prepare classes products list')\n def prepare_class(context):\n context.login = LoginPage(context.driver)\n context.products = ProductsPage(context.driver)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ProductsListSteps:\n\n @given('Prepare classes products list')\n def prepare_class(context):\n context.login = LoginPage(context.driver)\n context.products = ProductsPage(context.driver)\n\n @when('Sort by price low to high')\n def sort_low_to_high(context):\n context.products.sort_price_low_to_high()\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ProductsListSteps:\n\n @given('Prepare classes products list')\n def prepare_class(context):\n context.login = LoginPage(context.driver)\n context.products = ProductsPage(context.driver)\n\n @when('Sort by price low to high')\n def sort_low_to_high(context):\n context.products.sort_price_low_to_high()\n\n @then('Validate price order')\n def validate_price_order(context):\n context.products.validate_price_order()\n",
"step-4": "from behave import given, when, then\nfrom pages.LoginPage import LoginPage\nfrom pages.ProductsPage import ProductsPage\n\n\nclass ProductsListSteps:\n\n @given('Prepare classes products list')\n def prepare_class(context):\n context.login = LoginPage(context.driver)\n context.products = ProductsPage(context.driver)\n\n @when('Sort by price low to high')\n def sort_low_to_high(context):\n context.products.sort_price_low_to_high()\n\n @then('Validate price order')\n def validate_price_order(context):\n context.products.validate_price_order()\n",
"step-5": "from behave import given, when, then\nfrom pages.LoginPage import LoginPage\nfrom pages.ProductsPage import ProductsPage\n\nclass ProductsListSteps:\n\n @given(\"Prepare classes products list\")\n def prepare_class(context):\n context.login = LoginPage(context.driver)\n context.products = ProductsPage(context.driver)\n\n @when(\"Sort by price low to high\")\n def sort_low_to_high(context):\n context.products.sort_price_low_to_high()\n\n @then(\"Validate price order\")\n def validate_price_order(context):\n context.products.validate_price_order()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def fit_linear_model(x, y):
logger.info('Using Lasso')
lr = Lasso(alpha=0.01)
lr.fit(x, y)
return SharedScalerModel(lr)
class SharedScalerModel:
def __init__(self, lm):
self.lm = lm
self.coef_ = torch.unsqueeze(torch.tensor(lm.coef_, dtype=torch.
float64), dim=0)
self.intercept_ = lm.intercept_
def predict(self, X):
return torch.tensor(self.lm.predict(X), dtype=torch.float64)
def train(c_1, c_2, id2pn, dataset):
if not os.path.exists('../model'):
os.mkdir('../model')
mdl_name = '{}_{}'.format(c_1, c_2)
logger.info('Train the model: {} {}'.format(mdl_name, id2pn))
train_data, train_labels = load_data_new(c_1, c_2, train=True, dataset=
dataset)
train_data = train_data.view(-1, IMG_SIZE * IMG_SIZE).cpu()
train_labels = torch.tensor([id2pn[i.item()] for i in train_labels],
dtype=torch.float64)
test_data, test_labels = load_data_new(c_1, c_2, train=False, dataset=
dataset)
test_data = test_data.view(-1, IMG_SIZE * IMG_SIZE)
test_labels = np.array([id2pn[i.item()] for i in test_labels])
counter = defaultdict(int)
for i in train_labels:
counter[i.item()] += 1
train_pos, train_neg = counter[1], counter[0]
counter = defaultdict(int)
for i in test_labels:
counter[i.item()] += 1
test_pos, test_neg = counter[1], counter[0]
logger.info('Train_labels {} '.format(train_labels))
logger.info('Test_labels {} '.format(test_labels))
logger.info(
"""
======================================================================
Data Information
& \\# Positive & \\# Negative & \\# Positive & \\# Negative \\
\\hline
{} {} & {} & {} & {} & {} \\
\\hline
======================================================================
"""
.format(c_1, c_2, train_pos, train_neg, test_pos, test_neg))
logger.info('train_labels {}'.format(train_labels))
lmt = LinearModelTree(MIN_NODE_SIZE, fit_linear_model,
min_split_improvement=MIN_SPLIT_IMPROVEMENT)
lmt.build_tree(train_data, train_labels)
logger.info('Finish building trees')
lmt.merge_lrs(lmt.root)
logger.info('Finish merging trees')
path_manager = PathManager(mdl_name='LMT', c_1=c_1, c_2=c_2, dataset=
dataset, if_train_set=None)
model_path = path_manager.mdl_path()
with open(model_path, 'wb') as f:
pickle.dump(lmt, f)
lmt = load_model(c_1, c_2, dataset=dataset, model_name='LMT')
train_data = train_data.to(config.DEVICE)
_test('LMT', lmt, train_data, train_labels, 'Trainset')
_test('LMT', lmt, test_data, test_labels, 'Testset')
def _test(mdl_name, lmt, test_data, test_labels, if_train):
y_pred = lmt.predict_positive(test_data)
correct = 0
for i in range(len(test_labels)):
p_label = 1 if y_pred[i] > 0.5 else 0
logger.debug('p_label: {} Prob: {} train_label: {}'.format(p_label,
y_pred[i], test_labels[i]))
if p_label == test_labels[i]:
correct += 1
precision = correct * 1.0 / len(test_labels)
logger.info('[{} dataset] Model: {} Accuracy: {}/{}={}'.format(if_train,
mdl_name, correct, len(test_labels), precision))
def load(model_path):
lmt = pickle.load(open(model_path, 'rb'))
return lmt
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fit_linear_model(x, y):
logger.info('Using Lasso')
lr = Lasso(alpha=0.01)
lr.fit(x, y)
return SharedScalerModel(lr)
class SharedScalerModel:
def __init__(self, lm):
self.lm = lm
self.coef_ = torch.unsqueeze(torch.tensor(lm.coef_, dtype=torch.
float64), dim=0)
self.intercept_ = lm.intercept_
def predict(self, X):
return torch.tensor(self.lm.predict(X), dtype=torch.float64)
def train(c_1, c_2, id2pn, dataset):
if not os.path.exists('../model'):
os.mkdir('../model')
mdl_name = '{}_{}'.format(c_1, c_2)
logger.info('Train the model: {} {}'.format(mdl_name, id2pn))
train_data, train_labels = load_data_new(c_1, c_2, train=True, dataset=
dataset)
train_data = train_data.view(-1, IMG_SIZE * IMG_SIZE).cpu()
train_labels = torch.tensor([id2pn[i.item()] for i in train_labels],
dtype=torch.float64)
test_data, test_labels = load_data_new(c_1, c_2, train=False, dataset=
dataset)
test_data = test_data.view(-1, IMG_SIZE * IMG_SIZE)
test_labels = np.array([id2pn[i.item()] for i in test_labels])
counter = defaultdict(int)
for i in train_labels:
counter[i.item()] += 1
train_pos, train_neg = counter[1], counter[0]
counter = defaultdict(int)
for i in test_labels:
counter[i.item()] += 1
test_pos, test_neg = counter[1], counter[0]
logger.info('Train_labels {} '.format(train_labels))
logger.info('Test_labels {} '.format(test_labels))
logger.info(
"""
======================================================================
Data Information
& \\# Positive & \\# Negative & \\# Positive & \\# Negative \\
\\hline
{} {} & {} & {} & {} & {} \\
\\hline
======================================================================
"""
.format(c_1, c_2, train_pos, train_neg, test_pos, test_neg))
logger.info('train_labels {}'.format(train_labels))
lmt = LinearModelTree(MIN_NODE_SIZE, fit_linear_model,
min_split_improvement=MIN_SPLIT_IMPROVEMENT)
lmt.build_tree(train_data, train_labels)
logger.info('Finish building trees')
lmt.merge_lrs(lmt.root)
logger.info('Finish merging trees')
path_manager = PathManager(mdl_name='LMT', c_1=c_1, c_2=c_2, dataset=
dataset, if_train_set=None)
model_path = path_manager.mdl_path()
with open(model_path, 'wb') as f:
pickle.dump(lmt, f)
lmt = load_model(c_1, c_2, dataset=dataset, model_name='LMT')
train_data = train_data.to(config.DEVICE)
_test('LMT', lmt, train_data, train_labels, 'Trainset')
_test('LMT', lmt, test_data, test_labels, 'Testset')
def _test(mdl_name, lmt, test_data, test_labels, if_train):
y_pred = lmt.predict_positive(test_data)
correct = 0
for i in range(len(test_labels)):
p_label = 1 if y_pred[i] > 0.5 else 0
logger.debug('p_label: {} Prob: {} train_label: {}'.format(p_label,
y_pred[i], test_labels[i]))
if p_label == test_labels[i]:
correct += 1
precision = correct * 1.0 / len(test_labels)
logger.info('[{} dataset] Model: {} Accuracy: {}/{}={}'.format(if_train,
mdl_name, correct, len(test_labels), precision))
def load(model_path):
lmt = pickle.load(open(model_path, 'rb'))
return lmt
def test_1():
mdl = load_model('Pullover', 'Coat', 'FMNIST', 'LMT')
images, labels = load_data_new('Pullover', 'Coat', train=False, dataset
='FMNIST')
images = images.view(-1, 784)
forward = mdl.forward(images)
logger.info('forward.size() => {}'.format(forward.size()))
prob = mdl.predict_positive(images)
logger.info('prob.size() => {}'.format(prob.size()))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger = getLogger(__name__)
IMG_SIZE = 28
MIN_NODE_SIZE = 15
MIN_SPLIT_IMPROVEMENT = 10
def fit_linear_model(x, y):
logger.info('Using Lasso')
lr = Lasso(alpha=0.01)
lr.fit(x, y)
return SharedScalerModel(lr)
class SharedScalerModel:
def __init__(self, lm):
self.lm = lm
self.coef_ = torch.unsqueeze(torch.tensor(lm.coef_, dtype=torch.
float64), dim=0)
self.intercept_ = lm.intercept_
def predict(self, X):
return torch.tensor(self.lm.predict(X), dtype=torch.float64)
def train(c_1, c_2, id2pn, dataset):
if not os.path.exists('../model'):
os.mkdir('../model')
mdl_name = '{}_{}'.format(c_1, c_2)
logger.info('Train the model: {} {}'.format(mdl_name, id2pn))
train_data, train_labels = load_data_new(c_1, c_2, train=True, dataset=
dataset)
train_data = train_data.view(-1, IMG_SIZE * IMG_SIZE).cpu()
train_labels = torch.tensor([id2pn[i.item()] for i in train_labels],
dtype=torch.float64)
test_data, test_labels = load_data_new(c_1, c_2, train=False, dataset=
dataset)
test_data = test_data.view(-1, IMG_SIZE * IMG_SIZE)
test_labels = np.array([id2pn[i.item()] for i in test_labels])
counter = defaultdict(int)
for i in train_labels:
counter[i.item()] += 1
train_pos, train_neg = counter[1], counter[0]
counter = defaultdict(int)
for i in test_labels:
counter[i.item()] += 1
test_pos, test_neg = counter[1], counter[0]
logger.info('Train_labels {} '.format(train_labels))
logger.info('Test_labels {} '.format(test_labels))
logger.info(
"""
======================================================================
Data Information
& \\# Positive & \\# Negative & \\# Positive & \\# Negative \\
\\hline
{} {} & {} & {} & {} & {} \\
\\hline
======================================================================
"""
.format(c_1, c_2, train_pos, train_neg, test_pos, test_neg))
logger.info('train_labels {}'.format(train_labels))
lmt = LinearModelTree(MIN_NODE_SIZE, fit_linear_model,
min_split_improvement=MIN_SPLIT_IMPROVEMENT)
lmt.build_tree(train_data, train_labels)
logger.info('Finish building trees')
lmt.merge_lrs(lmt.root)
logger.info('Finish merging trees')
path_manager = PathManager(mdl_name='LMT', c_1=c_1, c_2=c_2, dataset=
dataset, if_train_set=None)
model_path = path_manager.mdl_path()
with open(model_path, 'wb') as f:
pickle.dump(lmt, f)
lmt = load_model(c_1, c_2, dataset=dataset, model_name='LMT')
train_data = train_data.to(config.DEVICE)
_test('LMT', lmt, train_data, train_labels, 'Trainset')
_test('LMT', lmt, test_data, test_labels, 'Testset')
def _test(mdl_name, lmt, test_data, test_labels, if_train):
y_pred = lmt.predict_positive(test_data)
correct = 0
for i in range(len(test_labels)):
p_label = 1 if y_pred[i] > 0.5 else 0
logger.debug('p_label: {} Prob: {} train_label: {}'.format(p_label,
y_pred[i], test_labels[i]))
if p_label == test_labels[i]:
correct += 1
precision = correct * 1.0 / len(test_labels)
logger.info('[{} dataset] Model: {} Accuracy: {}/{}={}'.format(if_train,
mdl_name, correct, len(test_labels), precision))
def load(model_path):
lmt = pickle.load(open(model_path, 'rb'))
return lmt
def test_1():
mdl = load_model('Pullover', 'Coat', 'FMNIST', 'LMT')
images, labels = load_data_new('Pullover', 'Coat', train=False, dataset
='FMNIST')
images = images.view(-1, 784)
forward = mdl.forward(images)
logger.info('forward.size() => {}'.format(forward.size()))
prob = mdl.predict_positive(images)
logger.info('prob.size() => {}'.format(prob.size()))
if __name__ == '__main__':
test_1()
<|reserved_special_token_1|>
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import defaultdict
import os
import torch
import numpy as np
import pickle
from sklearn.linear_model import Ridge, Lasso
from biplnn.log import getLogger
from biplnn.utils import load_data_new, load_model, PathManager, load_data_new
from biplnn.mdl.lmt import LinearModelTree
from biplnn import config
logger = getLogger(__name__)
IMG_SIZE = 28
MIN_NODE_SIZE = 15
MIN_SPLIT_IMPROVEMENT = 10
def fit_linear_model(x, y):
logger.info('Using Lasso')
lr = Lasso(alpha=0.01)
lr.fit(x, y)
return SharedScalerModel(lr)
class SharedScalerModel:
def __init__(self, lm):
self.lm = lm
self.coef_ = torch.unsqueeze(torch.tensor(lm.coef_, dtype=torch.
float64), dim=0)
self.intercept_ = lm.intercept_
def predict(self, X):
return torch.tensor(self.lm.predict(X), dtype=torch.float64)
def train(c_1, c_2, id2pn, dataset):
if not os.path.exists('../model'):
os.mkdir('../model')
mdl_name = '{}_{}'.format(c_1, c_2)
logger.info('Train the model: {} {}'.format(mdl_name, id2pn))
train_data, train_labels = load_data_new(c_1, c_2, train=True, dataset=
dataset)
train_data = train_data.view(-1, IMG_SIZE * IMG_SIZE).cpu()
train_labels = torch.tensor([id2pn[i.item()] for i in train_labels],
dtype=torch.float64)
test_data, test_labels = load_data_new(c_1, c_2, train=False, dataset=
dataset)
test_data = test_data.view(-1, IMG_SIZE * IMG_SIZE)
test_labels = np.array([id2pn[i.item()] for i in test_labels])
counter = defaultdict(int)
for i in train_labels:
counter[i.item()] += 1
train_pos, train_neg = counter[1], counter[0]
counter = defaultdict(int)
for i in test_labels:
counter[i.item()] += 1
test_pos, test_neg = counter[1], counter[0]
logger.info('Train_labels {} '.format(train_labels))
logger.info('Test_labels {} '.format(test_labels))
logger.info(
"""
======================================================================
Data Information
& \\# Positive & \\# Negative & \\# Positive & \\# Negative \\
\\hline
{} {} & {} & {} & {} & {} \\
\\hline
======================================================================
"""
.format(c_1, c_2, train_pos, train_neg, test_pos, test_neg))
logger.info('train_labels {}'.format(train_labels))
lmt = LinearModelTree(MIN_NODE_SIZE, fit_linear_model,
min_split_improvement=MIN_SPLIT_IMPROVEMENT)
lmt.build_tree(train_data, train_labels)
logger.info('Finish building trees')
lmt.merge_lrs(lmt.root)
logger.info('Finish merging trees')
path_manager = PathManager(mdl_name='LMT', c_1=c_1, c_2=c_2, dataset=
dataset, if_train_set=None)
model_path = path_manager.mdl_path()
with open(model_path, 'wb') as f:
pickle.dump(lmt, f)
lmt = load_model(c_1, c_2, dataset=dataset, model_name='LMT')
train_data = train_data.to(config.DEVICE)
_test('LMT', lmt, train_data, train_labels, 'Trainset')
_test('LMT', lmt, test_data, test_labels, 'Testset')
def _test(mdl_name, lmt, test_data, test_labels, if_train):
y_pred = lmt.predict_positive(test_data)
correct = 0
for i in range(len(test_labels)):
p_label = 1 if y_pred[i] > 0.5 else 0
logger.debug('p_label: {} Prob: {} train_label: {}'.format(p_label,
y_pred[i], test_labels[i]))
if p_label == test_labels[i]:
correct += 1
precision = correct * 1.0 / len(test_labels)
logger.info('[{} dataset] Model: {} Accuracy: {}/{}={}'.format(if_train,
mdl_name, correct, len(test_labels), precision))
def load(model_path):
lmt = pickle.load(open(model_path, 'rb'))
return lmt
def test_1():
mdl = load_model('Pullover', 'Coat', 'FMNIST', 'LMT')
images, labels = load_data_new('Pullover', 'Coat', train=False, dataset
='FMNIST')
images = images.view(-1, 784)
forward = mdl.forward(images)
logger.info('forward.size() => {}'.format(forward.size()))
prob = mdl.predict_positive(images)
logger.info('prob.size() => {}'.format(prob.size()))
if __name__ == '__main__':
test_1()
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import defaultdict
import os
import torch
import numpy as np
import pickle
from sklearn.linear_model import Ridge, Lasso
from biplnn.log import getLogger
from biplnn.utils import load_data_new, load_model, PathManager, load_data_new
from biplnn.mdl.lmt import LinearModelTree
from biplnn import config
logger = getLogger(__name__)
IMG_SIZE = 28
MIN_NODE_SIZE = 15
MIN_SPLIT_IMPROVEMENT = 10
def fit_linear_model(x, y):
logger.info("Using Lasso")
lr = Lasso(alpha=0.01)
lr.fit(x, y)
return SharedScalerModel(lr)
class SharedScalerModel:
def __init__(self, lm):
self.lm = lm
self.coef_ = torch.unsqueeze(torch.tensor(lm.coef_, dtype=torch.float64), dim=0)
self.intercept_ = lm.intercept_
def predict(self, X):
return torch.tensor(self.lm.predict(X), dtype=torch.float64)
def train(c_1, c_2, id2pn, dataset):
if not os.path.exists("../model"):
os.mkdir("../model")
mdl_name = "{}_{}".format(c_1, c_2)
logger.info("Train the model: {} {}".format(mdl_name, id2pn))
train_data, train_labels = load_data_new(c_1, c_2, train=True, dataset=dataset)
train_data = train_data.view(-1, IMG_SIZE * IMG_SIZE).cpu()
train_labels = torch.tensor([id2pn[i.item()] for i in train_labels], dtype=torch.float64)
test_data, test_labels = load_data_new(c_1, c_2, train=False, dataset=dataset)
test_data = test_data.view(-1, IMG_SIZE * IMG_SIZE)
test_labels = np.array([id2pn[i.item()] for i in test_labels])
counter = defaultdict(int)
for i in train_labels:
counter[i.item()] += 1
train_pos, train_neg = counter[1], counter[0]
counter = defaultdict(int)
for i in test_labels:
counter[i.item()] += 1
test_pos, test_neg = counter[1], counter[0]
logger.info("Train_labels {} ".format(train_labels))
logger.info("Test_labels {} ".format(test_labels))
logger.info("""
======================================================================
Data Information
& \# Positive & \# Negative & \# Positive & \# Negative \\
\hline
{} {} & {} & {} & {} & {} \\
\hline
======================================================================
""".format(c_1, c_2, train_pos, train_neg, test_pos, test_neg))
logger.info("train_labels {}".format(train_labels))
lmt = LinearModelTree(MIN_NODE_SIZE, fit_linear_model, min_split_improvement=MIN_SPLIT_IMPROVEMENT)
lmt.build_tree(train_data, train_labels)
logger.info("Finish building trees")
lmt.merge_lrs(lmt.root)
logger.info("Finish merging trees")
path_manager = PathManager(mdl_name="LMT", c_1=c_1, c_2=c_2, dataset=dataset, if_train_set=None)
model_path = path_manager.mdl_path()
with open(model_path, "wb") as f:
pickle.dump(lmt, f)
lmt = load_model(c_1, c_2, dataset=dataset, model_name="LMT")
train_data = train_data.to(config.DEVICE)
_test("LMT", lmt, train_data, train_labels, "Trainset")
_test("LMT", lmt, test_data, test_labels, "Testset")
def _test(mdl_name, lmt, test_data, test_labels, if_train):
y_pred = lmt.predict_positive(test_data)
correct = 0
for i in range(len(test_labels)):
p_label = 1 if y_pred[i] > 0.5 else 0
logger.debug("p_label: {} Prob: {} train_label: {}".format(p_label, y_pred[i], test_labels[i]))
if p_label == test_labels[i]:
correct += 1
precision = correct * 1.0 / len(test_labels)
logger.info("[{} dataset] Model: {} Accuracy: {}/{}={}".format(if_train, mdl_name, correct, len(test_labels), precision))
def load(model_path):
    """Load a pickled model from *model_path*.

    Args:
        model_path: filesystem path to a pickle file written by train().

    Returns:
        The unpickled model object.
    """
    # Context manager closes the handle even if pickle.load raises;
    # the original `pickle.load(open(...))` leaked the file object.
    # NOTE(review): pickle.load is unsafe on untrusted files -- only use
    # with model files this project wrote itself.
    with open(model_path, "rb") as f:
        return pickle.load(f)
def test_1():
    """Smoke-test a saved Pullover/Coat LMT model on the FMNIST test split."""
    model = load_model("Pullover", "Coat", "FMNIST", "LMT")
    images, _ = load_data_new("Pullover", "Coat", train=False, dataset="FMNIST")
    flat = images.view(-1, 784)  # 28*28 FMNIST pixels per row
    out = model.forward(flat)
    logger.info("forward.size() => {}".format(out.size()))
    positives = model.predict_positive(flat)
    logger.info("prob.size() => {}".format(positives.size()))
if __name__ == '__main__':
    # Entry point: run the smoke test against the saved FMNIST model.
    # Earlier entry points, kept for reference:
    # main()
    # train_main("Pullover", "Coat")
    # test("Pullover", "Coat", FMNIST.id2pn_label(FMNIST.str2id("Pullover"), FMNIST.str2id("Coat")))
    test_1()
|
flexible
|
{
"blob_id": "9f86ff37d3a72364b5bd83e425d8151136c07dd3",
"index": 6294,
"step-1": "<mask token>\n\n\ndef fit_linear_model(x, y):\n logger.info('Using Lasso')\n lr = Lasso(alpha=0.01)\n lr.fit(x, y)\n return SharedScalerModel(lr)\n\n\nclass SharedScalerModel:\n\n def __init__(self, lm):\n self.lm = lm\n self.coef_ = torch.unsqueeze(torch.tensor(lm.coef_, dtype=torch.\n float64), dim=0)\n self.intercept_ = lm.intercept_\n\n def predict(self, X):\n return torch.tensor(self.lm.predict(X), dtype=torch.float64)\n\n\ndef train(c_1, c_2, id2pn, dataset):\n if not os.path.exists('../model'):\n os.mkdir('../model')\n mdl_name = '{}_{}'.format(c_1, c_2)\n logger.info('Train the model: {} {}'.format(mdl_name, id2pn))\n train_data, train_labels = load_data_new(c_1, c_2, train=True, dataset=\n dataset)\n train_data = train_data.view(-1, IMG_SIZE * IMG_SIZE).cpu()\n train_labels = torch.tensor([id2pn[i.item()] for i in train_labels],\n dtype=torch.float64)\n test_data, test_labels = load_data_new(c_1, c_2, train=False, dataset=\n dataset)\n test_data = test_data.view(-1, IMG_SIZE * IMG_SIZE)\n test_labels = np.array([id2pn[i.item()] for i in test_labels])\n counter = defaultdict(int)\n for i in train_labels:\n counter[i.item()] += 1\n train_pos, train_neg = counter[1], counter[0]\n counter = defaultdict(int)\n for i in test_labels:\n counter[i.item()] += 1\n test_pos, test_neg = counter[1], counter[0]\n logger.info('Train_labels {} '.format(train_labels))\n logger.info('Test_labels {} '.format(test_labels))\n logger.info(\n \"\"\"\n ======================================================================\n Data Information\n & \\\\# Positive & \\\\# Negative & \\\\# Positive & \\\\# Negative \\\\\n \\\\hline\n {} {} & {} & {} & {} & {} \\\\\n \\\\hline\n ======================================================================\n \"\"\"\n .format(c_1, c_2, train_pos, train_neg, test_pos, test_neg))\n logger.info('train_labels {}'.format(train_labels))\n lmt = LinearModelTree(MIN_NODE_SIZE, fit_linear_model,\n min_split_improvement=MIN_SPLIT_IMPROVEMENT)\n 
lmt.build_tree(train_data, train_labels)\n logger.info('Finish building trees')\n lmt.merge_lrs(lmt.root)\n logger.info('Finish merging trees')\n path_manager = PathManager(mdl_name='LMT', c_1=c_1, c_2=c_2, dataset=\n dataset, if_train_set=None)\n model_path = path_manager.mdl_path()\n with open(model_path, 'wb') as f:\n pickle.dump(lmt, f)\n lmt = load_model(c_1, c_2, dataset=dataset, model_name='LMT')\n train_data = train_data.to(config.DEVICE)\n _test('LMT', lmt, train_data, train_labels, 'Trainset')\n _test('LMT', lmt, test_data, test_labels, 'Testset')\n\n\ndef _test(mdl_name, lmt, test_data, test_labels, if_train):\n y_pred = lmt.predict_positive(test_data)\n correct = 0\n for i in range(len(test_labels)):\n p_label = 1 if y_pred[i] > 0.5 else 0\n logger.debug('p_label: {} Prob: {} train_label: {}'.format(p_label,\n y_pred[i], test_labels[i]))\n if p_label == test_labels[i]:\n correct += 1\n precision = correct * 1.0 / len(test_labels)\n logger.info('[{} dataset] Model: {} Accuracy: {}/{}={}'.format(if_train,\n mdl_name, correct, len(test_labels), precision))\n\n\ndef load(model_path):\n lmt = pickle.load(open(model_path, 'rb'))\n return lmt\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef fit_linear_model(x, y):\n logger.info('Using Lasso')\n lr = Lasso(alpha=0.01)\n lr.fit(x, y)\n return SharedScalerModel(lr)\n\n\nclass SharedScalerModel:\n\n def __init__(self, lm):\n self.lm = lm\n self.coef_ = torch.unsqueeze(torch.tensor(lm.coef_, dtype=torch.\n float64), dim=0)\n self.intercept_ = lm.intercept_\n\n def predict(self, X):\n return torch.tensor(self.lm.predict(X), dtype=torch.float64)\n\n\ndef train(c_1, c_2, id2pn, dataset):\n if not os.path.exists('../model'):\n os.mkdir('../model')\n mdl_name = '{}_{}'.format(c_1, c_2)\n logger.info('Train the model: {} {}'.format(mdl_name, id2pn))\n train_data, train_labels = load_data_new(c_1, c_2, train=True, dataset=\n dataset)\n train_data = train_data.view(-1, IMG_SIZE * IMG_SIZE).cpu()\n train_labels = torch.tensor([id2pn[i.item()] for i in train_labels],\n dtype=torch.float64)\n test_data, test_labels = load_data_new(c_1, c_2, train=False, dataset=\n dataset)\n test_data = test_data.view(-1, IMG_SIZE * IMG_SIZE)\n test_labels = np.array([id2pn[i.item()] for i in test_labels])\n counter = defaultdict(int)\n for i in train_labels:\n counter[i.item()] += 1\n train_pos, train_neg = counter[1], counter[0]\n counter = defaultdict(int)\n for i in test_labels:\n counter[i.item()] += 1\n test_pos, test_neg = counter[1], counter[0]\n logger.info('Train_labels {} '.format(train_labels))\n logger.info('Test_labels {} '.format(test_labels))\n logger.info(\n \"\"\"\n ======================================================================\n Data Information\n & \\\\# Positive & \\\\# Negative & \\\\# Positive & \\\\# Negative \\\\\n \\\\hline\n {} {} & {} & {} & {} & {} \\\\\n \\\\hline\n ======================================================================\n \"\"\"\n .format(c_1, c_2, train_pos, train_neg, test_pos, test_neg))\n logger.info('train_labels {}'.format(train_labels))\n lmt = LinearModelTree(MIN_NODE_SIZE, fit_linear_model,\n min_split_improvement=MIN_SPLIT_IMPROVEMENT)\n 
lmt.build_tree(train_data, train_labels)\n logger.info('Finish building trees')\n lmt.merge_lrs(lmt.root)\n logger.info('Finish merging trees')\n path_manager = PathManager(mdl_name='LMT', c_1=c_1, c_2=c_2, dataset=\n dataset, if_train_set=None)\n model_path = path_manager.mdl_path()\n with open(model_path, 'wb') as f:\n pickle.dump(lmt, f)\n lmt = load_model(c_1, c_2, dataset=dataset, model_name='LMT')\n train_data = train_data.to(config.DEVICE)\n _test('LMT', lmt, train_data, train_labels, 'Trainset')\n _test('LMT', lmt, test_data, test_labels, 'Testset')\n\n\ndef _test(mdl_name, lmt, test_data, test_labels, if_train):\n y_pred = lmt.predict_positive(test_data)\n correct = 0\n for i in range(len(test_labels)):\n p_label = 1 if y_pred[i] > 0.5 else 0\n logger.debug('p_label: {} Prob: {} train_label: {}'.format(p_label,\n y_pred[i], test_labels[i]))\n if p_label == test_labels[i]:\n correct += 1\n precision = correct * 1.0 / len(test_labels)\n logger.info('[{} dataset] Model: {} Accuracy: {}/{}={}'.format(if_train,\n mdl_name, correct, len(test_labels), precision))\n\n\ndef load(model_path):\n lmt = pickle.load(open(model_path, 'rb'))\n return lmt\n\n\ndef test_1():\n mdl = load_model('Pullover', 'Coat', 'FMNIST', 'LMT')\n images, labels = load_data_new('Pullover', 'Coat', train=False, dataset\n ='FMNIST')\n images = images.view(-1, 784)\n forward = mdl.forward(images)\n logger.info('forward.size() => {}'.format(forward.size()))\n prob = mdl.predict_positive(images)\n logger.info('prob.size() => {}'.format(prob.size()))\n\n\n<mask token>\n",
"step-3": "<mask token>\nlogger = getLogger(__name__)\nIMG_SIZE = 28\nMIN_NODE_SIZE = 15\nMIN_SPLIT_IMPROVEMENT = 10\n\n\ndef fit_linear_model(x, y):\n logger.info('Using Lasso')\n lr = Lasso(alpha=0.01)\n lr.fit(x, y)\n return SharedScalerModel(lr)\n\n\nclass SharedScalerModel:\n\n def __init__(self, lm):\n self.lm = lm\n self.coef_ = torch.unsqueeze(torch.tensor(lm.coef_, dtype=torch.\n float64), dim=0)\n self.intercept_ = lm.intercept_\n\n def predict(self, X):\n return torch.tensor(self.lm.predict(X), dtype=torch.float64)\n\n\ndef train(c_1, c_2, id2pn, dataset):\n if not os.path.exists('../model'):\n os.mkdir('../model')\n mdl_name = '{}_{}'.format(c_1, c_2)\n logger.info('Train the model: {} {}'.format(mdl_name, id2pn))\n train_data, train_labels = load_data_new(c_1, c_2, train=True, dataset=\n dataset)\n train_data = train_data.view(-1, IMG_SIZE * IMG_SIZE).cpu()\n train_labels = torch.tensor([id2pn[i.item()] for i in train_labels],\n dtype=torch.float64)\n test_data, test_labels = load_data_new(c_1, c_2, train=False, dataset=\n dataset)\n test_data = test_data.view(-1, IMG_SIZE * IMG_SIZE)\n test_labels = np.array([id2pn[i.item()] for i in test_labels])\n counter = defaultdict(int)\n for i in train_labels:\n counter[i.item()] += 1\n train_pos, train_neg = counter[1], counter[0]\n counter = defaultdict(int)\n for i in test_labels:\n counter[i.item()] += 1\n test_pos, test_neg = counter[1], counter[0]\n logger.info('Train_labels {} '.format(train_labels))\n logger.info('Test_labels {} '.format(test_labels))\n logger.info(\n \"\"\"\n ======================================================================\n Data Information\n & \\\\# Positive & \\\\# Negative & \\\\# Positive & \\\\# Negative \\\\\n \\\\hline\n {} {} & {} & {} & {} & {} \\\\\n \\\\hline\n ======================================================================\n \"\"\"\n .format(c_1, c_2, train_pos, train_neg, test_pos, test_neg))\n logger.info('train_labels {}'.format(train_labels))\n lmt = 
LinearModelTree(MIN_NODE_SIZE, fit_linear_model,\n min_split_improvement=MIN_SPLIT_IMPROVEMENT)\n lmt.build_tree(train_data, train_labels)\n logger.info('Finish building trees')\n lmt.merge_lrs(lmt.root)\n logger.info('Finish merging trees')\n path_manager = PathManager(mdl_name='LMT', c_1=c_1, c_2=c_2, dataset=\n dataset, if_train_set=None)\n model_path = path_manager.mdl_path()\n with open(model_path, 'wb') as f:\n pickle.dump(lmt, f)\n lmt = load_model(c_1, c_2, dataset=dataset, model_name='LMT')\n train_data = train_data.to(config.DEVICE)\n _test('LMT', lmt, train_data, train_labels, 'Trainset')\n _test('LMT', lmt, test_data, test_labels, 'Testset')\n\n\ndef _test(mdl_name, lmt, test_data, test_labels, if_train):\n y_pred = lmt.predict_positive(test_data)\n correct = 0\n for i in range(len(test_labels)):\n p_label = 1 if y_pred[i] > 0.5 else 0\n logger.debug('p_label: {} Prob: {} train_label: {}'.format(p_label,\n y_pred[i], test_labels[i]))\n if p_label == test_labels[i]:\n correct += 1\n precision = correct * 1.0 / len(test_labels)\n logger.info('[{} dataset] Model: {} Accuracy: {}/{}={}'.format(if_train,\n mdl_name, correct, len(test_labels), precision))\n\n\ndef load(model_path):\n lmt = pickle.load(open(model_path, 'rb'))\n return lmt\n\n\ndef test_1():\n mdl = load_model('Pullover', 'Coat', 'FMNIST', 'LMT')\n images, labels = load_data_new('Pullover', 'Coat', train=False, dataset\n ='FMNIST')\n images = images.view(-1, 784)\n forward = mdl.forward(images)\n logger.info('forward.size() => {}'.format(forward.size()))\n prob = mdl.predict_positive(images)\n logger.info('prob.size() => {}'.format(prob.size()))\n\n\nif __name__ == '__main__':\n test_1()\n",
"step-4": "from __future__ import absolute_import, division, print_function, unicode_literals\nfrom collections import defaultdict\nimport os\nimport torch\nimport numpy as np\nimport pickle\nfrom sklearn.linear_model import Ridge, Lasso\nfrom biplnn.log import getLogger\nfrom biplnn.utils import load_data_new, load_model, PathManager, load_data_new\nfrom biplnn.mdl.lmt import LinearModelTree\nfrom biplnn import config\nlogger = getLogger(__name__)\nIMG_SIZE = 28\nMIN_NODE_SIZE = 15\nMIN_SPLIT_IMPROVEMENT = 10\n\n\ndef fit_linear_model(x, y):\n logger.info('Using Lasso')\n lr = Lasso(alpha=0.01)\n lr.fit(x, y)\n return SharedScalerModel(lr)\n\n\nclass SharedScalerModel:\n\n def __init__(self, lm):\n self.lm = lm\n self.coef_ = torch.unsqueeze(torch.tensor(lm.coef_, dtype=torch.\n float64), dim=0)\n self.intercept_ = lm.intercept_\n\n def predict(self, X):\n return torch.tensor(self.lm.predict(X), dtype=torch.float64)\n\n\ndef train(c_1, c_2, id2pn, dataset):\n if not os.path.exists('../model'):\n os.mkdir('../model')\n mdl_name = '{}_{}'.format(c_1, c_2)\n logger.info('Train the model: {} {}'.format(mdl_name, id2pn))\n train_data, train_labels = load_data_new(c_1, c_2, train=True, dataset=\n dataset)\n train_data = train_data.view(-1, IMG_SIZE * IMG_SIZE).cpu()\n train_labels = torch.tensor([id2pn[i.item()] for i in train_labels],\n dtype=torch.float64)\n test_data, test_labels = load_data_new(c_1, c_2, train=False, dataset=\n dataset)\n test_data = test_data.view(-1, IMG_SIZE * IMG_SIZE)\n test_labels = np.array([id2pn[i.item()] for i in test_labels])\n counter = defaultdict(int)\n for i in train_labels:\n counter[i.item()] += 1\n train_pos, train_neg = counter[1], counter[0]\n counter = defaultdict(int)\n for i in test_labels:\n counter[i.item()] += 1\n test_pos, test_neg = counter[1], counter[0]\n logger.info('Train_labels {} '.format(train_labels))\n logger.info('Test_labels {} '.format(test_labels))\n logger.info(\n \"\"\"\n 
======================================================================\n Data Information\n & \\\\# Positive & \\\\# Negative & \\\\# Positive & \\\\# Negative \\\\\n \\\\hline\n {} {} & {} & {} & {} & {} \\\\\n \\\\hline\n ======================================================================\n \"\"\"\n .format(c_1, c_2, train_pos, train_neg, test_pos, test_neg))\n logger.info('train_labels {}'.format(train_labels))\n lmt = LinearModelTree(MIN_NODE_SIZE, fit_linear_model,\n min_split_improvement=MIN_SPLIT_IMPROVEMENT)\n lmt.build_tree(train_data, train_labels)\n logger.info('Finish building trees')\n lmt.merge_lrs(lmt.root)\n logger.info('Finish merging trees')\n path_manager = PathManager(mdl_name='LMT', c_1=c_1, c_2=c_2, dataset=\n dataset, if_train_set=None)\n model_path = path_manager.mdl_path()\n with open(model_path, 'wb') as f:\n pickle.dump(lmt, f)\n lmt = load_model(c_1, c_2, dataset=dataset, model_name='LMT')\n train_data = train_data.to(config.DEVICE)\n _test('LMT', lmt, train_data, train_labels, 'Trainset')\n _test('LMT', lmt, test_data, test_labels, 'Testset')\n\n\ndef _test(mdl_name, lmt, test_data, test_labels, if_train):\n y_pred = lmt.predict_positive(test_data)\n correct = 0\n for i in range(len(test_labels)):\n p_label = 1 if y_pred[i] > 0.5 else 0\n logger.debug('p_label: {} Prob: {} train_label: {}'.format(p_label,\n y_pred[i], test_labels[i]))\n if p_label == test_labels[i]:\n correct += 1\n precision = correct * 1.0 / len(test_labels)\n logger.info('[{} dataset] Model: {} Accuracy: {}/{}={}'.format(if_train,\n mdl_name, correct, len(test_labels), precision))\n\n\ndef load(model_path):\n lmt = pickle.load(open(model_path, 'rb'))\n return lmt\n\n\ndef test_1():\n mdl = load_model('Pullover', 'Coat', 'FMNIST', 'LMT')\n images, labels = load_data_new('Pullover', 'Coat', train=False, dataset\n ='FMNIST')\n images = images.view(-1, 784)\n forward = mdl.forward(images)\n logger.info('forward.size() => {}'.format(forward.size()))\n prob = 
mdl.predict_positive(images)\n logger.info('prob.size() => {}'.format(prob.size()))\n\n\nif __name__ == '__main__':\n test_1()\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n#\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom collections import defaultdict\nimport os\nimport torch\nimport numpy as np\nimport pickle\nfrom sklearn.linear_model import Ridge, Lasso\nfrom biplnn.log import getLogger\nfrom biplnn.utils import load_data_new, load_model, PathManager, load_data_new\nfrom biplnn.mdl.lmt import LinearModelTree\nfrom biplnn import config\n\nlogger = getLogger(__name__)\n\nIMG_SIZE = 28\nMIN_NODE_SIZE = 15\nMIN_SPLIT_IMPROVEMENT = 10\n\n\ndef fit_linear_model(x, y):\n logger.info(\"Using Lasso\")\n lr = Lasso(alpha=0.01)\n lr.fit(x, y)\n return SharedScalerModel(lr)\n\n\nclass SharedScalerModel:\n def __init__(self, lm):\n self.lm = lm\n self.coef_ = torch.unsqueeze(torch.tensor(lm.coef_, dtype=torch.float64), dim=0)\n self.intercept_ = lm.intercept_\n\n def predict(self, X):\n return torch.tensor(self.lm.predict(X), dtype=torch.float64)\n\n\ndef train(c_1, c_2, id2pn, dataset):\n if not os.path.exists(\"../model\"):\n os.mkdir(\"../model\")\n mdl_name = \"{}_{}\".format(c_1, c_2)\n logger.info(\"Train the model: {} {}\".format(mdl_name, id2pn))\n train_data, train_labels = load_data_new(c_1, c_2, train=True, dataset=dataset)\n train_data = train_data.view(-1, IMG_SIZE * IMG_SIZE).cpu()\n train_labels = torch.tensor([id2pn[i.item()] for i in train_labels], dtype=torch.float64)\n\n test_data, test_labels = load_data_new(c_1, c_2, train=False, dataset=dataset)\n test_data = test_data.view(-1, IMG_SIZE * IMG_SIZE)\n test_labels = np.array([id2pn[i.item()] for i in test_labels])\n\n counter = defaultdict(int)\n for i in train_labels:\n counter[i.item()] += 1\n train_pos, train_neg = counter[1], counter[0]\n counter = defaultdict(int)\n for i in test_labels:\n counter[i.item()] += 1\n test_pos, test_neg = counter[1], counter[0]\n\n logger.info(\"Train_labels {} \".format(train_labels))\n logger.info(\"Test_labels {} 
\".format(test_labels))\n\n logger.info(\"\"\"\n ======================================================================\n Data Information\n & \\# Positive & \\# Negative & \\# Positive & \\# Negative \\\\\n \\hline\n {} {} & {} & {} & {} & {} \\\\\n \\hline\n ======================================================================\n \"\"\".format(c_1, c_2, train_pos, train_neg, test_pos, test_neg))\n\n logger.info(\"train_labels {}\".format(train_labels))\n\n lmt = LinearModelTree(MIN_NODE_SIZE, fit_linear_model, min_split_improvement=MIN_SPLIT_IMPROVEMENT)\n lmt.build_tree(train_data, train_labels)\n logger.info(\"Finish building trees\")\n lmt.merge_lrs(lmt.root)\n logger.info(\"Finish merging trees\")\n\n path_manager = PathManager(mdl_name=\"LMT\", c_1=c_1, c_2=c_2, dataset=dataset, if_train_set=None)\n model_path = path_manager.mdl_path()\n with open(model_path, \"wb\") as f:\n pickle.dump(lmt, f)\n\n lmt = load_model(c_1, c_2, dataset=dataset, model_name=\"LMT\")\n train_data = train_data.to(config.DEVICE)\n _test(\"LMT\", lmt, train_data, train_labels, \"Trainset\")\n _test(\"LMT\", lmt, test_data, test_labels, \"Testset\")\n\n\ndef _test(mdl_name, lmt, test_data, test_labels, if_train):\n y_pred = lmt.predict_positive(test_data)\n\n correct = 0\n for i in range(len(test_labels)):\n p_label = 1 if y_pred[i] > 0.5 else 0\n logger.debug(\"p_label: {} Prob: {} train_label: {}\".format(p_label, y_pred[i], test_labels[i]))\n if p_label == test_labels[i]:\n correct += 1\n precision = correct * 1.0 / len(test_labels)\n logger.info(\"[{} dataset] Model: {} Accuracy: {}/{}={}\".format(if_train, mdl_name, correct, len(test_labels), precision))\n\ndef load(model_path):\n lmt = pickle.load(open(model_path, \"rb\"))\n return lmt\n\n\ndef test_1():\n mdl = load_model(\"Pullover\", \"Coat\", \"FMNIST\", \"LMT\")\n images, labels = load_data_new(\"Pullover\", \"Coat\", train=False, dataset=\"FMNIST\")\n images = images.view(-1, 784)\n forward = mdl.forward(images)\n 
logger.info(\"forward.size() => {}\".format(forward.size()))\n prob = mdl.predict_positive(images)\n logger.info(\"prob.size() => {}\".format(prob.size()))\n\nif __name__ == '__main__':\n # main()\n # train_main(\"Pullover\", \"Coat\")\n # test(\"Pullover\", \"Coat\", FMNIST.id2pn_label(FMNIST.str2id(\"Pullover\"), FMNIST.str2id(\"Coat\")))\n test_1()\n",
"step-ids": [
7,
8,
10,
11,
12
]
}
|
[
7,
8,
10,
11,
12
] |
from string import Template
import os
#-----template objects-----
# string.Template placeholders: ${x} is filled at substitute() time;
# $$ escapes a literal dollar so CMake's own ${VAR} syntax survives.
#for putting a template inside an ifdef guard
TIfGuard = Template("""if(${condition})
${innerbody}
endif()\n""")
#For minimum cmake version and project name
TProjectSettings = Template("""cmake_minimum_required (VERSION ${MinCmakeVer})
project(${Name})
set_property(GLOBAL PROPERTY USE_FOLDERS ${UseFolders})
set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)\n""")
#for including a definition
TDefinition = Template("add_definitions(-D${definition})")
#include directories
TIncludeDirectory = Template('include_directories("${dir}")')
#for globbing source files in a dir
TSourceGlob = Template('FILE(GLOB ${source_id} "${dir}/*.c*")')
#for globbing header files in a dir
THeaderGlob = Template('FILE(GLOB ${header_id} "${dir}/*.h*")')
#template for source group (so they appear in VS filters etc.
TSourceGroup = Template('source_group("${folder}" FILES $${${files}})\n')
#for outputting an executable
TExecutable = Template("add_executable(${project} $${SOURCES} $${HEADERS})\n")
#for outputting a shared library
TSharedLib = Template("add_library(${project} SHARED $${SOURCES} $${HEADERS})\n")
#for outputting a static library
TStaticLib = Template("add_library(${project} STATIC $${SOURCES} $${HEADERS})\n")
#for outputting a collection of code files to an object file
# FIX: the original was missing the closing paren (and trailing newline),
# producing unbalanced "add_library(... OBJECT ${SOURCES}" in the output.
TObjectLib = Template("add_library(${project} OBJECT $${SOURCES})\n")
#template for appending a cmake variable to another cmake variable
TAppendVariable = Template("set( ${var} $${${var}} $${${appendedval}})\n")
#template for appending a python variable to a cmake variable
TAppendPythonVariable = Template("set( ${var} $${${var}} ${appendedval})\n")
#template for setting cmake variable
TMakeVariable = Template('set (${var} ${value})\n')
#template for adding a link directory
TLinkDirectory = Template('link_directories("${dir}")')
#template for targeting link libs
TTargetLinkLibs = Template("""if(NOT LIBS STREQUAL "")
target_link_libraries(${name} $${LIBS})
endif()
""")
#for linking a framework on the mac
TLinkFramework = Template("""find_library(${framework}_LIB ${framework})
MARK_AS_ADVANCED(${framework}_LIB)
set(LIBS $${LIBS} $${${framework}_LIB})""")
#for linking a system library
TLinkSystemLib = Template("""find_package(${framework} REQUIRED)
include_directories($${${framework_upper}_INCLUDE_DIRS})
set(LIBS $${LIBS} $${${framework_upper}_LIBRARIES})""")
#for linking objects into this module
# FIX: the original "$<TARGET_OBJECTS>:${object}" both crashed at
# substitute() time (bare "$<" is an invalid Template placeholder ->
# ValueError) and was not valid CMake generator-expression syntax;
# the correct form is $<TARGET_OBJECTS:name>, with $$ escaping the dollar.
TLinkObject = Template("set(LIBS $${LIBS} $$<TARGET_OBJECTS:${object}>)")
#template for exectuable output
TExecutableOutput = Template('set(EXECUTABLE_OUTPUT_PATH "${dir}")\n')
#template for exectuable output
TRuntimeOutput = Template('set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${dir}")\n')
#template for library output
TLibraryoutput = Template('set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${dir}")\nset(LIBRARY_OUTPUT_PATH "${dir}")\n')
#template for including a submodule
TSubmoduleInclude = Template('add_subdirectory(${dir})')
#-----Helper Functions----
def WriteToFile(f, output, condition = False, conditionID = ""):
	"""Write *output* to *f*, wrapping it in an if(conditionID) guard
	via WrapInGuard when *condition* is truthy."""
	if condition:
		f.write(WrapInGuard(conditionID, output))
	else:
		f.write(output)
def InsertEnvVariable(s):
	"""Return *s* with $VAR placeholders replaced from os.environ."""
	expanded = Template(s)
	return expanded.substitute(os.environ)
def ContainsEnvVariable(s):
	"""True when *s* appears to reference an environment variable ($...)."""
	return "$" in s
#removes all characters that may cause issues with cmake
#such as ${} characters for environment variables
def Strip(s):
	"""Return *s* with the characters '$', '{' and '}' removed.

	Used to turn directory paths into CMake-safe identifiers.
	"""
	# str.translate removes all three characters in one C-level pass,
	# instead of one .replace() call (and string copy) per character.
	return s.translate(str.maketrans("", "", "${}"))
#-----Write Functions-----
#Puts innerbody into TIfGuard template with the given condition
#then returns the string
def WrapInGuard(condition, innerbody):
	"""Return *innerbody* wrapped in an if(condition) ... endif() block."""
	fields = dict(condition=condition, innerbody=innerbody)
	return TIfGuard.substitute(fields)
def WriteProjectSettings(f, section):
	"""Write the cmake_minimum_required/project() header for *section*.

	Defaults UseFolders to "OFF" when the section does not specify it,
	then renders TProjectSettings from the section data.
	"""
	section.data.setdefault("UseFolders", "OFF")
	f.write(TProjectSettings.substitute(section.data))
#writes required CMAKE variables to the file
def WriteRequiredVariables(f):
	"""Initialise the CMake variables the generated file relies on.

	INCLUDES/SOURCES/LIBS start as empty strings so later sections can
	append to them unconditionally.
	"""
	for name in ("INCLUDES", "SOURCES", "LIBS"):
		f.write(TMakeVariable.substitute(dict(var=name, value='""')))
#definitions such as #defines
def WriteDefinitions(f, sections):
	"""Write add_definitions(-D...) lines for every definitions section,
	honouring each section's platform condition."""
	for section in sections:
		rendered = "".join(
			TDefinition.substitute(dict(definition=d)) + "\n"
			for d in section.data[":"])
		WriteToFile(f, rendered, section.HasCondition(), section.condition)
#project include directories
def WriteIncludeDirectories(f, rootDir, sections):
	"""Emit include path, header glob, HEADERS append and IDE source
	group for every include directory in every section.

	Paths containing $VARs are expanded from the environment; plain
	paths are resolved relative to *rootDir*. Each emitted chunk is
	individually wrapped in the section's platform guard (if any).
	"""
	#first write the one which is not platform specific
	for s in sections:
		dirs = s.data[":"]
		#gather definitions to be output
		output = ""
		for d in dirs:
			localDir = d if d.startswith("/") else "/"+d
			# CMake-safe identifier for this dir's glob variable,
			# e.g. /src/util -> _src_util
			headerID = Strip(localDir.replace('/','_'))
			#insert any environment variables
			if ContainsEnvVariable(d):
				d = InsertEnvVariable(d)
			else:
				d = rootDir + localDir
			#add include directory
			output = TIncludeDirectory.substitute(dict(dir=d)) + "\n"
			WriteToFile(f,output, s.HasCondition(), s.condition)
			#glob all header files
			output = THeaderGlob.substitute(dict(dir=d, header_id=headerID)) + "\n"
			WriteToFile(f,output, s.HasCondition(), s.condition)
			#append to HEADERS variable
			output = TAppendVariable.substitute(dict(var="HEADERS", appendedval=headerID))
			WriteToFile(f,output, s.HasCondition(), s.condition)
			#make source group so they appear in filters
			# '\\\\' yields a literal backslash pair in the CMake output
			localDir = Strip(localDir.replace('/','\\\\'))
			output = TSourceGroup.substitute(dict(folder="Header Files" + localDir, files=headerID))
			WriteToFile(f,output, s.HasCondition(), s.condition)
#project source directories
def WriteSourceDirectories(f, rootDir, sections):
	"""Glob each source directory, append it to SOURCES and register an
	IDE source group, honouring per-section platform conditions."""
	for section in sections:
		for entry in section.data[":"]:
			localDir = entry if entry.startswith("/") else "/" + entry
			sourceID = Strip(localDir.replace('/', '_'))
			# $VAR paths come from the environment; plain paths are
			# resolved relative to the project root
			if ContainsEnvVariable(entry):
				fullDir = InsertEnvVariable(entry)
			else:
				fullDir = rootDir + localDir
			# glob the sources into a per-directory variable
			WriteToFile(f, TSourceGlob.substitute(dict(dir=fullDir, source_id=sourceID)) + "\n",
				section.HasCondition(), section.condition)
			# collect them into the global SOURCES list
			WriteToFile(f, TAppendVariable.substitute(dict(var="SOURCES", appendedval=sourceID)),
				section.HasCondition(), section.condition)
			# and group them so they appear in Visual Studio filters
			groupDir = Strip(localDir.replace('/', '\\\\'))
			WriteToFile(f, TSourceGroup.substitute(dict(folder="Source Files" + groupDir, files=sourceID)),
				section.HasCondition(), section.condition)
#includes local library directories
def WriteProjectLibDirectories(f, rootDir, sections):
	"""Emit a link_directories() entry for every library path in every
	section, honouring per-section platform conditions."""
	for section in sections:
		for entry in section.data[":"]:
			# expand $VARs, otherwise anchor the path at the project root
			if ContainsEnvVariable(entry):
				path = InsertEnvVariable(entry)
			else:
				path = rootDir + (entry if entry.startswith('/') else "/" + entry)
			WriteToFile(f, TLinkDirectory.substitute(dict(dir=path)) + "\n",
				section.HasCondition(), section.condition)
#adds all libs to the LIBS cmake var
def WriteLinkLibs(f, rootDir, sections):
	"""Translate each library entry into the matching CMake link output.

	Entry prefixes select the mechanism:
	  -framework X : Apple framework via find_library
	  -system X    : find_package() module (name upper-cased for its vars)
	  -object X    : link an object library's objects
	  anything else: appended verbatim to the LIBS variable
	"""
	#first write the one which is not platform specific
	for s in sections:
		libs = s.data[":"]
		output = ""
		for l in libs:
			if "-framework" in l:
				frameworkName = l.replace("-framework ", "")
				frameworkName = frameworkName.strip()
				output = TLinkFramework.substitute(dict(framework=frameworkName)) +"\n"
				WriteToFile(f,output, s.HasCondition(), s.condition)
			elif "-system" in l:
				systemLibName = l.replace("-system ", "")
				systemLibName = systemLibName.strip()
				# find_package result variables use the upper-cased name
				output = TLinkSystemLib.substitute(dict(framework=systemLibName,framework_upper=systemLibName.upper())) +"\n"
				WriteToFile(f,output, s.HasCondition(), s.condition)
			elif "-object" in l:
				objectLibName = l.replace("-object ", "")
				objectLibName = objectLibName.strip()
				output = TLinkObject.substitute(dict(object=objectLibName)) +"\n"
				WriteToFile(f,output, s.HasCondition(), s.condition)
			else:
				#add to LIBS cmake var
				output = TAppendPythonVariable.substitute(dict(var="LIBS", appendedval=l))
				WriteToFile(f,output, s.HasCondition(), s.condition)
#Writes the cmake runtime/lib etc. outputs
def WriteOutputs(f, rootDir, sections):
	"""Write output-directory settings for executables and libraries.

	Paths may contain $VARs (expanded from the environment) or be
	resolved relative to *rootDir*.

	NOTE(review): the "Executable" key writes CMAKE_RUNTIME_OUTPUT_DIRECTORY
	while "Runtime" writes EXECUTABLE_OUTPUT_PATH -- the mapping looks
	swapped relative to the key names, though both steer where binaries
	land; confirm the intent before changing.
	"""
	for s in sections:
		if "Executable" in s.data:
			runtime = s.data["Executable"]
			#insert any environment variables
			if ContainsEnvVariable(runtime):
				runtime = InsertEnvVariable(runtime)
			else:
				runtime = runtime if runtime.startswith('/') else "/"+runtime
				runtime = rootDir + runtime
			output = TRuntimeOutput.substitute(dict(dir=runtime))
			WriteToFile(f,output, s.HasCondition(), s.condition)
		if "Runtime" in s.data:
			runtime = s.data["Runtime"]
			#insert any environment variables
			if ContainsEnvVariable(runtime):
				runtime = InsertEnvVariable(runtime)
			else:
				runtime = runtime if runtime.startswith('/') else "/"+runtime
				runtime = rootDir + runtime
			output = TExecutableOutput.substitute(dict(dir=runtime))
			WriteToFile(f,output, s.HasCondition(), s.condition)
		if "Libs" in s.data:
			# debug trace left in by the original author
			print("LIBS OUTPUT BEING SET")
			statics = s.data["Libs"]
			#insert any environment variables
			if ContainsEnvVariable(statics):
				statics = InsertEnvVariable(statics)
			else:
				statics = statics if statics.startswith('/') else "/"+statics
				statics = rootDir + statics
			output = TLibraryoutput.substitute(dict(dir=statics))
			WriteToFile(f,output, s.HasCondition(), s.condition)
#Writes the module output section of the CmakeLists file
def WriteModuleOutput(f, rootDir, m):
	"""Emit the add_executable/add_library line for module *m*, followed
	by its target_link_libraries step.

	The module's Type string selects the artefact kind (exe, shared,
	static or object); unrecognised types emit nothing.
	"""
	name = m.settings.data["Name"]  #name of lib/exe
	buildType = m.settings.data["Type"]  #build type (lib/exe)
	# keyword -> template that declares the target; checked in the same
	# order as the original if/elif chain
	targetTemplates = (
		("exe", TExecutable),
		("shared", TSharedLib),
		("static", TStaticLib),
		("object", TObjectLib),
	)
	for keyword, template in targetTemplates:
		if keyword in buildType:
			f.write(template.substitute(dict(project=name)))
			f.write(TTargetLinkLibs.substitute(dict(name=name)))
			break
	return None
#writes the include for a submodule
def WriteSubmoduleIncludes(f, rootDir, sections):
    """Emit an add_subdirectory() line for every submodule listed
    under the ":" key of each section, honoring section conditions."""
    for s in sections:
        for submodule in s.data[":"]:
            # Ensure a single leading slash before rooting under rootDir.
            path = submodule if submodule.startswith('/') else "/" + submodule
            line = TSubmoduleInclude.substitute(dict(dir=rootDir + path)) + "\n"
            WriteToFile(f, line, s.HasCondition(), s.condition)
|
normal
|
{
"blob_id": "8cba57e3552e0072720fe42fa1949534f29d71b5",
"index": 1562,
"step-1": "<mask token>\n\n\ndef WriteToFile(f, output, condition=False, conditionID=''):\n f.write(output if not condition else WrapInGuard(conditionID, output))\n\n\n<mask token>\n\n\ndef WrapInGuard(condition, innerbody):\n return TIfGuard.substitute(dict(condition=condition, innerbody=innerbody))\n\n\n<mask token>\n\n\ndef WriteRequiredVariables(f):\n variables = [dict(var='INCLUDES', value='\"\"'), dict(var='SOURCES',\n value='\"\"'), dict(var='LIBS', value='\"\"')]\n for v in variables:\n f.write(TMakeVariable.substitute(v))\n\n\ndef WriteDefinitions(f, sections):\n for s in sections:\n defs = s.data[':']\n output = ''\n for d in defs:\n output += TDefinition.substitute(dict(definition=d)) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n\n\n<mask token>\n\n\ndef WriteProjectLibDirectories(f, rootDir, sections):\n for s in sections:\n dirs = s.data[':']\n output = ''\n for d in dirs:\n if ContainsEnvVariable(d):\n d = InsertEnvVariable(d)\n else:\n d = d if d.startswith('/') else '/' + d\n d = rootDir + d\n output = TLinkDirectory.substitute(dict(dir=d)) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n\n\ndef WriteLinkLibs(f, rootDir, sections):\n for s in sections:\n libs = s.data[':']\n output = ''\n for l in libs:\n if '-framework' in l:\n frameworkName = l.replace('-framework ', '')\n frameworkName = frameworkName.strip()\n output = TLinkFramework.substitute(dict(framework=\n frameworkName)) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n elif '-system' in l:\n systemLibName = l.replace('-system ', '')\n systemLibName = systemLibName.strip()\n output = TLinkSystemLib.substitute(dict(framework=\n systemLibName, framework_upper=systemLibName.upper())\n ) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n elif '-object' in l:\n objectLibName = l.replace('-object ', '')\n objectLibName = objectLibName.strip()\n output = TLinkObject.substitute(dict(object=objectLibName)\n ) + '\\n'\n 
WriteToFile(f, output, s.HasCondition(), s.condition)\n else:\n output = TAppendPythonVariable.substitute(dict(var='LIBS',\n appendedval=l))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n\n\ndef WriteOutputs(f, rootDir, sections):\n for s in sections:\n if 'Executable' in s.data:\n runtime = s.data['Executable']\n if ContainsEnvVariable(runtime):\n runtime = InsertEnvVariable(runtime)\n else:\n runtime = runtime if runtime.startswith('/') else '/' + runtime\n runtime = rootDir + runtime\n output = TRuntimeOutput.substitute(dict(dir=runtime))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n if 'Runtime' in s.data:\n runtime = s.data['Runtime']\n if ContainsEnvVariable(runtime):\n runtime = InsertEnvVariable(runtime)\n else:\n runtime = runtime if runtime.startswith('/') else '/' + runtime\n runtime = rootDir + runtime\n output = TExecutableOutput.substitute(dict(dir=runtime))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n if 'Libs' in s.data:\n print('LIBS OUTPUT BEING SET')\n statics = s.data['Libs']\n if ContainsEnvVariable(statics):\n statics = InsertEnvVariable(statics)\n else:\n statics = statics if statics.startswith('/') else '/' + statics\n statics = rootDir + statics\n output = TLibraryoutput.substitute(dict(dir=statics))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n\n\ndef WriteModuleOutput(f, rootDir, m):\n name = m.settings.data['Name']\n t = m.settings.data['Type']\n if 'exe' in t:\n f.write(TExecutable.substitute(dict(project=name)))\n f.write(TTargetLinkLibs.substitute(dict(name=name)))\n elif 'shared' in t:\n f.write(TSharedLib.substitute(dict(project=name)))\n f.write(TTargetLinkLibs.substitute(dict(name=name)))\n elif 'static' in t:\n f.write(TStaticLib.substitute(dict(project=name)))\n f.write(TTargetLinkLibs.substitute(dict(name=name)))\n elif 'object' in t:\n f.write(TObjectLib.substitute(dict(project=name)))\n f.write(TTargetLinkLibs.substitute(dict(name=name)))\n return None\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef WriteToFile(f, output, condition=False, conditionID=''):\n f.write(output if not condition else WrapInGuard(conditionID, output))\n\n\n<mask token>\n\n\ndef Strip(s):\n chars = '${}'\n for i in range(0, len(chars)):\n s = s.replace(chars[i], '')\n return s\n\n\ndef WrapInGuard(condition, innerbody):\n return TIfGuard.substitute(dict(condition=condition, innerbody=innerbody))\n\n\ndef WriteProjectSettings(f, section):\n if 'UseFolders' not in section.data:\n section.data['UseFolders'] = 'OFF'\n output = TProjectSettings.substitute(section.data)\n f.write(output)\n\n\ndef WriteRequiredVariables(f):\n variables = [dict(var='INCLUDES', value='\"\"'), dict(var='SOURCES',\n value='\"\"'), dict(var='LIBS', value='\"\"')]\n for v in variables:\n f.write(TMakeVariable.substitute(v))\n\n\ndef WriteDefinitions(f, sections):\n for s in sections:\n defs = s.data[':']\n output = ''\n for d in defs:\n output += TDefinition.substitute(dict(definition=d)) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n\n\ndef WriteIncludeDirectories(f, rootDir, sections):\n for s in sections:\n dirs = s.data[':']\n output = ''\n for d in dirs:\n localDir = d if d.startswith('/') else '/' + d\n headerID = Strip(localDir.replace('/', '_'))\n if ContainsEnvVariable(d):\n d = InsertEnvVariable(d)\n else:\n d = rootDir + localDir\n output = TIncludeDirectory.substitute(dict(dir=d)) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n output = THeaderGlob.substitute(dict(dir=d, header_id=headerID)\n ) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n output = TAppendVariable.substitute(dict(var='HEADERS',\n appendedval=headerID))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n localDir = Strip(localDir.replace('/', '\\\\\\\\'))\n output = TSourceGroup.substitute(dict(folder='Header Files' +\n localDir, files=headerID))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n\n\ndef WriteSourceDirectories(f, 
rootDir, sections):\n for s in sections:\n dirs = s.data[':']\n output = ''\n for d in dirs:\n localDir = d if d.startswith('/') else '/' + d\n sourceID = Strip(localDir.replace('/', '_'))\n if ContainsEnvVariable(d):\n d = InsertEnvVariable(d)\n else:\n d = rootDir + localDir\n output = TSourceGlob.substitute(dict(dir=d, source_id=sourceID)\n ) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n output = TAppendVariable.substitute(dict(var='SOURCES',\n appendedval=sourceID))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n localDir = Strip(localDir.replace('/', '\\\\\\\\'))\n output = TSourceGroup.substitute(dict(folder='Source Files' +\n localDir, files=sourceID))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n\n\ndef WriteProjectLibDirectories(f, rootDir, sections):\n for s in sections:\n dirs = s.data[':']\n output = ''\n for d in dirs:\n if ContainsEnvVariable(d):\n d = InsertEnvVariable(d)\n else:\n d = d if d.startswith('/') else '/' + d\n d = rootDir + d\n output = TLinkDirectory.substitute(dict(dir=d)) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n\n\ndef WriteLinkLibs(f, rootDir, sections):\n for s in sections:\n libs = s.data[':']\n output = ''\n for l in libs:\n if '-framework' in l:\n frameworkName = l.replace('-framework ', '')\n frameworkName = frameworkName.strip()\n output = TLinkFramework.substitute(dict(framework=\n frameworkName)) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n elif '-system' in l:\n systemLibName = l.replace('-system ', '')\n systemLibName = systemLibName.strip()\n output = TLinkSystemLib.substitute(dict(framework=\n systemLibName, framework_upper=systemLibName.upper())\n ) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n elif '-object' in l:\n objectLibName = l.replace('-object ', '')\n objectLibName = objectLibName.strip()\n output = TLinkObject.substitute(dict(object=objectLibName)\n ) + '\\n'\n WriteToFile(f, output, s.HasCondition(), 
s.condition)\n else:\n output = TAppendPythonVariable.substitute(dict(var='LIBS',\n appendedval=l))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n\n\ndef WriteOutputs(f, rootDir, sections):\n for s in sections:\n if 'Executable' in s.data:\n runtime = s.data['Executable']\n if ContainsEnvVariable(runtime):\n runtime = InsertEnvVariable(runtime)\n else:\n runtime = runtime if runtime.startswith('/') else '/' + runtime\n runtime = rootDir + runtime\n output = TRuntimeOutput.substitute(dict(dir=runtime))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n if 'Runtime' in s.data:\n runtime = s.data['Runtime']\n if ContainsEnvVariable(runtime):\n runtime = InsertEnvVariable(runtime)\n else:\n runtime = runtime if runtime.startswith('/') else '/' + runtime\n runtime = rootDir + runtime\n output = TExecutableOutput.substitute(dict(dir=runtime))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n if 'Libs' in s.data:\n print('LIBS OUTPUT BEING SET')\n statics = s.data['Libs']\n if ContainsEnvVariable(statics):\n statics = InsertEnvVariable(statics)\n else:\n statics = statics if statics.startswith('/') else '/' + statics\n statics = rootDir + statics\n output = TLibraryoutput.substitute(dict(dir=statics))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n\n\ndef WriteModuleOutput(f, rootDir, m):\n name = m.settings.data['Name']\n t = m.settings.data['Type']\n if 'exe' in t:\n f.write(TExecutable.substitute(dict(project=name)))\n f.write(TTargetLinkLibs.substitute(dict(name=name)))\n elif 'shared' in t:\n f.write(TSharedLib.substitute(dict(project=name)))\n f.write(TTargetLinkLibs.substitute(dict(name=name)))\n elif 'static' in t:\n f.write(TStaticLib.substitute(dict(project=name)))\n f.write(TTargetLinkLibs.substitute(dict(name=name)))\n elif 'object' in t:\n f.write(TObjectLib.substitute(dict(project=name)))\n f.write(TTargetLinkLibs.substitute(dict(name=name)))\n return None\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef WriteToFile(f, output, condition=False, conditionID=''):\n f.write(output if not condition else WrapInGuard(conditionID, output))\n\n\ndef InsertEnvVariable(s):\n return Template(s).substitute(os.environ)\n\n\ndef ContainsEnvVariable(s):\n return '$' in s\n\n\ndef Strip(s):\n chars = '${}'\n for i in range(0, len(chars)):\n s = s.replace(chars[i], '')\n return s\n\n\ndef WrapInGuard(condition, innerbody):\n return TIfGuard.substitute(dict(condition=condition, innerbody=innerbody))\n\n\ndef WriteProjectSettings(f, section):\n if 'UseFolders' not in section.data:\n section.data['UseFolders'] = 'OFF'\n output = TProjectSettings.substitute(section.data)\n f.write(output)\n\n\ndef WriteRequiredVariables(f):\n variables = [dict(var='INCLUDES', value='\"\"'), dict(var='SOURCES',\n value='\"\"'), dict(var='LIBS', value='\"\"')]\n for v in variables:\n f.write(TMakeVariable.substitute(v))\n\n\ndef WriteDefinitions(f, sections):\n for s in sections:\n defs = s.data[':']\n output = ''\n for d in defs:\n output += TDefinition.substitute(dict(definition=d)) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n\n\ndef WriteIncludeDirectories(f, rootDir, sections):\n for s in sections:\n dirs = s.data[':']\n output = ''\n for d in dirs:\n localDir = d if d.startswith('/') else '/' + d\n headerID = Strip(localDir.replace('/', '_'))\n if ContainsEnvVariable(d):\n d = InsertEnvVariable(d)\n else:\n d = rootDir + localDir\n output = TIncludeDirectory.substitute(dict(dir=d)) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n output = THeaderGlob.substitute(dict(dir=d, header_id=headerID)\n ) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n output = TAppendVariable.substitute(dict(var='HEADERS',\n appendedval=headerID))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n localDir = Strip(localDir.replace('/', '\\\\\\\\'))\n output = TSourceGroup.substitute(dict(folder='Header Files' +\n localDir, 
files=headerID))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n\n\ndef WriteSourceDirectories(f, rootDir, sections):\n for s in sections:\n dirs = s.data[':']\n output = ''\n for d in dirs:\n localDir = d if d.startswith('/') else '/' + d\n sourceID = Strip(localDir.replace('/', '_'))\n if ContainsEnvVariable(d):\n d = InsertEnvVariable(d)\n else:\n d = rootDir + localDir\n output = TSourceGlob.substitute(dict(dir=d, source_id=sourceID)\n ) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n output = TAppendVariable.substitute(dict(var='SOURCES',\n appendedval=sourceID))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n localDir = Strip(localDir.replace('/', '\\\\\\\\'))\n output = TSourceGroup.substitute(dict(folder='Source Files' +\n localDir, files=sourceID))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n\n\ndef WriteProjectLibDirectories(f, rootDir, sections):\n for s in sections:\n dirs = s.data[':']\n output = ''\n for d in dirs:\n if ContainsEnvVariable(d):\n d = InsertEnvVariable(d)\n else:\n d = d if d.startswith('/') else '/' + d\n d = rootDir + d\n output = TLinkDirectory.substitute(dict(dir=d)) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n\n\ndef WriteLinkLibs(f, rootDir, sections):\n for s in sections:\n libs = s.data[':']\n output = ''\n for l in libs:\n if '-framework' in l:\n frameworkName = l.replace('-framework ', '')\n frameworkName = frameworkName.strip()\n output = TLinkFramework.substitute(dict(framework=\n frameworkName)) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n elif '-system' in l:\n systemLibName = l.replace('-system ', '')\n systemLibName = systemLibName.strip()\n output = TLinkSystemLib.substitute(dict(framework=\n systemLibName, framework_upper=systemLibName.upper())\n ) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n elif '-object' in l:\n objectLibName = l.replace('-object ', '')\n objectLibName = objectLibName.strip()\n output = 
TLinkObject.substitute(dict(object=objectLibName)\n ) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n else:\n output = TAppendPythonVariable.substitute(dict(var='LIBS',\n appendedval=l))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n\n\ndef WriteOutputs(f, rootDir, sections):\n for s in sections:\n if 'Executable' in s.data:\n runtime = s.data['Executable']\n if ContainsEnvVariable(runtime):\n runtime = InsertEnvVariable(runtime)\n else:\n runtime = runtime if runtime.startswith('/') else '/' + runtime\n runtime = rootDir + runtime\n output = TRuntimeOutput.substitute(dict(dir=runtime))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n if 'Runtime' in s.data:\n runtime = s.data['Runtime']\n if ContainsEnvVariable(runtime):\n runtime = InsertEnvVariable(runtime)\n else:\n runtime = runtime if runtime.startswith('/') else '/' + runtime\n runtime = rootDir + runtime\n output = TExecutableOutput.substitute(dict(dir=runtime))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n if 'Libs' in s.data:\n print('LIBS OUTPUT BEING SET')\n statics = s.data['Libs']\n if ContainsEnvVariable(statics):\n statics = InsertEnvVariable(statics)\n else:\n statics = statics if statics.startswith('/') else '/' + statics\n statics = rootDir + statics\n output = TLibraryoutput.substitute(dict(dir=statics))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n\n\ndef WriteModuleOutput(f, rootDir, m):\n name = m.settings.data['Name']\n t = m.settings.data['Type']\n if 'exe' in t:\n f.write(TExecutable.substitute(dict(project=name)))\n f.write(TTargetLinkLibs.substitute(dict(name=name)))\n elif 'shared' in t:\n f.write(TSharedLib.substitute(dict(project=name)))\n f.write(TTargetLinkLibs.substitute(dict(name=name)))\n elif 'static' in t:\n f.write(TStaticLib.substitute(dict(project=name)))\n f.write(TTargetLinkLibs.substitute(dict(name=name)))\n elif 'object' in t:\n f.write(TObjectLib.substitute(dict(project=name)))\n 
f.write(TTargetLinkLibs.substitute(dict(name=name)))\n return None\n\n\ndef WriteSubmoduleIncludes(f, rootDir, sections):\n for s in sections:\n submods = s.data[':']\n for sm in submods:\n sm = sm if sm.startswith('/') else '/' + sm\n output = TSubmoduleInclude.substitute(dict(dir=rootDir + sm)\n ) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n",
"step-4": "<mask token>\nTIfGuard = Template(\"\"\"if(${condition})\n${innerbody}\nendif()\n\"\"\")\nTProjectSettings = Template(\n \"\"\"cmake_minimum_required (VERSION ${MinCmakeVer})\nproject(${Name})\nset_property(GLOBAL PROPERTY USE_FOLDERS ${UseFolders})\nset(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)\n\"\"\"\n )\nTDefinition = Template('add_definitions(-D${definition})')\nTIncludeDirectory = Template('include_directories(\"${dir}\")')\nTSourceGlob = Template('FILE(GLOB ${source_id} \"${dir}/*.c*\")')\nTHeaderGlob = Template('FILE(GLOB ${header_id} \"${dir}/*.h*\")')\nTSourceGroup = Template('source_group(\"${folder}\" FILES $${${files}})\\n')\nTExecutable = Template('add_executable(${project} $${SOURCES} $${HEADERS})\\n')\nTSharedLib = Template(\n 'add_library(${project} SHARED $${SOURCES} $${HEADERS})\\n')\nTStaticLib = Template(\n 'add_library(${project} STATIC $${SOURCES} $${HEADERS})\\n')\nTObjectLib = Template('add_library(${project} OBJECT $${SOURCES}')\nTAppendVariable = Template('set( ${var} $${${var}} $${${appendedval}})\\n')\nTAppendPythonVariable = Template('set( ${var} $${${var}} ${appendedval})\\n')\nTMakeVariable = Template('set (${var} ${value})\\n')\nTLinkDirectory = Template('link_directories(\"${dir}\")')\nTTargetLinkLibs = Template(\n \"\"\"if(NOT LIBS STREQUAL \"\")\ntarget_link_libraries(${name} $${LIBS})\nendif()\n\"\"\"\n )\nTLinkFramework = Template(\n \"\"\"find_library(${framework}_LIB ${framework})\nMARK_AS_ADVANCED(${framework}_LIB)\nset(LIBS $${LIBS} $${${framework}_LIB})\"\"\"\n )\nTLinkSystemLib = Template(\n \"\"\"find_package(${framework} REQUIRED)\ninclude_directories($${${framework_upper}_INCLUDE_DIRS})\nset(LIBS $${LIBS} $${${framework_upper}_LIBRARIES})\"\"\"\n )\nTLinkObject = Template('set(LIBS $${LIBS} $<TARGET_OBJECTS>:${object})')\nTExecutableOutput = Template('set(EXECUTABLE_OUTPUT_PATH \"${dir}\")\\n')\nTRuntimeOutput = Template('set(CMAKE_RUNTIME_OUTPUT_DIRECTORY \"${dir}\")\\n')\nTLibraryoutput = Template(\n 
\"\"\"set(CMAKE_LIBRARY_OUTPUT_DIRECTORY \"${dir}\")\nset(LIBRARY_OUTPUT_PATH \"${dir}\")\n\"\"\"\n )\nTSubmoduleInclude = Template('add_subdirectory(${dir})')\n\n\ndef WriteToFile(f, output, condition=False, conditionID=''):\n f.write(output if not condition else WrapInGuard(conditionID, output))\n\n\ndef InsertEnvVariable(s):\n return Template(s).substitute(os.environ)\n\n\ndef ContainsEnvVariable(s):\n return '$' in s\n\n\ndef Strip(s):\n chars = '${}'\n for i in range(0, len(chars)):\n s = s.replace(chars[i], '')\n return s\n\n\ndef WrapInGuard(condition, innerbody):\n return TIfGuard.substitute(dict(condition=condition, innerbody=innerbody))\n\n\ndef WriteProjectSettings(f, section):\n if 'UseFolders' not in section.data:\n section.data['UseFolders'] = 'OFF'\n output = TProjectSettings.substitute(section.data)\n f.write(output)\n\n\ndef WriteRequiredVariables(f):\n variables = [dict(var='INCLUDES', value='\"\"'), dict(var='SOURCES',\n value='\"\"'), dict(var='LIBS', value='\"\"')]\n for v in variables:\n f.write(TMakeVariable.substitute(v))\n\n\ndef WriteDefinitions(f, sections):\n for s in sections:\n defs = s.data[':']\n output = ''\n for d in defs:\n output += TDefinition.substitute(dict(definition=d)) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n\n\ndef WriteIncludeDirectories(f, rootDir, sections):\n for s in sections:\n dirs = s.data[':']\n output = ''\n for d in dirs:\n localDir = d if d.startswith('/') else '/' + d\n headerID = Strip(localDir.replace('/', '_'))\n if ContainsEnvVariable(d):\n d = InsertEnvVariable(d)\n else:\n d = rootDir + localDir\n output = TIncludeDirectory.substitute(dict(dir=d)) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n output = THeaderGlob.substitute(dict(dir=d, header_id=headerID)\n ) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n output = TAppendVariable.substitute(dict(var='HEADERS',\n appendedval=headerID))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n 
localDir = Strip(localDir.replace('/', '\\\\\\\\'))\n output = TSourceGroup.substitute(dict(folder='Header Files' +\n localDir, files=headerID))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n\n\ndef WriteSourceDirectories(f, rootDir, sections):\n for s in sections:\n dirs = s.data[':']\n output = ''\n for d in dirs:\n localDir = d if d.startswith('/') else '/' + d\n sourceID = Strip(localDir.replace('/', '_'))\n if ContainsEnvVariable(d):\n d = InsertEnvVariable(d)\n else:\n d = rootDir + localDir\n output = TSourceGlob.substitute(dict(dir=d, source_id=sourceID)\n ) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n output = TAppendVariable.substitute(dict(var='SOURCES',\n appendedval=sourceID))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n localDir = Strip(localDir.replace('/', '\\\\\\\\'))\n output = TSourceGroup.substitute(dict(folder='Source Files' +\n localDir, files=sourceID))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n\n\ndef WriteProjectLibDirectories(f, rootDir, sections):\n for s in sections:\n dirs = s.data[':']\n output = ''\n for d in dirs:\n if ContainsEnvVariable(d):\n d = InsertEnvVariable(d)\n else:\n d = d if d.startswith('/') else '/' + d\n d = rootDir + d\n output = TLinkDirectory.substitute(dict(dir=d)) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n\n\ndef WriteLinkLibs(f, rootDir, sections):\n for s in sections:\n libs = s.data[':']\n output = ''\n for l in libs:\n if '-framework' in l:\n frameworkName = l.replace('-framework ', '')\n frameworkName = frameworkName.strip()\n output = TLinkFramework.substitute(dict(framework=\n frameworkName)) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n elif '-system' in l:\n systemLibName = l.replace('-system ', '')\n systemLibName = systemLibName.strip()\n output = TLinkSystemLib.substitute(dict(framework=\n systemLibName, framework_upper=systemLibName.upper())\n ) + '\\n'\n WriteToFile(f, output, s.HasCondition(), 
s.condition)\n elif '-object' in l:\n objectLibName = l.replace('-object ', '')\n objectLibName = objectLibName.strip()\n output = TLinkObject.substitute(dict(object=objectLibName)\n ) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n else:\n output = TAppendPythonVariable.substitute(dict(var='LIBS',\n appendedval=l))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n\n\ndef WriteOutputs(f, rootDir, sections):\n for s in sections:\n if 'Executable' in s.data:\n runtime = s.data['Executable']\n if ContainsEnvVariable(runtime):\n runtime = InsertEnvVariable(runtime)\n else:\n runtime = runtime if runtime.startswith('/') else '/' + runtime\n runtime = rootDir + runtime\n output = TRuntimeOutput.substitute(dict(dir=runtime))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n if 'Runtime' in s.data:\n runtime = s.data['Runtime']\n if ContainsEnvVariable(runtime):\n runtime = InsertEnvVariable(runtime)\n else:\n runtime = runtime if runtime.startswith('/') else '/' + runtime\n runtime = rootDir + runtime\n output = TExecutableOutput.substitute(dict(dir=runtime))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n if 'Libs' in s.data:\n print('LIBS OUTPUT BEING SET')\n statics = s.data['Libs']\n if ContainsEnvVariable(statics):\n statics = InsertEnvVariable(statics)\n else:\n statics = statics if statics.startswith('/') else '/' + statics\n statics = rootDir + statics\n output = TLibraryoutput.substitute(dict(dir=statics))\n WriteToFile(f, output, s.HasCondition(), s.condition)\n\n\ndef WriteModuleOutput(f, rootDir, m):\n name = m.settings.data['Name']\n t = m.settings.data['Type']\n if 'exe' in t:\n f.write(TExecutable.substitute(dict(project=name)))\n f.write(TTargetLinkLibs.substitute(dict(name=name)))\n elif 'shared' in t:\n f.write(TSharedLib.substitute(dict(project=name)))\n f.write(TTargetLinkLibs.substitute(dict(name=name)))\n elif 'static' in t:\n f.write(TStaticLib.substitute(dict(project=name)))\n 
f.write(TTargetLinkLibs.substitute(dict(name=name)))\n elif 'object' in t:\n f.write(TObjectLib.substitute(dict(project=name)))\n f.write(TTargetLinkLibs.substitute(dict(name=name)))\n return None\n\n\ndef WriteSubmoduleIncludes(f, rootDir, sections):\n for s in sections:\n submods = s.data[':']\n for sm in submods:\n sm = sm if sm.startswith('/') else '/' + sm\n output = TSubmoduleInclude.substitute(dict(dir=rootDir + sm)\n ) + '\\n'\n WriteToFile(f, output, s.HasCondition(), s.condition)\n",
"step-5": "from string import Template\nimport os\n\n#-----template objects-----\n\n#for putting a template inside an ifdef guard\nTIfGuard = Template(\"\"\"if(${condition})\n${innerbody}\nendif()\\n\"\"\")\n\n#For minimum cmake version and project name\nTProjectSettings = Template(\"\"\"cmake_minimum_required (VERSION ${MinCmakeVer})\nproject(${Name})\nset_property(GLOBAL PROPERTY USE_FOLDERS ${UseFolders})\nset(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)\\n\"\"\")\n\n\n#for including a definition\nTDefinition = Template(\"add_definitions(-D${definition})\")\n\n#include directories\nTIncludeDirectory = Template('include_directories(\"${dir}\")')\n\n#for globbing source files in a dir\nTSourceGlob = Template('FILE(GLOB ${source_id} \"${dir}/*.c*\")')\n\n#for globbing header files in a dir\nTHeaderGlob = Template('FILE(GLOB ${header_id} \"${dir}/*.h*\")')\n\n#template for source group (so they appear in VS filters etc.\nTSourceGroup = Template('source_group(\"${folder}\" FILES $${${files}})\\n')\n\n#for outputting an executable\nTExecutable = Template(\"add_executable(${project} $${SOURCES} $${HEADERS})\\n\")\n\n#for outputting a shared library\nTSharedLib = Template(\"add_library(${project} SHARED $${SOURCES} $${HEADERS})\\n\")\n\n#for outputting a static library\nTStaticLib = Template(\"add_library(${project} STATIC $${SOURCES} $${HEADERS})\\n\")\n\n#for outputting a collection of code files to an object file\nTObjectLib = Template(\"add_library(${project} OBJECT $${SOURCES}\")\n\n#template for appending a cmake variable to another cmake variable\nTAppendVariable = Template(\"set( ${var} $${${var}} $${${appendedval}})\\n\")\n\n#template for appending a python variable to a cmake variable\nTAppendPythonVariable = Template(\"set( ${var} $${${var}} ${appendedval})\\n\")\n\n#template for setting cmake variable\nTMakeVariable = Template('set (${var} ${value})\\n')\n\n#template for adding a link directory\nTLinkDirectory = 
Template('link_directories(\"${dir}\")')\n\n#template for targeting link libs\nTTargetLinkLibs = Template(\"\"\"if(NOT LIBS STREQUAL \"\")\ntarget_link_libraries(${name} $${LIBS})\nendif()\n\"\"\")\n\n#for linking a framework on the mac\nTLinkFramework = Template(\"\"\"find_library(${framework}_LIB ${framework})\nMARK_AS_ADVANCED(${framework}_LIB)\nset(LIBS $${LIBS} $${${framework}_LIB})\"\"\")\n\n#for linking a system library\nTLinkSystemLib = Template(\"\"\"find_package(${framework} REQUIRED)\ninclude_directories($${${framework_upper}_INCLUDE_DIRS})\nset(LIBS $${LIBS} $${${framework_upper}_LIBRARIES})\"\"\")\n\n#for linking objects into this module\nTLinkObject = Template(\"set(LIBS $${LIBS} $<TARGET_OBJECTS>:${object})\")\n\n#template for exectuable output\nTExecutableOutput = Template('set(EXECUTABLE_OUTPUT_PATH \"${dir}\")\\n')\n\n#template for exectuable output\nTRuntimeOutput = Template('set(CMAKE_RUNTIME_OUTPUT_DIRECTORY \"${dir}\")\\n')\n\n#template for library output\nTLibraryoutput = Template('set(CMAKE_LIBRARY_OUTPUT_DIRECTORY \"${dir}\")\\nset(LIBRARY_OUTPUT_PATH \"${dir}\")\\n')\n\n#template for including a submodule\nTSubmoduleInclude = Template('add_subdirectory(${dir})')\n\n#-----Helper Functions----\ndef WriteToFile(f, output, condition = False, conditionID = \"\"):\n\tf.write(output if not condition else WrapInGuard(conditionID, output))\n\ndef InsertEnvVariable(s):\n\treturn Template(s).substitute(os.environ)\n\ndef ContainsEnvVariable(s):\n\treturn (\"$\" in s)\n\n#removes all characters that may cause issues with cmake\n#such as ${} characters for environment variables\ndef Strip(s):\n\tchars = \"${}\"\n\tfor i in range(0,len(chars)):\n\t\ts=s.replace(chars[i],\"\")\n\treturn s\n\n#-----Write Functions-----\n#Puts innerbody into TIfGuard template with the given condition\n#then returns the string\ndef WrapInGuard(condition, innerbody):\n\treturn TIfGuard.substitute(dict(condition=condition, innerbody=innerbody))\n\t\ndef 
WriteProjectSettings(f, section):\n\t#defaults\n\tif \"UseFolders\" not in section.data: section.data[\"UseFolders\"] = \"OFF\"\n\t\n\t#output\n\toutput = TProjectSettings.substitute(section.data)\n\tf.write(output)\n\t\n#writes required CMAKE variables to the file\ndef WriteRequiredVariables(f):\n\t#all required variables go here to initialise\n\tvariables = [\n\t\tdict(var=\"INCLUDES\", value='\"\"'), \n\t\tdict(var=\"SOURCES\", value='\"\"'), \n\t\tdict(var=\"LIBS\", value='\"\"') \n\t\t]\n\t\n\t#write them to file\t\n\tfor v in variables:\n\t\tf.write(TMakeVariable.substitute(v))\n\t\n#definitions such as #defines \t\ndef WriteDefinitions(f, sections):\n\t#first write the one which is not platform specific\n\tfor s in sections:\n\t\tdefs = s.data[\":\"]\n\t\t\n\t\t#gather definitions to be output\n\t\toutput = \"\"\n\t\tfor d in defs:\n\t\t\toutput += TDefinition.substitute(dict(definition=d)) + \"\\n\"\n\t\t\n\t\tWriteToFile(f,output, s.HasCondition(), s.condition)\n\n#project include directories\ndef WriteIncludeDirectories(f, rootDir, sections):\n\t#first write the one which is not platform specific\n\tfor s in sections:\n\t\tdirs = s.data[\":\"]\n\t\t\n\t\t#gather definitions to be output\n\t\toutput = \"\"\n\t\tfor d in dirs:\n\t\t\tlocalDir = d if d.startswith(\"/\") else \"/\"+d\n\t\t\theaderID = Strip(localDir.replace('/','_'))\n\t\t\t\n\t\t\t#insert any environment variables\n\t\t\tif ContainsEnvVariable(d):\n\t\t\t\td = InsertEnvVariable(d)\n\t\t\telse:\n\t\t\t\td = rootDir + localDir\n\t\t\t\t\n\t\t\t#add include directory\n\t\t\toutput = TIncludeDirectory.substitute(dict(dir=d)) + \"\\n\"\n\t\t\tWriteToFile(f,output, s.HasCondition(), s.condition)\n\t\t\t\n\t\t\t#glob all header files\n\t\t\toutput = THeaderGlob.substitute(dict(dir=d, header_id=headerID)) + \"\\n\"\n\t\t\tWriteToFile(f,output, s.HasCondition(), s.condition)\n\t\t\t\n\t\t\t#append to HEADERS variable\n\t\t\toutput = TAppendVariable.substitute(dict(var=\"HEADERS\", 
appendedval=headerID))\n\t\t\tWriteToFile(f,output, s.HasCondition(), s.condition)\n\t\t\t\n\t\t\t#make source group so they appear in filters\n\t\t\tlocalDir = Strip(localDir.replace('/','\\\\\\\\'))\n\t\t\toutput = TSourceGroup.substitute(dict(folder=\"Header Files\" + localDir, files=headerID))\n\t\t\tWriteToFile(f,output, s.HasCondition(), s.condition)\n\t\t\n#project source directories\ndef WriteSourceDirectories(f, rootDir, sections):\n\t#first write the one which is not platform specific\n\tfor s in sections:\n\t\tdirs = s.data[\":\"]\n\n\t\toutput = \"\"\n\t\tfor d in dirs:\n\t\t\tlocalDir = d if d.startswith(\"/\") else \"/\"+d\n\t\t\tsourceID = Strip(localDir.replace('/','_'))\n\t\t\t\n\t\t\t#insert any environment variables\n\t\t\tif ContainsEnvVariable(d):\n\t\t\t\td = InsertEnvVariable(d)\n\t\t\telse:\n\t\t\t\td = rootDir + localDir\n\t\t\t\t\n\t\t\t#glob all source files\n\t\t\toutput = TSourceGlob.substitute(dict(dir=d, source_id=sourceID)) + \"\\n\"\n\t\t\tWriteToFile(f,output, s.HasCondition(), s.condition)\n\t\t\t\n\t\t\t#append globbed source files to SOURCES cmake variable\n\t\t\toutput = TAppendVariable.substitute(dict(var=\"SOURCES\", appendedval=sourceID))\n\t\t\tWriteToFile(f,output, s.HasCondition(), s.condition)\n\t\t\t\n\t\t\t#make source group so they appear in filters\n\t\t\tlocalDir = Strip(localDir.replace('/','\\\\\\\\'))\n\t\t\toutput = TSourceGroup.substitute(dict(folder=\"Source Files\" + localDir, files=sourceID))\n\t\t\tWriteToFile(f,output, s.HasCondition(), s.condition)\n\n#includes local library directories \ndef WriteProjectLibDirectories(f, rootDir, sections):\n\t#first write the one which is not platform specific\n\tfor s in sections:\n\t\tdirs = s.data[\":\"]\n\n\t\toutput = \"\"\n\t\tfor d in dirs:\n\t\t\t#insert any environment variables\n\t\t\tif ContainsEnvVariable(d):\n\t\t\t\td = InsertEnvVariable(d)\n\t\t\telse:\n\t\t\t\td = d if d.startswith('/') else \"/\"+d\n\t\t\t\td = rootDir + d\n\t\t\t\t\n\t\t\t#include lib 
directory\n\t\t\toutput = TLinkDirectory.substitute(dict(dir=d)) + \"\\n\"\n\t\t\tWriteToFile(f,output, s.HasCondition(), s.condition)\n\n#adds all libs to the LIBS cmake var\ndef WriteLinkLibs(f, rootDir, sections):\n\t#first write the one which is not platform specific\n\tfor s in sections:\n\t\tlibs = s.data[\":\"]\n\n\t\toutput = \"\"\n\t\tfor l in libs:\n\t\t\tif \"-framework\" in l:\n\t\t\t\tframeworkName = l.replace(\"-framework \", \"\")\n\t\t\t\tframeworkName = frameworkName.strip()\n\t\t\t\t\n\t\t\t\toutput = TLinkFramework.substitute(dict(framework=frameworkName)) +\"\\n\"\n\t\t\t\tWriteToFile(f,output, s.HasCondition(), s.condition)\n\t\t\t\t\n\t\t\telif \"-system\" in l:\n\t\t\t\tsystemLibName = l.replace(\"-system \", \"\")\n\t\t\t\tsystemLibName = systemLibName.strip()\n\t\t\t\t\n\t\t\t\toutput = TLinkSystemLib.substitute(dict(framework=systemLibName,framework_upper=systemLibName.upper())) +\"\\n\"\n\t\t\t\tWriteToFile(f,output, s.HasCondition(), s.condition)\n\t\t\t\n\t\t\telif \"-object\" in l:\n\t\t\t\tobjectLibName = l.replace(\"-object \", \"\")\n\t\t\t\tobjectLibName = objectLibName.strip()\n\t\t\t\t\n\t\t\t\toutput = TLinkObject.substitute(dict(object=objectLibName)) +\"\\n\"\n\t\t\t\tWriteToFile(f,output, s.HasCondition(), s.condition)\n\t\t\telse:\n\t\t\t\t#add to LIBS cmake var\n\t\t\t\toutput = TAppendPythonVariable.substitute(dict(var=\"LIBS\", appendedval=l))\n\t\t\t\tWriteToFile(f,output, s.HasCondition(), s.condition)\n\t\t\t\n\t\t\t\n\t\t\t\n#Writes the cmake runtime/lib etc. 
outputs\ndef WriteOutputs(f, rootDir, sections):\n\tfor s in sections:\n\t\tif \"Executable\" in s.data:\n\t\t\truntime = s.data[\"Executable\"]\n\t\t\t#insert any environment variables\n\t\t\tif ContainsEnvVariable(runtime):\n\t\t\t\truntime = InsertEnvVariable(runtime)\n\t\t\telse:\n\t\t\t\truntime = runtime if runtime.startswith('/') else \"/\"+runtime\n\t\t\t\truntime = rootDir + runtime\n\t\t\toutput = TRuntimeOutput.substitute(dict(dir=runtime))\n\t\t\tWriteToFile(f,output, s.HasCondition(), s.condition)\n\t\t\t\n\t\tif \"Runtime\" in s.data:\n\t\t\truntime = s.data[\"Runtime\"]\n\t\t\t#insert any environment variables\n\t\t\tif ContainsEnvVariable(runtime):\n\t\t\t\truntime = InsertEnvVariable(runtime)\n\t\t\telse:\n\t\t\t\truntime = runtime if runtime.startswith('/') else \"/\"+runtime\n\t\t\t\truntime = rootDir + runtime\n\t\t\toutput = TExecutableOutput.substitute(dict(dir=runtime))\n\t\t\tWriteToFile(f,output, s.HasCondition(), s.condition)\n\t\t\t\n\t\tif \"Libs\" in s.data:\n\t\t\tprint(\"LIBS OUTPUT BEING SET\")\n\t\t\tstatics = s.data[\"Libs\"]\n\t\t\t#insert any environment variables\n\t\t\tif ContainsEnvVariable(statics):\n\t\t\t\tstatics = InsertEnvVariable(statics)\n\t\t\telse:\n\t\t\t\tstatics = statics if statics.startswith('/') else \"/\"+statics\n\t\t\t\tstatics = rootDir + statics\n\t\t\toutput = TLibraryoutput.substitute(dict(dir=statics))\n\t\t\tWriteToFile(f,output, s.HasCondition(), s.condition)\n\t\t\t\n\t\t\t\n#Writes the module output section of the CmakeLists file\ndef WriteModuleOutput(f, rootDir, m):\n\tname = m.settings.data[\"Name\"]\t#name of lib/exe\n\tt = m.settings.data[\"Type\"]\t#build type (lib/exe)\n\tif \"exe\" in t:\n\t\tf.write(TExecutable.substitute(dict(project=name)))\n\t\tf.write(TTargetLinkLibs.substitute(dict(name=name)))\n\telif \"shared\" in t:\n\t\tf.write(TSharedLib.substitute(dict(project=name)))\n\t\tf.write(TTargetLinkLibs.substitute(dict(name=name)))\n\telif \"static\" in 
t:\n\t\tf.write(TStaticLib.substitute(dict(project=name)))\n\t\tf.write(TTargetLinkLibs.substitute(dict(name=name)))\n\telif \"object\" in t:\n\t\tf.write(TObjectLib.substitute(dict(project=name)))\n\t\tf.write(TTargetLinkLibs.substitute(dict(name=name)))\n\treturn None\n\t\n\n#writes the include for a submodule\ndef WriteSubmoduleIncludes(f, rootDir, sections):\n\tfor s in sections:\n\t\tsubmods = s.data[\":\"]\n\t\t\n\t\tfor sm in submods:\n\t\t\tsm = sm if sm.startswith('/') else \"/\"+sm\n\t\t\t\n\t\t\toutput = TSubmoduleInclude.substitute(dict(dir=rootDir+sm)) + \"\\n\"\n\t\t\tWriteToFile(f,output, s.HasCondition(), s.condition)",
"step-ids": [
8,
12,
15,
16,
18
]
}
|
[
8,
12,
15,
16,
18
] |
import os
my_home = os.popen("echo $MYWORK_DIR").readlines()[0][:-1]
import numpy
from sys import path, argv
path.append("D:/Github/astrophy-research/mylib")
path.append("D:/Github/astrophy-research/multi_shear_detect")
path.append('%s/work/mylib' % my_home)
from Fourier_Quad import Fourier_Quad
# import h5py
# from plot_tool import Image_Plot
import tool_box
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
numprocs = comm.Get_size()
source_num = int(argv[1])*10000
sigma_1 = float(argv[2])
sigma_2 = float(argv[3])
signal_num = numprocs
signals = numpy.linspace(-0.05, 0.05, signal_num)
itemsize = MPI.DOUBLE.Get_size()
if rank == 0:
# bytes for 10 double elements
nbytes = 2*signal_num*itemsize
else:
nbytes = 0
# on rank 0 of comm, create the contiguous shared block
win1 = MPI.Win.Allocate_shared(nbytes, itemsize, comm=comm)
buf1, itemsize = win1.Shared_query(0)
result = numpy.ndarray(buffer=buf1, dtype='d', shape=(2, signal_num)) # array filled with zero
fq = Fourier_Quad(12,123)
n = numpy.ones((source_num, ))
# for i in range(signal_num):
source = numpy.random.normal(signals[rank], sigma_1, source_num) + numpy.random.normal(-signals[rank]/100, sigma_2, source_num)
signal_est = fq.find_shear(source, n, 8,scale=100, left=-0.08, right=0.08)[:2]
result[:, rank] = signal_est
print(rank, signal_est)
comm.Barrier()
if rank == 0:
# result[2] = signals
print(signals)
print(result)
mc = numpy.array(tool_box.data_fit(signals, result[0], result[1]))
mc[0] = mc[0] - 1
print(mc)
# img = Image_Plot()
# img.subplots(1,1)
# img.axs[0][0].errorbar(signals, result[0], result[1])
# img.axs[0][0].plot([-0.06,0.06],[-0.06, 0.06])
# img.show_img()
|
normal
|
{
"blob_id": "1ffdc2845bc503c0a30407de444a152f8cc68d57",
"index": 1370,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npath.append('D:/Github/astrophy-research/mylib')\npath.append('D:/Github/astrophy-research/multi_shear_detect')\npath.append('%s/work/mylib' % my_home)\n<mask token>\nif rank == 0:\n nbytes = 2 * signal_num * itemsize\nelse:\n nbytes = 0\n<mask token>\nprint(rank, signal_est)\ncomm.Barrier()\nif rank == 0:\n print(signals)\n print(result)\n mc = numpy.array(tool_box.data_fit(signals, result[0], result[1]))\n mc[0] = mc[0] - 1\n print(mc)\n",
"step-3": "<mask token>\nmy_home = os.popen('echo $MYWORK_DIR').readlines()[0][:-1]\n<mask token>\npath.append('D:/Github/astrophy-research/mylib')\npath.append('D:/Github/astrophy-research/multi_shear_detect')\npath.append('%s/work/mylib' % my_home)\n<mask token>\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nnumprocs = comm.Get_size()\nsource_num = int(argv[1]) * 10000\nsigma_1 = float(argv[2])\nsigma_2 = float(argv[3])\nsignal_num = numprocs\nsignals = numpy.linspace(-0.05, 0.05, signal_num)\nitemsize = MPI.DOUBLE.Get_size()\nif rank == 0:\n nbytes = 2 * signal_num * itemsize\nelse:\n nbytes = 0\nwin1 = MPI.Win.Allocate_shared(nbytes, itemsize, comm=comm)\nbuf1, itemsize = win1.Shared_query(0)\nresult = numpy.ndarray(buffer=buf1, dtype='d', shape=(2, signal_num))\nfq = Fourier_Quad(12, 123)\nn = numpy.ones((source_num,))\nsource = numpy.random.normal(signals[rank], sigma_1, source_num\n ) + numpy.random.normal(-signals[rank] / 100, sigma_2, source_num)\nsignal_est = fq.find_shear(source, n, 8, scale=100, left=-0.08, right=0.08)[:2]\nresult[:, rank] = signal_est\nprint(rank, signal_est)\ncomm.Barrier()\nif rank == 0:\n print(signals)\n print(result)\n mc = numpy.array(tool_box.data_fit(signals, result[0], result[1]))\n mc[0] = mc[0] - 1\n print(mc)\n",
"step-4": "import os\nmy_home = os.popen('echo $MYWORK_DIR').readlines()[0][:-1]\nimport numpy\nfrom sys import path, argv\npath.append('D:/Github/astrophy-research/mylib')\npath.append('D:/Github/astrophy-research/multi_shear_detect')\npath.append('%s/work/mylib' % my_home)\nfrom Fourier_Quad import Fourier_Quad\nimport tool_box\nfrom mpi4py import MPI\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nnumprocs = comm.Get_size()\nsource_num = int(argv[1]) * 10000\nsigma_1 = float(argv[2])\nsigma_2 = float(argv[3])\nsignal_num = numprocs\nsignals = numpy.linspace(-0.05, 0.05, signal_num)\nitemsize = MPI.DOUBLE.Get_size()\nif rank == 0:\n nbytes = 2 * signal_num * itemsize\nelse:\n nbytes = 0\nwin1 = MPI.Win.Allocate_shared(nbytes, itemsize, comm=comm)\nbuf1, itemsize = win1.Shared_query(0)\nresult = numpy.ndarray(buffer=buf1, dtype='d', shape=(2, signal_num))\nfq = Fourier_Quad(12, 123)\nn = numpy.ones((source_num,))\nsource = numpy.random.normal(signals[rank], sigma_1, source_num\n ) + numpy.random.normal(-signals[rank] / 100, sigma_2, source_num)\nsignal_est = fq.find_shear(source, n, 8, scale=100, left=-0.08, right=0.08)[:2]\nresult[:, rank] = signal_est\nprint(rank, signal_est)\ncomm.Barrier()\nif rank == 0:\n print(signals)\n print(result)\n mc = numpy.array(tool_box.data_fit(signals, result[0], result[1]))\n mc[0] = mc[0] - 1\n print(mc)\n",
"step-5": "import os\nmy_home = os.popen(\"echo $MYWORK_DIR\").readlines()[0][:-1]\nimport numpy\nfrom sys import path, argv\npath.append(\"D:/Github/astrophy-research/mylib\")\npath.append(\"D:/Github/astrophy-research/multi_shear_detect\")\npath.append('%s/work/mylib' % my_home)\nfrom Fourier_Quad import Fourier_Quad\n# import h5py\n# from plot_tool import Image_Plot\nimport tool_box\nfrom mpi4py import MPI\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nnumprocs = comm.Get_size()\n\nsource_num = int(argv[1])*10000\nsigma_1 = float(argv[2])\nsigma_2 = float(argv[3])\nsignal_num = numprocs\nsignals = numpy.linspace(-0.05, 0.05, signal_num)\n\nitemsize = MPI.DOUBLE.Get_size()\nif rank == 0:\n # bytes for 10 double elements\n nbytes = 2*signal_num*itemsize\nelse:\n nbytes = 0\n\n# on rank 0 of comm, create the contiguous shared block\nwin1 = MPI.Win.Allocate_shared(nbytes, itemsize, comm=comm)\nbuf1, itemsize = win1.Shared_query(0)\nresult = numpy.ndarray(buffer=buf1, dtype='d', shape=(2, signal_num)) # array filled with zero\n\nfq = Fourier_Quad(12,123)\nn = numpy.ones((source_num, ))\n# for i in range(signal_num):\nsource = numpy.random.normal(signals[rank], sigma_1, source_num) + numpy.random.normal(-signals[rank]/100, sigma_2, source_num)\nsignal_est = fq.find_shear(source, n, 8,scale=100, left=-0.08, right=0.08)[:2]\nresult[:, rank] = signal_est\nprint(rank, signal_est)\ncomm.Barrier()\nif rank == 0:\n # result[2] = signals\n print(signals)\n print(result)\n mc = numpy.array(tool_box.data_fit(signals, result[0], result[1]))\n mc[0] = mc[0] - 1\n print(mc)\n# img = Image_Plot()\n# img.subplots(1,1)\n# img.axs[0][0].errorbar(signals, result[0], result[1])\n# img.axs[0][0].plot([-0.06,0.06],[-0.06, 0.06])\n# img.show_img()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@contextmanager
def timer(name):
t0 = time.time()
yield
print('{color}[{name}] done in {et:.0f} s{nocolor}'.format(name=name,
et=time.time() - t0, color='\x1b[1;33m', nocolor='\x1b[0m'))
<|reserved_special_token_0|>
@main.command()
@click.option('--path', type=click.Path(exists=True), default='data/cells')
def train(path):
dirs = [p for p in Path(path).iterdir() if p.is_dir()]
dataset = CellsDataset(dirs[:5], transform=train_transform())
plot_cells(*zip(*dataset))
model = build_model(max_epochs=2)
with timer('Train the model'):
model.fit(dataset)
infer(model, dataset, 'train')
model.set_params(batch_size=1)
test = CellsDataset(dirs[:2], transform=test_transform())
infer(model, test, 'test')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@contextmanager
def timer(name):
t0 = time.time()
yield
print('{color}[{name}] done in {et:.0f} s{nocolor}'.format(name=name,
et=time.time() - t0, color='\x1b[1;33m', nocolor='\x1b[0m'))
@click.group()
def main():
pass
def infer(model, dataset, title):
print(f'Infering for {title} set')
plot_cells(*zip(*dataset))
with timer('Predict the labels'):
preds = model.predict(dataset)
imgs, masks = zip(*dataset)
plot_cells(imgs, masks, preds)
@main.command()
@click.option('--path', type=click.Path(exists=True), default='data/cells')
def train(path):
dirs = [p for p in Path(path).iterdir() if p.is_dir()]
dataset = CellsDataset(dirs[:5], transform=train_transform())
plot_cells(*zip(*dataset))
model = build_model(max_epochs=2)
with timer('Train the model'):
model.fit(dataset)
infer(model, dataset, 'train')
model.set_params(batch_size=1)
test = CellsDataset(dirs[:2], transform=test_transform())
infer(model, test, 'test')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@contextmanager
def timer(name):
t0 = time.time()
yield
print('{color}[{name}] done in {et:.0f} s{nocolor}'.format(name=name,
et=time.time() - t0, color='\x1b[1;33m', nocolor='\x1b[0m'))
@click.group()
def main():
pass
def infer(model, dataset, title):
print(f'Infering for {title} set')
plot_cells(*zip(*dataset))
with timer('Predict the labels'):
preds = model.predict(dataset)
imgs, masks = zip(*dataset)
plot_cells(imgs, masks, preds)
@main.command()
@click.option('--path', type=click.Path(exists=True), default='data/cells')
def train(path):
dirs = [p for p in Path(path).iterdir() if p.is_dir()]
dataset = CellsDataset(dirs[:5], transform=train_transform())
plot_cells(*zip(*dataset))
model = build_model(max_epochs=2)
with timer('Train the model'):
model.fit(dataset)
infer(model, dataset, 'train')
model.set_params(batch_size=1)
test = CellsDataset(dirs[:2], transform=test_transform())
infer(model, test, 'test')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import time
import click
from contextlib import contextmanager
from pathlib import Path
from model.data import CellsDataset
from model.model import build_model, train_transform, test_transform
from model.vis import plot_cells
@contextmanager
def timer(name):
t0 = time.time()
yield
print('{color}[{name}] done in {et:.0f} s{nocolor}'.format(name=name,
et=time.time() - t0, color='\x1b[1;33m', nocolor='\x1b[0m'))
@click.group()
def main():
pass
def infer(model, dataset, title):
print(f'Infering for {title} set')
plot_cells(*zip(*dataset))
with timer('Predict the labels'):
preds = model.predict(dataset)
imgs, masks = zip(*dataset)
plot_cells(imgs, masks, preds)
@main.command()
@click.option('--path', type=click.Path(exists=True), default='data/cells')
def train(path):
dirs = [p for p in Path(path).iterdir() if p.is_dir()]
dataset = CellsDataset(dirs[:5], transform=train_transform())
plot_cells(*zip(*dataset))
model = build_model(max_epochs=2)
with timer('Train the model'):
model.fit(dataset)
infer(model, dataset, 'train')
model.set_params(batch_size=1)
test = CellsDataset(dirs[:2], transform=test_transform())
infer(model, test, 'test')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import time
import click
from contextlib import contextmanager
from pathlib import Path
from model.data import CellsDataset
from model.model import build_model, train_transform, test_transform
from model.vis import plot_cells
@contextmanager
def timer(name):
t0 = time.time()
yield
print("{color}[{name}] done in {et:.0f} s{nocolor}".format(
name=name, et=time.time() - t0,
color='\033[1;33m', nocolor='\033[0m'))
@click.group()
def main():
pass
def infer(model, dataset, title):
print(f"Infering for {title} set")
plot_cells(*zip(*dataset))
with timer("Predict the labels"):
preds = model.predict(dataset)
imgs, masks = zip(*dataset)
plot_cells(imgs, masks, preds)
@main.command()
@click.option("--path", type=click.Path(exists=True), default="data/cells")
def train(path):
dirs = [p for p in Path(path).iterdir() if p.is_dir()]
dataset = CellsDataset(dirs[:5], transform=train_transform())
plot_cells(*zip(*dataset))
model = build_model(max_epochs=2)
with timer("Train the model"):
model.fit(dataset)
infer(model, dataset, "train")
# Infer for all types of images
model.set_params(batch_size=1)
test = CellsDataset(dirs[:2], transform=test_transform())
infer(model, test, "test")
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "5cc325758d5bd99ebe49c40af4d2e339bbf64044",
"index": 7508,
"step-1": "<mask token>\n\n\n@contextmanager\ndef timer(name):\n t0 = time.time()\n yield\n print('{color}[{name}] done in {et:.0f} s{nocolor}'.format(name=name,\n et=time.time() - t0, color='\\x1b[1;33m', nocolor='\\x1b[0m'))\n\n\n<mask token>\n\n\n@main.command()\n@click.option('--path', type=click.Path(exists=True), default='data/cells')\ndef train(path):\n dirs = [p for p in Path(path).iterdir() if p.is_dir()]\n dataset = CellsDataset(dirs[:5], transform=train_transform())\n plot_cells(*zip(*dataset))\n model = build_model(max_epochs=2)\n with timer('Train the model'):\n model.fit(dataset)\n infer(model, dataset, 'train')\n model.set_params(batch_size=1)\n test = CellsDataset(dirs[:2], transform=test_transform())\n infer(model, test, 'test')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@contextmanager\ndef timer(name):\n t0 = time.time()\n yield\n print('{color}[{name}] done in {et:.0f} s{nocolor}'.format(name=name,\n et=time.time() - t0, color='\\x1b[1;33m', nocolor='\\x1b[0m'))\n\n\n@click.group()\ndef main():\n pass\n\n\ndef infer(model, dataset, title):\n print(f'Infering for {title} set')\n plot_cells(*zip(*dataset))\n with timer('Predict the labels'):\n preds = model.predict(dataset)\n imgs, masks = zip(*dataset)\n plot_cells(imgs, masks, preds)\n\n\n@main.command()\n@click.option('--path', type=click.Path(exists=True), default='data/cells')\ndef train(path):\n dirs = [p for p in Path(path).iterdir() if p.is_dir()]\n dataset = CellsDataset(dirs[:5], transform=train_transform())\n plot_cells(*zip(*dataset))\n model = build_model(max_epochs=2)\n with timer('Train the model'):\n model.fit(dataset)\n infer(model, dataset, 'train')\n model.set_params(batch_size=1)\n test = CellsDataset(dirs[:2], transform=test_transform())\n infer(model, test, 'test')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@contextmanager\ndef timer(name):\n t0 = time.time()\n yield\n print('{color}[{name}] done in {et:.0f} s{nocolor}'.format(name=name,\n et=time.time() - t0, color='\\x1b[1;33m', nocolor='\\x1b[0m'))\n\n\n@click.group()\ndef main():\n pass\n\n\ndef infer(model, dataset, title):\n print(f'Infering for {title} set')\n plot_cells(*zip(*dataset))\n with timer('Predict the labels'):\n preds = model.predict(dataset)\n imgs, masks = zip(*dataset)\n plot_cells(imgs, masks, preds)\n\n\n@main.command()\n@click.option('--path', type=click.Path(exists=True), default='data/cells')\ndef train(path):\n dirs = [p for p in Path(path).iterdir() if p.is_dir()]\n dataset = CellsDataset(dirs[:5], transform=train_transform())\n plot_cells(*zip(*dataset))\n model = build_model(max_epochs=2)\n with timer('Train the model'):\n model.fit(dataset)\n infer(model, dataset, 'train')\n model.set_params(batch_size=1)\n test = CellsDataset(dirs[:2], transform=test_transform())\n infer(model, test, 'test')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import time\nimport click\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom model.data import CellsDataset\nfrom model.model import build_model, train_transform, test_transform\nfrom model.vis import plot_cells\n\n\n@contextmanager\ndef timer(name):\n t0 = time.time()\n yield\n print('{color}[{name}] done in {et:.0f} s{nocolor}'.format(name=name,\n et=time.time() - t0, color='\\x1b[1;33m', nocolor='\\x1b[0m'))\n\n\n@click.group()\ndef main():\n pass\n\n\ndef infer(model, dataset, title):\n print(f'Infering for {title} set')\n plot_cells(*zip(*dataset))\n with timer('Predict the labels'):\n preds = model.predict(dataset)\n imgs, masks = zip(*dataset)\n plot_cells(imgs, masks, preds)\n\n\n@main.command()\n@click.option('--path', type=click.Path(exists=True), default='data/cells')\ndef train(path):\n dirs = [p for p in Path(path).iterdir() if p.is_dir()]\n dataset = CellsDataset(dirs[:5], transform=train_transform())\n plot_cells(*zip(*dataset))\n model = build_model(max_epochs=2)\n with timer('Train the model'):\n model.fit(dataset)\n infer(model, dataset, 'train')\n model.set_params(batch_size=1)\n test = CellsDataset(dirs[:2], transform=test_transform())\n infer(model, test, 'test')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import time\nimport click\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom model.data import CellsDataset\nfrom model.model import build_model, train_transform, test_transform\nfrom model.vis import plot_cells\n\n\n@contextmanager\ndef timer(name):\n t0 = time.time()\n yield\n print(\"{color}[{name}] done in {et:.0f} s{nocolor}\".format(\n name=name, et=time.time() - t0,\n color='\\033[1;33m', nocolor='\\033[0m'))\n\n\n@click.group()\ndef main():\n pass\n\n\ndef infer(model, dataset, title):\n print(f\"Infering for {title} set\")\n plot_cells(*zip(*dataset))\n with timer(\"Predict the labels\"):\n preds = model.predict(dataset)\n\n imgs, masks = zip(*dataset)\n plot_cells(imgs, masks, preds)\n\n\n@main.command()\n@click.option(\"--path\", type=click.Path(exists=True), default=\"data/cells\")\ndef train(path):\n dirs = [p for p in Path(path).iterdir() if p.is_dir()]\n dataset = CellsDataset(dirs[:5], transform=train_transform())\n plot_cells(*zip(*dataset))\n\n model = build_model(max_epochs=2)\n with timer(\"Train the model\"):\n model.fit(dataset)\n\n infer(model, dataset, \"train\")\n\n # Infer for all types of images\n model.set_params(batch_size=1)\n test = CellsDataset(dirs[:2], transform=test_transform())\n infer(model, test, \"test\")\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def conv(dt1, dt2):
return np.mean((dt1 - np.mean(dt1)) * (dt2 - np.mean(dt2)))
<|reserved_special_token_0|>
def rho(p1, p2):
return conv(p1, p2) / np.std(p1) / np.std(p2)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print("""
----------------概率与统计--------------------""")
<|reserved_special_token_0|>
print("""
-----------------高斯分布-------------------""")
def mode(x, mu, std):
return 1 / np.sqrt(2 * np.pi) / std * np.exp(-(x - mu) ** 2 / 2 / std ** 2)
print("""
---------------散点图---------------------""")
<|reserved_special_token_0|>
print("""
-----------------协方差-------------------""")
<|reserved_special_token_0|>
def conv(dt1, dt2):
return np.mean((dt1 - np.mean(dt1)) * (dt2 - np.mean(dt2)))
print('conv', conv(x5, x6))
print("""
----------------线性相关系数--------------------""")
def rho(p1, p2):
return conv(p1, p2) / np.std(p1) / np.std(p2)
print("""
---------------坐标轴旋转---------------------""")
print('rho', rho(x5, x6))
print("""
---------------信息熵---------------------""")
<|reserved_special_token_0|>
plt.plot(px, y)
plt.show()
<|reserved_special_token_1|>
print("""
----------------概率与统计--------------------""")
<|reserved_special_token_0|>
iris = sd.load_iris()
x1 = np.random.random([10000])
x2 = np.random.normal(2, 1, [10000])
x3 = np.random.normal(5, 1, [10000])
x1_mu = np.mean(x1)
x1_std = np.std(x1)
x2_mu = np.mean(x2)
x2_std = np.std(x2)
x3_mu = np.mean(x3)
x3_std = np.std(x3)
print("""
-----------------高斯分布-------------------""")
def mode(x, mu, std):
return 1 / np.sqrt(2 * np.pi) / std * np.exp(-(x - mu) ** 2 / 2 / std ** 2)
print("""
---------------散点图---------------------""")
x5 = np.random.normal(1, 1, [1000])
x6 = 2 * x5 + 1 + np.random.normal(0, 0.6, [1000])
print("""
-----------------协方差-------------------""")
rand1 = np.random.normal(loc=1, scale=3, size=[1000]) * 10
rand2 = np.random.normal(1, 3, size=[1000]) * 10
def conv(dt1, dt2):
return np.mean((dt1 - np.mean(dt1)) * (dt2 - np.mean(dt2)))
print('conv', conv(x5, x6))
print("""
----------------线性相关系数--------------------""")
def rho(p1, p2):
return conv(p1, p2) / np.std(p1) / np.std(p2)
print("""
---------------坐标轴旋转---------------------""")
print('rho', rho(x5, x6))
print("""
---------------信息熵---------------------""")
px = np.linspace(0.01, 0.99, 1000)
y = px * np.log(1 / px) + (1 - px) * np.log(1 / (1 - px))
plt.plot(px, y)
plt.show()
<|reserved_special_token_1|>
print("""
----------------概率与统计--------------------""")
import numpy as np
import scipy
import sympy as sym
import matplotlib.pyplot as plt
import sklearn.datasets as sd
iris = sd.load_iris()
x1 = np.random.random([10000])
x2 = np.random.normal(2, 1, [10000])
x3 = np.random.normal(5, 1, [10000])
x1_mu = np.mean(x1)
x1_std = np.std(x1)
x2_mu = np.mean(x2)
x2_std = np.std(x2)
x3_mu = np.mean(x3)
x3_std = np.std(x3)
print("""
-----------------高斯分布-------------------""")
def mode(x, mu, std):
return 1 / np.sqrt(2 * np.pi) / std * np.exp(-(x - mu) ** 2 / 2 / std ** 2)
print("""
---------------散点图---------------------""")
x5 = np.random.normal(1, 1, [1000])
x6 = 2 * x5 + 1 + np.random.normal(0, 0.6, [1000])
print("""
-----------------协方差-------------------""")
rand1 = np.random.normal(loc=1, scale=3, size=[1000]) * 10
rand2 = np.random.normal(1, 3, size=[1000]) * 10
def conv(dt1, dt2):
return np.mean((dt1 - np.mean(dt1)) * (dt2 - np.mean(dt2)))
print('conv', conv(x5, x6))
print("""
----------------线性相关系数--------------------""")
def rho(p1, p2):
return conv(p1, p2) / np.std(p1) / np.std(p2)
print("""
---------------坐标轴旋转---------------------""")
print('rho', rho(x5, x6))
print("""
---------------信息熵---------------------""")
px = np.linspace(0.01, 0.99, 1000)
y = px * np.log(1 / px) + (1 - px) * np.log(1 / (1 - px))
plt.plot(px, y)
plt.show()
<|reserved_special_token_1|>
print('\n----------------概率与统计--------------------')
import numpy as np
import scipy
import sympy as sym
import matplotlib.pyplot as plt
import sklearn.datasets as sd
iris = sd.load_iris()
x1 = np.random.random([10000]) # 均匀分布
x2 = np.random.normal(2, 1, [10000]) # 正态分布
x3 = np.random.normal(5, 1, [10000]) # 正态分布
# print(len(x1),len(x2))
# print(x1.shape,x2.shape)
# coin = np.random.randint(0, 3, [1000])
# print(coin)
# print(np.mean(coin))
# plt.hist(coin)
# plt.hist(x1, bins=20)
x1_mu = np.mean(x1)
x1_std = np.std(x1)
x2_mu = np.mean(x2)
x2_std = np.std(x2)
x3_mu = np.mean(x3)
x3_std = np.std(x3)
print('\n-----------------高斯分布-------------------')
def mode(x, mu, std):
return 1 / np.sqrt(2 * np.pi) / std * np.exp(-(x - mu) ** 2 / 2 / std ** 2)
# xplot = np.linspace(-2, 8, 10000)
# print(x2_mu, x2_std, x3_mu, x3_std)
# x1_guass = mode(xplot, x1_mu, x1_std)
# x2_guass = mode(xplot, x2_mu, x2_std)
# x3_guass = mode(xplot, x3_mu, x3_std)
# plt.plot(xplot, x1_guass)
# plt.plot(xplot, x2_guass)
# plt.plot(xplot, x3_guass)
# plt.hist(x1, bins=30, alpha=0.5, density=True)
# plt.hist(x2, bins=30, alpha=0.5, density=True)
# plt.hist(x3, bins=30, alpha=0.5, density=True)
# plt.show()
print('\n---------------散点图---------------------')
x5 = np.random.normal(1, 1, [1000])
x6 = 2 * x5 + 1 + np.random.normal(0, 0.6, [1000]) # 噪声
# plt.scatter(x5, x6)
# plt.show()
print('\n-----------------协方差-------------------')
rand1 = np.random.normal(loc=1, scale=3, size=[1000]) * 10
rand2 = np.random.normal(1, 3, size=[1000]) * 10
# plt.hist(rand1, bins=30, alpha=0.5, density=True)
# plt.hist(rand2, bins=30, alpha=0.5, density=True)
# plt.show()
def conv(dt1, dt2):
return np.mean((dt1 - np.mean(dt1)) * (dt2 - np.mean(dt2)))
print('conv', conv(x5, x6))
print('\n----------------线性相关系数--------------------')
def rho(p1, p2):
return conv(p1, p2) / np.std(p1) / np.std(p2)
print('\n---------------坐标轴旋转---------------------')
print('rho', rho(x5, x6))
# plt.scatter(x5, x6)
# plt.axis("equal")
# plt.show()
print('\n---------------信息熵---------------------')
px = np.linspace(0.01, 0.99, 1000)
y = px * np.log((1 / px)) + (1 - px) * np.log(1 / (1 - px))
plt.plot(px, y)
plt.show()
|
flexible
|
{
"blob_id": "1ab5c6a56ac229c5a9892a9848c62a9a19a0dda7",
"index": 3360,
"step-1": "<mask token>\n\n\ndef conv(dt1, dt2):\n return np.mean((dt1 - np.mean(dt1)) * (dt2 - np.mean(dt2)))\n\n\n<mask token>\n\n\ndef rho(p1, p2):\n return conv(p1, p2) / np.std(p1) / np.std(p2)\n\n\n<mask token>\n",
"step-2": "print(\"\"\"\n----------------概率与统计--------------------\"\"\")\n<mask token>\nprint(\"\"\"\n-----------------高斯分布-------------------\"\"\")\n\n\ndef mode(x, mu, std):\n return 1 / np.sqrt(2 * np.pi) / std * np.exp(-(x - mu) ** 2 / 2 / std ** 2)\n\n\nprint(\"\"\"\n---------------散点图---------------------\"\"\")\n<mask token>\nprint(\"\"\"\n-----------------协方差-------------------\"\"\")\n<mask token>\n\n\ndef conv(dt1, dt2):\n return np.mean((dt1 - np.mean(dt1)) * (dt2 - np.mean(dt2)))\n\n\nprint('conv', conv(x5, x6))\nprint(\"\"\"\n----------------线性相关系数--------------------\"\"\")\n\n\ndef rho(p1, p2):\n return conv(p1, p2) / np.std(p1) / np.std(p2)\n\n\nprint(\"\"\"\n---------------坐标轴旋转---------------------\"\"\")\nprint('rho', rho(x5, x6))\nprint(\"\"\"\n---------------信息熵---------------------\"\"\")\n<mask token>\nplt.plot(px, y)\nplt.show()\n",
"step-3": "print(\"\"\"\n----------------概率与统计--------------------\"\"\")\n<mask token>\niris = sd.load_iris()\nx1 = np.random.random([10000])\nx2 = np.random.normal(2, 1, [10000])\nx3 = np.random.normal(5, 1, [10000])\nx1_mu = np.mean(x1)\nx1_std = np.std(x1)\nx2_mu = np.mean(x2)\nx2_std = np.std(x2)\nx3_mu = np.mean(x3)\nx3_std = np.std(x3)\nprint(\"\"\"\n-----------------高斯分布-------------------\"\"\")\n\n\ndef mode(x, mu, std):\n return 1 / np.sqrt(2 * np.pi) / std * np.exp(-(x - mu) ** 2 / 2 / std ** 2)\n\n\nprint(\"\"\"\n---------------散点图---------------------\"\"\")\nx5 = np.random.normal(1, 1, [1000])\nx6 = 2 * x5 + 1 + np.random.normal(0, 0.6, [1000])\nprint(\"\"\"\n-----------------协方差-------------------\"\"\")\nrand1 = np.random.normal(loc=1, scale=3, size=[1000]) * 10\nrand2 = np.random.normal(1, 3, size=[1000]) * 10\n\n\ndef conv(dt1, dt2):\n return np.mean((dt1 - np.mean(dt1)) * (dt2 - np.mean(dt2)))\n\n\nprint('conv', conv(x5, x6))\nprint(\"\"\"\n----------------线性相关系数--------------------\"\"\")\n\n\ndef rho(p1, p2):\n return conv(p1, p2) / np.std(p1) / np.std(p2)\n\n\nprint(\"\"\"\n---------------坐标轴旋转---------------------\"\"\")\nprint('rho', rho(x5, x6))\nprint(\"\"\"\n---------------信息熵---------------------\"\"\")\npx = np.linspace(0.01, 0.99, 1000)\ny = px * np.log(1 / px) + (1 - px) * np.log(1 / (1 - px))\nplt.plot(px, y)\nplt.show()\n",
"step-4": "print(\"\"\"\n----------------概率与统计--------------------\"\"\")\nimport numpy as np\nimport scipy\nimport sympy as sym\nimport matplotlib.pyplot as plt\nimport sklearn.datasets as sd\niris = sd.load_iris()\nx1 = np.random.random([10000])\nx2 = np.random.normal(2, 1, [10000])\nx3 = np.random.normal(5, 1, [10000])\nx1_mu = np.mean(x1)\nx1_std = np.std(x1)\nx2_mu = np.mean(x2)\nx2_std = np.std(x2)\nx3_mu = np.mean(x3)\nx3_std = np.std(x3)\nprint(\"\"\"\n-----------------高斯分布-------------------\"\"\")\n\n\ndef mode(x, mu, std):\n return 1 / np.sqrt(2 * np.pi) / std * np.exp(-(x - mu) ** 2 / 2 / std ** 2)\n\n\nprint(\"\"\"\n---------------散点图---------------------\"\"\")\nx5 = np.random.normal(1, 1, [1000])\nx6 = 2 * x5 + 1 + np.random.normal(0, 0.6, [1000])\nprint(\"\"\"\n-----------------协方差-------------------\"\"\")\nrand1 = np.random.normal(loc=1, scale=3, size=[1000]) * 10\nrand2 = np.random.normal(1, 3, size=[1000]) * 10\n\n\ndef conv(dt1, dt2):\n return np.mean((dt1 - np.mean(dt1)) * (dt2 - np.mean(dt2)))\n\n\nprint('conv', conv(x5, x6))\nprint(\"\"\"\n----------------线性相关系数--------------------\"\"\")\n\n\ndef rho(p1, p2):\n return conv(p1, p2) / np.std(p1) / np.std(p2)\n\n\nprint(\"\"\"\n---------------坐标轴旋转---------------------\"\"\")\nprint('rho', rho(x5, x6))\nprint(\"\"\"\n---------------信息熵---------------------\"\"\")\npx = np.linspace(0.01, 0.99, 1000)\ny = px * np.log(1 / px) + (1 - px) * np.log(1 / (1 - px))\nplt.plot(px, y)\nplt.show()\n",
"step-5": "print('\\n----------------概率与统计--------------------')\nimport numpy as np\nimport scipy\nimport sympy as sym\nimport matplotlib.pyplot as plt\nimport sklearn.datasets as sd\n\niris = sd.load_iris()\nx1 = np.random.random([10000]) # 均匀分布\nx2 = np.random.normal(2, 1, [10000]) # 正态分布\nx3 = np.random.normal(5, 1, [10000]) # 正态分布\n# print(len(x1),len(x2))\n# print(x1.shape,x2.shape)\n\n# coin = np.random.randint(0, 3, [1000])\n# print(coin)\n# print(np.mean(coin))\n# plt.hist(coin)\n# plt.hist(x1, bins=20)\n\nx1_mu = np.mean(x1)\nx1_std = np.std(x1)\nx2_mu = np.mean(x2)\nx2_std = np.std(x2)\nx3_mu = np.mean(x3)\nx3_std = np.std(x3)\n\nprint('\\n-----------------高斯分布-------------------')\n\n\ndef mode(x, mu, std):\n return 1 / np.sqrt(2 * np.pi) / std * np.exp(-(x - mu) ** 2 / 2 / std ** 2)\n\n\n# xplot = np.linspace(-2, 8, 10000)\n# print(x2_mu, x2_std, x3_mu, x3_std)\n# x1_guass = mode(xplot, x1_mu, x1_std)\n# x2_guass = mode(xplot, x2_mu, x2_std)\n# x3_guass = mode(xplot, x3_mu, x3_std)\n# plt.plot(xplot, x1_guass)\n# plt.plot(xplot, x2_guass)\n# plt.plot(xplot, x3_guass)\n# plt.hist(x1, bins=30, alpha=0.5, density=True)\n# plt.hist(x2, bins=30, alpha=0.5, density=True)\n# plt.hist(x3, bins=30, alpha=0.5, density=True)\n# plt.show()\n\nprint('\\n---------------散点图---------------------')\nx5 = np.random.normal(1, 1, [1000])\nx6 = 2 * x5 + 1 + np.random.normal(0, 0.6, [1000]) # 噪声\n# plt.scatter(x5, x6)\n# plt.show()\n\nprint('\\n-----------------协方差-------------------')\nrand1 = np.random.normal(loc=1, scale=3, size=[1000]) * 10\nrand2 = np.random.normal(1, 3, size=[1000]) * 10\n\n\n# plt.hist(rand1, bins=30, alpha=0.5, density=True)\n# plt.hist(rand2, bins=30, alpha=0.5, density=True)\n# plt.show()\n\ndef conv(dt1, dt2):\n return np.mean((dt1 - np.mean(dt1)) * (dt2 - np.mean(dt2)))\n\n\nprint('conv', conv(x5, x6))\n\nprint('\\n----------------线性相关系数--------------------')\n\n\ndef rho(p1, p2):\n return conv(p1, p2) / np.std(p1) / 
np.std(p2)\n\n\nprint('\\n---------------坐标轴旋转---------------------')\nprint('rho', rho(x5, x6))\n# plt.scatter(x5, x6)\n# plt.axis(\"equal\")\n# plt.show()\n\nprint('\\n---------------信息熵---------------------')\npx = np.linspace(0.01, 0.99, 1000)\ny = px * np.log((1 / px)) + (1 - px) * np.log(1 / (1 - px))\nplt.plot(px, y)\nplt.show()\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
import array
from PIL import Image
from generic.editable import XEditable as Editable
class PLTT(Editable):
    """Palette information (PLTT sub-block of an NCLR color resource)."""
    FORMAT_16BIT = 3   # 16 colors per palette (4bpp)
    FORMAT_256BIT = 4  # 256 colors per palette (8bpp)

    def define(self, clr):
        """Declare the binary layout of the PLTT header.

        Parameters
        ----------
        clr : NCLR
            Owning color resource.
        """
        self.clr = clr
        self.string('magic', length=4, default='PLTT')  # not reversed
        self.uint32('size_')
        self.uint32('format')
        self.uint32('extended')
        self.uint32('datasize')
        self.uint32('offset')
        self.data = ''

    def _num_colors(self):
        """Return the number of colors per palette for self.format.

        Raises ValueError on an unknown format instead of silently
        leaving the count undefined (the original code raised
        UnboundLocalError further down in that case).
        """
        if self.format == self.FORMAT_16BIT:
            return 16
        if self.format == self.FORMAT_256BIT:
            return 256
        raise ValueError('Unknown palette format: {0}'.format(self.format))

    def load(self, reader):
        """Read the header, then the raw 16-bit BGR555 color entries."""
        Editable.load(self, reader)
        self.data = array.array('H', reader.read(self.datasize))

    def save(self, writer):
        """Write the header followed by the color data, padded to datasize."""
        writer = Editable.save(self, writer)
        ofs = writer.tell()
        writer.write(self.data.tostring())
        writer.writePadding(ofs+self.datasize)
        return writer

    def get_palettes(self):
        """Return every palette as a list of (r, g, b, a) int tuples."""
        palettes = []
        num = self._num_colors()
        start = 0
        # Fix: was range(len(self.data)/num), which is a float (TypeError)
        # under Python 3; use floor division.
        for pal_id in range(len(self.data)//num):
            palette = []
            for i in range(num):
                val = self.data[start+i]
                # Expand each BGR555 5-bit channel to 8 bits.
                palette.append((((val >> 0) & 0x1f) << 3,
                                ((val >> 5) & 0x1f) << 3,
                                ((val >> 10) & 0x1f) << 3,
                                255))
            start += num
            palettes.append(palette)
        return palettes

    def get_palette(self, pal_id, transparent=True):
        """Return palette `pal_id` as a list of 4-char RGBA strings.

        If `transparent`, color index 0 is emitted fully transparent
        (NDS convention: the first palette entry is the transparent color).
        """
        palette = []
        num = self._num_colors()
        start = pal_id*num
        for i in range(num):
            # Fix: was `if not num and transparent`, which can never be
            # true since num is 16 or 256; index 0 was clearly intended.
            if not i and transparent:
                palette.append(chr(0)*4)
                continue
            val = self.data[start+i]
            palette.append(chr(((val >> 0) & 0x1f) << 3) +
                           chr(((val >> 5) & 0x1f) << 3) +
                           chr(((val >> 10) & 0x1f) << 3) +
                           chr(255))
        return palette

    def set_palette(self, pal_id, palette):
        """Overwrite palette `pal_id` with the given colors.

        Parameters
        ----------
        pal_id : int
        palette : list of tuple
            List of 4-/3-int-tuple colors; alpha, if present, is ignored.
        """
        num = self._num_colors()
        start = pal_id*num
        for i, color in enumerate(palette):
            # Fix: was `i > num`, an off-by-one that let color number
            # num (the (num+1)-th) spill into the next palette.
            if i >= num:
                break
            r, g, b = color[:3]
            # Pack 8-bit channels back into BGR555.
            self.data[start+i] = ((r >> 3) |
                                  (g >> 3 << 5) |
                                  (b >> 3 << 10))
class NCLR(Editable):
    """2d color information (NCLR container; wraps a single PLTT block)."""

    def define(self):
        """Declare the binary layout of the NCLR file header."""
        self.string('magic', length=4, default='RLCN')  # 'NCLR' byte-reversed
        self.uint16('endian', default=0xFFFE)
        self.uint16('version', default=0x101)
        self.uint32('size_')
        self.uint16('headersize', default=0x10)
        self.uint16('numblocks', default=1)
        self.pltt = PLTT(self)

    def load(self, reader):
        """Read the file header and the contained PLTT block."""
        Editable.load(self, reader)
        # Fix: the message was 'Expected RLCN got '.format(...), which
        # lacked a placeholder and silently dropped the actual magic.
        assert self.magic == 'RLCN', 'Expected RLCN got {0}'.format(self.magic)
        self.pltt.load(reader)

    def save(self, writer=None):
        """Write the file header followed by the PLTT block."""
        writer = Editable.save(self, writer)
        writer = self.pltt.save(writer)
        return writer

    def get_palette(self, pal_id=0, transparent=True):
        """Delegate to PLTT.get_palette."""
        return self.pltt.get_palette(pal_id, transparent)

    def get_palettes(self):
        """Delegate to PLTT.get_palettes."""
        return self.pltt.get_palettes()

    def set_palette(self, pal_id, palette):
        """Delegate to PLTT.set_palette."""
        return self.pltt.set_palette(pal_id, palette)
|
normal
|
{
"blob_id": "2fadc5c90d1bae14c57fc3bf02582e12aa8abdf6",
"index": 790,
"step-1": "<mask token>\n\n\nclass PLTT(Editable):\n <mask token>\n <mask token>\n <mask token>\n\n def define(self, clr):\n self.clr = clr\n self.string('magic', length=4, default='PLTT')\n self.uint32('size_')\n self.uint32('format')\n self.uint32('extended')\n self.uint32('datasize')\n self.uint32('offset')\n self.data = ''\n\n def load(self, reader):\n Editable.load(self, reader)\n self.data = array.array('H', reader.read(self.datasize))\n\n def save(self, writer):\n writer = Editable.save(self, writer)\n ofs = writer.tell()\n writer.write(self.data.tostring())\n writer.writePadding(ofs + self.datasize)\n return writer\n\n def get_palettes(self):\n palettes = []\n if self.format == self.FORMAT_16BIT:\n num = 16\n elif self.format == self.FORMAT_256BIT:\n num = 256\n start = 0\n for pal_id in range(len(self.data) / num):\n palette = []\n for i in range(num):\n val = self.data[start + i]\n palette.append(((val >> 0 & 31) << 3, (val >> 5 & 31) << 3,\n (val >> 10 & 31) << 3, 255))\n start += num\n palettes.append(palette)\n return palettes\n <mask token>\n <mask token>\n\n\nclass NCLR(Editable):\n \"\"\"2d color information\n \"\"\"\n\n def define(self):\n self.string('magic', length=4, default='RLCN')\n self.uint16('endian', default=65534)\n self.uint16('version', default=257)\n self.uint32('size_')\n self.uint16('headersize', default=16)\n self.uint16('numblocks', default=1)\n self.pltt = PLTT(self)\n\n def load(self, reader):\n Editable.load(self, reader)\n assert self.magic == 'RLCN', 'Expected RLCN got '.format(self.magic)\n self.pltt.load(reader)\n\n def save(self, writer=None):\n writer = Editable.save(self, writer)\n writer = self.pltt.save(writer)\n return writer\n\n def get_palette(self, pal_id=0, transparent=True):\n return self.pltt.get_palette(pal_id, transparent)\n\n def get_palettes(self):\n return self.pltt.get_palettes()\n\n def set_palette(self, pal_id, palette):\n return self.pltt.set_palette(pal_id, palette)\n",
"step-2": "<mask token>\n\n\nclass PLTT(Editable):\n <mask token>\n <mask token>\n <mask token>\n\n def define(self, clr):\n self.clr = clr\n self.string('magic', length=4, default='PLTT')\n self.uint32('size_')\n self.uint32('format')\n self.uint32('extended')\n self.uint32('datasize')\n self.uint32('offset')\n self.data = ''\n\n def load(self, reader):\n Editable.load(self, reader)\n self.data = array.array('H', reader.read(self.datasize))\n\n def save(self, writer):\n writer = Editable.save(self, writer)\n ofs = writer.tell()\n writer.write(self.data.tostring())\n writer.writePadding(ofs + self.datasize)\n return writer\n\n def get_palettes(self):\n palettes = []\n if self.format == self.FORMAT_16BIT:\n num = 16\n elif self.format == self.FORMAT_256BIT:\n num = 256\n start = 0\n for pal_id in range(len(self.data) / num):\n palette = []\n for i in range(num):\n val = self.data[start + i]\n palette.append(((val >> 0 & 31) << 3, (val >> 5 & 31) << 3,\n (val >> 10 & 31) << 3, 255))\n start += num\n palettes.append(palette)\n return palettes\n <mask token>\n\n def set_palette(self, pal_id, palette):\n \"\"\"\n\n Parameters\n ----------\n pal_id : int\n palette : list of tuple\n List of 4-/3-int-tuple colors\n \"\"\"\n if self.format == self.FORMAT_16BIT:\n num = 16\n elif self.format == self.FORMAT_256BIT:\n num = 256\n start = pal_id * num\n for i, color in enumerate(palette):\n if i > num:\n break\n r, g, b = color[:3]\n self.data[start + i] = r >> 3 | g >> 3 << 5 | b >> 3 << 10\n\n\nclass NCLR(Editable):\n \"\"\"2d color information\n \"\"\"\n\n def define(self):\n self.string('magic', length=4, default='RLCN')\n self.uint16('endian', default=65534)\n self.uint16('version', default=257)\n self.uint32('size_')\n self.uint16('headersize', default=16)\n self.uint16('numblocks', default=1)\n self.pltt = PLTT(self)\n\n def load(self, reader):\n Editable.load(self, reader)\n assert self.magic == 'RLCN', 'Expected RLCN got '.format(self.magic)\n 
self.pltt.load(reader)\n\n def save(self, writer=None):\n writer = Editable.save(self, writer)\n writer = self.pltt.save(writer)\n return writer\n\n def get_palette(self, pal_id=0, transparent=True):\n return self.pltt.get_palette(pal_id, transparent)\n\n def get_palettes(self):\n return self.pltt.get_palettes()\n\n def set_palette(self, pal_id, palette):\n return self.pltt.set_palette(pal_id, palette)\n",
"step-3": "<mask token>\n\n\nclass PLTT(Editable):\n <mask token>\n <mask token>\n <mask token>\n\n def define(self, clr):\n self.clr = clr\n self.string('magic', length=4, default='PLTT')\n self.uint32('size_')\n self.uint32('format')\n self.uint32('extended')\n self.uint32('datasize')\n self.uint32('offset')\n self.data = ''\n\n def load(self, reader):\n Editable.load(self, reader)\n self.data = array.array('H', reader.read(self.datasize))\n\n def save(self, writer):\n writer = Editable.save(self, writer)\n ofs = writer.tell()\n writer.write(self.data.tostring())\n writer.writePadding(ofs + self.datasize)\n return writer\n\n def get_palettes(self):\n palettes = []\n if self.format == self.FORMAT_16BIT:\n num = 16\n elif self.format == self.FORMAT_256BIT:\n num = 256\n start = 0\n for pal_id in range(len(self.data) / num):\n palette = []\n for i in range(num):\n val = self.data[start + i]\n palette.append(((val >> 0 & 31) << 3, (val >> 5 & 31) << 3,\n (val >> 10 & 31) << 3, 255))\n start += num\n palettes.append(palette)\n return palettes\n\n def get_palette(self, pal_id, transparent=True):\n palette = []\n if self.format == self.FORMAT_16BIT:\n num = 16\n elif self.format == self.FORMAT_256BIT:\n num = 256\n start = pal_id * num\n for i in range(num):\n if not num and transparent:\n palette.append(chr(0) * 4)\n continue\n val = self.data[start + i]\n palette.append(chr((val >> 0 & 31) << 3) + chr((val >> 5 & 31) <<\n 3) + chr((val >> 10 & 31) << 3) + chr(255))\n return palette\n\n def set_palette(self, pal_id, palette):\n \"\"\"\n\n Parameters\n ----------\n pal_id : int\n palette : list of tuple\n List of 4-/3-int-tuple colors\n \"\"\"\n if self.format == self.FORMAT_16BIT:\n num = 16\n elif self.format == self.FORMAT_256BIT:\n num = 256\n start = pal_id * num\n for i, color in enumerate(palette):\n if i > num:\n break\n r, g, b = color[:3]\n self.data[start + i] = r >> 3 | g >> 3 << 5 | b >> 3 << 10\n\n\nclass NCLR(Editable):\n \"\"\"2d color information\n 
\"\"\"\n\n def define(self):\n self.string('magic', length=4, default='RLCN')\n self.uint16('endian', default=65534)\n self.uint16('version', default=257)\n self.uint32('size_')\n self.uint16('headersize', default=16)\n self.uint16('numblocks', default=1)\n self.pltt = PLTT(self)\n\n def load(self, reader):\n Editable.load(self, reader)\n assert self.magic == 'RLCN', 'Expected RLCN got '.format(self.magic)\n self.pltt.load(reader)\n\n def save(self, writer=None):\n writer = Editable.save(self, writer)\n writer = self.pltt.save(writer)\n return writer\n\n def get_palette(self, pal_id=0, transparent=True):\n return self.pltt.get_palette(pal_id, transparent)\n\n def get_palettes(self):\n return self.pltt.get_palettes()\n\n def set_palette(self, pal_id, palette):\n return self.pltt.set_palette(pal_id, palette)\n",
"step-4": "<mask token>\n\n\nclass PLTT(Editable):\n <mask token>\n FORMAT_16BIT = 3\n FORMAT_256BIT = 4\n\n def define(self, clr):\n self.clr = clr\n self.string('magic', length=4, default='PLTT')\n self.uint32('size_')\n self.uint32('format')\n self.uint32('extended')\n self.uint32('datasize')\n self.uint32('offset')\n self.data = ''\n\n def load(self, reader):\n Editable.load(self, reader)\n self.data = array.array('H', reader.read(self.datasize))\n\n def save(self, writer):\n writer = Editable.save(self, writer)\n ofs = writer.tell()\n writer.write(self.data.tostring())\n writer.writePadding(ofs + self.datasize)\n return writer\n\n def get_palettes(self):\n palettes = []\n if self.format == self.FORMAT_16BIT:\n num = 16\n elif self.format == self.FORMAT_256BIT:\n num = 256\n start = 0\n for pal_id in range(len(self.data) / num):\n palette = []\n for i in range(num):\n val = self.data[start + i]\n palette.append(((val >> 0 & 31) << 3, (val >> 5 & 31) << 3,\n (val >> 10 & 31) << 3, 255))\n start += num\n palettes.append(palette)\n return palettes\n\n def get_palette(self, pal_id, transparent=True):\n palette = []\n if self.format == self.FORMAT_16BIT:\n num = 16\n elif self.format == self.FORMAT_256BIT:\n num = 256\n start = pal_id * num\n for i in range(num):\n if not num and transparent:\n palette.append(chr(0) * 4)\n continue\n val = self.data[start + i]\n palette.append(chr((val >> 0 & 31) << 3) + chr((val >> 5 & 31) <<\n 3) + chr((val >> 10 & 31) << 3) + chr(255))\n return palette\n\n def set_palette(self, pal_id, palette):\n \"\"\"\n\n Parameters\n ----------\n pal_id : int\n palette : list of tuple\n List of 4-/3-int-tuple colors\n \"\"\"\n if self.format == self.FORMAT_16BIT:\n num = 16\n elif self.format == self.FORMAT_256BIT:\n num = 256\n start = pal_id * num\n for i, color in enumerate(palette):\n if i > num:\n break\n r, g, b = color[:3]\n self.data[start + i] = r >> 3 | g >> 3 << 5 | b >> 3 << 10\n\n\nclass NCLR(Editable):\n \"\"\"2d color 
information\n \"\"\"\n\n def define(self):\n self.string('magic', length=4, default='RLCN')\n self.uint16('endian', default=65534)\n self.uint16('version', default=257)\n self.uint32('size_')\n self.uint16('headersize', default=16)\n self.uint16('numblocks', default=1)\n self.pltt = PLTT(self)\n\n def load(self, reader):\n Editable.load(self, reader)\n assert self.magic == 'RLCN', 'Expected RLCN got '.format(self.magic)\n self.pltt.load(reader)\n\n def save(self, writer=None):\n writer = Editable.save(self, writer)\n writer = self.pltt.save(writer)\n return writer\n\n def get_palette(self, pal_id=0, transparent=True):\n return self.pltt.get_palette(pal_id, transparent)\n\n def get_palettes(self):\n return self.pltt.get_palettes()\n\n def set_palette(self, pal_id, palette):\n return self.pltt.set_palette(pal_id, palette)\n",
"step-5": "\nimport array\n\nfrom PIL import Image\n\nfrom generic.editable import XEditable as Editable\n\n\nclass PLTT(Editable):\n \"\"\"Palette information\"\"\"\n FORMAT_16BIT = 3\n FORMAT_256BIT = 4\n\n def define(self, clr):\n self.clr = clr\n self.string('magic', length=4, default='PLTT') # not reversed\n self.uint32('size_')\n self.uint32('format')\n self.uint32('extended')\n self.uint32('datasize')\n self.uint32('offset')\n self.data = ''\n\n def load(self, reader):\n Editable.load(self, reader)\n self.data = array.array('H', reader.read(self.datasize))\n\n def save(self, writer):\n writer = Editable.save(self, writer)\n ofs = writer.tell()\n writer.write(self.data.tostring())\n writer.writePadding(ofs+self.datasize)\n return writer\n\n def get_palettes(self):\n palettes = []\n if self.format == self.FORMAT_16BIT:\n num = 16\n elif self.format == self.FORMAT_256BIT:\n num = 256\n start = 0\n for pal_id in range(len(self.data)/num):\n palette = []\n for i in range(num):\n val = self.data[start+i]\n palette.append((((val >> 0) & 0x1f) << 3,\n ((val >> 5) & 0x1f) << 3,\n ((val >> 10) & 0x1f) << 3,\n 255))\n start += num\n palettes.append(palette)\n return palettes\n\n def get_palette(self, pal_id, transparent=True):\n palette = []\n if self.format == self.FORMAT_16BIT:\n num = 16\n elif self.format == self.FORMAT_256BIT:\n num = 256\n start = pal_id*num\n for i in range(num):\n if not num and transparent:\n palette.append(chr(0)*4)\n continue\n val = self.data[start+i]\n palette.append(chr(((val >> 0) & 0x1f) << 3) +\n chr(((val >> 5) & 0x1f) << 3) +\n chr(((val >> 10) & 0x1f) << 3) +\n chr(255))\n return palette\n\n def set_palette(self, pal_id, palette):\n \"\"\"\n\n Parameters\n ----------\n pal_id : int\n palette : list of tuple\n List of 4-/3-int-tuple colors\n \"\"\"\n if self.format == self.FORMAT_16BIT:\n num = 16\n elif self.format == self.FORMAT_256BIT:\n num = 256\n start = pal_id*num\n for i, color in enumerate(palette):\n if i > num:\n break\n 
r, g, b = color[:3]\n self.data[start+i] = ((r >> 3) |\n (g >> 3 << 5) |\n (b >> 3 << 10))\n\n\nclass NCLR(Editable):\n \"\"\"2d color information\n \"\"\"\n def define(self):\n self.string('magic', length=4, default='RLCN')\n self.uint16('endian', default=0xFFFE)\n self.uint16('version', default=0x101)\n self.uint32('size_')\n self.uint16('headersize', default=0x10)\n self.uint16('numblocks', default=1)\n self.pltt = PLTT(self)\n\n def load(self, reader):\n Editable.load(self, reader)\n assert self.magic == 'RLCN', 'Expected RLCN got '.format(self.magic)\n self.pltt.load(reader)\n\n def save(self, writer=None):\n writer = Editable.save(self, writer)\n writer = self.pltt.save(writer)\n return writer\n\n def get_palette(self, pal_id=0, transparent=True):\n return self.pltt.get_palette(pal_id, transparent)\n\n def get_palettes(self):\n return self.pltt.get_palettes()\n\n def set_palette(self, pal_id, palette):\n return self.pltt.set_palette(pal_id, palette)\n",
"step-ids": [
13,
14,
15,
16,
19
]
}
|
[
13,
14,
15,
16,
19
] |
from model import WSD
from data_preprocessing import load_dataset, create_mapping_dictionary, reload_word_mapping,get_bn2wn,get_bn2wndomains, get_bn2lex
from typing import List, Dict, Tuple
from prova import convert_sentence_to_features_no_padding
import numpy as np
import os
from nltk.corpus import wordnet
# Module-level counter of predictions that fell back to the Most Frequent
# Sense (MFS) heuristic; reported and reset by each predict_* entry point.
mfs_counter = 0
def predict_babelnet(input_path : str, output_path : str, resources_path : str) -> None:
    """
    DO NOT MODIFY THE SIGNATURE!
    This is the skeleton of the prediction function.
    The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)
    with your predictions in the "<id> <BABELSynset>" format (e.g. "d000.s000.t000 bn:01234567n").

    The resources folder should contain everything you need to make the predictions. It is the "resources" folder in your submission.

    N.B. DO NOT HARD CODE PATHS IN HERE. Use resource_path instead, otherwise we will not be able to run the code.
    If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding

    :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).
    :param output_path: the path of the output file (where you save your predictions)
    :param resources_path: the path of the resources folder containing your model and stuff you might need.
    :return: None
    """
    # Fix: the docstring originally appeared *after* `global mfs_counter`,
    # which made it a plain string expression rather than the function's
    # __doc__. It now comes first, as required.
    global mfs_counter
    print(">>>> BABELNET PREDICTION")
    prediction_results, sentences_xml_elements = __predict(input_path, resources_path)
    vocab_label_bn = create_mapping_dictionary(resources_path, mode='bn')
    # Derive "<dataset>.babelnet.gold.key.txt" from the input XML's basename.
    filename = os.path.normpath(input_path)
    filename = filename.split(os.sep)[-1]
    filename = filename[:-3]+"babelnet.gold.key.txt"
    correctly_saved = 0
    for index in range(len(prediction_results)):
        # prediction_results[index][0][0]: babelnet head of the multitask output.
        correctly_saved += __write_result(filename,
                                          sentences_xml_elements[index],
                                          resources_path, output_path,
                                          prediction_results[index][0][0],
                                          vocab=vocab_label_bn,
                                          enable_coarse_grained=1,
                                          vocab_for_coarse=None)
    print("Successfully saved {} out of {}".format(correctly_saved, len(prediction_results)))
    del prediction_results
    print("Of these, {} were MFS".format(mfs_counter))
    mfs_counter = 0
    return
def predict_wordnet_domains(input_path : str, output_path : str, resources_path : str) -> None:
    """
    DO NOT MODIFY THE SIGNATURE!
    Skeleton of the WordNet-Domains prediction function: builds the model,
    restores the checkpoint weights and writes a new file (output_path)
    with one prediction per line in the "<id> <wordnetDomain>" format
    (e.g. "d000.s000.t000 sport").

    The resources folder should contain everything needed for prediction; do
    not hard-code paths, always go through `resources_path`.

    :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).
    :param output_path: the path of the output file (where you save your predictions)
    :param resources_path: the path of the resources folder containing your model and stuff you might need.
    :return: None
    """
    global mfs_counter
    print(">>>> WORDNET DOMAINS PREDICTION")
    predictions, xml_sentences = __predict(input_path, resources_path)
    wndmn_vocab = create_mapping_dictionary(resources_path, mode='wndmn')
    bn2wndom = get_bn2wndomains()
    # "<dataset>.xml" -> "<dataset>.wndomains.gold.key.txt"
    out_name = os.path.normpath(input_path).split(os.sep)[-1][:-3] + "wndomains.gold.key.txt"
    saved = 0
    for idx, prediction in enumerate(predictions):
        # prediction[1][0]: WordNet-Domains head of the multitask output.
        saved += __write_result(out_name,
                                xml_sentences[idx],
                                resources_path, output_path,
                                prediction[1][0],
                                vocab=wndmn_vocab,
                                enable_coarse_grained=2,
                                vocab_for_coarse=bn2wndom)
    print("Successfully saved {} out of {}".format(saved, len(predictions)))
    del predictions
    print("Of these, {} were MFS".format(mfs_counter))
    mfs_counter = 0
    return
def predict_lexicographer(input_path : str, output_path : str, resources_path : str) -> None:
    """
    DO NOT MODIFY THE SIGNATURE!
    Skeleton of the lexicographer prediction function: builds the model,
    restores the checkpoint weights and writes a new file (output_path)
    with one prediction per line in the "<id> <lexicographerId>" format
    (e.g. "d000.s000.t000 noun.animal").

    The resources folder should contain everything needed for prediction; do
    not hard-code paths, always go through `resources_path`.

    :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).
    :param output_path: the path of the output file (where you save your predictions)
    :param resources_path: the path of the resources folder containing your model and stuff you might need.
    :return: None
    """
    global mfs_counter
    print(">>>> LEXICOGRAPHER PREDICTION")
    predictions, xml_sentences = __predict(input_path, resources_path)
    lex_vocab = create_mapping_dictionary(resources_path, mode='lex')
    bn2lex = get_bn2lex()
    # "<dataset>.xml" -> "<dataset>.lexicon.gold.key.txt"
    out_name = os.path.normpath(input_path).split(os.sep)[-1][:-3] + "lexicon.gold.key.txt"
    saved = 0
    for idx, prediction in enumerate(predictions):
        # prediction[2][0]: lexicographer head of the multitask output.
        saved += __write_result(out_name,
                                xml_sentences[idx],
                                resources_path, output_path,
                                prediction[2][0],
                                vocab=lex_vocab,
                                enable_coarse_grained=3,
                                vocab_for_coarse=bn2lex)
    print("Successfully saved {} out of {}".format(saved, len(predictions)))
    del predictions
    print("Of these, {} were MFS".format(mfs_counter))
    mfs_counter = 0
    return
def __predict(input_path : str, resources_path : str) -> Tuple:
    """
    Run the WSD network over every sentence of the input file and return the
    raw predictions in all three output formats.

    :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).
    :param resources_path: the path of the resources folder containing your model and stuff you might need.
    :return: (list of per-sentence network outputs, list of sentence XML elements)
    """
    sentences, xml_tree = load_dataset(input_path)
    sentences = [frase for frase in sentences if frase]  # drop empty sentences
    vocab_label_wndmn = create_mapping_dictionary(resources_path, mode='wndmn')
    vocab_label_bn = create_mapping_dictionary(resources_path, mode='bn')
    vocab_label_lex = create_mapping_dictionary(resources_path, mode='lex')
    # Output sizes must match the label vocabularies used at training time.
    network = WSD(resources_path+"/vocabularies/bert_vocab.txt",
                  [len(vocab_label_bn), len(vocab_label_wndmn), len(vocab_label_lex)],
                  dropout=0.1, recurrent_dropout=0.1, learning_rate=0.0003)
    tokenizer = network.tokenizatore
    network.model.load_weights(resources_path+"/saved_model/model_20_2.14.h5")
    # Every <sentence> element, grandchildren of the XML root.
    xml_sentences = xml_tree.xpath("/*/*/*")
    outputs = []
    for sentence in sentences:
        word_ids, mask, segments = convert_sentence_to_features_no_padding(sentence, tokenizer)
        outputs.append(network.model.predict(
            {'input_word_ids': word_ids, 'input_mask': mask, 'segment_ids': segments},
            verbose=1
        ))
    del vocab_label_lex
    del vocab_label_wndmn
    del vocab_label_bn
    return outputs, xml_sentences
def __write_result(filename: str,
                   frase,
                   resources_path: str,
                   outputh_path: str,
                   predictions,
                   vocab = None,
                   enable_coarse_grained: int = 1,
                   vocab_for_coarse = None) -> int:
    """
    Write results in the file system
    :param filename: the name of the file to save
    :param frase: the sentence XML element whose <instance> children get a sense
    :param resources_path: the path of the resources folder containing your model and stuff you might need.
    :param output_path: the path of the output file (where you save your predictions)
    :param predictions: the predictions made by the system
    :param vocab: the vocab needed for giving a sense
    :param enable_coarse_grained: changes the flow of the function from fine-grained to coarse-grained. Default to 1. Possible values:
        1 --> Means I'm predicting with Babelnet. No extra precautions needed
        2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class "factotum" is returned instead
        3 --> Means I'm predicting with Lexicon. Need to consult the vocab.
    :param vocab_for_coarse: The vocab in support of mode 2 or 3
    :return: 1 if succeeds
    """
    global mfs_counter
    bn2wn = get_bn2wn()
    lemma2wn = reload_word_mapping(resources_path+"/mapping/lemma2wn.txt")
    to_write = []
    for index, parola in enumerate(frase):
        # Only <instance> elements need disambiguation; other tokens are skipped.
        name = parola.xpath('name()')
        if name == 'instance':
            id = parola.get('id')
            # Candidate WordNet senses for this surface form.
            list_of_possible_senses_first_step = lemma2wn.get(parola.text)
            if not list_of_possible_senses_first_step:
                # No candidates known: fall back to Most Frequent Sense.
                the_actual_meaning = MFS(parola,
                                         bn2wn,
                                         vocab2=vocab_for_coarse,
                                         pred_case=enable_coarse_grained)
                mfs_counter += 1
                to_write.append((id, the_actual_meaning))
                continue
            list_of_possible_senses_bn_version = convert_from_wnlist_2_bnlist(list_of_possible_senses_first_step, bn2wn)
            # Map candidates to network scores; candidates unknown to the
            # label vocabulary are filtered out of the returned list.
            candidates,list_of_possible_senses_bn_version = create_custom_label(list_of_possible_senses_bn_version,
                                                          parola.text,
                                                          vocab,
                                                          predictions[index],
                                                          enable_coarse_grained=enable_coarse_grained)
            the_actual_meaning = None
            if candidates:
                # Pick the candidate sense with the highest network score.
                argmax = np.argmax(candidates)
                the_actual_meaning = list_of_possible_senses_bn_version[argmax]
            else:
                # No candidate survived the vocabulary lookup: MFS fallback.
                mfs_counter += 1
                the_actual_meaning = MFS(parola,
                                         bn2wn,
                                         vocab2=vocab_for_coarse,
                                         pred_case=enable_coarse_grained)
            to_write.append((id, the_actual_meaning))
    # Append (not overwrite) "<id> <sense>" lines to the output file.
    with open(outputh_path + "/"+filename, "a") as test_saving:
        for tupla in to_write:
            test_saving.write(tupla[0] + " " + tupla[1]+"\n")
    del to_write
    del lemma2wn
    del bn2wn
    return 1
def MFS(parola, vocab: Dict, vocab2:Dict = None, pred_case: int = 1) -> str:
    """
    Returns the sense by applying the Most Frequent Sense (MFS) strategy
    :param parola: the Element object to which associate a sense
    :param vocab: the vocab needed for giving a sense
    :param vocab2: default to None. The other vocabulary to use if coarse-grained mode is enabled. Has to be populated if enable_coarse_grained
    :param pred_case: whether to adopt a "rollback" strategy such as MFS or not. Possible values:
        1 --> Means I'm predicting with Babelnet. No extra precautions needed
        2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class "factotum" is returned instead
        3 --> Means I'm predicting with Lexicon. Need to consult the vocab.
    :return: the chosen sense with the MFS technique
    """
    pos = parola.get('pos')
    pos_input = __decide_pos(pos)
    # WordNet orders synsets by frequency, so element 0 is the most frequent sense.
    wordnet_object = wordnet.synsets(parola.get('lemma'), pos=pos_input)
    try:
        wordnet_object = wordnet_object[0]
    except:
        # NOTE(review): bare except — if no synset exists this only prints
        # and falls through with `wordnet_object` still the empty list, so
        # the `.offset()` call below raises AttributeError anyway. Confirm
        # whether an explicit error (or a default sense) was intended.
        print(wordnet_object)
        print(parola.text)
    # Build a "wn:<8-digit-offset><pos>" key, e.g. "wn:00001740n".
    wn_synset = "wn:" + str(wordnet_object.offset()).zfill(8) + wordnet_object.pos()
    # Reverse lookup: find the BabelNet id whose value list contains this WN id.
    # NOTE(review): raises StopIteration if the synset is not in the vocab.
    the_actual_meaning = next(key for key, value in vocab.items() if wn_synset in value)
    to_return = __extrapolate_value_for_MFS(the_actual_meaning,vocab=vocab2, pred_case=pred_case)
    return to_return
def __extrapolate_value_for_MFS(value: object, pred_case: int = 1, vocab: Dict = None) -> str:
    """
    Turn a found BabelNet id (either a string or a list of strings) into the
    final prediction string for the requested output format.

    :param value: the value from which to extrapolate the actual meaning found
    :param pred_case: which output format is being predicted. Possible values:
        1 --> BabelNet: the id is returned as-is
        2 --> WordNet Domains: looked up in `vocab`; "factotum" when absent
        3 --> Lexicographer: looked up in `vocab`
    :param vocab: the mapping used by modes 2 and 3
    :return: the actual meaning found with MFS
    """
    meaning = __type_checker(value)
    if pred_case == 1:
        return meaning
    if pred_case == 2:
        domains = vocab.get(meaning)
        # "factotum" is the catch-all domain for unmapped ids.
        return domains[0] if domains else "factotum"
    if pred_case == 3:
        return vocab.get(meaning)[0]
def __type_checker(value: object) -> str:
    """
    Normalize `value` to a plain string: a string is returned unchanged, a
    list yields its first element, anything else yields None.

    :param value: the value to examine
    :return: the string representation described above
    """
    kind = type(value)
    if kind is str:
        return value
    if kind is list:
        return value[0]
    return None
def __decide_pos(pos: str) -> str:
    """
    Map a coarse POS tag to its single-letter WordNet counterpart.

    :param pos: the pos to interpret with WordNet
    :return: the WN letter for the given pos, or None when unrecognized
    """
    wn_by_pos = {'NOUN': 'n', 'VERB': 'v', 'ADJ': 'a', 'ADV': 'r'}
    return wn_by_pos.get(pos)
def convert_from_wnlist_2_bnlist(list_of_bn: List, vocab: Dict) -> List:
    """
    Cast the given list (which contains only WN ids) to Babelnet IDs.

    :param list_of_bn: the list of WordNet ids to cast
    :param vocab: mapping BabelNet id -> WordNet id(s) used for the conversion
    :return: the converted list; WN ids with no BabelNet mapping are skipped
    """
    list_of_possible_senses_bn_version = []
    for candidate in list_of_bn:
        # Fix: the original bare next(...) raised StopIteration when no
        # mapping existed, even though the `if` guard below clearly expected
        # a falsy value in that case; supply None as the default instead.
        is_it_here = next((key for key, value in vocab.items() if candidate in value), None)
        if is_it_here:
            list_of_possible_senses_bn_version.append(is_it_here if type(is_it_here) == str else is_it_here[0])
    return list_of_possible_senses_bn_version
def create_custom_label(list_of_possible_senses: List, word: str, vocab: Dict, predictions, enable_coarse_grained: int = 1) -> List:
    """
    Map the candidate babelnet IDs onto the network's prediction scores.

    :param list_of_possible_senses: the list that contains all the babelnet's IDs
    :param word: the word for which we are predicting the sense in a specific moment
    :param vocab: the vocabulary Word -> Serial to exploit for the conversion
    :param predictions: the predictions made by the system
    :param enable_coarse_grained: changes the flow of the function from fine-grained to coarse-grained. Default to 1. Possible values:
        1 --> fine-grained: vocab keys are "<word>_<bn_id>"
        2,3 -> coarse-grained: vocab keys are the bare ids
    :return: a tuple (scores, candidate list filtered to the scored ids)
    """
    to_return = []
    list_of_indices_to_delete = []
    for indice in range(len(list_of_possible_senses)):
        new_string = word + "_" + list_of_possible_senses[indice] if enable_coarse_grained == 1 else list_of_possible_senses[indice]
        try:
            conversion = int(vocab[new_string])
            to_return.append(predictions[conversion])
        except (KeyError, TypeError, ValueError, IndexError):
            # Fix: was a bare `except:` (which also swallowed e.g.
            # KeyboardInterrupt); unknown or invalid keys and out-of-range
            # prediction indices drop the candidate from the output.
            list_of_indices_to_delete.append(indice)
            continue
    if list_of_indices_to_delete:
        # Keep only the candidates that produced a score, preserving order.
        list_of_possible_senses = [list_of_possible_senses[prov_index] for prov_index in range(len(list_of_possible_senses)) if prov_index not in list_of_indices_to_delete]
    return to_return, list_of_possible_senses
if __name__ == "__main__":
    # Ad-hoc local run against the senseval3 test set; these absolute paths
    # are machine-specific and only meant for development.
    predict_babelnet("/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/dataset/test/senseval3.data.xml", "../output", "/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/resources")
    #predict_wordnet_domains("/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/dataset/test/senseval3.data.xml", "../output", "/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/resources")
    #predict_lexicographer("/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/dataset/test/senseval3.data.xml", "../output", "/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/resources")
|
normal
|
{
"blob_id": "e3631a2a003f98fbf05c45a019250e76d3366949",
"index": 2582,
"step-1": "<mask token>\n\n\ndef predict_babelnet(input_path: str, output_path: str, resources_path: str\n ) ->None:\n global mfs_counter\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <BABELSynset>\" format (e.g. \"d000.s000.t000 bn:01234567n\").\n \n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n \n N.B. DO NOT HARD CODE PATHS IN HERE. Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n print('>>>> BABELNET PREDICTION')\n prediction_results, sentences_xml_elements = __predict(input_path,\n resources_path)\n vocab_label_bn = create_mapping_dictionary(resources_path, mode='bn')\n correctly_saved = 0\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n filename = filename[:-3] + 'babelnet.gold.key.txt'\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename, sentences_xml_elements[\n index], resources_path, output_path, prediction_results[index][\n 0][0], vocab=vocab_label_bn, enable_coarse_grained=1,\n vocab_for_coarse=None)\n print('Successfully saved {} out of {}'.format(correctly_saved, len(\n prediction_results)))\n del prediction_results\n print('Of these, {} were MFS'.format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef predict_wordnet_domains(input_path: 
str, output_path: str,\n resources_path: str) ->None:\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <wordnetDomain>\" format (e.g. \"d000.s000.t000 sport\").\n\n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n\n N.B. DO NOT HARD CODE PATHS IN HERE. Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n global mfs_counter\n print('>>>> WORDNET DOMAINS PREDICTION')\n prediction_results, sentences_xml_elements = __predict(input_path,\n resources_path)\n vocab_label_wndmn = create_mapping_dictionary(resources_path, mode='wndmn')\n correctly_saved = 0\n bn2wndom = get_bn2wndomains()\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n filename = filename[:-3] + 'wndomains.gold.key.txt'\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename, sentences_xml_elements[\n index], resources_path, output_path, prediction_results[index][\n 1][0], vocab=vocab_label_wndmn, enable_coarse_grained=2,\n vocab_for_coarse=bn2wndom)\n print('Successfully saved {} out of {}'.format(correctly_saved, len(\n prediction_results)))\n del prediction_results\n print('Of these, {} were MFS'.format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef predict_lexicographer(input_path: str, output_path: 
str, resources_path:\n str) ->None:\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <lexicographerId>\" format (e.g. \"d000.s000.t000 noun.animal\").\n\n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n\n N.B. DO NOT HARD CODE PATHS IN HERE. Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n global mfs_counter\n print('>>>> LEXICOGRAPHER PREDICTION')\n prediction_results, sentences_xml_elements = __predict(input_path,\n resources_path)\n vocab_label_lex = create_mapping_dictionary(resources_path, mode='lex')\n correctly_saved = 0\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n bn2lex = get_bn2lex()\n filename = filename[:-3] + 'lexicon.gold.key.txt'\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename, sentences_xml_elements[\n index], resources_path, output_path, prediction_results[index][\n 2][0], vocab=vocab_label_lex, enable_coarse_grained=3,\n vocab_for_coarse=bn2lex)\n print('Successfully saved {} out of {}'.format(correctly_saved, len(\n prediction_results)))\n del prediction_results\n print('Of these, {} were MFS'.format(mfs_counter))\n mfs_counter = 0\n return\n\n\n<mask token>\n\n\ndef __write_result(filename: str, frase, resources_path: str, 
outputh_path:\n str, predictions, vocab=None, enable_coarse_grained: int=1,\n vocab_for_coarse=None) ->int:\n \"\"\"\n Write results in the file system\n :param filename: the name of the file to save\n :param frase: the object from which recover the sentence\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :param output_path: the path of the output file (where you save your predictions)\n :param predictions: the predictions made by the system\n :param vocab: the vocab needed for giving a sense\n :param enable_coarse_grained: changes the flow of the function from fine-grained to coarse-grained. Default to 1. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. Need to consult the vocab.\n :param vocab_for_coarse: The vocab in support of mode 2 or 3\n :return: 1 if succeeds\n \"\"\"\n global mfs_counter\n bn2wn = get_bn2wn()\n lemma2wn = reload_word_mapping(resources_path + '/mapping/lemma2wn.txt')\n to_write = []\n for index, parola in enumerate(frase):\n name = parola.xpath('name()')\n if name == 'instance':\n id = parola.get('id')\n list_of_possible_senses_first_step = lemma2wn.get(parola.text)\n if not list_of_possible_senses_first_step:\n the_actual_meaning = MFS(parola, bn2wn, vocab2=\n vocab_for_coarse, pred_case=enable_coarse_grained)\n mfs_counter += 1\n to_write.append((id, the_actual_meaning))\n continue\n list_of_possible_senses_bn_version = convert_from_wnlist_2_bnlist(\n list_of_possible_senses_first_step, bn2wn)\n candidates, list_of_possible_senses_bn_version = (\n create_custom_label(list_of_possible_senses_bn_version,\n parola.text, vocab, predictions[index],\n enable_coarse_grained=enable_coarse_grained))\n the_actual_meaning = None\n if candidates:\n argmax = 
np.argmax(candidates)\n the_actual_meaning = list_of_possible_senses_bn_version[argmax]\n else:\n mfs_counter += 1\n the_actual_meaning = MFS(parola, bn2wn, vocab2=\n vocab_for_coarse, pred_case=enable_coarse_grained)\n to_write.append((id, the_actual_meaning))\n with open(outputh_path + '/' + filename, 'a') as test_saving:\n for tupla in to_write:\n test_saving.write(tupla[0] + ' ' + tupla[1] + '\\n')\n del to_write\n del lemma2wn\n del bn2wn\n return 1\n\n\ndef MFS(parola, vocab: Dict, vocab2: Dict=None, pred_case: int=1) ->str:\n \"\"\"\n Returns the sense by applying the Most Frequent Sense (MFS) strategy\n :param parola: the Element object to which associate a sense\n :param vocab: the vocab needed for giving a sense\n :param vocab2: default to None. The other vocabulary to use if coarse-grained mode is enabled. Has to be populated if enable_coarse_grained\n :param pred_case: whether to adopt a \"rollback\" strategy such as MFS or not. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. 
Need to consult the vocab.\n :return: the chosen sense with the MFS technique\n \"\"\"\n pos = parola.get('pos')\n pos_input = __decide_pos(pos)\n wordnet_object = wordnet.synsets(parola.get('lemma'), pos=pos_input)\n try:\n wordnet_object = wordnet_object[0]\n except:\n print(wordnet_object)\n print(parola.text)\n wn_synset = 'wn:' + str(wordnet_object.offset()).zfill(8\n ) + wordnet_object.pos()\n the_actual_meaning = next(key for key, value in vocab.items() if \n wn_synset in value)\n to_return = __extrapolate_value_for_MFS(the_actual_meaning, vocab=\n vocab2, pred_case=pred_case)\n return to_return\n\n\ndef __extrapolate_value_for_MFS(value: object, pred_case: int=1, vocab:\n Dict=None) ->str:\n \"\"\"\n Taking either a List or String in input, that represents the found Babelnet ID, this function handles it and return a string that contains the value of the prediction\n :param value: The Value from which to extrapolate the actual meaning found\n :param pred_case: whether to adopt a \"rollback\" strategy such as MFS or not. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. 
Need to consult the vocab.\n :param vocab: The vocab in support of mode 2 or 3.\n :return: the actual meaning found with MFS\n \"\"\"\n the_meaning_to_explot = __type_checker(value)\n if pred_case == 1:\n return the_meaning_to_explot\n if pred_case == 2:\n to_return = vocab.get(the_meaning_to_explot)\n return to_return[0] if to_return else 'factotum'\n if pred_case == 3:\n to_return = vocab.get(the_meaning_to_explot)\n return to_return[0]\n\n\ndef __type_checker(value: object) ->str:\n \"\"\"\n Checks the type of the object and, accordingly, returns it\n :param value: the value to examinate\n :return: a string that is the value expected\n \"\"\"\n if type(value) == str:\n return value\n if type(value) == list:\n return value[0]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef predict_babelnet(input_path: str, output_path: str, resources_path: str\n ) ->None:\n global mfs_counter\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <BABELSynset>\" format (e.g. \"d000.s000.t000 bn:01234567n\").\n \n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n \n N.B. DO NOT HARD CODE PATHS IN HERE. Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n print('>>>> BABELNET PREDICTION')\n prediction_results, sentences_xml_elements = __predict(input_path,\n resources_path)\n vocab_label_bn = create_mapping_dictionary(resources_path, mode='bn')\n correctly_saved = 0\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n filename = filename[:-3] + 'babelnet.gold.key.txt'\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename, sentences_xml_elements[\n index], resources_path, output_path, prediction_results[index][\n 0][0], vocab=vocab_label_bn, enable_coarse_grained=1,\n vocab_for_coarse=None)\n print('Successfully saved {} out of {}'.format(correctly_saved, len(\n prediction_results)))\n del prediction_results\n print('Of these, {} were MFS'.format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef predict_wordnet_domains(input_path: 
str, output_path: str,\n resources_path: str) ->None:\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <wordnetDomain>\" format (e.g. \"d000.s000.t000 sport\").\n\n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n\n N.B. DO NOT HARD CODE PATHS IN HERE. Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n global mfs_counter\n print('>>>> WORDNET DOMAINS PREDICTION')\n prediction_results, sentences_xml_elements = __predict(input_path,\n resources_path)\n vocab_label_wndmn = create_mapping_dictionary(resources_path, mode='wndmn')\n correctly_saved = 0\n bn2wndom = get_bn2wndomains()\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n filename = filename[:-3] + 'wndomains.gold.key.txt'\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename, sentences_xml_elements[\n index], resources_path, output_path, prediction_results[index][\n 1][0], vocab=vocab_label_wndmn, enable_coarse_grained=2,\n vocab_for_coarse=bn2wndom)\n print('Successfully saved {} out of {}'.format(correctly_saved, len(\n prediction_results)))\n del prediction_results\n print('Of these, {} were MFS'.format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef predict_lexicographer(input_path: str, output_path: 
str, resources_path:\n str) ->None:\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <lexicographerId>\" format (e.g. \"d000.s000.t000 noun.animal\").\n\n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n\n N.B. DO NOT HARD CODE PATHS IN HERE. Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n global mfs_counter\n print('>>>> LEXICOGRAPHER PREDICTION')\n prediction_results, sentences_xml_elements = __predict(input_path,\n resources_path)\n vocab_label_lex = create_mapping_dictionary(resources_path, mode='lex')\n correctly_saved = 0\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n bn2lex = get_bn2lex()\n filename = filename[:-3] + 'lexicon.gold.key.txt'\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename, sentences_xml_elements[\n index], resources_path, output_path, prediction_results[index][\n 2][0], vocab=vocab_label_lex, enable_coarse_grained=3,\n vocab_for_coarse=bn2lex)\n print('Successfully saved {} out of {}'.format(correctly_saved, len(\n prediction_results)))\n del prediction_results\n print('Of these, {} were MFS'.format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef __predict(input_path: str, resources_path: str) ->Tuple:\n \"\"\"\n Actually 
predicts a sentence and returns the predictions in the requested formats\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: The actual prediction by the network\n \"\"\"\n train, etree_data = load_dataset(input_path)\n train = [dato for dato in train if dato]\n vocab_label_wndmn = create_mapping_dictionary(resources_path, mode='wndmn')\n vocab_label_bn = create_mapping_dictionary(resources_path, mode='bn')\n vocab_label_lex = create_mapping_dictionary(resources_path, mode='lex')\n modello = WSD(resources_path + '/vocabularies/bert_vocab.txt', [len(\n vocab_label_bn), len(vocab_label_wndmn), len(vocab_label_lex)],\n dropout=0.1, recurrent_dropout=0.1, learning_rate=0.0003)\n tokenizatore = modello.tokenizatore\n modello.model.load_weights(resources_path + '/saved_model/model_20_2.14.h5'\n )\n to_return = []\n sentences_xml_elements = etree_data.xpath('/*/*/*')\n for sentence in train:\n feature_1, feature_2, feature_3 = (\n convert_sentence_to_features_no_padding(sentence, tokenizatore))\n results = modello.model.predict({'input_word_ids': feature_1,\n 'input_mask': feature_2, 'segment_ids': feature_3}, verbose=1)\n to_return.append(results)\n del vocab_label_lex\n del vocab_label_wndmn\n del vocab_label_bn\n return to_return, sentences_xml_elements\n\n\ndef __write_result(filename: str, frase, resources_path: str, outputh_path:\n str, predictions, vocab=None, enable_coarse_grained: int=1,\n vocab_for_coarse=None) ->int:\n \"\"\"\n Write results in the file system\n :param filename: the name of the file to save\n :param frase: the object from which recover the sentence\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :param 
output_path: the path of the output file (where you save your predictions)\n :param predictions: the predictions made by the system\n :param vocab: the vocab needed for giving a sense\n :param enable_coarse_grained: changes the flow of the function from fine-grained to coarse-grained. Default to 1. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. Need to consult the vocab.\n :param vocab_for_coarse: The vocab in support of mode 2 or 3\n :return: 1 if succeeds\n \"\"\"\n global mfs_counter\n bn2wn = get_bn2wn()\n lemma2wn = reload_word_mapping(resources_path + '/mapping/lemma2wn.txt')\n to_write = []\n for index, parola in enumerate(frase):\n name = parola.xpath('name()')\n if name == 'instance':\n id = parola.get('id')\n list_of_possible_senses_first_step = lemma2wn.get(parola.text)\n if not list_of_possible_senses_first_step:\n the_actual_meaning = MFS(parola, bn2wn, vocab2=\n vocab_for_coarse, pred_case=enable_coarse_grained)\n mfs_counter += 1\n to_write.append((id, the_actual_meaning))\n continue\n list_of_possible_senses_bn_version = convert_from_wnlist_2_bnlist(\n list_of_possible_senses_first_step, bn2wn)\n candidates, list_of_possible_senses_bn_version = (\n create_custom_label(list_of_possible_senses_bn_version,\n parola.text, vocab, predictions[index],\n enable_coarse_grained=enable_coarse_grained))\n the_actual_meaning = None\n if candidates:\n argmax = np.argmax(candidates)\n the_actual_meaning = list_of_possible_senses_bn_version[argmax]\n else:\n mfs_counter += 1\n the_actual_meaning = MFS(parola, bn2wn, vocab2=\n vocab_for_coarse, pred_case=enable_coarse_grained)\n to_write.append((id, the_actual_meaning))\n with open(outputh_path + '/' + filename, 'a') as test_saving:\n for tupla in to_write:\n 
test_saving.write(tupla[0] + ' ' + tupla[1] + '\\n')\n del to_write\n del lemma2wn\n del bn2wn\n return 1\n\n\ndef MFS(parola, vocab: Dict, vocab2: Dict=None, pred_case: int=1) ->str:\n \"\"\"\n Returns the sense by applying the Most Frequent Sense (MFS) strategy\n :param parola: the Element object to which associate a sense\n :param vocab: the vocab needed for giving a sense\n :param vocab2: default to None. The other vocabulary to use if coarse-grained mode is enabled. Has to be populated if enable_coarse_grained\n :param pred_case: whether to adopt a \"rollback\" strategy such as MFS or not. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. Need to consult the vocab.\n :return: the chosen sense with the MFS technique\n \"\"\"\n pos = parola.get('pos')\n pos_input = __decide_pos(pos)\n wordnet_object = wordnet.synsets(parola.get('lemma'), pos=pos_input)\n try:\n wordnet_object = wordnet_object[0]\n except:\n print(wordnet_object)\n print(parola.text)\n wn_synset = 'wn:' + str(wordnet_object.offset()).zfill(8\n ) + wordnet_object.pos()\n the_actual_meaning = next(key for key, value in vocab.items() if \n wn_synset in value)\n to_return = __extrapolate_value_for_MFS(the_actual_meaning, vocab=\n vocab2, pred_case=pred_case)\n return to_return\n\n\ndef __extrapolate_value_for_MFS(value: object, pred_case: int=1, vocab:\n Dict=None) ->str:\n \"\"\"\n Taking either a List or String in input, that represents the found Babelnet ID, this function handles it and return a string that contains the value of the prediction\n :param value: The Value from which to extrapolate the actual meaning found\n :param pred_case: whether to adopt a \"rollback\" strategy such as MFS or not. Possible values:\n 1 --> Means I'm predicting with Babelnet. 
No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. Need to consult the vocab.\n :param vocab: The vocab in support of mode 2 or 3.\n :return: the actual meaning found with MFS\n \"\"\"\n the_meaning_to_explot = __type_checker(value)\n if pred_case == 1:\n return the_meaning_to_explot\n if pred_case == 2:\n to_return = vocab.get(the_meaning_to_explot)\n return to_return[0] if to_return else 'factotum'\n if pred_case == 3:\n to_return = vocab.get(the_meaning_to_explot)\n return to_return[0]\n\n\ndef __type_checker(value: object) ->str:\n \"\"\"\n Checks the type of the object and, accordingly, returns it\n :param value: the value to examinate\n :return: a string that is the value expected\n \"\"\"\n if type(value) == str:\n return value\n if type(value) == list:\n return value[0]\n\n\n<mask token>\n\n\ndef convert_from_wnlist_2_bnlist(list_of_bn: List, vocab: Dict) ->List:\n \"\"\"\n Cast the given list (which contains only WN ids) to Babelnet IDs\n :param list_of_bn: the list to cast\n :param vocab: the vocabulary to use to perform the conversion\n :return: the converted list\n \"\"\"\n list_of_possible_senses_bn_version = []\n for candidate in list_of_bn:\n is_it_here = next(key for key, value in vocab.items() if candidate in\n value)\n if is_it_here:\n list_of_possible_senses_bn_version.append(is_it_here if type(\n is_it_here) == str else is_it_here[0])\n return list_of_possible_senses_bn_version\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef predict_babelnet(input_path: str, output_path: str, resources_path: str\n ) ->None:\n global mfs_counter\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <BABELSynset>\" format (e.g. \"d000.s000.t000 bn:01234567n\").\n \n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n \n N.B. DO NOT HARD CODE PATHS IN HERE. Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n print('>>>> BABELNET PREDICTION')\n prediction_results, sentences_xml_elements = __predict(input_path,\n resources_path)\n vocab_label_bn = create_mapping_dictionary(resources_path, mode='bn')\n correctly_saved = 0\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n filename = filename[:-3] + 'babelnet.gold.key.txt'\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename, sentences_xml_elements[\n index], resources_path, output_path, prediction_results[index][\n 0][0], vocab=vocab_label_bn, enable_coarse_grained=1,\n vocab_for_coarse=None)\n print('Successfully saved {} out of {}'.format(correctly_saved, len(\n prediction_results)))\n del prediction_results\n print('Of these, {} were MFS'.format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef predict_wordnet_domains(input_path: 
str, output_path: str,\n resources_path: str) ->None:\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <wordnetDomain>\" format (e.g. \"d000.s000.t000 sport\").\n\n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n\n N.B. DO NOT HARD CODE PATHS IN HERE. Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n global mfs_counter\n print('>>>> WORDNET DOMAINS PREDICTION')\n prediction_results, sentences_xml_elements = __predict(input_path,\n resources_path)\n vocab_label_wndmn = create_mapping_dictionary(resources_path, mode='wndmn')\n correctly_saved = 0\n bn2wndom = get_bn2wndomains()\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n filename = filename[:-3] + 'wndomains.gold.key.txt'\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename, sentences_xml_elements[\n index], resources_path, output_path, prediction_results[index][\n 1][0], vocab=vocab_label_wndmn, enable_coarse_grained=2,\n vocab_for_coarse=bn2wndom)\n print('Successfully saved {} out of {}'.format(correctly_saved, len(\n prediction_results)))\n del prediction_results\n print('Of these, {} were MFS'.format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef predict_lexicographer(input_path: str, output_path: 
str, resources_path:\n str) ->None:\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <lexicographerId>\" format (e.g. \"d000.s000.t000 noun.animal\").\n\n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n\n N.B. DO NOT HARD CODE PATHS IN HERE. Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n global mfs_counter\n print('>>>> LEXICOGRAPHER PREDICTION')\n prediction_results, sentences_xml_elements = __predict(input_path,\n resources_path)\n vocab_label_lex = create_mapping_dictionary(resources_path, mode='lex')\n correctly_saved = 0\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n bn2lex = get_bn2lex()\n filename = filename[:-3] + 'lexicon.gold.key.txt'\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename, sentences_xml_elements[\n index], resources_path, output_path, prediction_results[index][\n 2][0], vocab=vocab_label_lex, enable_coarse_grained=3,\n vocab_for_coarse=bn2lex)\n print('Successfully saved {} out of {}'.format(correctly_saved, len(\n prediction_results)))\n del prediction_results\n print('Of these, {} were MFS'.format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef __predict(input_path: str, resources_path: str) ->Tuple:\n \"\"\"\n Actually 
predicts a sentence and returns the predictions in the requested formats\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: The actual prediction by the network\n \"\"\"\n train, etree_data = load_dataset(input_path)\n train = [dato for dato in train if dato]\n vocab_label_wndmn = create_mapping_dictionary(resources_path, mode='wndmn')\n vocab_label_bn = create_mapping_dictionary(resources_path, mode='bn')\n vocab_label_lex = create_mapping_dictionary(resources_path, mode='lex')\n modello = WSD(resources_path + '/vocabularies/bert_vocab.txt', [len(\n vocab_label_bn), len(vocab_label_wndmn), len(vocab_label_lex)],\n dropout=0.1, recurrent_dropout=0.1, learning_rate=0.0003)\n tokenizatore = modello.tokenizatore\n modello.model.load_weights(resources_path + '/saved_model/model_20_2.14.h5'\n )\n to_return = []\n sentences_xml_elements = etree_data.xpath('/*/*/*')\n for sentence in train:\n feature_1, feature_2, feature_3 = (\n convert_sentence_to_features_no_padding(sentence, tokenizatore))\n results = modello.model.predict({'input_word_ids': feature_1,\n 'input_mask': feature_2, 'segment_ids': feature_3}, verbose=1)\n to_return.append(results)\n del vocab_label_lex\n del vocab_label_wndmn\n del vocab_label_bn\n return to_return, sentences_xml_elements\n\n\ndef __write_result(filename: str, frase, resources_path: str, outputh_path:\n str, predictions, vocab=None, enable_coarse_grained: int=1,\n vocab_for_coarse=None) ->int:\n \"\"\"\n Write results in the file system\n :param filename: the name of the file to save\n :param frase: the object from which recover the sentence\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :param 
output_path: the path of the output file (where you save your predictions)\n :param predictions: the predictions made by the system\n :param vocab: the vocab needed for giving a sense\n :param enable_coarse_grained: changes the flow of the function from fine-grained to coarse-grained. Default to 1. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. Need to consult the vocab.\n :param vocab_for_coarse: The vocab in support of mode 2 or 3\n :return: 1 if succeeds\n \"\"\"\n global mfs_counter\n bn2wn = get_bn2wn()\n lemma2wn = reload_word_mapping(resources_path + '/mapping/lemma2wn.txt')\n to_write = []\n for index, parola in enumerate(frase):\n name = parola.xpath('name()')\n if name == 'instance':\n id = parola.get('id')\n list_of_possible_senses_first_step = lemma2wn.get(parola.text)\n if not list_of_possible_senses_first_step:\n the_actual_meaning = MFS(parola, bn2wn, vocab2=\n vocab_for_coarse, pred_case=enable_coarse_grained)\n mfs_counter += 1\n to_write.append((id, the_actual_meaning))\n continue\n list_of_possible_senses_bn_version = convert_from_wnlist_2_bnlist(\n list_of_possible_senses_first_step, bn2wn)\n candidates, list_of_possible_senses_bn_version = (\n create_custom_label(list_of_possible_senses_bn_version,\n parola.text, vocab, predictions[index],\n enable_coarse_grained=enable_coarse_grained))\n the_actual_meaning = None\n if candidates:\n argmax = np.argmax(candidates)\n the_actual_meaning = list_of_possible_senses_bn_version[argmax]\n else:\n mfs_counter += 1\n the_actual_meaning = MFS(parola, bn2wn, vocab2=\n vocab_for_coarse, pred_case=enable_coarse_grained)\n to_write.append((id, the_actual_meaning))\n with open(outputh_path + '/' + filename, 'a') as test_saving:\n for tupla in to_write:\n 
test_saving.write(tupla[0] + ' ' + tupla[1] + '\\n')\n del to_write\n del lemma2wn\n del bn2wn\n return 1\n\n\ndef MFS(parola, vocab: Dict, vocab2: Dict=None, pred_case: int=1) ->str:\n \"\"\"\n Returns the sense by applying the Most Frequent Sense (MFS) strategy\n :param parola: the Element object to which associate a sense\n :param vocab: the vocab needed for giving a sense\n :param vocab2: default to None. The other vocabulary to use if coarse-grained mode is enabled. Has to be populated if enable_coarse_grained\n :param pred_case: whether to adopt a \"rollback\" strategy such as MFS or not. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. Need to consult the vocab.\n :return: the chosen sense with the MFS technique\n \"\"\"\n pos = parola.get('pos')\n pos_input = __decide_pos(pos)\n wordnet_object = wordnet.synsets(parola.get('lemma'), pos=pos_input)\n try:\n wordnet_object = wordnet_object[0]\n except:\n print(wordnet_object)\n print(parola.text)\n wn_synset = 'wn:' + str(wordnet_object.offset()).zfill(8\n ) + wordnet_object.pos()\n the_actual_meaning = next(key for key, value in vocab.items() if \n wn_synset in value)\n to_return = __extrapolate_value_for_MFS(the_actual_meaning, vocab=\n vocab2, pred_case=pred_case)\n return to_return\n\n\ndef __extrapolate_value_for_MFS(value: object, pred_case: int=1, vocab:\n Dict=None) ->str:\n \"\"\"\n Taking either a List or String in input, that represents the found Babelnet ID, this function handles it and return a string that contains the value of the prediction\n :param value: The Value from which to extrapolate the actual meaning found\n :param pred_case: whether to adopt a \"rollback\" strategy such as MFS or not. Possible values:\n 1 --> Means I'm predicting with Babelnet. 
No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. Need to consult the vocab.\n :param vocab: The vocab in support of mode 2 or 3.\n :return: the actual meaning found with MFS\n \"\"\"\n the_meaning_to_explot = __type_checker(value)\n if pred_case == 1:\n return the_meaning_to_explot\n if pred_case == 2:\n to_return = vocab.get(the_meaning_to_explot)\n return to_return[0] if to_return else 'factotum'\n if pred_case == 3:\n to_return = vocab.get(the_meaning_to_explot)\n return to_return[0]\n\n\ndef __type_checker(value: object) ->str:\n \"\"\"\n Checks the type of the object and, accordingly, returns it\n :param value: the value to examinate\n :return: a string that is the value expected\n \"\"\"\n if type(value) == str:\n return value\n if type(value) == list:\n return value[0]\n\n\ndef __decide_pos(pos: str) ->str:\n \"\"\"\n Decides the WN representation of the given pos in input\n :param pos: the pos to interpret with WordNet\n :return: the WN representation of the given pos\n \"\"\"\n to_return = None\n if pos == 'NOUN':\n to_return = 'n'\n if pos == 'VERB':\n to_return = 'v'\n if pos == 'ADJ':\n to_return = 'a'\n if pos == 'ADV':\n to_return = 'r'\n return to_return\n\n\ndef convert_from_wnlist_2_bnlist(list_of_bn: List, vocab: Dict) ->List:\n \"\"\"\n Cast the given list (which contains only WN ids) to Babelnet IDs\n :param list_of_bn: the list to cast\n :param vocab: the vocabulary to use to perform the conversion\n :return: the converted list\n \"\"\"\n list_of_possible_senses_bn_version = []\n for candidate in list_of_bn:\n is_it_here = next(key for key, value in vocab.items() if candidate in\n value)\n if is_it_here:\n list_of_possible_senses_bn_version.append(is_it_here if type(\n is_it_here) == str else is_it_here[0])\n return list_of_possible_senses_bn_version\n\n\ndef 
create_custom_label(list_of_possible_senses: List, word: str, vocab:\n Dict, predictions, enable_coarse_grained: int=1) ->List:\n \"\"\"\n Converts the list of babelnet IDS to a number and outputs the converted list\n :param list_of_possible_senses: the list that contains all the babelnet's IDs\n :param word: the word for which we are predicting the sense in a specific moment\n :param vocab: the vocabulary Word -> Serial to exploit for the conversion\n :param predictions: the predictions made by the system\n :param enable_coarse_grained: changes the flow of the function from fine-grained to coarse-grained. Default to None. Possible values:\n 1 --> The flow will still be the same\n 2,3 -> Flow will change, triggering the first step for the coarse-grained approach.\n :return: a List with the IDs converted\n \"\"\"\n to_return = []\n list_of_indices_to_delete = []\n for indice in range(len(list_of_possible_senses)):\n new_string = word + '_' + list_of_possible_senses[indice\n ] if enable_coarse_grained == 1 else list_of_possible_senses[indice\n ]\n conversion = None\n try:\n conversion = int(vocab[new_string])\n to_return.append(predictions[conversion])\n except:\n list_of_indices_to_delete.append(indice)\n continue\n if list_of_indices_to_delete:\n list_of_possible_senses = [list_of_possible_senses[prov_index] for\n prov_index in range(len(list_of_possible_senses)) if prov_index\n not in list_of_indices_to_delete]\n return to_return, list_of_possible_senses\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef predict_babelnet(input_path: str, output_path: str, resources_path: str\n ) ->None:\n global mfs_counter\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <BABELSynset>\" format (e.g. \"d000.s000.t000 bn:01234567n\").\n \n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n \n N.B. DO NOT HARD CODE PATHS IN HERE. Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n print('>>>> BABELNET PREDICTION')\n prediction_results, sentences_xml_elements = __predict(input_path,\n resources_path)\n vocab_label_bn = create_mapping_dictionary(resources_path, mode='bn')\n correctly_saved = 0\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n filename = filename[:-3] + 'babelnet.gold.key.txt'\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename, sentences_xml_elements[\n index], resources_path, output_path, prediction_results[index][\n 0][0], vocab=vocab_label_bn, enable_coarse_grained=1,\n vocab_for_coarse=None)\n print('Successfully saved {} out of {}'.format(correctly_saved, len(\n prediction_results)))\n del prediction_results\n print('Of these, {} were MFS'.format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef predict_wordnet_domains(input_path: 
str, output_path: str,\n resources_path: str) ->None:\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <wordnetDomain>\" format (e.g. \"d000.s000.t000 sport\").\n\n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n\n N.B. DO NOT HARD CODE PATHS IN HERE. Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n global mfs_counter\n print('>>>> WORDNET DOMAINS PREDICTION')\n prediction_results, sentences_xml_elements = __predict(input_path,\n resources_path)\n vocab_label_wndmn = create_mapping_dictionary(resources_path, mode='wndmn')\n correctly_saved = 0\n bn2wndom = get_bn2wndomains()\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n filename = filename[:-3] + 'wndomains.gold.key.txt'\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename, sentences_xml_elements[\n index], resources_path, output_path, prediction_results[index][\n 1][0], vocab=vocab_label_wndmn, enable_coarse_grained=2,\n vocab_for_coarse=bn2wndom)\n print('Successfully saved {} out of {}'.format(correctly_saved, len(\n prediction_results)))\n del prediction_results\n print('Of these, {} were MFS'.format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef predict_lexicographer(input_path: str, output_path: 
str, resources_path:\n str) ->None:\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <lexicographerId>\" format (e.g. \"d000.s000.t000 noun.animal\").\n\n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n\n N.B. DO NOT HARD CODE PATHS IN HERE. Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n global mfs_counter\n print('>>>> LEXICOGRAPHER PREDICTION')\n prediction_results, sentences_xml_elements = __predict(input_path,\n resources_path)\n vocab_label_lex = create_mapping_dictionary(resources_path, mode='lex')\n correctly_saved = 0\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n bn2lex = get_bn2lex()\n filename = filename[:-3] + 'lexicon.gold.key.txt'\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename, sentences_xml_elements[\n index], resources_path, output_path, prediction_results[index][\n 2][0], vocab=vocab_label_lex, enable_coarse_grained=3,\n vocab_for_coarse=bn2lex)\n print('Successfully saved {} out of {}'.format(correctly_saved, len(\n prediction_results)))\n del prediction_results\n print('Of these, {} were MFS'.format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef __predict(input_path: str, resources_path: str) ->Tuple:\n \"\"\"\n Actually 
predicts a sentence and returns the predictions in the requested formats\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: The actual prediction by the network\n \"\"\"\n train, etree_data = load_dataset(input_path)\n train = [dato for dato in train if dato]\n vocab_label_wndmn = create_mapping_dictionary(resources_path, mode='wndmn')\n vocab_label_bn = create_mapping_dictionary(resources_path, mode='bn')\n vocab_label_lex = create_mapping_dictionary(resources_path, mode='lex')\n modello = WSD(resources_path + '/vocabularies/bert_vocab.txt', [len(\n vocab_label_bn), len(vocab_label_wndmn), len(vocab_label_lex)],\n dropout=0.1, recurrent_dropout=0.1, learning_rate=0.0003)\n tokenizatore = modello.tokenizatore\n modello.model.load_weights(resources_path + '/saved_model/model_20_2.14.h5'\n )\n to_return = []\n sentences_xml_elements = etree_data.xpath('/*/*/*')\n for sentence in train:\n feature_1, feature_2, feature_3 = (\n convert_sentence_to_features_no_padding(sentence, tokenizatore))\n results = modello.model.predict({'input_word_ids': feature_1,\n 'input_mask': feature_2, 'segment_ids': feature_3}, verbose=1)\n to_return.append(results)\n del vocab_label_lex\n del vocab_label_wndmn\n del vocab_label_bn\n return to_return, sentences_xml_elements\n\n\ndef __write_result(filename: str, frase, resources_path: str, outputh_path:\n str, predictions, vocab=None, enable_coarse_grained: int=1,\n vocab_for_coarse=None) ->int:\n \"\"\"\n Write results in the file system\n :param filename: the name of the file to save\n :param frase: the object from which recover the sentence\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :param 
output_path: the path of the output file (where you save your predictions)\n :param predictions: the predictions made by the system\n :param vocab: the vocab needed for giving a sense\n :param enable_coarse_grained: changes the flow of the function from fine-grained to coarse-grained. Default to 1. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. Need to consult the vocab.\n :param vocab_for_coarse: The vocab in support of mode 2 or 3\n :return: 1 if succeeds\n \"\"\"\n global mfs_counter\n bn2wn = get_bn2wn()\n lemma2wn = reload_word_mapping(resources_path + '/mapping/lemma2wn.txt')\n to_write = []\n for index, parola in enumerate(frase):\n name = parola.xpath('name()')\n if name == 'instance':\n id = parola.get('id')\n list_of_possible_senses_first_step = lemma2wn.get(parola.text)\n if not list_of_possible_senses_first_step:\n the_actual_meaning = MFS(parola, bn2wn, vocab2=\n vocab_for_coarse, pred_case=enable_coarse_grained)\n mfs_counter += 1\n to_write.append((id, the_actual_meaning))\n continue\n list_of_possible_senses_bn_version = convert_from_wnlist_2_bnlist(\n list_of_possible_senses_first_step, bn2wn)\n candidates, list_of_possible_senses_bn_version = (\n create_custom_label(list_of_possible_senses_bn_version,\n parola.text, vocab, predictions[index],\n enable_coarse_grained=enable_coarse_grained))\n the_actual_meaning = None\n if candidates:\n argmax = np.argmax(candidates)\n the_actual_meaning = list_of_possible_senses_bn_version[argmax]\n else:\n mfs_counter += 1\n the_actual_meaning = MFS(parola, bn2wn, vocab2=\n vocab_for_coarse, pred_case=enable_coarse_grained)\n to_write.append((id, the_actual_meaning))\n with open(outputh_path + '/' + filename, 'a') as test_saving:\n for tupla in to_write:\n 
test_saving.write(tupla[0] + ' ' + tupla[1] + '\\n')\n del to_write\n del lemma2wn\n del bn2wn\n return 1\n\n\ndef MFS(parola, vocab: Dict, vocab2: Dict=None, pred_case: int=1) ->str:\n \"\"\"\n Returns the sense by applying the Most Frequent Sense (MFS) strategy\n :param parola: the Element object to which associate a sense\n :param vocab: the vocab needed for giving a sense\n :param vocab2: default to None. The other vocabulary to use if coarse-grained mode is enabled. Has to be populated if enable_coarse_grained\n :param pred_case: whether to adopt a \"rollback\" strategy such as MFS or not. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. Need to consult the vocab.\n :return: the chosen sense with the MFS technique\n \"\"\"\n pos = parola.get('pos')\n pos_input = __decide_pos(pos)\n wordnet_object = wordnet.synsets(parola.get('lemma'), pos=pos_input)\n try:\n wordnet_object = wordnet_object[0]\n except:\n print(wordnet_object)\n print(parola.text)\n wn_synset = 'wn:' + str(wordnet_object.offset()).zfill(8\n ) + wordnet_object.pos()\n the_actual_meaning = next(key for key, value in vocab.items() if \n wn_synset in value)\n to_return = __extrapolate_value_for_MFS(the_actual_meaning, vocab=\n vocab2, pred_case=pred_case)\n return to_return\n\n\ndef __extrapolate_value_for_MFS(value: object, pred_case: int=1, vocab:\n Dict=None) ->str:\n \"\"\"\n Taking either a List or String in input, that represents the found Babelnet ID, this function handles it and return a string that contains the value of the prediction\n :param value: The Value from which to extrapolate the actual meaning found\n :param pred_case: whether to adopt a \"rollback\" strategy such as MFS or not. Possible values:\n 1 --> Means I'm predicting with Babelnet. 
No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. Need to consult the vocab.\n :param vocab: The vocab in support of mode 2 or 3.\n :return: the actual meaning found with MFS\n \"\"\"\n the_meaning_to_explot = __type_checker(value)\n if pred_case == 1:\n return the_meaning_to_explot\n if pred_case == 2:\n to_return = vocab.get(the_meaning_to_explot)\n return to_return[0] if to_return else 'factotum'\n if pred_case == 3:\n to_return = vocab.get(the_meaning_to_explot)\n return to_return[0]\n\n\ndef __type_checker(value: object) ->str:\n \"\"\"\n Checks the type of the object and, accordingly, returns it\n :param value: the value to examinate\n :return: a string that is the value expected\n \"\"\"\n if type(value) == str:\n return value\n if type(value) == list:\n return value[0]\n\n\ndef __decide_pos(pos: str) ->str:\n \"\"\"\n Decides the WN representation of the given pos in input\n :param pos: the pos to interpret with WordNet\n :return: the WN representation of the given pos\n \"\"\"\n to_return = None\n if pos == 'NOUN':\n to_return = 'n'\n if pos == 'VERB':\n to_return = 'v'\n if pos == 'ADJ':\n to_return = 'a'\n if pos == 'ADV':\n to_return = 'r'\n return to_return\n\n\ndef convert_from_wnlist_2_bnlist(list_of_bn: List, vocab: Dict) ->List:\n \"\"\"\n Cast the given list (which contains only WN ids) to Babelnet IDs\n :param list_of_bn: the list to cast\n :param vocab: the vocabulary to use to perform the conversion\n :return: the converted list\n \"\"\"\n list_of_possible_senses_bn_version = []\n for candidate in list_of_bn:\n is_it_here = next(key for key, value in vocab.items() if candidate in\n value)\n if is_it_here:\n list_of_possible_senses_bn_version.append(is_it_here if type(\n is_it_here) == str else is_it_here[0])\n return list_of_possible_senses_bn_version\n\n\ndef 
create_custom_label(list_of_possible_senses: List, word: str, vocab:\n Dict, predictions, enable_coarse_grained: int=1) ->List:\n \"\"\"\n Converts the list of babelnet IDS to a number and outputs the converted list\n :param list_of_possible_senses: the list that contains all the babelnet's IDs\n :param word: the word for which we are predicting the sense in a specific moment\n :param vocab: the vocabulary Word -> Serial to exploit for the conversion\n :param predictions: the predictions made by the system\n :param enable_coarse_grained: changes the flow of the function from fine-grained to coarse-grained. Default to None. Possible values:\n 1 --> The flow will still be the same\n 2,3 -> Flow will change, triggering the first step for the coarse-grained approach.\n :return: a List with the IDs converted\n \"\"\"\n to_return = []\n list_of_indices_to_delete = []\n for indice in range(len(list_of_possible_senses)):\n new_string = word + '_' + list_of_possible_senses[indice\n ] if enable_coarse_grained == 1 else list_of_possible_senses[indice\n ]\n conversion = None\n try:\n conversion = int(vocab[new_string])\n to_return.append(predictions[conversion])\n except:\n list_of_indices_to_delete.append(indice)\n continue\n if list_of_indices_to_delete:\n list_of_possible_senses = [list_of_possible_senses[prov_index] for\n prov_index in range(len(list_of_possible_senses)) if prov_index\n not in list_of_indices_to_delete]\n return to_return, list_of_possible_senses\n\n\nif __name__ == '__main__':\n predict_babelnet(\n '/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/dataset/test/senseval3.data.xml'\n , '../output',\n '/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/resources'\n )\n",
"step-5": "from model import WSD\nfrom data_preprocessing import load_dataset, create_mapping_dictionary, reload_word_mapping,get_bn2wn,get_bn2wndomains, get_bn2lex\nfrom typing import List, Dict, Tuple\nfrom prova import convert_sentence_to_features_no_padding\nimport numpy as np\nimport os\nfrom nltk.corpus import wordnet\n\n\nmfs_counter = 0\n\n\ndef predict_babelnet(input_path : str, output_path : str, resources_path : str) -> None:\n global mfs_counter\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <BABELSynset>\" format (e.g. \"d000.s000.t000 bn:01234567n\").\n \n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n \n N.B. DO NOT HARD CODE PATHS IN HERE. Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n print(\">>>> BABELNET PREDICTION\")\n prediction_results, sentences_xml_elements = __predict(input_path,resources_path)\n vocab_label_bn = create_mapping_dictionary(resources_path, mode='bn')\n correctly_saved = 0\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n filename = filename[:-3]+\"babelnet.gold.key.txt\"\n for index in range(len(prediction_results)):\n\n correctly_saved += __write_result(filename,\n sentences_xml_elements[index],\n resources_path, output_path,\n 
prediction_results[index][0][0],\n vocab=vocab_label_bn,\n enable_coarse_grained=1,\n vocab_for_coarse=None)\n\n print(\"Successfully saved {} out of {}\".format(correctly_saved, len(prediction_results)))\n del prediction_results\n print(\"Of these, {} were MFS\".format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef predict_wordnet_domains(input_path : str, output_path : str, resources_path : str) -> None:\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <wordnetDomain>\" format (e.g. \"d000.s000.t000 sport\").\n\n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n\n N.B. DO NOT HARD CODE PATHS IN HERE. Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n global mfs_counter\n print(\">>>> WORDNET DOMAINS PREDICTION\")\n prediction_results, sentences_xml_elements = __predict(input_path,resources_path)\n vocab_label_wndmn = create_mapping_dictionary(resources_path, mode='wndmn')\n correctly_saved = 0\n bn2wndom = get_bn2wndomains()\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n filename = filename[:-3]+\"wndomains.gold.key.txt\"\n for index in range(len(prediction_results)):\n\n correctly_saved += __write_result(filename,\n sentences_xml_elements[index],\n resources_path, output_path,\n 
prediction_results[index][1][0],\n vocab=vocab_label_wndmn,\n enable_coarse_grained=2,\n vocab_for_coarse=bn2wndom)\n\n print(\"Successfully saved {} out of {}\".format(correctly_saved, len(prediction_results)))\n del prediction_results\n print(\"Of these, {} were MFS\".format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef predict_lexicographer(input_path : str, output_path : str, resources_path : str) -> None:\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <lexicographerId>\" format (e.g. \"d000.s000.t000 noun.animal\").\n\n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n\n N.B. DO NOT HARD CODE PATHS IN HERE. Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n global mfs_counter\n print(\">>>> LEXICOGRAPHER PREDICTION\")\n prediction_results, sentences_xml_elements = __predict(input_path, resources_path)\n vocab_label_lex = create_mapping_dictionary(resources_path, mode='lex')\n correctly_saved = 0\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n bn2lex = get_bn2lex()\n filename = filename[:-3] + \"lexicon.gold.key.txt\"\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename,\n sentences_xml_elements[index],\n resources_path,output_path,\n 
prediction_results[index][2][0],\n vocab= vocab_label_lex,\n enable_coarse_grained=3,\n vocab_for_coarse=bn2lex)\n\n print(\"Successfully saved {} out of {}\".format(correctly_saved, len(prediction_results)))\n del prediction_results\n print(\"Of these, {} were MFS\".format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef __predict(input_path : str, resources_path : str) -> Tuple:\n \"\"\"\n Actually predicts a sentence and returns the predictions in the requested formats\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: The actual prediction by the network\n \"\"\"\n train, etree_data = load_dataset(input_path)\n train = [dato for dato in train if dato]\n vocab_label_wndmn = create_mapping_dictionary(resources_path, mode='wndmn')\n vocab_label_bn = create_mapping_dictionary(resources_path, mode='bn')\n vocab_label_lex = create_mapping_dictionary(resources_path, mode='lex')\n modello = WSD(resources_path+\"/vocabularies/bert_vocab.txt\", [len(vocab_label_bn), len(vocab_label_wndmn), len(vocab_label_lex)], dropout=0.1, recurrent_dropout=0.1,learning_rate=0.0003)\n tokenizatore = modello.tokenizatore\n modello.model.load_weights(resources_path+\"/saved_model/model_20_2.14.h5\")\n to_return = []\n sentences_xml_elements = etree_data.xpath(\"/*/*/*\")\n for sentence in train:\n feature_1, feature_2, feature_3 = convert_sentence_to_features_no_padding(sentence,tokenizatore)\n results = modello.model.predict(\n {'input_word_ids': feature_1, 'input_mask': feature_2, 'segment_ids': feature_3},\n verbose=1\n )\n to_return.append(results)\n del vocab_label_lex\n del vocab_label_wndmn\n del vocab_label_bn\n return to_return, sentences_xml_elements\n\n\ndef __write_result(filename: str,\n frase,\n 
resources_path: str,\n outputh_path: str,\n predictions,\n vocab = None,\n enable_coarse_grained: int = 1,\n vocab_for_coarse = None) -> int:\n \"\"\"\n Write results in the file system\n :param filename: the name of the file to save\n :param frase: the object from which recover the sentence\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :param output_path: the path of the output file (where you save your predictions)\n :param predictions: the predictions made by the system\n :param vocab: the vocab needed for giving a sense\n :param enable_coarse_grained: changes the flow of the function from fine-grained to coarse-grained. Default to 1. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. Need to consult the vocab.\n :param vocab_for_coarse: The vocab in support of mode 2 or 3\n :return: 1 if succeeds\n \"\"\"\n global mfs_counter\n bn2wn = get_bn2wn()\n lemma2wn = reload_word_mapping(resources_path+\"/mapping/lemma2wn.txt\")\n to_write = []\n for index, parola in enumerate(frase):\n name = parola.xpath('name()')\n if name == 'instance':\n id = parola.get('id')\n list_of_possible_senses_first_step = lemma2wn.get(parola.text)\n if not list_of_possible_senses_first_step:\n # MFS\n the_actual_meaning = MFS(parola,\n bn2wn,\n vocab2=vocab_for_coarse,\n pred_case=enable_coarse_grained)\n mfs_counter += 1\n to_write.append((id, the_actual_meaning))\n continue\n list_of_possible_senses_bn_version = convert_from_wnlist_2_bnlist(list_of_possible_senses_first_step, bn2wn)\n\n candidates,list_of_possible_senses_bn_version = create_custom_label(list_of_possible_senses_bn_version,\n parola.text,\n vocab,\n predictions[index],\n enable_coarse_grained=enable_coarse_grained)\n 
the_actual_meaning = None\n if candidates:\n argmax = np.argmax(candidates)\n the_actual_meaning = list_of_possible_senses_bn_version[argmax]\n else:\n #MFS\n mfs_counter += 1\n the_actual_meaning = MFS(parola,\n bn2wn,\n vocab2=vocab_for_coarse,\n pred_case=enable_coarse_grained)\n to_write.append((id, the_actual_meaning))\n with open(outputh_path + \"/\"+filename, \"a\") as test_saving:\n for tupla in to_write:\n test_saving.write(tupla[0] + \" \" + tupla[1]+\"\\n\")\n del to_write\n del lemma2wn\n del bn2wn\n return 1\n\n\ndef MFS(parola, vocab: Dict, vocab2:Dict = None, pred_case: int = 1) -> str:\n \"\"\"\n Returns the sense by applying the Most Frequent Sense (MFS) strategy\n :param parola: the Element object to which associate a sense\n :param vocab: the vocab needed for giving a sense\n :param vocab2: default to None. The other vocabulary to use if coarse-grained mode is enabled. Has to be populated if enable_coarse_grained\n :param pred_case: whether to adopt a \"rollback\" strategy such as MFS or not. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. 
Need to consult the vocab.\n :return: the chosen sense with the MFS technique\n \"\"\"\n pos = parola.get('pos')\n pos_input = __decide_pos(pos)\n wordnet_object = wordnet.synsets(parola.get('lemma'), pos=pos_input)\n try:\n wordnet_object = wordnet_object[0]\n except:\n print(wordnet_object)\n print(parola.text)\n wn_synset = \"wn:\" + str(wordnet_object.offset()).zfill(8) + wordnet_object.pos()\n the_actual_meaning = next(key for key, value in vocab.items() if wn_synset in value)\n to_return = __extrapolate_value_for_MFS(the_actual_meaning,vocab=vocab2, pred_case=pred_case)\n return to_return\n\n\ndef __extrapolate_value_for_MFS(value: object, pred_case: int = 1, vocab: Dict = None) -> str:\n \"\"\"\n Taking either a List or String in input, that represents the found Babelnet ID, this function handles it and return a string that contains the value of the prediction\n :param value: The Value from which to extrapolate the actual meaning found\n :param pred_case: whether to adopt a \"rollback\" strategy such as MFS or not. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. 
Need to consult the vocab.\n :param vocab: The vocab in support of mode 2 or 3.\n :return: the actual meaning found with MFS\n \"\"\"\n the_meaning_to_explot = __type_checker(value)\n if pred_case == 1:\n return the_meaning_to_explot\n if pred_case == 2:\n to_return = vocab.get(the_meaning_to_explot)\n return to_return[0] if to_return else \"factotum\"\n if pred_case == 3:\n to_return = vocab.get(the_meaning_to_explot)\n return to_return[0]\n\ndef __type_checker(value: object) -> str:\n \"\"\"\n Checks the type of the object and, accordingly, returns it\n :param value: the value to examinate\n :return: a string that is the value expected\n \"\"\"\n if type(value) == str:\n return value\n if type(value) == list:\n return value[0]\n\ndef __decide_pos(pos: str) -> str:\n \"\"\"\n Decides the WN representation of the given pos in input\n :param pos: the pos to interpret with WordNet\n :return: the WN representation of the given pos\n \"\"\"\n to_return = None\n if pos == 'NOUN':\n to_return = \"n\"\n if pos == 'VERB':\n to_return = 'v'\n if pos == 'ADJ':\n to_return = 'a'\n if pos == 'ADV':\n to_return = 'r'\n return to_return\n\n\ndef convert_from_wnlist_2_bnlist(list_of_bn: List, vocab: Dict) -> List:\n \"\"\"\n Cast the given list (which contains only WN ids) to Babelnet IDs\n :param list_of_bn: the list to cast\n :param vocab: the vocabulary to use to perform the conversion\n :return: the converted list\n \"\"\"\n list_of_possible_senses_bn_version = []\n for candidate in list_of_bn:\n is_it_here = next(key for key, value in vocab.items() if candidate in value)\n if is_it_here:\n list_of_possible_senses_bn_version.append(is_it_here if type(is_it_here) == str else is_it_here[0])\n return list_of_possible_senses_bn_version\n\ndef create_custom_label(list_of_possible_senses: List, word: str, vocab: Dict, predictions, enable_coarse_grained: int = 1) -> List:\n \"\"\"\n Converts the list of babelnet IDS to a number and outputs the converted list\n :param 
list_of_possible_senses: the list that contains all the babelnet's IDs\n :param word: the word for which we are predicting the sense in a specific moment\n :param vocab: the vocabulary Word -> Serial to exploit for the conversion\n :param predictions: the predictions made by the system\n :param enable_coarse_grained: changes the flow of the function from fine-grained to coarse-grained. Default to None. Possible values:\n 1 --> The flow will still be the same\n 2,3 -> Flow will change, triggering the first step for the coarse-grained approach.\n :return: a List with the IDs converted\n \"\"\"\n to_return = []\n list_of_indices_to_delete = []\n for indice in range(len(list_of_possible_senses)):\n new_string = word + \"_\" + list_of_possible_senses[indice] if enable_coarse_grained == 1 else list_of_possible_senses[indice]\n conversion = None\n try:\n conversion = int(vocab[new_string])\n to_return.append(predictions[conversion])\n except:\n list_of_indices_to_delete.append(indice)\n continue\n if list_of_indices_to_delete:\n list_of_possible_senses = [list_of_possible_senses[prov_index] for prov_index in range(len(list_of_possible_senses)) if prov_index not in list_of_indices_to_delete]\n return to_return, list_of_possible_senses\n\n\n\nif __name__ == \"__main__\":\n predict_babelnet(\"/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/dataset/test/senseval3.data.xml\", \"../output\", \"/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/resources\")\n #predict_wordnet_domains(\"/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/dataset/test/senseval3.data.xml\", \"../output\", \"/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/resources\")\n #predict_lexicographer(\"/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/dataset/test/senseval3.data.xml\", \"../output\", \"/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/resources\")\n",
"step-ids": [
7,
9,
11,
12,
15
]
}
|
[
7,
9,
11,
12,
15
] |
<|reserved_special_token_0|>
class Manager(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, port, options, printer):
"""Initializes test runner data structures.
Args:
port: An object implementing platform-specific functionality.
options: An options argument which contains command line options.
printer: A Printer object to record updates to.
"""
self._port = port
self._filesystem = port.host.filesystem
self._options = options
self._printer = printer
self._expectations = None
self._http_server_started = False
self._wptserve_started = False
self._websockets_server_started = False
self._results_directory = self._port.results_directory()
self._artifacts_directory = self._port.artifacts_directory()
self._finder = WebTestFinder(self._port, self._options)
self._path_finder = PathFinder(port.host.filesystem)
self._sink = CreateTestResultSink(self._port)
self._runner = WebTestRunner(self._options, self._port, self.
_printer, self._results_directory, self._test_is_slow, self._sink)
def run(self, args):
"""Runs the tests and return a RunDetails object with the results."""
start_time = time.time()
self._printer.write_update('Collecting tests ...')
running_all_tests = False
try:
paths, all_test_names, running_all_tests = self._collect_tests(args
)
except IOError:
return test_run_results.RunDetails(exit_code=exit_codes.
NO_TESTS_EXIT_STATUS)
test_names = self._finder.split_into_chunks(all_test_names)
if self._options.order == 'natural':
test_names.sort(key=self._port.test_key)
elif self._options.order == 'random':
test_names.sort()
random.Random(self._options.seed).shuffle(test_names)
elif self._options.order == 'none':
if paths:
test_names = self._restore_order(paths, test_names)
if not self._options.no_expectations:
self._printer.write_update('Parsing expectations ...')
self._expectations = test_expectations.TestExpectations(self._port)
tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
self._printer.print_found(len(all_test_names), len(test_names), len
(tests_to_run), self._options.repeat_each, self._options.iterations
)
if not tests_to_run:
msg = 'No tests to run.'
if self._options.zero_tests_executed_ok:
_log.info(msg)
else:
_log.critical(msg)
code = exit_codes.NO_TESTS_EXIT_STATUS
return test_run_results.RunDetails(exit_code=code)
exit_code = self._set_up_run(tests_to_run)
if exit_code:
return test_run_results.RunDetails(exit_code=exit_code)
if self._options.num_retries is None:
if self._options.test_list or len(paths) < len(test_names):
self._options.num_retries = 3
else:
self._options.num_retries = 0
should_retry_failures = self._options.num_retries > 0
try:
self._register_termination_handler()
self._start_servers(tests_to_run)
if self._options.watch:
run_results = self._run_test_loop(tests_to_run, tests_to_skip)
else:
run_results = self._run_test_once(tests_to_run,
tests_to_skip, should_retry_failures)
initial_results, all_retry_results = run_results
finally:
_log.info('Finally stop servers and clean up')
self._stop_servers()
self._clean_up_run()
if self._options.no_expectations:
return test_run_results.RunDetails(0, [], [], initial_results,
all_retry_results)
self._printer.write_update('Looking for new crash logs ...')
self._look_for_new_crash_logs(initial_results, start_time)
for retry_attempt_results in all_retry_results:
self._look_for_new_crash_logs(retry_attempt_results, start_time)
self._printer.write_update('Summarizing results ...')
summarized_full_results = test_run_results.summarize_results(self.
_port, self._options, self._expectations, initial_results,
all_retry_results)
summarized_failing_results = test_run_results.summarize_results(self
._port, self._options, self._expectations, initial_results,
all_retry_results, only_include_failing=True)
run_histories = test_run_results.test_run_histories(self._options,
self._expectations, initial_results, all_retry_results)
exit_code = summarized_failing_results['num_regressions']
if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:
_log.warning('num regressions (%d) exceeds max exit status (%d)',
exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)
exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS
if not self._options.dry_run:
self._write_json_files(summarized_full_results,
summarized_failing_results, initial_results,
running_all_tests, run_histories)
self._copy_results_html_file(self._artifacts_directory,
'results.html')
if (initial_results.interrupt_reason is test_run_results.
InterruptReason.EXTERNAL_SIGNAL):
exit_code = exit_codes.INTERRUPTED_EXIT_STATUS
else:
if initial_results.interrupted:
exit_code = exit_codes.EARLY_EXIT_STATUS
if self._options.show_results and (exit_code or
initial_results.total_failures):
self._port.show_results_html_file(self._filesystem.join
(self._artifacts_directory, 'results.html'))
self._printer.print_results(time.time() - start_time,
initial_results)
return test_run_results.RunDetails(exit_code,
summarized_full_results, summarized_failing_results,
initial_results, all_retry_results)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _restore_order(self, paths, test_names):
original_test_names = list(test_names)
test_names = []
for path in paths:
for test in original_test_names:
if test.startswith(path) or fnmatch.fnmatch(test, path):
test_names.append(test)
test_names += list(set(original_test_names) - set(test_names))
return test_names
<|reserved_special_token_0|>
def _is_http_test(self, test):
return (test.startswith(self.HTTP_SUBDIR + self._port.
TEST_PATH_SEPARATOR) or self._is_websocket_test(test) or self.
_port.TEST_PATH_SEPARATOR + self.HTTP_SUBDIR + self._port.
TEST_PATH_SEPARATOR in test)
def _is_websocket_test(self, test):
if self._port.should_use_wptserve(test):
return False
return self.WEBSOCKET_SUBDIR + self._port.TEST_PATH_SEPARATOR in test
def _http_tests(self, test_names):
return set(test for test in test_names if self._is_http_test(test))
def _is_perf_test(self, test):
return (self.PERF_SUBDIR == test or self.PERF_SUBDIR + self._port.
TEST_PATH_SEPARATOR in test)
def _prepare_lists(self, paths, test_names):
tests_to_skip = self._finder.skip_tests(paths, test_names, self.
_expectations)
tests_to_run = [test for test in test_names if test not in
tests_to_skip]
return tests_to_run, tests_to_skip
def _test_input_for_file(self, test_file, retry_attempt):
return TestInput(test_file, self._options.slow_timeout_ms if self.
_test_is_slow(test_file) else self._options.timeout_ms, self.
_test_requires_lock(test_file), retry_attempt=retry_attempt)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _needs_servers(self, test_names):
return any(self._is_http_test(test_name) for test_name in test_names)
<|reserved_special_token_0|>
def _run_tests(self, tests_to_run, tests_to_skip, repeat_each,
iterations, num_workers, retry_attempt=0):
test_inputs = []
for _ in range(iterations):
for test in tests_to_run:
for _ in range(repeat_each):
test_inputs.append(self._test_input_for_file(test,
retry_attempt))
return self._runner.run_tests(self._expectations, test_inputs,
tests_to_skip, num_workers, retry_attempt)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _look_for_new_crash_logs(self, run_results, start_time):
"""Looks for and writes new crash logs, at the end of the test run.
Since crash logs can take a long time to be written out if the system is
under stress, do a second pass at the end of the test run.
Args:
run_results: The results of the test run.
start_time: Time the tests started at. We're looking for crash
logs after that time.
"""
crashed_processes = []
test_to_crash_failure = {}
test_failures.AbstractTestResultType.port = self._port
test_failures.AbstractTestResultType.result_directory = (self.
_results_directory)
test_failures.AbstractTestResultType.filesystem = self._filesystem
for test, result in run_results.unexpected_results_by_name.items():
if result.type != ResultType.Crash:
continue
for failure in result.failures:
if not isinstance(failure, test_failures.FailureCrash
) or failure.has_log:
continue
crashed_processes.append([test, failure.process_name,
failure.pid])
test_to_crash_failure[test] = failure
sample_files = self._port.look_for_new_samples(crashed_processes,
start_time) or {}
for test, sample_file in sample_files.items():
test_failures.AbstractTestResultType.test_name = test
test_result = run_results.unexpected_results_by_name[test]
artifact_relative_path = self._port.output_filename(test,
test_failures.FILENAME_SUFFIX_SAMPLE, '.txt')
artifacts_sub_dir = test_result.artifacts.ArtifactsSubDirectory()
artifact_abspath = self._filesystem.join(self.
_results_directory, artifacts_sub_dir, artifact_relative_path)
self._filesystem.maybe_make_directory(self._filesystem.dirname(
artifact_abspath))
self._filesystem.copyfile(sample_file, artifact_abspath)
test_result.artifacts.AddArtifact('sample_file', self.
_filesystem.join(artifacts_sub_dir, artifact_relative_path))
new_crash_logs = self._port.look_for_new_crash_logs(crashed_processes,
start_time) or {}
for test, (crash_log, crash_site) in new_crash_logs.items():
test_failures.AbstractTestResultType.test_name = test
failure.crash_log = crash_log
failure.has_log = self._port.output_contains_sanitizer_messages(
failure.crash_log)
test_result = run_results.unexpected_results_by_name[test]
test_result.crash_site = crash_site
test_to_crash_failure[test].create_artifacts(test_result.
artifacts, force_overwrite=True)
<|reserved_special_token_0|>
def _write_json_files(self, summarized_full_results,
summarized_failing_results, initial_results, running_all_tests,
run_histories):
_log.debug('Writing JSON files in %s.', self._artifacts_directory)
times_trie = json_results_generator.test_timings_trie(initial_results
.results_by_name.values())
times_json_path = self._filesystem.join(self._artifacts_directory,
'times_ms.json')
json_results_generator.write_json(self._filesystem, times_trie,
times_json_path)
if running_all_tests:
bot_test_times_path = self._port.bot_test_times_path()
self._filesystem.maybe_make_directory(self._filesystem.dirname(
bot_test_times_path))
json_results_generator.write_json(self._filesystem, times_trie,
bot_test_times_path)
stats_trie = self._stats_trie(initial_results)
stats_path = self._filesystem.join(self._artifacts_directory,
'stats.json')
self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))
full_results_path = self._filesystem.join(self._artifacts_directory,
'full_results.json')
json_results_generator.write_json(self._filesystem,
summarized_full_results, full_results_path)
full_results_jsonp_path = self._filesystem.join(self.
_artifacts_directory, 'full_results_jsonp.js')
json_results_generator.write_json(self._filesystem,
summarized_full_results, full_results_jsonp_path, callback=
'ADD_FULL_RESULTS')
failing_results_path = self._filesystem.join(self.
_artifacts_directory, 'failing_results.json')
json_results_generator.write_json(self._filesystem,
summarized_failing_results, failing_results_path, callback=
'ADD_RESULTS')
if self._options.json_test_results:
json_results_generator.write_json(self._filesystem,
summarized_full_results, self._options.json_test_results)
if self._options.write_run_histories_to:
json_results_generator.write_json(self._filesystem,
run_histories, self._options.write_run_histories_to)
_log.debug('Finished writing JSON files.')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Manager(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, port, options, printer):
"""Initializes test runner data structures.
Args:
port: An object implementing platform-specific functionality.
options: An options argument which contains command line options.
printer: A Printer object to record updates to.
"""
self._port = port
self._filesystem = port.host.filesystem
self._options = options
self._printer = printer
self._expectations = None
self._http_server_started = False
self._wptserve_started = False
self._websockets_server_started = False
self._results_directory = self._port.results_directory()
self._artifacts_directory = self._port.artifacts_directory()
self._finder = WebTestFinder(self._port, self._options)
self._path_finder = PathFinder(port.host.filesystem)
self._sink = CreateTestResultSink(self._port)
self._runner = WebTestRunner(self._options, self._port, self.
_printer, self._results_directory, self._test_is_slow, self._sink)
def run(self, args):
"""Runs the tests and return a RunDetails object with the results."""
start_time = time.time()
self._printer.write_update('Collecting tests ...')
running_all_tests = False
try:
paths, all_test_names, running_all_tests = self._collect_tests(args
)
except IOError:
return test_run_results.RunDetails(exit_code=exit_codes.
NO_TESTS_EXIT_STATUS)
test_names = self._finder.split_into_chunks(all_test_names)
if self._options.order == 'natural':
test_names.sort(key=self._port.test_key)
elif self._options.order == 'random':
test_names.sort()
random.Random(self._options.seed).shuffle(test_names)
elif self._options.order == 'none':
if paths:
test_names = self._restore_order(paths, test_names)
if not self._options.no_expectations:
self._printer.write_update('Parsing expectations ...')
self._expectations = test_expectations.TestExpectations(self._port)
tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
self._printer.print_found(len(all_test_names), len(test_names), len
(tests_to_run), self._options.repeat_each, self._options.iterations
)
if not tests_to_run:
msg = 'No tests to run.'
if self._options.zero_tests_executed_ok:
_log.info(msg)
else:
_log.critical(msg)
code = exit_codes.NO_TESTS_EXIT_STATUS
return test_run_results.RunDetails(exit_code=code)
exit_code = self._set_up_run(tests_to_run)
if exit_code:
return test_run_results.RunDetails(exit_code=exit_code)
if self._options.num_retries is None:
if self._options.test_list or len(paths) < len(test_names):
self._options.num_retries = 3
else:
self._options.num_retries = 0
should_retry_failures = self._options.num_retries > 0
try:
self._register_termination_handler()
self._start_servers(tests_to_run)
if self._options.watch:
run_results = self._run_test_loop(tests_to_run, tests_to_skip)
else:
run_results = self._run_test_once(tests_to_run,
tests_to_skip, should_retry_failures)
initial_results, all_retry_results = run_results
finally:
_log.info('Finally stop servers and clean up')
self._stop_servers()
self._clean_up_run()
if self._options.no_expectations:
return test_run_results.RunDetails(0, [], [], initial_results,
all_retry_results)
self._printer.write_update('Looking for new crash logs ...')
self._look_for_new_crash_logs(initial_results, start_time)
for retry_attempt_results in all_retry_results:
self._look_for_new_crash_logs(retry_attempt_results, start_time)
self._printer.write_update('Summarizing results ...')
summarized_full_results = test_run_results.summarize_results(self.
_port, self._options, self._expectations, initial_results,
all_retry_results)
summarized_failing_results = test_run_results.summarize_results(self
._port, self._options, self._expectations, initial_results,
all_retry_results, only_include_failing=True)
run_histories = test_run_results.test_run_histories(self._options,
self._expectations, initial_results, all_retry_results)
exit_code = summarized_failing_results['num_regressions']
if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:
_log.warning('num regressions (%d) exceeds max exit status (%d)',
exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)
exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS
if not self._options.dry_run:
self._write_json_files(summarized_full_results,
summarized_failing_results, initial_results,
running_all_tests, run_histories)
self._copy_results_html_file(self._artifacts_directory,
'results.html')
if (initial_results.interrupt_reason is test_run_results.
InterruptReason.EXTERNAL_SIGNAL):
exit_code = exit_codes.INTERRUPTED_EXIT_STATUS
else:
if initial_results.interrupted:
exit_code = exit_codes.EARLY_EXIT_STATUS
if self._options.show_results and (exit_code or
initial_results.total_failures):
self._port.show_results_html_file(self._filesystem.join
(self._artifacts_directory, 'results.html'))
self._printer.print_results(time.time() - start_time,
initial_results)
return test_run_results.RunDetails(exit_code,
summarized_full_results, summarized_failing_results,
initial_results, all_retry_results)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _run_test_loop(self, tests_to_run, tests_to_skip):
self._options.show_results = False
while True:
initial_results, all_retry_results = self._run_test_once(
tests_to_run, tests_to_skip, should_retry_failures=False)
for name in initial_results.failures_by_name:
failure = initial_results.failures_by_name[name][0]
if isinstance(failure, test_failures.FailureTextMismatch):
full_test_path = self._filesystem.join(self.
_artifacts_directory, name)
filename, _ = self._filesystem.splitext(full_test_path)
pretty_diff_path = ('file://' + filename +
'-pretty-diff.html')
self._printer.writeln('Link to pretty diff:')
self._printer.writeln(pretty_diff_path + '\n')
self._printer.writeln('Finished running tests')
user_input = self._port.host.user.prompt(
'Interactive watch mode: (q)uit (r)etry\n').lower()
if user_input == 'q' or user_input == 'quit':
return initial_results, all_retry_results
def _run_test_once(self, tests_to_run, tests_to_skip, should_retry_failures
):
num_workers = int(self._port.num_workers(int(self._options.
child_processes)))
initial_results = self._run_tests(tests_to_run, tests_to_skip, self
._options.repeat_each, self._options.iterations, num_workers)
should_retry_failures = (should_retry_failures and not
initial_results.interrupted)
tests_to_retry = self._tests_to_retry(initial_results)
all_retry_results = []
if should_retry_failures and tests_to_retry:
for retry_attempt in range(1, self._options.num_retries + 1):
if not tests_to_retry:
break
_log.info('')
_log.info('Retrying %s, attempt %d of %d...', grammar.
pluralize('unexpected failure', len(tests_to_retry)),
retry_attempt, self._options.num_retries)
retry_results = self._run_tests(tests_to_retry,
tests_to_skip=set(), repeat_each=1, iterations=1,
num_workers=num_workers, retry_attempt=retry_attempt)
all_retry_results.append(retry_results)
tests_to_retry = self._tests_to_retry(retry_results)
return initial_results, all_retry_results
def _restore_order(self, paths, test_names):
original_test_names = list(test_names)
test_names = []
for path in paths:
for test in original_test_names:
if test.startswith(path) or fnmatch.fnmatch(test, path):
test_names.append(test)
test_names += list(set(original_test_names) - set(test_names))
return test_names
<|reserved_special_token_0|>
def _is_http_test(self, test):
return (test.startswith(self.HTTP_SUBDIR + self._port.
TEST_PATH_SEPARATOR) or self._is_websocket_test(test) or self.
_port.TEST_PATH_SEPARATOR + self.HTTP_SUBDIR + self._port.
TEST_PATH_SEPARATOR in test)
def _is_websocket_test(self, test):
if self._port.should_use_wptserve(test):
return False
return self.WEBSOCKET_SUBDIR + self._port.TEST_PATH_SEPARATOR in test
def _http_tests(self, test_names):
return set(test for test in test_names if self._is_http_test(test))
def _is_perf_test(self, test):
return (self.PERF_SUBDIR == test or self.PERF_SUBDIR + self._port.
TEST_PATH_SEPARATOR in test)
def _prepare_lists(self, paths, test_names):
tests_to_skip = self._finder.skip_tests(paths, test_names, self.
_expectations)
tests_to_run = [test for test in test_names if test not in
tests_to_skip]
return tests_to_run, tests_to_skip
def _test_input_for_file(self, test_file, retry_attempt):
return TestInput(test_file, self._options.slow_timeout_ms if self.
_test_is_slow(test_file) else self._options.timeout_ms, self.
_test_requires_lock(test_file), retry_attempt=retry_attempt)
def _test_requires_lock(self, test_file):
"""Returns True if the test needs to be locked when running multiple
instances of this test runner.
Perf tests are locked because heavy load caused by running other
tests in parallel might cause some of them to time out.
"""
return self._is_perf_test(test_file)
def _test_is_slow(self, test_file):
if not self._expectations:
return False
is_slow_test = self._expectations.get_expectations(test_file
).is_slow_test
return is_slow_test or self._port.is_slow_wpt_test(test_file)
def _needs_servers(self, test_names):
return any(self._is_http_test(test_name) for test_name in test_names)
<|reserved_special_token_0|>
def _run_tests(self, tests_to_run, tests_to_skip, repeat_each,
iterations, num_workers, retry_attempt=0):
test_inputs = []
for _ in range(iterations):
for test in tests_to_run:
for _ in range(repeat_each):
test_inputs.append(self._test_input_for_file(test,
retry_attempt))
return self._runner.run_tests(self._expectations, test_inputs,
tests_to_skip, num_workers, retry_attempt)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _look_for_new_crash_logs(self, run_results, start_time):
"""Looks for and writes new crash logs, at the end of the test run.
Since crash logs can take a long time to be written out if the system is
under stress, do a second pass at the end of the test run.
Args:
run_results: The results of the test run.
start_time: Time the tests started at. We're looking for crash
logs after that time.
"""
crashed_processes = []
test_to_crash_failure = {}
test_failures.AbstractTestResultType.port = self._port
test_failures.AbstractTestResultType.result_directory = (self.
_results_directory)
test_failures.AbstractTestResultType.filesystem = self._filesystem
for test, result in run_results.unexpected_results_by_name.items():
if result.type != ResultType.Crash:
continue
for failure in result.failures:
if not isinstance(failure, test_failures.FailureCrash
) or failure.has_log:
continue
crashed_processes.append([test, failure.process_name,
failure.pid])
test_to_crash_failure[test] = failure
sample_files = self._port.look_for_new_samples(crashed_processes,
start_time) or {}
for test, sample_file in sample_files.items():
test_failures.AbstractTestResultType.test_name = test
test_result = run_results.unexpected_results_by_name[test]
artifact_relative_path = self._port.output_filename(test,
test_failures.FILENAME_SUFFIX_SAMPLE, '.txt')
artifacts_sub_dir = test_result.artifacts.ArtifactsSubDirectory()
artifact_abspath = self._filesystem.join(self.
_results_directory, artifacts_sub_dir, artifact_relative_path)
self._filesystem.maybe_make_directory(self._filesystem.dirname(
artifact_abspath))
self._filesystem.copyfile(sample_file, artifact_abspath)
test_result.artifacts.AddArtifact('sample_file', self.
_filesystem.join(artifacts_sub_dir, artifact_relative_path))
new_crash_logs = self._port.look_for_new_crash_logs(crashed_processes,
start_time) or {}
for test, (crash_log, crash_site) in new_crash_logs.items():
test_failures.AbstractTestResultType.test_name = test
failure.crash_log = crash_log
failure.has_log = self._port.output_contains_sanitizer_messages(
failure.crash_log)
test_result = run_results.unexpected_results_by_name[test]
test_result.crash_site = crash_site
test_to_crash_failure[test].create_artifacts(test_result.
artifacts, force_overwrite=True)
<|reserved_special_token_0|>
def _write_json_files(self, summarized_full_results,
summarized_failing_results, initial_results, running_all_tests,
run_histories):
_log.debug('Writing JSON files in %s.', self._artifacts_directory)
times_trie = json_results_generator.test_timings_trie(initial_results
.results_by_name.values())
times_json_path = self._filesystem.join(self._artifacts_directory,
'times_ms.json')
json_results_generator.write_json(self._filesystem, times_trie,
times_json_path)
if running_all_tests:
bot_test_times_path = self._port.bot_test_times_path()
self._filesystem.maybe_make_directory(self._filesystem.dirname(
bot_test_times_path))
json_results_generator.write_json(self._filesystem, times_trie,
bot_test_times_path)
stats_trie = self._stats_trie(initial_results)
stats_path = self._filesystem.join(self._artifacts_directory,
'stats.json')
self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))
full_results_path = self._filesystem.join(self._artifacts_directory,
'full_results.json')
json_results_generator.write_json(self._filesystem,
summarized_full_results, full_results_path)
full_results_jsonp_path = self._filesystem.join(self.
_artifacts_directory, 'full_results_jsonp.js')
json_results_generator.write_json(self._filesystem,
summarized_full_results, full_results_jsonp_path, callback=
'ADD_FULL_RESULTS')
failing_results_path = self._filesystem.join(self.
_artifacts_directory, 'failing_results.json')
json_results_generator.write_json(self._filesystem,
summarized_failing_results, failing_results_path, callback=
'ADD_RESULTS')
if self._options.json_test_results:
json_results_generator.write_json(self._filesystem,
summarized_full_results, self._options.json_test_results)
if self._options.write_run_histories_to:
json_results_generator.write_json(self._filesystem,
run_histories, self._options.write_run_histories_to)
_log.debug('Finished writing JSON files.')
def _copy_results_html_file(self, destination_dir, filename):
"""Copies a file from the template directory to the results directory."""
files_to_copy = [filename, filename + '.version']
template_dir = self._path_finder.path_from_blink_tools('blinkpy',
'web_tests')
for filename in files_to_copy:
source_path = self._filesystem.join(template_dir, filename)
destination_path = self._filesystem.join(destination_dir, filename)
if self._filesystem.exists(source_path):
self._filesystem.copyfile(source_path, destination_path)
def _stats_trie(self, initial_results):
def _worker_number(worker_name):
return int(worker_name.split('/')[1]) if worker_name else -1
stats = {}
for result in initial_results.results_by_name.values():
if result.type != ResultType.Skip:
stats[result.test_name] = {'results': (_worker_number(
result.worker_name), result.test_number, result.pid,
int(result.test_run_time * 1000), int(result.
total_run_time * 1000))}
stats_trie = {}
for name, value in stats.items():
json_results_generator.add_path_to_trie(name, value, stats_trie)
return stats_trie
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Manager(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, port, options, printer):
"""Initializes test runner data structures.
Args:
port: An object implementing platform-specific functionality.
options: An options argument which contains command line options.
printer: A Printer object to record updates to.
"""
self._port = port
self._filesystem = port.host.filesystem
self._options = options
self._printer = printer
self._expectations = None
self._http_server_started = False
self._wptserve_started = False
self._websockets_server_started = False
self._results_directory = self._port.results_directory()
self._artifacts_directory = self._port.artifacts_directory()
self._finder = WebTestFinder(self._port, self._options)
self._path_finder = PathFinder(port.host.filesystem)
self._sink = CreateTestResultSink(self._port)
self._runner = WebTestRunner(self._options, self._port, self.
_printer, self._results_directory, self._test_is_slow, self._sink)
def run(self, args):
"""Runs the tests and return a RunDetails object with the results."""
start_time = time.time()
self._printer.write_update('Collecting tests ...')
running_all_tests = False
try:
paths, all_test_names, running_all_tests = self._collect_tests(args
)
except IOError:
return test_run_results.RunDetails(exit_code=exit_codes.
NO_TESTS_EXIT_STATUS)
test_names = self._finder.split_into_chunks(all_test_names)
if self._options.order == 'natural':
test_names.sort(key=self._port.test_key)
elif self._options.order == 'random':
test_names.sort()
random.Random(self._options.seed).shuffle(test_names)
elif self._options.order == 'none':
if paths:
test_names = self._restore_order(paths, test_names)
if not self._options.no_expectations:
self._printer.write_update('Parsing expectations ...')
self._expectations = test_expectations.TestExpectations(self._port)
tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
self._printer.print_found(len(all_test_names), len(test_names), len
(tests_to_run), self._options.repeat_each, self._options.iterations
)
if not tests_to_run:
msg = 'No tests to run.'
if self._options.zero_tests_executed_ok:
_log.info(msg)
else:
_log.critical(msg)
code = exit_codes.NO_TESTS_EXIT_STATUS
return test_run_results.RunDetails(exit_code=code)
exit_code = self._set_up_run(tests_to_run)
if exit_code:
return test_run_results.RunDetails(exit_code=exit_code)
if self._options.num_retries is None:
if self._options.test_list or len(paths) < len(test_names):
self._options.num_retries = 3
else:
self._options.num_retries = 0
should_retry_failures = self._options.num_retries > 0
try:
self._register_termination_handler()
self._start_servers(tests_to_run)
if self._options.watch:
run_results = self._run_test_loop(tests_to_run, tests_to_skip)
else:
run_results = self._run_test_once(tests_to_run,
tests_to_skip, should_retry_failures)
initial_results, all_retry_results = run_results
finally:
_log.info('Finally stop servers and clean up')
self._stop_servers()
self._clean_up_run()
if self._options.no_expectations:
return test_run_results.RunDetails(0, [], [], initial_results,
all_retry_results)
self._printer.write_update('Looking for new crash logs ...')
self._look_for_new_crash_logs(initial_results, start_time)
for retry_attempt_results in all_retry_results:
self._look_for_new_crash_logs(retry_attempt_results, start_time)
self._printer.write_update('Summarizing results ...')
summarized_full_results = test_run_results.summarize_results(self.
_port, self._options, self._expectations, initial_results,
all_retry_results)
summarized_failing_results = test_run_results.summarize_results(self
._port, self._options, self._expectations, initial_results,
all_retry_results, only_include_failing=True)
run_histories = test_run_results.test_run_histories(self._options,
self._expectations, initial_results, all_retry_results)
exit_code = summarized_failing_results['num_regressions']
if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:
_log.warning('num regressions (%d) exceeds max exit status (%d)',
exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)
exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS
if not self._options.dry_run:
self._write_json_files(summarized_full_results,
summarized_failing_results, initial_results,
running_all_tests, run_histories)
self._copy_results_html_file(self._artifacts_directory,
'results.html')
if (initial_results.interrupt_reason is test_run_results.
InterruptReason.EXTERNAL_SIGNAL):
exit_code = exit_codes.INTERRUPTED_EXIT_STATUS
else:
if initial_results.interrupted:
exit_code = exit_codes.EARLY_EXIT_STATUS
if self._options.show_results and (exit_code or
initial_results.total_failures):
self._port.show_results_html_file(self._filesystem.join
(self._artifacts_directory, 'results.html'))
self._printer.print_results(time.time() - start_time,
initial_results)
return test_run_results.RunDetails(exit_code,
summarized_full_results, summarized_failing_results,
initial_results, all_retry_results)
<|reserved_special_token_0|>
def _on_termination(self, signum, _frame):
self._printer.write_update('Received signal "%s" (%d) in %d' % (
signal.strsignal(signum), signum, os.getpid()))
raise KeyboardInterrupt
def _run_test_loop(self, tests_to_run, tests_to_skip):
self._options.show_results = False
while True:
initial_results, all_retry_results = self._run_test_once(
tests_to_run, tests_to_skip, should_retry_failures=False)
for name in initial_results.failures_by_name:
failure = initial_results.failures_by_name[name][0]
if isinstance(failure, test_failures.FailureTextMismatch):
full_test_path = self._filesystem.join(self.
_artifacts_directory, name)
filename, _ = self._filesystem.splitext(full_test_path)
pretty_diff_path = ('file://' + filename +
'-pretty-diff.html')
self._printer.writeln('Link to pretty diff:')
self._printer.writeln(pretty_diff_path + '\n')
self._printer.writeln('Finished running tests')
user_input = self._port.host.user.prompt(
'Interactive watch mode: (q)uit (r)etry\n').lower()
if user_input == 'q' or user_input == 'quit':
return initial_results, all_retry_results
def _run_test_once(self, tests_to_run, tests_to_skip, should_retry_failures
):
num_workers = int(self._port.num_workers(int(self._options.
child_processes)))
initial_results = self._run_tests(tests_to_run, tests_to_skip, self
._options.repeat_each, self._options.iterations, num_workers)
should_retry_failures = (should_retry_failures and not
initial_results.interrupted)
tests_to_retry = self._tests_to_retry(initial_results)
all_retry_results = []
if should_retry_failures and tests_to_retry:
for retry_attempt in range(1, self._options.num_retries + 1):
if not tests_to_retry:
break
_log.info('')
_log.info('Retrying %s, attempt %d of %d...', grammar.
pluralize('unexpected failure', len(tests_to_retry)),
retry_attempt, self._options.num_retries)
retry_results = self._run_tests(tests_to_retry,
tests_to_skip=set(), repeat_each=1, iterations=1,
num_workers=num_workers, retry_attempt=retry_attempt)
all_retry_results.append(retry_results)
tests_to_retry = self._tests_to_retry(retry_results)
return initial_results, all_retry_results
def _restore_order(self, paths, test_names):
original_test_names = list(test_names)
test_names = []
for path in paths:
for test in original_test_names:
if test.startswith(path) or fnmatch.fnmatch(test, path):
test_names.append(test)
test_names += list(set(original_test_names) - set(test_names))
return test_names
<|reserved_special_token_0|>
def _is_http_test(self, test):
return (test.startswith(self.HTTP_SUBDIR + self._port.
TEST_PATH_SEPARATOR) or self._is_websocket_test(test) or self.
_port.TEST_PATH_SEPARATOR + self.HTTP_SUBDIR + self._port.
TEST_PATH_SEPARATOR in test)
def _is_websocket_test(self, test):
if self._port.should_use_wptserve(test):
return False
return self.WEBSOCKET_SUBDIR + self._port.TEST_PATH_SEPARATOR in test
def _http_tests(self, test_names):
return set(test for test in test_names if self._is_http_test(test))
def _is_perf_test(self, test):
return (self.PERF_SUBDIR == test or self.PERF_SUBDIR + self._port.
TEST_PATH_SEPARATOR in test)
def _prepare_lists(self, paths, test_names):
tests_to_skip = self._finder.skip_tests(paths, test_names, self.
_expectations)
tests_to_run = [test for test in test_names if test not in
tests_to_skip]
return tests_to_run, tests_to_skip
def _test_input_for_file(self, test_file, retry_attempt):
return TestInput(test_file, self._options.slow_timeout_ms if self.
_test_is_slow(test_file) else self._options.timeout_ms, self.
_test_requires_lock(test_file), retry_attempt=retry_attempt)
def _test_requires_lock(self, test_file):
"""Returns True if the test needs to be locked when running multiple
instances of this test runner.
Perf tests are locked because heavy load caused by running other
tests in parallel might cause some of them to time out.
"""
return self._is_perf_test(test_file)
def _test_is_slow(self, test_file):
if not self._expectations:
return False
is_slow_test = self._expectations.get_expectations(test_file
).is_slow_test
return is_slow_test or self._port.is_slow_wpt_test(test_file)
def _needs_servers(self, test_names):
return any(self._is_http_test(test_name) for test_name in test_names)
<|reserved_special_token_0|>
def _run_tests(self, tests_to_run, tests_to_skip, repeat_each,
iterations, num_workers, retry_attempt=0):
test_inputs = []
for _ in range(iterations):
for test in tests_to_run:
for _ in range(repeat_each):
test_inputs.append(self._test_input_for_file(test,
retry_attempt))
return self._runner.run_tests(self._expectations, test_inputs,
tests_to_skip, num_workers, retry_attempt)
def _start_servers(self, tests_to_run):
if any(self._port.is_wpt_test(test) for test in tests_to_run):
self._printer.write_update('Starting WPTServe ...')
self._port.start_wptserve()
self._wptserve_started = True
if self._port.requires_http_server() or any(self._is_http_test(test
) for test in tests_to_run):
self._printer.write_update('Starting HTTP server ...')
self._port.start_http_server(additional_dirs={},
number_of_drivers=self._options.max_locked_shards)
self._http_server_started = True
if any(self._is_websocket_test(test) for test in tests_to_run):
self._printer.write_update('Starting WebSocket server ...')
self._port.start_websocket_server()
self._websockets_server_started = True
def _stop_servers(self):
if self._wptserve_started:
self._printer.write_update('Stopping WPTServe ...')
self._wptserve_started = False
self._port.stop_wptserve()
if self._http_server_started:
self._printer.write_update('Stopping HTTP server ...')
self._http_server_started = False
self._port.stop_http_server()
if self._websockets_server_started:
self._printer.write_update('Stopping WebSocket server ...')
self._websockets_server_started = False
self._port.stop_websocket_server()
def _clean_up_run(self):
_log.debug('Flushing stdout')
sys.stdout.flush()
_log.debug('Flushing stderr')
sys.stderr.flush()
_log.debug('Cleaning up port')
self._port.clean_up_test_run()
if self._sink:
_log.debug('Closing sink')
self._sink.close()
def _look_for_new_crash_logs(self, run_results, start_time):
"""Looks for and writes new crash logs, at the end of the test run.
Since crash logs can take a long time to be written out if the system is
under stress, do a second pass at the end of the test run.
Args:
run_results: The results of the test run.
start_time: Time the tests started at. We're looking for crash
logs after that time.
"""
crashed_processes = []
test_to_crash_failure = {}
test_failures.AbstractTestResultType.port = self._port
test_failures.AbstractTestResultType.result_directory = (self.
_results_directory)
test_failures.AbstractTestResultType.filesystem = self._filesystem
for test, result in run_results.unexpected_results_by_name.items():
if result.type != ResultType.Crash:
continue
for failure in result.failures:
if not isinstance(failure, test_failures.FailureCrash
) or failure.has_log:
continue
crashed_processes.append([test, failure.process_name,
failure.pid])
test_to_crash_failure[test] = failure
sample_files = self._port.look_for_new_samples(crashed_processes,
start_time) or {}
for test, sample_file in sample_files.items():
test_failures.AbstractTestResultType.test_name = test
test_result = run_results.unexpected_results_by_name[test]
artifact_relative_path = self._port.output_filename(test,
test_failures.FILENAME_SUFFIX_SAMPLE, '.txt')
artifacts_sub_dir = test_result.artifacts.ArtifactsSubDirectory()
artifact_abspath = self._filesystem.join(self.
_results_directory, artifacts_sub_dir, artifact_relative_path)
self._filesystem.maybe_make_directory(self._filesystem.dirname(
artifact_abspath))
self._filesystem.copyfile(sample_file, artifact_abspath)
test_result.artifacts.AddArtifact('sample_file', self.
_filesystem.join(artifacts_sub_dir, artifact_relative_path))
new_crash_logs = self._port.look_for_new_crash_logs(crashed_processes,
start_time) or {}
for test, (crash_log, crash_site) in new_crash_logs.items():
test_failures.AbstractTestResultType.test_name = test
failure.crash_log = crash_log
failure.has_log = self._port.output_contains_sanitizer_messages(
failure.crash_log)
test_result = run_results.unexpected_results_by_name[test]
test_result.crash_site = crash_site
test_to_crash_failure[test].create_artifacts(test_result.
artifacts, force_overwrite=True)
def _tests_to_retry(self, run_results):
return [result.test_name for result in run_results.
unexpected_results_by_name.values() if result.type !=
ResultType.Pass]
    def _write_json_files(self, summarized_full_results,
        summarized_failing_results, initial_results, running_all_tests,
        run_histories):
        """Writes the JSON result files into the artifacts directory.

        Emits times_ms.json, stats.json, full_results.json,
        full_results_jsonp.js and failing_results.json, plus (optionally)
        the bot test-times file and any paths requested on the command line.
        """
        _log.debug('Writing JSON files in %s.', self._artifacts_directory)
        times_trie = json_results_generator.test_timings_trie(initial_results
            .results_by_name.values())
        times_json_path = self._filesystem.join(self._artifacts_directory,
            'times_ms.json')
        json_results_generator.write_json(self._filesystem, times_trie,
            times_json_path)
        # Only a full run produces timings representative enough for the bot.
        if running_all_tests:
            bot_test_times_path = self._port.bot_test_times_path()
            self._filesystem.maybe_make_directory(self._filesystem.dirname(
                bot_test_times_path))
            json_results_generator.write_json(self._filesystem, times_trie,
                bot_test_times_path)
        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._artifacts_directory,
            'stats.json')
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))
        full_results_path = self._filesystem.join(self._artifacts_directory,
            'full_results.json')
        json_results_generator.write_json(self._filesystem,
            summarized_full_results, full_results_path)
        # The .js variants wrap the JSON in a callback so results.html can
        # load them via a <script> tag.
        full_results_jsonp_path = self._filesystem.join(self.
            _artifacts_directory, 'full_results_jsonp.js')
        json_results_generator.write_json(self._filesystem,
            summarized_full_results, full_results_jsonp_path, callback=
            'ADD_FULL_RESULTS')
        failing_results_path = self._filesystem.join(self.
            _artifacts_directory, 'failing_results.json')
        json_results_generator.write_json(self._filesystem,
            summarized_failing_results, failing_results_path, callback=
            'ADD_RESULTS')
        if self._options.json_test_results:
            json_results_generator.write_json(self._filesystem,
                summarized_full_results, self._options.json_test_results)
        if self._options.write_run_histories_to:
            json_results_generator.write_json(self._filesystem,
                run_histories, self._options.write_run_histories_to)
        _log.debug('Finished writing JSON files.')
def _copy_results_html_file(self, destination_dir, filename):
"""Copies a file from the template directory to the results directory."""
files_to_copy = [filename, filename + '.version']
template_dir = self._path_finder.path_from_blink_tools('blinkpy',
'web_tests')
for filename in files_to_copy:
source_path = self._filesystem.join(template_dir, filename)
destination_path = self._filesystem.join(destination_dir, filename)
if self._filesystem.exists(source_path):
self._filesystem.copyfile(source_path, destination_path)
def _stats_trie(self, initial_results):
def _worker_number(worker_name):
return int(worker_name.split('/')[1]) if worker_name else -1
stats = {}
for result in initial_results.results_by_name.values():
if result.type != ResultType.Skip:
stats[result.test_name] = {'results': (_worker_number(
result.worker_name), result.test_number, result.pid,
int(result.test_run_time * 1000), int(result.
total_run_time * 1000))}
stats_trie = {}
for name, value in stats.items():
json_results_generator.add_path_to_trie(name, value, stats_trie)
return stats_trie
<|reserved_special_token_1|>
<|reserved_special_token_0|>
_log = logging.getLogger(__name__)
TestExpectations = test_expectations.TestExpectations
class Manager(object):
"""A class for managing running a series of web tests."""
HTTP_SUBDIR = 'http'
PERF_SUBDIR = 'perf'
WEBSOCKET_SUBDIR = 'websocket'
ARCHIVED_RESULTS_LIMIT = 25
    def __init__(self, port, options, printer):
        """Initializes test runner data structures.

        Args:
            port: An object implementing platform-specific functionality.
            options: An options argument which contains command line options.
            printer: A Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        # Parsed lazily in run() unless --no-expectations was given.
        self._expectations = None
        # Server state flags, flipped by _start_servers/_stop_servers.
        self._http_server_started = False
        self._wptserve_started = False
        self._websockets_server_started = False
        self._results_directory = self._port.results_directory()
        self._artifacts_directory = self._port.artifacts_directory()
        self._finder = WebTestFinder(self._port, self._options)
        self._path_finder = PathFinder(port.host.filesystem)
        # May be falsy if no result sink is configured (see _clean_up_run).
        self._sink = CreateTestResultSink(self._port)
        self._runner = WebTestRunner(self._options, self._port, self.
            _printer, self._results_directory, self._test_is_slow, self._sink)
    def run(self, args):
        """Runs the tests and return a RunDetails object with the results.

        Args:
            args: Test paths/names, forwarded to the test finder.
        """
        start_time = time.time()
        self._printer.write_update('Collecting tests ...')
        running_all_tests = False
        try:
            paths, all_test_names, running_all_tests = self._collect_tests(args
                )
        except IOError:
            return test_run_results.RunDetails(exit_code=exit_codes.
                NO_TESTS_EXIT_STATUS)
        test_names = self._finder.split_into_chunks(all_test_names)
        if self._options.order == 'natural':
            test_names.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            # Sort first so the seeded shuffle is reproducible regardless of
            # the incoming order.
            test_names.sort()
            random.Random(self._options.seed).shuffle(test_names)
        elif self._options.order == 'none':
            # 'none' keeps the order in which the paths were given.
            if paths:
                test_names = self._restore_order(paths, test_names)
        if not self._options.no_expectations:
            self._printer.write_update('Parsing expectations ...')
            self._expectations = test_expectations.TestExpectations(self._port)
        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(all_test_names), len(test_names), len
            (tests_to_run), self._options.repeat_each, self._options.iterations
            )
        if not tests_to_run:
            msg = 'No tests to run.'
            # With --zero-tests-executed-ok an empty run is not an error and
            # execution falls through.
            if self._options.zero_tests_executed_ok:
                _log.info(msg)
            else:
                _log.critical(msg)
                code = exit_codes.NO_TESTS_EXIT_STATUS
                return test_run_results.RunDetails(exit_code=code)
        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)
        if self._options.num_retries is None:
            # Default retry policy: retry only when a subset of tests was
            # explicitly requested.
            if self._options.test_list or len(paths) < len(test_names):
                self._options.num_retries = 3
            else:
                self._options.num_retries = 0
        should_retry_failures = self._options.num_retries > 0
        try:
            self._register_termination_handler()
            self._start_servers(tests_to_run)
            if self._options.watch:
                run_results = self._run_test_loop(tests_to_run, tests_to_skip)
            else:
                run_results = self._run_test_once(tests_to_run,
                    tests_to_skip, should_retry_failures)
            initial_results, all_retry_results = run_results
        finally:
            # Always stop servers and clean up, even on interrupt or crash.
            _log.info('Finally stop servers and clean up')
            self._stop_servers()
            self._clean_up_run()
        if self._options.no_expectations:
            return test_run_results.RunDetails(0, [], [], initial_results,
                all_retry_results)
        self._printer.write_update('Looking for new crash logs ...')
        self._look_for_new_crash_logs(initial_results, start_time)
        for retry_attempt_results in all_retry_results:
            self._look_for_new_crash_logs(retry_attempt_results, start_time)
        self._printer.write_update('Summarizing results ...')
        summarized_full_results = test_run_results.summarize_results(self.
            _port, self._options, self._expectations, initial_results,
            all_retry_results)
        summarized_failing_results = test_run_results.summarize_results(self
            ._port, self._options, self._expectations, initial_results,
            all_retry_results, only_include_failing=True)
        run_histories = test_run_results.test_run_histories(self._options,
            self._expectations, initial_results, all_retry_results)
        exit_code = summarized_failing_results['num_regressions']
        if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:
            # Clamp so the failure count cannot collide with the reserved
            # exit statuses above MAX_FAILURES_EXIT_STATUS.
            _log.warning('num regressions (%d) exceeds max exit status (%d)',
                exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)
            exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS
        if not self._options.dry_run:
            self._write_json_files(summarized_full_results,
                summarized_failing_results, initial_results,
                running_all_tests, run_histories)
            self._copy_results_html_file(self._artifacts_directory,
                'results.html')
            if (initial_results.interrupt_reason is test_run_results.
                InterruptReason.EXTERNAL_SIGNAL):
                exit_code = exit_codes.INTERRUPTED_EXIT_STATUS
            else:
                if initial_results.interrupted:
                    exit_code = exit_codes.EARLY_EXIT_STATUS
                if self._options.show_results and (exit_code or
                    initial_results.total_failures):
                    self._port.show_results_html_file(self._filesystem.join
                        (self._artifacts_directory, 'results.html'))
            self._printer.print_results(time.time() - start_time,
                initial_results)
        return test_run_results.RunDetails(exit_code,
            summarized_full_results, summarized_failing_results,
            initial_results, all_retry_results)
def _register_termination_handler(self):
if self._port.host.platform.is_win():
signum = signal.SIGBREAK
else:
signum = signal.SIGTERM
signal.signal(signum, self._on_termination)
def _on_termination(self, signum, _frame):
self._printer.write_update('Received signal "%s" (%d) in %d' % (
signal.strsignal(signum), signum, os.getpid()))
raise KeyboardInterrupt
    def _run_test_loop(self, tests_to_run, tests_to_skip):
        """Runs the tests repeatedly in interactive watch mode.

        After each pass, prints a pretty-diff link for every text mismatch,
        then prompts; 'q' or 'quit' exits, anything else reruns the tests.
        """
        # Don't pop open the results page after every iteration.
        self._options.show_results = False
        while True:
            initial_results, all_retry_results = self._run_test_once(
                tests_to_run, tests_to_skip, should_retry_failures=False)
            for name in initial_results.failures_by_name:
                # Only the first recorded failure per test is inspected.
                failure = initial_results.failures_by_name[name][0]
                if isinstance(failure, test_failures.FailureTextMismatch):
                    full_test_path = self._filesystem.join(self.
                        _artifacts_directory, name)
                    filename, _ = self._filesystem.splitext(full_test_path)
                    pretty_diff_path = ('file://' + filename +
                        '-pretty-diff.html')
                    self._printer.writeln('Link to pretty diff:')
                    self._printer.writeln(pretty_diff_path + '\n')
            self._printer.writeln('Finished running tests')
            user_input = self._port.host.user.prompt(
                'Interactive watch mode: (q)uit (r)etry\n').lower()
            if user_input == 'q' or user_input == 'quit':
                return initial_results, all_retry_results
    def _run_test_once(self, tests_to_run, tests_to_skip, should_retry_failures
        ):
        """Runs one full pass of the tests, then retries unexpected failures.

        Returns:
            (initial_results, all_retry_results): results of the first pass
            and a list of results, one entry per retry attempt.
        """
        num_workers = int(self._port.num_workers(int(self._options.
            child_processes)))
        initial_results = self._run_tests(tests_to_run, tests_to_skip, self
            ._options.repeat_each, self._options.iterations, num_workers)
        # Never retry if the initial run was interrupted.
        should_retry_failures = (should_retry_failures and not
            initial_results.interrupted)
        tests_to_retry = self._tests_to_retry(initial_results)
        all_retry_results = []
        if should_retry_failures and tests_to_retry:
            for retry_attempt in range(1, self._options.num_retries + 1):
                if not tests_to_retry:
                    break
                _log.info('')
                _log.info('Retrying %s, attempt %d of %d...', grammar.
                    pluralize('unexpected failure', len(tests_to_retry)),
                    retry_attempt, self._options.num_retries)
                # Retries run each remaining test exactly once.
                retry_results = self._run_tests(tests_to_retry,
                    tests_to_skip=set(), repeat_each=1, iterations=1,
                    num_workers=num_workers, retry_attempt=retry_attempt)
                all_retry_results.append(retry_results)
                tests_to_retry = self._tests_to_retry(retry_results)
        return initial_results, all_retry_results
def _restore_order(self, paths, test_names):
original_test_names = list(test_names)
test_names = []
for path in paths:
for test in original_test_names:
if test.startswith(path) or fnmatch.fnmatch(test, path):
test_names.append(test)
test_names += list(set(original_test_names) - set(test_names))
return test_names
def _collect_tests(self, args):
return self._finder.find_tests(args, test_lists=self._options.
test_list, filter_files=self._options.
isolated_script_test_filter_file, fastest_percentile=self.
_options.fastest, filters=self._options.isolated_script_test_filter
)
def _is_http_test(self, test):
return (test.startswith(self.HTTP_SUBDIR + self._port.
TEST_PATH_SEPARATOR) or self._is_websocket_test(test) or self.
_port.TEST_PATH_SEPARATOR + self.HTTP_SUBDIR + self._port.
TEST_PATH_SEPARATOR in test)
def _is_websocket_test(self, test):
if self._port.should_use_wptserve(test):
return False
return self.WEBSOCKET_SUBDIR + self._port.TEST_PATH_SEPARATOR in test
def _http_tests(self, test_names):
return set(test for test in test_names if self._is_http_test(test))
def _is_perf_test(self, test):
return (self.PERF_SUBDIR == test or self.PERF_SUBDIR + self._port.
TEST_PATH_SEPARATOR in test)
def _prepare_lists(self, paths, test_names):
tests_to_skip = self._finder.skip_tests(paths, test_names, self.
_expectations)
tests_to_run = [test for test in test_names if test not in
tests_to_skip]
return tests_to_run, tests_to_skip
def _test_input_for_file(self, test_file, retry_attempt):
return TestInput(test_file, self._options.slow_timeout_ms if self.
_test_is_slow(test_file) else self._options.timeout_ms, self.
_test_requires_lock(test_file), retry_attempt=retry_attempt)
    def _test_requires_lock(self, test_file):
        """Returns True if the test needs to be locked when running multiple
        instances of this test runner.

        Perf tests are locked because heavy load caused by running other
        tests in parallel might cause some of them to time out.
        """
        return self._is_perf_test(test_file)
def _test_is_slow(self, test_file):
if not self._expectations:
return False
is_slow_test = self._expectations.get_expectations(test_file
).is_slow_test
return is_slow_test or self._port.is_slow_wpt_test(test_file)
def _needs_servers(self, test_names):
return any(self._is_http_test(test_name) for test_name in test_names)
    def _set_up_run(self, test_names):
        """Prepares the environment before running tests.

        Checks the build, clobbers or archives old results, creates the
        artifacts directory, runs port-specific setup, and (optionally)
        verifies system dependencies.

        Returns:
            exit_codes.OK_EXIT_STATUS on success, otherwise the failing
            step's non-zero exit code.
        """
        self._printer.write_update('Checking build ...')
        if self._options.build:
            exit_code = self._port.check_build(self._needs_servers(
                test_names), self._printer)
            if exit_code:
                _log.error('Build check failed')
                return exit_code
        if self._options.clobber_old_results:
            self._port.clobber_old_results()
        elif self._filesystem.exists(self._artifacts_directory):
            # Keep old results around, but cap how many archives accumulate.
            self._port.limit_archived_results_count()
            self._port.rename_results_folder()
        self._port.host.filesystem.maybe_make_directory(self.
            _artifacts_directory)
        exit_code = self._port.setup_test_run()
        if exit_code:
            _log.error('Build setup failed')
            return exit_code
        if not self._options.nocheck_sys_deps:
            self._printer.write_update('Checking system dependencies ...')
            exit_code = self._port.check_sys_deps()
            if exit_code:
                return exit_code
        return exit_codes.OK_EXIT_STATUS
def _run_tests(self, tests_to_run, tests_to_skip, repeat_each,
iterations, num_workers, retry_attempt=0):
test_inputs = []
for _ in range(iterations):
for test in tests_to_run:
for _ in range(repeat_each):
test_inputs.append(self._test_input_for_file(test,
retry_attempt))
return self._runner.run_tests(self._expectations, test_inputs,
tests_to_skip, num_workers, retry_attempt)
    def _start_servers(self, tests_to_run):
        """Starts whichever servers the given tests need.

        Each started server (WPTServe, HTTP, WebSocket) is recorded in a
        flag so _stop_servers only stops what was actually started.
        """
        if any(self._port.is_wpt_test(test) for test in tests_to_run):
            self._printer.write_update('Starting WPTServe ...')
            self._port.start_wptserve()
            self._wptserve_started = True
        # The port may demand an HTTP server even when no http test runs.
        if self._port.requires_http_server() or any(self._is_http_test(test
            ) for test in tests_to_run):
            self._printer.write_update('Starting HTTP server ...')
            self._port.start_http_server(additional_dirs={},
                number_of_drivers=self._options.max_locked_shards)
            self._http_server_started = True
        if any(self._is_websocket_test(test) for test in tests_to_run):
            self._printer.write_update('Starting WebSocket server ...')
            self._port.start_websocket_server()
            self._websockets_server_started = True
def _stop_servers(self):
if self._wptserve_started:
self._printer.write_update('Stopping WPTServe ...')
self._wptserve_started = False
self._port.stop_wptserve()
if self._http_server_started:
self._printer.write_update('Stopping HTTP server ...')
self._http_server_started = False
self._port.stop_http_server()
if self._websockets_server_started:
self._printer.write_update('Stopping WebSocket server ...')
self._websockets_server_started = False
self._port.stop_websocket_server()
    def _clean_up_run(self):
        """Flushes output streams, lets the port clean up, closes the sink."""
        _log.debug('Flushing stdout')
        sys.stdout.flush()
        _log.debug('Flushing stderr')
        sys.stderr.flush()
        _log.debug('Cleaning up port')
        self._port.clean_up_test_run()
        # The sink may be falsy when no result sink was configured.
        if self._sink:
            _log.debug('Closing sink')
            self._sink.close()
def _look_for_new_crash_logs(self, run_results, start_time):
"""Looks for and writes new crash logs, at the end of the test run.
Since crash logs can take a long time to be written out if the system is
under stress, do a second pass at the end of the test run.
Args:
run_results: The results of the test run.
start_time: Time the tests started at. We're looking for crash
logs after that time.
"""
crashed_processes = []
test_to_crash_failure = {}
test_failures.AbstractTestResultType.port = self._port
test_failures.AbstractTestResultType.result_directory = (self.
_results_directory)
test_failures.AbstractTestResultType.filesystem = self._filesystem
for test, result in run_results.unexpected_results_by_name.items():
if result.type != ResultType.Crash:
continue
for failure in result.failures:
if not isinstance(failure, test_failures.FailureCrash
) or failure.has_log:
continue
crashed_processes.append([test, failure.process_name,
failure.pid])
test_to_crash_failure[test] = failure
sample_files = self._port.look_for_new_samples(crashed_processes,
start_time) or {}
for test, sample_file in sample_files.items():
test_failures.AbstractTestResultType.test_name = test
test_result = run_results.unexpected_results_by_name[test]
artifact_relative_path = self._port.output_filename(test,
test_failures.FILENAME_SUFFIX_SAMPLE, '.txt')
artifacts_sub_dir = test_result.artifacts.ArtifactsSubDirectory()
artifact_abspath = self._filesystem.join(self.
_results_directory, artifacts_sub_dir, artifact_relative_path)
self._filesystem.maybe_make_directory(self._filesystem.dirname(
artifact_abspath))
self._filesystem.copyfile(sample_file, artifact_abspath)
test_result.artifacts.AddArtifact('sample_file', self.
_filesystem.join(artifacts_sub_dir, artifact_relative_path))
new_crash_logs = self._port.look_for_new_crash_logs(crashed_processes,
start_time) or {}
for test, (crash_log, crash_site) in new_crash_logs.items():
test_failures.AbstractTestResultType.test_name = test
failure.crash_log = crash_log
failure.has_log = self._port.output_contains_sanitizer_messages(
failure.crash_log)
test_result = run_results.unexpected_results_by_name[test]
test_result.crash_site = crash_site
test_to_crash_failure[test].create_artifacts(test_result.
artifacts, force_overwrite=True)
def _tests_to_retry(self, run_results):
return [result.test_name for result in run_results.
unexpected_results_by_name.values() if result.type !=
ResultType.Pass]
def _write_json_files(self, summarized_full_results,
summarized_failing_results, initial_results, running_all_tests,
run_histories):
_log.debug('Writing JSON files in %s.', self._artifacts_directory)
times_trie = json_results_generator.test_timings_trie(initial_results
.results_by_name.values())
times_json_path = self._filesystem.join(self._artifacts_directory,
'times_ms.json')
json_results_generator.write_json(self._filesystem, times_trie,
times_json_path)
if running_all_tests:
bot_test_times_path = self._port.bot_test_times_path()
self._filesystem.maybe_make_directory(self._filesystem.dirname(
bot_test_times_path))
json_results_generator.write_json(self._filesystem, times_trie,
bot_test_times_path)
stats_trie = self._stats_trie(initial_results)
stats_path = self._filesystem.join(self._artifacts_directory,
'stats.json')
self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))
full_results_path = self._filesystem.join(self._artifacts_directory,
'full_results.json')
json_results_generator.write_json(self._filesystem,
summarized_full_results, full_results_path)
full_results_jsonp_path = self._filesystem.join(self.
_artifacts_directory, 'full_results_jsonp.js')
json_results_generator.write_json(self._filesystem,
summarized_full_results, full_results_jsonp_path, callback=
'ADD_FULL_RESULTS')
failing_results_path = self._filesystem.join(self.
_artifacts_directory, 'failing_results.json')
json_results_generator.write_json(self._filesystem,
summarized_failing_results, failing_results_path, callback=
'ADD_RESULTS')
if self._options.json_test_results:
json_results_generator.write_json(self._filesystem,
summarized_full_results, self._options.json_test_results)
if self._options.write_run_histories_to:
json_results_generator.write_json(self._filesystem,
run_histories, self._options.write_run_histories_to)
_log.debug('Finished writing JSON files.')
def _copy_results_html_file(self, destination_dir, filename):
"""Copies a file from the template directory to the results directory."""
files_to_copy = [filename, filename + '.version']
template_dir = self._path_finder.path_from_blink_tools('blinkpy',
'web_tests')
for filename in files_to_copy:
source_path = self._filesystem.join(template_dir, filename)
destination_path = self._filesystem.join(destination_dir, filename)
if self._filesystem.exists(source_path):
self._filesystem.copyfile(source_path, destination_path)
def _stats_trie(self, initial_results):
def _worker_number(worker_name):
return int(worker_name.split('/')[1]) if worker_name else -1
stats = {}
for result in initial_results.results_by_name.values():
if result.type != ResultType.Skip:
stats[result.test_name] = {'results': (_worker_number(
result.worker_name), result.test_number, result.pid,
int(result.test_run_time * 1000), int(result.
total_run_time * 1000))}
stats_trie = {}
for name, value in stats.items():
json_results_generator.add_path_to_trie(name, value, stats_trie)
return stats_trie
<|reserved_special_token_1|>
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""The Manager orchestrates the overall process of running web tests.
This includes finding tests to run, reading the test expectations,
starting the required helper servers, deciding the order and way to
run the tests, retrying failed tests, and collecting the test results,
including crash logs and mismatches with expectations.
The Manager object has a constructor and one main method called run.
"""
import fnmatch
import json
import logging
import os
import random
import signal
import sys
import time
from blinkpy.common import exit_codes
from blinkpy.common.path_finder import PathFinder
from blinkpy.tool import grammar
from blinkpy.web_tests.controllers.test_result_sink import CreateTestResultSink
from blinkpy.web_tests.controllers.web_test_finder import WebTestFinder
from blinkpy.web_tests.controllers.web_test_runner import WebTestRunner
from blinkpy.web_tests.layout_package import json_results_generator
from blinkpy.web_tests.models import test_expectations
from blinkpy.web_tests.models import test_failures
from blinkpy.web_tests.models import test_run_results
from blinkpy.web_tests.models.typ_types import ResultType
from blinkpy.web_tests.models.test_input import TestInput
_log = logging.getLogger(__name__)
TestExpectations = test_expectations.TestExpectations
class Manager(object):
"""A class for managing running a series of web tests."""
HTTP_SUBDIR = 'http'
PERF_SUBDIR = 'perf'
WEBSOCKET_SUBDIR = 'websocket'
ARCHIVED_RESULTS_LIMIT = 25
    def __init__(self, port, options, printer):
        """Initializes test runner data structures.
        Args:
            port: An object implementing platform-specific functionality.
            options: An options argument which contains command line options.
            printer: A Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        # Parsed expectations; stays None until run() parses them (and for
        # --no-expectations runs).
        self._expectations = None
        # Server bookkeeping so _stop_servers() only stops what was started.
        self._http_server_started = False
        self._wptserve_started = False
        self._websockets_server_started = False
        self._results_directory = self._port.results_directory()
        self._artifacts_directory = self._port.artifacts_directory()
        self._finder = WebTestFinder(self._port, self._options)
        self._path_finder = PathFinder(port.host.filesystem)
        # May be falsy; _clean_up_run() checks before closing it.
        self._sink = CreateTestResultSink(self._port)
        self._runner = WebTestRunner(self._options, self._port, self._printer,
                                     self._results_directory,
                                     self._test_is_slow, self._sink)
    def run(self, args):
        """Runs the tests and return a RunDetails object with the results.
        Orchestrates the whole run: collect/order tests, parse expectations,
        set up the build and servers, run (once or in watch mode), do a late
        crash-log pass, summarize, and write result artifacts.
        """
        start_time = time.time()
        self._printer.write_update('Collecting tests ...')
        running_all_tests = False
        try:
            paths, all_test_names, running_all_tests = self._collect_tests(
                args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(
                exit_code=exit_codes.NO_TESTS_EXIT_STATUS)
        test_names = self._finder.split_into_chunks(all_test_names)
        if self._options.order == 'natural':
            test_names.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            # Sort first so the shuffle is deterministic for a given seed.
            test_names.sort()
            random.Random(self._options.seed).shuffle(test_names)
        elif self._options.order == 'none':
            # Restore the test order to user specified order.
            # base.tests() may change the order as it returns tests in the
            # real, external/wpt, virtual order.
            if paths:
                test_names = self._restore_order(paths, test_names)
        if not self._options.no_expectations:
            self._printer.write_update('Parsing expectations ...')
            self._expectations = test_expectations.TestExpectations(self._port)
        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(
            len(all_test_names), len(test_names), len(tests_to_run),
            self._options.repeat_each, self._options.iterations)
        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            msg = 'No tests to run.'
            if self._options.zero_tests_executed_ok:
                _log.info(msg)
                # Keep executing to produce valid (but empty) results.
            else:
                _log.critical(msg)
                code = exit_codes.NO_TESTS_EXIT_STATUS
                return test_run_results.RunDetails(exit_code=code)
        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)
        if self._options.num_retries is None:
            # If --test-list is passed, or if no test narrowing is specified,
            # default to 3 retries. Otherwise [e.g. if tests are being passed by
            # name], default to 0 retries.
            if self._options.test_list or len(paths) < len(test_names):
                self._options.num_retries = 3
            else:
                self._options.num_retries = 0
        should_retry_failures = self._options.num_retries > 0
        try:
            self._register_termination_handler()
            self._start_servers(tests_to_run)
            if self._options.watch:
                run_results = self._run_test_loop(tests_to_run, tests_to_skip)
            else:
                run_results = self._run_test_once(tests_to_run, tests_to_skip,
                                                  should_retry_failures)
            initial_results, all_retry_results = run_results
        finally:
            # Servers/cleanup must run even on interrupt or failure.
            _log.info("Finally stop servers and clean up")
            self._stop_servers()
            self._clean_up_run()
        if self._options.no_expectations:
            return test_run_results.RunDetails(0, [], [], initial_results,
                                               all_retry_results)
        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._printer.write_update('Looking for new crash logs ...')
        self._look_for_new_crash_logs(initial_results, start_time)
        for retry_attempt_results in all_retry_results:
            self._look_for_new_crash_logs(retry_attempt_results, start_time)
        self._printer.write_update('Summarizing results ...')
        summarized_full_results = test_run_results.summarize_results(
            self._port, self._options, self._expectations, initial_results,
            all_retry_results)
        summarized_failing_results = test_run_results.summarize_results(
            self._port,
            self._options,
            self._expectations,
            initial_results,
            all_retry_results,
            only_include_failing=True)
        run_histories = test_run_results.test_run_histories(
            self._options, self._expectations, initial_results,
            all_retry_results)
        # The exit code is the regression count, capped at the max status.
        exit_code = summarized_failing_results['num_regressions']
        if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:
            _log.warning('num regressions (%d) exceeds max exit status (%d)',
                         exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)
            exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS
        if not self._options.dry_run:
            self._write_json_files(summarized_full_results,
                                   summarized_failing_results, initial_results,
                                   running_all_tests, run_histories)
            self._copy_results_html_file(self._artifacts_directory,
                                         'results.html')
            if (initial_results.interrupt_reason is
                    test_run_results.InterruptReason.EXTERNAL_SIGNAL):
                exit_code = exit_codes.INTERRUPTED_EXIT_STATUS
            else:
                if initial_results.interrupted:
                    exit_code = exit_codes.EARLY_EXIT_STATUS
            if (self._options.show_results
                    and (exit_code or initial_results.total_failures)):
                self._port.show_results_html_file(
                    self._filesystem.join(self._artifacts_directory,
                                          'results.html'))
            self._printer.print_results(time.time() - start_time,
                                        initial_results)
        return test_run_results.RunDetails(exit_code, summarized_full_results,
                                           summarized_failing_results,
                                           initial_results, all_retry_results)
def _register_termination_handler(self):
if self._port.host.platform.is_win():
signum = signal.SIGBREAK
else:
signum = signal.SIGTERM
signal.signal(signum, self._on_termination)
def _on_termination(self, signum, _frame):
self._printer.write_update(
'Received signal "%s" (%d) in %d' %
(signal.strsignal(signum), signum, os.getpid()))
raise KeyboardInterrupt
    def _run_test_loop(self, tests_to_run, tests_to_skip):
        """Interactive watch mode: reruns the tests until the user quits.
        Returns the (initial_results, all_retry_results) of the last pass.
        """
        # Don't show results in a new browser window because we're already
        # printing the link to diffs in the loop
        self._options.show_results = False
        while True:
            initial_results, all_retry_results = self._run_test_once(
                tests_to_run, tests_to_skip, should_retry_failures=False)
            for name in initial_results.failures_by_name:
                # Only the first failure per test is inspected for a diff link.
                failure = initial_results.failures_by_name[name][0]
                if isinstance(failure, test_failures.FailureTextMismatch):
                    full_test_path = self._filesystem.join(
                        self._artifacts_directory, name)
                    filename, _ = self._filesystem.splitext(full_test_path)
                    pretty_diff_path = 'file://' + filename + '-pretty-diff.html'
                    self._printer.writeln('Link to pretty diff:')
                    self._printer.writeln(pretty_diff_path + '\n')
            self._printer.writeln('Finished running tests')
            # Any input other than q/quit (e.g. 'r') runs the tests again.
            user_input = self._port.host.user.prompt(
                'Interactive watch mode: (q)uit (r)etry\n').lower()
            if user_input == 'q' or user_input == 'quit':
                return (initial_results, all_retry_results)
    def _run_test_once(self, tests_to_run, tests_to_skip,
                       should_retry_failures):
        """Runs the full test list once, then retries unexpected failures.
        Returns a (initial_results, all_retry_results) pair where
        all_retry_results contains one result set per retry attempt.
        """
        num_workers = int(
            self._port.num_workers(int(self._options.child_processes)))
        initial_results = self._run_tests(
            tests_to_run, tests_to_skip, self._options.repeat_each,
            self._options.iterations, num_workers)
        # Don't retry failures when interrupted by user or failures limit exception.
        should_retry_failures = (should_retry_failures
                                 and not initial_results.interrupted)
        tests_to_retry = self._tests_to_retry(initial_results)
        all_retry_results = []
        if should_retry_failures and tests_to_retry:
            for retry_attempt in range(1, self._options.num_retries + 1):
                if not tests_to_retry:
                    break
                _log.info('')
                _log.info(
                    'Retrying %s, attempt %d of %d...',
                    grammar.pluralize('unexpected failure',
                                      len(tests_to_retry)), retry_attempt,
                    self._options.num_retries)
                # Retries run each test exactly once, with no skips.
                retry_results = self._run_tests(
                    tests_to_retry,
                    tests_to_skip=set(),
                    repeat_each=1,
                    iterations=1,
                    num_workers=num_workers,
                    retry_attempt=retry_attempt)
                all_retry_results.append(retry_results)
                # Only tests still failing are carried into the next attempt.
                tests_to_retry = self._tests_to_retry(retry_results)
        return (initial_results, all_retry_results)
def _restore_order(self, paths, test_names):
original_test_names = list(test_names)
test_names = []
for path in paths:
for test in original_test_names:
if test.startswith(path) or fnmatch.fnmatch(test, path):
test_names.append(test)
test_names += list(set(original_test_names) - set(test_names))
return test_names
def _collect_tests(self, args):
return self._finder.find_tests(
args,
test_lists=self._options.test_list,
filter_files=self._options.isolated_script_test_filter_file,
fastest_percentile=self._options.fastest,
filters=self._options.isolated_script_test_filter)
def _is_http_test(self, test):
return (
test.startswith(self.HTTP_SUBDIR + self._port.TEST_PATH_SEPARATOR)
or self._is_websocket_test(test) or self._port.TEST_PATH_SEPARATOR
+ self.HTTP_SUBDIR + self._port.TEST_PATH_SEPARATOR in test)
def _is_websocket_test(self, test):
if self._port.should_use_wptserve(test):
return False
return self.WEBSOCKET_SUBDIR + self._port.TEST_PATH_SEPARATOR in test
def _http_tests(self, test_names):
return set(test for test in test_names if self._is_http_test(test))
def _is_perf_test(self, test):
return (self.PERF_SUBDIR == test
or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test)
def _prepare_lists(self, paths, test_names):
tests_to_skip = self._finder.skip_tests(paths, test_names,
self._expectations)
tests_to_run = [
test for test in test_names if test not in tests_to_skip
]
return tests_to_run, tests_to_skip
def _test_input_for_file(self, test_file, retry_attempt):
return TestInput(
test_file,
self._options.slow_timeout_ms
if self._test_is_slow(test_file) else self._options.timeout_ms,
self._test_requires_lock(test_file),
retry_attempt=retry_attempt)
    def _test_requires_lock(self, test_file):
        """Returns True if the test needs to be locked when running multiple
        instances of this test runner.
        Perf tests are locked because heavy load caused by running other
        tests in parallel might cause some of them to time out.
        """
        # Currently only perf tests require the lock.
        return self._is_perf_test(test_file)
def _test_is_slow(self, test_file):
if not self._expectations:
return False
is_slow_test = self._expectations.get_expectations(
test_file).is_slow_test
return is_slow_test or self._port.is_slow_wpt_test(test_file)
def _needs_servers(self, test_names):
return any(
self._is_http_test(test_name) for test_name in test_names)
    def _set_up_run(self, test_names):
        """Prepares the build, results directory, and system for a run.
        Returns exit_codes.OK_EXIT_STATUS on success, or a non-zero exit
        code from the first check that fails.
        """
        self._printer.write_update('Checking build ...')
        if self._options.build:
            exit_code = self._port.check_build(
                self._needs_servers(test_names), self._printer)
            if exit_code:
                _log.error('Build check failed')
                return exit_code
        if self._options.clobber_old_results:
            self._port.clobber_old_results()
        elif self._filesystem.exists(self._artifacts_directory):
            self._port.limit_archived_results_count()
            # Rename the existing results folder for archiving.
            self._port.rename_results_folder()
        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(
            self._artifacts_directory)
        exit_code = self._port.setup_test_run()
        if exit_code:
            _log.error('Build setup failed')
            return exit_code
        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update('Checking system dependencies ...')
            exit_code = self._port.check_sys_deps()
            if exit_code:
                return exit_code
        return exit_codes.OK_EXIT_STATUS
def _run_tests(self,
tests_to_run,
tests_to_skip,
repeat_each,
iterations,
num_workers,
retry_attempt=0):
test_inputs = []
for _ in range(iterations):
for test in tests_to_run:
for _ in range(repeat_each):
test_inputs.append(
self._test_input_for_file(test, retry_attempt))
return self._runner.run_tests(self._expectations, test_inputs,
tests_to_skip, num_workers,
retry_attempt)
    def _start_servers(self, tests_to_run):
        """Starts only the helper servers that tests_to_run actually need.
        Each *_started flag is set after a successful start so that
        _stop_servers() only stops servers that were actually started.
        """
        if any(self._port.is_wpt_test(test) for test in tests_to_run):
            self._printer.write_update('Starting WPTServe ...')
            self._port.start_wptserve()
            self._wptserve_started = True
        if (self._port.requires_http_server()
                or any(self._is_http_test(test) for test in tests_to_run)):
            self._printer.write_update('Starting HTTP server ...')
            self._port.start_http_server(
                additional_dirs={},
                number_of_drivers=self._options.max_locked_shards)
            self._http_server_started = True
        if any(self._is_websocket_test(test) for test in tests_to_run):
            self._printer.write_update('Starting WebSocket server ...')
            self._port.start_websocket_server()
            self._websockets_server_started = True
    def _stop_servers(self):
        """Stops whichever helper servers _start_servers() started.
        Each flag is cleared before the corresponding stop call, so a stop
        attempt is not repeated if the call raises.
        """
        if self._wptserve_started:
            self._printer.write_update('Stopping WPTServe ...')
            self._wptserve_started = False
            self._port.stop_wptserve()
        if self._http_server_started:
            self._printer.write_update('Stopping HTTP server ...')
            self._http_server_started = False
            self._port.stop_http_server()
        if self._websockets_server_started:
            self._printer.write_update('Stopping WebSocket server ...')
            self._websockets_server_started = False
            self._port.stop_websocket_server()
    def _clean_up_run(self):
        """Flushes output streams, cleans up the port, and closes the sink."""
        _log.debug('Flushing stdout')
        sys.stdout.flush()
        _log.debug('Flushing stderr')
        sys.stderr.flush()
        _log.debug('Cleaning up port')
        self._port.clean_up_test_run()
        # The sink may be falsy when no result sink was configured.
        if self._sink:
            _log.debug('Closing sink')
            self._sink.close()
def _look_for_new_crash_logs(self, run_results, start_time):
"""Looks for and writes new crash logs, at the end of the test run.
Since crash logs can take a long time to be written out if the system is
under stress, do a second pass at the end of the test run.
Args:
run_results: The results of the test run.
start_time: Time the tests started at. We're looking for crash
logs after that time.
"""
crashed_processes = []
test_to_crash_failure = {}
# reset static variables for Failure type classes
test_failures.AbstractTestResultType.port = self._port
test_failures.AbstractTestResultType.result_directory = self._results_directory
test_failures.AbstractTestResultType.filesystem = self._filesystem
for test, result in run_results.unexpected_results_by_name.items():
if result.type != ResultType.Crash:
continue
for failure in result.failures:
if (not isinstance(failure, test_failures.FailureCrash)
or failure.has_log):
continue
crashed_processes.append(
[test, failure.process_name, failure.pid])
test_to_crash_failure[test] = failure
sample_files = self._port.look_for_new_samples(crashed_processes,
start_time) or {}
for test, sample_file in sample_files.items():
test_failures.AbstractTestResultType.test_name = test
test_result = run_results.unexpected_results_by_name[test]
artifact_relative_path = self._port.output_filename(
test, test_failures.FILENAME_SUFFIX_SAMPLE, '.txt')
artifacts_sub_dir = test_result.artifacts.ArtifactsSubDirectory()
artifact_abspath = self._filesystem.join(self._results_directory,
artifacts_sub_dir,
artifact_relative_path)
self._filesystem.maybe_make_directory(
self._filesystem.dirname(artifact_abspath))
self._filesystem.copyfile(sample_file, artifact_abspath)
test_result.artifacts.AddArtifact(
'sample_file',
self._filesystem.join(artifacts_sub_dir,
artifact_relative_path))
new_crash_logs = self._port.look_for_new_crash_logs(
crashed_processes, start_time) or {}
for test, (crash_log, crash_site) in new_crash_logs.items():
test_failures.AbstractTestResultType.test_name = test
failure.crash_log = crash_log
failure.has_log = self._port.output_contains_sanitizer_messages(
failure.crash_log)
test_result = run_results.unexpected_results_by_name[test]
test_result.crash_site = crash_site
test_to_crash_failure[test].create_artifacts(
test_result.artifacts, force_overwrite=True)
def _tests_to_retry(self, run_results):
# TODO(ojan): This should also check that result.type != test_expectations.MISSING
# since retrying missing expectations is silly. But that's a bit tricky since we
# only consider the last retry attempt for the count of unexpected regressions.
return [
result.test_name
for result in run_results.unexpected_results_by_name.values()
if result.type != ResultType.Pass
]
    def _write_json_files(self, summarized_full_results,
                          summarized_failing_results, initial_results,
                          running_all_tests, run_histories):
        """Writes the JSON result artifacts into the artifacts directory:
        timings, stats, full/failing results (plain and jsonp), and
        optionally the caller-specified results/histories files.
        """
        _log.debug("Writing JSON files in %s.", self._artifacts_directory)
        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(
            initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._artifacts_directory,
                                                'times_ms.json')
        json_results_generator.write_json(self._filesystem, times_trie,
                                          times_json_path)
        # Save out the times data so we can use it for --fastest in the future.
        if running_all_tests:
            bot_test_times_path = self._port.bot_test_times_path()
            self._filesystem.maybe_make_directory(
                self._filesystem.dirname(bot_test_times_path))
            json_results_generator.write_json(self._filesystem, times_trie,
                                              bot_test_times_path)
        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._artifacts_directory,
                                           'stats.json')
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))
        full_results_path = self._filesystem.join(self._artifacts_directory,
                                                  'full_results.json')
        json_results_generator.write_json(
            self._filesystem, summarized_full_results, full_results_path)
        full_results_jsonp_path = self._filesystem.join(
            self._artifacts_directory, 'full_results_jsonp.js')
        json_results_generator.write_json(
            self._filesystem,
            summarized_full_results,
            full_results_jsonp_path,
            callback='ADD_FULL_RESULTS')
        failing_results_path = self._filesystem.join(self._artifacts_directory,
                                                     'failing_results.json')
        # We write failing_results.json out as jsonp because we need to load it
        # from a file url for results.html and Chromium doesn't allow that.
        json_results_generator.write_json(
            self._filesystem,
            summarized_failing_results,
            failing_results_path,
            callback='ADD_RESULTS')
        if self._options.json_test_results:
            json_results_generator.write_json(self._filesystem,
                                              summarized_full_results,
                                              self._options.json_test_results)
        if self._options.write_run_histories_to:
            json_results_generator.write_json(
                self._filesystem, run_histories,
                self._options.write_run_histories_to)
        _log.debug('Finished writing JSON files.')
def _copy_results_html_file(self, destination_dir, filename):
"""Copies a file from the template directory to the results directory."""
files_to_copy = [filename, filename + ".version"]
template_dir = self._path_finder.path_from_blink_tools(
'blinkpy', 'web_tests')
for filename in files_to_copy:
source_path = self._filesystem.join(template_dir, filename)
destination_path = self._filesystem.join(destination_dir, filename)
# Note that the results.html template file won't exist when
# we're using a MockFileSystem during unit tests, so make sure
# it exists before we try to copy it.
if self._filesystem.exists(source_path):
self._filesystem.copyfile(source_path, destination_path)
def _stats_trie(self, initial_results):
def _worker_number(worker_name):
return int(worker_name.split('/')[1]) if worker_name else -1
stats = {}
for result in initial_results.results_by_name.values():
if result.type != ResultType.Skip:
stats[result.test_name] = {
'results': (_worker_number(result.worker_name),
result.test_number, result.pid,
int(result.test_run_time * 1000),
int(result.total_run_time * 1000))
}
stats_trie = {}
for name, value in stats.items():
json_results_generator.add_path_to_trie(name, value, stats_trie)
return stats_trie
|
flexible
|
{
"blob_id": "08b57c00beb8dfedfee1bc032b8c281d7a151931",
"index": 8033,
"step-1": "<mask token>\n\n\nclass Manager(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, port, options, printer):\n \"\"\"Initializes test runner data structures.\n\n Args:\n port: An object implementing platform-specific functionality.\n options: An options argument which contains command line options.\n printer: A Printer object to record updates to.\n \"\"\"\n self._port = port\n self._filesystem = port.host.filesystem\n self._options = options\n self._printer = printer\n self._expectations = None\n self._http_server_started = False\n self._wptserve_started = False\n self._websockets_server_started = False\n self._results_directory = self._port.results_directory()\n self._artifacts_directory = self._port.artifacts_directory()\n self._finder = WebTestFinder(self._port, self._options)\n self._path_finder = PathFinder(port.host.filesystem)\n self._sink = CreateTestResultSink(self._port)\n self._runner = WebTestRunner(self._options, self._port, self.\n _printer, self._results_directory, self._test_is_slow, self._sink)\n\n def run(self, args):\n \"\"\"Runs the tests and return a RunDetails object with the results.\"\"\"\n start_time = time.time()\n self._printer.write_update('Collecting tests ...')\n running_all_tests = False\n try:\n paths, all_test_names, running_all_tests = self._collect_tests(args\n )\n except IOError:\n return test_run_results.RunDetails(exit_code=exit_codes.\n NO_TESTS_EXIT_STATUS)\n test_names = self._finder.split_into_chunks(all_test_names)\n if self._options.order == 'natural':\n test_names.sort(key=self._port.test_key)\n elif self._options.order == 'random':\n test_names.sort()\n random.Random(self._options.seed).shuffle(test_names)\n elif self._options.order == 'none':\n if paths:\n test_names = self._restore_order(paths, test_names)\n if not self._options.no_expectations:\n self._printer.write_update('Parsing expectations ...')\n self._expectations = 
test_expectations.TestExpectations(self._port)\n tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)\n self._printer.print_found(len(all_test_names), len(test_names), len\n (tests_to_run), self._options.repeat_each, self._options.iterations\n )\n if not tests_to_run:\n msg = 'No tests to run.'\n if self._options.zero_tests_executed_ok:\n _log.info(msg)\n else:\n _log.critical(msg)\n code = exit_codes.NO_TESTS_EXIT_STATUS\n return test_run_results.RunDetails(exit_code=code)\n exit_code = self._set_up_run(tests_to_run)\n if exit_code:\n return test_run_results.RunDetails(exit_code=exit_code)\n if self._options.num_retries is None:\n if self._options.test_list or len(paths) < len(test_names):\n self._options.num_retries = 3\n else:\n self._options.num_retries = 0\n should_retry_failures = self._options.num_retries > 0\n try:\n self._register_termination_handler()\n self._start_servers(tests_to_run)\n if self._options.watch:\n run_results = self._run_test_loop(tests_to_run, tests_to_skip)\n else:\n run_results = self._run_test_once(tests_to_run,\n tests_to_skip, should_retry_failures)\n initial_results, all_retry_results = run_results\n finally:\n _log.info('Finally stop servers and clean up')\n self._stop_servers()\n self._clean_up_run()\n if self._options.no_expectations:\n return test_run_results.RunDetails(0, [], [], initial_results,\n all_retry_results)\n self._printer.write_update('Looking for new crash logs ...')\n self._look_for_new_crash_logs(initial_results, start_time)\n for retry_attempt_results in all_retry_results:\n self._look_for_new_crash_logs(retry_attempt_results, start_time)\n self._printer.write_update('Summarizing results ...')\n summarized_full_results = test_run_results.summarize_results(self.\n _port, self._options, self._expectations, initial_results,\n all_retry_results)\n summarized_failing_results = test_run_results.summarize_results(self\n ._port, self._options, self._expectations, initial_results,\n all_retry_results, 
only_include_failing=True)\n run_histories = test_run_results.test_run_histories(self._options,\n self._expectations, initial_results, all_retry_results)\n exit_code = summarized_failing_results['num_regressions']\n if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:\n _log.warning('num regressions (%d) exceeds max exit status (%d)',\n exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)\n exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS\n if not self._options.dry_run:\n self._write_json_files(summarized_full_results,\n summarized_failing_results, initial_results,\n running_all_tests, run_histories)\n self._copy_results_html_file(self._artifacts_directory,\n 'results.html')\n if (initial_results.interrupt_reason is test_run_results.\n InterruptReason.EXTERNAL_SIGNAL):\n exit_code = exit_codes.INTERRUPTED_EXIT_STATUS\n else:\n if initial_results.interrupted:\n exit_code = exit_codes.EARLY_EXIT_STATUS\n if self._options.show_results and (exit_code or\n initial_results.total_failures):\n self._port.show_results_html_file(self._filesystem.join\n (self._artifacts_directory, 'results.html'))\n self._printer.print_results(time.time() - start_time,\n initial_results)\n return test_run_results.RunDetails(exit_code,\n summarized_full_results, summarized_failing_results,\n initial_results, all_retry_results)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _restore_order(self, paths, test_names):\n original_test_names = list(test_names)\n test_names = []\n for path in paths:\n for test in original_test_names:\n if test.startswith(path) or fnmatch.fnmatch(test, path):\n test_names.append(test)\n test_names += list(set(original_test_names) - set(test_names))\n return test_names\n <mask token>\n\n def _is_http_test(self, test):\n return (test.startswith(self.HTTP_SUBDIR + self._port.\n TEST_PATH_SEPARATOR) or self._is_websocket_test(test) or self.\n _port.TEST_PATH_SEPARATOR + self.HTTP_SUBDIR + self._port.\n TEST_PATH_SEPARATOR in test)\n\n def 
_is_websocket_test(self, test):\n if self._port.should_use_wptserve(test):\n return False\n return self.WEBSOCKET_SUBDIR + self._port.TEST_PATH_SEPARATOR in test\n\n def _http_tests(self, test_names):\n return set(test for test in test_names if self._is_http_test(test))\n\n def _is_perf_test(self, test):\n return (self.PERF_SUBDIR == test or self.PERF_SUBDIR + self._port.\n TEST_PATH_SEPARATOR in test)\n\n def _prepare_lists(self, paths, test_names):\n tests_to_skip = self._finder.skip_tests(paths, test_names, self.\n _expectations)\n tests_to_run = [test for test in test_names if test not in\n tests_to_skip]\n return tests_to_run, tests_to_skip\n\n def _test_input_for_file(self, test_file, retry_attempt):\n return TestInput(test_file, self._options.slow_timeout_ms if self.\n _test_is_slow(test_file) else self._options.timeout_ms, self.\n _test_requires_lock(test_file), retry_attempt=retry_attempt)\n <mask token>\n <mask token>\n\n def _needs_servers(self, test_names):\n return any(self._is_http_test(test_name) for test_name in test_names)\n <mask token>\n\n def _run_tests(self, tests_to_run, tests_to_skip, repeat_each,\n iterations, num_workers, retry_attempt=0):\n test_inputs = []\n for _ in range(iterations):\n for test in tests_to_run:\n for _ in range(repeat_each):\n test_inputs.append(self._test_input_for_file(test,\n retry_attempt))\n return self._runner.run_tests(self._expectations, test_inputs,\n tests_to_skip, num_workers, retry_attempt)\n <mask token>\n <mask token>\n <mask token>\n\n def _look_for_new_crash_logs(self, run_results, start_time):\n \"\"\"Looks for and writes new crash logs, at the end of the test run.\n\n Since crash logs can take a long time to be written out if the system is\n under stress, do a second pass at the end of the test run.\n\n Args:\n run_results: The results of the test run.\n start_time: Time the tests started at. 
We're looking for crash\n logs after that time.\n \"\"\"\n crashed_processes = []\n test_to_crash_failure = {}\n test_failures.AbstractTestResultType.port = self._port\n test_failures.AbstractTestResultType.result_directory = (self.\n _results_directory)\n test_failures.AbstractTestResultType.filesystem = self._filesystem\n for test, result in run_results.unexpected_results_by_name.items():\n if result.type != ResultType.Crash:\n continue\n for failure in result.failures:\n if not isinstance(failure, test_failures.FailureCrash\n ) or failure.has_log:\n continue\n crashed_processes.append([test, failure.process_name,\n failure.pid])\n test_to_crash_failure[test] = failure\n sample_files = self._port.look_for_new_samples(crashed_processes,\n start_time) or {}\n for test, sample_file in sample_files.items():\n test_failures.AbstractTestResultType.test_name = test\n test_result = run_results.unexpected_results_by_name[test]\n artifact_relative_path = self._port.output_filename(test,\n test_failures.FILENAME_SUFFIX_SAMPLE, '.txt')\n artifacts_sub_dir = test_result.artifacts.ArtifactsSubDirectory()\n artifact_abspath = self._filesystem.join(self.\n _results_directory, artifacts_sub_dir, artifact_relative_path)\n self._filesystem.maybe_make_directory(self._filesystem.dirname(\n artifact_abspath))\n self._filesystem.copyfile(sample_file, artifact_abspath)\n test_result.artifacts.AddArtifact('sample_file', self.\n _filesystem.join(artifacts_sub_dir, artifact_relative_path))\n new_crash_logs = self._port.look_for_new_crash_logs(crashed_processes,\n start_time) or {}\n for test, (crash_log, crash_site) in new_crash_logs.items():\n test_failures.AbstractTestResultType.test_name = test\n failure.crash_log = crash_log\n failure.has_log = self._port.output_contains_sanitizer_messages(\n failure.crash_log)\n test_result = run_results.unexpected_results_by_name[test]\n test_result.crash_site = crash_site\n test_to_crash_failure[test].create_artifacts(test_result.\n artifacts, 
force_overwrite=True)\n <mask token>\n\n def _write_json_files(self, summarized_full_results,\n summarized_failing_results, initial_results, running_all_tests,\n run_histories):\n _log.debug('Writing JSON files in %s.', self._artifacts_directory)\n times_trie = json_results_generator.test_timings_trie(initial_results\n .results_by_name.values())\n times_json_path = self._filesystem.join(self._artifacts_directory,\n 'times_ms.json')\n json_results_generator.write_json(self._filesystem, times_trie,\n times_json_path)\n if running_all_tests:\n bot_test_times_path = self._port.bot_test_times_path()\n self._filesystem.maybe_make_directory(self._filesystem.dirname(\n bot_test_times_path))\n json_results_generator.write_json(self._filesystem, times_trie,\n bot_test_times_path)\n stats_trie = self._stats_trie(initial_results)\n stats_path = self._filesystem.join(self._artifacts_directory,\n 'stats.json')\n self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))\n full_results_path = self._filesystem.join(self._artifacts_directory,\n 'full_results.json')\n json_results_generator.write_json(self._filesystem,\n summarized_full_results, full_results_path)\n full_results_jsonp_path = self._filesystem.join(self.\n _artifacts_directory, 'full_results_jsonp.js')\n json_results_generator.write_json(self._filesystem,\n summarized_full_results, full_results_jsonp_path, callback=\n 'ADD_FULL_RESULTS')\n failing_results_path = self._filesystem.join(self.\n _artifacts_directory, 'failing_results.json')\n json_results_generator.write_json(self._filesystem,\n summarized_failing_results, failing_results_path, callback=\n 'ADD_RESULTS')\n if self._options.json_test_results:\n json_results_generator.write_json(self._filesystem,\n summarized_full_results, self._options.json_test_results)\n if self._options.write_run_histories_to:\n json_results_generator.write_json(self._filesystem,\n run_histories, self._options.write_run_histories_to)\n _log.debug('Finished writing JSON 
files.')\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Manager(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, port, options, printer):\n \"\"\"Initializes test runner data structures.\n\n Args:\n port: An object implementing platform-specific functionality.\n options: An options argument which contains command line options.\n printer: A Printer object to record updates to.\n \"\"\"\n self._port = port\n self._filesystem = port.host.filesystem\n self._options = options\n self._printer = printer\n self._expectations = None\n self._http_server_started = False\n self._wptserve_started = False\n self._websockets_server_started = False\n self._results_directory = self._port.results_directory()\n self._artifacts_directory = self._port.artifacts_directory()\n self._finder = WebTestFinder(self._port, self._options)\n self._path_finder = PathFinder(port.host.filesystem)\n self._sink = CreateTestResultSink(self._port)\n self._runner = WebTestRunner(self._options, self._port, self.\n _printer, self._results_directory, self._test_is_slow, self._sink)\n\n def run(self, args):\n \"\"\"Runs the tests and return a RunDetails object with the results.\"\"\"\n start_time = time.time()\n self._printer.write_update('Collecting tests ...')\n running_all_tests = False\n try:\n paths, all_test_names, running_all_tests = self._collect_tests(args\n )\n except IOError:\n return test_run_results.RunDetails(exit_code=exit_codes.\n NO_TESTS_EXIT_STATUS)\n test_names = self._finder.split_into_chunks(all_test_names)\n if self._options.order == 'natural':\n test_names.sort(key=self._port.test_key)\n elif self._options.order == 'random':\n test_names.sort()\n random.Random(self._options.seed).shuffle(test_names)\n elif self._options.order == 'none':\n if paths:\n test_names = self._restore_order(paths, test_names)\n if not self._options.no_expectations:\n self._printer.write_update('Parsing expectations ...')\n self._expectations = 
test_expectations.TestExpectations(self._port)\n tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)\n self._printer.print_found(len(all_test_names), len(test_names), len\n (tests_to_run), self._options.repeat_each, self._options.iterations\n )\n if not tests_to_run:\n msg = 'No tests to run.'\n if self._options.zero_tests_executed_ok:\n _log.info(msg)\n else:\n _log.critical(msg)\n code = exit_codes.NO_TESTS_EXIT_STATUS\n return test_run_results.RunDetails(exit_code=code)\n exit_code = self._set_up_run(tests_to_run)\n if exit_code:\n return test_run_results.RunDetails(exit_code=exit_code)\n if self._options.num_retries is None:\n if self._options.test_list or len(paths) < len(test_names):\n self._options.num_retries = 3\n else:\n self._options.num_retries = 0\n should_retry_failures = self._options.num_retries > 0\n try:\n self._register_termination_handler()\n self._start_servers(tests_to_run)\n if self._options.watch:\n run_results = self._run_test_loop(tests_to_run, tests_to_skip)\n else:\n run_results = self._run_test_once(tests_to_run,\n tests_to_skip, should_retry_failures)\n initial_results, all_retry_results = run_results\n finally:\n _log.info('Finally stop servers and clean up')\n self._stop_servers()\n self._clean_up_run()\n if self._options.no_expectations:\n return test_run_results.RunDetails(0, [], [], initial_results,\n all_retry_results)\n self._printer.write_update('Looking for new crash logs ...')\n self._look_for_new_crash_logs(initial_results, start_time)\n for retry_attempt_results in all_retry_results:\n self._look_for_new_crash_logs(retry_attempt_results, start_time)\n self._printer.write_update('Summarizing results ...')\n summarized_full_results = test_run_results.summarize_results(self.\n _port, self._options, self._expectations, initial_results,\n all_retry_results)\n summarized_failing_results = test_run_results.summarize_results(self\n ._port, self._options, self._expectations, initial_results,\n all_retry_results, 
only_include_failing=True)\n run_histories = test_run_results.test_run_histories(self._options,\n self._expectations, initial_results, all_retry_results)\n exit_code = summarized_failing_results['num_regressions']\n if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:\n _log.warning('num regressions (%d) exceeds max exit status (%d)',\n exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)\n exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS\n if not self._options.dry_run:\n self._write_json_files(summarized_full_results,\n summarized_failing_results, initial_results,\n running_all_tests, run_histories)\n self._copy_results_html_file(self._artifacts_directory,\n 'results.html')\n if (initial_results.interrupt_reason is test_run_results.\n InterruptReason.EXTERNAL_SIGNAL):\n exit_code = exit_codes.INTERRUPTED_EXIT_STATUS\n else:\n if initial_results.interrupted:\n exit_code = exit_codes.EARLY_EXIT_STATUS\n if self._options.show_results and (exit_code or\n initial_results.total_failures):\n self._port.show_results_html_file(self._filesystem.join\n (self._artifacts_directory, 'results.html'))\n self._printer.print_results(time.time() - start_time,\n initial_results)\n return test_run_results.RunDetails(exit_code,\n summarized_full_results, summarized_failing_results,\n initial_results, all_retry_results)\n <mask token>\n <mask token>\n\n def _run_test_loop(self, tests_to_run, tests_to_skip):\n self._options.show_results = False\n while True:\n initial_results, all_retry_results = self._run_test_once(\n tests_to_run, tests_to_skip, should_retry_failures=False)\n for name in initial_results.failures_by_name:\n failure = initial_results.failures_by_name[name][0]\n if isinstance(failure, test_failures.FailureTextMismatch):\n full_test_path = self._filesystem.join(self.\n _artifacts_directory, name)\n filename, _ = self._filesystem.splitext(full_test_path)\n pretty_diff_path = ('file://' + filename +\n '-pretty-diff.html')\n self._printer.writeln('Link to pretty diff:')\n 
self._printer.writeln(pretty_diff_path + '\\n')\n self._printer.writeln('Finished running tests')\n user_input = self._port.host.user.prompt(\n 'Interactive watch mode: (q)uit (r)etry\\n').lower()\n if user_input == 'q' or user_input == 'quit':\n return initial_results, all_retry_results\n\n def _run_test_once(self, tests_to_run, tests_to_skip, should_retry_failures\n ):\n num_workers = int(self._port.num_workers(int(self._options.\n child_processes)))\n initial_results = self._run_tests(tests_to_run, tests_to_skip, self\n ._options.repeat_each, self._options.iterations, num_workers)\n should_retry_failures = (should_retry_failures and not\n initial_results.interrupted)\n tests_to_retry = self._tests_to_retry(initial_results)\n all_retry_results = []\n if should_retry_failures and tests_to_retry:\n for retry_attempt in range(1, self._options.num_retries + 1):\n if not tests_to_retry:\n break\n _log.info('')\n _log.info('Retrying %s, attempt %d of %d...', grammar.\n pluralize('unexpected failure', len(tests_to_retry)),\n retry_attempt, self._options.num_retries)\n retry_results = self._run_tests(tests_to_retry,\n tests_to_skip=set(), repeat_each=1, iterations=1,\n num_workers=num_workers, retry_attempt=retry_attempt)\n all_retry_results.append(retry_results)\n tests_to_retry = self._tests_to_retry(retry_results)\n return initial_results, all_retry_results\n\n def _restore_order(self, paths, test_names):\n original_test_names = list(test_names)\n test_names = []\n for path in paths:\n for test in original_test_names:\n if test.startswith(path) or fnmatch.fnmatch(test, path):\n test_names.append(test)\n test_names += list(set(original_test_names) - set(test_names))\n return test_names\n <mask token>\n\n def _is_http_test(self, test):\n return (test.startswith(self.HTTP_SUBDIR + self._port.\n TEST_PATH_SEPARATOR) or self._is_websocket_test(test) or self.\n _port.TEST_PATH_SEPARATOR + self.HTTP_SUBDIR + self._port.\n TEST_PATH_SEPARATOR in test)\n\n def 
_is_websocket_test(self, test):\n if self._port.should_use_wptserve(test):\n return False\n return self.WEBSOCKET_SUBDIR + self._port.TEST_PATH_SEPARATOR in test\n\n def _http_tests(self, test_names):\n return set(test for test in test_names if self._is_http_test(test))\n\n def _is_perf_test(self, test):\n return (self.PERF_SUBDIR == test or self.PERF_SUBDIR + self._port.\n TEST_PATH_SEPARATOR in test)\n\n def _prepare_lists(self, paths, test_names):\n tests_to_skip = self._finder.skip_tests(paths, test_names, self.\n _expectations)\n tests_to_run = [test for test in test_names if test not in\n tests_to_skip]\n return tests_to_run, tests_to_skip\n\n def _test_input_for_file(self, test_file, retry_attempt):\n return TestInput(test_file, self._options.slow_timeout_ms if self.\n _test_is_slow(test_file) else self._options.timeout_ms, self.\n _test_requires_lock(test_file), retry_attempt=retry_attempt)\n\n def _test_requires_lock(self, test_file):\n \"\"\"Returns True if the test needs to be locked when running multiple\n instances of this test runner.\n\n Perf tests are locked because heavy load caused by running other\n tests in parallel might cause some of them to time out.\n \"\"\"\n return self._is_perf_test(test_file)\n\n def _test_is_slow(self, test_file):\n if not self._expectations:\n return False\n is_slow_test = self._expectations.get_expectations(test_file\n ).is_slow_test\n return is_slow_test or self._port.is_slow_wpt_test(test_file)\n\n def _needs_servers(self, test_names):\n return any(self._is_http_test(test_name) for test_name in test_names)\n <mask token>\n\n def _run_tests(self, tests_to_run, tests_to_skip, repeat_each,\n iterations, num_workers, retry_attempt=0):\n test_inputs = []\n for _ in range(iterations):\n for test in tests_to_run:\n for _ in range(repeat_each):\n test_inputs.append(self._test_input_for_file(test,\n retry_attempt))\n return self._runner.run_tests(self._expectations, test_inputs,\n tests_to_skip, num_workers, retry_attempt)\n 
<mask token>\n <mask token>\n <mask token>\n\n def _look_for_new_crash_logs(self, run_results, start_time):\n \"\"\"Looks for and writes new crash logs, at the end of the test run.\n\n Since crash logs can take a long time to be written out if the system is\n under stress, do a second pass at the end of the test run.\n\n Args:\n run_results: The results of the test run.\n start_time: Time the tests started at. We're looking for crash\n logs after that time.\n \"\"\"\n crashed_processes = []\n test_to_crash_failure = {}\n test_failures.AbstractTestResultType.port = self._port\n test_failures.AbstractTestResultType.result_directory = (self.\n _results_directory)\n test_failures.AbstractTestResultType.filesystem = self._filesystem\n for test, result in run_results.unexpected_results_by_name.items():\n if result.type != ResultType.Crash:\n continue\n for failure in result.failures:\n if not isinstance(failure, test_failures.FailureCrash\n ) or failure.has_log:\n continue\n crashed_processes.append([test, failure.process_name,\n failure.pid])\n test_to_crash_failure[test] = failure\n sample_files = self._port.look_for_new_samples(crashed_processes,\n start_time) or {}\n for test, sample_file in sample_files.items():\n test_failures.AbstractTestResultType.test_name = test\n test_result = run_results.unexpected_results_by_name[test]\n artifact_relative_path = self._port.output_filename(test,\n test_failures.FILENAME_SUFFIX_SAMPLE, '.txt')\n artifacts_sub_dir = test_result.artifacts.ArtifactsSubDirectory()\n artifact_abspath = self._filesystem.join(self.\n _results_directory, artifacts_sub_dir, artifact_relative_path)\n self._filesystem.maybe_make_directory(self._filesystem.dirname(\n artifact_abspath))\n self._filesystem.copyfile(sample_file, artifact_abspath)\n test_result.artifacts.AddArtifact('sample_file', self.\n _filesystem.join(artifacts_sub_dir, artifact_relative_path))\n new_crash_logs = self._port.look_for_new_crash_logs(crashed_processes,\n start_time) or {}\n 
for test, (crash_log, crash_site) in new_crash_logs.items():\n test_failures.AbstractTestResultType.test_name = test\n failure.crash_log = crash_log\n failure.has_log = self._port.output_contains_sanitizer_messages(\n failure.crash_log)\n test_result = run_results.unexpected_results_by_name[test]\n test_result.crash_site = crash_site\n test_to_crash_failure[test].create_artifacts(test_result.\n artifacts, force_overwrite=True)\n <mask token>\n\n def _write_json_files(self, summarized_full_results,\n summarized_failing_results, initial_results, running_all_tests,\n run_histories):\n _log.debug('Writing JSON files in %s.', self._artifacts_directory)\n times_trie = json_results_generator.test_timings_trie(initial_results\n .results_by_name.values())\n times_json_path = self._filesystem.join(self._artifacts_directory,\n 'times_ms.json')\n json_results_generator.write_json(self._filesystem, times_trie,\n times_json_path)\n if running_all_tests:\n bot_test_times_path = self._port.bot_test_times_path()\n self._filesystem.maybe_make_directory(self._filesystem.dirname(\n bot_test_times_path))\n json_results_generator.write_json(self._filesystem, times_trie,\n bot_test_times_path)\n stats_trie = self._stats_trie(initial_results)\n stats_path = self._filesystem.join(self._artifacts_directory,\n 'stats.json')\n self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))\n full_results_path = self._filesystem.join(self._artifacts_directory,\n 'full_results.json')\n json_results_generator.write_json(self._filesystem,\n summarized_full_results, full_results_path)\n full_results_jsonp_path = self._filesystem.join(self.\n _artifacts_directory, 'full_results_jsonp.js')\n json_results_generator.write_json(self._filesystem,\n summarized_full_results, full_results_jsonp_path, callback=\n 'ADD_FULL_RESULTS')\n failing_results_path = self._filesystem.join(self.\n _artifacts_directory, 'failing_results.json')\n json_results_generator.write_json(self._filesystem,\n 
summarized_failing_results, failing_results_path, callback=\n 'ADD_RESULTS')\n if self._options.json_test_results:\n json_results_generator.write_json(self._filesystem,\n summarized_full_results, self._options.json_test_results)\n if self._options.write_run_histories_to:\n json_results_generator.write_json(self._filesystem,\n run_histories, self._options.write_run_histories_to)\n _log.debug('Finished writing JSON files.')\n\n def _copy_results_html_file(self, destination_dir, filename):\n \"\"\"Copies a file from the template directory to the results directory.\"\"\"\n files_to_copy = [filename, filename + '.version']\n template_dir = self._path_finder.path_from_blink_tools('blinkpy',\n 'web_tests')\n for filename in files_to_copy:\n source_path = self._filesystem.join(template_dir, filename)\n destination_path = self._filesystem.join(destination_dir, filename)\n if self._filesystem.exists(source_path):\n self._filesystem.copyfile(source_path, destination_path)\n\n def _stats_trie(self, initial_results):\n\n def _worker_number(worker_name):\n return int(worker_name.split('/')[1]) if worker_name else -1\n stats = {}\n for result in initial_results.results_by_name.values():\n if result.type != ResultType.Skip:\n stats[result.test_name] = {'results': (_worker_number(\n result.worker_name), result.test_number, result.pid,\n int(result.test_run_time * 1000), int(result.\n total_run_time * 1000))}\n stats_trie = {}\n for name, value in stats.items():\n json_results_generator.add_path_to_trie(name, value, stats_trie)\n return stats_trie\n",
"step-3": "<mask token>\n\n\nclass Manager(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, port, options, printer):\n \"\"\"Initializes test runner data structures.\n\n Args:\n port: An object implementing platform-specific functionality.\n options: An options argument which contains command line options.\n printer: A Printer object to record updates to.\n \"\"\"\n self._port = port\n self._filesystem = port.host.filesystem\n self._options = options\n self._printer = printer\n self._expectations = None\n self._http_server_started = False\n self._wptserve_started = False\n self._websockets_server_started = False\n self._results_directory = self._port.results_directory()\n self._artifacts_directory = self._port.artifacts_directory()\n self._finder = WebTestFinder(self._port, self._options)\n self._path_finder = PathFinder(port.host.filesystem)\n self._sink = CreateTestResultSink(self._port)\n self._runner = WebTestRunner(self._options, self._port, self.\n _printer, self._results_directory, self._test_is_slow, self._sink)\n\n def run(self, args):\n \"\"\"Runs the tests and return a RunDetails object with the results.\"\"\"\n start_time = time.time()\n self._printer.write_update('Collecting tests ...')\n running_all_tests = False\n try:\n paths, all_test_names, running_all_tests = self._collect_tests(args\n )\n except IOError:\n return test_run_results.RunDetails(exit_code=exit_codes.\n NO_TESTS_EXIT_STATUS)\n test_names = self._finder.split_into_chunks(all_test_names)\n if self._options.order == 'natural':\n test_names.sort(key=self._port.test_key)\n elif self._options.order == 'random':\n test_names.sort()\n random.Random(self._options.seed).shuffle(test_names)\n elif self._options.order == 'none':\n if paths:\n test_names = self._restore_order(paths, test_names)\n if not self._options.no_expectations:\n self._printer.write_update('Parsing expectations ...')\n self._expectations = 
test_expectations.TestExpectations(self._port)\n tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)\n self._printer.print_found(len(all_test_names), len(test_names), len\n (tests_to_run), self._options.repeat_each, self._options.iterations\n )\n if not tests_to_run:\n msg = 'No tests to run.'\n if self._options.zero_tests_executed_ok:\n _log.info(msg)\n else:\n _log.critical(msg)\n code = exit_codes.NO_TESTS_EXIT_STATUS\n return test_run_results.RunDetails(exit_code=code)\n exit_code = self._set_up_run(tests_to_run)\n if exit_code:\n return test_run_results.RunDetails(exit_code=exit_code)\n if self._options.num_retries is None:\n if self._options.test_list or len(paths) < len(test_names):\n self._options.num_retries = 3\n else:\n self._options.num_retries = 0\n should_retry_failures = self._options.num_retries > 0\n try:\n self._register_termination_handler()\n self._start_servers(tests_to_run)\n if self._options.watch:\n run_results = self._run_test_loop(tests_to_run, tests_to_skip)\n else:\n run_results = self._run_test_once(tests_to_run,\n tests_to_skip, should_retry_failures)\n initial_results, all_retry_results = run_results\n finally:\n _log.info('Finally stop servers and clean up')\n self._stop_servers()\n self._clean_up_run()\n if self._options.no_expectations:\n return test_run_results.RunDetails(0, [], [], initial_results,\n all_retry_results)\n self._printer.write_update('Looking for new crash logs ...')\n self._look_for_new_crash_logs(initial_results, start_time)\n for retry_attempt_results in all_retry_results:\n self._look_for_new_crash_logs(retry_attempt_results, start_time)\n self._printer.write_update('Summarizing results ...')\n summarized_full_results = test_run_results.summarize_results(self.\n _port, self._options, self._expectations, initial_results,\n all_retry_results)\n summarized_failing_results = test_run_results.summarize_results(self\n ._port, self._options, self._expectations, initial_results,\n all_retry_results, 
only_include_failing=True)\n run_histories = test_run_results.test_run_histories(self._options,\n self._expectations, initial_results, all_retry_results)\n exit_code = summarized_failing_results['num_regressions']\n if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:\n _log.warning('num regressions (%d) exceeds max exit status (%d)',\n exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)\n exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS\n if not self._options.dry_run:\n self._write_json_files(summarized_full_results,\n summarized_failing_results, initial_results,\n running_all_tests, run_histories)\n self._copy_results_html_file(self._artifacts_directory,\n 'results.html')\n if (initial_results.interrupt_reason is test_run_results.\n InterruptReason.EXTERNAL_SIGNAL):\n exit_code = exit_codes.INTERRUPTED_EXIT_STATUS\n else:\n if initial_results.interrupted:\n exit_code = exit_codes.EARLY_EXIT_STATUS\n if self._options.show_results and (exit_code or\n initial_results.total_failures):\n self._port.show_results_html_file(self._filesystem.join\n (self._artifacts_directory, 'results.html'))\n self._printer.print_results(time.time() - start_time,\n initial_results)\n return test_run_results.RunDetails(exit_code,\n summarized_full_results, summarized_failing_results,\n initial_results, all_retry_results)\n <mask token>\n\n def _on_termination(self, signum, _frame):\n self._printer.write_update('Received signal \"%s\" (%d) in %d' % (\n signal.strsignal(signum), signum, os.getpid()))\n raise KeyboardInterrupt\n\n def _run_test_loop(self, tests_to_run, tests_to_skip):\n self._options.show_results = False\n while True:\n initial_results, all_retry_results = self._run_test_once(\n tests_to_run, tests_to_skip, should_retry_failures=False)\n for name in initial_results.failures_by_name:\n failure = initial_results.failures_by_name[name][0]\n if isinstance(failure, test_failures.FailureTextMismatch):\n full_test_path = self._filesystem.join(self.\n _artifacts_directory, name)\n 
filename, _ = self._filesystem.splitext(full_test_path)\n pretty_diff_path = ('file://' + filename +\n '-pretty-diff.html')\n self._printer.writeln('Link to pretty diff:')\n self._printer.writeln(pretty_diff_path + '\\n')\n self._printer.writeln('Finished running tests')\n user_input = self._port.host.user.prompt(\n 'Interactive watch mode: (q)uit (r)etry\\n').lower()\n if user_input == 'q' or user_input == 'quit':\n return initial_results, all_retry_results\n\n def _run_test_once(self, tests_to_run, tests_to_skip, should_retry_failures\n ):\n num_workers = int(self._port.num_workers(int(self._options.\n child_processes)))\n initial_results = self._run_tests(tests_to_run, tests_to_skip, self\n ._options.repeat_each, self._options.iterations, num_workers)\n should_retry_failures = (should_retry_failures and not\n initial_results.interrupted)\n tests_to_retry = self._tests_to_retry(initial_results)\n all_retry_results = []\n if should_retry_failures and tests_to_retry:\n for retry_attempt in range(1, self._options.num_retries + 1):\n if not tests_to_retry:\n break\n _log.info('')\n _log.info('Retrying %s, attempt %d of %d...', grammar.\n pluralize('unexpected failure', len(tests_to_retry)),\n retry_attempt, self._options.num_retries)\n retry_results = self._run_tests(tests_to_retry,\n tests_to_skip=set(), repeat_each=1, iterations=1,\n num_workers=num_workers, retry_attempt=retry_attempt)\n all_retry_results.append(retry_results)\n tests_to_retry = self._tests_to_retry(retry_results)\n return initial_results, all_retry_results\n\n def _restore_order(self, paths, test_names):\n original_test_names = list(test_names)\n test_names = []\n for path in paths:\n for test in original_test_names:\n if test.startswith(path) or fnmatch.fnmatch(test, path):\n test_names.append(test)\n test_names += list(set(original_test_names) - set(test_names))\n return test_names\n <mask token>\n\n def _is_http_test(self, test):\n return (test.startswith(self.HTTP_SUBDIR + self._port.\n 
TEST_PATH_SEPARATOR) or self._is_websocket_test(test) or self.\n _port.TEST_PATH_SEPARATOR + self.HTTP_SUBDIR + self._port.\n TEST_PATH_SEPARATOR in test)\n\n def _is_websocket_test(self, test):\n if self._port.should_use_wptserve(test):\n return False\n return self.WEBSOCKET_SUBDIR + self._port.TEST_PATH_SEPARATOR in test\n\n def _http_tests(self, test_names):\n return set(test for test in test_names if self._is_http_test(test))\n\n def _is_perf_test(self, test):\n return (self.PERF_SUBDIR == test or self.PERF_SUBDIR + self._port.\n TEST_PATH_SEPARATOR in test)\n\n def _prepare_lists(self, paths, test_names):\n tests_to_skip = self._finder.skip_tests(paths, test_names, self.\n _expectations)\n tests_to_run = [test for test in test_names if test not in\n tests_to_skip]\n return tests_to_run, tests_to_skip\n\n def _test_input_for_file(self, test_file, retry_attempt):\n return TestInput(test_file, self._options.slow_timeout_ms if self.\n _test_is_slow(test_file) else self._options.timeout_ms, self.\n _test_requires_lock(test_file), retry_attempt=retry_attempt)\n\n def _test_requires_lock(self, test_file):\n \"\"\"Returns True if the test needs to be locked when running multiple\n instances of this test runner.\n\n Perf tests are locked because heavy load caused by running other\n tests in parallel might cause some of them to time out.\n \"\"\"\n return self._is_perf_test(test_file)\n\n def _test_is_slow(self, test_file):\n if not self._expectations:\n return False\n is_slow_test = self._expectations.get_expectations(test_file\n ).is_slow_test\n return is_slow_test or self._port.is_slow_wpt_test(test_file)\n\n def _needs_servers(self, test_names):\n return any(self._is_http_test(test_name) for test_name in test_names)\n <mask token>\n\n def _run_tests(self, tests_to_run, tests_to_skip, repeat_each,\n iterations, num_workers, retry_attempt=0):\n test_inputs = []\n for _ in range(iterations):\n for test in tests_to_run:\n for _ in range(repeat_each):\n 
test_inputs.append(self._test_input_for_file(test,\n retry_attempt))\n return self._runner.run_tests(self._expectations, test_inputs,\n tests_to_skip, num_workers, retry_attempt)\n\n def _start_servers(self, tests_to_run):\n if any(self._port.is_wpt_test(test) for test in tests_to_run):\n self._printer.write_update('Starting WPTServe ...')\n self._port.start_wptserve()\n self._wptserve_started = True\n if self._port.requires_http_server() or any(self._is_http_test(test\n ) for test in tests_to_run):\n self._printer.write_update('Starting HTTP server ...')\n self._port.start_http_server(additional_dirs={},\n number_of_drivers=self._options.max_locked_shards)\n self._http_server_started = True\n if any(self._is_websocket_test(test) for test in tests_to_run):\n self._printer.write_update('Starting WebSocket server ...')\n self._port.start_websocket_server()\n self._websockets_server_started = True\n\n def _stop_servers(self):\n if self._wptserve_started:\n self._printer.write_update('Stopping WPTServe ...')\n self._wptserve_started = False\n self._port.stop_wptserve()\n if self._http_server_started:\n self._printer.write_update('Stopping HTTP server ...')\n self._http_server_started = False\n self._port.stop_http_server()\n if self._websockets_server_started:\n self._printer.write_update('Stopping WebSocket server ...')\n self._websockets_server_started = False\n self._port.stop_websocket_server()\n\n def _clean_up_run(self):\n _log.debug('Flushing stdout')\n sys.stdout.flush()\n _log.debug('Flushing stderr')\n sys.stderr.flush()\n _log.debug('Cleaning up port')\n self._port.clean_up_test_run()\n if self._sink:\n _log.debug('Closing sink')\n self._sink.close()\n\n def _look_for_new_crash_logs(self, run_results, start_time):\n \"\"\"Looks for and writes new crash logs, at the end of the test run.\n\n Since crash logs can take a long time to be written out if the system is\n under stress, do a second pass at the end of the test run.\n\n Args:\n run_results: The results 
of the test run.\n start_time: Time the tests started at. We're looking for crash\n logs after that time.\n \"\"\"\n crashed_processes = []\n test_to_crash_failure = {}\n test_failures.AbstractTestResultType.port = self._port\n test_failures.AbstractTestResultType.result_directory = (self.\n _results_directory)\n test_failures.AbstractTestResultType.filesystem = self._filesystem\n for test, result in run_results.unexpected_results_by_name.items():\n if result.type != ResultType.Crash:\n continue\n for failure in result.failures:\n if not isinstance(failure, test_failures.FailureCrash\n ) or failure.has_log:\n continue\n crashed_processes.append([test, failure.process_name,\n failure.pid])\n test_to_crash_failure[test] = failure\n sample_files = self._port.look_for_new_samples(crashed_processes,\n start_time) or {}\n for test, sample_file in sample_files.items():\n test_failures.AbstractTestResultType.test_name = test\n test_result = run_results.unexpected_results_by_name[test]\n artifact_relative_path = self._port.output_filename(test,\n test_failures.FILENAME_SUFFIX_SAMPLE, '.txt')\n artifacts_sub_dir = test_result.artifacts.ArtifactsSubDirectory()\n artifact_abspath = self._filesystem.join(self.\n _results_directory, artifacts_sub_dir, artifact_relative_path)\n self._filesystem.maybe_make_directory(self._filesystem.dirname(\n artifact_abspath))\n self._filesystem.copyfile(sample_file, artifact_abspath)\n test_result.artifacts.AddArtifact('sample_file', self.\n _filesystem.join(artifacts_sub_dir, artifact_relative_path))\n new_crash_logs = self._port.look_for_new_crash_logs(crashed_processes,\n start_time) or {}\n for test, (crash_log, crash_site) in new_crash_logs.items():\n test_failures.AbstractTestResultType.test_name = test\n failure.crash_log = crash_log\n failure.has_log = self._port.output_contains_sanitizer_messages(\n failure.crash_log)\n test_result = run_results.unexpected_results_by_name[test]\n test_result.crash_site = crash_site\n 
test_to_crash_failure[test].create_artifacts(test_result.\n artifacts, force_overwrite=True)\n\n def _tests_to_retry(self, run_results):\n return [result.test_name for result in run_results.\n unexpected_results_by_name.values() if result.type !=\n ResultType.Pass]\n\n def _write_json_files(self, summarized_full_results,\n summarized_failing_results, initial_results, running_all_tests,\n run_histories):\n _log.debug('Writing JSON files in %s.', self._artifacts_directory)\n times_trie = json_results_generator.test_timings_trie(initial_results\n .results_by_name.values())\n times_json_path = self._filesystem.join(self._artifacts_directory,\n 'times_ms.json')\n json_results_generator.write_json(self._filesystem, times_trie,\n times_json_path)\n if running_all_tests:\n bot_test_times_path = self._port.bot_test_times_path()\n self._filesystem.maybe_make_directory(self._filesystem.dirname(\n bot_test_times_path))\n json_results_generator.write_json(self._filesystem, times_trie,\n bot_test_times_path)\n stats_trie = self._stats_trie(initial_results)\n stats_path = self._filesystem.join(self._artifacts_directory,\n 'stats.json')\n self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))\n full_results_path = self._filesystem.join(self._artifacts_directory,\n 'full_results.json')\n json_results_generator.write_json(self._filesystem,\n summarized_full_results, full_results_path)\n full_results_jsonp_path = self._filesystem.join(self.\n _artifacts_directory, 'full_results_jsonp.js')\n json_results_generator.write_json(self._filesystem,\n summarized_full_results, full_results_jsonp_path, callback=\n 'ADD_FULL_RESULTS')\n failing_results_path = self._filesystem.join(self.\n _artifacts_directory, 'failing_results.json')\n json_results_generator.write_json(self._filesystem,\n summarized_failing_results, failing_results_path, callback=\n 'ADD_RESULTS')\n if self._options.json_test_results:\n json_results_generator.write_json(self._filesystem,\n summarized_full_results, 
self._options.json_test_results)\n if self._options.write_run_histories_to:\n json_results_generator.write_json(self._filesystem,\n run_histories, self._options.write_run_histories_to)\n _log.debug('Finished writing JSON files.')\n\n def _copy_results_html_file(self, destination_dir, filename):\n \"\"\"Copies a file from the template directory to the results directory.\"\"\"\n files_to_copy = [filename, filename + '.version']\n template_dir = self._path_finder.path_from_blink_tools('blinkpy',\n 'web_tests')\n for filename in files_to_copy:\n source_path = self._filesystem.join(template_dir, filename)\n destination_path = self._filesystem.join(destination_dir, filename)\n if self._filesystem.exists(source_path):\n self._filesystem.copyfile(source_path, destination_path)\n\n def _stats_trie(self, initial_results):\n\n def _worker_number(worker_name):\n return int(worker_name.split('/')[1]) if worker_name else -1\n stats = {}\n for result in initial_results.results_by_name.values():\n if result.type != ResultType.Skip:\n stats[result.test_name] = {'results': (_worker_number(\n result.worker_name), result.test_number, result.pid,\n int(result.test_run_time * 1000), int(result.\n total_run_time * 1000))}\n stats_trie = {}\n for name, value in stats.items():\n json_results_generator.add_path_to_trie(name, value, stats_trie)\n return stats_trie\n",
"step-4": "<mask token>\n_log = logging.getLogger(__name__)\nTestExpectations = test_expectations.TestExpectations\n\n\nclass Manager(object):\n \"\"\"A class for managing running a series of web tests.\"\"\"\n HTTP_SUBDIR = 'http'\n PERF_SUBDIR = 'perf'\n WEBSOCKET_SUBDIR = 'websocket'\n ARCHIVED_RESULTS_LIMIT = 25\n\n def __init__(self, port, options, printer):\n \"\"\"Initializes test runner data structures.\n\n Args:\n port: An object implementing platform-specific functionality.\n options: An options argument which contains command line options.\n printer: A Printer object to record updates to.\n \"\"\"\n self._port = port\n self._filesystem = port.host.filesystem\n self._options = options\n self._printer = printer\n self._expectations = None\n self._http_server_started = False\n self._wptserve_started = False\n self._websockets_server_started = False\n self._results_directory = self._port.results_directory()\n self._artifacts_directory = self._port.artifacts_directory()\n self._finder = WebTestFinder(self._port, self._options)\n self._path_finder = PathFinder(port.host.filesystem)\n self._sink = CreateTestResultSink(self._port)\n self._runner = WebTestRunner(self._options, self._port, self.\n _printer, self._results_directory, self._test_is_slow, self._sink)\n\n def run(self, args):\n \"\"\"Runs the tests and return a RunDetails object with the results.\"\"\"\n start_time = time.time()\n self._printer.write_update('Collecting tests ...')\n running_all_tests = False\n try:\n paths, all_test_names, running_all_tests = self._collect_tests(args\n )\n except IOError:\n return test_run_results.RunDetails(exit_code=exit_codes.\n NO_TESTS_EXIT_STATUS)\n test_names = self._finder.split_into_chunks(all_test_names)\n if self._options.order == 'natural':\n test_names.sort(key=self._port.test_key)\n elif self._options.order == 'random':\n test_names.sort()\n random.Random(self._options.seed).shuffle(test_names)\n elif self._options.order == 'none':\n if paths:\n 
test_names = self._restore_order(paths, test_names)\n if not self._options.no_expectations:\n self._printer.write_update('Parsing expectations ...')\n self._expectations = test_expectations.TestExpectations(self._port)\n tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)\n self._printer.print_found(len(all_test_names), len(test_names), len\n (tests_to_run), self._options.repeat_each, self._options.iterations\n )\n if not tests_to_run:\n msg = 'No tests to run.'\n if self._options.zero_tests_executed_ok:\n _log.info(msg)\n else:\n _log.critical(msg)\n code = exit_codes.NO_TESTS_EXIT_STATUS\n return test_run_results.RunDetails(exit_code=code)\n exit_code = self._set_up_run(tests_to_run)\n if exit_code:\n return test_run_results.RunDetails(exit_code=exit_code)\n if self._options.num_retries is None:\n if self._options.test_list or len(paths) < len(test_names):\n self._options.num_retries = 3\n else:\n self._options.num_retries = 0\n should_retry_failures = self._options.num_retries > 0\n try:\n self._register_termination_handler()\n self._start_servers(tests_to_run)\n if self._options.watch:\n run_results = self._run_test_loop(tests_to_run, tests_to_skip)\n else:\n run_results = self._run_test_once(tests_to_run,\n tests_to_skip, should_retry_failures)\n initial_results, all_retry_results = run_results\n finally:\n _log.info('Finally stop servers and clean up')\n self._stop_servers()\n self._clean_up_run()\n if self._options.no_expectations:\n return test_run_results.RunDetails(0, [], [], initial_results,\n all_retry_results)\n self._printer.write_update('Looking for new crash logs ...')\n self._look_for_new_crash_logs(initial_results, start_time)\n for retry_attempt_results in all_retry_results:\n self._look_for_new_crash_logs(retry_attempt_results, start_time)\n self._printer.write_update('Summarizing results ...')\n summarized_full_results = test_run_results.summarize_results(self.\n _port, self._options, self._expectations, initial_results,\n 
all_retry_results)\n summarized_failing_results = test_run_results.summarize_results(self\n ._port, self._options, self._expectations, initial_results,\n all_retry_results, only_include_failing=True)\n run_histories = test_run_results.test_run_histories(self._options,\n self._expectations, initial_results, all_retry_results)\n exit_code = summarized_failing_results['num_regressions']\n if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:\n _log.warning('num regressions (%d) exceeds max exit status (%d)',\n exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)\n exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS\n if not self._options.dry_run:\n self._write_json_files(summarized_full_results,\n summarized_failing_results, initial_results,\n running_all_tests, run_histories)\n self._copy_results_html_file(self._artifacts_directory,\n 'results.html')\n if (initial_results.interrupt_reason is test_run_results.\n InterruptReason.EXTERNAL_SIGNAL):\n exit_code = exit_codes.INTERRUPTED_EXIT_STATUS\n else:\n if initial_results.interrupted:\n exit_code = exit_codes.EARLY_EXIT_STATUS\n if self._options.show_results and (exit_code or\n initial_results.total_failures):\n self._port.show_results_html_file(self._filesystem.join\n (self._artifacts_directory, 'results.html'))\n self._printer.print_results(time.time() - start_time,\n initial_results)\n return test_run_results.RunDetails(exit_code,\n summarized_full_results, summarized_failing_results,\n initial_results, all_retry_results)\n\n def _register_termination_handler(self):\n if self._port.host.platform.is_win():\n signum = signal.SIGBREAK\n else:\n signum = signal.SIGTERM\n signal.signal(signum, self._on_termination)\n\n def _on_termination(self, signum, _frame):\n self._printer.write_update('Received signal \"%s\" (%d) in %d' % (\n signal.strsignal(signum), signum, os.getpid()))\n raise KeyboardInterrupt\n\n def _run_test_loop(self, tests_to_run, tests_to_skip):\n self._options.show_results = False\n while True:\n initial_results, 
all_retry_results = self._run_test_once(\n tests_to_run, tests_to_skip, should_retry_failures=False)\n for name in initial_results.failures_by_name:\n failure = initial_results.failures_by_name[name][0]\n if isinstance(failure, test_failures.FailureTextMismatch):\n full_test_path = self._filesystem.join(self.\n _artifacts_directory, name)\n filename, _ = self._filesystem.splitext(full_test_path)\n pretty_diff_path = ('file://' + filename +\n '-pretty-diff.html')\n self._printer.writeln('Link to pretty diff:')\n self._printer.writeln(pretty_diff_path + '\\n')\n self._printer.writeln('Finished running tests')\n user_input = self._port.host.user.prompt(\n 'Interactive watch mode: (q)uit (r)etry\\n').lower()\n if user_input == 'q' or user_input == 'quit':\n return initial_results, all_retry_results\n\n def _run_test_once(self, tests_to_run, tests_to_skip, should_retry_failures\n ):\n num_workers = int(self._port.num_workers(int(self._options.\n child_processes)))\n initial_results = self._run_tests(tests_to_run, tests_to_skip, self\n ._options.repeat_each, self._options.iterations, num_workers)\n should_retry_failures = (should_retry_failures and not\n initial_results.interrupted)\n tests_to_retry = self._tests_to_retry(initial_results)\n all_retry_results = []\n if should_retry_failures and tests_to_retry:\n for retry_attempt in range(1, self._options.num_retries + 1):\n if not tests_to_retry:\n break\n _log.info('')\n _log.info('Retrying %s, attempt %d of %d...', grammar.\n pluralize('unexpected failure', len(tests_to_retry)),\n retry_attempt, self._options.num_retries)\n retry_results = self._run_tests(tests_to_retry,\n tests_to_skip=set(), repeat_each=1, iterations=1,\n num_workers=num_workers, retry_attempt=retry_attempt)\n all_retry_results.append(retry_results)\n tests_to_retry = self._tests_to_retry(retry_results)\n return initial_results, all_retry_results\n\n def _restore_order(self, paths, test_names):\n original_test_names = list(test_names)\n test_names = 
[]\n for path in paths:\n for test in original_test_names:\n if test.startswith(path) or fnmatch.fnmatch(test, path):\n test_names.append(test)\n test_names += list(set(original_test_names) - set(test_names))\n return test_names\n\n def _collect_tests(self, args):\n return self._finder.find_tests(args, test_lists=self._options.\n test_list, filter_files=self._options.\n isolated_script_test_filter_file, fastest_percentile=self.\n _options.fastest, filters=self._options.isolated_script_test_filter\n )\n\n def _is_http_test(self, test):\n return (test.startswith(self.HTTP_SUBDIR + self._port.\n TEST_PATH_SEPARATOR) or self._is_websocket_test(test) or self.\n _port.TEST_PATH_SEPARATOR + self.HTTP_SUBDIR + self._port.\n TEST_PATH_SEPARATOR in test)\n\n def _is_websocket_test(self, test):\n if self._port.should_use_wptserve(test):\n return False\n return self.WEBSOCKET_SUBDIR + self._port.TEST_PATH_SEPARATOR in test\n\n def _http_tests(self, test_names):\n return set(test for test in test_names if self._is_http_test(test))\n\n def _is_perf_test(self, test):\n return (self.PERF_SUBDIR == test or self.PERF_SUBDIR + self._port.\n TEST_PATH_SEPARATOR in test)\n\n def _prepare_lists(self, paths, test_names):\n tests_to_skip = self._finder.skip_tests(paths, test_names, self.\n _expectations)\n tests_to_run = [test for test in test_names if test not in\n tests_to_skip]\n return tests_to_run, tests_to_skip\n\n def _test_input_for_file(self, test_file, retry_attempt):\n return TestInput(test_file, self._options.slow_timeout_ms if self.\n _test_is_slow(test_file) else self._options.timeout_ms, self.\n _test_requires_lock(test_file), retry_attempt=retry_attempt)\n\n def _test_requires_lock(self, test_file):\n \"\"\"Returns True if the test needs to be locked when running multiple\n instances of this test runner.\n\n Perf tests are locked because heavy load caused by running other\n tests in parallel might cause some of them to time out.\n \"\"\"\n return 
self._is_perf_test(test_file)\n\n def _test_is_slow(self, test_file):\n if not self._expectations:\n return False\n is_slow_test = self._expectations.get_expectations(test_file\n ).is_slow_test\n return is_slow_test or self._port.is_slow_wpt_test(test_file)\n\n def _needs_servers(self, test_names):\n return any(self._is_http_test(test_name) for test_name in test_names)\n\n def _set_up_run(self, test_names):\n self._printer.write_update('Checking build ...')\n if self._options.build:\n exit_code = self._port.check_build(self._needs_servers(\n test_names), self._printer)\n if exit_code:\n _log.error('Build check failed')\n return exit_code\n if self._options.clobber_old_results:\n self._port.clobber_old_results()\n elif self._filesystem.exists(self._artifacts_directory):\n self._port.limit_archived_results_count()\n self._port.rename_results_folder()\n self._port.host.filesystem.maybe_make_directory(self.\n _artifacts_directory)\n exit_code = self._port.setup_test_run()\n if exit_code:\n _log.error('Build setup failed')\n return exit_code\n if not self._options.nocheck_sys_deps:\n self._printer.write_update('Checking system dependencies ...')\n exit_code = self._port.check_sys_deps()\n if exit_code:\n return exit_code\n return exit_codes.OK_EXIT_STATUS\n\n def _run_tests(self, tests_to_run, tests_to_skip, repeat_each,\n iterations, num_workers, retry_attempt=0):\n test_inputs = []\n for _ in range(iterations):\n for test in tests_to_run:\n for _ in range(repeat_each):\n test_inputs.append(self._test_input_for_file(test,\n retry_attempt))\n return self._runner.run_tests(self._expectations, test_inputs,\n tests_to_skip, num_workers, retry_attempt)\n\n def _start_servers(self, tests_to_run):\n if any(self._port.is_wpt_test(test) for test in tests_to_run):\n self._printer.write_update('Starting WPTServe ...')\n self._port.start_wptserve()\n self._wptserve_started = True\n if self._port.requires_http_server() or any(self._is_http_test(test\n ) for test in tests_to_run):\n 
self._printer.write_update('Starting HTTP server ...')\n self._port.start_http_server(additional_dirs={},\n number_of_drivers=self._options.max_locked_shards)\n self._http_server_started = True\n if any(self._is_websocket_test(test) for test in tests_to_run):\n self._printer.write_update('Starting WebSocket server ...')\n self._port.start_websocket_server()\n self._websockets_server_started = True\n\n def _stop_servers(self):\n if self._wptserve_started:\n self._printer.write_update('Stopping WPTServe ...')\n self._wptserve_started = False\n self._port.stop_wptserve()\n if self._http_server_started:\n self._printer.write_update('Stopping HTTP server ...')\n self._http_server_started = False\n self._port.stop_http_server()\n if self._websockets_server_started:\n self._printer.write_update('Stopping WebSocket server ...')\n self._websockets_server_started = False\n self._port.stop_websocket_server()\n\n def _clean_up_run(self):\n _log.debug('Flushing stdout')\n sys.stdout.flush()\n _log.debug('Flushing stderr')\n sys.stderr.flush()\n _log.debug('Cleaning up port')\n self._port.clean_up_test_run()\n if self._sink:\n _log.debug('Closing sink')\n self._sink.close()\n\n def _look_for_new_crash_logs(self, run_results, start_time):\n \"\"\"Looks for and writes new crash logs, at the end of the test run.\n\n Since crash logs can take a long time to be written out if the system is\n under stress, do a second pass at the end of the test run.\n\n Args:\n run_results: The results of the test run.\n start_time: Time the tests started at. 
We're looking for crash\n logs after that time.\n \"\"\"\n crashed_processes = []\n test_to_crash_failure = {}\n test_failures.AbstractTestResultType.port = self._port\n test_failures.AbstractTestResultType.result_directory = (self.\n _results_directory)\n test_failures.AbstractTestResultType.filesystem = self._filesystem\n for test, result in run_results.unexpected_results_by_name.items():\n if result.type != ResultType.Crash:\n continue\n for failure in result.failures:\n if not isinstance(failure, test_failures.FailureCrash\n ) or failure.has_log:\n continue\n crashed_processes.append([test, failure.process_name,\n failure.pid])\n test_to_crash_failure[test] = failure\n sample_files = self._port.look_for_new_samples(crashed_processes,\n start_time) or {}\n for test, sample_file in sample_files.items():\n test_failures.AbstractTestResultType.test_name = test\n test_result = run_results.unexpected_results_by_name[test]\n artifact_relative_path = self._port.output_filename(test,\n test_failures.FILENAME_SUFFIX_SAMPLE, '.txt')\n artifacts_sub_dir = test_result.artifacts.ArtifactsSubDirectory()\n artifact_abspath = self._filesystem.join(self.\n _results_directory, artifacts_sub_dir, artifact_relative_path)\n self._filesystem.maybe_make_directory(self._filesystem.dirname(\n artifact_abspath))\n self._filesystem.copyfile(sample_file, artifact_abspath)\n test_result.artifacts.AddArtifact('sample_file', self.\n _filesystem.join(artifacts_sub_dir, artifact_relative_path))\n new_crash_logs = self._port.look_for_new_crash_logs(crashed_processes,\n start_time) or {}\n for test, (crash_log, crash_site) in new_crash_logs.items():\n test_failures.AbstractTestResultType.test_name = test\n failure.crash_log = crash_log\n failure.has_log = self._port.output_contains_sanitizer_messages(\n failure.crash_log)\n test_result = run_results.unexpected_results_by_name[test]\n test_result.crash_site = crash_site\n test_to_crash_failure[test].create_artifacts(test_result.\n artifacts, 
force_overwrite=True)\n\n def _tests_to_retry(self, run_results):\n return [result.test_name for result in run_results.\n unexpected_results_by_name.values() if result.type !=\n ResultType.Pass]\n\n def _write_json_files(self, summarized_full_results,\n summarized_failing_results, initial_results, running_all_tests,\n run_histories):\n _log.debug('Writing JSON files in %s.', self._artifacts_directory)\n times_trie = json_results_generator.test_timings_trie(initial_results\n .results_by_name.values())\n times_json_path = self._filesystem.join(self._artifacts_directory,\n 'times_ms.json')\n json_results_generator.write_json(self._filesystem, times_trie,\n times_json_path)\n if running_all_tests:\n bot_test_times_path = self._port.bot_test_times_path()\n self._filesystem.maybe_make_directory(self._filesystem.dirname(\n bot_test_times_path))\n json_results_generator.write_json(self._filesystem, times_trie,\n bot_test_times_path)\n stats_trie = self._stats_trie(initial_results)\n stats_path = self._filesystem.join(self._artifacts_directory,\n 'stats.json')\n self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))\n full_results_path = self._filesystem.join(self._artifacts_directory,\n 'full_results.json')\n json_results_generator.write_json(self._filesystem,\n summarized_full_results, full_results_path)\n full_results_jsonp_path = self._filesystem.join(self.\n _artifacts_directory, 'full_results_jsonp.js')\n json_results_generator.write_json(self._filesystem,\n summarized_full_results, full_results_jsonp_path, callback=\n 'ADD_FULL_RESULTS')\n failing_results_path = self._filesystem.join(self.\n _artifacts_directory, 'failing_results.json')\n json_results_generator.write_json(self._filesystem,\n summarized_failing_results, failing_results_path, callback=\n 'ADD_RESULTS')\n if self._options.json_test_results:\n json_results_generator.write_json(self._filesystem,\n summarized_full_results, self._options.json_test_results)\n if 
self._options.write_run_histories_to:\n json_results_generator.write_json(self._filesystem,\n run_histories, self._options.write_run_histories_to)\n _log.debug('Finished writing JSON files.')\n\n def _copy_results_html_file(self, destination_dir, filename):\n \"\"\"Copies a file from the template directory to the results directory.\"\"\"\n files_to_copy = [filename, filename + '.version']\n template_dir = self._path_finder.path_from_blink_tools('blinkpy',\n 'web_tests')\n for filename in files_to_copy:\n source_path = self._filesystem.join(template_dir, filename)\n destination_path = self._filesystem.join(destination_dir, filename)\n if self._filesystem.exists(source_path):\n self._filesystem.copyfile(source_path, destination_path)\n\n def _stats_trie(self, initial_results):\n\n def _worker_number(worker_name):\n return int(worker_name.split('/')[1]) if worker_name else -1\n stats = {}\n for result in initial_results.results_by_name.values():\n if result.type != ResultType.Skip:\n stats[result.test_name] = {'results': (_worker_number(\n result.worker_name), result.test_number, result.pid,\n int(result.test_run_time * 1000), int(result.\n total_run_time * 1000))}\n stats_trie = {}\n for name, value in stats.items():\n json_results_generator.add_path_to_trie(name, value, stats_trie)\n return stats_trie\n",
"step-5": "# Copyright (C) 2010 Google Inc. All rights reserved.\n# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following disclaimer\n# in the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Google Inc. nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"The Manager orchestrates the overall process of running web tests.\n\nThis includes finding tests to run, reading the test expectations,\nstarting the required helper servers, deciding the order and way to\nrun the tests, retrying failed tests, and collecting the test results,\nincluding crash logs and mismatches with expectations.\n\nThe Manager object has a constructor and one main method called run.\n\"\"\"\n\nimport fnmatch\nimport json\nimport logging\nimport os\nimport random\nimport signal\nimport sys\nimport time\n\nfrom blinkpy.common import exit_codes\nfrom blinkpy.common.path_finder import PathFinder\nfrom blinkpy.tool import grammar\nfrom blinkpy.web_tests.controllers.test_result_sink import CreateTestResultSink\nfrom blinkpy.web_tests.controllers.web_test_finder import WebTestFinder\nfrom blinkpy.web_tests.controllers.web_test_runner import WebTestRunner\nfrom blinkpy.web_tests.layout_package import json_results_generator\nfrom blinkpy.web_tests.models import test_expectations\nfrom blinkpy.web_tests.models import test_failures\nfrom blinkpy.web_tests.models import test_run_results\nfrom blinkpy.web_tests.models.typ_types import ResultType\nfrom blinkpy.web_tests.models.test_input import TestInput\n\n_log = logging.getLogger(__name__)\n\nTestExpectations = test_expectations.TestExpectations\n\n\nclass Manager(object):\n \"\"\"A class for managing running a series of web tests.\"\"\"\n\n HTTP_SUBDIR = 'http'\n PERF_SUBDIR 
= 'perf'\n WEBSOCKET_SUBDIR = 'websocket'\n ARCHIVED_RESULTS_LIMIT = 25\n\n def __init__(self, port, options, printer):\n \"\"\"Initializes test runner data structures.\n\n Args:\n port: An object implementing platform-specific functionality.\n options: An options argument which contains command line options.\n printer: A Printer object to record updates to.\n \"\"\"\n self._port = port\n self._filesystem = port.host.filesystem\n self._options = options\n self._printer = printer\n\n self._expectations = None\n self._http_server_started = False\n self._wptserve_started = False\n self._websockets_server_started = False\n\n self._results_directory = self._port.results_directory()\n self._artifacts_directory = self._port.artifacts_directory()\n self._finder = WebTestFinder(self._port, self._options)\n self._path_finder = PathFinder(port.host.filesystem)\n\n self._sink = CreateTestResultSink(self._port)\n self._runner = WebTestRunner(self._options, self._port, self._printer,\n self._results_directory,\n self._test_is_slow, self._sink)\n\n def run(self, args):\n \"\"\"Runs the tests and return a RunDetails object with the results.\"\"\"\n start_time = time.time()\n self._printer.write_update('Collecting tests ...')\n running_all_tests = False\n\n try:\n paths, all_test_names, running_all_tests = self._collect_tests(\n args)\n except IOError:\n # This is raised if --test-list doesn't exist\n return test_run_results.RunDetails(\n exit_code=exit_codes.NO_TESTS_EXIT_STATUS)\n\n test_names = self._finder.split_into_chunks(all_test_names)\n if self._options.order == 'natural':\n test_names.sort(key=self._port.test_key)\n elif self._options.order == 'random':\n test_names.sort()\n random.Random(self._options.seed).shuffle(test_names)\n elif self._options.order == 'none':\n # Restore the test order to user specified order.\n # base.tests() may change the order as it returns tests in the\n # real, external/wpt, virtual order.\n if paths:\n test_names = self._restore_order(paths, 
test_names)\n\n if not self._options.no_expectations:\n self._printer.write_update('Parsing expectations ...')\n self._expectations = test_expectations.TestExpectations(self._port)\n\n tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)\n\n self._printer.print_found(\n len(all_test_names), len(test_names), len(tests_to_run),\n self._options.repeat_each, self._options.iterations)\n\n # Check to make sure we're not skipping every test.\n if not tests_to_run:\n msg = 'No tests to run.'\n if self._options.zero_tests_executed_ok:\n _log.info(msg)\n # Keep executing to produce valid (but empty) results.\n else:\n _log.critical(msg)\n code = exit_codes.NO_TESTS_EXIT_STATUS\n return test_run_results.RunDetails(exit_code=code)\n\n exit_code = self._set_up_run(tests_to_run)\n if exit_code:\n return test_run_results.RunDetails(exit_code=exit_code)\n\n if self._options.num_retries is None:\n # If --test-list is passed, or if no test narrowing is specified,\n # default to 3 retries. Otherwise [e.g. 
if tests are being passed by\n # name], default to 0 retries.\n if self._options.test_list or len(paths) < len(test_names):\n self._options.num_retries = 3\n else:\n self._options.num_retries = 0\n\n should_retry_failures = self._options.num_retries > 0\n\n try:\n self._register_termination_handler()\n self._start_servers(tests_to_run)\n if self._options.watch:\n run_results = self._run_test_loop(tests_to_run, tests_to_skip)\n else:\n run_results = self._run_test_once(tests_to_run, tests_to_skip,\n should_retry_failures)\n initial_results, all_retry_results = run_results\n finally:\n _log.info(\"Finally stop servers and clean up\")\n self._stop_servers()\n self._clean_up_run()\n\n if self._options.no_expectations:\n return test_run_results.RunDetails(0, [], [], initial_results,\n all_retry_results)\n\n # Some crash logs can take a long time to be written out so look\n # for new logs after the test run finishes.\n self._printer.write_update('Looking for new crash logs ...')\n self._look_for_new_crash_logs(initial_results, start_time)\n for retry_attempt_results in all_retry_results:\n self._look_for_new_crash_logs(retry_attempt_results, start_time)\n\n self._printer.write_update('Summarizing results ...')\n summarized_full_results = test_run_results.summarize_results(\n self._port, self._options, self._expectations, initial_results,\n all_retry_results)\n summarized_failing_results = test_run_results.summarize_results(\n self._port,\n self._options,\n self._expectations,\n initial_results,\n all_retry_results,\n only_include_failing=True)\n run_histories = test_run_results.test_run_histories(\n self._options, self._expectations, initial_results,\n all_retry_results)\n\n exit_code = summarized_failing_results['num_regressions']\n if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:\n _log.warning('num regressions (%d) exceeds max exit status (%d)',\n exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)\n exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS\n\n if not 
self._options.dry_run:\n self._write_json_files(summarized_full_results,\n summarized_failing_results, initial_results,\n running_all_tests, run_histories)\n\n self._copy_results_html_file(self._artifacts_directory,\n 'results.html')\n if (initial_results.interrupt_reason is\n test_run_results.InterruptReason.EXTERNAL_SIGNAL):\n exit_code = exit_codes.INTERRUPTED_EXIT_STATUS\n else:\n if initial_results.interrupted:\n exit_code = exit_codes.EARLY_EXIT_STATUS\n if (self._options.show_results\n and (exit_code or initial_results.total_failures)):\n self._port.show_results_html_file(\n self._filesystem.join(self._artifacts_directory,\n 'results.html'))\n self._printer.print_results(time.time() - start_time,\n initial_results)\n\n return test_run_results.RunDetails(exit_code, summarized_full_results,\n summarized_failing_results,\n initial_results, all_retry_results)\n\n def _register_termination_handler(self):\n if self._port.host.platform.is_win():\n signum = signal.SIGBREAK\n else:\n signum = signal.SIGTERM\n signal.signal(signum, self._on_termination)\n\n def _on_termination(self, signum, _frame):\n self._printer.write_update(\n 'Received signal \"%s\" (%d) in %d' %\n (signal.strsignal(signum), signum, os.getpid()))\n raise KeyboardInterrupt\n\n def _run_test_loop(self, tests_to_run, tests_to_skip):\n # Don't show results in a new browser window because we're already\n # printing the link to diffs in the loop\n self._options.show_results = False\n\n while True:\n initial_results, all_retry_results = self._run_test_once(\n tests_to_run, tests_to_skip, should_retry_failures=False)\n for name in initial_results.failures_by_name:\n failure = initial_results.failures_by_name[name][0]\n if isinstance(failure, test_failures.FailureTextMismatch):\n full_test_path = self._filesystem.join(\n self._artifacts_directory, name)\n filename, _ = self._filesystem.splitext(full_test_path)\n pretty_diff_path = 'file://' + filename + '-pretty-diff.html'\n self._printer.writeln('Link to 
pretty diff:')\n self._printer.writeln(pretty_diff_path + '\\n')\n self._printer.writeln('Finished running tests')\n\n user_input = self._port.host.user.prompt(\n 'Interactive watch mode: (q)uit (r)etry\\n').lower()\n\n if user_input == 'q' or user_input == 'quit':\n return (initial_results, all_retry_results)\n\n def _run_test_once(self, tests_to_run, tests_to_skip,\n should_retry_failures):\n num_workers = int(\n self._port.num_workers(int(self._options.child_processes)))\n\n initial_results = self._run_tests(\n tests_to_run, tests_to_skip, self._options.repeat_each,\n self._options.iterations, num_workers)\n\n # Don't retry failures when interrupted by user or failures limit exception.\n should_retry_failures = (should_retry_failures\n and not initial_results.interrupted)\n\n tests_to_retry = self._tests_to_retry(initial_results)\n all_retry_results = []\n if should_retry_failures and tests_to_retry:\n for retry_attempt in range(1, self._options.num_retries + 1):\n if not tests_to_retry:\n break\n\n _log.info('')\n _log.info(\n 'Retrying %s, attempt %d of %d...',\n grammar.pluralize('unexpected failure',\n len(tests_to_retry)), retry_attempt,\n self._options.num_retries)\n\n retry_results = self._run_tests(\n tests_to_retry,\n tests_to_skip=set(),\n repeat_each=1,\n iterations=1,\n num_workers=num_workers,\n retry_attempt=retry_attempt)\n all_retry_results.append(retry_results)\n\n tests_to_retry = self._tests_to_retry(retry_results)\n return (initial_results, all_retry_results)\n\n def _restore_order(self, paths, test_names):\n original_test_names = list(test_names)\n test_names = []\n for path in paths:\n for test in original_test_names:\n if test.startswith(path) or fnmatch.fnmatch(test, path):\n test_names.append(test)\n test_names += list(set(original_test_names) - set(test_names))\n return test_names\n\n def _collect_tests(self, args):\n return self._finder.find_tests(\n args,\n test_lists=self._options.test_list,\n 
filter_files=self._options.isolated_script_test_filter_file,\n fastest_percentile=self._options.fastest,\n filters=self._options.isolated_script_test_filter)\n\n def _is_http_test(self, test):\n return (\n test.startswith(self.HTTP_SUBDIR + self._port.TEST_PATH_SEPARATOR)\n or self._is_websocket_test(test) or self._port.TEST_PATH_SEPARATOR\n + self.HTTP_SUBDIR + self._port.TEST_PATH_SEPARATOR in test)\n\n def _is_websocket_test(self, test):\n if self._port.should_use_wptserve(test):\n return False\n\n return self.WEBSOCKET_SUBDIR + self._port.TEST_PATH_SEPARATOR in test\n\n def _http_tests(self, test_names):\n return set(test for test in test_names if self._is_http_test(test))\n\n def _is_perf_test(self, test):\n return (self.PERF_SUBDIR == test\n or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test)\n\n def _prepare_lists(self, paths, test_names):\n tests_to_skip = self._finder.skip_tests(paths, test_names,\n self._expectations)\n tests_to_run = [\n test for test in test_names if test not in tests_to_skip\n ]\n\n return tests_to_run, tests_to_skip\n\n def _test_input_for_file(self, test_file, retry_attempt):\n return TestInput(\n test_file,\n self._options.slow_timeout_ms\n if self._test_is_slow(test_file) else self._options.timeout_ms,\n self._test_requires_lock(test_file),\n retry_attempt=retry_attempt)\n\n def _test_requires_lock(self, test_file):\n \"\"\"Returns True if the test needs to be locked when running multiple\n instances of this test runner.\n\n Perf tests are locked because heavy load caused by running other\n tests in parallel might cause some of them to time out.\n \"\"\"\n return self._is_perf_test(test_file)\n\n def _test_is_slow(self, test_file):\n if not self._expectations:\n return False\n is_slow_test = self._expectations.get_expectations(\n test_file).is_slow_test\n return is_slow_test or self._port.is_slow_wpt_test(test_file)\n\n def _needs_servers(self, test_names):\n return any(\n self._is_http_test(test_name) for test_name in 
test_names)\n\n def _set_up_run(self, test_names):\n self._printer.write_update('Checking build ...')\n if self._options.build:\n exit_code = self._port.check_build(\n self._needs_servers(test_names), self._printer)\n if exit_code:\n _log.error('Build check failed')\n return exit_code\n\n if self._options.clobber_old_results:\n self._port.clobber_old_results()\n elif self._filesystem.exists(self._artifacts_directory):\n self._port.limit_archived_results_count()\n # Rename the existing results folder for archiving.\n self._port.rename_results_folder()\n\n # Create the output directory if it doesn't already exist.\n self._port.host.filesystem.maybe_make_directory(\n self._artifacts_directory)\n\n exit_code = self._port.setup_test_run()\n if exit_code:\n _log.error('Build setup failed')\n return exit_code\n\n # Check that the system dependencies (themes, fonts, ...) are correct.\n if not self._options.nocheck_sys_deps:\n self._printer.write_update('Checking system dependencies ...')\n exit_code = self._port.check_sys_deps()\n if exit_code:\n return exit_code\n\n return exit_codes.OK_EXIT_STATUS\n\n def _run_tests(self,\n tests_to_run,\n tests_to_skip,\n repeat_each,\n iterations,\n num_workers,\n retry_attempt=0):\n\n test_inputs = []\n for _ in range(iterations):\n for test in tests_to_run:\n for _ in range(repeat_each):\n test_inputs.append(\n self._test_input_for_file(test, retry_attempt))\n return self._runner.run_tests(self._expectations, test_inputs,\n tests_to_skip, num_workers,\n retry_attempt)\n\n def _start_servers(self, tests_to_run):\n if any(self._port.is_wpt_test(test) for test in tests_to_run):\n self._printer.write_update('Starting WPTServe ...')\n self._port.start_wptserve()\n self._wptserve_started = True\n\n if (self._port.requires_http_server()\n or any(self._is_http_test(test) for test in tests_to_run)):\n self._printer.write_update('Starting HTTP server ...')\n self._port.start_http_server(\n additional_dirs={},\n 
number_of_drivers=self._options.max_locked_shards)\n self._http_server_started = True\n\n if any(self._is_websocket_test(test) for test in tests_to_run):\n self._printer.write_update('Starting WebSocket server ...')\n self._port.start_websocket_server()\n self._websockets_server_started = True\n\n def _stop_servers(self):\n if self._wptserve_started:\n self._printer.write_update('Stopping WPTServe ...')\n self._wptserve_started = False\n self._port.stop_wptserve()\n if self._http_server_started:\n self._printer.write_update('Stopping HTTP server ...')\n self._http_server_started = False\n self._port.stop_http_server()\n if self._websockets_server_started:\n self._printer.write_update('Stopping WebSocket server ...')\n self._websockets_server_started = False\n self._port.stop_websocket_server()\n\n def _clean_up_run(self):\n _log.debug('Flushing stdout')\n sys.stdout.flush()\n _log.debug('Flushing stderr')\n sys.stderr.flush()\n _log.debug('Cleaning up port')\n self._port.clean_up_test_run()\n if self._sink:\n _log.debug('Closing sink')\n self._sink.close()\n\n def _look_for_new_crash_logs(self, run_results, start_time):\n \"\"\"Looks for and writes new crash logs, at the end of the test run.\n\n Since crash logs can take a long time to be written out if the system is\n under stress, do a second pass at the end of the test run.\n\n Args:\n run_results: The results of the test run.\n start_time: Time the tests started at. 
We're looking for crash\n logs after that time.\n \"\"\"\n crashed_processes = []\n test_to_crash_failure = {}\n\n # reset static variables for Failure type classes\n test_failures.AbstractTestResultType.port = self._port\n test_failures.AbstractTestResultType.result_directory = self._results_directory\n test_failures.AbstractTestResultType.filesystem = self._filesystem\n\n for test, result in run_results.unexpected_results_by_name.items():\n if result.type != ResultType.Crash:\n continue\n for failure in result.failures:\n if (not isinstance(failure, test_failures.FailureCrash)\n or failure.has_log):\n continue\n crashed_processes.append(\n [test, failure.process_name, failure.pid])\n test_to_crash_failure[test] = failure\n\n sample_files = self._port.look_for_new_samples(crashed_processes,\n start_time) or {}\n for test, sample_file in sample_files.items():\n test_failures.AbstractTestResultType.test_name = test\n test_result = run_results.unexpected_results_by_name[test]\n artifact_relative_path = self._port.output_filename(\n test, test_failures.FILENAME_SUFFIX_SAMPLE, '.txt')\n artifacts_sub_dir = test_result.artifacts.ArtifactsSubDirectory()\n artifact_abspath = self._filesystem.join(self._results_directory,\n artifacts_sub_dir,\n artifact_relative_path)\n self._filesystem.maybe_make_directory(\n self._filesystem.dirname(artifact_abspath))\n self._filesystem.copyfile(sample_file, artifact_abspath)\n test_result.artifacts.AddArtifact(\n 'sample_file',\n self._filesystem.join(artifacts_sub_dir,\n artifact_relative_path))\n\n new_crash_logs = self._port.look_for_new_crash_logs(\n crashed_processes, start_time) or {}\n for test, (crash_log, crash_site) in new_crash_logs.items():\n test_failures.AbstractTestResultType.test_name = test\n failure.crash_log = crash_log\n failure.has_log = self._port.output_contains_sanitizer_messages(\n failure.crash_log)\n test_result = run_results.unexpected_results_by_name[test]\n test_result.crash_site = crash_site\n 
test_to_crash_failure[test].create_artifacts(\n test_result.artifacts, force_overwrite=True)\n\n def _tests_to_retry(self, run_results):\n # TODO(ojan): This should also check that result.type != test_expectations.MISSING\n # since retrying missing expectations is silly. But that's a bit tricky since we\n # only consider the last retry attempt for the count of unexpected regressions.\n return [\n result.test_name\n for result in run_results.unexpected_results_by_name.values()\n if result.type != ResultType.Pass\n ]\n\n def _write_json_files(self, summarized_full_results,\n summarized_failing_results, initial_results,\n running_all_tests, run_histories):\n _log.debug(\"Writing JSON files in %s.\", self._artifacts_directory)\n\n # FIXME: Upload stats.json to the server and delete times_ms.\n times_trie = json_results_generator.test_timings_trie(\n initial_results.results_by_name.values())\n times_json_path = self._filesystem.join(self._artifacts_directory,\n 'times_ms.json')\n json_results_generator.write_json(self._filesystem, times_trie,\n times_json_path)\n\n # Save out the times data so we can use it for --fastest in the future.\n if running_all_tests:\n bot_test_times_path = self._port.bot_test_times_path()\n self._filesystem.maybe_make_directory(\n self._filesystem.dirname(bot_test_times_path))\n json_results_generator.write_json(self._filesystem, times_trie,\n bot_test_times_path)\n\n stats_trie = self._stats_trie(initial_results)\n stats_path = self._filesystem.join(self._artifacts_directory,\n 'stats.json')\n self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))\n\n full_results_path = self._filesystem.join(self._artifacts_directory,\n 'full_results.json')\n json_results_generator.write_json(\n self._filesystem, summarized_full_results, full_results_path)\n\n full_results_jsonp_path = self._filesystem.join(\n self._artifacts_directory, 'full_results_jsonp.js')\n json_results_generator.write_json(\n self._filesystem,\n 
summarized_full_results,\n full_results_jsonp_path,\n callback='ADD_FULL_RESULTS')\n failing_results_path = self._filesystem.join(self._artifacts_directory,\n 'failing_results.json')\n # We write failing_results.json out as jsonp because we need to load it\n # from a file url for results.html and Chromium doesn't allow that.\n json_results_generator.write_json(\n self._filesystem,\n summarized_failing_results,\n failing_results_path,\n callback='ADD_RESULTS')\n\n if self._options.json_test_results:\n json_results_generator.write_json(self._filesystem,\n summarized_full_results,\n self._options.json_test_results)\n if self._options.write_run_histories_to:\n json_results_generator.write_json(\n self._filesystem, run_histories,\n self._options.write_run_histories_to)\n\n _log.debug('Finished writing JSON files.')\n\n def _copy_results_html_file(self, destination_dir, filename):\n \"\"\"Copies a file from the template directory to the results directory.\"\"\"\n files_to_copy = [filename, filename + \".version\"]\n template_dir = self._path_finder.path_from_blink_tools(\n 'blinkpy', 'web_tests')\n for filename in files_to_copy:\n source_path = self._filesystem.join(template_dir, filename)\n destination_path = self._filesystem.join(destination_dir, filename)\n # Note that the results.html template file won't exist when\n # we're using a MockFileSystem during unit tests, so make sure\n # it exists before we try to copy it.\n if self._filesystem.exists(source_path):\n self._filesystem.copyfile(source_path, destination_path)\n\n def _stats_trie(self, initial_results):\n def _worker_number(worker_name):\n return int(worker_name.split('/')[1]) if worker_name else -1\n\n stats = {}\n for result in initial_results.results_by_name.values():\n if result.type != ResultType.Skip:\n stats[result.test_name] = {\n 'results': (_worker_number(result.worker_name),\n result.test_number, result.pid,\n int(result.test_run_time * 1000),\n int(result.total_run_time * 1000))\n }\n stats_trie = 
{}\n for name, value in stats.items():\n json_results_generator.add_path_to_trie(name, value, stats_trie)\n return stats_trie\n",
"step-ids": [
14,
20,
25,
31,
33
]
}
|
[
14,
20,
25,
31,
33
] |
import sqlite3
def to_string(pessoa):
    """Print the id and name of each person row, one field per line.

    Args:
        pessoa: Iterable of indexable rows (e.g. tuples from
            ``cursor.fetchall()``) with the id at position 0 and the
            name at position 1.

    Note: despite the name, this prints rather than returning a string.
    """
    for linha in pessoa:
        # f-string is the modern equivalent of the original str.format() call.
        print(f'id: {linha[0]}\nNome: {linha[1]}')
if __name__ == '__main__':
    # Demo: dump the whole Pessoa table, then look up one person by name.
    con = sqlite3.connect('lab05-ex01.sqlite')
    try:
        cursor = con.cursor()
        try:
            cursor.execute("SELECT * FROM Pessoa")
            print(cursor.fetchall())

            nome = input("Nome da pessoa: ")
            # Parameterized query: never interpolate user input into SQL.
            cursor.execute("SELECT * FROM Pessoa WHERE nome = ?", (nome,))
            to_string(cursor.fetchall())
        finally:
            # Original leaked the cursor/connection if any call above raised;
            # try/finally guarantees cleanup on both success and error paths.
            cursor.close()
    finally:
        con.close()
|
normal
|
{
"blob_id": "4246773a8da61ff21d5faa8ab8ad2d7e75fafb60",
"index": 3058,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef to_string(pessoa):\n for linha in pessoa:\n print('id: {}\\nNome: {}'.format(linha[0], linha[1]))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef to_string(pessoa):\n for linha in pessoa:\n print('id: {}\\nNome: {}'.format(linha[0], linha[1]))\n\n\nif __name__ == '__main__':\n con = sqlite3.connect('lab05-ex01.sqlite')\n cursor = con.cursor()\n cursor.execute('SELECT * FROM Pessoa')\n print(cursor.fetchall())\n nome = input('Nome da pessoa: ')\n clausula = nome,\n cursor.execute('SELECT * FROM Pessoa WHERE nome = ?', clausula)\n pessoa = cursor.fetchall()\n to_string(pessoa)\n cursor.close()\n con.close()\n",
"step-4": "import sqlite3\n\n\ndef to_string(pessoa):\n for linha in pessoa:\n print('id: {}\\nNome: {}'.format(linha[0], linha[1]))\n\n\nif __name__ == '__main__':\n con = sqlite3.connect('lab05-ex01.sqlite')\n cursor = con.cursor()\n cursor.execute('SELECT * FROM Pessoa')\n print(cursor.fetchall())\n nome = input('Nome da pessoa: ')\n clausula = nome,\n cursor.execute('SELECT * FROM Pessoa WHERE nome = ?', clausula)\n pessoa = cursor.fetchall()\n to_string(pessoa)\n cursor.close()\n con.close()\n",
"step-5": "import sqlite3\n\n\ndef to_string(pessoa):\n for linha in pessoa:\n print('id: {}\\nNome: {}'.format(linha[0], linha[1]))\n\nif __name__ == '__main__':\n\n con = sqlite3.connect('lab05-ex01.sqlite')\n\n cursor = con.cursor()\n\n cursor.execute(\"SELECT * FROM Pessoa\")\n print(cursor.fetchall())\n\n nome = input(\"Nome da pessoa: \")\n clausula = (nome,)\n\n cursor.execute(\"SELECT * FROM Pessoa WHERE nome = ?\", clausula)\n pessoa = cursor.fetchall()\n to_string(pessoa)\n\n\n cursor.close()\n con.close()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.